// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"fmt"
	"io"
	"math"
)

const (
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly = -2
)
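
// As an illustration of HuffmanOnly, a minimal usage sketch (buf, zw, and
// alreadyLZCompressed are hypothetical names):
//
//	var buf bytes.Buffer
//	zw, err := NewWriter(&buf, HuffmanOnly)
//	if err != nil {
//		log.Fatal(err)
//	}
//	zw.Write(alreadyLZCompressed) // e.g. a Snappy- or LZ4-compressed payload
//	zw.Close()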

const (
	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1

	// The LZ77 step produces a sequence of literal tokens and <length, offset>
	// pair tokens. The offset is also known as distance. The underlying wire
	// format limits the range of lengths and offsets. For example, there are
	// 256 legitimate lengths: those in the range [3, 258]. This package's
	// compressor uses a higher minimum match length, enabling optimizations
	// such as finding matches via 32-bit loads and compares.
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	minMatchLength  = 4       // The smallest match length that the compressor actually emits
	maxMatchLength  = 258     // The largest match length
	baseMatchOffset = 1       // The smallest match offset
	maxMatchOffset  = 1 << 15 // The largest match offset

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	maxHashOffset       = 1 << 24

	skipNever = math.MaxInt32
)
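
// As a worked example of the base values above: a match of length 10 at a
// distance of 300 bytes is emitted (see deflate below) as
//
//	matchToken(uint32(10-baseMatchLength), uint32(300-baseMatchOffset))
//
// that is, the token stores length-3 and offset-1, covering the ranges
// [3, 258] and [1, 32768] that RFC 1951 allows.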

// compressionLevel holds the tuning parameters for a compression level.
// Roughly: good is the match length at which search effort is reduced,
// lazy is the match length below which a better match is still sought at
// the next position, nice is the match length at which searching stops
// entirely, chain bounds how many hash-chain entries are examined, and
// fastSkipHashing selects the greedy code path for levels 2-3 (skipNever
// selects lazy matching).
type compressionLevel struct {
	level, good, lazy, nice, chain, fastSkipHashing int
}

var levels = []compressionLevel{
	{0, 0, 0, 0, 0, 0}, // NoCompression.
	{1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
	// For levels 2-3 we don't bother trying with lazy matches.
	{2, 4, 0, 16, 8, 5},
	{3, 4, 0, 32, 32, 6},
	// Levels 4-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 4, 4, 16, 16, skipNever},
	{5, 8, 16, 32, 32, skipNever},
	{6, 8, 16, 128, 128, skipNever},
	{7, 8, 32, 128, 256, skipNever},
	{8, 32, 128, 258, 1024, skipNever},
	{9, 32, 258, 258, 4096, skipNever},
}

type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []uint32)

	// compression algorithm
	fill      func(*compressor, []byte) int // copy data to window
	step      func(*compressor)             // process window
	sync      bool                          // requesting flush
	bestSpeed *deflateFast                  // Encoder for BestSpeed

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value.
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].

	// queued output tokens
	tokens []token

	// deflate state
	length         int
	offset         int
	hash           uint32
	maxInsertIndex int
	err            error

	// hashMatch must be able to contain hashes for the maximum match length.
	hashMatch [maxMatchLength - 1]uint32
}
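
// A minimal sketch of how the chains above are walked (see findMatch below),
// assuming h is the hash of the 4 bytes at the current position pos:
//
//	i := int(d.hashHead[h&hashMask]) - d.hashOffset // most recent position with hash h
//	for i >= 0 && i >= pos-windowSize {
//		// ... compare d.window[i:] against d.window[pos:] ...
//		i = int(d.hashPrev[i&windowMask]) - d.hashOffset // next-older position
//	}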

func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window, d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta

			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range d.hashPrev[:] {
				if int(v) > delta {
					d.hashPrev[i] = uint32(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead[:] {
				if int(v) > delta {
					d.hashHead[i] = uint32(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

func (d *compressor) writeBlock(tokens []token, index int) error {
	if index > 0 {
		var window []byte
		if d.blockStart <= index {
			window = d.window[d.blockStart:index]
		}
		d.blockStart = index
		d.w.writeBlock(tokens, false, window)
		return d.w.err
	}
	return nil
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode.
	if d.compressionLevel.level < 2 {
		return
	}
	if d.index != 0 || d.windowEnd != 0 {
		panic("internal error: fillWindow called with stale data")
	}

	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window, b)

	// Calculate 256 hashes at a time (more L1 cache hits).
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		index := j * 256
		end := index + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		toCheck := d.window[index:end]
		dstSize := len(toCheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := d.hashMatch[:dstSize]
		d.bulkHasher(toCheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + index
			newH = val
			hh := &d.hashHead[newH&hashMask]
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = *hh
			// Set the head of the hash chain to us.
			*hh = uint32(di + d.hashOffset)
		}
		d.hash = newH
	}
	// Update window information.
	d.windowEnd = n
	d.index = n
}

// findMatch tries to find a match starting at pos whose length is greater
// than prevLength. It examines at most d.chain candidate positions before
// giving up.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long.
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:], wPos, minMatchLook)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
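
// For reference, deflate (below) invokes findMatch as
//
//	newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead)
//
// so the search starts from prevLength = minMatchLength-1, and any match of
// at least minMatchLength bytes improves on it.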

func (d *compressor) writeStoredBlock(buf []byte) error {
	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}

const hashmul = 0x1e35a7bd

// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
}
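
// Equivalently, hash4 is a multiplicative hash of the big-endian 32-bit load
// of b, keeping the top hashBits bits. A sketch using encoding/binary:
//
//	h := (binary.BigEndian.Uint32(b) * hashmul) >> (32 - hashBits)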

// bulkHash4 will compute hashes using the same
// algorithm as hash4.
func bulkHash4(b []byte, dst []uint32) {
	if len(b) < minMatchLength {
		return
	}
	hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
	dst[0] = (hb * hashmul) >> (32 - hashBits)
	end := len(b) - minMatchLength + 1
	for i := 1; i < end; i++ {
		hb = (hb << 8) | uint32(b[i+3])
		dst[i] = (hb * hashmul) >> (32 - hashBits)
	}
}
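
// The loop keeps hb equal to the big-endian load of b[i:i+4], so every output
// agrees with hash4. A consistency-check sketch (hypothetical test code):
//
//	dst := make([]uint32, len(b)-minMatchLength+1)
//	bulkHash4(b, dst)
//	for i := range dst {
//		if dst[i] != hash4(b[i:]) {
//			panic("bulkHash4 disagrees with hash4")
//		}
//	}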

// matchLen returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
func matchLen(a, b []byte, max int) int {
	a = a[:max]
	b = b[:len(a)]
	for i, av := range a {
		if b[i] != av {
			return i
		}
	}
	return max
}
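
// For example, with max = 6:
//
//	matchLen([]byte("gopher"), []byte("gopxyz"), 6) == 3
//
// since the two slices agree on "gop" and first differ at index 3.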

// encSpeed will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err.
func (d *compressor) encSpeed() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}

		// Handle small sizes.
		if d.windowEnd < 128 {
			switch {
			case d.windowEnd == 0:
				return
			case d.windowEnd <= 16:
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			default:
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			d.windowEnd = 0
			d.bestSpeed.reset()
			return
		}

	}
	// Encode the block.
	d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd])

	// If we removed less than 1/16th, Huffman compress the block.
	if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	} else {
		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
	}
	d.err = d.w.err
	d.windowEnd = 0
}

func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.hashOffset = 1
	d.tokens = make([]token, 0, maxFlateBlockTokens+1)
	d.length = minMatchLength - 1
	d.offset = 0
	d.byteAvailable = false
	d.index = 0
	d.hash = 0
	d.chainHead = -1
	d.bulkHasher = bulkHash4
}

func (d *compressor) deflate() {
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = hash4(d.window[d.index : d.index+minMatchLength])
	}

Loop:
	for {
		if d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				break Loop
			}
			if d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
					d.byteAvailable = false
				}
				if len(d.tokens) > 0 {
					if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
				break Loop
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = hash4(d.window[d.index : d.index+minMatchLength])
			hh := &d.hashHead[d.hash&hashMask]
			d.chainHead = int(*hh)
			d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
			*hh = uint32(d.index + d.hashOffset)
		}
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if d.chainHead-d.hashOffset >= minIndex &&
			(d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
				d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
			d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			if d.fastSkipHashing != skipNever {
				d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
			} else {
				d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
			}
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				if d.fastSkipHashing != skipNever {
					newIndex = d.index + d.length
				} else {
					newIndex = d.index + prevLength - 1
				}
				for d.index++; d.index < newIndex; d.index++ {
					if d.index < d.maxInsertIndex {
						d.hash = hash4(d.window[d.index : d.index+minMatchLength])
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						hh := &d.hashHead[d.hash&hashMask]
						d.hashPrev[d.index&windowMask] = *hh
						// Set the head of the hash chain to us.
						*hh = uint32(d.index + d.hashOffset)
					}
				}
				if d.fastSkipHashing == skipNever {
					d.byteAvailable = false
					d.length = minMatchLength - 1
				}
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
				if d.index < d.maxInsertIndex {
					d.hash = hash4(d.window[d.index : d.index+minMatchLength])
				}
			}
			if len(d.tokens) == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
					return
				}
				d.tokens = d.tokens[:0]
			}
		} else {
			if d.fastSkipHashing != skipNever || d.byteAvailable {
				i := d.index - 1
				if d.fastSkipHashing != skipNever {
					i = d.index
				}
				d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
				if len(d.tokens) == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
			}
			d.index++
			if d.fastSkipHashing == skipNever {
				d.byteAvailable = true
			}
		}
	}
}
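
// A short trace of the lazy path above (fastSkipHashing == skipNever):
// suppose findMatch returns a length-4 match at position p. The match is not
// emitted immediately; the loop advances to p+1 with byteAvailable set. If
// findMatch now returns a match longer than 4, the byte at p is emitted as a
// literal and the longer match becomes the new pending candidate; otherwise
// the length-4 match at p is emitted.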

func (d *compressor) fillStore(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

func (d *compressor) store() {
	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		d.windowEnd = 0
	}
}

// storeHuff compresses and stores the currently added data
// when d.window is full or we are at the end of the stream.
// Any error that occurred will be in d.err.
func (d *compressor) storeHuff() {
	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	d.err = d.w.err
	d.windowEnd = 0
}

func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	for len(b) > 0 {
		d.step(d)
		b = b[d.fill(d, b):]
		if d.err != nil {
			return 0, d.err
		}
	}
	return n, nil
}

func (d *compressor) syncFlush() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err == nil {
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}
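
// The empty stored block written above is what produces the sync marker
// mentioned in Flush's documentation: once the bit stream is padded to a byte
// boundary, the block's LEN/NLEN fields appear on the wire as
//
//	0x00 0x00 0xFF 0xFF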

func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).store
	case level == HuffmanOnly:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).storeHuff
	case level == BestSpeed:
		d.compressionLevel = levels[level]
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).encSpeed
		d.bestSpeed = newDeflateFast()
		d.tokens = make([]token, maxStoreBlockSize)
	case level == DefaultCompression:
		level = 6
		fallthrough
	case 2 <= level && level <= 9:
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflate
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	return nil
}

func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	switch d.compressionLevel.level {
	case NoCompression:
		d.windowEnd = 0
	case BestSpeed:
		d.windowEnd = 0
		d.tokens = d.tokens[:0]
		d.bestSpeed.reset()
	default:
		d.chainHead = -1
		for i := range d.hashHead {
			d.hashHead[i] = 0
		}
		for i := range d.hashPrev {
			d.hashPrev[i] = 0
		}
		d.hashOffset = 1
		d.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens = d.tokens[:0]
		d.length = minMatchLength - 1
		d.offset = 0
		d.hash = 0
		d.maxInsertIndex = 0
	}
}

func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	if d.w.writeStoredHeader(0, true); d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	return d.w.err
}

// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more. Level 0
// (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (HuffmanOnly) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	var dw Writer
	if err := dw.d.init(w, level); err != nil {
		return nil, err
	}
	return &dw, nil
}
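
// A minimal usage sketch (buf and zw are illustrative names; the result in
// buf is a raw DEFLATE stream readable with NewReader):
//
//	var buf bytes.Buffer
//	zw, err := NewWriter(&buf, DefaultCompression)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if _, err := zw.Write([]byte("hello, world\n")); err != nil {
//		log.Fatal(err)
//	}
//	if err := zw.Close(); err != nil {
//		log.Fatal(err)
//	}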

// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	dw := &dictWriter{w}
	zw, err := NewWriter(dw, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}
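
// A round-trip sketch with a preset dictionary (dict and payload are
// illustrative; the reader must be given the same dictionary bytes):
//
//	dict := []byte("common prefix shared by many messages")
//	var buf bytes.Buffer
//	zw, _ := NewWriterDict(&buf, BestCompression, dict)
//	zw.Write(payload)
//	zw.Close()
//
//	zr := NewReaderDict(&buf, dict)
//	decoded, _ := io.ReadAll(zr)
//	zr.Close()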

type dictWriter struct {
	w io.Writer
}

func (w *dictWriter) Write(b []byte) (n int, err error) {
	return w.w.Write(b)
}

// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte
}

// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}

// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// https://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
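
// A sketch of Flush in a packet-oriented protocol (conn and packets are
// hypothetical):
//
//	zw, _ := NewWriter(conn, BestSpeed)
//	for packet := range packets {
//		zw.Write(packet)
//		zw.Flush() // the peer can now decode everything written so far
//	}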

// Close flushes and closes the writer.
func (w *Writer) Close() error {
	return w.d.close()
}

// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	if dw, ok := w.d.w.writer.(*dictWriter); ok {
		// w was created with NewWriterDict
		dw.w = dst
		w.d.reset(dw)
		w.d.fillWindow(w.dict)
	} else {
		// w was created with NewWriter
		w.d.reset(dst)
	}
}
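
// A sketch of reusing one Writer across many outputs via Reset, which avoids
// reallocating the window and hash tables (dsts and data are illustrative):
//
//	zw, _ := NewWriter(io.Discard, DefaultCompression)
//	for _, dst := range dsts {
//		zw.Reset(dst)
//		zw.Write(data)
//		zw.Close()
//	}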