// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"fmt"
	"io"
	"math"
)

const (
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly         = -2
	ConstantCompression = HuffmanOnly // compatibility alias.

	logWindowSize    = 15
	windowSize       = 1 << logWindowSize
	windowMask       = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 24

	skipNever = math.MaxInt32

	debugDeflate = false
)

// compressionLevel holds the matcher tuning parameters for one compression level.
type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}

// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
var levels = []compressionLevel{
	{}, // 0
	// Level 1-6 uses specialized algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	{0, 0, 0, 0, 0, 3},
	{0, 0, 0, 0, 0, 4},
	{0, 0, 0, 0, 0, 5},
	{0, 0, 0, 0, 0, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{8, 8, 24, 16, skipNever, 7},
	{10, 16, 24, 64, skipNever, 8},
	{32, 258, 258, 4096, skipNever, 9},
}

// advancedState contains state for the advanced levels, with bigger hash tables, etc.
type advancedState struct {
	// deflate state
	length         int
	offset         int
	maxInsertIndex int

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]
	index     int
	hashMatch [maxMatchLength + minMatchLength]uint32

	hash uint32
	ii   uint16 // position of last match, intended to overflow to reset.
}

// compressor holds all state for one DEFLATE stream. The fill/step function
// pair selects the algorithm (store, huffman-only, fast, or lazy deflate).
type compressor struct {
	compressionLevel

	w *huffmanBitWriter

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window

	window     []byte
	windowEnd  int
	blockStart int // window index where current tokens start
	err        error

	// queued output tokens
	tokens tokens
	fast   fastEnc
	state  *advancedState

	sync          bool // requesting flush
	byteAvailable bool // if true, still need to process window[index-1].
}

// fillDeflate copies as much of b as fits into the sliding window and returns
// the number of bytes consumed. When the upper half of the double-size window
// fills, the window is shifted down by windowSize and all hash-table offsets
// are rebased so chain lookups stay valid.
func (d *compressor) fillDeflate(b []byte) int {
	s := d.state
	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window[:], d.window[windowSize:2*windowSize])
		s.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			d.blockStart = math.MaxInt32
		}
		s.hashOffset += windowSize
		if s.hashOffset > maxHashOffset {
			// Rebase hashOffset back to 1 before it can overflow the uint32
			// entries; entries at or below delta fall out of the window and
			// are cleared to zero (the "empty" marker).
			delta := s.hashOffset - 1
			s.hashOffset -= delta
			s.chainHead -= delta
			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range s.hashPrev[:] {
				if int(v) > delta {
					s.hashPrev[i] = uint32(int(v) - delta)
				} else {
					s.hashPrev[i] = 0
				}
			}
			for i, v := range s.hashHead[:] {
				if int(v) > delta {
					s.hashHead[i] = uint32(int(v) - delta)
				} else {
					s.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

// writeBlock emits the queued tokens as one DEFLATE block ending at window
// index `index`. Any error from the bit writer is returned and also left in
// d.w.err.
func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
	if index > 0 || eof {
		var window []byte
		if d.blockStart <= index {
			window = d.window[d.blockStart:index]
		}
		d.blockStart = index
		d.w.writeBlock(tok, eof, window)
		return d.w.err
	}
	return nil
}

// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
	if index > 0 || eof {
		if d.blockStart <= index {
			window := d.window[d.blockStart:index]
			// If we removed less than a 64th of all literals
			// we huffman compress the block.
			if int(tok.n) > len(window)-int(tok.n>>6) {
				d.w.writeBlockHuff(eof, window, d.sync)
			} else {
				// Write a dynamic huffman block.
				d.w.writeBlockDynamic(tok, eof, window, d.sync)
			}
		} else {
			d.w.writeBlock(tok, eof, nil)
		}
		d.blockStart = index
		return d.w.err
	}
	return nil
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only or huffman mode.
	if d.level <= 0 {
		return
	}
	if d.fast != nil {
		// encode the last data, but discard the result
		if len(b) > maxMatchOffset {
			b = b[len(b)-maxMatchOffset:]
		}
		d.fast.Encode(&d.tokens, b)
		d.tokens.Reset()
		return
	}
	s := d.state
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := s.hashMatch[:dstSize]
		bulkHash4(tocheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			s.hashPrev[di&windowMask] = s.hashHead[newH]
			// Set the head of the hash chain to us.
			s.hashHead[newH] = uint32(di + s.hashOffset)
		}
		s.hash = newH
	}
	// Update window information.
	d.windowEnd += n
	s.index = n
}

// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	// Never look past the available lookahead or the maximum match length.
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	// wEnd is the byte just past the best match so far; a candidate can only
	// beat the current best if it also matches at that position, which lets us
	// reject most candidates with a single byte compare.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		// Follow the hash chain to the previous position with the same hash.
		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}

// writeStoredBlock emits buf as a single stored (uncompressed) DEFLATE block.
func (d *compressor) writeStoredBlock(buf []byte) error {
	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}

// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	b = b[:4]
	return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
}

// bulkHash4 will compute hashes using the same
// algorithm as hash4
func bulkHash4(b []byte, dst []uint32) {
	if len(b) < 4 {
		return
	}
	hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
	dst[0] = hash4u(hb, hashBits)
	end := len(b) - 4 + 1
	for i := 1; i < end; i++ {
		// Roll the 4-byte value forward one position instead of reloading it.
		hb = (hb << 8) | uint32(b[i+3])
		dst[i] = hash4u(hb, hashBits)
	}
}

// initDeflate allocates the double-size sliding window and resets the
// advanced matcher state for the lazy deflate levels (7-9).
func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.byteAvailable = false
	d.err = nil
	if d.state == nil {
		return
	}
	s := d.state
	s.index = 0
	s.hashOffset = 1
	s.length = minMatchLength - 1
	s.offset = 0
	s.hash = 0
	s.chainHead = -1
}

// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazy() {
	s := d.state
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = debugDeflate

	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if s.index < s.maxInsertIndex {
		s.hash = hash4(d.window[s.index : s.index+minMatchLength])
	}

	for {
		if sanity && s.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - s.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && s.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				return
			}
		}
		if s.index < s.maxInsertIndex {
			// Update the hash
			s.hash = hash4(d.window[s.index : s.index+minMatchLength])
			ch := s.hashHead[s.hash&hashMask]
			s.chainHead = int(ch)
			s.hashPrev[s.index&windowMask] = ch
			s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
		}
		prevLength := s.length
		prevOffset := s.offset
		s.length = minMatchLength - 1
		s.offset = 0
		minIndex := s.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
				s.length = newLength
				s.offset = newOffset
			}
		}
		if prevLength >= minMatchLength && s.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))

			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			var newIndex int
			newIndex = s.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > s.maxInsertIndex {
				end = s.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := s.index + 1
			if startindex > s.maxInsertIndex {
				startindex = s.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := s.hashMatch[:dstSize]
				bulkHash4(tocheck, dst)
				var newH uint32
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					s.hashPrev[di&windowMask] = s.hashHead[newH]
					// Set the head of the hash chain to us.
					s.hashHead[newH] = uint32(di + s.hashOffset)
				}
				s.hash = newH
			}

			s.index = newIndex
			d.byteAvailable = false
			s.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
					return
				}
				d.tokens.Reset()
			}
		} else {
			// Reset, if we got a match this run.
			if s.length >= minMatchLength {
				s.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				s.ii++
				d.tokens.AddLiteral(d.window[s.index-1])
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				s.index++

				// If we have a long run of no matches, skip additional bytes
				// Resets when s.ii overflows after 64KB.
				if s.ii > 31 {
					n := int(s.ii >> 5)
					for j := 0; j < n; j++ {
						if s.index >= d.windowEnd-1 {
							break
						}

						d.tokens.AddLiteral(d.window[s.index-1])
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
								return
							}
							d.tokens.Reset()
						}
						s.index++
					}
					// Flush last byte
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
							return
						}
						d.tokens.Reset()
					}
				}
			} else {
				s.index++
				d.byteAvailable = true
			}
		}
	}
}

// store writes the accumulated window as a stored block once it is full
// or a flush has been requested.
func (d *compressor) store() {
	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		d.windowEnd = 0
	}
}

// fillBlock will fill the buffer with data for huffman-only compression.
// The number of bytes copied is returned.
func (d *compressor) fillBlock(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
	d.err = d.w.err
	d.windowEnd = 0
}

// storeFast will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeFast() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < len(d.window) {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			} else {
				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
				d.err = d.w.err
			}
			d.tokens.Reset()
			d.windowEnd = 0
			d.fast.Reset()
			return
		}
	}

	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if d.tokens.n == 0 {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	}
	d.tokens.Reset()
	d.windowEnd = 0
}

// write will add input bytes to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	for len(b) > 0 {
		// Process whatever is already in the window, then top it up from b.
		d.step(d)
		b = b[d.fill(d, b):]
		if d.err != nil {
			return 0, d.err
		}
	}
	return n, d.err
}

// syncFlush compresses pending window data and emits an empty stored block
// so the output produced so far can be decompressed (Z_SYNC_FLUSH semantics).
func (d *compressor) syncFlush() error {
	d.sync = true
	if d.err != nil {
		return d.err
	}
	d.step(d)
	if d.err == nil {
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}

// init configures the compressor for the requested level, selecting the
// fill/step algorithm pair. Returns an error for levels outside [-2, 9].
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.w.logNewTablePenalty = 4
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeHuff
	case level == DefaultCompression:
		level = 5
		fallthrough
	case level >= 1 && level <= 6:
		d.w.logNewTablePenalty = 6
		d.fast = newFastEnc(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeFast
	case 7 <= level && level <= 9:
		d.w.logNewTablePenalty = 10
		d.state = &advancedState{}
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflateLazy
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	d.level = level
	return nil
}

// reset the state of the compressor.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.fast != nil {
		d.fast.Reset()
		d.windowEnd = 0
		d.tokens.Reset()
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompression.
		d.windowEnd = 0
	default:
		s := d.state
		s.chainHead = -1
		for i := range s.hashHead {
			s.hashHead[i] = 0
		}
		for i := range s.hashPrev {
			s.hashPrev[i] = 0
		}
		s.hashOffset = 1
		s.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.Reset()
		s.length = minMatchLength - 1
		s.offset = 0
		s.hash = 0
		s.ii = 0
		s.maxInsertIndex = 0
	}
}

// close flushes any remaining data, writes the final (EOF) stored header,
// and releases the bit writer's reference to the underlying writer.
func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	if d.w.writeStoredHeader(0, true); d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	d.w.reset(nil)
	return d.w.err
}

// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	var dw Writer
	if err := dw.d.init(w, level); err != nil {
		return nil, err
	}
	return &dw, nil
}

// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	zw, err := NewWriter(w, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}

// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte
}

// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}

// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}

// Close flushes and closes the writer.
func (w *Writer) Close() error {
	return w.d.close()
}

// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	if len(w.dict) > 0 {
		// w was created with NewWriterDict
		w.d.reset(dst)
		if dst != nil {
			w.d.fillWindow(w.dict)
		}
	} else {
		// w was created with NewWriter
		w.d.reset(dst)
	}
}

// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}