aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/klauspost/compress/flate
diff options
context:
space:
mode:
authoruzhas <uzhas@ydb.tech>2023-11-16 16:04:50 +0300
committeruzhas <uzhas@ydb.tech>2023-11-16 17:46:46 +0300
commit46f0c0079bb50609d2eeb6586642bcf114fc5239 (patch)
tree84e4e4978d57fe5de321ba69bf9d0c290de60a66 /vendor/github.com/klauspost/compress/flate
parent73045e389397816cc2bdd6cd7818b4bce427b265 (diff)
downloadydb-46f0c0079bb50609d2eeb6586642bcf114fc5239.tar.gz
enable ya make for go projects
Diffstat (limited to 'vendor/github.com/klauspost/compress/flate')
-rw-r--r--vendor/github.com/klauspost/compress/flate/deflate.go988
-rw-r--r--vendor/github.com/klauspost/compress/flate/deflate_test.go665
-rw-r--r--vendor/github.com/klauspost/compress/flate/dict_decoder.go184
-rw-r--r--vendor/github.com/klauspost/compress/flate/dict_decoder_test.go139
-rw-r--r--vendor/github.com/klauspost/compress/flate/fast_encoder.go216
-rw-r--r--vendor/github.com/klauspost/compress/flate/flate_test.go366
-rw-r--r--vendor/github.com/klauspost/compress/flate/fuzz_test.go128
-rw-r--r--vendor/github.com/klauspost/compress/flate/gotest/ya.make16
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go1182
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go381
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_code.go417
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go159
-rw-r--r--vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go201
-rw-r--r--vendor/github.com/klauspost/compress/flate/inflate.go793
-rw-r--r--vendor/github.com/klauspost/compress/flate/inflate_gen.go1283
-rw-r--r--vendor/github.com/klauspost/compress/flate/inflate_test.go281
-rw-r--r--vendor/github.com/klauspost/compress/flate/level1.go241
-rw-r--r--vendor/github.com/klauspost/compress/flate/level2.go214
-rw-r--r--vendor/github.com/klauspost/compress/flate/level3.go241
-rw-r--r--vendor/github.com/klauspost/compress/flate/level4.go221
-rw-r--r--vendor/github.com/klauspost/compress/flate/level5.go310
-rw-r--r--vendor/github.com/klauspost/compress/flate/level6.go325
-rw-r--r--vendor/github.com/klauspost/compress/flate/reader_test.go106
-rw-r--r--vendor/github.com/klauspost/compress/flate/regmask_amd64.go37
-rw-r--r--vendor/github.com/klauspost/compress/flate/regmask_other.go40
-rw-r--r--vendor/github.com/klauspost/compress/flate/stateless.go318
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/fuzz/FuzzEncoding.zipbin0 -> 1213291 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/fuzz/encode-raw-corpus.zipbin0 -> 683330 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expectbin0 -> 79 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinputbin0 -> 79 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.goldenbin0 -> 8204 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.inbin0 -> 65535 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expectbin0 -> 78 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect-noinputbin0 -> 78 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expectbin0 -> 78 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinputbin0 -> 78 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expectbin0 -> 1698 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinputbin0 -> 1698 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.goldenbin0 -> 1606 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expectbin0 -> 1696 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect-noinputbin0 -> 1696 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expectbin0 -> 1696 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinputbin0 -> 1696 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expectbin0 -> 1005 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinputbin0 -> 1052 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.goldenbin0 -> 1005 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.inbin0 -> 1000 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expectbin0 -> 1005 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect-noinputbin0 -> 1054 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expectbin0 -> 1005 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinputbin0 -> 1054 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expectbin0 -> 186 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinputbin0 -> 186 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.goldenbin0 -> 246 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in4
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expectbin0 -> 186 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect-noinputbin0 -> 186 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expectbin0 -> 186 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinputbin0 -> 186 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.goldenbin0 -> 65540 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.inbin0 -> 65535 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expectbin0 -> 33 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinputbin0 -> 33 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.goldenbin0 -> 1812 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in2
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expectbin0 -> 32 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect-noinputbin0 -> 32 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expectbin0 -> 32 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinputbin0 -> 32 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.goldenbin0 -> 231 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in14
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expectbin0 -> 231 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect-noinputbin0 -> 231 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expectbin0 -> 231 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinputbin0 -> 231 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect4
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput4
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden3
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in13
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect-noinput1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expectbin0 -> 6 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinputbin0 -> 6 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.goldenbin0 -> 51 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in1
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expectbin0 -> 6 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect-noinputbin0 -> 6 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expectbin0 -> 6 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinputbin0 -> 6 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinputbin0 -> 208 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/null-long-match.sync.expect-noinputbin0 -> 206 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinputbin0 -> 206 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/regression.zipbin0 -> 483763 bytes
-rw-r--r--vendor/github.com/klauspost/compress/flate/testdata/tokens.bin63
-rw-r--r--vendor/github.com/klauspost/compress/flate/token.go379
-rw-r--r--vendor/github.com/klauspost/compress/flate/token_test.go54
-rw-r--r--vendor/github.com/klauspost/compress/flate/writer_test.go541
-rw-r--r--vendor/github.com/klauspost/compress/flate/ya.make51
103 files changed, 10592 insertions, 0 deletions
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 0000000000..5faea0b2b3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,988 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
const (
	// Compression levels, compatible with the standard library's
	// compress/flate. DefaultCompression is mapped to level 5 in init.
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly         = -2
	ConstantCompression = HuffmanOnly // compatibility alias.

	logWindowSize    = 15
	windowSize       = 1 << logWindowSize
	windowMask       = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we will encode at the time.
	// Smaller sizes usually creates less optimal blocks.
	// Bigger can make context switching slow.
	// We use this for levels 7-9, so we make it big.
	maxFlateBlockTokens = 1 << 15
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 28 // rebase threshold for hash-chain indices (see fillDeflate)

	skipNever = math.MaxInt32 // sentinel skip value: lazy matching is always on

	debugDeflate = false // enables extra runtime sanity checks in deflateLazy
)
+
// compressionLevel holds per-level tuning parameters for the advanced
// (7-9) path: chain bounds the hash-chain walk in findMatch, nice is the
// "good enough" match length that stops the search early, and lazy caps
// the previous-match length at which lazy evaluation is still attempted.
// good and fastSkipHashing are unused by the lazy path in this file;
// level is the user-visible level number.
type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}
+
// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
//
// Field order per entry: {good, lazy, nice, chain, fastSkipHashing, level}.
var levels = []compressionLevel{
	{}, // 0
	// Level 1-6 uses specialized algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	{0, 0, 0, 0, 0, 3},
	{0, 0, 0, 0, 0, 4},
	{0, 0, 0, 0, 0, 5},
	{0, 0, 0, 0, 0, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{8, 12, 16, 24, skipNever, 7},
	{16, 30, 40, 64, skipNever, 8},
	{32, 258, 258, 1024, skipNever, 9},
}
+
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
type advancedState struct {
	// deflate state
	length         int // length of the match found in the previous step
	offset         int // offset of the match found in the previous step
	maxInsertIndex int // last window index at which a 4-byte hash can be computed
	chainHead      int // head of the hash chain for the current position (biased by hashOffset)
	hashOffset     int // bias added to indices in hashHead/hashPrev; starts at 1 so a zero entry reads as "before the window"

	ii uint16 // position of last match, intended to overflow to reset.

	// input window: unprocessed data is window[index:windowEnd]
	index     int
	hashMatch [maxMatchLength + minMatchLength]uint32 // scratch output for bulkHash4

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	hashHead [hashSize]uint32
	hashPrev [windowSize]uint32
}
+
// compressor is the common driver for all compression levels. init wires
// fill/step to the implementation for the chosen level; fast is set for
// levels 1-6 and state for levels 7-9 (neither for store/huffman-only).
type compressor struct {
	compressionLevel

	h *huffmanEncoder // literal-frequency coder used for gain estimation in findMatch
	w *huffmanBitWriter

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window

	window     []byte
	windowEnd  int // window[:windowEnd] holds buffered input
	blockStart int // window index where current tokens start
	err        error

	// queued output tokens
	tokens tokens
	fast   fastEnc
	state  *advancedState

	sync          bool // requesting flush
	byteAvailable bool // if true, still need to process window[index-1].
}
+
// fillDeflate copies b into the sliding window for the advanced (7-9)
// path and returns the number of bytes consumed. When the write position
// approaches the end of the 2*windowSize buffer, the upper half is slid
// down and all window-relative indices are rebased.
func (d *compressor) fillDeflate(b []byte) int {
	s := d.state
	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		//copy(d.window[:], d.window[windowSize:2*windowSize])
		*(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
		s.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// The block start slid out of the window; mark it invalid.
			d.blockStart = math.MaxInt32
		}
		s.hashOffset += windowSize
		if s.hashOffset > maxHashOffset {
			// Rebase the stored hash indices so hashOffset stays small
			// and the uint32 table entries cannot overflow.
			delta := s.hashOffset - 1
			s.hashOffset -= delta
			s.chainHead -= delta
			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range s.hashPrev[:] {
				if int(v) > delta {
					s.hashPrev[i] = uint32(int(v) - delta)
				} else {
					// Entry predates the rebased range: mark empty.
					s.hashPrev[i] = 0
				}
			}
			for i, v := range s.hashHead[:] {
				if int(v) > delta {
					s.hashHead[i] = uint32(int(v) - delta)
				} else {
					s.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ //d.w.writeBlock(tok, eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ return d.w.err
+ }
+ return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored on no matches, or
+// only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ if d.blockStart <= index {
+ window := d.window[d.blockStart:index]
+ // If we removed less than a 64th of all literals
+ // we huffman compress the block.
+ if int(tok.n) > len(window)-int(tok.n>>6) {
+ d.w.writeBlockHuff(eof, window, d.sync)
+ } else {
+ // Write a dynamic huffman block.
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ }
+ } else {
+ d.w.writeBlock(tok, eof, nil)
+ }
+ d.blockStart = index
+ return d.w.err
+ }
+ return nil
+}
+
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only or huffman mode.
	if d.level <= 0 {
		return
	}
	if d.fast != nil {
		// encode the last data, but discard the result
		if len(b) > maxMatchOffset {
			b = b[len(b)-maxMatchOffset:]
		}
		d.fast.Encode(&d.tokens, b)
		d.tokens.Reset()
		return
	}
	s := d.state
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		// Overlap chunks by minMatchLength-1 bytes so hashes spanning
		// the chunk boundary are still computed.
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := s.hashMatch[:dstSize]
		bulkHash4(tocheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			s.hashPrev[di&windowMask] = s.hashHead[newH]
			// Set the head of the hash chain to us.
			s.hashHead[newH] = uint32(di + s.hashOffset)
		}
	}
	// Update window information.
	d.windowEnd += n
	s.index = n
}
+
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
//
// Two strategies are used: for short chains (d.chain < 100) the longest
// match wins; for long chains a bit-gain estimate (literal cost saved
// minus match encoding cost) decides, using the literal coder d.h.
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = minMatchLength - 1

	// wEnd is the byte just past the best match so far; a candidate can
	// only beat the current best if it also matches at this position.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize
	if minIndex < 0 {
		minIndex = 0
	}
	offset = 0

	if d.chain < 100 {
		// Short-chain strategy: pure longest-match.
		for i := prevHead; tries > 0; tries-- {
			if wEnd == win[i+length] {
				n := matchLen(win[i:i+minMatchLook], wPos)
				if n > length {
					length = n
					offset = pos - i
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
			if i <= minIndex {
				// hashPrev[i & windowMask] has already been overwritten, so stop now.
				break
			}
			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
			if i < minIndex {
				break
			}
		}
		return
	}

	// Minimum gain to accept a match.
	cGain := 4

	// Some like it higher (CSV), some like it lower (JSON)
	const baseCost = 3
	// Base is 4 bytes at with an additional cost.
	// Matches must be better than this.

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)
			if n > length {
				// Calculate gain. Estimate
				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])

				//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
				if newGain > cGain {
					length = n
					offset = pos - i
					cGain = newGain
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
		}
		if i <= minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
		if i < minIndex {
			break
		}
	}
	return
}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> (32 - h)
}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < 4 {
+ return
+ }
+ hb := binary.LittleEndian.Uint32(b)
+
+ dst[0] = hash4u(hb, hashBits)
+ end := len(b) - 4 + 1
+ for i := 1; i < end; i++ {
+ hb = (hb >> 8) | uint32(b[i+3])<<24
+ dst[i] = hash4u(hb, hashBits)
+ }
+}
+
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.byteAvailable = false
+ d.err = nil
+ if d.state == nil {
+ return
+ }
+ s := d.state
+ s.index = 0
+ s.hashOffset = 1
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.chainHead = -1
+}
+
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
//
// Each iteration either emits the previous step's match (when the
// current position cannot beat it) or defers the current byte as a
// pending literal (byteAvailable) and tries again one byte later.
func (d *compressor) deflateLazy() {
	s := d.state
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = debugDeflate

	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	if d.windowEnd != s.index && d.chain > 100 {
		// Get literal huffman coder.
		// Built from the pending window's byte frequencies; findMatch
		// uses it to estimate the bit gain of candidate matches.
		if d.h == nil {
			d.h = newHuffmanEncoder(maxFlateBlockTokens)
		}
		var tmp [256]uint16
		for _, v := range d.window[s.index:d.windowEnd] {
			tmp[v]++
		}
		d.h.generate(tmp[:], 15)
	}

	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)

	for {
		if sanity && s.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - s.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && s.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				return
			}
		}
		if s.index < s.maxInsertIndex {
			// Update the hash
			hash := hash4(d.window[s.index:])
			ch := s.hashHead[hash]
			s.chainHead = int(ch)
			s.hashPrev[s.index&windowMask] = ch
			s.hashHead[hash] = uint32(s.index + s.hashOffset)
		}
		prevLength := s.length
		prevOffset := s.offset
		s.length = minMatchLength - 1
		s.offset = 0
		minIndex := s.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
				s.length = newLength
				s.offset = newOffset
			}
		}

		if prevLength >= minMatchLength && s.length <= prevLength {
			// No better match, but check for better match at end...
			//
			// Skip forward a number of bytes.
			// Offset of 2 seems to yield best results. 3 is sometimes better.
			const checkOff = 2

			// Check all, except full length
			if prevLength < maxMatchLength-checkOff {
				prevIndex := s.index - 1
				if prevIndex+prevLength < s.maxInsertIndex {
					end := lookahead
					if lookahead > maxMatchLength+checkOff {
						end = maxMatchLength + checkOff
					}
					end += prevIndex

					// Hash at match end.
					h := hash4(d.window[prevIndex+prevLength:])
					ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
					if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
						length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
						// It seems like a pure length metric is best.
						if length > prevLength {
							prevLength = length
							prevOffset = prevIndex - ch2

							// Extend back...
							for i := checkOff - 1; i >= 0; i-- {
								if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
									// Emit tokens we "owe"
									for j := 0; j <= i; j++ {
										d.tokens.AddLiteral(d.window[prevIndex+j])
										if d.tokens.n == maxFlateBlockTokens {
											// The block includes the current character
											if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
												return
											}
											d.tokens.Reset()
										}
										s.index++
										if s.index < s.maxInsertIndex {
											h := hash4(d.window[s.index:])
											ch := s.hashHead[h]
											s.chainHead = int(ch)
											s.hashPrev[s.index&windowMask] = ch
											s.hashHead[h] = uint32(s.index + s.hashOffset)
										}
									}
									break
								} else {
									prevLength++
								}
							}
						}
					} else if false {
						// Check one further ahead.
						// Only rarely better, disabled for now.
						prevIndex++
						h := hash4(d.window[prevIndex+prevLength:])
						ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
						if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
							length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
							// It seems like a pure length metric is best.
							if length > prevLength+checkOff {
								prevLength = length
								prevOffset = prevIndex - ch2
								prevIndex--

								// Extend back...
								for i := checkOff; i >= 0; i-- {
									if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
										// Emit tokens we "owe"
										for j := 0; j <= i; j++ {
											d.tokens.AddLiteral(d.window[prevIndex+j])
											if d.tokens.n == maxFlateBlockTokens {
												// The block includes the current character
												if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
													return
												}
												d.tokens.Reset()
											}
											s.index++
											if s.index < s.maxInsertIndex {
												h := hash4(d.window[s.index:])
												ch := s.hashHead[h]
												s.chainHead = int(ch)
												s.hashPrev[s.index&windowMask] = ch
												s.hashHead[h] = uint32(s.index + s.hashOffset)
											}
										}
										break
									} else {
										prevLength++
									}
								}
							}
						}
					}
				}
			}
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))

			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			newIndex := s.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > s.maxInsertIndex {
				end = s.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := s.index + 1
			if startindex > s.maxInsertIndex {
				startindex = s.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := s.hashMatch[:dstSize]
				bulkHash4(tocheck, dst)
				var newH uint32
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					s.hashPrev[di&windowMask] = s.hashHead[newH]
					// Set the head of the hash chain to us.
					s.hashHead[newH] = uint32(di + s.hashOffset)
				}
			}

			s.index = newIndex
			d.byteAvailable = false
			s.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
					return
				}
				d.tokens.Reset()
			}
			s.ii = 0
		} else {
			// Reset, if we got a match this run.
			if s.length >= minMatchLength {
				s.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				s.ii++
				d.tokens.AddLiteral(d.window[s.index-1])
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				s.index++

				// If we have a long run of no matches, skip additional bytes
				// Resets when s.ii overflows after 64KB.
				if n := int(s.ii) - d.chain; n > 0 {
					n = 1 + int(n>>6)
					for j := 0; j < n; j++ {
						if s.index >= d.windowEnd-1 {
							break
						}
						d.tokens.AddLiteral(d.window[s.index-1])
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
								return
							}
							d.tokens.Reset()
						}
						// Index...
						if s.index < s.maxInsertIndex {
							h := hash4(d.window[s.index:])
							ch := s.hashHead[h]
							s.chainHead = int(ch)
							s.hashPrev[s.index&windowMask] = ch
							s.hashHead[h] = uint32(s.index + s.hashOffset)
						}
						s.index++
					}
					// Flush last byte
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
							return
						}
						d.tokens.Reset()
					}
				}
			} else {
				// First byte at this position: defer it as a pending literal.
				s.index++
				d.byteAvailable = true
			}
		}
	}
}
+
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
// fillBlock appends b to the window buffer (used by the store-only and
// huffman-only modes) and returns the number of bytes copied.
func (d *compressor) fillBlock(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < len(d.window) {
+ if !d.sync {
+ return
+ }
+ // Handle extremely small sizes.
+ if d.windowEnd < 128 {
+ if d.windowEnd == 0 {
+ return
+ }
+ if d.windowEnd <= 32 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ } else {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+ d.fast.Reset()
+ return
+ }
+ }
+
+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+ // If we made zero matches, store the block as is.
+ if d.tokens.n == 0 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ // If we removed less than 1/16th, huffman compress the block.
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ } else {
+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+}
+
+// write will add input byte to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ if d.windowEnd == len(d.window) || d.sync {
+ d.step(d)
+ }
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.w.logNewTablePenalty = 10
+ d.window = make([]byte, 32<<10)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case level >= 1 && level <= 6:
+ d.w.logNewTablePenalty = 7
+ d.fast = newFastEnc(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ case 7 <= level && level <= 9:
+ d.w.logNewTablePenalty = 8
+ d.state = &advancedState{}
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ d.step = (*compressor).deflateLazy
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ d.level = level
+ return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for Snappy.
+ if d.fast != nil {
+ d.fast.Reset()
+ d.windowEnd = 0
+ d.tokens.Reset()
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+ // level was NoCompression or ConstantCompresssion.
+ d.windowEnd = 0
+ default:
+ s := d.state
+ s.chainHead = -1
+ for i := range s.hashHead {
+ s.hashHead[i] = 0
+ }
+ for i := range s.hashPrev {
+ s.hashPrev[i] = 0
+ }
+ s.hashOffset = 1
+ s.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens.Reset()
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.ii = 0
+ s.maxInsertIndex = 0
+ }
+}
+
+func (d *compressor) close() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ d.w.reset(nil)
+ return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ zw, err := NewWriter(w, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // http://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if len(w.dict) > 0 {
+ // w was created with NewWriterDict
+ w.d.reset(dst)
+ if dst != nil {
+ w.d.fillWindow(w.dict)
+ }
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+ w.dict = dict
+ w.d.reset(dst)
+ w.d.fillWindow(w.dict)
+}
diff --git a/vendor/github.com/klauspost/compress/flate/deflate_test.go b/vendor/github.com/klauspost/compress/flate/deflate_test.go
new file mode 100644
index 0000000000..f9584ceb3a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate_test.go
@@ -0,0 +1,665 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "testing"
+)
+
+type deflateTest struct {
+ in []byte
+ level int
+ out []byte
+}
+
+type deflateInflateTest struct {
+ in []byte
+}
+
+type reverseBitsTest struct {
+ in uint16
+ bitCount uint8
+ out uint16
+}
+
+var deflateTests = []*deflateTest{
+ 0: {[]byte{}, 0, []byte{0x3, 0x0}},
+ 1: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}},
+ 2: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}},
+ 3: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}},
+
+ 4: {[]byte{0x11}, 0, []byte{0x0, 0x1, 0x0, 0xfe, 0xff, 0x11, 0x3, 0x0}},
+ 5: {[]byte{0x11, 0x12}, 0, []byte{0x0, 0x2, 0x0, 0xfd, 0xff, 0x11, 0x12, 0x3, 0x0}},
+ 6: {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0,
+ []byte{0x0, 0x8, 0x0, 0xf7, 0xff, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x3, 0x0},
+ },
+ 7: {[]byte{}, 1, []byte{0x3, 0x0}},
+ 8: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}},
+ 9: {[]byte{0x11, 0x12}, BestCompression, []byte{0x12, 0x14, 0x2, 0xc, 0x0}},
+ 10: {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, BestCompression, []byte{0x12, 0x84, 0x1, 0xc0, 0x0}},
+ 11: {[]byte{}, 9, []byte{0x3, 0x0}},
+ 12: {[]byte{0x11}, 9, []byte{0x12, 0x4, 0xc, 0x0}},
+ 13: {[]byte{0x11, 0x12}, 9, []byte{0x12, 0x14, 0x2, 0xc, 0x0}},
+ 14: {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 9, []byte{0x12, 0x84, 0x1, 0xc0, 0x0}},
+}
+
+var deflateInflateTests = []*deflateInflateTest{
+ {[]byte{}},
+ {[]byte{0x11}},
+ {[]byte{0x11, 0x12}},
+ {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}},
+ {[]byte{0x11, 0x10, 0x13, 0x41, 0x21, 0x21, 0x41, 0x13, 0x87, 0x78, 0x13}},
+ {largeDataChunk()},
+}
+
+var reverseBitsTests = []*reverseBitsTest{
+ {1, 1, 1},
+ {1, 2, 2},
+ {1, 3, 4},
+ {1, 4, 8},
+ {1, 5, 16},
+ {17, 5, 17},
+ {257, 9, 257},
+ {29, 5, 23},
+}
+
+func largeDataChunk() []byte {
+ result := make([]byte, 100000)
+ for i := range result {
+ result[i] = byte(i * i & 0xFF)
+ }
+ return result
+}
+
+func TestBulkHash4(t *testing.T) {
+ for _, x := range deflateTests {
+ y := x.out
+ if len(y) >= minMatchLength {
+ y = append(y, y...)
+ for j := 4; j < len(y); j++ {
+ y := y[:j]
+ dst := make([]uint32, len(y)-minMatchLength+1)
+ for i := range dst {
+ dst[i] = uint32(i + 100)
+ }
+ bulkHash4(y, dst)
+ for i, val := range dst {
+ got := val
+ expect := hash4(y[i:])
+ if got != expect && got == uint32(i)+100 {
+ t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, expect)
+ } else if got != expect {
+ t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect)
+ } else {
+ //t.Logf("Len:%d Index:%d OK (0x%08x)", len(y), i, got)
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestDeflate(t *testing.T) {
+ for i, h := range deflateTests {
+ var buf bytes.Buffer
+ w, err := NewWriter(&buf, h.level)
+ if err != nil {
+ t.Errorf("NewWriter: %v", err)
+ continue
+ }
+ w.Write(h.in)
+ w.Close()
+ if !bytes.Equal(buf.Bytes(), h.out) {
+ t.Errorf("%d: Deflate(%d, %x) got \n%#v, want \n%#v", i, h.level, h.in, buf.Bytes(), h.out)
+ }
+ }
+}
+
+// A sparseReader returns a stream consisting of 0s followed by 1<<16 1s.
+// This tests missing hash references in a very large input.
+type sparseReader struct {
+ l int64
+ cur int64
+}
+
+func (r *sparseReader) Read(b []byte) (n int, err error) {
+ if r.cur >= r.l {
+ return 0, io.EOF
+ }
+ n = len(b)
+ cur := r.cur + int64(n)
+ if cur > r.l {
+ n -= int(cur - r.l)
+ cur = r.l
+ }
+ for i := range b[0:n] {
+ if r.cur+int64(i) >= r.l-1<<16 {
+ b[i] = 1
+ } else {
+ b[i] = 0
+ }
+ }
+ r.cur = cur
+ return
+}
+
+func TestVeryLongSparseChunk(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping sparse chunk during short test")
+ }
+ var buf bytes.Buffer
+ w, err := NewWriter(&buf, 1)
+ if err != nil {
+ t.Errorf("NewWriter: %v", err)
+ return
+ }
+ if _, err = io.Copy(w, &sparseReader{l: 23e8}); err != nil {
+ t.Errorf("Compress failed: %v", err)
+ return
+ }
+ t.Log("Length:", buf.Len())
+}
+
+type syncBuffer struct {
+ buf bytes.Buffer
+ mu sync.RWMutex
+ closed bool
+ ready chan bool
+}
+
+func newSyncBuffer() *syncBuffer {
+ return &syncBuffer{ready: make(chan bool, 1)}
+}
+
+func (b *syncBuffer) Read(p []byte) (n int, err error) {
+ for {
+ b.mu.RLock()
+ n, err = b.buf.Read(p)
+ b.mu.RUnlock()
+ if n > 0 || b.closed {
+ return
+ }
+ <-b.ready
+ }
+}
+
+func (b *syncBuffer) signal() {
+ select {
+ case b.ready <- true:
+ default:
+ }
+}
+
+func (b *syncBuffer) Write(p []byte) (n int, err error) {
+ n, err = b.buf.Write(p)
+ b.signal()
+ return
+}
+
+func (b *syncBuffer) WriteMode() {
+ b.mu.Lock()
+}
+
+func (b *syncBuffer) ReadMode() {
+ b.mu.Unlock()
+ b.signal()
+}
+
+func (b *syncBuffer) Close() error {
+ b.closed = true
+ b.signal()
+ return nil
+}
+
+func testSync(t *testing.T, level int, input []byte, name string) {
+ if len(input) == 0 {
+ return
+ }
+
+ t.Logf("--testSync %d, %d, %s", level, len(input), name)
+ buf := newSyncBuffer()
+ buf1 := new(bytes.Buffer)
+ buf.WriteMode()
+ w, err := NewWriter(io.MultiWriter(buf, buf1), level)
+ if err != nil {
+ t.Errorf("NewWriter: %v", err)
+ return
+ }
+ r := NewReader(buf)
+
+ // Write half the input and read back.
+ for i := 0; i < 2; i++ {
+ var lo, hi int
+ if i == 0 {
+ lo, hi = 0, (len(input)+1)/2
+ } else {
+ lo, hi = (len(input)+1)/2, len(input)
+ }
+ t.Logf("#%d: write %d-%d", i, lo, hi)
+ if _, err := w.Write(input[lo:hi]); err != nil {
+ t.Errorf("testSync: write: %v", err)
+ return
+ }
+ if i == 0 {
+ if err := w.Flush(); err != nil {
+ t.Errorf("testSync: flush: %v", err)
+ return
+ }
+ } else {
+ if err := w.Close(); err != nil {
+ t.Errorf("testSync: close: %v", err)
+ }
+ }
+ buf.ReadMode()
+ out := make([]byte, hi-lo+1)
+ m, err := io.ReadAtLeast(r, out, hi-lo)
+ t.Logf("#%d: read %d", i, m)
+ if m != hi-lo || err != nil {
+ t.Errorf("testSync/%d (%d, %d, %s): read %d: %d, %v (%d left)", i, level, len(input), name, hi-lo, m, err, buf.buf.Len())
+ return
+ }
+ if !bytes.Equal(input[lo:hi], out[:hi-lo]) {
+ t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo])
+ return
+ }
+ // This test originally checked that after reading
+ // the first half of the input, there was nothing left
+ // in the read buffer (buf.buf.Len() != 0) but that is
+ // not necessarily the case: the write Flush may emit
+ // some extra framing bits that are not necessary
+ // to process to obtain the first half of the uncompressed
+ // data. The test ran correctly most of the time, because
+ // the background goroutine had usually read even
+ // those extra bits by now, but it's not a useful thing to
+ // check.
+ buf.WriteMode()
+ }
+ buf.ReadMode()
+ out := make([]byte, 10)
+ if n, err := r.Read(out); n > 0 || err != io.EOF {
+ t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n])
+ }
+ if buf.buf.Len() != 0 {
+ t.Errorf("testSync (%d, %d, %s): extra data at end", level, len(input), name)
+ }
+ r.Close()
+
+ // stream should work for ordinary reader too
+ r = NewReader(buf1)
+ out, err = io.ReadAll(r)
+ if err != nil {
+ t.Errorf("testSync: read: %s", err)
+ return
+ }
+ r.Close()
+ if !bytes.Equal(input, out) {
+ t.Errorf("testSync: decompress(compress(data)) != data: level=%d input=%s", level, name)
+ }
+}
+
+func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) {
+ var buffer bytes.Buffer
+ w, err := NewWriter(&buffer, level)
+ if err != nil {
+ t.Errorf("NewWriter: %v", err)
+ return
+ }
+ w.Write(input)
+ w.Close()
+ if limit > 0 {
+ t.Logf("level: %d - Size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len())
+ }
+ if limit > 0 && buffer.Len() > limit {
+ t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit)
+ }
+
+ r := NewReader(&buffer)
+ out, err := io.ReadAll(r)
+ if err != nil {
+ t.Errorf("read: %s", err)
+ return
+ }
+ r.Close()
+ if !bytes.Equal(input, out) {
+ os.WriteFile("testdata/fails/"+t.Name()+".got", out, os.ModePerm)
+ os.WriteFile("testdata/fails/"+t.Name()+".want", input, os.ModePerm)
+ t.Errorf("decompress(compress(data)) != data: level=%d input=%s", level, name)
+ return
+ }
+ testSync(t, level, input, name)
+}
+
+func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) {
+ for i := 0; i < 10; i++ {
+ testToFromWithLevelAndLimit(t, i, input, name, limit[i])
+ }
+ testToFromWithLevelAndLimit(t, -2, input, name, limit[10])
+}
+
+func TestDeflateInflate(t *testing.T) {
+ for i, h := range deflateInflateTests {
+ testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{})
+ }
+}
+
+func TestReverseBits(t *testing.T) {
+ for _, h := range reverseBitsTests {
+ if v := reverseBits(h.in, h.bitCount); v != h.out {
+ t.Errorf("reverseBits(%v,%v) = %v, want %v",
+ h.in, h.bitCount, v, h.out)
+ }
+ }
+}
+
+type deflateInflateStringTest struct {
+ filename string
+ label string
+ limit [11]int // Number 11 is ConstantCompression
+}
+
+var deflateInflateStringTests = []deflateInflateStringTest{
+ {
+ "../testdata/e.txt",
+ "2.718281828...",
+ [...]int{100018, 67900, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683 + 100},
+ },
+ {
+ "../testdata/Mark.Twain-Tom.Sawyer.txt",
+ "Mark.Twain-Tom.Sawyer",
+ [...]int{387999, 185000, 182361, 179974, 174124, 168819, 162936, 160506, 160295, 160295, 233460 + 100},
+ },
+}
+
+func TestDeflateInflateString(t *testing.T) {
+ for _, test := range deflateInflateStringTests {
+ gold, err := os.ReadFile(test.filename)
+ if err != nil {
+ t.Error(err)
+ }
+ // Remove returns that may be present on Windows
+ neutral := strings.Map(func(r rune) rune {
+ if r != '\r' {
+ return r
+ }
+ return -1
+ }, string(gold))
+
+ testToFromWithLimit(t, []byte(neutral), test.label, test.limit)
+
+ if testing.Short() {
+ break
+ }
+ }
+}
+
+func TestReaderDict(t *testing.T) {
+ const (
+ dict = "hello world"
+ text = "hello again world"
+ )
+ var b bytes.Buffer
+ w, err := NewWriter(&b, 5)
+ if err != nil {
+ t.Fatalf("NewWriter: %v", err)
+ }
+ w.Write([]byte(dict))
+ w.Flush()
+ b.Reset()
+ w.Write([]byte(text))
+ w.Close()
+
+ r := NewReaderDict(&b, []byte(dict))
+ data, err := io.ReadAll(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(data) != "hello again world" {
+ t.Fatalf("read returned %q want %q", string(data), text)
+ }
+}
+
+func TestWriterDict(t *testing.T) {
+ const (
+ dict = "hello world Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."
+ text = "hello world Lorem ipsum dolor sit amet"
+ )
+ // This test is sensitive to algorithm changes that skip
+ // data in favour of speed. Higher levels are less prone to this
+ // so we test level 4-9.
+ for l := 4; l < 9; l++ {
+ var b bytes.Buffer
+ w, err := NewWriter(&b, l)
+ if err != nil {
+ t.Fatalf("level %d, NewWriter: %v", l, err)
+ }
+ w.Write([]byte(dict))
+ w.Flush()
+ b.Reset()
+ w.Write([]byte(text))
+ w.Close()
+
+ var b1 bytes.Buffer
+ w, _ = NewWriterDict(&b1, l, []byte(dict))
+ w.Write([]byte(text))
+ w.Close()
+
+ if !bytes.Equal(b1.Bytes(), b.Bytes()) {
+ t.Errorf("level %d, writer wrote\n%v\n want\n%v", l, b1.Bytes(), b.Bytes())
+ }
+ }
+}
+
+// See http://code.google.com/p/go/issues/detail?id=2508
+func TestRegression2508(t *testing.T) {
+ if testing.Short() {
+ t.Logf("test disabled with -short")
+ return
+ }
+ w, err := NewWriter(io.Discard, 1)
+ if err != nil {
+ t.Fatalf("NewWriter: %v", err)
+ }
+ buf := make([]byte, 1024)
+ for i := 0; i < 131072; i++ {
+ if _, err := w.Write(buf); err != nil {
+ t.Fatalf("writer failed: %v", err)
+ }
+ }
+ w.Close()
+}
+
+func TestWriterReset(t *testing.T) {
+ for level := -2; level <= 9; level++ {
+ if level == -1 {
+ level++
+ }
+ if testing.Short() && level > 1 {
+ break
+ }
+ w, err := NewWriter(io.Discard, level)
+ if err != nil {
+ t.Fatalf("NewWriter: %v", err)
+ }
+ buf := []byte("hello world")
+ for i := 0; i < 1024; i++ {
+ w.Write(buf)
+ }
+ w.Reset(io.Discard)
+
+ wref, err := NewWriter(io.Discard, level)
+ if err != nil {
+ t.Fatalf("NewWriter: %v", err)
+ }
+
+ // DeepEqual doesn't compare functions.
+ w.d.fill, wref.d.fill = nil, nil
+ w.d.step, wref.d.step = nil, nil
+ w.d.state, wref.d.state = nil, nil
+ w.d.fast, wref.d.fast = nil, nil
+
+ // hashMatch is always overwritten when used.
+ if w.d.tokens.n != 0 {
+ t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, w.d.tokens.n)
+ }
+ // As long as the length is 0, we don't care about the content.
+ w.d.tokens = wref.d.tokens
+
+ // We don't care if there are values in the window, as long as it is at d.index is 0
+ w.d.window = wref.d.window
+ if !reflect.DeepEqual(w, wref) {
+ t.Errorf("level %d Writer not reset after Reset", level)
+ }
+ }
+
+ for i := HuffmanOnly; i <= BestCompression; i++ {
+ testResetOutput(t, fmt.Sprint("level-", i), func(w io.Writer) (*Writer, error) { return NewWriter(w, i) })
+ }
+ dict := []byte(strings.Repeat("we are the world - how are you?", 3))
+ for i := HuffmanOnly; i <= BestCompression; i++ {
+ testResetOutput(t, fmt.Sprint("dict-level-", i), func(w io.Writer) (*Writer, error) { return NewWriterDict(w, i, dict) })
+ }
+ for i := HuffmanOnly; i <= BestCompression; i++ {
+ testResetOutput(t, fmt.Sprint("dict-reset-level-", i), func(w io.Writer) (*Writer, error) {
+ w2, err := NewWriter(nil, i)
+ if err != nil {
+ return w2, err
+ }
+ w2.ResetDict(w, dict)
+ return w2, nil
+ })
+ }
+}
+
+func testResetOutput(t *testing.T, name string, newWriter func(w io.Writer) (*Writer, error)) {
+ t.Run(name, func(t *testing.T) {
+ buf := new(bytes.Buffer)
+ w, err := newWriter(buf)
+ if err != nil {
+ t.Fatalf("NewWriter: %v", err)
+ }
+ b := []byte("hello world - how are you doing?")
+ for i := 0; i < 1024; i++ {
+ w.Write(b)
+ }
+ w.Close()
+ out1 := buf.Bytes()
+
+ buf2 := new(bytes.Buffer)
+ w.Reset(buf2)
+ for i := 0; i < 1024; i++ {
+ w.Write(b)
+ }
+ w.Close()
+ out2 := buf2.Bytes()
+
+ if len(out1) != len(out2) {
+ t.Errorf("got %d, expected %d bytes", len(out2), len(out1))
+ }
+ if !bytes.Equal(out1, out2) {
+ mm := 0
+ for i, b := range out1[:len(out2)] {
+ if b != out2[i] {
+ t.Errorf("mismatch index %d: %02x, expected %02x", i, out2[i], b)
+ }
+ mm++
+ if mm == 10 {
+ t.Fatal("Stopping")
+ }
+ }
+ }
+ t.Logf("got %d bytes", len(out1))
+ })
+}
+
+// TestBestSpeed tests that round-tripping through deflate and then inflate
+// recovers the original input. The Write sizes are near the thresholds in the
+// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize
+// (65535).
+func TestBestSpeed(t *testing.T) {
+ abc := make([]byte, 128)
+ for i := range abc {
+ abc[i] = byte(i)
+ }
+ abcabc := bytes.Repeat(abc, 131072/len(abc))
+ var want []byte
+
+ testCases := [][]int{
+ {65536, 0},
+ {65536, 1},
+ {65536, 1, 256},
+ {65536, 1, 65536},
+ {65536, 14},
+ {65536, 15},
+ {65536, 16},
+ {65536, 16, 256},
+ {65536, 16, 65536},
+ {65536, 127},
+ {65536, 128},
+ {65536, 128, 256},
+ {65536, 128, 65536},
+ {65536, 129},
+ {65536, 65536, 256},
+ {65536, 65536, 65536},
+ }
+
+ for i, tc := range testCases {
+ if testing.Short() && i > 5 {
+ t.Skip()
+ }
+ for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} {
+ tc[0] = firstN
+ outer:
+ for _, flush := range []bool{false, true} {
+ buf := new(bytes.Buffer)
+ want = want[:0]
+
+ w, err := NewWriter(buf, BestSpeed)
+ if err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err)
+ continue
+ }
+ for _, n := range tc {
+ want = append(want, abcabc[:n]...)
+ if _, err := w.Write(abcabc[:n]); err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err)
+ continue outer
+ }
+ if !flush {
+ continue
+ }
+ if err := w.Flush(); err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err)
+ continue outer
+ }
+ }
+ if err := w.Close(); err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err)
+ continue
+ }
+
+ r := NewReader(buf)
+ got, err := io.ReadAll(r)
+ if err != nil {
+ t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err)
+ continue
+ }
+ r.Close()
+
+ if !bytes.Equal(got, want) {
+ t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush)
+ continue
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 0000000000..bb36351a5a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checks about the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps-around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+ // the source prior to the copy is placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+ // Thus, a forwards copy is performed here; that is, the bytes copied is
+ // possibly dependent on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+loop:
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ if dstPos < endPos {
+ goto loop // Avoid for-loop so that this function can be inlined
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go
new file mode 100644
index 0000000000..9275cff791
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go
@@ -0,0 +1,139 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
// TestDictDecoder replays a mix of literal insertions and
// (distance, length) copies through a dictDecoder and compares the flushed
// output against the directly-constructed expected text.
func TestDictDecoder(t *testing.T) {
	const (
		abc  = "ABC\n"
		fox  = "The quick brown fox jumped over the lazy dog!\n"
		poem = "The Road Not Taken\nRobert Frost\n" +
			"\n" +
			"Two roads diverged in a yellow wood,\n" +
			"And sorry I could not travel both\n" +
			"And be one traveler, long I stood\n" +
			"And looked down one as far as I could\n" +
			"To where it bent in the undergrowth;\n" +
			"\n" +
			"Then took the other, as just as fair,\n" +
			"And having perhaps the better claim,\n" +
			"Because it was grassy and wanted wear;\n" +
			"Though as for that the passing there\n" +
			"Had worn them really about the same,\n" +
			"\n" +
			"And both that morning equally lay\n" +
			"In leaves no step had trodden black.\n" +
			"Oh, I kept the first for another day!\n" +
			"Yet knowing how way leads on to way,\n" +
			"I doubted if I should ever come back.\n" +
			"\n" +
			"I shall be telling this with a sigh\n" +
			"Somewhere ages and ages hence:\n" +
			"Two roads diverged in a wood, and I-\n" +
			"I took the one less traveled by,\n" +
			"And that has made all the difference.\n"
	)

	// poemRefs is a pre-computed LZ77 parse of poem.
	var poemRefs = []struct {
		dist   int // Backward distance (0 if this is an insertion)
		length int // Length of copy or insertion
	}{
		{0, 38}, {33, 3}, {0, 48}, {79, 3}, {0, 11}, {34, 5}, {0, 6}, {23, 7},
		{0, 8}, {50, 3}, {0, 2}, {69, 3}, {34, 5}, {0, 4}, {97, 3}, {0, 4},
		{43, 5}, {0, 6}, {7, 4}, {88, 7}, {0, 12}, {80, 3}, {0, 2}, {141, 4},
		{0, 1}, {196, 3}, {0, 3}, {157, 3}, {0, 6}, {181, 3}, {0, 2}, {23, 3},
		{77, 3}, {28, 5}, {128, 3}, {110, 4}, {70, 3}, {0, 4}, {85, 6}, {0, 2},
		{182, 6}, {0, 4}, {133, 3}, {0, 7}, {47, 5}, {0, 20}, {112, 5}, {0, 1},
		{58, 3}, {0, 8}, {59, 3}, {0, 4}, {173, 3}, {0, 5}, {114, 3}, {0, 4},
		{92, 5}, {0, 2}, {71, 3}, {0, 2}, {76, 5}, {0, 1}, {46, 3}, {96, 4},
		{130, 4}, {0, 3}, {360, 3}, {0, 3}, {178, 5}, {0, 7}, {75, 3}, {0, 3},
		{45, 6}, {0, 6}, {299, 6}, {180, 3}, {70, 6}, {0, 1}, {48, 3}, {66, 4},
		{0, 3}, {47, 5}, {0, 9}, {325, 3}, {0, 1}, {359, 3}, {318, 3}, {0, 2},
		{199, 3}, {0, 1}, {344, 3}, {0, 3}, {248, 3}, {0, 10}, {310, 3}, {0, 3},
		{93, 6}, {0, 3}, {252, 3}, {157, 4}, {0, 2}, {273, 5}, {0, 14}, {99, 4},
		{0, 1}, {464, 4}, {0, 2}, {92, 4}, {495, 3}, {0, 1}, {322, 4}, {16, 4},
		{0, 3}, {402, 3}, {0, 2}, {237, 4}, {0, 2}, {432, 4}, {0, 1}, {483, 5},
		{0, 2}, {294, 4}, {0, 2}, {306, 3}, {113, 5}, {0, 1}, {26, 4}, {164, 3},
		{488, 4}, {0, 1}, {542, 3}, {248, 6}, {0, 5}, {205, 3}, {0, 8}, {48, 3},
		{449, 6}, {0, 2}, {192, 3}, {328, 4}, {9, 5}, {433, 3}, {0, 3}, {622, 25},
		{615, 5}, {46, 5}, {0, 2}, {104, 3}, {475, 10}, {549, 3}, {0, 4}, {597, 8},
		{314, 3}, {0, 1}, {473, 6}, {317, 5}, {0, 1}, {400, 3}, {0, 3}, {109, 3},
		{151, 3}, {48, 4}, {0, 4}, {125, 3}, {108, 3}, {0, 2},
	}

	var got, want bytes.Buffer
	var dd dictDecoder
	dd.init(1<<11, nil)

	// writeCopy emits a back-reference, draining the window when it fills.
	var writeCopy = func(dist, length int) {
		for length > 0 {
			cnt := dd.tryWriteCopy(dist, length)
			if cnt == 0 {
				cnt = dd.writeCopy(dist, length)
			}

			length -= cnt
			if dd.availWrite() == 0 {
				got.Write(dd.readFlush())
			}
		}
	}
	// writeString emits literal bytes, draining the window when it fills.
	var writeString = func(str string) {
		for len(str) > 0 {
			cnt := copy(dd.writeSlice(), str)
			str = str[cnt:]
			dd.writeMark(cnt)
			if dd.availWrite() == 0 {
				got.Write(dd.readFlush())
			}
		}
	}

	writeString(".")
	want.WriteByte('.')

	str := poem
	for _, ref := range poemRefs {
		if ref.dist == 0 {
			writeString(str[:ref.length])
		} else {
			writeCopy(ref.dist, ref.length)
		}
		str = str[ref.length:]
	}
	want.WriteString(poem)

	// Exercise copies at maximum distance and with heavy overlap.
	writeCopy(dd.histSize(), 33)
	want.Write(want.Bytes()[:33])

	writeString(abc)
	writeCopy(len(abc), 59*len(abc))
	want.WriteString(strings.Repeat(abc, 60))

	writeString(fox)
	writeCopy(len(fox), 9*len(fox))
	want.WriteString(strings.Repeat(fox, 10))

	writeString(".")
	writeCopy(1, 9)
	want.WriteString(strings.Repeat(".", 10))

	writeString(strings.ToUpper(poem))
	writeCopy(len(poem), 7*len(poem))
	want.WriteString(strings.Repeat(strings.ToUpper(poem), 8))

	writeCopy(dd.histSize(), 10)
	want.Write(want.Bytes()[want.Len()-dd.histSize():][:10])

	got.Write(dd.readFlush())
	if got.String() != want.String() {
		t.Errorf("final string mismatch:\ngot %q\nwant %q", got.String(), want.String())
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 0000000000..24caf5f70b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,216 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
+
// fastEnc is the interface implemented by the fast (level-based) encoders
// returned by newFastEnc.
type fastEnc interface {
	// Encode emits the tokens for src into dst.
	Encode(dst *tokens, src []byte)
	// Reset discards the encoder history so previous input is not matched.
	Reset()
}
+
// newFastEnc returns the fast encoder for compression levels 1 through 6.
// Every encoder starts with cur == maxStoreBlockSize so that all history
// offsets are initially out of reach. It panics for any other level; the
// caller is expected to validate the level first.
func newFastEnc(level int) fastEnc {
	switch level {
	case 1:
		return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 2:
		return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 3:
		return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 4:
		return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 5:
		return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 6:
		return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
	default:
		panic("invalid level specified")
	}
}
+
const (
	tableBits       = 15             // Bits used in the table
	tableSize       = 1 << tableBits // Size of the table
	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1              // The smallest match offset
	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15        // The largest match offset

	bTableBits   = 17                                               // Bits used in the big tables
	bTableSize   = 1 << bTableBits                                  // Size of the table
	allocHistory = maxStoreBlockSize * 5                            // Size to preallocate for history.
	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)

// Prime multipliers used by the multiplicative hash functions below
// (hash7, hashLen). primeNbytes is used when hashing the low N bytes.
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)
+
// load3232 reads a little-endian uint32 from src starting at index off.
// It panics if fewer than 4 bytes are available at off.
func load3232(src []byte, off int32) uint32 {
	return binary.LittleEndian.Uint32(src[off:])
}
+
// load6432 reads a little-endian uint64 from src starting at index off.
// It panics if fewer than 8 bytes are available at off.
func load6432(src []byte, off int32) uint64 {
	return binary.LittleEndian.Uint64(src[off:])
}
+
// tableEntry is a single hash-table slot holding the offset of a
// previously seen position in the history buffer.
type tableEntry struct {
	offset int32
}

// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	// hist is the sliding history window; the current block is appended
	// to it by addBlock.
	hist []byte
	// cur is the base offset added to positions in hist; it is advanced
	// by addBlock (on slide) and Reset so stale offsets become unreachable.
	cur int32
}
+
// addBlock appends src to the history buffer and returns the index at
// which src begins inside e.hist. If appending would overflow the buffer
// capacity, the most recent maxMatchOffset bytes are slid down to the
// front and e.cur is advanced by the discarded amount so previously stored
// offsets remain consistent.
func (e *fastGen) addBlock(src []byte) int32 {
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			e.hist = make([]byte, 0, allocHistory)
		} else {
			if cap(e.hist) < maxMatchOffset*2 {
				panic("unexpected buffer size")
			}
			// Move down
			offset := int32(len(e.hist)) - maxMatchOffset
			// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
			// The slice-to-array-pointer conversion (Go 1.17+) copies exactly
			// maxMatchOffset bytes with a single compile-time-sized move.
			*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
			e.cur += offset
			e.hist = e.hist[:maxMatchOffset]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
+
// tableEntryPrev holds the current and previous entry for a hash bucket,
// allowing two match candidates to be checked per hash.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	// Masking with reg8SizeMask64 (defined elsewhere in the package) keeps
	// the shift count in the range the hardware shift supports.
	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
+
+// hashLen returns a hash of the lowest mls bytes of with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+ switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
+ case 5:
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+ case 6:
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+ case 7:
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+ case 8:
+ return uint32((u * prime8bytes) >> (64 - length))
+ default:
+ return (uint32(u) * prime4bytes) >> (32 - length)
+ }
+}
+
+// matchlen will return the match length between offsets and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets and t in src.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
// Reset the encoding table. The history buffer is kept (or reallocated to
// allocHistory capacity) and e.cur is advanced so every previously stored
// offset becomes unreachable.
func (e *fastGen) Reset() {
	if cap(e.hist) < allocHistory {
		e.hist = make([]byte, 0, allocHistory)
	}
	// We offset current position so everything will be out of reach.
	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
	if e.cur <= bufferReset {
		e.cur += maxMatchOffset + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
}
+
// matchLen returns the number of leading bytes that a and b have in common.
// 'a' must be the shortest of the two.
func matchLen(a, b []byte) int {
	var n int

	// Compare 8 bytes at a time; the first XOR difference pinpoints the
	// mismatching byte via its trailing zero count.
	for len(a) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}

	// Fewer than 8 bytes remain: compare byte by byte.
	for i := range a {
		if a[i] != b[i] {
			return n + i
		}
	}
	return n + len(a)
}
diff --git a/vendor/github.com/klauspost/compress/flate/flate_test.go b/vendor/github.com/klauspost/compress/flate/flate_test.go
new file mode 100644
index 0000000000..9817efefd9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/flate_test.go
@@ -0,0 +1,366 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test tests some internals of the flate package.
+// The tests in package compress/gzip serve as the
+// end-to-end test of the decompressor.
+
+package flate
+
+import (
+ "archive/zip"
+ "bytes"
+ "compress/flate"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "testing"
+)
+
// TestIssue5915 checks that an invalid bit-length sequence is rejected.
// The following test should not panic.
func TestIssue5915(t *testing.T) {
	bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, 5, 5, 6,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 6, 0, 11, 0, 8, 0, 6, 6, 10, 8}
	var h huffmanDecoder
	if h.init(bits) {
		t.Fatalf("Given sequence of bits is bad, and should not succeed.")
	}
}

// TestIssue5962 checks another invalid bit-length sequence.
// The following test should not panic.
func TestIssue5962(t *testing.T) {
	bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0,
		5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11}
	var h huffmanDecoder
	if h.init(bits) {
		t.Fatalf("Given sequence of bits is bad, and should not succeed.")
	}
}

// TestIssue6255 checks that a valid sequence initializes and a subsequent
// invalid one is rejected on the same decoder.
// The following test should not panic.
func TestIssue6255(t *testing.T) {
	bits1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11}
	bits2 := []int{11, 13}
	var h huffmanDecoder
	if !h.init(bits1) {
		t.Fatalf("Given sequence of bits is good and should succeed.")
	}
	if h.init(bits2) {
		t.Fatalf("Given sequence of bits is bad and should not succeed.")
	}
}
+
// TestInvalidEncoding verifies that huffSym reports an error when the input
// bit stream contains a sequence that is not a valid code.
func TestInvalidEncoding(t *testing.T) {
	// Initialize Huffman decoder to recognize "0".
	var h huffmanDecoder
	if !h.init([]int{1}) {
		t.Fatal("Failed to initialize Huffman decoder")
	}

	// Initialize decompressor with invalid Huffman coding.
	var f decompressor
	f.r = bytes.NewReader([]byte{0xff})

	_, err := f.huffSym(&h)
	if err == nil {
		t.Fatal("Should have rejected invalid bit sequence")
	}
}
+
+func TestRegressions(t *testing.T) {
+ // Test fuzzer regressions
+ data, err := os.ReadFile("testdata/regression.zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, tt := range zr.File {
+ data, err := tt.Open()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data1, err := io.ReadAll(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Run(tt.Name, func(t *testing.T) {
+ if testing.Short() && len(data1) > 10000 {
+ t.SkipNow()
+ }
+ for level := 0; level <= 9; level++ {
+ t.Run(fmt.Sprint(tt.Name+"-level", 1), func(t *testing.T) {
+ buf := new(bytes.Buffer)
+ fw, err := NewWriter(buf, level)
+ if err != nil {
+ t.Error(err)
+ }
+ n, err := fw.Write(data1)
+ if n != len(data1) {
+ t.Error("short write")
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ err = fw.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ fr1 := NewReader(buf)
+ data2, err := io.ReadAll(fr1)
+ if err != nil {
+ t.Error(err)
+ }
+ if !bytes.Equal(data1, data2) {
+ t.Error("not equal")
+ }
+ // Do it again...
+ buf.Reset()
+ fw.Reset(buf)
+ n, err = fw.Write(data1)
+ if n != len(data1) {
+ t.Error("short write")
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ err = fw.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ fr1 = flate.NewReader(buf)
+ data2, err = io.ReadAll(fr1)
+ if err != nil {
+ t.Error(err)
+ }
+ if !bytes.Equal(data1, data2) {
+ t.Error("not equal")
+ }
+ })
+ }
+ t.Run(tt.Name+"stateless", func(t *testing.T) {
+ // Split into two and use history...
+ buf := new(bytes.Buffer)
+ err = StatelessDeflate(buf, data1[:len(data1)/2], false, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Use top half as dictionary...
+ dict := data1[:len(data1)/2]
+ err = StatelessDeflate(buf, data1[len(data1)/2:], true, dict)
+ if err != nil {
+ t.Error(err)
+ }
+ t.Log(buf.Len())
+ fr1 := NewReader(buf)
+ data2, err := io.ReadAll(fr1)
+ if err != nil {
+ t.Error(err)
+ }
+ if !bytes.Equal(data1, data2) {
+ //fmt.Printf("want:%x\ngot: %x\n", data1, data2)
+ t.Error("not equal")
+ }
+ })
+ })
+ }
+}
+
// TestInvalidBits verifies that both over-subscribed and incomplete
// bit-length sets are rejected by huffmanDecoder.init.
func TestInvalidBits(t *testing.T) {
	oversubscribed := []int{1, 2, 3, 4, 4, 5}
	incomplete := []int{1, 2, 4, 4}
	var h huffmanDecoder
	if h.init(oversubscribed) {
		t.Fatal("Should reject oversubscribed bit-length set")
	}
	if h.init(incomplete) {
		t.Fatal("Should reject incomplete bit-length set")
	}
}
+
// TestStreams decodes a collection of hand-crafted DEFLATE streams and
// checks each against its expected output (or expected failure).
func TestStreams(t *testing.T) {
	// To verify any of these hexstrings as valid or invalid flate streams
	// according to the C zlib library, you can use the Python wrapper library:
	// >>> hex_string = "010100feff11"
	// >>> import zlib
	// >>> zlib.decompress(hex_string.decode("hex"), -15) # Negative means raw DEFLATE
	// '\x11'

	testCases := []struct {
		desc   string // Description of the stream
		stream string // Hexstring of the input DEFLATE stream
		want   string // Expected result. Use "fail" to expect failure
	}{{
		"degenerate HCLenTree",
		"05e0010000000000100000000000000000000000000000000000000000000000" +
			"00000000000000000004",
		"fail",
	}, {
		"complete HCLenTree, empty HLitTree, empty HDistTree",
		"05e0010400000000000000000000000000000000000000000000000000000000" +
			"00000000000000000010",
		"fail",
	}, {
		"empty HCLenTree",
		"05e0010000000000000000000000000000000000000000000000000000000000" +
			"00000000000000000010",
		"fail",
	}, {
		"complete HCLenTree, complete HLitTree, empty HDistTree, use missing HDist symbol",
		"000100feff000de0010400000000100000000000000000000000000000000000" +
			"0000000000000000000000000000002c",
		"fail",
	}, {
		"complete HCLenTree, complete HLitTree, degenerate HDistTree, use missing HDist symbol",
		"000100feff000de0010000000000000000000000000000000000000000000000" +
			"00000000000000000610000000004070",
		"fail",
	}, {
		"complete HCLenTree, empty HLitTree, empty HDistTree",
		"05e0010400000000100400000000000000000000000000000000000000000000" +
			"0000000000000000000000000008",
		"fail",
	}, {
		"complete HCLenTree, empty HLitTree, degenerate HDistTree",
		"05e0010400000000100400000000000000000000000000000000000000000000" +
			"0000000000000000000800000008",
		"fail",
	}, {
		"complete HCLenTree, degenerate HLitTree, degenerate HDistTree, use missing HLit symbol",
		"05e0010400000000100000000000000000000000000000000000000000000000" +
			"0000000000000000001c",
		"fail",
	}, {
		"complete HCLenTree, complete HLitTree, too large HDistTree",
		"edff870500000000200400000000000000000000000000000000000000000000" +
			"000000000000000000080000000000000004",
		"fail",
	}, {
		"complete HCLenTree, complete HLitTree, empty HDistTree, excessive repeater code",
		"edfd870500000000200400000000000000000000000000000000000000000000" +
			"000000000000000000e8b100",
		"fail",
	}, {
		"complete HCLenTree, complete HLitTree, empty HDistTree of normal length 30",
		"05fd01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" +
			"ffffffffffffffffff07000000fe01",
		"",
	}, {
		"complete HCLenTree, complete HLitTree, empty HDistTree of excessive length 31",
		"05fe01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" +
			"ffffffffffffffffff07000000fc03",
		"fail",
	}, {
		"complete HCLenTree, over-subscribed HLitTree, empty HDistTree",
		"05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" +
			"ffffffffffffffffff07f00f",
		"fail",
	}, {
		"complete HCLenTree, under-subscribed HLitTree, empty HDistTree",
		"05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" +
			"fffffffffcffffffff07f00f",
		"fail",
	}, {
		"complete HCLenTree, complete HLitTree with single code, empty HDistTree",
		"05e001240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" +
			"ffffffffffffffffff07f00f",
		"01",
	}, {
		"complete HCLenTree, complete HLitTree with multiple codes, empty HDistTree",
		"05e301240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" +
			"ffffffffffffffffff07807f",
		"01",
	}, {
		"complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HDist symbol",
		"000100feff000de0010400000000100000000000000000000000000000000000" +
			"0000000000000000000000000000003c",
		"00000000",
	}, {
		"complete HCLenTree, degenerate HLitTree, degenerate HDistTree",
		"05e0010400000000100000000000000000000000000000000000000000000000" +
			"0000000000000000000c",
		"",
	}, {
		"complete HCLenTree, degenerate HLitTree, empty HDistTree",
		"05e0010400000000100000000000000000000000000000000000000000000000" +
			"00000000000000000004",
		"",
	}, {
		"complete HCLenTree, complete HLitTree, empty HDistTree, spanning repeater code",
		"edfd870500000000200400000000000000000000000000000000000000000000" +
			"000000000000000000e8b000",
		"",
	}, {
		"complete HCLenTree with length codes, complete HLitTree, empty HDistTree",
		"ede0010400000000100000000000000000000000000000000000000000000000" +
			"0000000000000000000400004000",
		"",
	}, {
		"complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit symbol 284 with count 31",
		"000100feff00ede0010400000000100000000000000000000000000000000000" +
			"000000000000000000000000000000040000407f00",
		"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000" +
			"000000",
	}, {
		"complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit and HDist symbols",
		"0cc2010d00000082b0ac4aff0eb07d27060000ffff",
		"616263616263",
	}, {
		"fixed block, use reserved symbol 287",
		"33180700",
		"fail",
	}, {
		"raw block",
		"010100feff11",
		"11",
	}, {
		"issue 10426 - over-subscribed HCLenTree causes a hang",
		"344c4a4e494d4b070000ff2e2eff2e2e2e2e2eff",
		"fail",
	}, {
		"issue 11030 - empty HDistTree unexpectedly leads to error",
		"05c0070600000080400fff37a0ca",
		"",
	}, {
		"issue 11033 - empty HDistTree unexpectedly leads to error",
		"050fb109c020cca5d017dcbca044881ee1034ec149c8980bbc413c2ab35be9dc" +
			"b1473449922449922411202306ee97b0383a521b4ffdcf3217f9f7d3adb701",
		"3130303634342068652e706870005d05355f7ed957ff084a90925d19e3ebc6d0" +
			"c6d7",
	}}

	for i, tc := range testCases {
		data, err := hex.DecodeString(tc.stream)
		if err != nil {
			t.Fatal(err)
		}
		data, err = io.ReadAll(NewReader(bytes.NewReader(data)))
		if tc.want == "fail" {
			if err == nil {
				t.Errorf("#%d (%s): got nil error, want non-nil", i, tc.desc)
			}
		} else {
			if err != nil {
				t.Errorf("#%d (%s): %v", i, tc.desc, err)
				continue
			}
			if got := hex.EncodeToString(data); got != tc.want {
				t.Errorf("#%d (%s):\ngot %q\nwant %q", i, tc.desc, got, tc.want)
			}
		}
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/fuzz_test.go b/vendor/github.com/klauspost/compress/flate/fuzz_test.go
new file mode 100644
index 0000000000..527bad25d1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fuzz_test.go
@@ -0,0 +1,128 @@
+//go:build go1.18
+// +build go1.18
+
+package flate
+
+import (
+ "bytes"
+ "flag"
+ "io"
+ "os"
+ "strconv"
+ "testing"
+
+ "github.com/klauspost/compress/internal/fuzz"
+)
+
+// Fuzzing tweaks:
+var fuzzStartF = flag.Int("start", HuffmanOnly, "Start fuzzing at this level")
+var fuzzEndF = flag.Int("end", BestCompression, "End fuzzing at this level (inclusive)")
+var fuzzMaxF = flag.Int("max", 1<<20, "Maximum input size")
+var fuzzSLF = flag.Bool("sl", true, "Include stateless encodes")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ os.Exit(m.Run())
+}
+
// FuzzEncoding round-trips fuzz inputs through every compression level in
// [start, end] (twice per level, to exercise Writer.Reset) and, optionally,
// through the stateless encoder with a dictionary split.
func FuzzEncoding(f *testing.F) {
	fuzz.AddFromZip(f, "testdata/regression.zip", fuzz.TypeRaw, false)
	fuzz.AddFromZip(f, "testdata/fuzz/encode-raw-corpus.zip", fuzz.TypeRaw, testing.Short())
	fuzz.AddFromZip(f, "testdata/fuzz/FuzzEncoding.zip", fuzz.TypeGoFuzz, testing.Short())

	startFuzz := *fuzzStartF
	endFuzz := *fuzzEndF
	maxSize := *fuzzMaxF
	stateless := *fuzzSLF

	// Reuse one decoder and one writer per level across all fuzz inputs.
	decoder := NewReader(nil)
	buf := new(bytes.Buffer)
	encs := make([]*Writer, endFuzz-startFuzz+1)
	for i := range encs {
		var err error
		encs[i], err = NewWriter(nil, i+startFuzz)
		if err != nil {
			f.Fatal(err.Error())
		}
	}

	f.Fuzz(func(t *testing.T, data []byte) {
		if len(data) > maxSize {
			return
		}
		for level := startFuzz; level <= endFuzz; level++ {
			msg := "level " + strconv.Itoa(level) + ":"
			buf.Reset()
			fw := encs[level-startFuzz]
			fw.Reset(buf)
			n, err := fw.Write(data)
			if n != len(data) {
				t.Fatal(msg + "short write")
			}
			if err != nil {
				t.Fatal(msg + err.Error())
			}
			err = fw.Close()
			if err != nil {
				t.Fatal(msg + err.Error())
			}
			decoder.(Resetter).Reset(buf, nil)
			data2, err := io.ReadAll(decoder)
			if err != nil {
				t.Fatal(msg + err.Error())
			}
			if !bytes.Equal(data, data2) {
				t.Fatal(msg + "not equal")
			}
			// Do it again...
			msg = "level " + strconv.Itoa(level) + " (reset):"
			buf.Reset()
			fw.Reset(buf)
			n, err = fw.Write(data)
			if n != len(data) {
				t.Fatal(msg + "short write")
			}
			if err != nil {
				t.Fatal(msg + err.Error())
			}
			err = fw.Close()
			if err != nil {
				t.Fatal(msg + err.Error())
			}
			decoder.(Resetter).Reset(buf, nil)
			data2, err = io.ReadAll(decoder)
			if err != nil {
				t.Fatal(msg + err.Error())
			}
			if !bytes.Equal(data, data2) {
				t.Fatal(msg + "not equal")
			}
		}
		if !stateless {
			return
		}
		// Split into two and use history...
		buf.Reset()
		err := StatelessDeflate(buf, data[:len(data)/2], false, nil)
		if err != nil {
			t.Error(err)
		}

		// Use top half as dictionary...
		dict := data[:len(data)/2]
		err = StatelessDeflate(buf, data[len(data)/2:], true, dict)
		if err != nil {
			t.Error(err)
		}

		decoder.(Resetter).Reset(buf, nil)
		data2, err := io.ReadAll(decoder)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(data, data2) {
			//fmt.Printf("want:%x\ngot: %x\n", data1, data2)
			t.Error("not equal")
		}
	})
}
diff --git a/vendor/github.com/klauspost/compress/flate/gotest/ya.make b/vendor/github.com/klauspost/compress/flate/gotest/ya.make
new file mode 100644
index 0000000000..8b7d9ffe4d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/gotest/ya.make
@@ -0,0 +1,16 @@
# ya make test wrapper: runs the Go tests of the vendored flate package.
GO_TEST_FOR(vendor/github.com/klauspost/compress/flate)

LICENSE(
    Apache-2.0 AND
    BSD-3-Clause AND
    MIT
)

# Test data directories made available to the tests at run time.
DATA(
    arcadia/vendor/github.com/klauspost/compress/testdata
    arcadia/vendor/github.com/klauspost/compress/flate/testdata
)

# Tests expect relative testdata/ paths, so run from the package directory.
TEST_CWD(vendor/github.com/klauspost/compress/flate)

END()
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 0000000000..f70594c34e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
const (
	// The largest offset code.
	offsetCodeCount = 30

	// The special code used to mark the end of a block.
	endBlockMarker = 256

	// The first length code.
	lengthCodesStart = 257

	// The number of codegen codes.
	codegenCodeCount = 19
	badCode          = 255

	// maxPredefinedTokens is the maximum number of tokens
	// where we check if fixed size is smaller.
	maxPredefinedTokens = 250

	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 246
)

// Minimum length code that emits bits.
const lengthExtraBitsMinCode = 8

// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = [32]uint8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - LENGTH_CODES_START.
var lengthBase = [32]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// Minimum offset code that emits bits.
const offsetExtraBitsMinCode = 4

// offset code word extra bits.
var offsetExtraBits = [32]int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14,
}

// offsetCombined packs, per offset code, the extra-bit count (low byte)
// with the base offset shifted left by 8. Populated by init below.
var offsetCombined = [32]uint32{}

func init() {
	var offsetBase = [32]uint32{
		/* normal deflate */
		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,

		/* extended window */
		0x008000, 0x00c000,
	}

	for i := range offsetCombined[:] {
		// Don't use extended window values...
		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
			continue
		}
		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
	}
}
+
// The odd order in which the codegen code sizes are written.
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

// huffmanBitWriter accumulates bits and bytes for DEFLATE output, writing
// to an underlying io.Writer with the first error kept sticky in err.
type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits            uint64
	nbits           uint8
	nbytes          uint8
	lastHuffMan     bool
	literalEncoding *huffmanEncoder
	tmpLitEncoding  *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error
	lastHeader      int
	// Set between 0 (reused block can be up to 2x the size)
	logNewTablePenalty uint
	bytes              [256 + 8]byte
	// Symbol frequency tables gathered per block.
	literalFreq [lengthCodesStart + 32]uint16
	offsetFreq  [32]uint16
	codegenFreq [codegenCodeCount]uint16

	// codegen must have an extra space for the final symbol.
	codegen [literalCount + offsetCodeCount + 1]uint8
}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of using a fresh table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
// newHuffmanBitWriter returns a writer targeting w with freshly allocated
// literal, temporary-literal, codegen and offset encoders.
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	return &huffmanBitWriter{
		writer:          w,
		literalEncoding: newHuffmanEncoder(literalCount),
		tmpLitEncoding:  newHuffmanEncoder(literalCount),
		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
	}
}
+
// reset retargets w to a new writer and clears all buffered bits, the
// sticky error, and the Huffman-reuse state.
func (w *huffmanBitWriter) reset(writer io.Writer) {
	w.writer = writer
	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
	w.lastHeader = 0
	w.lastHuffMan = false
}
+
// canReuse reports whether the current Huffman tables can encode every
// symbol occurring in t: reuse is only valid if no symbol with a non-zero
// frequency maps to an absent (zero-length) code.
func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
	// Offset codes.
	a := t.offHist[:offsetCodeCount]
	b := w.offsetEncoding.codes
	b = b[:len(a)]
	for i, v := range a {
		if v != 0 && b[i].zero() {
			return false
		}
	}

	// Length/extra codes (symbols above 256).
	a = t.extraHist[:literalCount-256]
	b = w.literalEncoding.codes[256:literalCount]
	b = b[:len(a)]
	for i, v := range a {
		if v != 0 && b[i].zero() {
			return false
		}
	}

	// Literal byte codes.
	a = t.litHist[:256]
	b = w.literalEncoding.codes[:len(a)]
	for i, v := range a {
		if v != 0 && b[i].zero() {
			return false
		}
	}
	return true
}
+
// flush writes any pending EOB plus all buffered bits and bytes to the
// underlying writer, zero-padding the final partial byte.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+ w.bits |= uint64(b) << (w.nbits & 63)
+ w.nbits += nb
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker
+//
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+ // Note that we are using codegen both as a temporary variable for holding
+ // a copy of the frequencies, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen[:] // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = litEnc.codes[i].len()
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = offEnc.codes[i].len()
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
+
+func (w *huffmanBitWriter) codegens() int {
+ numCodegens := len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicReuseSize returns the size of dynamically encoded data in bits,
+// assuming the existing literal and offset encodings are reused (no header cost).
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+ size = litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:])
+ return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ header, numCodegens := w.headerSize()
+ size = header +
+ litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:]) +
+ extraBits
+ return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+ total := 0
+ for i, n := range w.literalFreq[257:literalCount] {
+ total += int(n) * int(lengthExtraBits[i&31])
+ }
+ for i, n := range w.offsetFreq[:offsetCodeCount] {
+ total += int(n) * int(offsetExtraBits[i&31])
+ }
+ return total
+}
+
+// fixedSize returns the size of the data encoded with the fixed Huffman tables in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ // The function does not get inlined if we "& 63" the shift.
+ w.bits |= c.code64() << (w.nbits & 63)
+ w.nbits += c.len()
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+
+ // We over-write, but faster...
+ binary.LittleEndian.PutUint64(w.bytes[n:], bits)
+ n += 6
+
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+
+ w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+ var codeWord = uint32(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[codeWord])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+ if length == 0 && isEof {
+ w.writeFixedHeader(isEof)
+ // EOB: 7 bits, value: 0
+ w.writeBits(0, 7)
+ w.flush()
+ return
+ }
+
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens.AddEOB()
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, false)
+ w.generate()
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ extraBits = w.extraBitSize()
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = math.MaxInt32
+ if tokens.n < maxPredefinedTokens {
+ size = w.fixedSize(extraBits)
+ }
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ sync = sync || eof
+ if sync {
+ tokens.AddEOB()
+ }
+
+ // We cannot reuse pure huffman table, and must mark as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+ // We will not try to reuse.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+
+ // fillReuse enables filling of empty values.
+ // This will make encodings always reusable without testing.
+ // However, this does not appear to benefit on most cases.
+ const fillReuse = false
+
+ // Check if we can reuse...
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+ extraBits := 0
+ ssize, storable := w.storedSize(input)
+
+ const usePrefs = true
+ if storable || w.lastHeader > 0 {
+ extraBits = w.extraBitSize()
+ }
+
+ var size int
+
+ // Check if we should reuse.
+ if w.lastHeader > 0 {
+ // Estimate size for using a new table.
+ // Use the previous header size as the best estimate.
+ newSize := w.lastHeader + tokens.EstimatedBits()
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
+
+ // The estimated size is calculated as an optimal table.
+ // We add a penalty to make it more realistic and re-use a bit more.
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
+
+ // Check if a new table is better.
+ if newSize < reuseSize {
+ // Write the EOB we owe.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ size = newSize
+ w.lastHeader = 0
+ } else {
+ size = reuseSize
+ }
+
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
+
+ // We want a new block/table
+ if w.lastHeader == 0 {
+ if fillReuse && !sync {
+ w.fillTokens()
+ numLiterals, numOffsets = maxNumLit, maxNumDist
+ } else {
+ w.literalFreq[endBlockMarker] = 1
+ }
+
+ w.generate()
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
+ var numCodegens int
+ if fillReuse && !sync {
+ // Reindex for accurate size...
+ w.indexTokens(tokens, true)
+ }
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ // Store predefined, if we don't get a reasonable improvement.
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+
+ if storable && ssize <= size {
+ // Store bytes, if we don't get an improvement.
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ if !sync {
+ w.lastHeader, _ = w.headerSize()
+ }
+ w.lastHuffMan = false
+ }
+
+ if sync {
+ w.lastHeader = 0
+ }
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+func (w *huffmanBitWriter) fillTokens() {
+ for i, v := range w.literalFreq[:literalCount] {
+ if v == 0 {
+ w.literalFreq[i] = 1
+ }
+ }
+ for i, v := range w.offsetFreq[:offsetCodeCount] {
+ if v == 0 {
+ w.offsetFreq[i] = 1
+ }
+ }
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+ //copy(w.literalFreq[:], t.litHist[:])
+ *(*[256]uint16)(w.literalFreq[:]) = t.litHist
+ //copy(w.literalFreq[256:], t.extraHist[:])
+ *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+ w.offsetFreq = t.offHist
+
+ if t.n == 0 {
+ return
+ }
+ if filled {
+ return maxNumLit, maxNumDist
+ }
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+ // we should count at least one offset to be sure that the offset huffman tree could be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ return
+}
+
+func (w *huffmanBitWriter) generate() {
+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ var deferEOB bool
+ if tokens[len(tokens)-1] == endBlockMarker {
+ tokens = tokens[:len(tokens)-1]
+ deferEOB = true
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := leCodes[:256]
+ offs := oeCodes[:32]
+ lengths := leCodes[lengthCodesStart:]
+ lengths = lengths[:32]
+
+ // Go 1.16 LOVES having these on stack.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ for _, t := range tokens {
+ if t < 256 {
+ //w.writeCode(lits[t.literal()])
+ c := lits[t]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ continue
+ }
+
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length) & 31
+ if false {
+ w.writeCode(lengths[lengthCode])
+ } else {
+ // inlined
+ c := lengths[lengthCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+
+ if lengthCode >= lengthExtraBitsMinCode {
+ extraLengthBits := lengthExtraBits[lengthCode]
+ //w.writeBits(extraLength, extraLengthBits)
+ extraLength := int32(length - lengthBase[lengthCode])
+ bits |= uint64(extraLength) << (nbits & 63)
+ nbits += extraLengthBits
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := (offset >> 16) & 31
+ if false {
+ w.writeCode(offs[offsetCode])
+ } else {
+ // inlined
+ c := offs[offsetCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+
+ if offsetCode >= offsetExtraBitsMinCode {
+ offsetComb := offsetCombined[offsetCode]
+ //w.writeBits(extraOffset, extraOffsetBits)
+ bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+ nbits += uint8(offsetComb)
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+ }
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if deferEOB {
+ w.writeCode(leCodes[endBlockMarker])
+ }
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ w := newHuffmanBitWriter(nil)
+ w.offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// results only gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq[:] {
+ w.literalFreq[i] = 0
+ }
+ if !w.lastHuffMan {
+ for i := range w.offsetFreq[:] {
+ w.offsetFreq[i] = 0
+ }
+ }
+
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+
+ // Add everything as literals
+ // We have to estimate the header size.
+ // Assume header is around 70 bytes:
+ // https://stackoverflow.com/a/25454430
+ const guessHeaderSizeBits = 70 * 8
+ histogram(input, w.literalFreq[:numLiterals])
+ ssize, storable := w.storedSize(input)
+ if storable && len(input) > 1024 {
+ // Quick check for incompressible content.
+ abs := float64(0)
+ avg := float64(len(input)) / 256
+ max := float64(len(input) * 2)
+ for _, v := range w.literalFreq[:256] {
+ diff := float64(v) - avg
+ abs += diff * diff
+ if abs > max {
+ break
+ }
+ }
+ if abs < max {
+ if debugDeflate {
+ fmt.Println("stored", abs, "<", max)
+ }
+ // No chance we can compress this...
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
+ w.literalFreq[endBlockMarker] = 1
+ w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+ estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+ if estBits < math.MaxInt32 {
+ estBits += w.lastHeader
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
+ }
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if storable && ssize <= estBits {
+ if debugDeflate {
+ fmt.Println("stored,", ssize, "<=", estBits)
+ }
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ if w.lastHeader > 0 {
+ reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
+
+ if estBits < reuseSize {
+ if debugDeflate {
+ fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
+ }
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ } else if debugDeflate {
+ fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ }
+ }
+
+ count := 0
+ if w.lastHeader == 0 {
+ // Use the temp encoding, so swap.
+ w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ numCodegens := w.codegens()
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHuffMan = true
+ w.lastHeader, _ = w.headerSize()
+ if debugDeflate {
+ count += w.lastHeader
+ fmt.Println("header:", count/8)
+ }
+ }
+
+ encoding := w.literalEncoding.codes[:256]
+ // Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ if debugDeflate {
+ count -= int(nbytes)*8 + int(nbits)
+ }
+ // Unroll, write 3 codes/loop.
+ // Fastest number of unrolls.
+ for len(input) > 3 {
+ // We must have at least 48 bits free.
+ if nbits >= 8 {
+ n := nbits >> 3
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ bits >>= (n * 8) & 63
+ nbits -= n * 8
+ nbytes += n
+ }
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ a, b := encoding[input[0]], encoding[input[1]]
+ bits |= a.code64() << (nbits & 63)
+ bits |= b.code64() << ((nbits + a.len()) & 63)
+ c := encoding[input[2]]
+ nbits += b.len() + a.len()
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ input = input[3:]
+ }
+
+ // Remaining...
+ for _, t := range input {
+ if nbits >= 48 {
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ bits |= c.code64() << (nbits & 63)
+
+ nbits += c.len()
+ if debugDeflate {
+ count += int(c.len())
+ }
+ }
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if debugDeflate {
+ nb := count + int(nbytes)*8 + int(nbits)
+ fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+ }
+ // Flush if needed to have space.
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+
+ if eof || sync {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go
new file mode 100644
index 0000000000..dfb93e326c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go
@@ -0,0 +1,381 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// update, when set with the -update flag, makes the tests regenerate the
+// reference (golden) files instead of failing on a mismatch.
+var update = flag.Bool("update", false, "update reference files")
+
+// TestBlockHuff tests huffman encoding against reference files
+// to detect possible regressions.
+// If encoding/bit allocation changes you can regenerate these files
+// by using the -update flag.
+// TestBlockHuff tests huffman encoding against reference files
+// to detect possible regressions.
+// If encoding/bit allocation changes you can regenerate these files
+// by using the -update flag.
+func TestBlockHuff(t *testing.T) {
+	// Collect the input files to test against.
+	inputs, err := filepath.Glob("testdata/huffman-*.in")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, name := range inputs {
+		// Derive the golden-file name; when the input has no ".in"
+		// suffix, input and output are the same file.
+		golden := name
+		if strings.HasSuffix(name, ".in") {
+			golden = strings.TrimSuffix(name, ".in") + ".golden"
+		}
+		t.Run(name, func(t *testing.T) {
+			testBlockHuff(t, name, golden)
+		})
+	}
+}
+
+// testBlockHuff huffman-encodes the contents of file in with writeBlockHuff
+// and compares the result to the reference file out. With -update the
+// reference file is rewritten from the new output instead. It then verifies
+// that the writer produces identical output after a reset, and finally
+// checks EOF handling via testWriterEOF.
+func testBlockHuff(t *testing.T, in, out string) {
+	all, err := os.ReadFile(in)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	// Encode the whole input as a single huffman block (not EOF, not sync).
+	var buf bytes.Buffer
+	bw := newHuffmanBitWriter(&buf)
+	bw.logNewTablePenalty = 8
+	bw.writeBlockHuff(false, all, false)
+	bw.flush()
+	got := buf.Bytes()
+
+	want, err := os.ReadFile(out)
+	// A missing golden file is only fatal when we are not regenerating it.
+	if err != nil && !*update {
+		t.Error(err)
+		return
+	}
+
+	t.Logf("Testing %q", in)
+	if !bytes.Equal(got, want) {
+		if *update {
+			if in != out {
+				t.Logf("Updating %q", out)
+				if err := os.WriteFile(out, got, 0666); err != nil {
+					t.Error(err)
+				}
+				return
+			}
+			// in == out: don't accidentally destroy input
+			t.Errorf("WARNING: -update did not rewrite input file %s", in)
+		}
+
+		// Dump the mismatching output next to the input for inspection.
+		t.Errorf("%q != %q (see %q)", in, out, in+".got")
+		if err := os.WriteFile(in+".got", got, 0666); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+	t.Log("Output ok")
+
+	// Test if the writer produces the same output after reset.
+	buf.Reset()
+	bw.reset(&buf)
+	bw.writeBlockHuff(false, all, false)
+	bw.flush()
+	got = buf.Bytes()
+	if !bytes.Equal(got, want) {
+		t.Errorf("after reset %q != %q (see %q)", in, out, in+".reset.got")
+		if err := os.WriteFile(in+".reset.got", got, 0666); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+	t.Log("Reset ok")
+	// NOTE(review): testWriterEOF is defined elsewhere in this package;
+	// presumably it checks that EOF blocks are emitted correctly for
+	// this input — confirm against its definition.
+	testWriterEOF(t, "huff", huffTest{input: in}, true)
+}
+
+// huffTest describes a single huffman bit writer test case: a token stream
+// to encode plus the file names of the input data and the expected outputs.
+type huffTest struct {
+	tokens []token // Token stream to encode.
+	input string // File name of input data matching the tokens.
+	want string // File name of data with the expected output with input available.
+	wantNoInput string // File name of the expected output when no input is available.
+}
+
+const ml = 0x7fc00000 // Maximum length token. Used to reduce the size of writeBlockTests
+
+var writeBlockTests = []huffTest{
+ {
+ input: "testdata/huffman-null-max.in",
+ want: "testdata/huffman-null-max.%s.expect",
+ wantNoInput: "testdata/huffman-null-max.%s.expect-noinput",
+ tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x0, 0x0},
+ },
+ {
+ input: "testdata/huffman-pi.in",
+ want: "testdata/huffman-pi.%s.expect",
+ wantNoInput: "testdata/huffman-pi.%s.expect-noinput",
+ tokens: []token{0x33, 0x2e, 0x31, 0x34, 0x31, 0x35, 0x39, 0x32, 0x36, 0x35, 0x33, 0x35, 0x38, 0x39, 0x37, 0x39, 0x33, 0x32, 0x33, 0x38, 0x34, 0x36, 0x32, 0x36, 0x34, 0x33, 0x33, 0x38, 0x33, 0x32, 0x37, 0x39, 0x35, 0x30, 0x32, 0x38, 0x38, 0x34, 0x31, 0x39, 0x37, 0x31, 0x36, 0x39, 0x33, 0x39, 0x39, 0x33, 0x37, 0x35, 0x31, 0x30, 0x35, 0x38, 0x32, 0x30, 0x39, 0x37, 0x34, 0x39, 0x34, 0x34, 0x35, 0x39, 0x32, 0x33, 0x30, 0x37, 0x38, 0x31, 0x36, 0x34, 0x30, 0x36, 0x32, 0x38, 0x36, 0x32, 0x30, 0x38, 0x39, 0x39, 0x38, 0x36, 0x32, 0x38, 0x30, 0x33, 0x34, 0x38, 0x32, 0x35, 0x33, 0x34, 0x32, 0x31, 0x31, 0x37, 0x30, 0x36, 0x37, 0x39, 0x38, 0x32, 0x31, 0x34, 0x38, 0x30, 0x38, 0x36, 0x35, 0x31, 0x33, 0x32, 0x38, 0x32, 0x33, 0x30, 0x36, 0x36, 0x34, 0x37, 0x30, 0x39, 0x33, 0x38, 0x34, 0x34, 0x36, 0x30, 0x39, 0x35, 0x35, 0x30, 0x35, 0x38, 0x32, 0x32, 0x33, 0x31, 0x37, 0x32, 0x35, 0x33, 0x35, 0x39, 0x34, 0x30, 0x38, 0x31, 0x32, 0x38, 0x34, 0x38, 0x31, 0x31, 0x31, 0x37, 0x34, 0x4040007e, 0x34, 0x31, 0x30, 0x32, 0x37, 0x30, 0x31, 0x39, 0x33, 0x38, 0x35, 0x32, 0x31, 0x31, 0x30, 0x35, 0x35, 0x35, 0x39, 0x36, 0x34, 0x34, 0x36, 0x32, 0x32, 0x39, 0x34, 0x38, 0x39, 0x35, 0x34, 0x39, 0x33, 0x30, 0x33, 0x38, 0x31, 0x40400012, 0x32, 0x38, 0x38, 0x31, 0x30, 0x39, 0x37, 0x35, 0x36, 0x36, 0x35, 0x39, 0x33, 0x33, 0x34, 0x34, 0x36, 0x40400047, 0x37, 0x35, 0x36, 0x34, 0x38, 0x32, 0x33, 0x33, 0x37, 0x38, 0x36, 0x37, 0x38, 0x33, 0x31, 0x36, 0x35, 0x32, 0x37, 0x31, 0x32, 0x30, 0x31, 0x39, 0x30, 0x39, 0x31, 0x34, 0x4040001a, 0x35, 0x36, 0x36, 0x39, 0x32, 0x33, 0x34, 0x36, 0x404000b2, 0x36, 0x31, 0x30, 0x34, 0x35, 0x34, 0x33, 0x32, 0x36, 0x40400032, 0x31, 0x33, 0x33, 0x39, 0x33, 0x36, 0x30, 0x37, 0x32, 0x36, 0x30, 0x32, 0x34, 0x39, 0x31, 0x34, 0x31, 0x32, 0x37, 0x33, 0x37, 0x32, 0x34, 0x35, 0x38, 0x37, 0x30, 0x30, 0x36, 0x36, 0x30, 0x36, 0x33, 0x31, 0x35, 0x35, 0x38, 0x38, 0x31, 0x37, 0x34, 0x38, 0x38, 0x31, 0x35, 0x32, 0x30, 0x39, 0x32, 0x30, 0x39, 0x36, 0x32, 0x38, 0x32, 0x39, 0x32, 0x35, 0x34, 0x30, 
0x39, 0x31, 0x37, 0x31, 0x35, 0x33, 0x36, 0x34, 0x33, 0x36, 0x37, 0x38, 0x39, 0x32, 0x35, 0x39, 0x30, 0x33, 0x36, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x30, 0x35, 0x33, 0x30, 0x35, 0x34, 0x38, 0x38, 0x32, 0x30, 0x34, 0x36, 0x36, 0x35, 0x32, 0x31, 0x33, 0x38, 0x34, 0x31, 0x34, 0x36, 0x39, 0x35, 0x31, 0x39, 0x34, 0x31, 0x35, 0x31, 0x31, 0x36, 0x30, 0x39, 0x34, 0x33, 0x33, 0x30, 0x35, 0x37, 0x32, 0x37, 0x30, 0x33, 0x36, 0x35, 0x37, 0x35, 0x39, 0x35, 0x39, 0x31, 0x39, 0x35, 0x33, 0x30, 0x39, 0x32, 0x31, 0x38, 0x36, 0x31, 0x31, 0x37, 0x404000e9, 0x33, 0x32, 0x40400009, 0x39, 0x33, 0x31, 0x30, 0x35, 0x31, 0x31, 0x38, 0x35, 0x34, 0x38, 0x30, 0x37, 0x4040010e, 0x33, 0x37, 0x39, 0x39, 0x36, 0x32, 0x37, 0x34, 0x39, 0x35, 0x36, 0x37, 0x33, 0x35, 0x31, 0x38, 0x38, 0x35, 0x37, 0x35, 0x32, 0x37, 0x32, 0x34, 0x38, 0x39, 0x31, 0x32, 0x32, 0x37, 0x39, 0x33, 0x38, 0x31, 0x38, 0x33, 0x30, 0x31, 0x31, 0x39, 0x34, 0x39, 0x31, 0x32, 0x39, 0x38, 0x33, 0x33, 0x36, 0x37, 0x33, 0x33, 0x36, 0x32, 0x34, 0x34, 0x30, 0x36, 0x35, 0x36, 0x36, 0x34, 0x33, 0x30, 0x38, 0x36, 0x30, 0x32, 0x31, 0x33, 0x39, 0x34, 0x39, 0x34, 0x36, 0x33, 0x39, 0x35, 0x32, 0x32, 0x34, 0x37, 0x33, 0x37, 0x31, 0x39, 0x30, 0x37, 0x30, 0x32, 0x31, 0x37, 0x39, 0x38, 0x40800099, 0x37, 0x30, 0x32, 0x37, 0x37, 0x30, 0x35, 0x33, 0x39, 0x32, 0x31, 0x37, 0x31, 0x37, 0x36, 0x32, 0x39, 0x33, 0x31, 0x37, 0x36, 0x37, 0x35, 0x40800232, 0x37, 0x34, 0x38, 0x31, 0x40400006, 0x36, 0x36, 0x39, 0x34, 0x30, 0x404001e7, 0x30, 0x30, 0x30, 0x35, 0x36, 0x38, 0x31, 0x32, 0x37, 0x31, 0x34, 0x35, 0x32, 0x36, 0x33, 0x35, 0x36, 0x30, 0x38, 0x32, 0x37, 0x37, 0x38, 0x35, 0x37, 0x37, 0x31, 0x33, 0x34, 0x32, 0x37, 0x35, 0x37, 0x37, 0x38, 0x39, 0x36, 0x40400129, 0x33, 0x36, 0x33, 0x37, 0x31, 0x37, 0x38, 0x37, 0x32, 0x31, 0x34, 0x36, 0x38, 0x34, 0x34, 0x30, 0x39, 0x30, 0x31, 0x32, 0x32, 0x34, 0x39, 0x35, 0x33, 0x34, 0x33, 0x30, 0x31, 0x34, 0x36, 0x35, 0x34, 0x39, 0x35, 0x38, 0x35, 0x33, 0x37, 0x31, 0x30, 0x35, 0x30, 0x37, 0x39, 0x404000ca, 0x36, 0x40400153, 
0x38, 0x39, 0x32, 0x33, 0x35, 0x34, 0x404001c9, 0x39, 0x35, 0x36, 0x31, 0x31, 0x32, 0x31, 0x32, 0x39, 0x30, 0x32, 0x31, 0x39, 0x36, 0x30, 0x38, 0x36, 0x34, 0x30, 0x33, 0x34, 0x34, 0x31, 0x38, 0x31, 0x35, 0x39, 0x38, 0x31, 0x33, 0x36, 0x32, 0x39, 0x37, 0x37, 0x34, 0x40400074, 0x30, 0x39, 0x39, 0x36, 0x30, 0x35, 0x31, 0x38, 0x37, 0x30, 0x37, 0x32, 0x31, 0x31, 0x33, 0x34, 0x39, 0x40800000, 0x38, 0x33, 0x37, 0x32, 0x39, 0x37, 0x38, 0x30, 0x34, 0x39, 0x39, 0x404002da, 0x39, 0x37, 0x33, 0x31, 0x37, 0x33, 0x32, 0x38, 0x4040018a, 0x36, 0x33, 0x31, 0x38, 0x35, 0x40400301, 0x404002e8, 0x34, 0x35, 0x35, 0x33, 0x34, 0x36, 0x39, 0x30, 0x38, 0x33, 0x30, 0x32, 0x36, 0x34, 0x32, 0x35, 0x32, 0x32, 0x33, 0x30, 0x404002e3, 0x40400267, 0x38, 0x35, 0x30, 0x33, 0x35, 0x32, 0x36, 0x31, 0x39, 0x33, 0x31, 0x31, 0x40400212, 0x31, 0x30, 0x31, 0x30, 0x30, 0x30, 0x33, 0x31, 0x33, 0x37, 0x38, 0x33, 0x38, 0x37, 0x35, 0x32, 0x38, 0x38, 0x36, 0x35, 0x38, 0x37, 0x35, 0x33, 0x33, 0x32, 0x30, 0x38, 0x33, 0x38, 0x31, 0x34, 0x32, 0x30, 0x36, 0x40400140, 0x4040012b, 0x31, 0x34, 0x37, 0x33, 0x30, 0x33, 0x35, 0x39, 0x4080032e, 0x39, 0x30, 0x34, 0x32, 0x38, 0x37, 0x35, 0x35, 0x34, 0x36, 0x38, 0x37, 0x33, 0x31, 0x31, 0x35, 0x39, 0x35, 0x40400355, 0x33, 0x38, 0x38, 0x32, 0x33, 0x35, 0x33, 0x37, 0x38, 0x37, 0x35, 0x4080037f, 0x39, 0x4040013a, 0x31, 0x40400148, 0x38, 0x30, 0x35, 0x33, 0x4040018a, 0x32, 0x32, 0x36, 0x38, 0x30, 0x36, 0x36, 0x31, 0x33, 0x30, 0x30, 0x31, 0x39, 0x32, 0x37, 0x38, 0x37, 0x36, 0x36, 0x31, 0x31, 0x31, 0x39, 0x35, 0x39, 0x40400237, 0x36, 0x40800124, 0x38, 0x39, 0x33, 0x38, 0x30, 0x39, 0x35, 0x32, 0x35, 0x37, 0x32, 0x30, 0x31, 0x30, 0x36, 0x35, 0x34, 0x38, 0x35, 0x38, 0x36, 0x33, 0x32, 0x37, 0x4040009a, 0x39, 0x33, 0x36, 0x31, 0x35, 0x33, 0x40400220, 0x4080015c, 0x32, 0x33, 0x30, 0x33, 0x30, 0x31, 0x39, 0x35, 0x32, 0x30, 0x33, 0x35, 0x33, 0x30, 0x31, 0x38, 0x35, 0x32, 0x40400171, 0x40400075, 0x33, 0x36, 0x32, 0x32, 0x35, 0x39, 0x39, 0x34, 0x31, 0x33, 0x40400254, 0x34, 0x39, 0x37, 0x32, 
0x31, 0x37, 0x404000de, 0x33, 0x34, 0x37, 0x39, 0x31, 0x33, 0x31, 0x35, 0x31, 0x35, 0x35, 0x37, 0x34, 0x38, 0x35, 0x37, 0x32, 0x34, 0x32, 0x34, 0x35, 0x34, 0x31, 0x35, 0x30, 0x36, 0x39, 0x4040013f, 0x38, 0x32, 0x39, 0x35, 0x33, 0x33, 0x31, 0x31, 0x36, 0x38, 0x36, 0x31, 0x37, 0x32, 0x37, 0x38, 0x40400337, 0x39, 0x30, 0x37, 0x35, 0x30, 0x39, 0x4040010d, 0x37, 0x35, 0x34, 0x36, 0x33, 0x37, 0x34, 0x36, 0x34, 0x39, 0x33, 0x39, 0x33, 0x31, 0x39, 0x32, 0x35, 0x35, 0x30, 0x36, 0x30, 0x34, 0x30, 0x30, 0x39, 0x4040026b, 0x31, 0x36, 0x37, 0x31, 0x31, 0x33, 0x39, 0x30, 0x30, 0x39, 0x38, 0x40400335, 0x34, 0x30, 0x31, 0x32, 0x38, 0x35, 0x38, 0x33, 0x36, 0x31, 0x36, 0x30, 0x33, 0x35, 0x36, 0x33, 0x37, 0x30, 0x37, 0x36, 0x36, 0x30, 0x31, 0x30, 0x34, 0x40400172, 0x38, 0x31, 0x39, 0x34, 0x32, 0x39, 0x4080041e, 0x404000ef, 0x4040028b, 0x37, 0x38, 0x33, 0x37, 0x34, 0x404004a8, 0x38, 0x32, 0x35, 0x35, 0x33, 0x37, 0x40800209, 0x32, 0x36, 0x38, 0x4040002e, 0x34, 0x30, 0x34, 0x37, 0x404001d1, 0x34, 0x404004b5, 0x4040038d, 0x38, 0x34, 0x404003a8, 0x36, 0x40c0031f, 0x33, 0x33, 0x31, 0x33, 0x36, 0x37, 0x37, 0x30, 0x32, 0x38, 0x39, 0x38, 0x39, 0x31, 0x35, 0x32, 0x40400062, 0x35, 0x32, 0x31, 0x36, 0x32, 0x30, 0x35, 0x36, 0x39, 0x36, 0x40400411, 0x30, 0x35, 0x38, 0x40400477, 0x35, 0x40400498, 0x35, 0x31, 0x31, 0x40400209, 0x38, 0x32, 0x34, 0x33, 0x30, 0x30, 0x33, 0x35, 0x35, 0x38, 0x37, 0x36, 0x34, 0x30, 0x32, 0x34, 0x37, 0x34, 0x39, 0x36, 0x34, 0x37, 0x33, 0x32, 0x36, 0x33, 0x4040043e, 0x39, 0x39, 0x32, 0x4040044b, 0x34, 0x32, 0x36, 0x39, 0x40c002c5, 0x37, 0x404001d6, 0x34, 0x4040053d, 0x4040041d, 0x39, 0x33, 0x34, 0x31, 0x37, 0x404001ad, 0x31, 0x32, 0x4040002a, 0x34, 0x4040019e, 0x31, 0x35, 0x30, 0x33, 0x30, 0x32, 0x38, 0x36, 0x31, 0x38, 0x32, 0x39, 0x37, 0x34, 0x35, 0x35, 0x35, 0x37, 0x30, 0x36, 0x37, 0x34, 0x40400135, 0x35, 0x30, 0x35, 0x34, 0x39, 0x34, 0x35, 0x38, 0x404001c5, 0x39, 0x40400051, 0x35, 0x36, 0x404001ec, 0x37, 0x32, 0x31, 0x30, 0x37, 0x39, 0x40400159, 0x33, 0x30, 0x4040010a, 
0x33, 0x32, 0x31, 0x31, 0x36, 0x35, 0x33, 0x34, 0x34, 0x39, 0x38, 0x37, 0x32, 0x30, 0x32, 0x37, 0x4040011b, 0x30, 0x32, 0x33, 0x36, 0x34, 0x4040022e, 0x35, 0x34, 0x39, 0x39, 0x31, 0x31, 0x39, 0x38, 0x40400418, 0x34, 0x4040011b, 0x35, 0x33, 0x35, 0x36, 0x36, 0x33, 0x36, 0x39, 0x40400450, 0x32, 0x36, 0x35, 0x404002e4, 0x37, 0x38, 0x36, 0x32, 0x35, 0x35, 0x31, 0x404003da, 0x31, 0x37, 0x35, 0x37, 0x34, 0x36, 0x37, 0x32, 0x38, 0x39, 0x30, 0x39, 0x37, 0x37, 0x37, 0x37, 0x40800453, 0x30, 0x30, 0x30, 0x404005fd, 0x37, 0x30, 0x404004df, 0x36, 0x404003e9, 0x34, 0x39, 0x31, 0x4040041e, 0x40400297, 0x32, 0x31, 0x34, 0x37, 0x37, 0x32, 0x33, 0x35, 0x30, 0x31, 0x34, 0x31, 0x34, 0x40400643, 0x33, 0x35, 0x36, 0x404004af, 0x31, 0x36, 0x31, 0x33, 0x36, 0x31, 0x31, 0x35, 0x37, 0x33, 0x35, 0x32, 0x35, 0x40400504, 0x33, 0x34, 0x4040005b, 0x31, 0x38, 0x4040047b, 0x38, 0x34, 0x404005e7, 0x33, 0x33, 0x32, 0x33, 0x39, 0x30, 0x37, 0x33, 0x39, 0x34, 0x31, 0x34, 0x33, 0x33, 0x33, 0x34, 0x35, 0x34, 0x37, 0x37, 0x36, 0x32, 0x34, 0x40400242, 0x32, 0x35, 0x31, 0x38, 0x39, 0x38, 0x33, 0x35, 0x36, 0x39, 0x34, 0x38, 0x35, 0x35, 0x36, 0x32, 0x30, 0x39, 0x39, 0x32, 0x31, 0x39, 0x32, 0x32, 0x32, 0x31, 0x38, 0x34, 0x32, 0x37, 0x4040023e, 0x32, 0x404000ba, 0x36, 0x38, 0x38, 0x37, 0x36, 0x37, 0x31, 0x37, 0x39, 0x30, 0x40400055, 0x30, 0x40800106, 0x36, 0x36, 0x404003e7, 0x38, 0x38, 0x36, 0x32, 0x37, 0x32, 0x404006dc, 0x31, 0x37, 0x38, 0x36, 0x30, 0x38, 0x35, 0x37, 0x40400073, 0x33, 0x408002fc, 0x37, 0x39, 0x37, 0x36, 0x36, 0x38, 0x31, 0x404002bd, 0x30, 0x30, 0x39, 0x35, 0x33, 0x38, 0x38, 0x40400638, 0x33, 0x404006a5, 0x30, 0x36, 0x38, 0x30, 0x30, 0x36, 0x34, 0x32, 0x32, 0x35, 0x31, 0x32, 0x35, 0x32, 0x4040057b, 0x37, 0x33, 0x39, 0x32, 0x40400297, 0x40400474, 0x34, 0x408006b3, 0x38, 0x36, 0x32, 0x36, 0x39, 0x34, 0x35, 0x404001e5, 0x34, 0x31, 0x39, 0x36, 0x35, 0x32, 0x38, 0x35, 0x30, 0x40400099, 0x4040039c, 0x31, 0x38, 0x36, 0x33, 0x404001be, 0x34, 0x40800154, 0x32, 0x30, 0x33, 0x39, 0x4040058b, 0x34, 0x35, 
0x404002bc, 0x32, 0x33, 0x37, 0x4040042c, 0x36, 0x40400510, 0x35, 0x36, 0x40400638, 0x37, 0x31, 0x39, 0x31, 0x37, 0x32, 0x38, 0x40400171, 0x37, 0x36, 0x34, 0x36, 0x35, 0x37, 0x35, 0x37, 0x33, 0x39, 0x40400101, 0x33, 0x38, 0x39, 0x40400748, 0x38, 0x33, 0x32, 0x36, 0x34, 0x35, 0x39, 0x39, 0x35, 0x38, 0x404006a7, 0x30, 0x34, 0x37, 0x38, 0x404001de, 0x40400328, 0x39, 0x4040002d, 0x36, 0x34, 0x30, 0x37, 0x38, 0x39, 0x35, 0x31, 0x4040008e, 0x36, 0x38, 0x33, 0x4040012f, 0x32, 0x35, 0x39, 0x35, 0x37, 0x30, 0x40400468, 0x38, 0x32, 0x32, 0x404002c8, 0x32, 0x4040061b, 0x34, 0x30, 0x37, 0x37, 0x32, 0x36, 0x37, 0x31, 0x39, 0x34, 0x37, 0x38, 0x40400319, 0x38, 0x32, 0x36, 0x30, 0x31, 0x34, 0x37, 0x36, 0x39, 0x39, 0x30, 0x39, 0x404004e8, 0x30, 0x31, 0x33, 0x36, 0x33, 0x39, 0x34, 0x34, 0x33, 0x4040027f, 0x33, 0x30, 0x40400105, 0x32, 0x30, 0x33, 0x34, 0x39, 0x36, 0x32, 0x35, 0x32, 0x34, 0x35, 0x31, 0x37, 0x404003b5, 0x39, 0x36, 0x35, 0x31, 0x34, 0x33, 0x31, 0x34, 0x32, 0x39, 0x38, 0x30, 0x39, 0x31, 0x39, 0x30, 0x36, 0x35, 0x39, 0x32, 0x40400282, 0x37, 0x32, 0x32, 0x31, 0x36, 0x39, 0x36, 0x34, 0x36, 0x40400419, 0x4040007a, 0x35, 0x4040050e, 0x34, 0x40800565, 0x38, 0x40400559, 0x39, 0x37, 0x4040057b, 0x35, 0x34, 0x4040049d, 0x4040023e, 0x37, 0x4040065a, 0x38, 0x34, 0x36, 0x38, 0x31, 0x33, 0x4040008c, 0x36, 0x38, 0x33, 0x38, 0x36, 0x38, 0x39, 0x34, 0x32, 0x37, 0x37, 0x34, 0x31, 0x35, 0x35, 0x39, 0x39, 0x31, 0x38, 0x35, 0x4040005a, 0x32, 0x34, 0x35, 0x39, 0x35, 0x33, 0x39, 0x35, 0x39, 0x34, 0x33, 0x31, 0x404005b7, 0x37, 0x40400012, 0x36, 0x38, 0x30, 0x38, 0x34, 0x35, 0x404002e7, 0x37, 0x33, 0x4040081e, 0x39, 0x35, 0x38, 0x34, 0x38, 0x36, 0x35, 0x33, 0x38, 0x404006e8, 0x36, 0x32, 0x404000f2, 0x36, 0x30, 0x39, 0x404004b6, 0x36, 0x30, 0x38, 0x30, 0x35, 0x31, 0x32, 0x34, 0x33, 0x38, 0x38, 0x34, 0x4040013a, 0x4040000b, 0x34, 0x31, 0x33, 0x4040030f, 0x37, 0x36, 0x32, 0x37, 0x38, 0x40400341, 0x37, 0x31, 0x35, 0x4040059b, 0x33, 0x35, 0x39, 0x39, 0x37, 0x37, 0x30, 0x30, 0x31, 0x32, 0x39, 
0x40400472, 0x38, 0x39, 0x34, 0x34, 0x31, 0x40400277, 0x36, 0x38, 0x35, 0x35, 0x4040005f, 0x34, 0x30, 0x36, 0x33, 0x404008e6, 0x32, 0x30, 0x37, 0x32, 0x32, 0x40400158, 0x40800203, 0x34, 0x38, 0x31, 0x35, 0x38, 0x40400205, 0x404001fe, 0x4040027a, 0x40400298, 0x33, 0x39, 0x34, 0x35, 0x32, 0x32, 0x36, 0x37, 0x40c00496, 0x38, 0x4040058a, 0x32, 0x31, 0x404002ea, 0x32, 0x40400387, 0x35, 0x34, 0x36, 0x36, 0x36, 0x4040051b, 0x32, 0x33, 0x39, 0x38, 0x36, 0x34, 0x35, 0x36, 0x404004c4, 0x31, 0x36, 0x33, 0x35, 0x40800253, 0x40400811, 0x37, 0x404008ad, 0x39, 0x38, 0x4040045e, 0x39, 0x33, 0x36, 0x33, 0x34, 0x4040075b, 0x37, 0x34, 0x33, 0x32, 0x34, 0x4040047b, 0x31, 0x35, 0x30, 0x37, 0x36, 0x404004bb, 0x37, 0x39, 0x34, 0x35, 0x31, 0x30, 0x39, 0x4040003e, 0x30, 0x39, 0x34, 0x30, 0x404006a6, 0x38, 0x38, 0x37, 0x39, 0x37, 0x31, 0x30, 0x38, 0x39, 0x33, 0x404008f0, 0x36, 0x39, 0x31, 0x33, 0x36, 0x38, 0x36, 0x37, 0x32, 0x4040025b, 0x404001fe, 0x35, 0x4040053f, 0x40400468, 0x40400801, 0x31, 0x37, 0x39, 0x32, 0x38, 0x36, 0x38, 0x404008cc, 0x38, 0x37, 0x34, 0x37, 0x4080079e, 0x38, 0x32, 0x34, 0x4040097a, 0x38, 0x4040025b, 0x37, 0x31, 0x34, 0x39, 0x30, 0x39, 0x36, 0x37, 0x35, 0x39, 0x38, 0x404006ef, 0x33, 0x36, 0x35, 0x40400134, 0x38, 0x31, 0x4040005c, 0x40400745, 0x40400936, 0x36, 0x38, 0x32, 0x39, 0x4040057e, 0x38, 0x37, 0x32, 0x32, 0x36, 0x35, 0x38, 0x38, 0x30, 0x40400611, 0x35, 0x40400249, 0x34, 0x32, 0x37, 0x30, 0x34, 0x37, 0x37, 0x35, 0x35, 0x4040081e, 0x33, 0x37, 0x39, 0x36, 0x34, 0x31, 0x34, 0x35, 0x31, 0x35, 0x32, 0x404005fd, 0x32, 0x33, 0x34, 0x33, 0x36, 0x34, 0x35, 0x34, 0x404005de, 0x34, 0x34, 0x34, 0x37, 0x39, 0x35, 0x4040003c, 0x40400523, 0x408008e6, 0x34, 0x31, 0x4040052a, 0x33, 0x40400304, 0x35, 0x32, 0x33, 0x31, 0x40800841, 0x31, 0x36, 0x36, 0x31, 0x404008b2, 0x35, 0x39, 0x36, 0x39, 0x35, 0x33, 0x36, 0x32, 0x33, 0x31, 0x34, 0x404005ff, 0x32, 0x34, 0x38, 0x34, 0x39, 0x33, 0x37, 0x31, 0x38, 0x37, 0x31, 0x31, 0x30, 0x31, 0x34, 0x35, 0x37, 0x36, 0x35, 0x34, 0x40400761, 0x30, 
0x32, 0x37, 0x39, 0x39, 0x33, 0x34, 0x34, 0x30, 0x33, 0x37, 0x34, 0x32, 0x30, 0x30, 0x37, 0x4040093f, 0x37, 0x38, 0x35, 0x33, 0x39, 0x30, 0x36, 0x32, 0x31, 0x39, 0x40800299, 0x40400345, 0x38, 0x34, 0x37, 0x408003d2, 0x38, 0x33, 0x33, 0x32, 0x31, 0x34, 0x34, 0x35, 0x37, 0x31, 0x40400284, 0x40400776, 0x34, 0x33, 0x35, 0x30, 0x40400928, 0x40400468, 0x35, 0x33, 0x31, 0x39, 0x31, 0x30, 0x34, 0x38, 0x34, 0x38, 0x31, 0x30, 0x30, 0x35, 0x33, 0x37, 0x30, 0x36, 0x404008bc, 0x4080059d, 0x40800781, 0x31, 0x40400559, 0x37, 0x4040031b, 0x35, 0x404007ec, 0x4040040c, 0x36, 0x33, 0x408007dc, 0x34, 0x40400971, 0x4080034e, 0x408003f5, 0x38, 0x4080052d, 0x40800887, 0x39, 0x40400187, 0x39, 0x31, 0x404008ce, 0x38, 0x31, 0x34, 0x36, 0x37, 0x35, 0x31, 0x4040062b, 0x31, 0x32, 0x33, 0x39, 0x40c001a9, 0x39, 0x30, 0x37, 0x31, 0x38, 0x36, 0x34, 0x39, 0x34, 0x32, 0x33, 0x31, 0x39, 0x36, 0x31, 0x35, 0x36, 0x404001ec, 0x404006bc, 0x39, 0x35, 0x40400926, 0x40400469, 0x4040011b, 0x36, 0x30, 0x33, 0x38, 0x40400a25, 0x4040016f, 0x40400384, 0x36, 0x32, 0x4040045a, 0x35, 0x4040084c, 0x36, 0x33, 0x38, 0x39, 0x33, 0x37, 0x37, 0x38, 0x37, 0x404008c5, 0x404000f8, 0x39, 0x37, 0x39, 0x32, 0x30, 0x37, 0x37, 0x33, 0x404005d7, 0x32, 0x31, 0x38, 0x32, 0x35, 0x36, 0x404007df, 0x36, 0x36, 0x404006d6, 0x34, 0x32, 0x4080067e, 0x36, 0x404006e6, 0x34, 0x34, 0x40400024, 0x35, 0x34, 0x39, 0x32, 0x30, 0x32, 0x36, 0x30, 0x35, 0x40400ab3, 0x408003e4, 0x32, 0x30, 0x31, 0x34, 0x39, 0x404004d2, 0x38, 0x35, 0x30, 0x37, 0x33, 0x40400599, 0x36, 0x36, 0x36, 0x30, 0x40400194, 0x32, 0x34, 0x33, 0x34, 0x30, 0x40400087, 0x30, 0x4040076b, 0x38, 0x36, 0x33, 0x40400956, 0x404007e4, 0x4040042b, 0x40400174, 0x35, 0x37, 0x39, 0x36, 0x32, 0x36, 0x38, 0x35, 0x36, 0x40400140, 0x35, 0x30, 0x38, 0x40400523, 0x35, 0x38, 0x37, 0x39, 0x36, 0x39, 0x39, 0x40400711, 0x35, 0x37, 0x34, 0x40400a18, 0x38, 0x34, 0x30, 0x404008b3, 0x31, 0x34, 0x35, 0x39, 0x31, 0x4040078c, 0x37, 0x30, 0x40400234, 0x30, 0x31, 0x40400be7, 0x31, 0x32, 0x40400c74, 0x30, 
0x404003c3, 0x33, 0x39, 0x40400b2a, 0x40400112, 0x37, 0x31, 0x35, 0x404003b0, 0x34, 0x32, 0x30, 0x40800bf2, 0x39, 0x40400bc2, 0x30, 0x37, 0x40400341, 0x40400795, 0x40400aaf, 0x40400c62, 0x32, 0x31, 0x40400960, 0x32, 0x35, 0x31, 0x4040057b, 0x40400944, 0x39, 0x32, 0x404001b2, 0x38, 0x32, 0x36, 0x40400b66, 0x32, 0x40400278, 0x33, 0x32, 0x31, 0x35, 0x37, 0x39, 0x31, 0x39, 0x38, 0x34, 0x31, 0x34, 0x4080087b, 0x39, 0x31, 0x36, 0x34, 0x408006e8, 0x39, 0x40800b58, 0x404008db, 0x37, 0x32, 0x32, 0x40400321, 0x35, 0x404008a4, 0x40400141, 0x39, 0x31, 0x30, 0x404000bc, 0x40400c5b, 0x35, 0x32, 0x38, 0x30, 0x31, 0x37, 0x40400231, 0x37, 0x31, 0x32, 0x40400914, 0x38, 0x33, 0x32, 0x40400373, 0x31, 0x40400589, 0x30, 0x39, 0x33, 0x35, 0x33, 0x39, 0x36, 0x35, 0x37, 0x4040064b, 0x31, 0x30, 0x38, 0x33, 0x40400069, 0x35, 0x31, 0x4040077a, 0x40400d5a, 0x31, 0x34, 0x34, 0x34, 0x32, 0x31, 0x30, 0x30, 0x40400202, 0x30, 0x33, 0x4040019c, 0x31, 0x31, 0x30, 0x33, 0x40400c81, 0x40400009, 0x40400026, 0x40c00602, 0x35, 0x31, 0x36, 0x404005d9, 0x40800883, 0x4040092a, 0x35, 0x40800c42, 0x38, 0x35, 0x31, 0x37, 0x31, 0x34, 0x33, 0x37, 0x40400605, 0x4040006d, 0x31, 0x35, 0x35, 0x36, 0x35, 0x30, 0x38, 0x38, 0x404003b9, 0x39, 0x38, 0x39, 0x38, 0x35, 0x39, 0x39, 0x38, 0x32, 0x33, 0x38, 0x404001cf, 0x404009ba, 0x33, 0x4040016c, 0x4040043e, 0x404009c3, 0x38, 0x40800e05, 0x33, 0x32, 0x40400107, 0x35, 0x40400305, 0x33, 0x404001ca, 0x39, 0x4040041b, 0x39, 0x38, 0x4040087d, 0x34, 0x40400cb8, 0x37, 0x4040064b, 0x30, 0x37, 0x404000e5, 0x34, 0x38, 0x31, 0x34, 0x31, 0x40400539, 0x38, 0x35, 0x39, 0x34, 0x36, 0x31, 0x40400bc9, 0x38, 0x30},
+ },
+ {
+ input: "testdata/huffman-rand-1k.in",
+ want: "testdata/huffman-rand-1k.%s.expect",
+ wantNoInput: "testdata/huffman-rand-1k.%s.expect-noinput",
+ tokens: []token{0xf8, 0x8b, 0x96, 0x76, 0x48, 0xd, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xd, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa, 0x64, 0xb, 0xe0, 0x23, 0x29, 0xbd, 0xf7, 0xe7, 0x83, 0x3c, 0xfb, 0xdf, 0xb3, 0xae, 0x4f, 0xa4, 0x47, 0x55, 0x99, 0xde, 0x2f, 0x96, 0x6e, 0x1c, 0x43, 0x4c, 0x87, 0xe2, 0x7c, 0xd9, 0x5f, 0x4c, 0x7c, 0xe8, 0x90, 0x3, 0xdb, 0x30, 0x95, 0xd6, 0x22, 0xc, 0x47, 0xb8, 0x4d, 0x6b, 0xbd, 0x24, 0x11, 0xab, 0x2c, 0xd7, 0xbe, 0x6e, 0x7a, 0xd6, 0x8, 0xa3, 0x98, 0xd8, 0xdd, 0x15, 0x6a, 0xfa, 0x93, 0x30, 0x1, 0x25, 0x1d, 0xa2, 0x74, 0x86, 0x4b, 0x6a, 0x95, 0xe8, 0xe1, 0x4e, 0xe, 0x76, 0xb9, 0x49, 0xa9, 0x5f, 0xa0, 0xa6, 0x63, 0x3c, 0x7e, 0x7e, 0x20, 0x13, 0x4f, 0xbb, 0x66, 0x92, 0xb8, 0x2e, 0xa4, 0xfa, 0x48, 0xcb, 0xae, 0xb9, 0x3c, 0xaf, 0xd3, 0x1f, 0xe1, 0xd5, 0x8d, 0x42, 0x6d, 0xf0, 0xfc, 0x8c, 0xc, 0x0, 0xde, 0x40, 0xab, 0x8b, 0x47, 0x97, 0x4e, 0xa8, 0xcf, 0x8e, 0xdb, 0xa6, 0x8b, 0x20, 0x9, 0x84, 0x7a, 0x66, 0xe5, 0x98, 0x29, 0x2, 0x95, 0xe6, 0x38, 0x32, 0x60, 0x3, 0xe3, 0x9a, 0x1e, 0x54, 0xe8, 0x63, 0x80, 0x48, 0x9c, 0xe7, 0x63, 0x33, 0x6e, 0xa0, 0x65, 0x83, 0xfa, 
0xc6, 0xba, 0x7a, 0x43, 0x71, 0x5, 0xf5, 0x68, 0x69, 0x85, 0x9c, 0xba, 0x45, 0xcd, 0x6b, 0xb, 0x19, 0xd1, 0xbb, 0x7f, 0x70, 0x85, 0x92, 0xd1, 0xb4, 0x64, 0x82, 0xb1, 0xe4, 0x62, 0xc5, 0x3c, 0x46, 0x1f, 0x92, 0x31, 0x1c, 0x4e, 0x41, 0x77, 0xf7, 0xe7, 0x87, 0xa2, 0xf, 0x6e, 0xe8, 0x92, 0x3, 0x6b, 0xa, 0xe7, 0xa9, 0x3b, 0x11, 0xda, 0x66, 0x8a, 0x29, 0xda, 0x79, 0xe1, 0x64, 0x8d, 0xe3, 0x54, 0xd4, 0xf5, 0xef, 0x64, 0x87, 0x3b, 0xf4, 0xc2, 0xf4, 0x71, 0x13, 0xa9, 0xe9, 0xe0, 0xa2, 0x6, 0x14, 0xab, 0x5d, 0xa7, 0x96, 0x0, 0xd6, 0xc3, 0xcc, 0x57, 0xed, 0x39, 0x6a, 0x25, 0xcd, 0x76, 0xea, 0xba, 0x3a, 0xf2, 0xa1, 0x95, 0x5d, 0xe5, 0x71, 0xcf, 0x9c, 0x62, 0x9e, 0x6a, 0xfa, 0xd5, 0x31, 0xd1, 0xa8, 0x66, 0x30, 0x33, 0xaa, 0x51, 0x17, 0x13, 0x82, 0x99, 0xc8, 0x14, 0x60, 0x9f, 0x4d, 0x32, 0x6d, 0xda, 0x19, 0x26, 0x21, 0xdc, 0x7e, 0x2e, 0x25, 0x67, 0x72, 0xca, 0xf, 0x92, 0xcd, 0xf6, 0xd6, 0xcb, 0x97, 0x8a, 0x33, 0x58, 0x73, 0x70, 0x91, 0x1d, 0xbf, 0x28, 0x23, 0xa3, 0xc, 0xf1, 0x83, 0xc3, 0xc8, 0x56, 0x77, 0x68, 0xe3, 0x82, 0xba, 0xb9, 0x57, 0x56, 0x57, 0x9c, 0xc3, 0xd6, 0x14, 0x5, 0x3c, 0xb1, 0xaf, 0x93, 0xc8, 0x8a, 0x57, 0x7f, 0x53, 0xfa, 0x2f, 0xaa, 0x6e, 0x66, 0x83, 0xfa, 0x33, 0xd1, 0x21, 0xab, 0x1b, 0x71, 0xb4, 0x7c, 0xda, 0xfd, 0xfb, 0x7f, 0x20, 0xab, 0x5e, 0xd5, 0xca, 0xfd, 0xdd, 0xe0, 0xee, 0xda, 0xba, 0xa8, 0x27, 0x99, 0x97, 0x69, 0xc1, 0x3c, 0x82, 0x8c, 0xa, 0x5c, 0x2d, 0x5b, 0x88, 0x3e, 0x34, 0x35, 0x86, 0x37, 0x46, 0x79, 0xe1, 0xaa, 0x19, 0xfb, 0xaa, 0xde, 0x15, 0x9, 0xd, 0x1a, 0x57, 0xff, 0xb5, 0xf, 0xf3, 0x2b, 0x5a, 0x6a, 0x4d, 0x19, 0x77, 0x71, 0x45, 0xdf, 0x4f, 0xb3, 0xec, 0xf1, 0xeb, 0x18, 0x53, 0x3e, 0x3b, 0x47, 0x8, 0x9a, 0x73, 0xa0, 0x5c, 0x8c, 0x5f, 0xeb, 0xf, 0x3a, 0xc2, 0x43, 0x67, 0xb4, 0x66, 0x67, 0x80, 0x58, 0xe, 0xc1, 0xec, 0x40, 0xd4, 0x22, 0x94, 0xca, 0xf9, 0xe8, 0x92, 0xe4, 0x69, 0x38, 0xbe, 0x67, 0x64, 0xca, 0x50, 0xc7, 0x6, 0x67, 0x42, 0x6e, 0xa3, 0xf0, 0xb7, 0x6c, 0xf2, 0xe8, 0x5f, 0xb1, 0xaf, 0xe7, 0xdb, 0xbb, 0x77, 0xb5, 0xf8, 0xcb, 0x8, 0xc4, 
0x75, 0x7e, 0xc0, 0xf9, 0x1c, 0x7f, 0x3c, 0x89, 0x2f, 0xd2, 0x58, 0x3a, 0xe2, 0xf8, 0x91, 0xb6, 0x7b, 0x24, 0x27, 0xe9, 0xae, 0x84, 0x8b, 0xde, 0x74, 0xac, 0xfd, 0xd9, 0xb7, 0x69, 0x2a, 0xec, 0x32, 0x6f, 0xf0, 0x92, 0x84, 0xf1, 0x40, 0xc, 0x8a, 0xbc, 0x39, 0x6e, 0x2e, 0x73, 0xd4, 0x6e, 0x8a, 0x74, 0x2a, 0xdc, 0x60, 0x1f, 0xa3, 0x7, 0xde, 0x75, 0x8b, 0x74, 0xc8, 0xfe, 0x63, 0x75, 0xf6, 0x3d, 0x63, 0xac, 0x33, 0x89, 0xc3, 0xf0, 0xf8, 0x2d, 0x6b, 0xb4, 0x9e, 0x74, 0x8b, 0x5c, 0x33, 0xb4, 0xca, 0xa8, 0xe4, 0x99, 0xb6, 0x90, 0xa1, 0xef, 0xf, 0xd3, 0x61, 0xb2, 0xc6, 0x1a, 0x94, 0x7c, 0x44, 0x55, 0xf4, 0x45, 0xff, 0x9e, 0xa5, 0x5a, 0xc6, 0xa0, 0xe8, 0x2a, 0xc1, 0x8d, 0x6f, 0x34, 0x11, 0xb9, 0xbe, 0x4e, 0xd9, 0x87, 0x97, 0x73, 0xcf, 0x3d, 0x23, 0xae, 0xd5, 0x1a, 0x5e, 0xae, 0x5d, 0x6a, 0x3, 0xf9, 0x22, 0xd, 0x10, 0xd9, 0x47, 0x69, 0x15, 0x3f, 0xee, 0x52, 0xa3, 0x8, 0xd2, 0x3c, 0x51, 0xf4, 0xf8, 0x9d, 0xe4, 0x98, 0x89, 0xc8, 0x67, 0x39, 0xd5, 0x5e, 0x35, 0x78, 0x27, 0xe8, 0x3c, 0x80, 0xae, 0x79, 0x71, 0xd2, 0x93, 0xf4, 0xaa, 0x51, 0x12, 0x1c, 0x4b, 0x1b, 0xe5, 0x6e, 0x15, 0x6f, 0xe4, 0xbb, 0x51, 0x9b, 0x45, 0x9f, 0xf9, 0xc4, 0x8c, 0x2a, 0xfb, 0x1a, 0xdf, 0x55, 0xd3, 0x48, 0x93, 0x27, 0x1, 0x26, 0xc2, 0x6b, 0x55, 0x6d, 0xa2, 0xfb, 0x84, 0x8b, 0xc9, 0x9e, 0x28, 0xc2, 0xef, 0x1a, 0x24, 0xec, 0x9b, 0xae, 0xbd, 0x60, 0xe9, 0x15, 0x35, 0xee, 0x42, 0xa4, 0x33, 0x5b, 0xfa, 0xf, 0xb6, 0xf7, 0x1, 0xa6, 0x2, 0x4c, 0xca, 0x90, 0x58, 0x3a, 0x96, 0x41, 0xe7, 0xcb, 0x9, 0x8c, 0xdb, 0x85, 0x4d, 0xa8, 0x89, 0xf3, 0xb5, 0x8e, 0xfd, 0x75, 0x5b, 0x4f, 0xed, 0xde, 0x3f, 0xeb, 0x38, 0xa3, 0xbe, 0xb0, 0x73, 0xfc, 0xb8, 0x54, 0xf7, 0x4c, 0x30, 0x67, 0x2e, 0x38, 0xa2, 0x54, 0x18, 0xba, 0x8, 0xbf, 0xf2, 0x39, 0xd5, 0xfe, 0xa5, 0x41, 0xc6, 0x66, 0x66, 0xba, 0x81, 0xef, 0x67, 0xe4, 0xe6, 0x3c, 0xc, 0xca, 0xa4, 0xa, 0x79, 0xb3, 0x57, 0x8b, 0x8a, 0x75, 0x98, 0x18, 0x42, 0x2f, 0x29, 0xa3, 0x82, 0xef, 0x9f, 0x86, 0x6, 0x23, 0xe1, 0x75, 0xfa, 0x8, 0xb1, 0xde, 0x17, 0x4a},
+ },
+ {
+ input: "testdata/huffman-rand-limit.in",
+ want: "testdata/huffman-rand-limit.%s.expect",
+ wantNoInput: "testdata/huffman-rand-limit.%s.expect-noinput",
+ tokens: []token{0x61, 0x51c00000, 0xa, 0xf8, 0x8b, 0x96, 0x76, 0x48, 0xa, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xa, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa},
+ },
+ {
+ input: "testdata/huffman-shifts.in",
+ want: "testdata/huffman-shifts.%s.expect",
+ wantNoInput: "testdata/huffman-shifts.%s.expect-noinput",
+ tokens: []token{0x31, 0x30, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x52400001, 0xd, 0xa, 0x32, 0x33, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7f400001},
+ },
+ {
+ input: "testdata/huffman-text-shift.in",
+ want: "testdata/huffman-text-shift.%s.expect",
+ wantNoInput: "testdata/huffman-text-shift.%s.expect-noinput",
+ tokens: []token{0x2f, 0x2f, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x32, 0x30, 0x30, 0x39, 0x54, 0x68, 0x47, 0x6f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x6c, 0x40800016, 0x72, 0x72, 0x76, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x55, 0x6f, 0x66, 0x74, 0x68, 0x69, 0x6f, 0x75, 0x72, 0x63, 0x63, 0x6f, 0x64, 0x69, 0x67, 0x6f, 0x76, 0x72, 0x6e, 0x64, 0x62, 0x79, 0x42, 0x53, 0x44, 0x2d, 0x74, 0x79, 0x6c, 0x40400020, 0x6c, 0x69, 0x63, 0x6e, 0x74, 0x68, 0x74, 0x63, 0x6e, 0x62, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x74, 0x68, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x66, 0x69, 0x6c, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x63, 0x6b, 0x67, 0x6d, 0x69, 0x6e, 0x4040000a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x6f, 0x22, 0x4040000c, 0x66, 0x75, 0x6e, 0x63, 0x6d, 0x69, 0x6e, 0x28, 0x29, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x72, 0x62, 0x3d, 0x6d, 0x6b, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x2c, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x5f, 0x3a, 0x3d, 0x6f, 0x2e, 0x43, 0x72, 0x74, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x40800021, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x58, 0x78, 0x79, 0x7a, 0x21, 0x22, 0x23, 0xc2, 0xa4, 0x25, 0x26, 0x2f, 0x3f, 0x22},
+ },
+ {
+ input: "testdata/huffman-text.in",
+ want: "testdata/huffman-text.%s.expect",
+ wantNoInput: "testdata/huffman-text.%s.expect-noinput",
+ tokens: []token{0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x39, 0x20, 0x54, 0x68, 0x65, 0x20, 0x47, 0x6f, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x73, 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x4080001e, 0x73, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x20, 0x55, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x73, 0x20, 0x67, 0x6f, 0x76, 0x65, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, 0x20, 0x42, 0x53, 0x44, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x40800036, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x20, 0x6d, 0x61, 0x69, 0x6e, 0x4040000f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x22, 0x6f, 0x73, 0x22, 0x4040000e, 0x66, 0x75, 0x6e, 0x63, 0x4080001b, 0x28, 0x29, 0x20, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x61, 0x72, 0x20, 0x62, 0x20, 0x3d, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x65, 0x2c, 0x20, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x20, 0x5f, 0x20, 0x3a, 0x3d, 0x20, 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x61, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x61, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x4080002a, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa},
+ },
+ {
+ input: "testdata/huffman-zero.in",
+ want: "testdata/huffman-zero.%s.expect",
+ wantNoInput: "testdata/huffman-zero.%s.expect-noinput",
+ tokens: []token{0x30, ml, 0x4b800000},
+ },
+ {
+ input: "",
+ want: "",
+ wantNoInput: "testdata/null-long-match.%s.expect-noinput",
+ tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 
ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x41400000},
+ },
+}
+
+// TestWriteBlock tests if the writeBlock encoding has changed.
+// To update the reference files use the "-update" flag on the test.
+func TestWriteBlock(t *testing.T) {
+ for _, test := range writeBlockTests {
+ testBlock(t, test, "wb")
+ }
+}
+
+// TestWriteBlockDynamic tests if the writeBlockDynamic encoding has changed.
+// To update the reference files use the "-update" flag on the test.
+func TestWriteBlockDynamic(t *testing.T) {
+ for _, test := range writeBlockTests {
+ testBlock(t, test, "dyn")
+ }
+}
+
+// TestWriteBlockDynamic tests if the writeBlockDynamic encoding has changed.
+// To update the reference files use the "-update" flag on the test.
+func TestWriteBlockDynamicSync(t *testing.T) {
+ for _, test := range writeBlockTests {
+ testBlock(t, test, "sync")
+ }
+}
+
// testBlock tests a block against its reference files,
// or regenerates the references if the "-update" flag is set.
// ttype selects the encoding mode ("wb", "dyn" or "sync") and is spliced
// into the reference file names via the %s placeholder.
func testBlock(t *testing.T, test huffTest, ttype string) {
	if test.want != "" {
		test.want = fmt.Sprintf(test.want, ttype)
	}
	const gotSuffix = ".got"
	test.wantNoInput = fmt.Sprintf(test.wantNoInput, ttype)
	tokens := indexTokens(test.tokens)
	if *update {
		// Regenerate the reference files instead of comparing against them.
		if test.input != "" {
			t.Logf("Updating %q", test.want)
			input, err := os.ReadFile(test.input)
			if err != nil {
				t.Error(err)
				return
			}

			f, err := os.Create(test.want)
			if err != nil {
				t.Error(err)
				return
			}
			defer f.Close()
			bw := newHuffmanBitWriter(f)
			writeToType(t, ttype, bw, tokens, input)
		}

		t.Logf("Updating %q", test.wantNoInput)
		f, err := os.Create(test.wantNoInput)
		if err != nil {
			t.Error(err)
			return
		}
		defer f.Close()
		bw := newHuffmanBitWriter(f)
		writeToType(t, ttype, bw, tokens, nil)
		return
	}

	if test.input != "" {
		// Compare output produced when the raw input bytes are available.
		t.Logf("Testing %q", test.want)
		input, err := os.ReadFile(test.input)
		if err != nil {
			t.Error(err)
			return
		}
		want, err := os.ReadFile(test.want)
		if err != nil {
			t.Error(err)
			return
		}
		var buf bytes.Buffer
		bw := newHuffmanBitWriter(&buf)
		writeToType(t, ttype, bw, tokens, input)

		got := buf.Bytes()
		if !bytes.Equal(got, want) {
			// Write the mismatching output next to the reference for inspection.
			t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+gotSuffix)
			if err := os.WriteFile(test.want+gotSuffix, got, 0666); err != nil {
				t.Error(err)
			}
		}
		t.Log("Output ok")

		// Test if the writer produces the same output after reset.
		buf.Reset()
		bw.reset(&buf)
		writeToType(t, ttype, bw, tokens, input)
		bw.flush()
		got = buf.Bytes()
		if !bytes.Equal(got, want) {
			t.Errorf("reset: writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".reset"+gotSuffix)
			if err := os.WriteFile(test.want+".reset"+gotSuffix, got, 0666); err != nil {
				t.Error(err)
			}
			return
		}
		t.Log("Reset ok")
		testWriterEOF(t, "wb", test, true)
	}
	// Compare output produced from tokens only (no raw input).
	t.Logf("Testing %q", test.wantNoInput)
	wantNI, err := os.ReadFile(test.wantNoInput)
	if err != nil {
		t.Error(err)
		return
	}
	var buf bytes.Buffer
	bw := newHuffmanBitWriter(&buf)
	writeToType(t, ttype, bw, tokens, nil)

	got := buf.Bytes()
	if !bytes.Equal(got, wantNI) {
		t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.wantNoInput, test.wantNoInput+gotSuffix)
		if err := os.WriteFile(test.wantNoInput+gotSuffix, got, 0666); err != nil {
			t.Error(err)
		}
	} else if got[0]&1 == 1 {
		// A non-final block must not have the final (EOF) bit set.
		t.Error("got unexpected EOF")
		return
	}

	t.Log("Output ok")

	// Test if the writer produces the same output after reset.
	buf.Reset()
	bw.reset(&buf)
	writeToType(t, ttype, bw, tokens, nil)
	bw.flush()
	got = buf.Bytes()
	if !bytes.Equal(got, wantNI) {
		t.Errorf("reset: writeBlock did not yield expected result for file %q without input. See %q", test.wantNoInput, test.wantNoInput+".reset"+gotSuffix)
		if err := os.WriteFile(test.wantNoInput+".reset"+gotSuffix, got, 0666); err != nil {
			t.Error(err)
		}
		return
	}
	t.Log("Reset ok")
	testWriterEOF(t, "wb", test, false)
}
+
+func writeToType(t *testing.T, ttype string, bw *huffmanBitWriter, tok tokens, input []byte) {
+ switch ttype {
+ case "wb":
+ bw.writeBlock(&tok, false, input)
+ case "dyn":
+ bw.writeBlockDynamic(&tok, false, input, false)
+ case "sync":
+ bw.writeBlockDynamic(&tok, false, input, true)
+ default:
+ panic("unknown test type")
+ }
+
+ if bw.err != nil {
+ t.Error(bw.err)
+ return
+ }
+
+ bw.flush()
+ if bw.err != nil {
+ t.Error(bw.err)
+ return
+ }
+}
+
+// testWriterEOF tests if the written block contains an EOF marker.
+func testWriterEOF(t *testing.T, ttype string, test huffTest, useInput bool) {
+ if useInput && test.input == "" {
+ return
+ }
+ var input []byte
+ if useInput {
+ var err error
+ input, err = os.ReadFile(test.input)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ }
+ var buf bytes.Buffer
+ bw := newHuffmanBitWriter(&buf)
+ tokens := indexTokens(test.tokens)
+ switch ttype {
+ case "wb":
+ bw.writeBlock(&tokens, true, input)
+ case "dyn":
+ bw.writeBlockDynamic(&tokens, true, input, true)
+ case "huff":
+ bw.writeBlockHuff(true, input, true)
+ default:
+ panic("unknown test type")
+ }
+ if bw.err != nil {
+ t.Error(bw.err)
+ return
+ }
+
+ bw.flush()
+ if bw.err != nil {
+ t.Error(bw.err)
+ return
+ }
+ b := buf.Bytes()
+ if len(b) == 0 {
+ t.Error("no output received")
+ return
+ }
+ if b[0]&1 != 1 {
+ t.Errorf("block not marked with EOF for input %q", test.input)
+ return
+ }
+ t.Log("EOF ok")
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 0000000000..be7b58b473
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "math/bits"
+)
+
const (
	// maxBitsLimit is the exclusive upper bound on Huffman code lengths.
	maxBitsLimit = 16
	// number of valid literal/length codes
	literalCount = 286
)
+
// hcode is a huffman code with a bit code and bit length.
// Layout: the low 8 bits hold the length in bits, the upper bits hold
// the code itself.
type hcode uint32

// len returns the code length in bits (the low byte).
func (h hcode) len() uint8 {
	return uint8(h)
}

// code64 returns the bit code widened to uint64 for the bit writer.
func (h hcode) code64() uint64 {
	return uint64(h >> 8)
}

// zero reports whether the code is unassigned (code and length both zero).
func (h hcode) zero() bool {
	return h == 0
}
+
// huffmanEncoder holds the assigned codes for one Huffman table together
// with the scratch space used while building them.
type huffmanEncoder struct {
	// codes[i] is the code assigned to symbol i; zero means unassigned.
	codes []hcode
	// bitCount is scratch for bitCounts; index is the code length in bits.
	bitCount [17]int32

	// Allocate a reusable buffer with the longest possible frequency table.
	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
	// The largest of these is literalCount, so we allocate for that case.
	freqcache [literalCount + 1]literalNode
}
+
// literalNode pairs a symbol with its frequency while building the tree.
type literalNode struct {
	literal uint16
	freq uint16
}
+
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
	// Our level. Kept for better printing.
	level int32

	// The frequency of the last node at this level.
	lastFreq int32

	// The frequency of the next character to add to this level.
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level.
	needed int32
}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+ *h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+ return hcode(length) | (hcode(code) << 8)
+}
+
// reverseBits returns the bitLength low bits of number in reversed order,
// as required for canonical DEFLATE code emission.
func reverseBits(number uint16, bitLength byte) uint16 {
	// Shift the interesting bits to the top, then reverse all 16 bits.
	shifted := number << ((16 - bitLength) & 15)
	return bits.Reverse16(shifted)
}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
// newHuffmanEncoder returns an encoder with room for size codes.
// Capacity is rounded up to a power of two so the slice can be re-sliced
// for the smaller code tables without reallocating.
func newHuffmanEncoder(size int) *huffmanEncoder {
	// Make capacity to next power of two.
	c := uint(bits.Len32(uint32(size - 1)))
	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(literalCount)
+ codes := h.codes
+ var ch uint16
+ for ch = 0; ch < literalCount; ch++ {
+ var bits uint16
+ var size uint8
+ switch {
+ case ch < 144:
+ // size 8, 000110000 .. 10111111
+ bits = ch + 48
+ size = 8
+ case ch < 256:
+ // size 9, 110010000 .. 111111111
+ bits = ch + 400 - 144
+ size = 9
+ case ch < 280:
+ // size 7, 0000000 .. 0010111
+ bits = ch - 256
+ size = 7
+ default:
+ // size 8, 11000000 .. 11000111
+ bits = ch + 192 - 280
+ size = 8
+ }
+ codes[ch] = newhcode(reverseBits(bits, size), size)
+ }
+ return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(30)
+ codes := h.codes
+ for ch := range codes {
+ codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+ }
+ return h
+}
+
// Pre-built encoders for the fixed Huffman tables of RFC 1951.
var fixedLiteralEncoding = generateFixedLiteralEncoding()
var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ total += int(f) * int(h.codes[i].len())
+ }
+ }
+ return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+ var total int
+ for _, f := range b {
+ total += int(h.codes[f].len())
+ }
+ return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ code := h.codes[i]
+ if code.zero() {
+ return math.MaxInt32
+ }
+ total += int(f) * int(code.len())
+ }
+ }
+ return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+//
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
+// maxBits The maximum number of bits that should be used to encode any literal.
+//
+// Must be less than 16.
+//
+// return An integer array in which array[i] indicates the number of literals
+//
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+ // The tree can't have greater depth than n - 1, no matter what. This
+ // saves a little bit of work in some small cases
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ // Descending to only have 1 bounds check.
+ l2f := int32(list[2].freq)
+ l1f := int32(list[1].freq)
+ l0f := int32(list[0].freq) + int32(list[1].freq)
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: l1f,
+ nextCharFreq: l2f,
+ nextPairFreq: l0f,
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := uint32(maxBits)
+ for level < 16 {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+ // We've run out of both leafs and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+ // Lower leafCounts are the same of the previous node.
+ leafCounts[level][level] = n
+ e := list[n]
+ if e.literal < math.MaxUint16 {
+ l.nextCharFreq = int32(e.freq)
+ } else {
+ l.nextCharFreq = math.MaxInt32
+ }
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+ // more values in the level below
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ if true {
+ save := leafCounts[level][level]
+ leafCounts[level] = leafCounts[level-1]
+ leafCounts[level][level] = save
+ } else {
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ }
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+ // Somethings is wrong if at the end, the top level is null or hasn't used
+ // all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
// assignEncodingAndSize looks at the leaves and assigns them a bit count
// and an encoding as specified in RFC 1951 3.2.2.
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, .... The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		sortByLiteral(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
+
// generate updates this Huffman code object to be the minimum code for the
// specified frequency count.
//
// freq is an array of frequencies, in which frequency[i] gives the frequency
// of literal i. maxBits is the maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
	list := h.freqcache[:len(freq)+1]
	codes := h.codes[:len(freq)]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			// Clear any stale code so zero() reports unassigned.
			codes[i] = 0
		}
	}
	// Zero the element just past the used portion of the cache.
	list[count] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	sortByFreq(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}
+
// atLeastOne clamps the result between 1 and 15.
func atLeastOne(v float32) float32 {
	switch {
	case v < 1:
		return 1
	case v > 15:
		return 15
	}
	return v
}
+
// histogram counts the occurrences of each byte value of b into h,
// adding to any counts already present. h must have room for 256 counters.
func histogram(b []byte, h []uint16) {
	// The redundant "true &&" in the original condition has been removed.
	if len(b) >= 8<<10 {
		// Split into four streams for bigger inputs.
		histogramSplit(b, h)
	} else {
		h = h[:256]
		for _, t := range b {
			h[t]++
		}
	}
}

// histogramSplit counts byte values of b into h using four interleaved
// streams to reduce per-iteration dependencies.
// Tested, and slightly faster than 2-way. Writing to separate arrays and
// combining is also slightly slower.
func histogramSplit(b []byte, h []uint16) {
	h = h[:256]
	// Peel off leading bytes until the length is a multiple of 4.
	for len(b)&3 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
	for i, t := range x {
		v0 := &h[t]
		v1 := &h[y[i]]
		v3 := &h[w[i]]
		v2 := &h[z[i]]
		*v0++
		*v1++
		*v2++
		*v3++
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 0000000000..6c05ba8c1c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+ n := len(data)
+ quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
// quickSortByFreq sorts data[a:b] by (freq, literal) using quicksort,
// switching to heapSort once maxDepth partitioning levels are exhausted
// and finishing small slices with a Shell pass plus insertion sort.
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivotByFreq(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSortByFreq(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSortByFreq(data, mhi, b)
		} else {
			quickSortByFreq(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSortByFreq(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSortByFreq(data, a, b)
	}
}
+
// doPivotByFreq partitions data[lo:hi] around a median-of-nine pivot,
// ordering by (freq, literal). It returns midlo, midhi such that elements
// in data[lo:midlo] are < pivot, data[midlo:midhi] == pivot and
// data[midhi:hi] are > pivot.
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
		medianOfThreeSortByFreq(data, m, m-s, m+s)
		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThreeSortByFreq(data, lo, m, hi-1)

	// Invariants are:
	//	data[lo] = pivot (set up by ChoosePivot)
	//	data[lo < i < a] < pivot
	//	data[a <= i < b] <= pivot
	//	data[b <= i < c] unexamined
	//	data[c <= i < hi-1] > pivot
	//	data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
	}
	b := a
	for {
		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
		}
		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		//	data[a <= i < b] unexamined
		//	data[b <= i < c] = pivot
		for {
			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
			}
			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 0000000000..93f1aea109
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+ n := len(data)
+ quickSort(data, 0, n, maxDepth(n))
+}
+
// quickSort sorts data[a:b] by literal value using quicksort, switching
// to heapSort once maxDepth partitioning levels are exhausted and
// finishing small slices with a Shell pass plus insertion sort.
func quickSort(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivot(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSort(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSort(data, mhi, b)
		} else {
			quickSort(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSort(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].literal < data[i-6].literal {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSort(data, a, b)
	}
}
+func heapSort(data []literalNode, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDown(data, lo, i, first)
+ }
+}
+
// siftDown implements the heap property on data[lo, hi), ordering by
// literal value.
// first is an offset into the array where the root of the heap lies.
func siftDown(data []literalNode, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		// Pick the larger of the two children.
		if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
			child++
		}
		// Stop once the root dominates both children.
		if data[first+root].literal > data[first+child].literal {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}
// doPivot partitions data[lo:hi] around a median-of-nine pivot, ordering
// by literal value. It returns midlo, midhi such that elements in
// data[lo:midlo] are < pivot, data[midlo:midhi] == pivot and
// data[midhi:hi] are > pivot.
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThree(data, lo, lo+s, lo+2*s)
		medianOfThree(data, m, m-s, m+s)
		medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThree(data, lo, m, hi-1)

	// Invariants are:
	//	data[lo] = pivot (set up by ChoosePivot)
	//	data[lo < i < a] < pivot
	//	data[a <= i < b] <= pivot
	//	data[b <= i < c] unexamined
	//	data[c <= i < hi-1] > pivot
	//	data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && data[a].literal < data[pivot].literal; a++ {
	}
	b := a
	for {
		for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
		}
		for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].literal > data[pivot].literal { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		//	data[a <= i < b] unexamined
		//	data[b <= i < c] = pivot
		for {
			for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
			}
			for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
// maxDepth returns a threshold at which quicksort should switch
// to heapsort. It returns 2*ceil(lg(n+1)).
func maxDepth(n int) int {
	depth := 0
	for v := n; v > 0; v >>= 1 {
		depth++
	}
	return 2 * depth
}
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].literal < data[m1].literal {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 0000000000..414c0bea9f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,793 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+ "bufio"
+ "compress/flate"
+ "fmt"
+ "io"
+ "math/bits"
+ "sync"
+)
+
+const (
+ maxCodeLen = 16 // max length of Huffman code
+ maxCodeLenMask = 15 // mask for max length of Huffman code
+ // The next three numbers come from the RFC section 3.2.7, with the
+ // additional proviso in section 3.2.5 which implies that distance codes
+ // 30 and 31 should never occur in compressed data.
+ maxNumLit = 286
+ maxNumDist = 30
+ numCodes = 19 // number of codes in Huffman meta-code
+
+ debugDecode = false
+)
+
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+ length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+ maxRead int // the maximum number of bits we can read and not overread
+ chunks *[huffmanNumChunks]uint16 // chunks as described above
+ links [][]uint16 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.chunks == nil {
+ h.chunks = &[huffmanNumChunks]uint16{}
+ }
+ if h.maxRead != 0 {
+ *h = huffmanDecoder{chunks: h.chunks, links: h.links}
+ }
+
+ // Count number of codes of each length,
+ // compute maxRead and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n&maxCodeLenMask]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i&maxCodeLenMask] = code
+ code += count[i&maxCodeLenMask]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ if debugDecode {
+ fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
+ }
+ return false
+ }
+
+ h.maxRead = min
+ chunks := h.chunks[:]
+ for i := range chunks {
+ chunks[i] = 0
+ }
+
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint16, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(bits.Reverse16(uint16(j)))
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ if cap(h.links[off]) < numLinks {
+ h.links[off] = make([]uint16, numLinks)
+ } else {
+ links := h.links[off][:0]
+ h.links[off] = links[:numLinks]
+ }
+ }
+ } else {
+ h.links = h.links[:0]
+ }
+
+ for i, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint16(i<<huffmanValueShift | n)
+ reverse := int(bits.Reverse16(uint16(code)))
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+ // Input source.
+ r Reader
+ roffset int64
+
+ // Huffman decoders for literal/length, distance.
+ h1, h2 huffmanDecoder
+
+ // Length arrays used to define Huffman codes.
+ bits *[maxNumLit + maxNumDist]int
+ codebits *[numCodes]int
+
+ // Output history, buffer.
+ dict dictDecoder
+
+ // Next step in the decompression,
+ // and decompression state.
+ step func(*decompressor)
+ stepState int
+ err error
+ toRead []byte
+ hl, hd *huffmanDecoder
+ copyLen int
+ copyDist int
+
+ // Temporary buffer (avoids repeated allocation).
+ buf [4]byte
+
+ // Input bits, in top of b.
+ b uint32
+
+ nb uint
+ final bool
+}
+
+func (f *decompressor) nextBlock() {
+ for f.nb < 1+2 {
+ if f.err = f.moreBits(); f.err != nil {
+ return
+ }
+ }
+ f.final = f.b&1 == 1
+ f.b >>= 1
+ typ := f.b & 3
+ f.b >>= 2
+ f.nb -= 1 + 2
+ switch typ {
+ case 0:
+ f.dataBlock()
+ if debugDecode {
+ fmt.Println("stored block")
+ }
+ case 1:
+ // compressed, fixed Huffman tables
+ f.hl = &fixedHuffmanDecoder
+ f.hd = nil
+ f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("predefinied huffman block")
+ }
+ case 2:
+ // compressed, dynamic Huffman tables
+ if f.err = f.readHuffman(); f.err != nil {
+ break
+ }
+ f.hl = &f.h1
+ f.hd = &f.h2
+ f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("dynamic huffman block")
+ }
+ default:
+ // 3 is reserved.
+ if debugDecode {
+ fmt.Println("reserved data block encountered")
+ }
+ f.err = CorruptInputError(f.roffset)
+ }
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+ for {
+ if len(f.toRead) > 0 {
+ n := copy(b, f.toRead)
+ f.toRead = f.toRead[n:]
+ if len(f.toRead) == 0 {
+ return n, f.err
+ }
+ return n, nil
+ }
+ if f.err != nil {
+ return 0, f.err
+ }
+ f.step(f)
+ if f.err != nil && len(f.toRead) == 0 {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ }
+ }
+}
+
+// Support the io.WriteTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ flushed := false
+ for {
+ if len(f.toRead) > 0 {
+ n, err := w.Write(f.toRead)
+ total += int64(n)
+ if err != nil {
+ f.err = err
+ return total, err
+ }
+ if n != len(f.toRead) {
+ return total, io.ErrShortWrite
+ }
+ f.toRead = f.toRead[:0]
+ }
+ if f.err != nil && flushed {
+ if f.err == io.EOF {
+ return total, nil
+ }
+ return total, f.err
+ }
+ if f.err == nil {
+ f.step(f)
+ }
+ if len(f.toRead) == 0 && f.err != nil && !flushed {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ flushed = true
+ }
+ }
+}
+
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
+ return nil
+ }
+ return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+ // HLIT[5], HDIST[5], HCLEN[4].
+ for f.nb < 5+5+4 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ nlit := int(f.b&0x1F) + 257
+ if nlit > maxNumLit {
+ if debugDecode {
+ fmt.Println("nlit > maxNumLit", nlit)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ ndist := int(f.b&0x1F) + 1
+ if ndist > maxNumDist {
+ if debugDecode {
+ fmt.Println("ndist > maxNumDist", ndist)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ nclen := int(f.b&0xF) + 4
+ // numCodes is 19, so nclen is always valid.
+ f.b >>= 4
+ f.nb -= 5 + 5 + 4
+
+ // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+ for i := 0; i < nclen; i++ {
+ for f.nb < 3 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ f.codebits[codeOrder[i]] = int(f.b & 0x7)
+ f.b >>= 3
+ f.nb -= 3
+ }
+ for i := nclen; i < len(codeOrder); i++ {
+ f.codebits[codeOrder[i]] = 0
+ }
+ if !f.h1.init(f.codebits[0:]) {
+ if debugDecode {
+ fmt.Println("init codebits failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // HLIT + 257 code lengths, HDIST + 1 code lengths,
+ // using the code length Huffman code.
+ for i, n := 0, nlit+ndist; i < n; {
+ x, err := f.huffSym(&f.h1)
+ if err != nil {
+ return err
+ }
+ if x < 16 {
+ // Actual length.
+ f.bits[i] = x
+ i++
+ continue
+ }
+ // Repeat previous length or zero.
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ if debugDecode {
+ fmt.Println("i==0")
+ }
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits:", err)
+ }
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+ f.b >>= nb & regSizeMaskUint32
+ f.nb -= nb
+ if i+rep > n {
+ if debugDecode {
+ fmt.Println("i+rep > n", i, rep, n)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ if debugDecode {
+ fmt.Println("init2 failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the maxRead bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.maxRead < f.bits[endBlockMarker] {
+ f.h1.maxRead = f.bits[endBlockMarker]
+ }
+ if !f.final {
+ // If not the final block, the smallest block possible is
+ // a predefined table, BTYPE=01, with a single EOB marker.
+ // This will take up 3 + 7 bits.
+ f.h1.maxRead += 10
+ }
+
+ return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+ // Uncompressed.
+ // Discard current half-byte.
+ left := (f.nb) & 7
+ f.nb -= left
+ f.b >>= left
+
+ offBytes := f.nb >> 3
+ // Unfilled values will be overwritten.
+ f.buf[0] = uint8(f.b)
+ f.buf[1] = uint8(f.b >> 8)
+ f.buf[2] = uint8(f.b >> 16)
+ f.buf[3] = uint8(f.b >> 24)
+
+ f.roffset += int64(offBytes)
+ f.nb, f.b = 0, 0
+
+ // Length then ones-complement of length.
+ nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+ f.roffset += int64(nr)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+ n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+ nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+ if nn != ^n {
+ if debugDecode {
+ ncomp := ^n
+ fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ if n == 0 {
+ f.toRead = f.dict.readFlush()
+ f.finishBlock()
+ return
+ }
+
+ f.copyLen = int(n)
+ f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() {
+ buf := f.dict.writeSlice()
+ if len(buf) > f.copyLen {
+ buf = buf[:f.copyLen]
+ }
+
+ cnt, err := io.ReadFull(f.r, buf)
+ f.roffset += int64(cnt)
+ f.copyLen -= cnt
+ f.dict.writeMark(cnt)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).copyData
+ return
+ }
+ f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+ if f.final {
+ if f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+ f.err = io.EOF
+ }
+ f.step = (*decompressor).nextBlock
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+ if e == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return e
+}
+
+func (f *decompressor) moreBits() error {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
+ f.nb += 8
+ return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(h.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ return 0, noEOF(err)
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := h.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return 0, f.err
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ return int(chunk >> huffmanValueShift), nil
+ }
+ }
+}
+
+func makeReader(r io.Reader) Reader {
+ if rr, ok := r.(Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+ fixedOnce.Do(func() {
+ // These come from the RFC section 3.2.6.
+ var bits [288]int
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ fixedHuffmanDecoder.init(bits[:])
+ })
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ h1: f.h1,
+ h2: f.h2,
+ dict: f.dict,
+ step: (*decompressor).nextBlock,
+ }
+ f.dict.init(maxMatchOffset, dict)
+ return nil
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, nil)
+ return &f
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, dict)
+ return &f
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 0000000000..61342b6b88
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,1283 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "math/bits"
+ "strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesBuffer() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Buffer)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// huffmanBufioReader decodes a single Huffman block from f.
+// f.hl and f.hd are the Huffman decoders for the literal/length values
+// and the distance values, respectively. If f.hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+// This variant is specialized for f.r being a *bufio.Reader so that the
+// ReadByte calls need no interface dispatch (see huffmanBlockDecoder).
+func (f *decompressor) huffmanBufioReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bufio.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	// Resume at the point where the previous invocation of this step stopped.
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			// Literal byte: write it, flushing the window if it filled up.
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = (*decompressor).huffmanBufioReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			// End-of-block marker.
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			// Length code with extra bits; base length and extra-bit count
+			// come from the decCodeToLen table.
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		// Decode the distance code of the back-reference.
+		var dist uint32
+		if f.hd == nil {
+			// Fixed Huffman block: 5-bit distance code, stored bit-reversed.
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		// Convert the distance code into an actual distance, reading any
+		// extra bits the code requires.
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			// Window full or copy unfinished: flush output, save the bit
+			// register, and resume in the copyHistory state next step.
+			f.toRead = dict.readFlush()
+			f.step = (*decompressor).huffmanBufioReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// huffmanStringsReader decodes a single Huffman block from f.
+// f.hl and f.hd are the Huffman decoders for the literal/length values
+// and the distance values, respectively. If f.hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+// This variant is specialized for f.r being a *strings.Reader so that the
+// ReadByte calls need no interface dispatch (see huffmanBlockDecoder).
+func (f *decompressor) huffmanStringsReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*strings.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	// Resume at the point where the previous invocation of this step stopped.
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			// Literal byte: write it, flushing the window if it filled up.
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = (*decompressor).huffmanStringsReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			// End-of-block marker.
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			// Length code with extra bits; base length and extra-bit count
+			// come from the decCodeToLen table.
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		// Decode the distance code of the back-reference.
+		var dist uint32
+		if f.hd == nil {
+			// Fixed Huffman block: 5-bit distance code, stored bit-reversed.
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		// Convert the distance code into an actual distance, reading any
+		// extra bits the code requires.
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			// Window full or copy unfinished: flush output, save the bit
+			// register, and resume in the copyHistory state next step.
+			f.toRead = dict.readFlush()
+			f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// huffmanGenericReader decodes a single Huffman block from f.
+// f.hl and f.hd are the Huffman decoders for the literal/length values
+// and the distance values, respectively. If f.hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+// This is the fallback variant used when f.r is not one of the concrete
+// reader types recognized by huffmanBlockDecoder; it reads through the
+// Reader interface.
+func (f *decompressor) huffmanGenericReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	// Resume at the point where the previous invocation of this step stopped.
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			// Literal byte: write it, flushing the window if it filled up.
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = (*decompressor).huffmanGenericReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			// End-of-block marker.
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			// Length code with extra bits; base length and extra-bit count
+			// come from the decCodeToLen table.
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		// Decode the distance code of the back-reference.
+		var dist uint32
+		if f.hd == nil {
+			// Fixed Huffman block: 5-bit distance code, stored bit-reversed.
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		// Convert the distance code into an actual distance, reading any
+		// extra bits the code requires.
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			// Window full or copy unfinished: flush output, save the bit
+			// register, and resume in the copyHistory state next step.
+			f.toRead = dict.readFlush()
+			f.step = (*decompressor).huffmanGenericReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() func() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ return f.huffmanBytesBuffer
+ case *bytes.Reader:
+ return f.huffmanBytesReader
+ case *bufio.Reader:
+ return f.huffmanBufioReader
+ case *strings.Reader:
+ return f.huffmanStringsReader
+ case Reader:
+ return f.huffmanGenericReader
+ default:
+ return f.huffmanGenericReader
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_test.go b/vendor/github.com/klauspost/compress/flate/inflate_test.go
new file mode 100644
index 0000000000..aac29c6d2b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate_test.go
@@ -0,0 +1,281 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "crypto/rand"
+ "io"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// TestReset compresses two independent inputs, decompresses both through
+// a single reader using Resetter.Reset, and verifies the round trip.
+// Unlike the previous version, every error return is checked so a broken
+// writer or Reset cannot silently produce empty output.
+func TestReset(t *testing.T) {
+	ss := []string{
+		"lorem ipsum izzle fo rizzle",
+		"the quick brown fox jumped over",
+	}
+
+	deflated := make([]bytes.Buffer, 2)
+	for i, s := range ss {
+		w, err := NewWriter(&deflated[i], 1)
+		if err != nil {
+			t.Fatalf("NewWriter: %v", err)
+		}
+		if _, err := w.Write([]byte(s)); err != nil {
+			t.Fatalf("Write: %v", err)
+		}
+		if err := w.Close(); err != nil {
+			t.Fatalf("Close: %v", err)
+		}
+	}
+
+	inflated := make([]bytes.Buffer, 2)
+
+	f := NewReader(&deflated[0])
+	if _, err := io.Copy(&inflated[0], f); err != nil {
+		t.Fatalf("Copy: %v", err)
+	}
+	if err := f.(Resetter).Reset(&deflated[1], nil); err != nil {
+		t.Fatalf("Reset: %v", err)
+	}
+	if _, err := io.Copy(&inflated[1], f); err != nil {
+		t.Fatalf("Copy after Reset: %v", err)
+	}
+	f.Close()
+
+	for i, s := range ss {
+		if s != inflated[i].String() {
+			t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s)
+		}
+	}
+}
+
+// TestReaderTruncated feeds truncated flate streams to the reader and
+// checks that the recoverable prefix is returned together with
+// io.ErrUnexpectedEOF.
+func TestReaderTruncated(t *testing.T) {
+	vectors := []struct{ input, output string }{
+		{"\x00", ""},
+		{"\x00\f", ""},
+		{"\x00\f\x00", ""},
+		{"\x00\f\x00\xf3\xff", ""},
+		{"\x00\f\x00\xf3\xffhello", "hello"},
+		{"\x00\f\x00\xf3\xffhello, world", "hello, world"},
+		{"\x02", ""},
+		{"\xf2H\xcd", "He"},
+		{"\xf2H͙0a\u0084\t", "Hel\x90\x90\x90\x90\x90"},
+		{"\xf2H͙0a\u0084\t\x00", "Hel\x90\x90\x90\x90\x90"},
+	}
+
+	for i, tc := range vectors {
+		dec := NewReader(strings.NewReader(tc.input))
+		got, err := io.ReadAll(dec)
+		if err != io.ErrUnexpectedEOF {
+			t.Errorf("test %d, error mismatch: got %v, want io.ErrUnexpectedEOF", i, err)
+		}
+		if string(got) != tc.output {
+			t.Errorf("test %d, output mismatch: got %q, want %q", i, got, tc.output)
+		}
+	}
+}
+
+// TestResetDict verifies that Reset with a preset dictionary decodes
+// streams that were compressed against that same dictionary.
+func TestResetDict(t *testing.T) {
+	dict := []byte("the lorem fox")
+	ss := []string{
+		"lorem ipsum izzle fo rizzle",
+		"the quick brown fox jumped over",
+	}
+
+	deflated := make([]bytes.Buffer, len(ss))
+	for i, s := range ss {
+		w, _ := NewWriterDict(&deflated[i], DefaultCompression, dict)
+		w.Write([]byte(s))
+		w.Close()
+	}
+
+	inflated := make([]bytes.Buffer, len(ss))
+
+	r := NewReader(nil)
+	for i := range inflated {
+		r.(Resetter).Reset(&deflated[i], dict)
+		io.Copy(&inflated[i], r)
+	}
+	r.Close()
+
+	for i, want := range ss {
+		got := inflated[i].String()
+		if got != want {
+			t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, got, want)
+		}
+	}
+}
+
+// Tests ported from zlib/test/infcover.c
+type infTest struct {
+	hex string // space-separated hex bytes of the compressed stream
+	id  string // test name carried over from infcover.c
+	n   int    // 0: decoding must succeed; non-zero (incl. -1): an error is expected
+}
+
+var infTests = []infTest{
+	{"0 0 0 0 0", "invalid stored block lengths", 1},
+	{"3 0", "fixed", 0},
+	{"6", "invalid block type", 1},
+	{"1 1 0 fe ff 0", "stored", 0},
+	{"fc 0 0", "too many length or distance symbols", 1},
+	{"4 0 fe ff", "invalid code lengths set", 1},
+	{"4 0 24 49 0", "invalid bit length repeat", 1},
+	{"4 0 24 e9 ff ff", "invalid bit length repeat", 1},
+	{"4 0 24 e9 ff 6d", "invalid code -- missing end-of-block", 1},
+	{"4 80 49 92 24 49 92 24 71 ff ff 93 11 0", "invalid literal/lengths set", 1},
+	{"4 80 49 92 24 49 92 24 f b4 ff ff c3 84", "invalid distances set", 1},
+	{"4 c0 81 8 0 0 0 0 20 7f eb b 0 0", "invalid literal/length code", 1},
+	{"2 7e ff ff", "invalid distance code", 1},
+	{"c c0 81 0 0 0 0 0 90 ff 6b 4 0", "invalid distance too far back", 1},
+
+	// also trailer mismatch just in inflate()
+	{"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 1", "incorrect data check", -1},
+	{"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 1", "incorrect length check", -1},
+	{"5 c0 21 d 0 0 0 80 b0 fe 6d 2f 91 6c", "pull 17", 0},
+	{"5 e0 81 91 24 cb b2 2c 49 e2 f 2e 8b 9a 47 56 9f fb fe ec d2 ff 1f", "long code", 0},
+	{"ed c0 1 1 0 0 0 40 20 ff 57 1b 42 2c 4f", "length extra", 0},
+	{"ed cf c1 b1 2c 47 10 c4 30 fa 6f 35 1d 1 82 59 3d fb be 2e 2a fc f c", "long distance and extra", 0},
+	{"ed c0 81 0 0 0 0 80 a0 fd a9 17 a9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6", "window end", 0},
+}
+
+// dehexSpace decodes a space-separated list of hex bytes, the format used
+// by the infcover.c test vectors. Unparseable fields decode to 0, matching
+// the previous inline behavior (an empty string yields a single 0 byte).
+func dehexSpace(s string) []byte {
+	fields := strings.Split(s, " ")
+	data := make([]byte, len(fields))
+	for i, h := range fields {
+		b, _ := strconv.ParseInt(h, 16, 32)
+		data[i] = byte(b)
+	}
+	return data
+}
+
+// TestInflate runs the infcover.c vectors through the decoder. The hex
+// parsing, previously duplicated verbatim in both loops, is factored into
+// dehexSpace; the inconsistent "%q:Expected" message now matches its peers.
+func TestInflate(t *testing.T) {
+	// infTests: n != 0 means an error is expected, n == 0 means success.
+	for _, test := range infTests {
+		r := NewReader(bytes.NewReader(dehexSpace(test.hex)))
+
+		_, err := io.Copy(io.Discard, r)
+		if (test.n == 0 && err == nil) || (test.n != 0 && err != nil) {
+			t.Logf("%q: OK:", test.id)
+			t.Logf(" - got %v", err)
+			continue
+		}
+
+		if test.n == 0 && err != nil {
+			t.Errorf("%q: Expected no error, but got %v", test.id, err)
+			continue
+		}
+
+		if test.n != 0 && err == nil {
+			t.Errorf("%q: Expected an error, but got none", test.id)
+			continue
+		}
+		// Defensive: the three branches above are exhaustive.
+		t.Fatal(test.n, err)
+	}
+
+	// infOutTests: the err field states whether decoding must fail.
+	for _, test := range infOutTests {
+		r := NewReader(bytes.NewReader(dehexSpace(test.hex)))
+
+		_, err := io.Copy(io.Discard, r)
+		if test.err == (err != nil) {
+			t.Logf("%q: OK:", test.id)
+			t.Logf(" - got %v", err)
+			continue
+		}
+
+		if !test.err && err != nil {
+			t.Errorf("%q: Expected no error, but got %v", test.id, err)
+			continue
+		}
+
+		if test.err && err == nil {
+			t.Errorf("%q: Expected an error, but got none", test.id)
+			continue
+		}
+		// Defensive: the three branches above are exhaustive.
+		t.Fatal(test.err, err)
+	}
+
+}
+
+// Tests ported from zlib/test/infcover.c
+// Since zlib inflate is push (writer) instead of pull (reader)
+// some of the window size tests have been removed, since they
+// are irrelevant.
+type infOutTest struct {
+	hex    string // space-separated hex bytes of the compressed stream
+	id     string // test name carried over from infcover.c
+	step   int    // carried over from infcover.c; not read by TestInflate
+	win    int    // infcover.c window-size argument; not read by TestInflate
+	length int    // infcover.c output length; not read by TestInflate
+	err    bool   // whether decoding is expected to fail
+}
+
+var infOutTests = []infOutTest{
+	{"2 8 20 80 0 3 0", "inflate_fast TYPE return", 0, -15, 258, false},
+	{"63 18 5 40 c 0", "window wrap", 3, -8, 300, false},
+	{"e5 e0 81 ad 6d cb b2 2c c9 01 1e 59 63 ae 7d ee fb 4d fd b5 35 41 68 ff 7f 0f 0 0 0", "fast length extra bits", 0, -8, 258, true},
+	{"25 fd 81 b5 6d 59 b6 6a 49 ea af 35 6 34 eb 8c b9 f6 b9 1e ef 67 49 50 fe ff ff 3f 0 0", "fast distance extra bits", 0, -8, 258, true},
+	{"3 7e 0 0 0 0 0", "fast invalid distance code", 0, -8, 258, true},
+	{"1b 7 0 0 0 0 0", "fast invalid literal/length code", 0, -8, 258, true},
+	{"d c7 1 ae eb 38 c 4 41 a0 87 72 de df fb 1f b8 36 b1 38 5d ff ff 0", "fast 2nd level codes and too far back", 0, -8, 258, true},
+	{"63 18 5 8c 10 8 0 0 0 0", "very common case", 0, -8, 259, false},
+	{"63 60 60 18 c9 0 8 18 18 18 26 c0 28 0 29 0 0 0", "contiguous and wrap around window", 6, -8, 259, false},
+	{"63 0 3 0 0 0 0 0", "copy direct from output", 0, -8, 259, false},
+	{"1f 8b 0 0", "bad gzip method", 0, 31, 0, true},
+	{"1f 8b 8 80", "bad gzip flags", 0, 31, 0, true},
+	{"77 85", "bad zlib method", 0, 15, 0, true},
+	{"78 9c", "bad zlib window size", 0, 8, 0, true},
+	{"1f 8b 8 1e 0 0 0 0 0 0 1 0 0 0 0 0 0", "bad header crc", 0, 47, 1, true},
+	{"1f 8b 8 2 0 0 0 0 0 0 1d 26 3 0 0 0 0 0 0 0 0 0", "check gzip length", 0, 47, 0, true},
+	{"78 90", "bad zlib header check", 0, 47, 0, true},
+	{"8 b8 0 0 0 1", "need dictionary", 0, 8, 0, true},
+	{"63 18 68 30 d0 0 0", "force split window update", 4, -8, 259, false},
+	{"3 0", "use fixed blocks", 0, -15, 1, false},
+	{"", "bad window size", 0, 1, 0, true},
+}
+
+func TestWriteTo(t *testing.T) {
+ input := make([]byte, 100000)
+ n, err := rand.Read(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != len(input) {
+ t.Fatal("did not fill buffer")
+ }
+ compressed := &bytes.Buffer{}
+ w, err := NewWriter(compressed, -2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = w.Write(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != len(input) {
+ t.Fatal("did not fill buffer")
+ }
+ w.Close()
+ buf := compressed.Bytes()
+
+ dec := NewReader(bytes.NewBuffer(buf))
+ // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure.
+ readall, err := io.ReadAll(io.NopCloser(dec))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(readall) != len(input) {
+ t.Fatal("did not decompress everything")
+ }
+
+ dec = NewReader(bytes.NewBuffer(buf))
+ wtbuf := &bytes.Buffer{}
+ written, err := dec.(io.WriterTo).WriteTo(wtbuf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if written != int64(len(input)) {
+ t.Error("Returned length did not match, expected", len(input), "got", written)
+ }
+ if wtbuf.Len() != len(input) {
+ t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len())
+ }
+ if !bytes.Equal(wtbuf.Bytes(), input) {
+ t.Fatal("output did not match input")
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 0000000000..703b9a89aa
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,241 @@
+package flate
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
+
// fastEncL1 is the level 1 (fastest) encoder.
// The embedded fastGen maintains the previous byte block (history)
// across Encode calls, and table maps each hash bucket to the most
// recently seen offset for that hash.
// This is the generic implementation.
type fastEncL1 struct {
	fastGen
	table [tableSize]tableEntry
}
+
// Encode performs level 1 compression on src, appending literal and match
// tokens to dst. It uses a Snappy-like greedy scheme with a single hash
// table holding one candidate position per hash bucket.
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: cheap path, just clear the table.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)

	for {
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		// Scan forward until a 4-byte match candidate is found,
		// skipping faster the longer we go without a match.
		for {
			nextHash := hashLen(cv, tableBits, hashBytes)
			candidate = e.table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}

			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, tableBits, hashBytes)

			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			cv = now
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			var l = int32(4)
			if false {
				l = e.matchlenLong(s+4, t+4, src) + 4
			} else {
				// inlined:
				// Compare 8 bytes at a time; the xor pinpoints the first
				// mismatching byte via its trailing zero count.
				a := src[s+4:]
				b := src[t+4:]
				for len(a) >= 8 {
					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
						l += int32(bits.TrailingZeros64(diff) >> 3)
						break
					}
					l += 8
					a = a[8:]
					b = b[8:]
				}
				if len(a) < 8 {
					// Tail: byte-by-byte comparison of the remainder.
					b = b[:len(a)]
					for i := range a {
						if a[i] != b[i] {
							break
						}
						l++
					}
				}
			}

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission (avoids the call overhead).
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			// Save the match found
			if false {
				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			} else {
				// Inlined...
				// Split matches longer than 258 bytes (the DEFLATE maximum)
				// into multiple tokens, avoiding tiny trailing pieces.
				xoffset := uint32(s - t - baseMatchOffset)
				xlength := l
				oc := offsetCode(xoffset)
				xoffset |= oc << 16
				for xlength > 0 {
					xl := xlength
					if xl > 258 {
						if xl > 258+baseMatchLength {
							xl = 258
						} else {
							xl = 258 - baseMatchLength
						}
					}
					xlength -= xl
					xl -= baseMatchLength
					dst.extraHist[lengthCodes1[uint8(xl)]]++
					dst.offHist[oc]++
					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
					dst.n++
				}
			}
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, tableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hashLen(x, tableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
				cv = x >> 8
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 0000000000..876dfbe305
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,214 @@
+package flate
+
+import "fmt"
+
// fastEncL2 is the level 2 encoder. The embedded fastGen maintains the
// previous byte block (history) across Encode calls; table uses the larger
// bTableSize so matches can be found across block boundaries.
// This is the generic implementation.
type fastEncL2 struct {
	fastGen
	table [bTableSize]tableEntry
}
+
// Encode uses a similar algorithm to level 1, but with a larger hash table
// and denser in-between indexing, giving better compression at a small
// slowdown. Literal and match tokens are appended to dst.
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5
	)

	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: cheap path, just clear the table.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		// When should we start skipping if we haven't found matches in a long while.
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, bTableBits, hashBytes)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash]
			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, bTableBits, hashBytes)

			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				break
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission (avoids the call overhead).
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}

			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// Store every second hash in-between, but offset by 1.
			for i := s - l + 2; i < s-5; i += 7 {
				x := load6432(src, i)
				nextHash := hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, bTableBits, hashBytes)
			prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			e.table[prevHash2] = tableEntry{offset: o + 1}
			currHash := hashLen(x>>16, bTableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
				cv = x >> 24
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 0000000000..7aa2b72a12
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,241 @@
+package flate
+
+import "fmt"
+
// fastEncL3 is the level 3 encoder. Each hash bucket keeps the two most
// recent positions (tableEntryPrev), so two candidates can be checked
// per probe.
type fastEncL3 struct {
	fastGen
	table [1 << 16]tableEntryPrev
}
+
// Encode uses a similar algorithm to level 2, will check up to two candidates.
// Literal and match tokens are appended to dst.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		tableBits              = 16
		tableSize              = 1 << tableBits
		hashBytes              = 5
	)

	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: cheap path, just clear the table.
			for i := range e.table[:] {
				e.table[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		// Both the current and the previous entry must be rebased.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
			}
			if v.Prev.offset <= minOff {
				v.Prev.offset = 0
			} else {
				v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
			}
			e.table[i] = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// Skip if too small.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 7
		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, tableBits, hashBytes)
			s = nextS
			nextS = s + 1 + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash]
			now := load6432(src, nextS)

			// Safe offset distance until s + 4...
			minOffset := e.cur + s - (maxMatchOffset - 4)
			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}

			// Check both candidates
			candidate = candidates.Cur
			if candidate.offset < minOffset {
				cv = now
				// Previous will also be invalid, we have nothing.
				continue
			}

			if uint32(cv) == load3232(src, candidate.offset-e.cur) {
				if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
					break
				}
				// Both match and are valid, pick longest.
				offset := s - (candidate.offset - e.cur)
				o2 := s - (candidates.Prev.offset - e.cur)
				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
				if l2 > l1 {
					candidate = candidates.Prev
				}
				break
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
					break
				}
			}
			cv = now
		}

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission (avoids the call overhead).
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}

			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+8) < len(src) && t > 0 {
					cv = load6432(src, t)
					nextHash := hashLen(cv, tableBits, hashBytes)
					e.table[nextHash] = tableEntryPrev{
						Prev: e.table[nextHash].Cur,
						Cur:  tableEntry{offset: e.cur + t},
					}
				}
				goto emitRemainder
			}

			// Store every 5th hash in-between.
			for i := s - l + 2; i < s-5; i += 6 {
				nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
				e.table[nextHash] = tableEntryPrev{
					Prev: e.table[nextHash].Cur,
					Cur:  tableEntry{offset: e.cur + i}}
			}
			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s.
			x := load6432(src, s-2)
			prevHash := hashLen(x, tableBits, hashBytes)

			e.table[prevHash] = tableEntryPrev{
				Prev: e.table[prevHash].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2},
			}
			x >>= 8
			prevHash = hashLen(x, tableBits, hashBytes)

			e.table[prevHash] = tableEntryPrev{
				Prev: e.table[prevHash].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1},
			}
			x >>= 8
			currHash := hashLen(x, tableBits, hashBytes)
			candidates := e.table[currHash]
			cv = x
			e.table[currHash] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur},
			}

			// Check both candidates
			candidate = candidates.Cur
			minOffset := e.cur + s - (maxMatchOffset - 4)

			if candidate.offset > minOffset {
				if uint32(cv) == load3232(src, candidate.offset-e.cur) {
					// Found a match...
					continue
				}
				candidate = candidates.Prev
				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
					// Match at prev...
					continue
				}
			}
			cv = x >> 8
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 0000000000..23c08b325c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,221 @@
+package flate
+
+import "fmt"
+
// fastEncL4 is the level 4 encoder. It keeps two hash tables:
// table for short (4-byte) hashes and bTable for long (7-byte) hashes,
// preferring the long match when both hit.
type fastEncL4 struct {
	fastGen
	table  [tableSize]tableEntry
	bTable [tableSize]tableEntry
}
+
// Encode performs level 4 compression on src, appending literal and match
// tokens to dst. It probes both a short-hash and a long-hash table per
// position and prefers the long candidate.
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashShortBytes         = 4
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: cheap path, just clear both tables.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.bTable[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1

		nextS := s
		var t int32
		for {
			nextHashS := hashLen(cv, tableBits, hashShortBytes)
			nextHashL := hash7(cv, tableBits)

			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			e.bTable[nextHashL] = entry

			t = lCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
				// We got a long match. Use that.
				break
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
				// Found a 4 match...
				lCandidate = e.bTable[hash7(next, tableBits)]

				// If the next long is a candidate, check if we should use that instead...
				lOff := nextS - (lCandidate.offset - e.cur)
				if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
					if l2 > l1 {
						s = nextS
						t = lCandidate.offset - e.cur
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Extend the 4-byte match as long as possible.
		l := e.matchlenLong(s+4, t+4, src) + 4

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission (avoids the call overhead).
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		if debugDeflate {
			// Sanity checks on the match invariants; only active in debug builds.
			if t >= s {
				panic("s-t")
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			// Index first pair after match end.
			if int(s+8) < len(src) {
				cv := load6432(src, s)
				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
			}
			goto emitRemainder
		}

		// Store every 3rd hash in-between
		if true {
			i := nextS
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				t2 := tableEntry{offset: t.offset + 1}
				e.bTable[hash7(cv, tableBits)] = t
				e.bTable[hash7(cv>>8, tableBits)] = t2
				e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2

				i += 3
				for ; i < s-1; i += 3 {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					e.bTable[hash7(cv, tableBits)] = t
					e.bTable[hash7(cv>>8, tableBits)] = t2
					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
				}
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hashLen(x, tableBits, hashShortBytes)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		e.bTable[prevHashL] = tableEntry{offset: o}
		cv = x >> 8
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 0000000000..83ef50ba45
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,310 @@
+package flate
+
+import "fmt"
+
// fastEncL5 is the level 5 encoder. Like level 4 it keeps a short-hash
// table and a long-hash table, but the long table stores the two most
// recent positions per bucket (tableEntryPrev) for deeper searching.
type fastEncL5 struct {
	fastGen
	table  [tableSize]tableEntry
	bTable [tableSize]tableEntryPrev
}
+
// Encode performs level 5 compression on src, appending literal and match
// tokens to dst. It checks short and long candidates (current and previous
// long entry) and additionally probes past the end of the best match for a
// longer alternative.
func (e *fastEncL5) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashShortBytes         = 4
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: cheap path, just clear both tables.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
				v.Prev.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
				if v.Prev.offset <= minOff {
					v.Prev.offset = 0
				} else {
					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
				}
			}
			e.bTable[i] = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1

		nextS := s
		var l int32
		var t int32
		for {
			nextHashS := hashLen(cv, tableBits, hashShortBytes)
			nextHashL := hash7(cv, tableBits)

			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			eLong := &e.bTable[nextHashL]
			eLong.Cur, eLong.Prev = entry, eLong.Cur

			nextHashS = hashLen(next, tableBits, hashShortBytes)
			nextHashL = hash7(next, tableBits)

			t = lCandidate.Cur.offset - e.cur
			if s-t < maxMatchOffset {
				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

					// Current long candidate matched; also compare against the
					// previous long candidate and keep whichever is longer.
					t2 := lCandidate.Prev.offset - e.cur
					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
						l = e.matchlen(s+4, t+4, src) + 4
						ml1 := e.matchlen(s+4, t2+4, src) + 4
						if ml1 > l {
							t = t2
							l = ml1
							break
						}
					}
					break
				}
				t = lCandidate.Prev.offset - e.cur
				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
					break
				}
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
				// Found a 4 match...
				l = e.matchlen(s+4, t+4, src) + 4
				lCandidate = e.bTable[nextHashL]
				// Store the next match

				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
				eLong := &e.bTable[nextHashL]
				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

				// If the next long is a candidate, use that...
				t2 := lCandidate.Cur.offset - e.cur
				if nextS-t2 < maxMatchOffset {
					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
					// If the previous long is a candidate, use that...
					t2 = lCandidate.Prev.offset - e.cur
					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		if l == 0 {
			// Extend the 4-byte match as long as possible.
			l = e.matchlenLong(s+4, t+4, src) + 4
		} else if l == maxMatchLength {
			// matchlen above is capped; keep extending past the cap.
			l += e.matchlenLong(s+l, t+l, src)
		}

		// Try to locate a better match by checking the end of best match...
		if sAt := s + l; l < 30 && sAt < sLimit {
			// Allow some bytes at the beginning to mismatch.
			// Sweet spot is 2/3 bytes depending on input.
			// 3 is only a little better when it is but sometimes a lot worse.
			// The skipped bytes are tested in Extend backwards,
			// and still picked up as part of the match if they do.
			const skipBeginning = 2
			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
			t2 := eLong - e.cur - l + skipBeginning
			s2 := s + skipBeginning
			off := s2 - t2
			if t2 >= 0 && off < maxMatchOffset && off > 0 {
				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
					t = t2
					l = l2
					s = s2
				}
			}
		}

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission (avoids the call overhead).
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		if debugDeflate {
			// Sanity checks on the match invariants; only active in debug builds.
			if t >= s {
				panic(fmt.Sprintln("s-t", s, t))
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", s-t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			goto emitRemainder
		}

		// Store every 3rd hash in-between.
		if true {
			const hashEvery = 3
			i := s - l + 1
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = t, eLong.Cur

				// Do an long at i+1
				cv >>= 8
				t = tableEntry{offset: t.offset + 1}
				eLong = &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = t, eLong.Cur

				// We only have enough bits for a short entry at i+2
				cv >>= 8
				t = tableEntry{offset: t.offset + 1}
				e.table[hashLen(cv, tableBits, hashShortBytes)] = t

				// Skip one - otherwise we risk hitting 's'
				i += 4
				for ; i < s-1; i += hashEvery {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					eLong := &e.bTable[hash7(cv, tableBits)]
					eLong.Cur, eLong.Prev = t, eLong.Cur
					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
				}
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hashLen(x, tableBits, hashShortBytes)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		eLong := &e.bTable[prevHashL]
		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
		cv = x >> 8
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 0000000000..f1e9d98fa5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,325 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ // Repeat MUST be > 1 and within range
+ repeat := int32(1)
+ for {
+ const skipLog = 7
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ // Calculate hashes of 'next'
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Long candidate matches at least 4 bytes.
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check the previous long candidate as well.
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ // Current value did not match, but check if previous long value does.
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+			// Found a 4-byte match...
+ l = e.matchlen(s+4, t+4, src) + 4
+
+ // Look up next long candidate (at nextS)
+ lCandidate = e.bTable[nextHashL]
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check repeat at s + repOff
+ const repOff = 1
+ t2 := s - repeat + repOff
+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ l = ml
+ s += repOff
+ // Not worth checking more.
+ break
+ }
+ }
+
+ // If the next long is a candidate, use that...
+ t2 = lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ // This is ok, but check previous as well.
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Try to locate a better match by checking the end-of-match...
+ if sAt := s + l; sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+ // 3 is only a little better when it is but sometimes a lot worse.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
+ eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
+ // Test current
+ t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if off < maxMatchOffset {
+ if off > 0 && t2 >= 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ // Test next:
+ t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+ off := s2 - t2
+ if off > 0 && off < maxMatchOffset && t2 >= 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ repeat = s - t
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index after match end.
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+ cv := load6432(src, i)
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+ }
+ goto emitRemainder
+ }
+
+ // Store every long hash in-between and every second short.
+ if true {
+ for i := nextS + 1; i < s-1; i += 2 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ cv = load6432(src, s)
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/reader_test.go b/vendor/github.com/klauspost/compress/flate/reader_test.go
new file mode 100644
index 0000000000..bc83c1f1d9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/reader_test.go
@@ -0,0 +1,106 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestNlitOutOfRange(t *testing.T) {
+ // Trying to decode this bogus flate data, which has a Huffman table
+ // with nlit=288, should not panic.
+ io.Copy(io.Discard, NewReader(strings.NewReader(
+ "\xfc\xfe\x36\xe7\x5e\x1c\xef\xb3\x55\x58\x77\xb6\x56\xb5\x43\xf4"+
+ "\x6f\xf2\xd2\xe6\x3d\x99\xa0\x85\x8c\x48\xeb\xf8\xda\x83\x04\x2a"+
+ "\x75\xc4\xf8\x0f\x12\x11\xb9\xb4\x4b\x09\xa0\xbe\x8b\x91\x4c")))
+}
+
+const (
+ digits = iota
+ twain
+ random
+)
+
+var testfiles = []string{
+ // Digits is the digits of the irrational number e. Its decimal representation
+ // does not repeat, but there are only 10 possible digits, so it should be
+ // reasonably compressible.
+ digits: "../testdata/e.txt",
+ // Twain is Project Gutenberg's edition of Mark Twain's classic English novel.
+ twain: "../testdata/Mark.Twain-Tom.Sawyer.txt",
+ // Random bytes
+ random: "../testdata/sharnd.out",
+}
+
+func benchmarkDecode(b *testing.B, testfile, level, n int) {
+ b.ReportAllocs()
+ b.StopTimer()
+ b.SetBytes(int64(n))
+ buf0, err := os.ReadFile(testfiles[testfile])
+ if err != nil {
+ b.Fatal(err)
+ }
+ if len(buf0) == 0 {
+ b.Fatalf("test file %q has no data", testfiles[testfile])
+ }
+ compressed := new(bytes.Buffer)
+ w, err := NewWriter(compressed, level)
+ if err != nil {
+ b.Fatal(err)
+ }
+ for i := 0; i < n; i += len(buf0) {
+ if len(buf0) > n-i {
+ buf0 = buf0[:n-i]
+ }
+ io.Copy(w, bytes.NewReader(buf0))
+ }
+ w.Close()
+ buf1 := compressed.Bytes()
+ buf0, compressed, w = nil, nil, nil
+ runtime.GC()
+ b.StartTimer()
+ r := NewReader(bytes.NewReader(buf1))
+ res := r.(Resetter)
+ for i := 0; i < b.N; i++ {
+ res.Reset(bytes.NewReader(buf1), nil)
+ io.Copy(io.Discard, r)
+ }
+}
+
+// These short names are so that gofmt doesn't break the BenchmarkXxx function
+// bodies below over multiple lines.
+const (
+ constant = ConstantCompression
+ speed = BestSpeed
+ default_ = DefaultCompression
+ compress = BestCompression
+)
+
+func BenchmarkDecodeDigitsSpeed1e4(b *testing.B) { benchmarkDecode(b, digits, speed, 1e4) }
+func BenchmarkDecodeDigitsSpeed1e5(b *testing.B) { benchmarkDecode(b, digits, speed, 1e5) }
+func BenchmarkDecodeDigitsSpeed1e6(b *testing.B) { benchmarkDecode(b, digits, speed, 1e6) }
+func BenchmarkDecodeDigitsDefault1e4(b *testing.B) { benchmarkDecode(b, digits, default_, 1e4) }
+func BenchmarkDecodeDigitsDefault1e5(b *testing.B) { benchmarkDecode(b, digits, default_, 1e5) }
+func BenchmarkDecodeDigitsDefault1e6(b *testing.B) { benchmarkDecode(b, digits, default_, 1e6) }
+func BenchmarkDecodeDigitsCompress1e4(b *testing.B) { benchmarkDecode(b, digits, compress, 1e4) }
+func BenchmarkDecodeDigitsCompress1e5(b *testing.B) { benchmarkDecode(b, digits, compress, 1e5) }
+func BenchmarkDecodeDigitsCompress1e6(b *testing.B) { benchmarkDecode(b, digits, compress, 1e6) }
+func BenchmarkDecodeTwainSpeed1e4(b *testing.B) { benchmarkDecode(b, twain, speed, 1e4) }
+func BenchmarkDecodeTwainSpeed1e5(b *testing.B) { benchmarkDecode(b, twain, speed, 1e5) }
+func BenchmarkDecodeTwainSpeed1e6(b *testing.B) { benchmarkDecode(b, twain, speed, 1e6) }
+func BenchmarkDecodeTwainDefault1e4(b *testing.B) { benchmarkDecode(b, twain, default_, 1e4) }
+func BenchmarkDecodeTwainDefault1e5(b *testing.B) { benchmarkDecode(b, twain, default_, 1e5) }
+func BenchmarkDecodeTwainDefault1e6(b *testing.B) { benchmarkDecode(b, twain, default_, 1e6) }
+func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) }
+func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) }
+func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) }
+func BenchmarkDecodeRandomSpeed1e4(b *testing.B) { benchmarkDecode(b, random, speed, 1e4) }
+func BenchmarkDecodeRandomSpeed1e5(b *testing.B) { benchmarkDecode(b, random, speed, 1e5) }
+func BenchmarkDecodeRandomSpeed1e6(b *testing.B) { benchmarkDecode(b, random, speed, 1e6) }
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
new file mode 100644
index 0000000000..6ed28061b2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
@@ -0,0 +1,37 @@
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 7
+ reg8SizeMask16 = 15
+ reg8SizeMask32 = 31
+ reg8SizeMask64 = 63
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = reg8SizeMask8
+ reg16SizeMask16 = reg8SizeMask16
+ reg16SizeMask32 = reg8SizeMask32
+ reg16SizeMask64 = reg8SizeMask64
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = reg8SizeMask8
+ reg32SizeMask16 = reg8SizeMask16
+ reg32SizeMask32 = reg8SizeMask32
+ reg32SizeMask64 = reg8SizeMask64
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = reg8SizeMask8
+ reg64SizeMask16 = reg8SizeMask16
+ reg64SizeMask32 = reg8SizeMask32
+ reg64SizeMask64 = reg8SizeMask64
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = reg8SizeMask8
+ regSizeMaskUint16 = reg8SizeMask16
+ regSizeMaskUint32 = reg8SizeMask32
+ regSizeMaskUint64 = reg8SizeMask64
+)
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go
new file mode 100644
index 0000000000..1b7a2cbd79
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go
@@ -0,0 +1,40 @@
+//go:build !amd64
+// +build !amd64
+
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 0xff
+ reg8SizeMask16 = 0xff
+ reg8SizeMask32 = 0xff
+ reg8SizeMask64 = 0xff
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = 0xffff
+ reg16SizeMask16 = 0xffff
+ reg16SizeMask32 = 0xffff
+ reg16SizeMask64 = 0xffff
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = 0xffffffff
+ reg32SizeMask16 = 0xffffffff
+ reg32SizeMask32 = 0xffffffff
+ reg32SizeMask64 = 0xffffffff
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = 0xffffffffffffffff
+ reg64SizeMask16 = 0xffffffffffffffff
+ reg64SizeMask32 = 0xffffffffffffffff
+ reg64SizeMask64 = 0xffffffffffffffff
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = ^uint(0)
+ regSizeMaskUint16 = ^uint(0)
+ regSizeMaskUint32 = ^uint(0)
+ regSizeMaskUint64 = ^uint(0)
+)
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 0000000000..f3d4139ef3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,318 @@
+package flate
+
+import (
+ "io"
+ "math"
+ "sync"
+)
+
+const (
+ maxStatelessBlock = math.MaxInt16
+ // dictionary will be taken from maxStatelessBlock, so limit it.
+ maxStatelessDict = 8 << 10
+
+ slTableBits = 13
+ slTableSize = 1 << slTableBits
+ slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+ dst io.Writer
+ closed bool
+}
+
+func (s *statelessWriter) Close() error {
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ // Emit EOF block
+ return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+ err = StatelessDeflate(s.dst, p, false, nil)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+ s.dst = w
+ s.closed = false
+}
+
+// NewStatelessWriter will do compression but without maintaining any state
+// between Write calls.
+// There will be no memory kept between Write calls,
+// but compression and speed will be suboptimal.
+// Because of this, the size of actual Write calls will affect output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+ return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
+// When returning everything will be flushed.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+ var dst tokens
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
+ if eof && len(in) == 0 {
+ // Just write an EOF block.
+ // Could be faster...
+ bw.writeStoredHeader(0, true)
+ bw.flush()
+ return bw.err
+ }
+
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
+ // For subsequent loops, keep shallow dict reference to avoid alloc+copy.
+ var inDict []byte
+
+ for len(in) > 0 {
+ todo := in
+ if len(inDict) > 0 {
+ if len(todo) > maxStatelessBlock-maxStatelessDict {
+ todo = todo[:maxStatelessBlock-maxStatelessDict]
+ }
+ } else if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
+ }
+ inOrg := in
+ in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
+ // Compress
+ if len(inDict) == 0 {
+ statelessEnc(&dst, todo, int16(len(dict)))
+ } else {
+ statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+ }
+ isEof := eof && len(in) == 0
+
+ if dst.n == 0 {
+ bw.writeStoredHeader(len(uncompressed), isEof)
+ if bw.err != nil {
+ return bw.err
+ }
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+ // If we removed less than 1/16th, huffman compress the block.
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+ } else {
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ inDict = inOrg[len(uncompressed)-maxStatelessDict:]
+ dict = nil
+ dst.Reset()
+ }
+ if bw.err != nil {
+ return bw.err
+ }
+ }
+ if !eof {
+ // Align, only a stored block can do that.
+ bw.writeStoredHeader(0, false)
+ }
+ bw.flush()
+ return bw.err
+}
+
+func hashSL(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6416(b []byte, i int16) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ type tableEntry struct {
+ offset int16
+ }
+
+ var table [slTableSize]tableEntry
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = 0
+ return
+ }
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
+
+ s := startAt + 1
+ nextEmit := startAt
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int16(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3216(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashSL(cv)
+ candidate = table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit || nextS <= 0 {
+ goto emitRemainder
+ }
+
+ now := load6416(src, nextS)
+ table[nextHash] = tableEntry{offset: s}
+ nextHash = hashSL(uint32(now))
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = table[nextHash]
+ now >>= 8
+ table[nextHash] = tableEntry{offset: s}
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/fuzz/FuzzEncoding.zip b/vendor/github.com/klauspost/compress/flate/testdata/fuzz/FuzzEncoding.zip
new file mode 100644
index 0000000000..feae35f15f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/fuzz/FuzzEncoding.zip
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/fuzz/encode-raw-corpus.zip b/vendor/github.com/klauspost/compress/flate/testdata/fuzz/encode-raw-corpus.zip
new file mode 100644
index 0000000000..7b33f54fc0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/fuzz/encode-raw-corpus.zip
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect
new file mode 100644
index 0000000000..f4e27a8146
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput
new file mode 100644
index 0000000000..f4e27a8146
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden
new file mode 100644
index 0000000000..db422ca398
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in
new file mode 100644
index 0000000000..5dfddf075b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect
new file mode 100644
index 0000000000..c08165143f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect-noinput
new file mode 100644
index 0000000000..c08165143f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect
new file mode 100644
index 0000000000..c08165143f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput
new file mode 100644
index 0000000000..c08165143f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect
new file mode 100644
index 0000000000..66c76ceb23
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput
new file mode 100644
index 0000000000..66c76ceb23
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden
new file mode 100644
index 0000000000..23d8f7f98b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in
new file mode 100644
index 0000000000..efaed43431
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in
@@ -0,0 +1 @@
+3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196442881097566593344612847564823378678316527120190914564856692346034861045432664821339360726024914127372458700660631558817488152092096282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179310511854807446237996274956735188575272489122793818301194912983367336244065664308602139494639522473719070217986094370277053921717629317675238467481846766940513200056812714526356082778577134275778960917363717872146844090122495343014654958537105079227968925892354201995611212902196086403441815981362977477130996051870721134999999837297804995105973173281609631859502445945534690830264252230825334468503526193118817101000313783875288658753320838142061717766914730359825349042875546873115956286388235378759375195778185778053217122680661300192787661119590921642019893809525720106548586327886593615338182796823030195203530185296899577362259941389124972177528347913151557485724245415069595082953311686172785588907509838175463746493931925506040092770167113900984882401285836160356370766010471018194295559619894676783744944825537977472684710404753464620804668425906949129331367702898915210475216205696602405803815019351125338243003558764024749647326391419927260426992279678235478163600934172164121992458631503028618297455570674983850549458858692699569092721079750930295532116534498720275596023648066549911988183479775356636980742654252786255181841757467289097777279380008164706001614524919217321721477235014144197356854816136115735255213347574184946843852332390739414333454776241686251898356948556209921922218427255025425688767179049460165346680498862723279178608578438382796797668145410095388378636095068006422512520511739298489608412848862694560424196528502221066118630674427862203919494504712371378696095636437191728746776465757396241389086583264599581339047802759
0099465764078951269468398352595709825822620522489407726719478268482601476990902640136394437455305068203496252451749399651431429809190659250937221696461515709858387410597885959772975498930161753928468138268683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244136549762780797715691435997700129616089441694868555848406353422072225828488648158456028506016842739452267467678895252138522549954666727823986456596116354886230577456498035593634568174324112515076069479451096596094025228879710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821682998948722658804857564014270477555132379641451523746234364542858444795265867821051141354735739523113427166102135969536231442952484937187110145765403590279934403742007310578539062198387447808478489683321445713868751943506430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675142691239748940907186494231961567945208095146550225231603881930142093762137855956638937787083039069792077346722182562599661501421503068038447734549202605414665925201497442850732518666002132434088190710486331734649651453905796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007230558763176359421873125147120532928191826186125867321579198414848829164470609575270695722091756711672291098169091528017350671274858322287183520935396572512108357915136988209144421006751033467110314126711136990865851639831501970165151168517143765761835155650884909989859982387345528331635507647918535893226185489632132933089857064204675259070915481416549859461637180 \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect
new file mode 100644
index 0000000000..e4396ac6fe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect-noinput
new file mode 100644
index 0000000000..e4396ac6fe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect
new file mode 100644
index 0000000000..e4396ac6fe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput
new file mode 100644
index 0000000000..e4396ac6fe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect
new file mode 100644
index 0000000000..09dc798ee3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput
new file mode 100644
index 0000000000..e45583ee31
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden
new file mode 100644
index 0000000000..09dc798ee3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in
new file mode 100644
index 0000000000..ce038ebb5b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect
new file mode 100644
index 0000000000..09dc798ee3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect-noinput
new file mode 100644
index 0000000000..0c24742fde
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect
new file mode 100644
index 0000000000..09dc798ee3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput
new file mode 100644
index 0000000000..0c24742fde
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect
new file mode 100644
index 0000000000..881e59c9ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput
new file mode 100644
index 0000000000..881e59c9ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden
new file mode 100644
index 0000000000..9ca0eb1ce2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in
new file mode 100644
index 0000000000..fb5b1be619
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in
@@ -0,0 +1,4 @@
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ø‹–vH
+…”%€¯Âþè ë†É·ÅÞê}‹ç>ÚßÿlsÞÌçmIGH°èžò1YÞ4´[åà 0ˆ[|]o#©
+¼-#¾Ùíul™ßýpfæîÙ±žnƒYÕÔ€Y˜w‰C8ɯ02š F=gn×ržN!OÆàÔ{¥ö›kÜ*“w(ý´bÚ ç«kQC9/ ’lu>ô5ýC.÷¤uÚê›
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect
new file mode 100644
index 0000000000..881e59c9ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect-noinput
new file mode 100644
index 0000000000..881e59c9ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect
new file mode 100644
index 0000000000..881e59c9ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput
new file mode 100644
index 0000000000..881e59c9ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden
new file mode 100644
index 0000000000..47d53c89c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in
new file mode 100644
index 0000000000..8418633d2a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect
new file mode 100644
index 0000000000..9ad731f3cf
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput
new file mode 100644
index 0000000000..9ad731f3cf
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden
new file mode 100644
index 0000000000..f5133778e1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in
new file mode 100644
index 0000000000..7c7a50d158
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in
@@ -0,0 +1,2 @@
+1010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101
01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
+2323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232
32323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323 \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect
new file mode 100644
index 0000000000..7812c1c62d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect-noinput
new file mode 100644
index 0000000000..7812c1c62d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect
new file mode 100644
index 0000000000..7812c1c62d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput
new file mode 100644
index 0000000000..7812c1c62d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect
new file mode 100644
index 0000000000..486bdf6f69
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect
@@ -0,0 +1 @@
+ìÝJó0Çñ¿È!ž<(léP*( éº:§sþ覂ˆÐ”4aI¾%|SVÅÛxO&ÝU>Ÿ7¯CÄqMçu­ðd29ߨ¤xžsÈÞ·§$Ž· Qi^¨t ­wUÙÍŠù;Ó£…C…•‚«´CµZfùºÈ¥6œJ±«­výÔ¶ XdpÂj ¿(´¾œÚ]ôö^v8:K’ÓdHÉ@Ž>.¦À3SAJëÆ.3¶{®;þâ5Få’oJÒY6ϯ×Ë›ÛÕÝúþáñ©ØlŸ_÷Ýç?öÿ÷çè8¾d \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput
new file mode 100644
index 0000000000..486bdf6f69
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput
@@ -0,0 +1 @@
+ìÝJó0Çñ¿È!ž<(léP*( éº:§sþ覂ˆÐ”4aI¾%|SVÅÛxO&ÝU>Ÿ7¯CÄqMçu­ðd29ߨ¤xžsÈÞ·§$Ž· Qi^¨t ­wUÙÍŠù;Ó£…C…•‚«´CµZfùºÈ¥6œJ±«­výÔ¶ XdpÂj ¿(´¾œÚ]ôö^v8:K’ÓdHÉ@Ž>.¦À3SAJëÆ.3¶{®;þâ5Få’oJÒY6ϯ×Ë›ÛÕÝúþáñ©ØlŸ_÷Ýç?öÿ÷çè8¾d \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden
new file mode 100644
index 0000000000..ff023114bb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in
new file mode 100644
index 0000000000..cc5c3ad69d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in
@@ -0,0 +1,14 @@
+//Copyright2009ThGoAuthor.Allrightrrvd.
+//UofthiourccodigovrndbyBSD-tyl
+//licnthtcnbfoundinthLICENSEfil.
+
+pckgmin
+
+import"o"
+
+funcmin(){
+ vrb=mk([]byt,65535)
+ f,_:=o.Crt("huffmn-null-mx.in")
+ f.Writ(b)
+}
+ABCDEFGHIJKLMNOPQRSTUVXxyz!"#¤%&/?" \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect
new file mode 100644
index 0000000000..71ce3aeb75
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect-noinput
new file mode 100644
index 0000000000..71ce3aeb75
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect
new file mode 100644
index 0000000000..71ce3aeb75
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput
new file mode 100644
index 0000000000..71ce3aeb75
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect
new file mode 100644
index 0000000000..b9cc20d0eb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect
@@ -0,0 +1,4 @@
+ìÝßJó0Æñãr=ê`KÇû2AasÄ“)ˆHšþ²„¥IÉŸbï]Ökùòyž{hÂ0E{6ÿ6›[¼ÂcÀ¾dbØ;‡™"%Š#u‚³¦Á["llB
+%*‚
+Á&œÃHÑS‡v‚Äýéaòäh¾9«È'B62CI– Cñ¬G6„ç§Ãñåt„¶Žgœ R]ä™ÐKë¯mû!ÄŒ*¤êšºx5[½Äg‹QF´Ø¡—ª?>Û)Ó
+7Ûíÿí’³…^á w;„$‘d¦º2Eë^úµ/έ{ù-¬¯æ©x6SÝ.9ûåì \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput
new file mode 100644
index 0000000000..b9cc20d0eb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput
@@ -0,0 +1,4 @@
+ìÝßJó0Æñãr=ê`KÇû2AasÄ“)ˆHšþ²„¥IÉŸbï]Ökùòyž{hÂ0E{6ÿ6›[¼ÂcÀ¾dbØ;‡™"%Š#u‚³¦Á["llB
+%*‚
+Á&œÃHÑS‡v‚Äýéaòäh¾9«È'B62CI– Cñ¬G6„ç§Ãñåt„¶Žgœ R]ä™ÐKë¯mû!ÄŒ*¤êšºx5[½Äg‹QF´Ø¡—ª?>Û)Ó
+7Ûíÿí’³…^á w;„$‘d¦º2Eë^úµ/έ{ù-¬¯æ©x6SÝ.9ûåì \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden
new file mode 100644
index 0000000000..6d34c61fe0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden
@@ -0,0 +1,3 @@
+ÀAKó0ðóx¾ÃŸžZØÚñ¾LPØaÎ!‚x™âADÒöI–&#I‹EüîþšÇp]¢LÆ¿íö¯Fðp˜² 1Õ88‡h“¢$‰³ô5SÓà- ‚F66!…)v‚.ô›0„Y¢—í…ûóÃ&åÅ SÓÀÙN|d£2:åÑ
+t˜|ë‘àùéxz9Ÿ ­“š‰éªº‹£²ž‰ÉŽ×3Š
+&&=ù£²¾¬ðôšUD‹=Fu‘òã³]²¬q³ÛýßUL+½Æîö©>FQYÊÂLZÊoüäÜfTßµõEÅ´Òõ{´Yʶbúeú \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in
new file mode 100644
index 0000000000..73398b98b5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in
@@ -0,0 +1,13 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+func main() {
+ var b = make([]byte, 65535)
+ f, _ := os.Create("huffman-null-max.in")
+ f.Write(b)
+}
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect
new file mode 100644
index 0000000000..d448727c32
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect
@@ -0,0 +1 @@
+Ë_Kó0Åñëò½ê`KÇó0AasÄ›)^ˆHšþ²„¥IÉŸbß»¬—_>ç4 a˜¢=›Œ›Í-^ á1`_² 1 ìÃÌ ‘Å‘:ÁYÓà-‚F66!…A…Ž`Îa¤è©C;Aâþô°Nyr4ßœUä!™¡¤GKСøÖ#ÂóÓáør:B[G‚3Ω.òLè¥õ׶ýbFRuM]¼š­^⇳Å(#ZìÐË ÕŸí”i…›íöÿvÉÙB¯ð…»B‡H2S]™¢u/ýÚçÖ½üÖWóT¼G›©n—œýrö \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect-noinput
new file mode 100644
index 0000000000..d448727c32
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.sync.expect-noinput
@@ -0,0 +1 @@
+Ë_Kó0Åñëò½ê`KÇó0AasÄ›)^ˆHšþ²„¥IÉŸbß»¬—_>ç4 a˜¢=›Œ›Í-^ á1`_² 1 ìÃÌ ‘Å‘:ÁYÓà-‚F66!…A…Ž`Îa¤è©C;Aâþô°Nyr4ßœUä!™¡¤GKСøÖ#ÂóÓáør:B[G‚3Ω.òLè¥õ׶ýbFRuM]¼š­^⇳Å(#ZìÐË ÕŸí”i…›íöÿvÉÙB¯ð…»B‡H2S]™¢u/ýÚçÖ½üÖWóT¼G›©n—œýrö \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect
new file mode 100644
index 0000000000..d448727c32
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect
@@ -0,0 +1 @@
+Ë_Kó0Åñëò½ê`KÇó0AasÄ›)^ˆHšþ²„¥IÉŸbß»¬—_>ç4 a˜¢=›Œ›Í-^ á1`_² 1 ìÃÌ ‘Å‘:ÁYÓà-‚F66!…A…Ž`Îa¤è©C;Aâþô°Nyr4ßœUä!™¡¤GKСøÖ#ÂóÓáør:B[G‚3Ω.òLè¥õ׶ýbFRuM]¼š­^⇳Å(#ZìÐË ÕŸí”i…›íöÿvÉÙB¯ð…»B‡H2S]™¢u/ýÚçÖ½üÖWóT¼G›©n—œýrö \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput
new file mode 100644
index 0000000000..d448727c32
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput
@@ -0,0 +1 @@
+Ë_Kó0Åñëò½ê`KÇó0AasÄ›)^ˆHšþ²„¥IÉŸbß»¬—_>ç4 a˜¢=›Œ›Í-^ á1`_² 1 ìÃÌ ‘Å‘:ÁYÓà-‚F66!…A…Ž`Îa¤è©C;Aâþô°Nyr4ßœUä!™¡¤GKСøÖ#ÂóÓáør:B[G‚3Ω.òLè¥õ׶ýbFRuM]¼š­^⇳Å(#ZìÐË ÕŸí”i…›íöÿvÉÙB¯ð…»B‡H2S]™¢u/ýÚçÖ½üÖWóT¼G›©n—œýrö \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect
new file mode 100644
index 0000000000..dbe401c54c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput
new file mode 100644
index 0000000000..dbe401c54c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden
new file mode 100644
index 0000000000..5abdbaff9a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in
new file mode 100644
index 0000000000..349be0e6ec
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in
@@ -0,0 +1 @@
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect
new file mode 100644
index 0000000000..dbe401c54c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect-noinput
new file mode 100644
index 0000000000..dbe401c54c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect
new file mode 100644
index 0000000000..dbe401c54c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput
new file mode 100644
index 0000000000..dbe401c54c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput
new file mode 100644
index 0000000000..62d55e6b83
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.sync.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.sync.expect-noinput
new file mode 100644
index 0000000000..8b92d9fc20
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.sync.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput
new file mode 100644
index 0000000000..8b92d9fc20
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/regression.zip b/vendor/github.com/klauspost/compress/flate/testdata/regression.zip
new file mode 100644
index 0000000000..73cf84036d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/regression.zip
Binary files differ
diff --git a/vendor/github.com/klauspost/compress/flate/testdata/tokens.bin b/vendor/github.com/klauspost/compress/flate/testdata/tokens.bin
new file mode 100644
index 0000000000..b93c6968ab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/testdata/tokens.bin
@@ -0,0 +1,63 @@
+<mediawiki xmlns="http://www.›€€Œ.org/xml/export-0.3/"°€€†:xsi´€€”w3­€€„2001/XMLSchema-instance" xsi:s”€€„Locationó€€Î ¨€€È.xsd" version="0.3­€„:lang="en">
+ <siteinfo>
+ Ž€€ˆname>Wikipú€‚</“€€Œ¢€€†base>뀈en.¶‚€‚¨€€„Ÿ‚€„€€‚/Main_Page</¬€€„·€€†generator>MediaWiki 1.6alpha</€€Ž­€€†c¸€€‚first-letter</’€€„€€„ <€‚spaces’€€ž key="-2"退†</¬€€Œ«€€¬1">Special­€€À0" /É€€ª1">TalkÅ€€À2">User©€€À3©€€† tØ€€Æ4">Ü‚€‚”ƒ€„Ý€€À5®€€ Ê6">Image€À7ª€€ˆÚ€€Ê8µƒ€ˆ“€‚½€À9®€€ Ê10">Template À1Óƒ€‚®€€ˆ Ì2">HelpÞ€€Â3ª€€†Ú€€Ì4">Category½€Â5®€€Ž Ì00">PortŽ†€Ä101­€€Šà€€¦À€s·†€‚Òˆ€†õˆ€Š<pag˜‡€ˆ<title>AaA</Š€€†–€€†id>1</idŽ€€ˆreviÔ‰€‚€€†Ÿ€€†32899315¦€€Ž΀€„mestamp>2005-12-27T18:46:47Z ‚Ÿ€€ŠØ€€„ <contribuˆ‰€Œ€€€‚<user•Š€„Jsmethers</“€€Œ¦€€Žid>614213«€Ž쀂Ó€€ <text xml:É€„="preserve">#REDIRECT [[AAA]]</¬€€‚怀†</û€’</º‚€Š€ lgeriA。Ò‚€‚¤€„ €‚ ‚€Ç‚€¤18063769€”§‚€ŽÇ‚€„07-03T11:13:13Z ‚Ç‚€ØDocu‚€ª802ý€€–À‚€¤minor†‹€comment>adding cur_id=5: {{R from CamelCase}}Ò€€‚®€€†—‚€„ƒ€ÎÔ‚€„a]]Ô€€¦§ƒ€ämericanSamoaÉ‚€‚­ƒ€”6ˆ‚€Ž­ƒ€°9Ôƒ€Ë€‚ƒ€Œ­ƒ€˜4:1õ…€ä­ƒ€¼to°ƒ€Š6 Û‚€¦°ƒ€èÝ‚€ˆ Þ‚€„·ƒ€”ppliedEthics·ƒ€œ8·ƒ€º5898943¦€€Ž·ƒ€š2-02-25T15:43:11·ƒ€Ðip>Conø“€ˆ script</i䉀ŽІ€ÔAutomated cÔ€€ŒÁ€€‚¿†€ä´‚€† ethics]]
+³†€èccessibleComputingƒ€œ10“†€Žº†€¦‚ƒ€†º†€°3-04‚ƒ€‚22:18:38Z䀀‚艀ØAms80¬Œ€ª75„€˜˜ƒ€ÔFixing redirect¼€€‚“ƒ€äÌ‚€Œ_cÍ‚€ŠôŒ€ìd¸€ ¹€ºˆƒ€ˆꉀ‹†€ 9-22T16:02:5ˆƒ€ændre Engelsƒ€ª300Žƒ€ÞÀ…€‚ך€„É€¶da programming „›€‚uageé‚€ìnarchismï‚€ž2ÀŽø…€¤4213683–ƒ€ï‚€š6-û…€„T01:41:25ï‚€âCJames7߆€‚ì‚€¦83238«€Α€‚©’€¢耪/* „‚€Št Communism */ too many bracketsÙ€€‚›†€Ê{{Ö€€Šm}}
+'''倀Šm''' originŽŠ€„as a term of abuse ¯€„ us€€‚gainst early [[work®‡€‚class]] [[radical]]s includž€€‚the [[Digger¦€€‚of’€€ˆEnglish Revolution]] and±€€ˆsans-culotte|''Ž€€’s''È€€’FrencÇ€€–.[Ο€ˆuk.encarta.msn.com/encyclop’›€‚_761568770/£‚€Œ.html] WhilstŒ€„’‚€„is stillŒ‚€†in a pejorative wayó€‚describe ''&quot;any act thaÆ‚€ˆviolent means³€€ˆtroy退„organiz¿¢€„©‚€‚societyË€€†''&lt;ref&gt;à€Šwww.cas.sc.edu/socy/faculty/deflem/zhistorintpolý€‚င History of Intú—€‚µ£€‚al Police CoopeÞ€‚on],𔀆™ƒ€‚final protocols¥€‚–€€‚¢€†È€€–Conference¥€€‚Rome for®€€„Social Defense AÛ„€ˆ™…€Šts¾‚€†, 1898&lt;/÷€ˆ, it h©…€‚lso been taken up •€€‚ posi’ƒ€„label by self-definÅ…€‚惀ˆts.
+
+The word '''˜€€Š”†€„is [[etymology|derived„‚€„]]À€„[[Greekö‰€Œ|Greek]] ''[[Wik¶‚€‚ary:&amp;#945;‰€€ˆ57;“€€œ61“€€Š6€€Ž3±€€Š45;|”€€ŠÆ€€ö]]'' (ªƒ€†without [[÷€‚ü†€‚s (ruler, chief, king)­€€†).›ƒ€Œm§ˆ€†[[politð‡€‚ philosophy]], is¡‚€„beliefù…€†''Ù€€„s'' are unne𑀂aryñ‡€„should be abo”ˆ€‚ed, althoughÞ„€‚re·€€„differÕˆ€‚i±…€‚pretŸ…€„sß„€‚wha®‡€‚isÚ†€†°€”«„€‚referñ†€„rel„[[s†…€†move¼š€‚]]s)°€†advÆ©€‚eü€€‚ elimiž†€†耀‚authoritarian institu‚s, particul£Š€„…‚€‚[[state]].Å…€‚¼‡€˜쨀ˆƒ€‚e쨀ŽDª…€‚iЀ€„_of_±…€Šm­†€‚˜€€Š‡€„ƒ€Š] on WÈ€€Š, a€’€„ed 2006ö€€‚¼†€Š ð…€ŒÕƒ€†[[À€†¶ƒ€‚‘€€†Ûƒ€‚mostÜ€€Œts ‡Œ€‚it, does not imply [[chaos]], [[nihilism]], orÔ„€‚nomie]], but ra。 a harmoniouà†€‚anti-Í‚€”]]ò‰€Š. In pla»ˆ€†Ûƒ€„„€‚regardLJ€‚s‡ƒ€˜…€ŽstructuresÜ„€„coercive eco”€‚c²ƒ€˜Æ‚€Štsõƒ€Ž„€‚al¨„€†؃€‚ õ«€‚d upon [[¯€‚nt¼…€„s©€€„£€€‚‘€†autonom߀‚individuals, [[mutual aid£‚€‚õ…€‚[[™‰€„govüŠ€‚nce]]. Ñ™€„
+üŒ€‚e™€ŒüŒ€„•ƒ€„easily ̉€Šby™‚€†it is €ŠÜ€€‚Ôƒ€‚Ñ€†ù…€‚offer˜Š€Ž†„€„½€€„they­‡€†ve to†‡€„ truly freŒ‚€†‘ƒ€„However, ideÿ‚€‚b¿ˆ€‚how an«€Œt»‚€„ety mightí„€‚k v臀‚considerably, esœ­€†ly ‰‰€‚ r€€„tž‡€‚§ƒ€Šs; µ„€‚e‹€‚Ç€„disagreª‡€„ý€€’¦€”÷€€ˆbe brou‚€‚§€„.
+
+== Oí‘€„˜„€†predecessors ==
+
+[[Peter Kropotkin|‰€€Œ³ƒ€Šo‘€‚s, arguœˆ€‚a退‚fo™…€„corded [[ú‚€‚ory³€€‚humanÁ‚€Š was ed oµ‚€’principles.倒‘€¶®€„.쀌[[Mä„€†Aid: A Factor¿†€‚E²…€‚¥…€†ª€€†'', 1902.žˆ€”Mù‡€†thropolog„€„follow”‚€Ž»…€„ö˜€††’€‚æ‹€„v“‹€‚‚€„hunter-ga¨‚€‚er bands wƃ€‚egalµŠ€Šÿ…€‚lÊ•€‚d di…€†§€‚l«ƒ€‚rÁ‰€„umuž­€‚d we‰Œ€‚Öˆ€„decree¹€€‚wå…€‚d had eq׆€„󉀄¾„€‚resourcÈ‚€šFriedrich»€ˆ|†€€†¤ƒ€‚Freiš€€„Ê‚€’µ€’arx‰‚€‚¡‹€„Ù†€„ve/”€€‚/Ü…€‚s/1884/oØ„€„-family/indexæ’€‚ð„€Œ¦’€ˆFamiþ…€‚PrivÞŒ€‚Property,‘‚€„°Œ€‚S®Œ€„¥ƒ€884É€„Ä‹€Š
+[[þ°€„:WilliamGodwin.jpg|thumb|right|150px|£€€ˆ ¤€€†]]
+
+Æ€Št½ƒ€‚¤—€–Œ€‚ž€€†y O¦…€„sÚ‰€Š€‚Þ„€‚rray Rothbard|ˆ€€Š]] find„ˆ€attitude¢„€„[[Taoisº’€‚Æ”€„[[H¡†€†•‚€‚China|AnciÕ‡€‚€€„¬Ž€–•€žŸ–€ˆ(Toronto)¶ƒ€‚က†§€„€ˆ.''𬀂pril 14]] [[2002ˆ€€‚܃€toxicpop.co.uk/library/t΀€„Áƒ€„T¡€€ˆ mirror¾€€–geocitiesÚ˜€„SoHo/5705¿€€‚anɘ€„ Vanity site€€Š΃€’‹€€‚–€Šà‚€°, ü‚€†뀂œ„€††€’lewrockwellˆ€„r˜ƒ€ˆ/aׂ€†-chinese‘€†Ñ‚€†˜€€„ð‚€„ese LiberÒ‘€ˆTÈ›€‚­ˆ€„退†¬ˆ€‚an extrË™€‚À„…€¢mise¥†€†journals/jls/9_2ƒ€€‚_3.pdf”˜€‚cept‘†€ŒRol º˜€‚llecÀ‰€‚š…€‚¢˜€ŠChange Toward Laissez Fairež€Ò€‚Jô€€†Ì€€‚Í€’Studies, 9 (2) Fall 1990Ô†€” 銀´ found similarþ€ˆØ…€„stoicism|ˆ€€„Û„€‚[ZenòŽ€‚ CitiumÓ€‚Accord¢Š€„oÇŠ€Ž, Zeno‰“€ˆrepudi„„ˆ€‚omnipotºš€Š’€€‚s—ˆ€‚ý™€‚ù€€‚tÙ²€‚n¢ƒ€‚ñ…€„regi¿•€‚­‡€„«ƒ€‚Ü€‚oclaimЀ€ˆs䀂eigntyŸ‚€‚’€€‚moral law€€Š°‘€Ž“€†. ‰€’ô…€b¹‹€‚crayonö…€„îµ€‚.jsp½†€Œbritt1910À†€‚l‡€ˆ”€‚, wš€€‚en by ð‚€˜㜀ˆEžŸ€ˆaŸŸ€‚ Britannica, 1910]´ƒ€’À›€†[[AnabaptistÄ™€‚of 16th century Eu㊀‚½”€„some𩀄Œ‘€Œž‚€‚ñ‘€„religi·“€‚Í€‚runn¬˜€‚of modernÚ‰€Œm. [[Bertrå‚€‚Russellž‡€‚in û˜€‚''Û‰€West¼€€‚PŸš€Œʆ€‚writes®Ž€†ˆƒ€‚¾€Žs„€ž­…€‚sincñ€‚ey helÕƒ€‚¾€€ˆgoodœ¤€‚ will be gui‡‘€‚at ­“€‚y mo›„€‚¬”€‚[[the Holy Spirit]]...[f]•‹€‚ºš€„preŇ€‚—€‚y arrive a¤œ€‚‚¥€„²€€„.¡„€†Œƒ€‚»‰€Œ”‚€š|œ‚€›€€Š½‰€Žÿˆ€Šºœ€ŽÒ€€†'' in ''AŸ¡€’Ç‚€žä…€†Œ†€‚connecŠ†€„È”€„‡˜€Ž ƒ€‚ù’€‚al circum«Å€†sÀ‰€†º‚€‚Ê¥€‚iest …®€„‰„€‚•€€‚¹¹€„nt da®ƒ€„1945«ˆ€šÎ¥€ˆ (True Levº‰€‚rs)|‡‰€‚œ€€ˆ˜€‚rЃ€ˆ¦€€–逆”’€†׊€‚¬œ€„à‚€Štic€Œ dur¶ˆ€„„¥€‚ime…€‚‹€€‚¼¦€ŽCivil War‚…€‚ð€‚¤š€‚é–€Š¡˜€†ô…€‚®š€‚×…€º̃€’쇀zpub净„notes/aan-ß•€‚Ù‡€Š½…€‚܇€„t Timeline],á‚€†ɇ€²94„Ó‘€Œ
+InÙ€ˆˆeraÒ€‚“€€‚ƒ©€†toˆ€„€€‚§€„to矀„Þ€„thing›—€†—†€‚n ™€„î–€„[[Louis-Armand de Lom d'Arce de Lahontan, Baron €€„’€€ˆ|¹€€’Ÿ€€ ]]ô‡€ŽNouveaux voyages dݧ€‚l'Amérique septentr㦀„eØ„€‚(1703), w©–€‚ he½¨€Œ܇€‚䃀‚NÛ¨€†ò·€Šø‹€‚ÿ€„Uni¨Œ€‚©”€„s|indigenº‰€‚©€€ŠÆž€ŽÝ€€‚󕀂had noÄŒ€Šlaw«¡€‚ris´¡€†r„†€‚sâ–€„p˜•€ˆp´Š€‚rÀ€€‚as be󢀆…Š€ˆyºƒ€‚÷‡€Šª„€ˆeµ°€‚.lib.virginia©€„cgi-local/DHI/dhi.cgi?id=dv1-12 Dic©¦€ˆÛ…€Šö‡€I玀„- ANARCHISM]ø€€‚³„€Šˆ‹€‚∀ˆ Means¯„€‚—¥€‚lÊ‘€ŽѬ€ˆleader¢‹€‚Ø‚€†§‚€Š Ind。Mÿ£€Œ,Û¨€„repe‹€‚ly±‚€†ƒ€‚at hŠ€„½‡€†ò€’ù†€†soü†€„°‹€‚[his³–€‚cestors­Š€ˆä…€„1793¬Œ€„‰€‚꺀‚kø€Š¢™€‚È­€œÛ¢€‚×€š pub¤¦€† ''An EnquiryÙ’€†rning«€„š§€†Justice‹–€‚©ƒ€ˆweb.bilkent.edu.tr/On·‡€‚ˬ€„e•¯€†.upennƒ€„jlynch/Frank/ǘ€†/pjtpÛ€„]. A§§€Š›€€† did noí­€„䀄઀ˆÀŽ€Œú…€‚˱€„Þ›€‚rÒ„€ˆ™€„havˆ€‚ᣀˆ€„book…€‚­‚€‚‡ˆ€†majoµ€€ ÷„€‚€ƒ€†ÿ€€ˆ­€€ˆ¦ƒ€†¸“€„eƒž€„錀Œ²©€„À€€Šmœƒ€ˆ Buµ™€‚õ€€†point no¥€€Œt’‹€Žyet exist£©€„®±€ˆ‘‰€„『tï‚€‚쟀‚known mainly˜€‚¯¨€†ult hurlŸ‹€†¬€‚[[bourgeoi¶Œ€‚[[GirondiÓ…€‚”€‚mà €„ݲ€† el«…€„ˆ€Œ·„€ŒrȲ€.
+
+==T´‚€Œ‚®€„®€„ô€€‚Õ€Œ==Ìœ€ŒPierre_Joseph_ProudhoÕœ€†110px|thumb|left|«€€† «€€†€‚«€€„]]
+{{ma¥ˆ€‚rticles|[[Ö€€†-ª€€œ¹‚€„‚î—€‚¸›€‚(õ£€Š‚€‚ory)]]}}
+
+Itý£€‚›Ž€‚o‚€‚ù‘€Žit wasn't until¿—€‚•€„ꀀ ®†€’[[Wɇ€‚isùž€Œ?”®€„in 1840½Œ€‚ƒ„€‚у€ˆÄ€†®‚€ŒŽ€€†؃€„adop—ˆ€‚Óƒ€‚Ý‚€†¼‹€†pˆ‘€‚. I „ì±€†iψ€‚asoò‡€‚až¦€‚me š—€„¹‚€ŒÅ€€‚󀀂¬…€…Ž€ˆú€€ŒŸ€€‚or•ª€†À¤Ì€€nsw»•€‚’€„Ö€€„a©€„Ó¢€‚ÏŸ€†×€ˆ[[ƒ‚€Š¬€‚thef”€‚䀈I®€‚is â¡€‚›€‚oppo‚ª€‚Ѐ€‚ª³€‚ò­€ˆ¼‰€‚•£€Š¹€€†¸Œ€Š€€† (propriété뀌own¡€‚„¢¿€‚lete right„“€„º€€†¿ˆ€‚߆€‚ˆ¹€„’€€†ý€€‚i£€‚¶€ˆꇀ†Á¨€‚sh, such as exploiõ¿€‚ûˆ€‚k退‚‰ƒ€‚profitæ‘€Š éÍ€‚=Ö€€†pÁ…€ˆ-Ø€€‚’€€†ⶀ„Ò„€¨|ñ…€Ž, õ„€”Þ•€Ž슀ŒïÚ€‚¿¤€’…±€„×›€‚/subject/ø…€Šs/‡€Šˆ€€‚üŽ€„/ch03.htm…ž€‚p­Š€‚3. L£¦€„û€ˆ effi⟀„ ca±‚€‚¡”€ˆdo‰‡€„ö‰€‚Ë€€ˆ¬ž€Œœ“€†¿Ÿ€Œ†€¢ƒ‚€†¨‹€‚”†€‚±‚€‚·€‚“‚€‚—„€‚õ–€‚™¯€†ˆ‚€Š suppor†€‚Ѭ€„he caꈀ„'possesߧ€‚' -✀s can„€†limiº€€‚·¥€„€„€„È€‚Ƨ€Œ, capit㧀‚nd홀„ú‰€„aÔž€„㎀‚é…€†Àª€Žõ€‚”¨€„ö¢€‚³€€‚jƒŽ€†‚¬€‚±€Š's õ¨€Ž冀†«’€Ž½€Œµ¯€Šº€‚] (¯€‚ellisme), invol•¸€‚—€‚xc¦¡€†̃€†–¯€‚Ó“€‚ˆ€‚groups cíµ€„trad¡Ž€†produc’¢€Œir‡ª€„r usÃ…€‚''€€†ÿ–€„''Г€ˆre¦™€ˆœ‘€„e amount of±‡€„·€€‚°˜€„¤€Œin怀Š ‰€„TÛ‡€†‡€‚ensureœ‰€†¹¡€‚n²€‚˜€€‚°†€†®„€† ‚”€†ofè–€†s. W݆€ˆÏ€†«°€‚ly join toge¤€€‚ö€€‚co-Í…€‚Ë•€†œ€‚shopž·€„Å·€†est-»€€‚ bankú€€ˆb¯–€‚t upæ—€‚provideÍ€†ž€„°°€‚𫀎›€‚Š˜€‚¿‚€„䀒샀²£€†ñš€„influentialÉ€€„in ø’€ˆœ€‚¹‚€‚ÑÁ€„ñ€Œí¯€‚꬀‚iƈ€‚llowЈ€‚É€€„acŸ—€„Ì“€ˆ[[R€€Œ¥‚€‚1848¢˜€†º’€‚ce.ý„€”‘€Œø•€„¤‡€Šˆ‹€‚Š€†x:  deve®Á€„Ê€€‚a numbБ€†®€‚s overœ€„lifÜ¡€„Œ’€†ý‚€‚ô¡€†´º€´€„ofÀ€€„£‚€„‰€‚Forò€†detail£¯€„scuŇ€„ seꀂ쎀¦|õ…€‚]].''
+
+==Max Stirner's EgѪ€‚==¤€ ¨€€™€Œ®€€†„€ˆ嚀ŠÝž€‚Egoƒ€„Its Own'' Š“³€†ù€‚±’€„st¶€Ž«„€‚®€„Ž €ˆÀ€ߘ€‚‡®€–noÌ €„ofàš€†§š€‚¿…€‚†a¥‰€†, natur¢¸€‚Ž€€‚‰€„ï〆lþ‚€Œ ®…€‚Í€€û€€‚—¶€‚-¼„€†m„€€‚illuÕ‚€‚–´€‚ ''ghostsŽ‘€†Õ…€„iøœ€‚say•…€‚€€¬‡€†¥‹€†§€€‚爀”°¶€‚t󀄀Š€‚Ï€ŠHeݹ€Œd eÜ‚€„Æ‚€„a›Ž€‚mÜ€€‚amorꉀ„ž™€„刀†Ø€€’™‡€†uûœ€‚‰…€‚'û¹€ù†€„Ô€€„ts' ©“€„whe©Œ€‚¬’€„ †€†ir†•€„ô‡€ŒÚ‡€‚do so. For himë‚€sä¼€†come鸀Š–µ€‚À€‚ظ€„:þ€ˆWho奀‚é–€„sŒ¹€„to‘Ä€„,ˆ€€‚defe‚€‚¤‚€‚釀‚g•€€„him¶µ€‚ong𥀂܃€†§‚€ŠA®€€‚Ù‚€†WÈ„€‚I¥‘€†in my powžÂ€‚á„€„çÀ€‚yÉ‘€‚. So long’€‚I’¼€‚ert my쀄a“€‚lder, I aÇŠ€†ò€€‚rieš·€ˆš€Œ€ˆ
+
+³†€ˆ nØ€„ÈŽ€ˆhimЀ€†””€’-«€‚Ü…€Œ÷‚€„΀€‚è—€„ 'ƒ€†'. N¡‚€‚theless,ꇀŽ‡…€†ŽŠ€’o«™€‚nyöƒ€¥€„¥»€‚-¤†€‚ô¼€„­Ž€†ꃀ‚£Ã€’­€„À–瀀‚ÁÀ†º¿€†divers†Š€‚==ÅÙ€Œ退”Ú€€Œ¦ˆ€‚˜™€ŠBenjaminTuckeræµ€ീ†™€„ü¨€‚¥€€† ¦€€†]]™€¤‚’¿€†Ò›€‚È€ŠÛ€‚³‡€‚— €–€¨Ž‰€Œ1825 [[Josiah Warreë €‚墀‚Ä€†ip膀„Œš€‚¨Â€‚ÌÏ€†Þ²€†]] experiÅ›€‚ hea쀂œ«€„Robº„€‚Owe¤€€‚ö‚€‚쀄New Harmon§Ã€‚™‡€ˆf˜‹€†à€€„few years amidst muc’ƒ€ˆÏË€‚conflict.¨€ˆ blam¨Š€„ý–€„Ö«€‚ty'sÒ€€„‰€‚o挀‚⯀‚•ˆ€‚[[Œ„€ŽÅ°€’¨‹€ˆ¨€€Ž¸¤€š. ŠŠ±€‚eedš‹€‚oþ½€ˆseƒ‚€ŽØ“€„耊©‚€Šies耈˜À€ˆˆ¼€‚‡€‚³€‚Ú”€†¤‡€†Ä€‚ܱ€ Àƒ€Ž¦€€†Åž€‚[[Utopia (žy)|œ€€†×€ˆ[[M÷¯€†T™¬€‚ij€‚õƒ€„33Ô€ŠwrotŸ™€†Þœ€’ €‚Peaceful Ž€ŽistÌ–€‚Ѐ†òŽ€„ø•€„Ñ“€‚”‚€„b‰€‚«Ÿ€Šœ€Žperiodÿ†€‚û‘€„退Ž. ï…€˜£Œ€‚톀‚atŸ€Šö€†ƒ¡€‚Ù€€Žman‚ƒ€‚íó€‚倂¼€‚forç¾€†¦€€„doctrin™€‚wÑŠ€„š€„Ϻ€†ª¢€(''඀†y'' XIV (Dece–‘€‚­®€‚00):1)Í€‚”‡€œ becamƒ€‚÷“€†¡†€ˆýƒ€ˆŠ€‚á‹€ˆeetò€‚߆€”¯€„¦€ŽB. ò΀‚nÝ€‚ediÏ€€‚Ô€‚‘ƒ€’¨€Ž“­€„AugustÔ¾€‚1Š‚€‚¼€ˆ908;¬€‚ide÷Œ€„¬Å€†Ÿƒ€˜n¬°€‚Û„€Žist-½€Š®ƒ€žiss‘€‚ý€†õÀ€‚ئ€‚°Ð€Œ. «‰€ˆ'ò“€‚“º€„ö€ˆ߀€”‚€incorpora瀂Õƒ€‚°“€„¤‡€‚aáÆ€‚Ü€„þ›€†orists: ‚€ˆ'Ú“€ˆ؇€‚Ñ™€|Ι€‚alö–€„¡Þ€„;Ÿˆ€ˆ¯€€–cÉ’€‚퀀‚¦›€„à€€‚price|—€€„€Ÿ€†š€€˜ªš€‚õ‰€‚heterodox•š€ˆics|“€€Œ]]³€’[[l。½¯€‚æ¡€‚倀‚value]]);픀‚˜ž€ˆ]]'s marke—‚€»»€‚Á”€”'õ¯€‚‘€„mÔ€‚aÑ€‚[[HerÝŠ€„Spenc¦€€ˆ¤†€†‚º€ˆ²œ€„Å™€„dom™€€†£ƒ€ŒÓÌ€‚ong¬€‚逌“‚€‚ªƒ€Ž'sŠ”€†Õ„€‚·†€‚€€‚‹™€ˆÍ€‚ŒŽ€‚ã‘€‚eÛ›€†¿Ÿ€†䀀†[[󮀚€Ÿ€ŠÊ”€†àÆ€†È„€†a’»€Š¾¡€’tÆ„€„-pa󢀈§Ÿ€‚ü†€œ|ì„€†²¡€‚–‡€Šê—€‚±€€†­¡€’Ü»€¼††€€ˆ37ž¡€„“¡€‚”«€‚–—€†Pay €Œ⮀„󎀆•¿€ˆy: Sel²·€†s F‹·€ŠWr²£€„Ï…€„¡€Š R.Ѐˆ,Æ€‚guard Press, •Ž€‚Yorkòˆ€‚26, Kraus Re‡ €‚¾Þ€‚., Millwood, NY¦€€‚73.Ë¡€’[[Ê„€ˆž…€†Ò€‚§¤€‚—Ÿ€‚üˆ€‚þ¥€„«“€‚û—€‚∀„à˜€’øŽ€„¤Î€Ê€€ˆsystem¡€„œÎ€‚…„€‚Á°€„´”€‚e abun²¡€†of 뀂pet耄Ü–€ˆèÔ€Šº„€„fÅ—€‚Ò‚€‚ Ÿ€’Ý°€‚÷›€‚ƒ€‚receiv‹ˆ€ˆfull³†€†Û€€‚î–€†é„€‚rõ¡€‚Oœ€‚ 19•¾€¹…€ŽÉ“€‚Ûˆ€‚ludLysa³®€„Spoon²„€†[[StepÒ—€‚P­¹€‚¥–€‚rew¼Ê€„¡€‚[[ViêÌ€„Yarrosþ¬€Fïý€‚ÛÀ„rna适alÛ“€’akunin»€‚鬀„㬀†Ø“€†͆€„Ù“€‚£€€†|Mikhail €€ˆ 1814-1876ë“€¬ó€€ª¡€„ingm݉€„AµÓ€’Ü€‚ဌÍ€„M¯§€„ì“€ŽÀÀ€†™´€‚rsh reöŸ€‚onŒ €Šþ‚€†û®€ŽÀ…€„ûŸ€‚. Twentyš“€ˆí±€†¸€„64„€„•€‚£€Ê´€‚ÜŸ€‚¶¦€‚–”€„À€€‚'Ý‚€ ',¢›€†뀂mô–€ŒÀˆan¨€󶀂currenÿƒ€Œö…€‚׊€Œ. 
DâÑ€‚oÍœ€„genuiné‹€‚nks”€€‚„‚€‚÷ဂ‚¤€ˆ±½€Š×€–ô€ˆsign«©€‚ant“…€‚ø‡€Œstarœ“€‚Karlû‚€„]] ·‘€‚a¶·€„’€‚fig—•€‚ÈŽ€ˆÜ€€”:”€‚­€€‚ш€„Œƒ€‚o eøž€„sucõ”€‚¿€€‚Gene«Ÿ€‚Counci È€„¾‚€‚À€. ÂÈ€„­²€†o˜«€„÷×€„toŽ„€„ ¶‘€„Á€„¹€€‚¸±€|±€Št«³€‚who©®€ŒÊ•€ˆÔ„€ˆ…ˆ€‚Â’€‚ ShortŽ´€‚fЫ€‚[[Ø…€˜€•€ˆ°Œ€‚á„€ˆrsÔ¦€„Œ€†1868̈€†üƒ€ ê‚€Špolari®¯€‚into two camps,˜‰€„ä‚€„é…€„ˆ€ŠÆ€†irì–€ŠÙƒ€‚ô‚€†ž™€‚s®¨€‚³±€‚earÿ‘€‚Ф€†퀂 betw‚〄Ÿ€€‚က‚‹ƒ€„ëÈ€‚±Ž€‚Òÿ€‚y¼€€†Õ„€ŠäÚ€‚rý”€„û€€Šfavoured (inœÊ€Ž'ï €‚rds)¤Ž€ˆ‘ð€†©‹€ˆ‘–€„åÛ€‚ggle—逌¥¬€ˆÀ¡€„怂ÿŠ€‚蔀„²à€Œ¦€„ŸÄ€Žpar¿Ò€‚eÊÛ€ˆgi¿Ê€†‡Ÿ€ˆ A¿‹€‚Ž´€‚ù€‚…„€„뉀‚˜ƒ€–focÕ瀄on΀€šÅ‚€‚ity.
+
+逊cha¿Î€‚e™ƒ€†Ѐ€‚é’€Œa²‘€‚×à€”˜‰€‚‚¤Ù€‚iÙ…€†‹€‚iì“€‚º€€‚𔀄a•¥€‚þ€‚ž€€‚ož¡€†®‡€„𽀆€‚È«€‚endʪ€‚È¿€‚a²µ€„®ƒ€‚[[rulØü€„¿ë€ˆÚÈ€†a“˜€‚¶Ú€†Ø€€‚st²Ž€„ì¿€Š¬…€š|Û€ˆ­€‚ ‹€ˆÕ€¦lñŠ‚cyc¯Ì€†hp/adpage.phpÀ€‚1969 S¿Í€‚®†€ˆ‘‹€†´±€Ž„€‚ƒ±€’1872±€‚Ü€Œ climax¦œ€‚¬€„§é€ˆspli¢Û€‚¦…€Žtwo‡¯€ŠÛƒ€‚‡‹€„Hague CongÛ€‚ (1872)|•€€–ƒœ€‚µ®€„iá‹€‚ÂÍ€‚cµ±€„΂€ˆÿ×€†Úˆ€ŠÒŒ€Žt_͈€Ž_to_Œ´€†m|”Ÿ€Š†€Œ¡†€’nž„€†®†€‚ö€€‚ªŠ€ˆñ¤€‚ÊË€†µ€‚˹€‚Åꀈdemocracy|‘§€„€€Œt‘Ñ€‚ò怂n©À€‚[[´›€†ãÓ€†©€€†‰€‚®Â€”''“Œ€Ž𨀈¥€€„˜„€„d¤€‚nЕ€‚ÄŸ€†®†€„»Œ€’Ç•€‚lòÆ€‚…€‚of œ£€‚-w»Ä€†é€s|'€€–'.]]–¤€„‰œ€ˆ¿“€‚—¡€„‰¤€„°¬€†|œ€€ŽÙŠ€Œ}}¦¤€ŒÌЀ„¾ˆ€ŒÍ€¨¤€€„䈀ŽŽÚ€‚Ú˜€Š¤‚€„Í…€ˆ bothÇ‹€Œ‚š€‚Œ€ˆæ…€‚µŒ€ŒÕ€‚tí„€†Ü‹€Œ¡â€Ž숀†1870±™€‚…ð€‚Û€Š¬Ž€†d awayÒŒ€†ö€€ˆ's‰•€ˆ„¾€„i•›€„ (Ï–€†co¢Ö€„±Þ€‚m‘€€†)¸€„embracÑý€„ª€†¤‚€‚Óœ€„s.Ë‚€ŒtÈÇ€‚å〄¾€†³²€ ‰ë€£»€‚Ï€€‚“×€„iveµÝ€‚È„€‚†‰€„…·€†b߀‚s݈†ëÌ€†need,£è€„û”€ˆÙ˜€Šnefac.net/node/157ŠÝ€„ûÍ€ˆ›‚€ŒÚƒ€Œ¸¬€†¶À€ˆDéjacqueø€†¤€†perÓ¾€„ä«€‚‘€‚be£ª€ŽÚš€ŽÜ…€ò…€ªÇ‚€†±ô€ŒÇ。ó€†onbooks²‰€„bleed/ÑÍ€ŠšÕ€‚/De”€†¤€†΀‚×Û€„ªŠ€ŠíÆ€ˆjoseph.d¬€€ˆ.éµ€‚.fr/ecrits/lettreapjpÀ€€‚ De l'être-¹ä€‚õ¬€‚âle et femelle - L³€€„ à P.Jåµ€ŽŽŒ€‚±‚€œ] (§Ù€„áµ€ˆ¤û€Š|€€†]])ØŠ€”Unlike·¶€Œ,Ü’€‚ô™€’ª‚€†í耆䃀‚‡‰€‚䀮ÍŸ€‚⊀ˆ¦”€†ÛÊ€„Á²€ˆ tošì€†®¯€‚he ̈€‚sfaö’€„ºˆ€‚퉀‚°ž€ˆÔ„€‚º‘€‚hat܆€‚ ma®é€‚µ€€‚ir‡³€†e³€Šå±€‚nnounce©€†ø¡€†¦Ï€ˆUS pub…£€‚ed ¢Ý€ˆ Le—€ˆaireú‹€‚58-1861).
+
+PÁ¡€‚¯ˆ€Ž,ø‹€ˆsee³’€Š–뀆imp¬‚n‘‚€„ü¢€„, outl˜€„†€‚ׇ€Œ€Œ”’€‚ConqueªŠ€†Bre›€‚nd Fiel½ç€†ˆª€‚•€€‚À™€‚¸º€ˆHe fel«†€‚-œ€‚…‹€„•€‚¥È€„beneå–€„l©‡€‚ýº€‚䜀Œ,î´€†—“€„™”€‚õ€†×–€„Ä•€†·è€°˜‚€‚97). Subsequº­€‚µ‡€ ê‚€‚Èœ€„ Emma Goldô¨€‚À€‚Alexל€†Berkman. M—Š€‚©Š€ˆÌ€€†o-syndß“€‚‚ˆ€‚Ô˜€Ž(õ¸€‚²³€‚w) saw­€€ˆ£€€‚ð‰€Š•õ€„ö€†¾—€ˆve. Isaac Puent†¦€‚1932Ùþ€‚ÙŒ€„o÷ƒ€Šrio¶•€„ÓÇ€ŠbŲ€†SpanŸý€‚CNT샀‚Ÿ€‚manifesto¨ €„ëø€„t-Ñš€–׎€‚¬î€‚
+
+Sÿš€‚¨€Œs½Ž€‚liked mergÚ‹€‚º€Ž²‘€† Œ€ˆm. Sç…€‚§¹€„»¢€Šꀂ €€ŠÁŒ€‚aintaÞ„€„ㆀ„¶ö€„º†€Š‰¯€šÜ€„®‡€‚Ô©€„s…Õ€‚ñ€€†œŠ€†y. 𻀂exaˆÇ€‚,ü¢€Žù¢€Šw‰þ€†ŽÀ€‚ess¹€‚¦˜€ˆ…‚€„󎀌僀„߆€ˆ¤€€‚÷…€‚÷‡€‚‰Š€ŠŠÆ€‚zeteticÓŠ€†mac±à€‚debates/apx1pubsˆŠ€‚l],Ö€¨‚€Š‚‚€Ž타„‰€†pseudo†«€Œ–Ž€ˆ.׉€‚§¦€¸/¨¦€‚ö€‚ŠÌ€‚agandaùˆ€‚ûƒ€‚deedœÎ€JohannØ€„Ö‘€‚¥¡€ˆ±¡€†[[¡€€†À¾€Šn‚ˆ€‚spokº­€‚úº€ˆ߀€‚€€†¦ª€‚Ù‘€Šÿ€€¦þÍ€‚󑀌¬Ê€ˆÿˆ€†õý€„ïÆ€‚rayÈ€‚íØ€„ger‚Ø€‚ꆀ‚က†±÷€‚u˜¥€‚i¹Î€‚²Ÿ€‚»À€highîÉ€‚filô€‚—€‚t³™€‚sü»€‚Á‡€‚Áƒ€‚[[riot]]s£¢€‚assaýƒ€‚„“€€ˆËÑ€‚rre´‹€„’€€„ý€€‚[[te¨ê€‚¢€„̆€‚ˆ¡€„Ÿƒ€Š‘€‚™†€„[[¸†€Ž]]a艀„ý‚€ˆª¢€‚­¥€–encouraged¤€„¨€€‚Ç›€Ž¾€†ce, ôŸ€‚‚Ì€„ÏÒ€‚mb]]Ш€„”€‚Ôƒ€‚ÀžÇ€€‚§­€‚adì•€„î¿€‚|€€‚˜€€„Ž€€„íÿ€‚o furtº«€‚Ä€Š¢‡€‚退„ù€„›ƒ€ˆ怂ªœ€‚—’€‚en§£€Š'Þ«€‚Þ„€ŒÚˆ€ˆÞ„€‚]]'†¢€‚One…€‚Ÿ€‚¿‹€„³„€žŒ€„ý—€„º‹€†gЇ€†󄀘¦‡€‚o said—†€ˆÑ€€‚§Õ€„Ƀ€‚–©€Œ¡ä€†quickeφ€„dÁ€†ÜÔ€ˆly¼Ÿ€„‡³€‚w£ç€„Å€€‚anÝû€„øƒ€„«€‚‰Š€‚ô€‚n¹¤€‚”š€‚ereÜà€‚Ö×€‚ssacøú€‚Ò€ˆenem؃€people mus÷™€‚âÇ€„â‘€‚ð€„€ˆ{{fact}} 톀‚'€«€‚¶ÿ€‚‡³€‚æ߀‚o¨ƒ€„ß„€Œ, dynamite, earŽŠ€‚Û¿€‚´€‚åü€‚kerŒ‚€ˆD¦€€‚¾‡€‚Ž€€†
+
+ª—€Œ›ª€‚ØÛ€„noë—€„nsensuÍ£€‚Þ”€‚ݱ€‚egitimacy‘‘€‚uti×€‚ဂø„€ŠÄ€‚ýÀˆ.У€¬[[ErricϤ€‚ß…€‚sta´ƒ€‚ÇŒ€‚ùŠ€ŒÓ¸€†Ø€€’†¦€„«‚˜Ã逌desirableÅ€€‚…€†ݧ€–setýƒ€‚s.ÜÙ€Žè’€‚me §…€‚ù€„y dºà€‚¯’€„Ù†€ŠõŒ€Žé‚€Ž. (·€ŒÇ‚€†On V»ˆ€„c Ó€ŠЄ€‚Ι€ˆ‚Ä€†›ª€„fŸ˜€„NechaevØ’€‚­€†Ά€Šõˆ€‚Ö€ŽidentifiÙ‰€†¥€‚÷›€‚o-pacif¬€€‚|‰€€ŠÀ€€Ë‚€‚䊀Š°Œ€„ÓÔ€†[[nonv¢€ˆÊž€‚[[Leo Tolsto;€ˆoseË€’iƒ‚€‚å“€‚vieÝ«€‚‚ƒ€„¦º€‚˜ƒ€‚[[ChÛ¶€‚´à€‚—€†Áž€‚“€€ ˆˆ„µ‘€þ»€„Ê€€„not³ƒ€„Á†€ŠÖ€€†¦€ŒtÜŽ€‚i©é€†Ç®€ˆ„Œ€Š¸€‚…€†«¶€†ž’€ŠÓÊ€„see£þ€‚|¬€€†Ç’€’¥ž€‚É€ŒFlaÉÈ€„¦€€ˆ î’€Žm.sv¯€Œ75pxË逄red-and-bÀ€„flag, comÓˆ€‚º€„Ñ„€‚É¿€†„„€‚Ù€€‚–‚€Š‹”€„Ÿ€€„®€˜ ˆ„º¥€‚§†ŒÒž€Š¼£€Œñƒ€ˆœ€’«Ÿ€‚Õ¢€Š™€€”Ž™±€‚y 20å±€ÿ€‚«€‚㥀„°‚€ŒÀ—€‚–€€„Á€‚Š¿¨€Å…€„Ù€‚‚Œ€„¥€€‚ˆ¦€‚ñÙ€‚™‚‚Ó€€‚—Ç€‚צ‚©œ€‚鮀‚󓀆Ô—€„•ƒ€Š pursuѲ€„indö–€‚“—€‚±Œ€†«Ž€„®Ù€Š[[gŸ­€ˆ£€€‚k³†„©„€„prim™©€‚ö‹€Š°–€ˆ‚€žØŸ€’훀„¢€€†°’€‚º’€Šº¡€„“ª€‚ghŠ”€„Ç´€‚›€€Š¾€€˜õ‚€’
+
+A¥­€„Œ‚€‚[[PÔ¬€‚Π€ˆe|1871†Õ€ˆÂÍ€‚]] Ôœ€† €ˆ¯…€‚«ƒ„rgedÕ€‚õÒ€„cƒ€‚Í€€‚''Bourses„뀂TravailÝÕ€‚ù”‚ÆŽë‚€†s°§€Œî ¶Ö€„unioã‹‚š¦€’v— Ý€€„[[Confédér·€†GénŒ€€‚le duð€€Š©½€‚œ°€ŽnfedΚ€Šofñš€„, CGT)¹‡€„‚ˆ€‚¶š€†1895¡ƒ€‚Æ€‚‹¡€†“倒¶…€’™€Ž.ÿ‹€‚mì’€‚Pataud寀ˆ”€€Œougetº½€„ô€‚¤‚€‚™Œ€‚က‚CGT saw¢¾€‚®™€ŠÖ›€†Å™€†]] Ô€Š´€€„Ô²€‚û¾€‚Ž€ˆ»„€‚Ò„€„. Ƀ€†191ý´€†×€€‚³‚€‚ˆ¥€’·„€†Á€’mÔ€„£Ÿ€ˆappeßú€†[[BolshevÙ„€„. úƒ€ˆ-styleÁ„€’º‚€„aû³€„ific¢ž€‚ô€€‚ô®€‚ª…€‚Ú¶€†é…€‚­ €‚o 1921Å”€†remß™€†¼€€®Spainþ〈©‚€‚mid¬ó€‚0Ù“Š߶€‚ø†€Œ̓€‚É„€‚¾€‚he Worl¶‚(IWW),ù ½†€„1905š€‚¥€€‚US¨‰„o⯀„LJ€”ÿ„€„û‹€‚ˆ€€ŠЃ€ˆsᆀ‚t‚ƒ€¤߀‚ush¡¯€‚È¿€‚Ûˆ€„‘Ï€‚²œ€ŒâÈ€„923 100,000 mÓÆ€„ì’€‚µ›€‚dÖ±€†˜€„…Á€ˆÍ€‚£Ú€†3²€€†‚‰€‚€‚匀‚—‹€‚licië´€„ÑŠ€„—¡€„毀„À«€†„‰‚by rank󂀄Œ˜€„̬€Œ,Õ¨€‚odyׄ€‚a s¯ø€„ð€€‚Ÿ€Žûœ€†hœž€‚nspi¤“€„ÒŸ€‚AnglophªÛ€‚Š„€Žð–„­«€ŠCNT_tu_votar_y_ellos_decû€‚⚀„‡€’€Ÿ€‚րㅀ„¸Ç€†2004. Reads: DoŽè€‚let。Ÿ˜€Ši—Ü€‚Ö•‚ ô€‚lives/ You voÍ‹€‚Ј€‚…’€†cide/À€ˆaæÛ€‚ it/êò€„y, Aè—€„, Self-managÂ…€„•€„§ €Šã‚€Œ¢‰€’ Óˆ€ŽïÒ€ˆLj€Ž¥€‚ ¬€‚'s,õ„€‚0€„äý€‚Ń€‚ÈŒ€‚ÌŽ‚Ź€‚ssful„‰€„²€€‚Û‰€†Ö€€„ción Nacؾ€„ delÊŠ€‚bajoÙ‰€‚¾ô€‚—€€„Ú‰€œ®Ã€‚ur:Ú¡€‚ü…€šý€€‚PŽ‡€Š‚ц€„‰€€„‡ƒ€‚„€Šñ‰€†÷”€Œƒ‚€Š¯‹€‚¯„€‚†¢›€ˆsŠÐ€‚¦®€‚aÑ…€ŠhipÉ„€‚1.58 mill™¾€‚934 …€„ꀀ„play‰€‚뀀ˆrolè–€„“€‚[[œ£€Œÿù€Žø´€‚Seeª„:ïñ€‚Ö¿€΄‚­ˆ€‚Ñ’€„SŸ‹€ŽµÞ€‚kÏ€€‚Ricardo Flores MagónÚ€‚›˜€‚keyŽº€ˆߥ€‚€ˆMex层ö –ß”€‚atin‰÷€Š|€€”£Ž€‚¥„€Š™Š€†ùŸ‚ÖÈ€„žŽ€Š净‚exteüŸ‘€„ZapŸª€‚ta Armû˜€„܃€Œ¼©€„´€„|¥€€Œ]] rebªå€‚ر€ˆÅ€€†Þª€‚ory occup²€€„Œ‹€Œ‚in ArgÖ„‚naŒ‰€„Berlin—€€‚192†ƒ„€Š½€ˆ‰€Œ늀‚á³€Œꀘ½Ù€Œ㌀’å…€Œô„€Ž[[¸Ä€ ¼ƒ€„Contempor™’€‚Ì‚€†Ë€€’m—“€„inuµÜ€‚ùŒ€‚즆¨…€ŒŸ‰€„¯“€‚áÔ€‚;¨Ö€†smalle펀‚žó€„瀄ˆ†€‚s, 20s•‡€„30s.´œ€‚—‡€‚larg†¾€‚Ê€†Ê»€‚†€†È€‚Û€Œtoda˘€„ù„€Š’€‚Ù€€„ÿ‡€‚Àƒ€‚’ƒ€†š‡€Œʇ€†Œ‘€Šɇ€–€‚®€€ˆNT„‡€‚À€‚Çð€„怂paid-upà†€˜6ÇŒ€„ö‚€‚ÐŽ€‚­È€ˆñ•€‚Õ†€‚÷†€ˆvÖÿ€‚¦€ˆï©€„[[s †€ˆ©Ù€‚î´€„ê’€„ ´›€ˆ´€„Ø‹€’õ€ŒÆ€€‚»¤€‚˜Œ€‚¼Ž€‚­Ó€‚ŸÄ€ˆSolidarity A‘‘‚ÿ¥€„˜€†«€€„KÓ‡€‚£€€ŽFÞ‰€„ø€€‚ׇ€‚Ú€–Ò–€€ˆÂÄ€‚œˆ€‚–¢€†×쀂…ó€‚熀‚2»Ž€‚€„À‚€Š. é„€”cÍ€‚c¾€„타†ô„€–à‚€†Î¥€Šš…€‚ €žm†æ„€‚¤•€„߆‚é…€‚³Š€‚“À€‚¸¨€‚Ç€€‚ÙÛ€‚«€‚dë’€‚à†€„½°€Œâ瀂¡¤€„Žê€‚·€€‚. 
Pƒ­€‚‘©€‚‚‚€‚¦€Šs×…€‚ÔÀ€„Bob BlackÑÜ€‚ÿ€€„¿€¤Ÿ©€ˆs½Å€‚ƒ¾€‚Ä€‚¦†€‚”€†Ξˆ³™€’Man¼Ô€‚Û›€†|‹€‚’€ˆ΃€‚Ô€†£ð€„´‡€„휀ˆ™–€’«„€‚´¢€Š up¤â€‚†”€‚ƒšˆ †€‚Ó€€‚ý‘€‚s–„€Œ†©€‚ˆÄ€ˆ……€‚œ„€‚„€†™â€‚–€’Ô«€‚¢†€‚ž€‚­–€‚‰Û€Ž«ž€„main‰„›€€–ô€€‚1917øª€‚½€€‚Þ€†£€€¦šŒ€‚߈€„seis°»€‚e‚Ɔ€‚ÿ€‚‘—€ˆ¢€‚Ô€€‚ñ‚€†„€‚²€€„¤ˆ€Œ타‚‘€€„þ €Žñš€‚«“€Š¥ž€ŠËà€ˆa°å€„ùƒ€‚ ‚Ùƒ€‚—€†k»‹€ˆ´½€„FebrušŠ€†d Octoberº†€s£§€‚Œ½€–Ѥ„ù§€„Ô”€ˆ†€‚က‚߀€ŒßpÛ½€Œ›€€–žƒ€‚¦¦€‚urn«€‚²Ä€†¢€€„ô€ŠñÇ€‚¸žŠ §…€‚À€†ݾ€„i。´ƒ‚¢á€Šë°€‚ch cul𦄑—€†Œ€‚1918 [[KroñЂd¥¢€‚߀ˆ ˆ€‚²‚€inâÑ€„½²€‚ýƒ€†€†imí…†öž‚r dׂº˜€‚der‹Ä€‚¡Ø€‚orÔ€Š‚vÏÑ€„§¥„‡‚€Œs¡—€„[[Ukra扂®‡€‚좀‚〄´Æ€ˆä‹€ˆº„€Ž ‘€Œ|civil waã퀄¨‚€ˆ³ƒ€„Whiù†€‚¥‚€‚뀀Žù‡€„Í€€ˆMakhnovshÅ—‚ö°‚asa®¯€‚Š€‚ÿ €‚¼ä€„Në„„«€€ˆ]]).
+
+Expðÿ€†ï…Œ°€úœ€‚¢·€Ž“€‚n㙀‚¦·€œᡆleavœ„€‚ׂ€’amongñƒ€„Ʀ€‚ÃÊ€ˆ™Â€‚‹©€‚sponseˉ€‚×€Œ“”€„cž…€†ç‚€‚󄀂„þ†€†’€€‚ダŽu¡‰‚ing. Bothžª€ˆÚ”€„icÿ÷€„u󌀂Ñ›€†iǪ€‚°â€‚‡¥€‚†„€„¦€†, ã‹€ˆ˜Ð€‚xpo€‚×¥€„¼€€‚›€ŽÒ£€ˆ¶€†themðˆ€‚Í©€ˆ¯Ü€„ŒË€ˆŽ€‚ò¶€‚u÷€‚‚É€„’º€†ø€€‚ofºÇ€Šõ˜€†ÐÊ€‚Àõ€‚†€‚l׋€‚oµØ€‚…ꀂ∀‚æ„€††€„É€€‚Š€ŒÀ€„‘€€‚‡€ŠÁ‰€Ž§ƒ€„š€€‚resulì‚€„逆ë„€ƒ‡„s½ä€‚u›´€‚­ ‚ÿ€‚ýƒ€Œ‚‰€Œ瀀‚¶“€ly”»€ˆûŠ€Š¦ƒ€‚ꊀ‚vã…€„†¡€‚¢€Œ㌀‚š™€‚Á‰€‚謀ˆÞ“€‚Ì­€ˆ;¦¤€ˆ”€‚맀„eÄ£€‚ew û€†Šƒ€‚ø³„ÏŽ€ŒŠ€„§ˆ€ŒúŒ€†¼€€‚¬Š€Šü†€†Éö€†¨€€„‚‚€‚US¦¢€„󀀈º˜€†™™€†÷œ€€€Ž퀀‚¶€€‚[[CGT¾‡€„ñ…€‚IWWã…€„g怆™„€„gý‚€„mselves©Ç€Á‚€Š‰€€‚¹ç€‚“‚œš€†Ê“€‚mÌ‚€†|¡¦€†½€‚”–€”ŠŽ€„In Ʀ€„¬€†É”‚elo Truda]] Œ‰€‚p¦€‚Òƒ€Šð€€ŠÍŸ€‚iles Š€ˆç’€ˆ¾€‚÷‡€˜Ä…€‚ƒ“€„¡‚€‚a㌀‚¢‰€Œn„뀌Ÿ€ˆ new•€„™‚€„Ï•€ˆõ’€„¿‡€šµ‚€‚ðÒ€‚Ö®ˆ¯Á€‚ýŒ€ˆš°€‚Tþ†€„†ß€‚ß–€‚¡¾€†,¿è€Ž¼€€‚[[Platû•€‚´¢€‚O¿Ì€ð¸‚š€€ˆ×€€‚¬€€‚Ò™€„—¿€‚Ó…€‚Æ„€Š⯀„󎀂²ˆ€‚〆Ü•ŠÈ©€Œ®‚€Žß‚€„‰¢€„茀„É€†ú€„ˆ€„s. €‡€†''ü€€ŠùÍ€‚º˜€ŒõÙ€‚Ò¡€†˜±€„™€€„õ˜€˜“ƒ€‚®ƒ€„“ƒ€‚Â뀂ÙÊ€„¸‚€‚䙀Œ¢€€‚¿…€ŠÞ‚€Š‡ˆ€‚ñŒ€„ƶ€„¬á€‚õ«†û€‚'½Ä€„ ¾€‚“’€‚ù €‚', 'tacʆ‘€€Œ É€Šv„ˆ€„䊀‚ib”µ€„'Ò…€„'œ €†ÜÏ€‚'.Ü‚€ŒŒ€‚¯€ˆœ™€ˆ§„€†ñ‚€„¨—€¤•’ŽÕ€‚IŒ²‚¨Ž€‚¡ƒ€‚UK'ÁŒ€‚÷Ž€Œ°—€”““€†¦€€‚èµ€‚ [[North Eas†€‚ª€€ó€‚€€Ž郀’ð€€‚À€€‚n¹€€‚e¸€€ˆö•”逄bñ®‚¡š„Canadaø“€ŒfŠç€„厀Šfas££‚û“€Œ ’Aƒµ‚ €€ˆ„Ž€Ž´†€ˆ¸ €’ëЀCNT-ǵ‚¶Ù€‚-car-Øž€ˆ¹¥€„·á€†¿¥€†270px|­¡€„ÁŒ€‚ÎŒ€‚1936²‘€‚M¨›€†‚‚€‚耂Ú›€ˆè„€‚熀†øµ€‚ ˆ carØ¡‚ˆ½€‚×€”¸€€†¼€†»¹‚µ‚€‚ô¦€‚‘Ž€†¸Ñ€„ÉÍ€†vÖ‡€ˆ|€€½€‚ ‘ €†¾©‚]]
+I󂀆1920Þ‚€†193ú£€ˆÖ­„iØŒ€‚Ⱥ€‚ˆš€”±¾‚Á퀄ãô€„¶‘€„˜€„ß©€„ü’€†– ‚sû‡€‚†‡€†䀀‚ƒÚ€‚§€‚¾€Šû‚€‚¬€Œç’€„¬‡€„³â‚s,–€€ˆ׆€’­õ€‚c˜„ffiíÁ‚ choiceÎÿ€‚ͼˆק€„Ò“€‚Ø€€‚­½‚p–»‚¾¨€‚軄©€†¹½€„›†€„÷©€ˆ®•€ˆSoviet-Ý’€‚Ù‹€„쮀„ƒ‰€‚Ô„€Œ´•€‚•‘‚Ý„€ˆtë½€„y? Luigi Fabbri‰†€‚휀‚¢»€‚™¯€‚Itaû€‚²€€ˆmω€†Â’€˜rguÓ€‚¼‹€„Úƒ€†ª¤€ˆ÷»€‚ØÆ€„ó€‚ÿÞ€‚t:
+
+:÷º€†Fá…€†¼¹€‚ݳ€‚ð‰‚ anꉀ„à‹€„ê‚€‚縆“™€„ˆõ€„,û¥€†•‘€‚¨€€„s, u³€‚‡¹€†®Š‚ü’†®ƒ€‚‹©€„ŸÝ€”ÿ†€„š€€Œ³€€†È€„뀀šimaginò¹€‚Ѐ€„·‰ŒØ€€†utæ©€„glorŸ°€„Û’€‚¬€‚Ù€€‚Š€„ª”€†p¸Þ€‚”Å‚Ú…€ˆ·Š€Œ€€‚—€ŠyÝ¿€ŠÃÀ€Š펀„²€†žÂ€‚ʧ€‚×€€‚Õ‚€†tÊ怂Ú‹€‚l¹•€‚ð‹€†ÌÅ€Œ²„€‚ª€€‚Κ€Œš©€‚ rioØŒ€‚ÒŽ€žË€„Ó˜€‚—Ñ€‚a '¢‹€‚eŠŠ‚ont'ë•€ˆ„Ö€Žž¢‚or.univ-montp3.fr/ra_forum/en/ Â€†/berry_david/­€†m_or_á €Ž£Ê€†£§€‚Ö©€„”‹€†È€ŠÑ›€Žù¾€‚ñŒ€‚toˆ†€†±Í€‚…†€Ý£€††¬„a”£€ˆÝ€‚ò’‚¥Ì€„”—€‚by󧀄ôŽ€Šrs “†€‚ÍÈ€„鉀„ ÞÛ€„¦¤€ŠЕ€ŠñÀ€†¸¨€„36,Ê‚€„Å€€‚¤Ž†ú€ˆœ‚€„Æ„€„Ђ€ŒŒ¥€ˆhelbÙ‹€„»€€‚µ€–bŠ½€‚îည. Monthßꀈˆ‚€†Øဒ¦–€‚pon¤ƒ€‚Ë¢€„¹…€‚tË€‚§ƒ€‚coupñ€†倀‚§‹€„‚¦€„´›€ŒÆ®€‚€‚-39)ü€„²œ€„w†³‚Ü„€„repü—€‚€‚Á€€‚‘›€„™€Œ Ž€ÈŒ€Ž|€€‚ÞŒ€ˆt-䀈‰µ€‚¨€ŽfᛀŠ…‹€†w™€†ü¢€‚ø‘€aÍŠ€„militiaí‘€‚¨ž‚À‘€‚rol€€‚Ž€‚Š–€†[[ciÓô€‚•€€‚Ι€‚rcelon£¬€‚ø€‚¥ß€‚Щ€‚·ˆ‚¢ø€‚f ru¼ž€‚ׄ€„ÂÀ‚š„eyšÆ€„¼Œ€Š€·†ÍŒ€z€„󀀂£€‚ýƒ€†‘¢€‚ºœ€Š™€€‚€€‚ªê€‚ÿ†€Š«„€Š¦„€ˆ9»ƒ€†›‚€Œ®²€ˆlos˃€‚¹‘€‚³’€‚Ã…€‚bÁ°‚à耄€è€†׃€„»€€‚Áƒ€‚ኀ‚ú–€‚¸â€„⚀‚÷„€‚Žæ€ˆö°€†ÛÀ‚ɹ€„¯µ€‚ø €‚¢†€„‹‚€‚͇€ˆ綀ˆðŠ€‚Ž¨€Š§€€‚tro¬Ü„¦š€‚ ÐÔ€‚ž…€ŒÙŠ€Ž.Ä倂€€†»Œ€„troops½†€„倂¹†€‚°œ€„È‚€ˆÛ€‚ø€€‚ËÝ€‚ecu’…€‚‘ €„[[POUM|dis‚nༀ‚rƒ¸€‚飀‚¯€€‚¤‚€ŽçÔ€‚˜°Šî’€†197¾€„ö—€Œꀄ’Í€„Œ”’É€„Ù‚€‚€‚€Žneo’€Š|‹€€Žš©€‚ƒ€‚¶“‚I¥­€‚̪‚»‡€†¾€€‚˜“€ˆKingdom¸‚€†€€À…€„þ†€†Á€„´…€„a„€‚ä‘€‚ÿ’€|Œ€€ò€€Žø¥€º€Šß…€ˆ…©€‚—‚€„]]Ù¤€„ðƒ€„yÌ©€ŒЃ€‚recÿ¹€‚comb—³€‚gü„€Šþ€ˆphysí–€„ °€†¦Éˆˆ°€„rely°€€‚Ý¥€‚¶Ê€‚œÐ€‚½ï€‚í‚€š9§°€‚¤½€‚›¸†tendé适–º€„皀ˆü€ŽUS˜‚€Œm¯µ€”«ˆ€„ti-RaË€„︀†ýˆ€‚US)¶”€‚œ€€„倆€€”K€€Œfa]]÷’†RʵŒ뀀ŒËÕ€LeoþÉ€ˆ®•€„¨•€†òö€†œÊ€”|¨Ê€”ñ‰‚8-1910¶Õ€ŒÅ–€’Ê€¦ª…€‚œ€‚Š…€†³ž€†綈Ж€†
+äÏ€‚¥€Œt cul‘Ý€„Ä‚€‚ï•€„b䃀‚š†€‚l‹‹€„ifÞ‘€„outûŒ€†Ë„€Ž athie„€Š䀀ˆu„ˆ€„”瀎ǃ€‚‰„€‚Ú¨€‚Û€‚쉀‚«‚€Œò €†臀‚s™¾‚ŠŽ€‚À›€‚orûš€„§€€ŒÁ±€‚±“€ŠoÕ‡€Œ¼€€Šlas°•€„ù€‚퀀ˆ’·€‚›€†‚Ôˆ€„¦ €„¬Ž–߀€Œ¬–€‚di⤀†؉€„ÀŒÑ„€„ô£€†ÂŽ€‚¡…€„烀šâ¾€„Œƒ€ ¢Í€ ̈€„µ€Š픀„­”€‚ˆÒ€ŠŸØ€‚erÚ’€Ž¡€€‚Ѐ‚GodÇ™€‚߀‚Øž€†õÊ€‚thly…“€Ž˜¤€‚ü°€ˆ˜Š€Ž팀ˆöÑ€‚»­ŠchurcheÁÔ€†Ñ。Àž€†Š€„Jesus' teaò©€‚g‹Œ€ˆõ€„¹ö€‚Ñ„€ˆticÃ䀆€€†orruÖ€„û”„¡Ñ€†‹‚€Œi‘¥Œ󖀂declÿ‹€„¤ƒ€‚o˜€‚é±€‚„ƒ€ˆ˜„€„­Ù‚. ÙÏ€ž€”€‚‚›÷€†¨€Š舀†Ÿ€‚¾¥Œ’®€‚Þ€€†ׂ€‚ cheek€†½Ì‚Ÿš€‚ricÂ…€‚ÍÑ€ˆmÖÑ€Œµ…€„º€‚õ–€„½§ŠÔ‰€ˆÄ„€‚Ô€Œ‰‚€Œ¢˜€†¿‡€”¦ƒ€„“ƒ€„²€€‚þÒ€€‚£‹€Šof God¯˜€‚™¿€‚in YouýÜ€†»Ù€ˆÛ¦€‚aÛ…€„·™‚basé•€‚Ò䀆ŠÜ€‚º¯€‚·Ñ€“—€Œ÷ˆ€‚ƒ–€‚Ȇ„ȶ€‚Ä‚€ ⇀„¬‚€‚–œ€‚ [[»«€†œ™€‚ƒ‡€„„´¨€‚ªž€‚‹‚€‚y¹·€†occaø€€‚ƒ€„[[taxŽÅ€|Š€€† tax߇€„»€€„Ω€„€¤„ƒ€‚[[vege꤀†‚ƒ€‚€€Ž¯’€‚»’€‚v—¨€‚š€€ˆ•€€‚Û˜€Š.
+
+Õ€€š®€‚ñ‘€‚…Û€†瘂¹˜‚rooÈ“€‚s olí³€„ñ„€‚è„€Š's birÜÍ‚˜€€ˆ[[á…€†›†€†ÿº€‚xhibðÚ€‚Žž€„ø…€À‚€„Ï΀‚‚î·€Š¸‚€†alŸï€ˆ†ƒ€‚½Î†. Byˆ®€obey utte뀀„릀‚ºõ€ŽBibl«æ€‚û†€Ž €€Š×Ѐ„©Á†犀‚Š€€ˆÚ‡€‚›€†Ô„€‚sixteen‚Ó€»Ÿ€†ô–€ˆ¯€‚óµÓ€†ð€†'†‹€Š-ò¹€Œ•©€”Ѐ‚«¶€Ž›™€‚­´†§€‚iŒ‡š„Ç€€†´©€ŽÞÓ€‚„€‚¤€‚t¿‚obeû·‚ÅÉ€‚oƇ€Šó€Œቀ†¡‚€Šreje‚û€„¢ý€‚•€†)±š€‚»€‚û‰€„hier䀄û…€ˆ÷†€†ªŒ€‚(²€‚indµÑ‚non¦å€‚Á‚€†ú®€„€º€ŠÄó€†Äž€†Þ­€‚瘀‚godŽ¯€‚çà€„ß‚€†­Ü€”‡€‚Õ‚£€„itežª€„typÍ‘€‚þž‚µ”€‚󠀄œ€Ž½„€’beginnÚŠ€„««€‚¹‚€†iÓØ€ŽšÅ€‚abal§¤€‚,™á€†Ô€Š¬†€„ß‚€ŠmodelsöŒ€ˆž’‚ÎÅ„À„Ò£€‚늀‚ʼn€„ßÅ€ˆ鸀‚º€‚„†€‚礀„Ñ’€‚ž¤€‚ò €€Š©€Š½€‚Ì­€ˆÃЂi-Xu‡³€„[[Buddž€‚¿¨€Œ퀀„›®€„󀀂ˆ¡¤€‚by ï‘€ˆ¾„€„ù€€‚[[well-fiÇÆ‚šâ€†›¯€„Ì®€‚ªã€†Ï€€„‡˜€ˆý†€‚æÿ€„û‰€‚¾€€‚envž·‚¬‰€‚Ò€€„¸„¦Ì€„¢»€„¶€†µ€€‚똀†ãØ‚¹€„¥š€‚ó̈‚Éž€‚aŽ†€„˜ç€’n€ˆƒ‚€Œù±€‚¤ä€”Šë€†min⃀‚¨å„Starhawk²€„hoÂÔ€„e¶Ð€‚tenÉ¿€‚²Ž€‚­µ€„ܘ€ˆ󜀂iŠ›€‚—œ€†¢…€‚[[a¿€‚÷€„Ū€„µ“€–femÕš€‚Ç”€’Û-4’Ÿ„—€‚Ë”€ŒÞ¸€–¶”€Œ|Ù€€†a-FÓ€€ˆ‡”€‚Eõ‰€„¦õ€ŒŠ¦õ€ˆ€€Š™Ž€‚뉀Œ[[Jenny d'HéØ ur倊[[Juliette AdamÖª€‚¾Ã€„Ì…€‚Áƒ€„[[mysogynó‚亀ˆ󄀎§ž€„ƒö€ŠßÖ€‚u雀Œ185ÞÔ€‚ ¤¿€ˆa-fÄ€ˆˆƒ€‚a°æ‚Ñž€„ŠîŒ¹€Š…€‚Ø‘€„Œ¡€‚妀„È„€‚ò‘€„ªŸ€‚áÊ„patriõ†]΀€†f­‚æ„€‚âæ‚roblem­€‚Ú‰€‚âð€‚ ¹¼€‚l¹€Ž£ø€„¦ƒ€„ª”€„µÔ€ˆᎀ‚À‡€†Ö™€„a¡Û‚dú€‚–„ÐІ±¶‚©Ô€‚«€€‚š‹€„„’€‚aÌÍ‚Û€€†瀎'' d­ï€‚„£€ŒÇ€‚‹€†70sô×õÄ€€ˆ¿Ú„sallydaœŠ€‚òîˆo‰„€ŒTwoòª„ò€‚†ð€„݇€ˆ¡€€Ž - Two䞀‚¥€€ˆ - Wñ…€‚¿¡€„:Ó°‚Þ„€†²€€Œt€‚’µ€†žû€„Ä„Š,Öƒ€’㘀„ond-wavÜö€‚Å‚€†|”€€«®€‚–€€†Ò³€ŽÓÚ€‚†„€˜,µä€„œ…€‚̓€”ÞŠ€†¬Ü€Œ€†å‚€†û®„½Œ€†‰¢€„öû€‚î­„©Ï€‚ð¹‚uÜ£€‚¼€€Œ®’€„‚³‚ú˜€†§ƒ€‚ÍÏ€‚ ê€„È…€ˆd›ˆ€‚Ø€„½Ñ‚ðÝ€‚©€„femaƒ«€‚³€š‹“€‚Ø“‚¯€‚‚”‚¦„€‚爄©’€†˲€Š×€ŽÏš€„yÌ뀆ÖÊ€„·ä‚×Á€ˆÔ¬€‚ª€‚ƒŠ€‚áညÔŠ€‚‚’–€‚ÔÀ€†‹ì€„àŽ€†•€€‚匀˜Ýû€†È€‚‚‚€‚³‚€‚î…€‚‘²€‚‘¸„Åž€„‘“€ŠæÈ€Ž¸â€‚ƒð‚Ü€„¹ø€‚¼€€‚creñ‚€Œg¤Ÿ€‚ÒÔ€†¥’€†Õ€Žׯ€‚©€€¹€€‚ ‘†È€„ÃÀ„þ¥€ˆË€ž»ì€‚áå‚Š–€‚€€ˆ§Í„‚€ŒÕ‹€ schoo‹‘ˆðŠ†‡ˆ€„addÓœ€„sÿ‚€ŒÖ€€‚×Ç‚«Ž€‚[[Eco-™ƒ€ˆÔˆ€‚­é€Œû€€Š߸€‚º€€Žâž‚óÞ€‚of ¸æ‚ÿ€€’Ä€€ˆö€€‚þ€€„ßÑ€„Ï…€š߀€‚ñŒ€„Ê€†¨ç€šø‡€†20th-cܧ€‚­¯€‚ò˜€„Õ‚€†ª’€„„ƒ€†’œ€Œ•Œ€–Œ„€„[[Voltairà”‚de 
CleyÆ´‚ÏŽ€‚œ€†»¨€„퀀ˆĆ€„¨‡€”Äš€‚Mó΀‚WollstonecraËÁ‚Ȭ€‚»°€‚otoø’€‚•‘€„ö¨€‚ews†…€†ÙÊ–쀂瀆Ì‚€¦€€ŽprecursÜš‚Itï´€Šb®¹€‚šŸ€‚ǃ€„žŽ€ˆ怀„Ó€Œ²†€‚ì‚€‚™‚€‚y€†Åž€†´€€‚è„€„©œ‚²Ä€‚–€€„Ö•€‚î•€Œè—€„ê÷€‚Ì€€Šµ‹€„’û‚Š²ŠMiss÷€€Œì‚˜€ˆist;˜±‚€£€‚‹•€‚ŽŸˆist. Sö‚‚‚‡”€‚Ñð€‚ŠùŒᢀ†Œ­€‚¡²ˆÁ±‚ª€€‚¯–€‚æ¡€‚r„Ç‚. I make ó½‚ar¡î†€ý€‚ileÔ¬€‚Ì°€‚‘„€„Ÿ’€„ƒ³€„Õô€ˆ߀€ ƒ‚€‚›—€„›€€†ω€„a­µ€ˆЀ‚‰€†ÿ€€‚©€€‚¦ñ€ŽÑ쀄¥õ€Œžª‚SÿôŠ¾Ã€‚Ä€€„¬‚”–’Štirœ’€‚ž«€‚l¦†€‚¬‚±‚€‚”Ñ€„½€€ˆÅ‚Œ–€‚亀‚Þ‰€‚¢œ€‚涀ŠÛö€„alwˆ«‚úŽ€„â„€†¾€€„±††Ó¢€‚¼ï„¶ó€Ši»„€Š¸µ€Šܪ€‚Ú€‚‰±€¤Ï€€‚ƒ…€ˆ€ŒÒ¿€ˆ±µŠFreÚ倂mené €Š¾š€†è。Þƒ€„fŠ”‚…”€„Ì…€Ž£ƒ€‚Œˆ€ŒŸ…„.Û±€„ˆ€‚Þ—€‚rn day°€€£˜€ŠËЀœ€€‚ ‚쌀Šªð€‚‘Š€ˆÍÆ€뀀Š䚀ˆ×€€†Ñ€‚€„Ÿ€„groÝ´€‚ €†Ú€’of Quiet RumoÄ‘€‚™‚€¢І‚‘´€‚é–€‚Ñ´€ˆto sprõ–€‚˜Š€‚œÊ€‚kiú˜€ˆú¬€„ª…€Š♀†˜‚€‚€†è‚€‚¶€ŠŸ€ˆ’ꀈbro倀‚“‚€Œ. Wendy McElÔÿ‚ü€€‚½µ€ˆ…ƒ€„‡€š—‰€Œm take☀‚耀ˆû™€‚¯¨„‚Ò‚«‚€‚°ò€‚lퟀ‚—€‚ü…€Ž›€”websiteýŠˆ“€šiÛ€€ˆts.net I-€€”Ÿ’€”Ȭ€‚Œ€†o-펀ŽǬ€ªé˜ Smile.JPGÒ¬€ˆ‹˜€”¬€€˜Ø­€‚®È€‚-1995)š˜€šð€€’}}
+…€ž‹ˆ€‚žò€‚¨«‚ê‘€‚ᯀ‚ƒ±€ˆ™”€„s-«¥€†ÿŒ€„ÎÆ€ˆ¶ð€‚Öª€„‘‡€†„‡€„ü€‚Ý€‚‡ë€–¿›€†ý…€‚“”€‚“„£œ€†ö„€‚[[fㆀ‚í§†À±€„Š‘€Ž豈‚ÁŽˆ󧀆braná©€‚¸€€‚È„€Šݾ‚tÊúŽç‹€„ð‡€‚݆‚ソ†€‚Ò€€Ž¼“‚ÜЀˆt€“‚‹€„Ê€€ŠÓˆ‚oï¿€„³‘‚g‹á€‚›€€ˆÁ‚€’³Œ€‚Àù€Ž»€€Š©¬€†–ƒ€œ€£€‚ynthÓ¦€‚µ€‚[[Ò­€„Ô €‚­ˆ†ï„€‚÷€‚ƒ…€‚[[AÝn©‚€Œ µ€‚ÿž€‚ger™ƒ€‚„”€„Ê‚€„‰²€ˆÙ—€‚×€€‚¨¹€‚ÏÊ€˜³€“‘€‚†°‚ê„ëù†ß‹€‚À€‚¥€€ †€„øÖ‚©€„¯ƒ€‚Æú€„-agÅ›„š–€‚’©€Œ½Š€‚¿©€Œª€€‚í•€‚ep‹€„çÞ„󹀄LawÀ‚Cì‹€Šô›€†‘€ˆ“Í‚Ô°†³€‚…€†§ˆ€„‚€†(Ÿ€„‰×€„David ïô„m²¨€‚)†‹€‚Ê‚€‚ø€‚acµ€€¨Jan NarvesÚ”€‚)¥†ŒÇ‚€‚¹‘€„m|Š€€Œˆž€‚õ§€Ž[[Ayn RanÙü„ƼŒNozick’€€‚¬ƒ€†ݼˆAÕ‚€‚inle‘É€†£©€„®¢€æ‚€ž⸀‚Ñ·€Ž€ƒ€Á‹€„”Ý€‚±†€†Ú±€„¹€„áÛ€†®õ€’ÁŠ€‚oö€‚¾Ï€‚°€†¥‚€†HarÐ…€‚Ì„€‚Ralph Raico]]ºŒ€Žßú€‚Û¶€ˆ‹’€’㎀†¢ƒ€‚p­í€ŠRôñˆ›‹€‚‹™€‚¬‚€„¯€’²‚€Ž®Ù€†üƒ€„Gust§«€‚de Molá‚€‚i—•€ŒAuberon »´ˆ]] ဒº€€¢|À€Ž, Ú€€Žë·€‚‘ž€Špraxeology.net/MR-GM-PSÿ—„Pre×É€‚Á€‚Á€†³¯€‚P·ÏŒ±€‚SecuÍ’€‚Ÿ€€†²¯€„À¦€‚sÿÁ€‚€…€„J. Huston McCulloch, O¸®€Œ Paî¼€‚ Se÷Ë€‚ #2 (Riš¥‚d M. E“¡€‚¦Æ‚Editor)粎:†¯€‚ Ce¯Ø€‚ü‘€‚¥Ó€”ãòŒMay½€‚7€„란Š´‚€ˆ‹’ÛÅ€„-harÔÚˆ‹„§€€ˆµ€„Ž„€’|”„€Œ£„€†É‚€ŠÑ‚­í‚óô†á„€‚y/1787–â‚uÔ€‚Ëꂬ½€†­€†½‰€„Ä‚€‚‚ˆ€‚Ž²„Ï€‚ury''] Ecáð€‚Polytechn´ç‚,Ÿ€€„rœ„€„Rechercþ§€‚ Ep÷€‚mologie Appl­€€‚e,…€„駚€ˆÃ©e au CNRS (ªõ€‚)•‚€” Opʈ‘€‚­…€žÝÈ€‚spu€‚hת€‚½ç€„s€€„㜈McKa–—€‚ain; Elkin, Gary; Neal, D×…€‚''eŸ€‚ò€‚’ˆ¶‚€‚infoΘ‚“£€„faq/‘€‚nd11ÙÈ€„ Rep˜î‚‡…€‚û‡€„EЂƒ‹€‚d Dš¡€„Éž€‚檀‚ Bry¥×€‚aÀ–€‚ѺŒ²€†È‹€‚T¿Š€‚y FAQ烀†”΂š‹€„5.2]§ƒ€‚°£€Š¬€€‚FAQ VŸ€€ˆ11.2''ºö‚ž€‚d‰Ë€Ž20,÷÷€‚6ˆ‚€„á„€ŠÌ×€†ð耄Ì‚€¤â–†³½€‚”ˆ€„†€„Ì€‚™¯€ˆwhe‘¡€‚™€‚·š€„Û—€†”ƒ€Š¾€€‚ÉÚ€‚l蘀†Æ—€ˆÍ‹€„Ù¢€‚‘Ä€‚²à€†¦®€„ငi⸀‚b­¡€‚þ €‚ü¶€‚’‚€Š„›€‚劀¢ü¶€‚øª€ ¨…€‚ffÂÕ€„ꇚî¡€Žm|G埀”m|Eco-󀌀€‚ñ“€ˆØ’€‚¹Ä€¼ô€‚Êû€Ž·€‚̳€†ÿ€„untÀˆ€„ÀŒ€„ÞÄ€„tak‹€‚¯à€‚Æ•€‚”‹€‚he£€†ÁÅ‚µ®€Ž.Ç¡€ˆ˜€”€€ŒÜ€‚…’€‚[[Ì€šïă€Œ§€€„®‰€‚öÚ€ˆ[[deepŠ‘€‚ÛŠ€‚È€‚¯«Œ´‡„ldviewþ“€†¯¦ˆ»Ž€‚bâÆ‚ØÇ€„¤Ê€„倂[[s¹‹€‚µÑ€‚ßÊ€‚Ë€€„¡€–§ €ˆ“€‚’퀜„¦€Œ°¡‚‚‚€‚ˆ‚Ê„earth-䞀ˆª‚€‚ÖˆŠí¿€‚. 
Ofªâ€†íÁ€„±é€‚Ó „Ø‚°€‚退‚EÀ€€‚ø³†!]]ô˜€Œ¹ž€‚ꀀ‚akeæã‚΀€‚ñ¢€t°•€‚sit¬Ž€‚•‚€‚Anoì…€†à€€ˆö€†‰‰€„退‚[[e¥„€ŽÅ€‚þž€†see…€†Þ¨€† „oÒ‹€‚°Ã€„°›€‚ “‚aphý›€‚úƒ€ˆª€€–w—€‚´Çˆă€Œmœ¦€†™É€ˆ©ƒ€‚ª¯€„qƽ†Äò€“–€Ž¹‡€„‹¶€†ò€„Ø¥€˜×ó€‚þ €‚¥§€ˆˆ³€‚elf.¥¨€ŽPœ§€퇀„Ș€–ëøŠ쀌ûÁ€„˜§€‚vocú­€„a reÝ¿€†º³€‚pre-§€¿„€‚usuå­€‚š€€„agrЃ€‚à”€†¹¯€ŽI‚™€‚×Û„Ï€€‚ó€ªÇ€.œ €ˆis­€€Žéš‚chnï…€ˆú€€„„†€‚Ö€€„é–€‚¦€‚‚Œ€‚[[alie´ƒ€†|Š€€ˆ°Ï€„îÔ€†§¸€†á„€‚̓€„–Þ‚­†€‚ƒÁ€‚i¨Ý”Ú€€ˆ‡„€„療ÓÍ€‚„µ€„Ó–€†ÑÓ€‚©á€†ƒ…€ˆ¦Ë€ŒLudË‚˜€‚Ÿ‚€‚ꀀ‚£µ€‚þ¸€„„“€„Jean-J¡ª„s RoÕþ‚au]]. ¬ƒ€’þ€€ˆüÅ€†À€‚‚‹€‚ext€€‚Ž€€‚€ß‚¤Ž€„€€„S÷…€‚£ˆ€‚, ¶†€’€€Œ͆€Šì䀄š‚€„ÌŒ‚´‚€„׆€Šs¨‰€‚John ZerzÊ—€‚ƒí€ˆ„¬€š &amp;mdash;•›€„…Û€„˜€‚·­€†ž€€’€¥€†n•¼€‚Õ߀‚®®€ˆ•†€ˆ¬Ÿ„À•€‚숂ved»…€Œº¯€ˆñ‹€’¤ô€‚¬ñ„¡€ˆÆ¡€ Ôƒ€„–Ž€„ÃÀ€„'®Œ€Še'઄uГ€‚-ga“ˆ€‚ÕÒ€‚íÀ„Ø‹€‚„œ‚ughå퀊“„€„'þ€‚ÿ€‚Œ°‚ƒ—€‚aè–‚〄Ì‹€Šáí‚Ÿµ€‚Ôà€‚Ä €‚µü€†Šž€Œÿƒ€‚offshoots==
+—Ž€Ž²†à†€‚§€„ec鱄c±€€„syncÉŸ€„–‡€Ž¢€‚Ì€€‚¬ƒ€œÏ€Ň€Š‹€‚†€†me¨…€Œ196¢Ò€„áဂ7‰€€‚˜¢Šꀂ´²€‚ʈ£€€†¨¯€†耈¦’€‚¸È€‚. »Í€„ú„€†sダ„ÿÕÄ€‚là‚€‚Ó«€‚‹Á„nà€Œºã†Ù‚€‚ê쀂𫀂ƒ°€„怀†ô€€ŽÒ£€‚Ư€‚bov§ð€‚ᵊHakim Bey.jpe¸Ï€Š„|Á¹‚œ€€ˆ麀„*'''—ü€Œ™ƒ€ˆy''ë‚–€€œ (‹€„¶À€‚™àŠ•¥€)ø‹€‚ñÆ곂。ÑŠ€ˆµˆ€ŽÚ¢€Œal •™€†û£€‚‰€€† -”Å€ˆª…€‚ñà€‚…—€„ÌÙ€‚ǂ肺¹„›€€‚etc. -Óƒ€„¸‚€‚scap醀‚懀„ùÒ„ì‚€‚[[id¼š€†¤º€‚À€Œ.×€Ž耀‚Å ¼€€‚·‹€‚à„€Œ‹§€‚¨‘€„weaken­š€†§¿€‚À„attach¶Š€‚þ€€‚Úˆ€‚rÓ°€‚Æ€†Ѐ‚€ôŽªƒ€Ž¬€‚¦¯€‚l·Î€‚sue„ï†s («Æ€‚ti-í÷€„“Ÿ€‚€€„nuÄ‘€€‚쀂)½°€„ø‚€‚àÀ€‚êÊ€„û¢€’¸€ŠÈ€‚ˆ‡€‚ȃ€‚Ž’„pecË’„⌀‚×€€„¶©€–íû€Ž¿€‚«€Š´Ò€‚œµ€‚Ä‚€‚Ô‚€‚Ñ€ˆÁÜ€„eu.€‚û±€†êÁ€„ì‚€‚㊀†言ŽÖ€”speí“€†Ò†€‚ƒ£€ŠþÑ€„º“€‚þ¥€ŠoadÖ‡€ˆŽ€Š²‚€ˆhun¨€€‚gan¦€ˆë „ÐÉ€ŠÔ€€‚–Ä‚¹Í€„¬€‚þ®€‚£ô„absÕÀ‚Ä€‚þ±€„´½€„ݸ€Œ. IÓ‘€Œ²Æ€† ƒ€„€Ž‡Ë€‚ø„€„߆€‚ÝÒ€„Ž†€žñÄ€ˆ‘×€‚C‘Í€‚thInc]]ã’€‚÷§€‚gaz¦µ€‚З€Šy: A JÊ´ˆof Dܯ€‚e A߀‚’€ˆá„€‚œÙ‚ƒ¤€„Jˆø„McQuinÁ¸€„Çÿ€‚¡ƒŒŽ€€‚ɇ€½€„Ë‚€„sà˜€º›€‚rm“€„,‡€‚ûŠ„Í›€Žħ€„''Aö…€„yõ—ˆLefÚ‚dž½œ€ˆŽœ€¬a²€€‚Ÿ„€†mŽœ€†߀€’ - ³‹€†Û€€˜½¬€” se†•€„«“€„ƒ€„퀀ŠÕ…€Šm.ws/post耀‚倀†‹ƒ€Ž½€€ˆ§„¸€€¤œ†€ŒÆ€€‚.Èœ€‚ù–Ê€€Ž†y¾ ‚•Š€Œáé‘€‚“‚€„–Š€‚碀‚Ç…€‚ po剀‚»‚€‚œ•€‚Ë©€‚ÙƲ„€„Þ§€„SaulýÔ‚Û§€„,¹€†«€‚eivÓ—€‚í¯€ˆψ€‚€¡€‚•€‚ÿ¥€„ǯ€„Šœ€„³„Öü€ˆ눀‚LacÛ€‚''þø€†fâµ€†a¶‚€‚¾­€ŒƇ€‚ïø€Œ¨ˆ€˜ö†€„¢«€„¸€Š²ˆ€‚Àª€‚À„€„ú퀂stŇŠó€‚˜”€‚Óˆ€ˆàÌ€‚зŒto×€ˆ'î©€†Ü€†—‚€„,Úê‚•µ‚°Ä€‚ÉŠ€„혀‚‰¶€‚§Ï‚–‰„˜¬†·¬‚§‰€†ÉÞ‚ r•ö„›€€‚dÞ䀂܆€†¦‚€‚îÉ„Þ®€„嘀†¥€†ȉ„Æ€ˆ¬À€„[[s©É€‚Ç‚€‚¨€€–囀‚n›Å†¨†€ˆý‘ˆmoìÑ€„õ†€‚‡Ð„þ‘€ˆƒ‚€‚À›€Œ™¡‚”Ç‚Š€‚ÆÞ†Ò“€†·€Œþ’€„ co“’€‚¯¤„Š€‚žâŒÖˆ€†úœ€„fÈõ‚Ô©€ˆå·€†ñ—€„,®Ý€Š蚀‚ssib¶€‚µ€‚𰀂ýˆ€†Ä deg ›€‚ofçÒ€Š±€‚™§‚Õ¼€†or‰€€ˆÛ™‚b홀‚oupe‡ï‚ö´€‚¾€‚rubric. 
Noneä눱•„ï¡€‚¸è€‚¬»€‚쎀‚󉀌Š‚€˜“ƒ€†ƈ€‚Ù…€˜[[ToddÒ§€‚ç…€‚[[Gilles Deleuz‘Æ€‚烀‚[[Félix Guat®€‚✀‚''Ex²÷€‚´Œ€‚Ï…€‚³‹€‚:߇€„¡…€Šm CŠŽ€‚inghouseàˆ˜‰€ «€‚³€€Œ¬¥€„Æ€€„¢„€ˆé…€‚”Ü€„Ñ„€‚È€€„…‰€––ˆ€Žþ¥€‚•ˆ€†Í€€Œ’ˆ€ŠI÷Ž€‚ €€Œ™ˆ€†ž€€¬¼œ€†‹‹€‚¼ƒ€‚õŽ€–À€ŒÚ›€†Áà€‚of«€€„¥‡€”½Ž‚r×û€‚˜Ž€Š¹û€†í…€‚ï„ñ€Ž˜€’ß‘€‚Œ€ˆŒŒ€ŠlÌŽ€”딂ËÇ€‚•‚€‚Ü•„ aff‡Ô€‚y–Ž€ˆÒþ‚rryŸ€€‚Þ—€‚ø¯Š¿Ú€†ã“€†n¢º€Œ­Š€‚èÒ‚ƒ¹€‚®´‚〆º€ŽÇò€‚»”€„ಀ‚­€€„ÛŠ€‚®€€‚¹ €†”Ú€‚€†²Á€†o›–€„”€’î…€‚·–€‚´š€‚œ‚€Œsü›€‚o—¡€†¸ˆ€Š¤…€‚Wolfi L²Ô€‚treiche¥ï€‚™€‚¼‰‚f”¸€‚È­€‚Bon‰Ä‚¹…€‚ž’€†€ƒ€‚‰é€‚Ë€€Šü€‚Ï“€†¬€„ JoÒ®€ˆË€€„Š€€†•Œ€‚쀆é„€‚TÍÒ€‚onâ•€†ž€ˆ´‘€ˆê—„Úõ€ŒÞ€„›‡€„USൄµ€ˆ§ˆ€ˆ—“„úÄ€‚fulΪ€‚‰Ù€„¢†€‚Ò†€ŒK›—‚ng­ß€„ AbacuˆÞ€„Î…€Ž߆ „€ªÚ…€ŒSø˜€‚ 'a—€‚Ò€ˆÔ…€†›€€®‘¢€„‰Ž€„刄Ç€‚wû—€‚òø€ˆï ¨ƒ€‚unconn—Ú€†­–€‚exts.ò¬€†ý¬€‚刀‚胀„¢‰€‚Ï€€„âÜ€‚Ò΂iÞ…€‚ˆŠ€‚big 'A'á‚€Œ¢€€„®€€‚„¦€„l³€Ž°­€‚spunk.org²À‚™—€‚/introͲ€‚¥œ€‚e/sp001689Ë€†µƒ€Šm: I°˜€ˆÊŠ€‚MethodŽ€€„?].¿Ò€ˆ‡€ž´‰€„ù´€‚–ø€‚¾€€†Ç€„Õ€€Š®™€‚؆€†Û‚€”®€„a°€‚ü›€†ŽÅ†”€‚󀀆À€„¹ª€„erparts;ˆµ€ˆ€†€‚–É€‚õ›€‚Œ‚€ŒÅÍŒa wa¼è€„适›³€‚«˜€„’…€‚œé€ˆ„…€Œþ΀Š¨‹€‚³¹†¯Î€‚ÿÆ€ŠåÄ€ŠA¿€‚¹˜€†Ü€’šƒ€ŠÚ€¦sha¯¾€‚ñ¦€„·€ˆ ã€„È€†½€¤½¢€‚å·€†Graeb§‡€’Õ€‚j Grubacic]]’ €‚î耂nÑø‚ªä†veƒ‘€žÛ‚€‚¸ª€†¬‘€„‹Ž€„Ì€‚½ž€’š‰€†”›€‚™ú‚oœà€†賂r¿‚€ˆˆ…€‚Ô¤‚nãú‚帀„ìù€„È€†þ‚€ŠãË€‚š‡’ÄÛ€Ž Š†, volu¹Üˆߎ€ˆ—€„Ôîˆaid±˜€†netõˆ€‚ÉÆ€„lŽ–€†cru”¡€‚Ï¿€‚戀†󅀂‘€†®”€‚胀‚ƒû‚•‘€Œž€€‚endϤ€„if–³€„Š™€‚eanÑž€‚Šî‚lÉ€‚퀄™€€‚busine¥‹€‚¬Ü‚˜€–ʬ€‚o´™‚zª‹€‚ž„€‚Óý„󉀄±¶€‚˜à€†²‘€†¸‚€‚o¾ð„vis™€‚㬀‚ˆÇ€‚𤀄Ü€€„gunÕ„€ˆ«‡€’zmag«€„—ˆ€„nt/showÔ‡€ˆ.cfm?SÙ£€‚onID=41–¦€„ItemID=4796]´¤€ŽÒž€„s==¹—€„C…Ò€‚p¶€€‚Ø€†Á…€‚Ó‰€†ñ¤€Šá¡€ˆñ瀄‹©€Ž¿©€Œù¢€‚‚€†ñË€†¿®ˆÑ€‚‰‚€†Ä­€ˆ¤½€‚™ð€„g˜ö‚„‚€‚˜ã€Š¥½‚æ‚€Žǯ€„uÈ€„öŸ€†Ÿ˜€„¿×€†Öø€‚òú‚ï‚€‚ñÆ€‚müÇ€ˆÝŸ„æâ‚͆‘ù€‚¬¡€‚e˜î€’¡¯€‚Ø€ˆ›€€‚È¡€Œϸ„‚¯†¸€†eµâ€‚ÉÄ€ŒÚ€ŠÏ€‚æΡ€„©ƒ€„ ;Ú¦€†Ô€€­€€‚š€€„©€‚àô‚¯ñ€†l •€ˆž‰€†goºž€„É›€ˆ''cause''Ø€€‚chaos,¡‚€¥€€‚warÄ€ªŽ’Ë€‚฀„îÖ€„غ‚óú‚nopo„Å‚ˆŒ€†ʼn€‹ˆ€ˆ°÷€–|m°€€ƒ€Šò€„ñá‚Å¡€‚”€€Šõ…€‚advÛ‘€„eŒá‚„ó€†e瀄 Much eff’„€‚¥¤€Œdø₵–€„Ž¥€‚ëÒ€†„€‚how¦ˆ€Ú–€‚ì©€„Ó«€ˆhandleö“€‚¸Ã€‚á .¶€šÞƒ€ŠŠ¸€†Ãß‚ety•€Šú€†Ö§€„ò‚€†¼°€vereignè…€Š[[¶ 
€„Œ€Œˆš€‚ᎀ„¡ˆ€ˆ…Ñ€„×€‚¨ƒ€‚„€ˆù“€‚ÌË€Š¬ƒ€†subjug½‰€„¶‚€„´³€–¡°€ˆ郀‚olor²€€†›£€„†€‚Ñ适ý¢€ŠÜ€„üŠ€†Ò£€†´€€‚‚€†™¾†ÇÆ€ˆŠ”€Š[[Ashanti A´×€„¶ƒ€‚[[Lorenzo Komboa Ervi™€€„ö€€‚ ™€‚m MbahÛŒ€†Ç‚€Št P¶†„ð€€‚C¥€‚§¯€‚a§²€‚e¾ƒ€„ׇ€„ↄ…€€‚´€„caucas¹É€‚Ƀ€Œ‹…€„exp»È€‚¦…€‚iÝš€‚÷¦€„뛊¤«‚¾é€‚觀‚÷€†’€‚³Ý€‚¯‡€Š¦Ÿ€„󆀂, p„·ù€‚뀂«€€Ž¡À€‚§Í€Œ¡Ž€‚ȯŒÅ€€Šƃ€‚…“€„É€Ž›€‚Í–€ˆŽ«€Œꈄ„€Žor ethn¡®€‚ߊ‚rvу€†ù€€‚Ù€€ŒûÙ€ˆö¨€‚íÕ‚Ü€†Ƴ€‚o—žˆà‹€„Í€‚prÏ¥„î…€‚Õ€€ˆ뎀ˆ(Å¥€„rac̳€‚)ƒ¬€†lø¡€†™å€‚epa¯€‚ó€‚ÛŸ€‚áÒ€‚¸€€„é³€„Ç€†Ä€‚蘀†ꉀ‚Ń€‚öý€ iÓ‰€†Ÿƒ€’È€€†ÿ”€† °€„¯‚€ŠË€‚·ù€‚nvol­²€‚ܶ€„Å©€òÛ„cèñ€‚onfá³€‚¬€‚˃€‚Æœ€‚ôë„oߦ€‚ë—€‚è²Þº€ׄ€„hiap熂ö³„o €‚aü‚€Žš‡€¦€†÷‚€†¬’€„¦€Œ¾Ù€‚c«‚€‚š²€ˆÔ€‚Neo׆€‚È¡€ˆå…€„Glob®ª€Œ臀†NçÞ€†¥´€‚Í€€Œ©«„ªƒ€‚³ƒ„Ä€€ä’€‚¹ˆ€‚€¥€„mpt½‰€‚¼Š€‚ÌÑ€ŒcoercŸ‡€„‡’€‚g„ scaž¡€‚–œ€‚¼•€‚•œ€‚ÿ³€ˆžŒ€Šƈª€„ÃË€Šü…€‚‘›€‚rld Ban€í€„€€ŠT§Ã„O¨œ€LJ€‚[[G8|G„€‚Õ‚€‚EÅÿ€‚–€€‚Þ‡€‚Ì€€’EÏЀˆ Forum†˜‚€”ý¯€‚倆mbigu¸þ€„erm€†‹€‚±™€Œ¨’€„ÿ„€„Ú€‚Ç™€ŠÈ‚€Žƒ–„Õ€‚ÉŒ‚’„€‚̇€Šs¶‚€„•€‚Ù€€†oÉ€€„ ï€‚í‚€¹€‚/“†€„§‹€ŒimpeëÇ‚ɇ€†(„ž€†Š„¸á‚ŠÀ€ˆ€„ed)ÓÇŠreÝ—€„ÉÁ€†󀀂²°€ˆžƒ€†„€ˆ“…€. O°·€‚s舀˜¼€†óÍ€’Õ“†蔀ˆÍ€€”·“€†×€Œô€€‚ã·€„·§€„expansÁˆ€‚„¨€ˆµà€‚€€Š¶¡€†³ƒ€„䃀‚蟀Š〄߀ˆ†ásoಀˆs¨Š¸‘€‚Æœ€„‹‚„venéµ€‚‘†€‚ParÍ €‚lÈ €„†€†Ö“€„߀†󇀄try»€‚š§€‚Ûñ‚¶˜€Ž胀„Ó‘€„-Ø߀‚ª †¾…€”¹€‚ý€†outᤀ‚s,ÔŸ€† Ù…€Œ[[Food Not BombÝö€„€Ä‚简†¿Ë„edu¯•†¾†€‚öÿ„˜†€Žhome-Ý·€†±›€„neighborhÔ€€‚m¾è‚ò„€‚/arbit†·ˆ€†㢀ˆso ñ¿€„“‰€‚‡˜€„¶—€†Ѐ†ç‚€‚‚€Ó€„aº¹€„Œ´€ €€ˆœ‹€‚àƒ€‚shelŠ¥€„Œ€€‚oldœ¢€ŠÊÕ€ˆî—€‚‡ƒ€ˆTÙÀ€Œþ‚€†Rec‰ª€‚ðÀ€Œ‘¿€ˆñ€‚Ÿº€ˆmad €†©ƒ€Œ›–€†ôà€†easier¶ƒ€‚‹“€ŠŠƒ€‚¶Ê€„×€„ivaú”€‚Û“‚Ñ€„†€ˆŒ€€†Ž‡€ŒÔ¦†æ¾€ˆon-l”³€‚鸀ˆߊ€„•²€„tþ­‚céõ€‚üŠ€‚perç’€‚Ì‚€‚½ª€„Âö€‚ŠË€‚Ô¶€‚gift-Ï‹€†”­†¶„€†𯀄–Æ„ÿ€‚ing|‡€€ˆ·í€‚ic š€€‚›„€„ [[opeï‚€‚urÈ£€„programú¬‚Þ‰€’•‚‚ softwò‡€‚Õ¯€‚¼‚€‚à‰€‚TƼ€„cyber-È€¯’€Œ±ƒ€‚[[GNU䊀†LinuxŠ€€†IndyÓ„€„¤…€‚΀‚ݤ€‚kˆ«€„Õª€‚!-- ***NEEDS SOURCE THAT E-GOLD IS USED BY ANARCHISTS*** [[PºŽ€„ͬ€„cryptograp˜¯€‚⊀„냀„anony耄digÿú‚ ñ€„˜„͆€e-goðÊ„Ÿ€†Local Exch¸á‚«Œ€„•€‚SƆ€†¦€€„Ò‡€’郀‚ч€‚²€‚ƒ€‚aé’€‚nì½€‚. 
--¯Ô€‚¦Ñ€†ô„€Œö°‡€†«¶€ˆº…€Žƒ„€‚‘ó€ˆbÈ¿‚weaµé€‚žŒ€‚efe²¼€‚±¢€ˆa±Ù€‚ã´€‚Ý€€†À•‚Õ®€†Œƒ€„Œˆ€‚Û€€Šage†ê€„s¤€€„ð„€„‹€†ù†€‚eviõ‰‚•ã€¬modulaware.com/a/?m=sÑš„›ž€„id=0684832720¦„€‚ S‰˜€Š In—‹€‚dual -¢Š€‚s•••Ø€„Ñ£€„Ú€‚¾„€‚±€˜«®€®ü¹€†ypt•Ï€Ž×€„§Û€„CypherŽ§€‚ªü€„´‹€†Û„ܨ€ˆ±Ô€„þ€†©‹€„㢀„­±€‚ÁÔ‚¨€€†]] (ª™€„Ÿ€Œ[[w™¾€‚)«€‚bª€‚•€†¸‰€ˆiüÛ€О‚û—€ˆ”ÜŒ…„€€’øÇ€‚¢µ€„ª¤€‚¾ãŽú€‚ÛË„ªŠ˜ˆ‡‚ÿŒ‚òÌ€‚°©„´ÜŒ|㙀‚Åœ€Šï„€‚õ˜€Œã„€†ô퀂ï’€ˆ¿€‚㮀‚·š„ô€ˆ¶…€„…Ô€„µ—€†Ÿö€‚Ç‚€„”‚€‚ؽ„‚£€Š,¢Š€†domeØ‚Ý¿€ˆ¹€‚á‹€‚÷›€‚Ûš„¡Œ€Šubôõˆ»‚€„ü߀‚olph BŠ½€‚eЀ„´˜€„’€„뀆©€‚ÇŒ€‚hûž€‚Ùò€‚Ú€ŠÙ†€‚¿ßˆ¯ˆ€‚…³€˜þ¯€Š¯»€‚σ€‚_é«€„/warÅ€€†½€€„ž»‚øª€†WºÑ€‚™”€†H倀”ñ™€„ò„€”. A loÞ–€„­„€ŒÅ‚€ŽÄ“€‚­Ã€„‰’€ˆÊ€„ã°€„d¢€ˆParlia¿Š€‚¨‡€Šª€†I–Ä€Œ¹“€„¿ú€ˆ Žƒ¬€‚¾–€Šsò ‚úˆ€‚in ò†€„‰”€‚, beÈÀ„œ€€Šam긆˜Ä€†don±€€‚È‚€‚ü€„‰Ù€„µ‚€˜©™ˆ.aolÔ‡€„vlntryst/hitler«‚€†Þ€‚Vƨ€ŠÛ‰€‚Ìý€‚y IÊ €ˆš€‚Ç‚Ö«€ŒH»€€„€˜½ö€„€€Š؆€‚—–€ˆ†€„Ô€€‚äÈ€†å‚€‚Ï€ˆ…€ˆemphasizeÉîŠƬ€‚Ø€‚y©Ò‚Ç»€‚gardÊð€Š´…€„Š€€†neiΆ€„b¦Ò‚t©›€‚r bulletsŒ¨€ˆÙ€„€ŽÅ«‚Ä€Œï‚€Š½Ì€‚Ó¢€„­²€†''The EthܬˆV¦€€„°Á€¬ˆ¦‚÷ƒ€‚¶‚€‚Ìð„ˆ‘‚²€€„/¥ª‚cs_of_€€†.phpæ‚€šù‚€‚§€€†—‚€‚§€€†ׂ€”ù€†George H.Ý퀂th]]. 
(Alsõ”€‚Ò–€Š§€†•ˆ€„„Oxymor½–€‚r What?—‚€ˆÏ€„Joe Peacotû™€ŒÒ€Œ €€„Fú°€‚Woodworõ€€‚±æ‚技‚Secê…€˜±™€–ꃀˆÓ€„냀Šaõø€„o‹€†ë¼€†³€‚ဂÊ€€„Ò‰€‚Ž‚€‚Áž€‚¸€€‚¢„§š€Œ»¤€†ÛÜ‚ú—€‚''È¡€†'' eachˆ¨Í€‚ut–¦€„¿Á€„aÉÿ€‚o¥±€„Ü—€„€€‚†¡€‚,ÊÅ€†µ€€ˆö¦€‚¨Œ€‚“瀎³‚‚´…€„y|ˆ€€ˆÆ‹€†‡€‚äÛ€†̉€„å¾€†Ѿ€Š›Š€‚ aÎ󀄣•‚¼Ï€„d½É€‚û¤€†¶í€„f¤Š€„coe¢‚Ç…€„æ©€‚eΟ‚€‚¨…€Ž³Š€ˆ˜€†ng¹†€ˆÀ¾€‚¾¦€‚þ€‚Ý›€„Ì¿€„˜Ü€„Ì€€„Ì¡€†뀌»ø€„逄 Œ€†Í€€‚rt–•€ˆ®€„coa¨Ö€‚on-builò†€‚¬‚€„at lea»Ã€‚™î€Ž•«ˆ·€€†øË€Š‰¬€‚úŸ€‚s‘¸€Š¡Š€‚ý€‚Ò¨€‚é‚€‚Žƒ€‚adj䮀‚vž•€‚.— †C±³€‚¦°‚烀‚õ€ŒÓó€‚:''M‚”‚³¥€„le:­¾€„®€€¨¨©€‚'''÷ëŠ.󊀂øÒ€ˆ…€Ž¼‹€‚Ë„€†‹€„¡²€Š››‚ith ÏŽ€Š¿‚€„Ç®€஀‚meï—€Š»Ù€„ëþ„Û“†beöŠ€„o«¶€‚¿€€‚„ž€‚“€‚îò€Šúª€‚„€€„à…€‚Æ€‚erick Engels„ŒŒ§¯€‚¨€ŠÆ´‚‘¨€„Ú€‚ñƒ€‚Ø€€ˆ “î‚gh:˜‚€‚¹‡€†ADz€¨†€‚«Ã€ˆlﮀ†–󀆵”€’§”€„£Œ€„؆€„;€‚–Ž€ˆ†€„—€€‚by ·³€‚ÀŸ€‚¯…€‚È€€‚¼È€†›„€‚Æ €‚É«€„ÈÅ€‚Ûü€„Úþ€„¤€€‚Ü„€„¶€€†by°±€iflõ¾€‚bay‰Ä€‚怆c„¾€‚n —Ÿ€˜¶€€„¢Å€„Ì£€„¯…€‚Ž–€‚……€‚allî‹€†i”€†¤Å†®ú€„«€‚y󰀎w½à€‚虊ÊŽvaiƒÁ€‚t¤š€‚¬§€‚«ý†’‚€‚Šßˆ¶€’က‚Ðó†—€ˆð€‚aÑЀ‚¿Šº€€„´¨€‚á’€‚‡È€‚›À€‚Woul³¤€†΄´ó€‚mun´„€ˆlˆ–€‚Ö›€‚Ƈ€‚ñ炋ׂŸÉ€‚Û‚„‚»™€„ဂ‹ž€„²Ž€‚‚€ˆÈ€„Ö€€‚ஆŠ…€ˆغ€Š˜€€‚ºÞ‚geois?š„€†³’€ °½€‚²«Šˆ¶€„‡€„ve/”€€‚/¸€‚s/1872/10/ø€€Œ“€‚ ''On ¼í€‚“Ÿ€‚y''”€”ø†€„Utop–™€ˆú†€‚Ž•€ŠŽÅ€„ò†€†Ÿà€„쑆ɘ„föž€‚ýÈ€„or òŽ€„Àõ€„Æ€€„ÞŠ€‚×™€„’ƒ€‚›´€‚Š½€‚aÒ 튀Š'ü‰€‚nic‰¡€ŠÕö€”Ñ€†„Ü€ˆ, Carl LandauËꀂõÍ€”Á»Œî±€‚›¦€„€†‘€‚‘‡€Šö½€„‹‡€†un¤Ë„Œ–€Œ±€ˆ šƒ‚唀‚at 䤎ᆀ‚a¤‘€ˆ†Ê€‚er evil¦ƒ€†Ï€‚𩀂Ç‹€‚ªŒ€‚ˆ‰€‚¨Š€‚€€†»Ã€†¬–‚‘ˆ€‚ce.–€€†€ù€‚ꀀþƒ€„Œ€‚¢„€‚a»€€ŠÔú‚适°¶„¬‡€ˆcease iÓÆ€‚¯€‚Ø€€ŽÆÚ€‚ÄÜ€†s½€€†´€„®¥ŠabsurdʹŽýƒ€‚Ù”€Š[ß‚€”|è‚€ŠÁƒ€‚™€€‚ŒŒ€‚å‚€žžÕ€‚H±ß€†ê…€‚IdeÌ€‚ndÜÃŒóî‚(1959) (retrñà€„ׯ€†Ÿ€†à„€ŠË¬‘€„€ð€’È¿€‚Ü„€‚Jan„Õ„2§Ö‚[[2006]É…€”“«Ž¥ˆ€Š½Š–¢‡€‚î­€˜Œ€†ž§€„Š„€Š♀†Ë€’‘…€˜ÍŒ€‚à€ŽBenjamin Tuck𚀂ÅÊ€Œ°‰‚…€‚y-f™¤€‚ôš€‚í­€ꃀ„š €‚›€„𞀌ËÝ€„ƒ€ˆ²€‚Œ€€‚.Ø€Ž¥‹€†×…€Š‡«€‚¼€†s SiŠž€‚Óø€ˆ¼é„À‚proudly´Á€‚˜ý€‚ê„€‚•÷€‚acterѶ€Š‘ƒ€†ˆ†€ˆë–€Š•ˆ€„[[SŸ‘€†ŒÔ€„|Clasò€„Á€€Š«ˆ€„¦Ž„„’€‚ᢀ‚Ï‚€„Ÿ€€Œšš„¬€Š„‡€„Õÿ€‚¶€†ú“€‚¦Š€„ƒõ€„s˜‹€‚ºº€ˆ–€€Š[[petit›Š€iƒ¦€‚‚erha绀‚¿í€„lumpenprol妆ø¥€„œº€‚e.g©É‚ekhanovõ…€”G. 
V›€€Øú„‡‚€†òÀ€’ØŠ€‚å‚€‚튀”pЀ€Š/²ó‚úØ€ˆ/óó€‚x슀„Ú…€Šů€„™‰€Ž]Þ€€†û˜€†áô€Š–ˆ€‚ a 𴈺‰€†¥í€ˆÜ€€‚— €”Ú‚€„ô“ˆenä‚€šs spoilÉ¥€‚middle-³ƒ€„©†€„dilettante⬀„×€„¸ã‚ýƒ‚ÑŠ€‚¶°€„ƒ€‚õŸ€„ï«€„Ï»€Ž¾¶†€€Œå×€‚û‘‚e”¤€‚‹ç€‚'''TacË’€‚ߌ€ˆ‚ƒ€‚ÈŒ€ŒIÝ€€‚退‚ëÅ€‚aÖ€‚Ž„€ŽΓ€‚Ê€†±‹€†Ã’€‚by '›×€ts', 'Ì®€‚²¤€Œts'ꈀ„‰†€„«„€„ŠŠ€‚eÒ»€‚g 'tŠ€‚üŽ‚ק€†'Š­€œÔœ€‚reaucraø瀂€‚Ö¨€ˆbehèž‚a dogmœ€€„facadˆŸ€¤­Ì€ˆ.ôñ€‚ing—Æ€‚£„€ˆŽÅ€„s/SI/en/display/20å΀‚À逄°Š€„·…€‚Spectacle]뀂aƒª€„ 91쀀‚„€ŠÜ‚€„Hypocrisy솀„Ú˜€„¯‚€†®™€‚i¼®€‚†§€†ýš€‚¬…€‚’Š€†Þ‹€ŒΡ†P. d'H¹•€‚С†,î…€¤pinn½€„~suÅÄ€‚ne/whm2003/h»€€Š2ã…€‚üï„И€Žý÷€ˆ¿…€ˆc¿‡€‚è¡Œ倀†õ…€˜¿ƒ€‚ﻀ„Ï„€†Ó¤€‚Ƀ€„ÙŠ€‚Ý„€‚Ͷ€’s’°‚tably [[PiÅ΀‚-Joseph耀Œ|ñ€€Œ¬€ŠMikhail“Þ€Š|›Þ€ˆü€‚ä쀘[[hä‚€Šȯ€‚¤¨€ŠË€‚צ€„ç’€„†Œ€‚ÙÏ€†Ñ‘€„•€€Š›€‚h‹€Šdism®–‚Û‹€„䃀‚“‚‚ʳ€„prejudÊÌ„¢ñ‚9Ê­ÔÞ€„È€€‚ɪ€‚Íý„¥’€‚¶–€‚‰Ü€„¯€€‚á…€„nt-ñ”€‚ø€€’ƒ†€ˆ„€†iseÌ€„rŠ¨€‚²€ˆ߈€„¯Ê€‚inuŸ†€‚†ª€†[[euro•‡€‚ric¯‚€ˆœÑ€„šŸ€„û€€‚impИ€‚„Ž€Ï€‚ùÛ€ˆÜñ„î‚€Šš€†Ü¿ˆª€‚ßø´€ˆ«€€†[[Cercl´€€Ž]].ç…€‚¤…€ŠÁ€steÓË€‚éµ€‚Ò’€ˆ׆€†iö„€‚€å€ŒŽ€‚‡³€‚gøà€„¤ä€˜ÿŠ€†­¹€’ÓÅ€‚Ѐñ€‚€Ê€‚§ƒ€tÔÀ‚󧀂ò€‚Ì€†Ꙁˆsus¨‡€‚½‘€‚㿆 syÍÉ€‚hiz‹‚€‚by¥õŒ÷€€†¸€€„Û˜€ˆ™€€ŠýÆ€†适ÀÄ€‚µÄ€„ÝÛ€„ÐÒ€”À¾€Šî•‚ƒê€„‡ƒ€‚¿‚fª†Óƒ€Štئ€Š¦ß‚ŠÓ€‚ly¥Ë€„Ú〄irdµ‰€‚Ì‹€„ÜŽ€‚o·Ä‚‘‹€†¹€Œ×Ï€‚ñ玔´€ˆÖ€€‚•‡€‚䀈s΋‚쌀†Ü„€ˆΖ€‚‘€‚´œ€‚ñ‰€ˆက„‹Æˆ‹ÌŠ€€‚(üŠ€ˆ) Re¦Â€†anÏ–€’ù©‹ù€„í­€‚ÿ–€†„…€Œ·£€‚Stanley G. Payþ¬€ˆï倆ü°€‚he„φÜ倂gimîÕ€‚盀‚ª÷€„ª€‚õÄ€„œ€†ÖÆ„¥®‚egot¤º€†Ж€‚ÒÔˆò€ˆƼ€ï´‚È©† µé€„쀀„þÌ€‚´…€’Ò„€”|ª…†Ûˆ€‚é„€„¶–€‚¥ƒ€†Ü…€’gmu.edu/dÐÅ€‚î„€„s/e“Á€ˆs/bcî…„/spainî…€„Ϩ€‚Õ„€†o-SЀ†ô‚€„ÅÊ„ÙŠ€¤£€„︀„œ¹€‚henþ€‚aß–Noam_chomsk©ÃžŸÌ€„|©±€„am C¥€€†Ô²€‚1928–)]]ÞÝ„Þ™‚üƒ€‚’…€Ššú€‚ݧ‚Õ£ˆ˜œ€‚ÌÉ„”Ù€ˆù•€„„¡€ˆrÞÅ€ˆ™Ð€„Å™€†ƒ€‚ø€„celebrÑ¥€†°œ€‚»„€†¢‘€‚­É‚àÒ€‚ï›𘀄剀†­…€‚AÓ£ŠÂŒ€„™€€ŽÖ€ˆûÕ€„½Ç€„»„€‚²–€„¾ˆºÆ€‚½×€„—€ŠÚ€€„ƒŒ€Šly éli¢¸€‚øÖ€†è³€†±€€‚ºûŠñº€‚¬€ˆœƒ€„¾€Ž¿€Œª€‚-avo‰Û€’ts:
+
+*Ñ€‚À–€‚I¸ä‚쇀‚essÔà€†‹»€„gu¨€„Å“€†”ƒ€–»€€Œ×Ĉ fiö‡€„܉€‚ˆ“€„ [[Ursula K. Le Guû€‚¶€€ˆÛŠ€‚al ­‰€‚À’„ó·‚öä„ ZinÀ€‚
+* —ƒ€‚r¯…‚È×€ˆÖ€€ŒHËÕ€‚” sÖ”„Ô€€ˆ[[Avant-ீ„‡€‚r·‚€‚΄€‚icoláƒÿ€‚sselló‚õí„Denõ™‚¸†€‚Š‚€†ì ‚t¦°€‚‡¾Œ½€‚¸Š€„¨Á€†Æš€‚ down¥€€„ÆÚ‚penhageÕŒ€‚²†€„²ç€‚¡ƒ€‚ω€‚employÙ‹€„Ä’€‚ºú†€–€„¡€„Ðû€Š ðË„˜Ó€„‹€‚‡»€Žª€€„¨Š€†e­»‚”Ÿ€ˆ•½€‚Ÿ¤€„ity)|Š€€†˜ª€‚ø€€„squa軂¿€Œê„€‚kÝŽ€†Ÿ§€‚sti¸ë‚hrÿ„€Š¤€‚ÅÒŽì怂Ü€‚ataÈË€„Ю€„Mž¬€‚œ¦€‚¤—€†fa|¥¬€†ÿÔ€„to Ú΂NaziüŽ€ˆË‘€„¾†ü€€ˆµ‘†åþ€‚õÖ€ˆ‹îŠ¹…€„¼î€Œ÷¾€‚’™€†ª‡„­î€”Ü°€Œ[[AŽÏ€†“€€„ò€€ˆù€‚•‹€„ƒ‡€‚d™Ý„áÌ€‚so˜×€„Û‚€†gÉ€‚±Øˆ™¦€‚·Ã€‚­Ï€‚ß„€†­€ˆ𓀂ÐÒ€‚ÕÔ€ˆŸ¢ˆ. ¡Ö†™¯€‚¥€‚¯Â€„©Æ€‚ty¥©€„ø‡€Šm rÅÍ€‚þ€‚耀Œ‘×€‚ŠË€‚ø‡€ˆ¢«€‚Œƒ€‚lin„ë‘€”žÆ€‚Ž€†က„ª™€Ž ½€‚ rock,ÉÀ€‚òˆ€†퀈û’€Šg‡»‚ip hop€‚œ¼€„kÇ€€ˆ¬ˆ€‚Ž‚€„becå•€‚gé’€‚‹„ÑÌ€‚dium˜€ŽÓ¤†£€€‚É€‚Ô€–essage”Ñ€‚nš€€„[[U£Ü†Û瀂dom|UK‹“€„isò€†Ëà€ˆ©®€ŠÍ€€‚[[Ë€ŒÌ€Ð¥€‚e b¶€‚«¯€‚Áœ€†ˆ‚›‹€„Á€€„Ÿ€‚ͪ€„þŠ€Š䀄ç²€‚…¿€†ñÈŒÿ„€‚™¤€‚•‡€†[[Dutchû©€ˆ|DutŠÅ„Ђ€„ˆEx]]Ú¾€ŒexemplညÜ·€†ÉÖ€‚ion.
+''¼ù€‚…¿€Šdetai‚€„»ù€„œ€†o-ÐÀ€†©±€†’ €‚ü‚€‚==
+È€‚ÁÅ€‚(PlÒ¦€„öô€‚¦Ñ€‚²„€„adõ‚€„ך€„i¹†€‚ü•€„à€‚¤€†À¨‚ ex”‚†¾†êÙ„ÛœŽÀ€€”à„€‚툀†’€†Œ€€‚•É€„p£Ö€‚age)ëÄ€ˆÙŽ€‚r ¬€‚à˜€ˆ¤€€Œrele׊€‚€†臀‚p㚀†Ѐ€Šm»Š€‚Ù‰€‚Û”€‚brieý®€‚mmaê¡‚Š€‚Õ€‚sù€„¡å€‚–Ê€‚ÿŒ [[ž€®ˆÃ€†ïÅ€„œŽ€Š«€€‚ö€€Œ¥‰€‚é‚€ŽŠŠ€ˆ–€€š²òŽ¯€€šÛ‡€Š—€€ˆâ倆Ž€€Œþ‹€Œ¦Ù«Œ½€€ˆÛ¬€‚ª€€œnihi²€²»†ô€€bùÝ€šà€‚¥€€Šˆ€’ ¾€‚•¡€”†€ˆ“€€„ûü€‚¬€€’ÌŽ€‚΀‚˜§€‚œ¢€„”¤€»Ž€‚÷Ä€ŠÜ‚€‚symbolÏ€€„ú‚€„þ“€†ism/Links|LÌ„€˜§Ú€‚n«ÿ‚­€€„€€šÀ‚˜€€¦ÓÞ€”¥€€ŠMaj®œ€†å䀄¢–€†€ž€‚œ€‚Àµ€‚t®€€ˆPas瀂§©€‚Å“€„ó€Œá…€‚ŒŽ€‚Ê“€„ꀂ===®¬€†¬‹€„™°€‚¥‹‚=
+*Ä€€‚ç²€딀„871)™€€‚Hayš€‚¯‚iotš€€†86š€€„¨ˆ€‚¾ýŠÊÓ‚×Ú€‚(1917 ÛÉ€„­Žˆ192É€€†ûûŽÅâŒ΀€„Ÿ€€Š§›€ŠRŒ·€Œž€€†36) (ψ€†‡ƒ€Œí‹€‚ÿ–€†瀆½€€¦)
+*³¡†68,Ž€Š(€€‚€„WTO Miôš€‚Ò×€„ Conúê„—¾€†1999|¢€€„eetˆ‰€‚in SeattlŸ‚€†999)Ò‚€„Books=¼›„main›„€œ™Ç‚oksý‚°‚€‚º•€Žˆ€‚ sß²€„Lj€‚¥€€„³¡€†㨀„“§€„Š›€Œï’€„߈€„­‰€‚”Ý‚ˆ€„ì½€‚¼†„à†€„ᛀ‚b ‚Îâ‚ùš€ˆÚˆ€¢ꀀ„׈€† £€œ톀‚ò¬€‚ÁÔ‚Õ„€‚뉀‚ÞÇ€†Ä‚Çš€Šd¬‚mac.pitzerÏš€„œƒ€Št_AÈ©‚ves/b죀†/godÓ÷€„ate‹€€’_ch“¡†]ˆ€‚½¼–…€†Ó€€Šmá’†˜Ú€† Essays€âgÁº†/Á»ˆCW¬Ç€„þ€€„Peter K¯â€‚tk‡‚€ŽMutÏ€‚Aidî²€‚FŒô„®ƒ€‚E¥Å€‚ê—€‚|¡€€Ž‘€”§¸€‚gutenberg¾¢€„eÝÊ€‚/4341Á¦€¦÷€€ŠWꀄsᣀ‚ý°€„?Ü€€Ä360Ì€„Rudolf RockÅ´€†Òƒ€‚Ì‚€†o-Synd‚ˆ€‚Ö‚€‚(‹„€‚)|AÅ€„š€€”‚Ê€€‚Ç°œË€€„Ê…€‚ÐÇ€ŽÕŠ€†y²€€‚¢„€Œÿ€‚À§Žr¿«ˆÁÇ€ˆ†€€ˆ.asp¯€„Max Stirn󵀆­€‚က„go AÁ•€„s Owdž†‚…€Š߀€‚df.€Ö‚se/~triad/sÀ€†/Ô€€„äá”Ô€€Žœ“€ˆ。ä…€‚¦Úœက””•€‚domnowƒ€„®¡€‚inyou·„€ˆÓŠ€‚Ö‚€†Ì‚€‚bÎÔ‚gion/Þž€ˆÝŠ€„ÙŒ€‚f󊀂Ž¦ŽñŒ€šω€’°€€†ƒ€„˜€€ˆ‚‡€‚EnglishøÓ€‚di€Ú†¨€€„ÕҜ苀†R™ƒŠáò€‚æ’€‚divž—€†=¢€†font-size: 85%º€†ëÖ€‚¬€€„õˆ€Œsº«„“€€„/div €€„ê°€„ÏÙ€„‘‚²€ˆnƒÇ€‚r°ý†¬–€„€€Œ·Š€‚À€‚†€‚cÖ¼€‚T´¢‚mœä€‚«‰€‚re-‹ý€‚Õ®€‚
+# {{note|bill}}‡ƒ€Œns52.super-hÐ߀‚“Ì€„~vaz1net/¬€€‚þ³€ˆ €‚“°€ˆ/­†‚aw¦ƒ€ˆÙ€€ŒŸ¯Ž߀€”€€Žö®€„±¯Œ·€€’èþˆµ€€flag.ª‘€„¹€€„Ž÷€„t/¬€€Šˆ€€„_pö¯†牀ˆ‰€Œîù€‚etonÑ€€Ñ…€‚ŒÁ€ˆžì€ˆꀆÿŽ€†_ñ“€Œ/»€€Š_boÝÚ€€‚ Ag®Ä‚tã­‚¨Ÿ‚¼Î‚A ˆ - Bo´°„Ö„€Šts‰€Yarros-NotþÀ€ˆ’€‚[Vi½ô‚ œ€€†£€€ˆ, •€€†ƒˆ€„󇀈'' VII¡“€‚˜¼€Œíµ€„1892⌀‚怀Œtotseõ‚€퀂“€€„΃€„Ÿö‚¹€ˆ̃€161594߀‚l ¦€’¾¤€‚Ë€Šö†€„•£€”•‡€‚£‰Œ˜“€„==
+Έ€‚overwhelmÏ€‚˜¨Œ怄¾›†“ˆ€‚°€€„î—€‚Ÿø€†—ž€„ø‚€ˆ«ˆ€‚´—€Œô쀄«Û€‚d€‚â…€‚ù“€žm´¿‚ÄÌ€‚¹à€„s|link¹Þ€‚bÄ€‚¿•€‚
+{{wikiþ†€‚e|Deȇ„¥Õ€‚ˆ€‚ö€€Œ}}
+*«…€Š”€€†oblogs.¸€ˆû„€„‚€ˆ™€€†] Blog—®€„¦‚€ŠÎð‚Å€€Œ¾€È¼€€ˆy ЀŠ]þ€”•‚€„“瀂ûÜ€’Ź€†¸›€†㟀ˆ³€†‚€„‰š€‚£š€‚‰„esij€†Øž€†iˆŽƒ€„o³É€‚º§€ˆ”º€„s.
+*HuÚ€‚ÒÊý€€„Š«€‚«›€‚þ‘€‚ÌÝ‚Ÿ‹€‚‹ñ€‚rt bioÂþ€‚ñ‚€„•€„ô÷€þ‚€‚s‰…€Œᾂ´ó†on«“€„‘…€„bleed/g°é€‚ry‡€€Šß¼€ŽÞ’€ˆDaily B«€€‚'sì‚€Ž EncycÐ¥„ia]ø‚€Žô…€‚­‚€‚ЗŠ/þ–”] (ñ。ž€€Ž|œ„€‚À€„²€„Ñ•€„‰„€ŠÊ€€„w–€†«à€‚®¿†l Work“²€‚Õ‚€†Ž€€‚ld]
+Ë€„란‚Aº•Š!‹ƒ€‚„€†Ì„©‚€‚ …¢€‚‚½€‚¼€€„ •€‚ɺ€†°þ€ˆ¨ñ€‚öž€‚s far beyoƒ‚anageable sizÆò‚ÍŸ€ˆªã„˜ƒ€‚îÀ€‚¡Š倀„®Ë€†ʼn€‚裂ᆀ–ßÛ€Žý”€‚i•…†í®„‚´Ö€„О€‚‡›€„º€€‚‹È€ˆknown •‰‚¥„‚ↀ‚ÿ怊Àš€‚ll b­ç‚utin¸‡€‚›€€‚ᨀŒ¢€„‘„€‚΀€‚kæ°‚ú€”Õ瀄£ú€‚lŽã„§ €Š»£‚Category:÷ƒ€Šm|*]]˜€€’For‘Ó€†ø²€Žë’€ˆ„€†ª€€Pþ‰€††Õ€‚•‰†ˆù„rî‚ints¶€€¾Á¹€†e©€€¬ù½€‚±€„ilosophy¨€€”
+[[ar:لاسلطوية̀„ast:ª€€‚quismu’€€„bg:Ðнархизъ왈€†s«€€„hiza‹‘€‚ÃÆ‚€€„¼€€„e¢€€„c¢€€†ƒ‚€„Æó‚
+[[d¤€€†k£€€Œde€€„£€€’eo’€€„kiismoµ€€„eÈ€€†Ú€€„‘€€ˆt쀀„Ý‚€Œeu¡€€„×€€‚ €€†fa:دولت‌زدائی½€€„fi­€€Œi€€†r΀€„…€„—€†gl¢€€„ò€€he:×נרכיז×Ê€€„hø€€†chiz《id’€€„‹€‚Ë€€†is:Stjór඀‚sisstefÄž€‚
+[[iÇ€†ø€€„¸€†ja:アナキズム¨€€„ko:아나키즘“€€„l¼€€Œzmaû€€†n¶€†Ï€€„ü€€†nn‘€€„€ŽnÞ‚€Š¡€€Šp³€€ŒzɃ€†p×€€†û€r³€€†î‚€Œru–„€˜”„€ˆscoÈ‚€Ž€„simple”€€šk€€Š´‚€Œs„€†绀‚¸€€ˆr š°䀀Šv½€€„À€‚€€„th:ลัทธิอนาŽ€€Š›à¹„ตย±€€††ÅŸñ€Šzh:无政府主义§€€„zh-min-nan:Hui-thóng-tÄ«-chú-gÄ«]]</text>
+ <™”€‚áö€„€€‚</page‰€€„õˆ€‚ˆ€€‚ <tÿ。>Afgha’¡€‚anª£€†y</™€€†È€€†id>13</id€€ˆ×€€’€€€‚ €€„5898948¦€€ŽÞ€€„m¸ˆ‚mp>2002-08-27T03:07:44Z</Ÿ€€ŽØ€€„ <¯Á€‚ributor“€€Š <username>Magnus¹÷€‚ske</—€€Œ¾€€ˆŠ€†4ƒ€”²Œ„Ò€€˜<miâ䀂/·€€Š<³¥€‚ent>whoops³€€‚€€†×€€ˆ<Þ€‚ xml:space="õ¥€„rve">#REDIRECT [[¼‚€ˆ·‹€‚Ò‚€•ƒ€€Geo£È€„—ƒ€ ì€—ƒ€²9“‚€”÷‚€Ž—ƒ€†2-25T15:43:11—ƒ€Ðip>Con꽄 ¼‚cript</ip·‚€Œü‚€ÔAutoÔÁ€‚Í Ô€€ŠÁ€€‚Šƒ€â±‚€ŒŒƒ€œ
+£†€üPű€„¢†€ž5ã‚€ŽŠƒ€°50Ɇ€ŽÙ„€‚¢†€ž1T10:42:35Šƒ€Ð¢†€Œ-‚›€‚ril†€ª166§€Ž—ˆ€‚ò†€ Ÿ†€¬fixÌ’€„µ€€‚–ƒ€âDemÉ…€†«œ€‚¦†€œテ¤†€ 7ñ€Ž™ƒ€²1¦€€Ž™ƒ€ 5-17¤†€‚30:05Zþƒ€‚¼‰€ØAxelBoldtšƒ€ª2ÿ€€”˜ƒ€¤ö‚€Šˆƒ€‚redirect</•€€Šö†€Šà…€‚®‰€¾®‚€ˆ‡ƒ€œ¡†€¢ \ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 0000000000..d818790c13
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
const (
	// Bit layout of a token (see type token below):
	// bits 0-16  xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
	// bits 16-22 offsetcode - 5 bits
	// bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
	// bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	typeMask    = 3 << 30
	literalType = 0 << 30
	matchType   = 1 << 30
	// matchOffsetOnlyMask keeps only the raw offset bits, stripping the
	// offset code stored in bits 16+.
	matchOffsetOnlyMask = 0xffff
)
+
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH]
var lengthCodes = [256]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 28,
}
+
// lengthCodes1 is length codes, but starting at 1.
// Used by AddMatch/AddMatchLong to index extraHist, where entry 0 is
// reserved for the end-of-block marker.
var lengthCodes1 = [256]uint8{
	1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
	10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
	14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
	16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
	18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
	19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
	20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 29,
}
+
// offsetCodes gives the deflate offset code for small (adjusted)
// offsets; larger offsets are handled via offsetCodes14 in offsetCode.
var offsetCodes = [256]uint32{
	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
+
// offsetCodes14 are offsetCodes, but with 14 added.
// Lets offsetCode handle offsets >= 256 with a single shifted lookup.
var offsetCodes14 = [256]uint32{
	14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
}
+
// token is a packed literal or match; see the bit layout documented on
// the constants above.
type token uint32

// tokens accumulates a block's worth of tokens together with
// histograms of the emitted literal, length and offset codes.
type tokens struct {
	extraHist [32]uint16  // codes 256->maxnumlit
	offHist   [32]uint16  // offset codes
	litHist   [256]uint16 // codes 0->255
	nFilled   int         // how many zero histogram entries Fill bumped to 1
	n         uint16      // Must be able to contain maxStoreBlockSize
	tokens    [maxStoreBlockSize + 1]token
}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nFilled = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
// Fill bumps every unused literal/length and offset histogram entry up
// to at least 1, recording in nFilled how many literal-side entries
// were artificially added. It is a no-op when no tokens were added.
func (t *tokens) Fill() {
	if t.n == 0 {
		return
	}
	for i, v := range t.litHist[:] {
		if v == 0 {
			t.litHist[i] = 1
			t.nFilled++
		}
	}
	for i, v := range t.extraHist[:literalCount-256] {
		if v == 0 {
			t.nFilled++
			t.extraHist[i] = 1
		}
	}
	for i, v := range t.offHist[:offsetCodeCount] {
		if v == 0 {
			// NOTE(review): offset fills are deliberately not counted in
			// nFilled — presumably because EstimatedBits normalizes the
			// offset histogram by match count, not by total. Confirm
			// before changing.
			t.offHist[i] = 1
		}
	}
}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.AddLiteral(tok.literal())
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+ }
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst *tokens, lit []byte) {
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+}
+
// mFastLog2 returns a fast polynomial approximation of log2(val).
// Ported from https://stackoverflow.com/a/28730362
func mFastLog2(val float32) float32 {
	bits := int32(math.Float32bits(val))
	// Exponent contribution: biased exponent minus 128.
	exp := float32((bits>>23)&255 - 128)
	// Rebuild the float with its exponent forced to 0, giving the
	// mantissa as a value in [1, 2).
	bits &= -0x7f800001
	bits += 127 << 23
	m := math.Float32frombits(uint32(bits))
	// Second-order polynomial correction of the mantissa part.
	return exp + ((-0.34484843)*m+2.02466578)*m - 0.67487759
}
+
// EstimatedBits returns a minimum size in bits, estimated from a
// Shannon-entropy model of the literal/length and offset histograms
// plus the fixed extra bits attached to length and offset codes.
// Call Fill first if zero entries should be smoothed.
func (t *tokens) EstimatedBits() int {
	shannon := float32(0)
	bits := int(0)
	nMatches := 0
	// Literal probabilities are normalized by tokens added plus filled
	// histogram entries.
	total := int(t.n) + t.nFilled
	if total > 0 {
		invTotal := 1.0 / float32(total)
		for _, v := range t.litHist[:] {
			if v > 0 {
				n := float32(v)
				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
			}
		}
		// Just add 15 for EOB
		shannon += 15
		// Entry 0 (EOB) is skipped; it is accounted for above.
		for i, v := range t.extraHist[1 : literalCount-256] {
			if v > 0 {
				n := float32(v)
				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
				bits += int(lengthExtraBits[i&31]) * int(v)
				nMatches += int(v)
			}
		}
	}
	// Offset probabilities are normalized by match count, not total.
	if nMatches > 0 {
		invTotal := 1.0 / float32(nMatches)
		for i, v := range t.offHist[:offsetCodeCount] {
			if v > 0 {
				n := float32(v)
				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
				bits += int(offsetExtraBits[i&31]) * int(v)
			}
		}
	}
	return int(shannon) + bits
}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDeflate {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oCode := offsetCode(xoffset)
+ xoffset |= oCode << 16
+
+ t.extraHist[lengthCodes1[uint8(xlength)]]++
+ t.offHist[oCode&31]++
+ t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+ t.n++
+}
+
// AddMatchLong adds a match to the tokens, potentially longer than max match length.
// Length should NOT have the base subtracted, only offset should.
// The match is split into multiple tokens of at most 258 bytes each.
func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
	if debugDeflate {
		if xoffset >= maxMatchOffset+baseMatchOffset {
			panic(fmt.Errorf("invalid offset: %v", xoffset))
		}
	}
	// The offset code is the same for every emitted chunk.
	oc := offsetCode(xoffset)
	xoffset |= oc << 16
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			// We need to have at least baseMatchLength left over for next loop.
			if xl > 258+baseMatchLength {
				xl = 258
			} else {
				xl = 258 - baseMatchLength
			}
		}
		xlength -= xl
		// Store the chunk with the base length removed, as AddMatch does.
		xl -= baseMatchLength
		t.extraHist[lengthCodes1[uint8(xl)]]++
		t.offHist[oc&31]++
		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
		t.n++
	}
}
+
+func (t *tokens) AddEOB() {
+ t.tokens[t.n] = token(endBlockMarker)
+ t.extraHist[0]++
+ t.n++
+}
+
// Slice returns the tokens added so far as a slice backed by t.
func (t *tokens) Slice() []token {
	return t.tokens[:t.n]
}
+
+// VarInt returns the tokens as varint encoded bytes.
+func (t *tokens) VarInt() []byte {
+ var b = make([]byte, binary.MaxVarintLen32*int(t.n))
+ var off int
+ for _, v := range t.tokens[:t.n] {
+ off += binary.PutUvarint(b[off:], uint64(v))
+ }
+ return b[:off]
+}
+
+// FromVarInt restores t to the varint encoded tokens provided.
+// Any data in t is removed.
+func (t *tokens) FromVarInt(b []byte) error {
+ var buf = bytes.NewReader(b)
+ var toks []token
+ for {
+ r, err := binary.ReadUvarint(buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ toks = append(toks, token(r))
+ }
+ t.indexTokens(toks)
+ return nil
+}
+
// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }

// Returns the literal of a literal token
func (t token) literal() uint8 { return uint8(t) }

// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }

// length returns the length bits (bits 22-30) of a match token.
func (t token) length() uint8 { return uint8(t >> lengthShift) }

// Convert length to code.
func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[uint8(off)]
+ }
+ return offsetCodes14[uint8(off>>7)]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token_test.go b/vendor/github.com/klauspost/compress/flate/token_test.go
new file mode 100644
index 0000000000..f6346259e8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token_test.go
@@ -0,0 +1,54 @@
+package flate
+
+import (
+ "bytes"
+ "os"
+ "testing"
+)
+
// testFatal is the subset of testing.TB that loadTestTokens needs,
// letting it serve both tests (*testing.T) and benchmarks (*testing.B).
type testFatal interface {
	Fatal(args ...interface{})
}
+
// loadTestTokens will load test tokens.
// First block from enwik9, varint encoded.
// Fails the calling test/benchmark if the fixture is missing or corrupt.
func loadTestTokens(t testFatal) *tokens {
	b, err := os.ReadFile("testdata/tokens.bin")
	if err != nil {
		t.Fatal(err)
	}
	var tokens tokens
	err = tokens.FromVarInt(b)
	if err != nil {
		t.Fatal(err)
	}
	return &tokens
}
+
// Test_tokens_EstimatedBits pins EstimatedBits to a known value for the
// enwik9 fixture and logs it next to the actual encoded size.
func Test_tokens_EstimatedBits(t *testing.T) {
	tok := loadTestTokens(t)
	// The estimated size, update if method changes.
	const expect = 221057
	n := tok.EstimatedBits()
	// Encode for real so the estimate can be compared in the log output.
	var buf bytes.Buffer
	wr := newHuffmanBitWriter(&buf)
	wr.writeBlockDynamic(tok, true, nil, true)
	if wr.err != nil {
		t.Fatal(wr.err)
	}
	wr.flush()
	t.Log("got:", n, "actual:", buf.Len()*8, "(header not part of estimate)")
	if n != expect {
		t.Error("want:", expect, "bits, got:", n)
	}
}
+
// Benchmark_tokens_EstimatedBits measures EstimatedBits over the
// enwik9 token fixture.
func Benchmark_tokens_EstimatedBits(b *testing.B) {
	tok := loadTestTokens(b)
	b.ResetTimer()
	// One "byte", one token iteration.
	b.SetBytes(1)
	for i := 0; i < b.N; i++ {
		_ = tok.EstimatedBits()
	}
}
diff --git a/vendor/github.com/klauspost/compress/flate/writer_test.go b/vendor/github.com/klauspost/compress/flate/writer_test.go
new file mode 100644
index 0000000000..0a011a9220
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/writer_test.go
@@ -0,0 +1,541 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "archive/zip"
+ "bytes"
+ "compress/flate"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestWriterMemUsage(t *testing.T) {
+ testMem := func(t *testing.T, fn func()) {
+ var before, after runtime.MemStats
+ runtime.GC()
+ runtime.ReadMemStats(&before)
+ fn()
+ runtime.GC()
+ runtime.ReadMemStats(&after)
+ t.Logf("%s: Memory Used: %dKB, %d allocs", t.Name(), (after.HeapInuse-before.HeapInuse)/1024, after.HeapObjects-before.HeapObjects)
+ }
+ data := make([]byte, 100000)
+ t.Run(fmt.Sprint("stateless"), func(t *testing.T) {
+ testMem(t, func() {
+ StatelessDeflate(io.Discard, data, false, nil)
+ })
+ })
+ for level := HuffmanOnly; level <= BestCompression; level++ {
+ t.Run(fmt.Sprint("level-", level), func(t *testing.T) {
+ var zr *Writer
+ var err error
+ testMem(t, func() {
+ zr, err = NewWriter(io.Discard, level)
+ if err != nil {
+ t.Fatal(err)
+ }
+ zr.Write(data)
+ })
+ zr.Close()
+ })
+ }
+ for level := HuffmanOnly; level <= BestCompression; level++ {
+ t.Run(fmt.Sprint("stdlib-", level), func(t *testing.T) {
+ var zr *flate.Writer
+ var err error
+ testMem(t, func() {
+ zr, err = flate.NewWriter(io.Discard, level)
+ if err != nil {
+ t.Fatal(err)
+ }
+ zr.Write(data)
+ })
+ zr.Close()
+ })
+ }
+}
+
// TestWriterRegression round-trips every file in testdata/regression.zip
// at every compression level, once on a fresh Writer and once after
// Reset, verifying the decompressed output matches the input.
func TestWriterRegression(t *testing.T) {
	data, err := os.ReadFile("testdata/regression.zip")
	if err != nil {
		t.Fatal(err)
	}
	for level := HuffmanOnly; level <= BestCompression; level++ {
		t.Run(fmt.Sprint("level_", level), func(t *testing.T) {
			zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
			if err != nil {
				t.Fatal(err)
			}

			for _, tt := range zr.File {
				// NOTE(review): HasSuffix with "" is always true, so this
				// filter selects every file; presumably a leftover hook for
				// narrowing runs while debugging — confirm before removing.
				if !strings.HasSuffix(t.Name(), "") {
					continue
				}

				t.Run(tt.Name, func(t *testing.T) {
					// Keep short runs fast by skipping large fixtures.
					if testing.Short() && tt.FileInfo().Size() > 10000 {
						t.SkipNow()
					}
					r, err := tt.Open()
					if err != nil {
						t.Error(err)
						return
					}
					in, err := io.ReadAll(r)
					if err != nil {
						t.Error(err)
					}
					msg := "level " + strconv.Itoa(level) + ":"
					buf := new(bytes.Buffer)
					fw, err := NewWriter(buf, level)
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					n, err := fw.Write(in)
					if n != len(in) {
						t.Fatal(msg + "short write")
					}
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					err = fw.Close()
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					// Decompress and compare against the original input.
					fr1 := NewReader(buf)
					data2, err := io.ReadAll(fr1)
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					if !bytes.Equal(in, data2) {
						t.Fatal(msg + "not equal")
					}
					// Do it again...
					msg = "level " + strconv.Itoa(level) + " (reset):"
					buf.Reset()
					fw.Reset(buf)
					n, err = fw.Write(in)
					if n != len(in) {
						t.Fatal(msg + "short write")
					}
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					err = fw.Close()
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					fr1 = NewReader(buf)
					data2, err = io.ReadAll(fr1)
					if err != nil {
						t.Fatal(msg + err.Error())
					}
					if !bytes.Equal(in, data2) {
						t.Fatal(msg + "not equal")
					}
				})
			}
		})
	}
}
+
// benchmarkEncoder compresses n bytes of the given test file (repeated
// as needed to reach n) at the given level, reusing one Writer between
// iterations via Reset.
func benchmarkEncoder(b *testing.B, testfile, level, n int) {
	b.SetBytes(int64(n))
	buf0, err := os.ReadFile(testfiles[testfile])
	if err != nil {
		b.Fatal(err)
	}
	if len(buf0) == 0 {
		b.Fatalf("test file %q has no data", testfiles[testfile])
	}
	// Tile the source file into an n-byte input buffer, truncating the
	// final repetition.
	buf1 := make([]byte, n)
	for i := 0; i < n; i += len(buf0) {
		if len(buf0) > n-i {
			buf0 = buf0[:n-i]
		}
		copy(buf1[i:], buf0)
	}
	// Drop the source and collect before timing starts.
	buf0 = nil
	runtime.GC()
	w, err := NewWriter(io.Discard, level)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		w.Reset(io.Discard)
		_, err = w.Write(buf1)
		if err != nil {
			b.Fatal(err)
		}
		err = w.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
// Encoder benchmarks over the shared test corpora (digits, twain) at
// each level and input size; SL variants use the stateless encoder.
func BenchmarkEncodeDigitsConstant1e4(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e4) }
func BenchmarkEncodeDigitsConstant1e5(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e5) }
func BenchmarkEncodeDigitsConstant1e6(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e6) }
func BenchmarkEncodeDigitsSpeed1e4(b *testing.B)    { benchmarkEncoder(b, digits, speed, 1e4) }
func BenchmarkEncodeDigitsSpeed1e5(b *testing.B)    { benchmarkEncoder(b, digits, speed, 1e5) }
func BenchmarkEncodeDigitsSpeed1e6(b *testing.B)    { benchmarkEncoder(b, digits, speed, 1e6) }
func BenchmarkEncodeDigitsDefault1e4(b *testing.B)  { benchmarkEncoder(b, digits, default_, 1e4) }
func BenchmarkEncodeDigitsDefault1e5(b *testing.B)  { benchmarkEncoder(b, digits, default_, 1e5) }
func BenchmarkEncodeDigitsDefault1e6(b *testing.B)  { benchmarkEncoder(b, digits, default_, 1e6) }
func BenchmarkEncodeDigitsCompress1e4(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e4) }
func BenchmarkEncodeDigitsCompress1e5(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e5) }
func BenchmarkEncodeDigitsCompress1e6(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e6) }
func BenchmarkEncodeDigitsSL1e4(b *testing.B)       { benchmarkStatelessEncoder(b, digits, 1e4) }
func BenchmarkEncodeDigitsSL1e5(b *testing.B)       { benchmarkStatelessEncoder(b, digits, 1e5) }
func BenchmarkEncodeDigitsSL1e6(b *testing.B)       { benchmarkStatelessEncoder(b, digits, 1e6) }
func BenchmarkEncodeTwainConstant1e4(b *testing.B)  { benchmarkEncoder(b, twain, constant, 1e4) }
func BenchmarkEncodeTwainConstant1e5(b *testing.B)  { benchmarkEncoder(b, twain, constant, 1e5) }
func BenchmarkEncodeTwainConstant1e6(b *testing.B)  { benchmarkEncoder(b, twain, constant, 1e6) }
func BenchmarkEncodeTwainSpeed1e4(b *testing.B)     { benchmarkEncoder(b, twain, speed, 1e4) }
func BenchmarkEncodeTwainSpeed1e5(b *testing.B)     { benchmarkEncoder(b, twain, speed, 1e5) }
func BenchmarkEncodeTwainSpeed1e6(b *testing.B)     { benchmarkEncoder(b, twain, speed, 1e6) }
func BenchmarkEncodeTwainDefault1e4(b *testing.B)   { benchmarkEncoder(b, twain, default_, 1e4) }
func BenchmarkEncodeTwainDefault1e5(b *testing.B)   { benchmarkEncoder(b, twain, default_, 1e5) }
func BenchmarkEncodeTwainDefault1e6(b *testing.B)   { benchmarkEncoder(b, twain, default_, 1e6) }
func BenchmarkEncodeTwainCompress1e4(b *testing.B)  { benchmarkEncoder(b, twain, compress, 1e4) }
func BenchmarkEncodeTwainCompress1e5(b *testing.B)  { benchmarkEncoder(b, twain, compress, 1e5) }
func BenchmarkEncodeTwainCompress1e6(b *testing.B)  { benchmarkEncoder(b, twain, compress, 1e6) }
func BenchmarkEncodeTwainSL1e4(b *testing.B)        { benchmarkStatelessEncoder(b, twain, 1e4) }
func BenchmarkEncodeTwainSL1e5(b *testing.B)        { benchmarkStatelessEncoder(b, twain, 1e5) }
func BenchmarkEncodeTwainSL1e6(b *testing.B)        { benchmarkStatelessEncoder(b, twain, 1e6) }
+
// benchmarkStatelessEncoder compresses n bytes of the given test file
// (repeated as needed) using a fresh stateless writer per iteration.
func benchmarkStatelessEncoder(b *testing.B, testfile, n int) {
	b.SetBytes(int64(n))
	buf0, err := os.ReadFile(testfiles[testfile])
	if err != nil {
		b.Fatal(err)
	}
	if len(buf0) == 0 {
		b.Fatalf("test file %q has no data", testfiles[testfile])
	}
	// Tile the source file into an n-byte input buffer, truncating the
	// final repetition.
	buf1 := make([]byte, n)
	for i := 0; i < n; i += len(buf0) {
		if len(buf0) > n-i {
			buf0 = buf0[:n-i]
		}
		copy(buf1[i:], buf0)
	}
	buf0 = nil
	runtime.GC()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		w := NewStatelessWriter(io.Discard)
		_, err = w.Write(buf1)
		if err != nil {
			b.Fatal(err)
		}
		err = w.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
}
+
+// errorWriter is an io.Writer that accepts N writes and then fails every
+// subsequent call with io.ErrClosedPipe.
+type errorWriter struct {
+	N int
+}
+
+// Write consumes one of the remaining allowed writes, or fails once the
+// budget is exhausted.
+func (e *errorWriter) Write(b []byte) (int, error) {
+	if e.N > 0 {
+		e.N--
+		return len(b), nil
+	}
+	return 0, io.ErrClosedPipe
+}
+
+// TestWriteError checks that errors from the underlying writer are passed
+// upwards through Write, Flush and Close at every compression level, and
+// that Reset afterwards returns the Writer to a usable state.
+func TestWriteError(t *testing.T) {
+	buf := new(bytes.Buffer)
+	n := 65536
+	if !testing.Short() {
+		n *= 4
+	}
+	for i := 0; i < n; i++ {
+		fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i)
+	}
+	in := buf.Bytes()
+	// We create our own buffer to control number of writes.
+	copyBuf := make([]byte, 128)
+	for l := 0; l < 10; l++ {
+		for fail := 1; fail <= 256; fail *= 2 {
+			// Fail after 'fail' writes.
+			ew := &errorWriter{N: fail}
+			w, err := NewWriter(ew, l)
+			if err != nil {
+				t.Fatalf("NewWriter: level %d: %v", l, err)
+			}
+			_, err = copyBuffer(w, bytes.NewBuffer(in), copyBuf)
+			if err == nil {
+				t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew)
+			}
+			n2, err := w.Write([]byte{1, 2, 2, 3, 4, 5})
+			if n2 != 0 {
+				// Report n2, the failed Write's result (the message previously
+				// printed the unrelated copyBuffer byte count).
+				t.Fatal("Level", l, "Expected 0 length write, got", n2)
+			}
+			if err == nil {
+				t.Fatal("Level", l, "Expected an error")
+			}
+			err = w.Flush()
+			if err == nil {
+				t.Fatal("Level", l, "Expected an error on flush")
+			}
+			err = w.Close()
+			if err == nil {
+				t.Fatal("Level", l, "Expected an error on close")
+			}
+
+			// After Reset the writer must be usable again.
+			w.Reset(io.Discard)
+			n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6})
+			if err != nil {
+				t.Fatal("Level", l, "Got unexpected error after reset:", err)
+			}
+			if n2 == 0 {
+				t.Fatal("Level", l, "Got 0 length write, expected > 0")
+			}
+			if testing.Short() {
+				return
+			}
+		}
+	}
+}
+
+// TestWriter_Reset exercises repeated Reset of the fast encoders, driving
+// the internal offset counter close to and past its wraparound points
+// (bufferReset and math.MaxUint32) and verifying writes still succeed.
+// NOTE(review): the previous comment here was copy-pasted from
+// TestWriteError and did not describe this test.
+func TestWriter_Reset(t *testing.T) {
+	buf := new(bytes.Buffer)
+	n := 65536
+	if !testing.Short() {
+		n *= 4
+	}
+	for i := 0; i < n; i++ {
+		fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i)
+	}
+	in := buf.Bytes()
+	for l := 0; l < 10; l++ {
+		l := l
+		if testing.Short() && l > 1 {
+			continue
+		}
+		t.Run(fmt.Sprintf("level-%d", l), func(t *testing.T) {
+			t.Parallel()
+			offset := 1
+			if testing.Short() {
+				offset = 256
+			}
+			for ; offset <= 256; offset *= 2 {
+				// Levels without a fast encoder cannot be exercised here.
+				w, err := NewWriter(io.Discard, l)
+				if err != nil {
+					t.Fatalf("NewWriter: level %d: %v", l, err)
+				}
+				if w.d.fast == nil {
+					t.Skip("Not Fast...")
+					return
+				}
+				for i := 0; i < (bufferReset-len(in)-offset-maxMatchOffset)/maxMatchOffset; i++ {
+					// skip ahead to where we are close to wrap around...
+					w.d.fast.Reset()
+				}
+				w.d.fast.Reset()
+				_, err = w.Write(in)
+				if err != nil {
+					t.Fatal(err)
+				}
+				for i := 0; i < 50; i++ {
+					// skip ahead again... This should wrap around...
+					w.d.fast.Reset()
+				}
+				w.d.fast.Reset()
+
+				_, err = w.Write(in)
+				if err != nil {
+					t.Fatal(err)
+				}
+				for i := 0; i < (math.MaxUint32-bufferReset)/maxMatchOffset; i++ {
+					// skip ahead to where we are close to wrap around...
+					w.d.fast.Reset()
+				}
+
+				_, err = w.Write(in)
+				if err != nil {
+					t.Fatal(err)
+				}
+				err = w.Close()
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+// Determinism tests: one wrapper per compression level (1-9, 0 and -2);
+// see testDeterministic for the actual check.
+func TestDeterministicL1(t *testing.T) { testDeterministic(1, t) }
+func TestDeterministicL2(t *testing.T) { testDeterministic(2, t) }
+func TestDeterministicL3(t *testing.T) { testDeterministic(3, t) }
+func TestDeterministicL4(t *testing.T) { testDeterministic(4, t) }
+func TestDeterministicL5(t *testing.T) { testDeterministic(5, t) }
+func TestDeterministicL6(t *testing.T) { testDeterministic(6, t) }
+func TestDeterministicL7(t *testing.T) { testDeterministic(7, t) }
+func TestDeterministicL8(t *testing.T) { testDeterministic(8, t) }
+func TestDeterministicL9(t *testing.T) { testDeterministic(9, t) }
+func TestDeterministicL0(t *testing.T) { testDeterministic(0, t) }
+func TestDeterministicLM2(t *testing.T) { testDeterministic(-2, t) }
+
+// testDeterministic checks that compressing the same pseudo-random input at
+// level i yields byte-identical output regardless of how the input is
+// chunked, including via the io.WriterTo fast path.
+func testDeterministic(i int, t *testing.T) {
+	// Test so much we cross a good number of block boundaries.
+	var length = maxStoreBlockSize*30 + 500
+	if testing.Short() {
+		length /= 10
+	}
+
+	// Create a random, but compressible stream.
+	rng := rand.New(rand.NewSource(1))
+	t1 := make([]byte, length)
+	for i := range t1 {
+		t1[i] = byte(rng.Int63() & 7)
+	}
+
+	// Do our first encode.
+	var b1 bytes.Buffer
+	br := bytes.NewBuffer(t1)
+	w, err := NewWriter(&b1, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Use a very small prime sized buffer.
+	cbuf := make([]byte, 787)
+	_, err = copyBuffer(w, br, cbuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Close errors were previously dropped; a failed final flush would make
+	// the byte comparison below meaningless.
+	if err = w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// We choose a different buffer size,
+	// bigger than a maximum block, and also a prime.
+	var b2 bytes.Buffer
+	cbuf = make([]byte, 81761)
+	br2 := bytes.NewBuffer(t1)
+	w2, err := NewWriter(&b2, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = copyBuffer(w2, br2, cbuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err = w2.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b1b := b1.Bytes()
+	b2b := b2.Bytes()
+
+	if !bytes.Equal(b1b, b2b) {
+		t.Errorf("level %d did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b))
+	}
+
+	// Test using io.WriterTo interface.
+	var b3 bytes.Buffer
+	br = bytes.NewBuffer(t1)
+	w, err = NewWriter(&b3, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = br.WriteTo(w)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err = w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b3b := b3.Bytes()
+	if !bytes.Equal(b1b, b3b) {
+		t.Errorf("level %d (io.WriterTo) did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b3b))
+	}
+}
+
+// copyBuffer is a copy of io.CopyBuffer, since we want to support older go versions.
+// This is modified to never use io.WriterTo or io.ReaderFrom interfaces.
+// It streams src into dst via buf (allocating a 32 KiB buffer when buf is
+// nil) and reports the number of bytes written plus the first error seen.
+func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+	if buf == nil {
+		buf = make([]byte, 32*1024)
+	}
+	for {
+		nr, rerr := src.Read(buf)
+		if nr > 0 {
+			nw, werr := dst.Write(buf[:nr])
+			written += int64(nw)
+			if werr != nil {
+				// A write error takes precedence over any read error.
+				return written, werr
+			}
+			if nw != nr {
+				return written, io.ErrShortWrite
+			}
+		}
+		switch rerr {
+		case nil:
+			// Keep copying.
+		case io.EOF:
+			// Normal termination: EOF is not reported as an error.
+			return written, nil
+		default:
+			return written, rerr
+		}
+	}
+}
+
+// BenchmarkCompressAllocations reports per-operation allocations when
+// creating a Writer and compressing a small payload, for every supported
+// level from -2 through 9.
+func BenchmarkCompressAllocations(b *testing.B) {
+	payload := []byte(strings.Repeat("Tiny payload", 20))
+	for level := -2; level <= 9; level++ {
+		level := level
+		b.Run("level("+strconv.Itoa(level)+")", func(b *testing.B) {
+			b.Run("flate", func(b *testing.B) {
+				b.ReportAllocs()
+				for i := 0; i < b.N; i++ {
+					w, err := NewWriter(io.Discard, level)
+					if err != nil {
+						b.Fatal(err)
+					}
+					w.Write(payload)
+					w.Close()
+				}
+			})
+		})
+	}
+}
+
+// BenchmarkCompressAllocationsSingle reports per-operation allocations for
+// compressing a small payload at level 2 only.
+func BenchmarkCompressAllocationsSingle(b *testing.B) {
+	const level = 2
+	payload := []byte(strings.Repeat("Tiny payload", 20))
+	b.Run("flate", func(b *testing.B) {
+		b.ReportAllocs()
+		for i := 0; i < b.N; i++ {
+			w, err := NewWriter(io.Discard, level)
+			if err != nil {
+				b.Fatal(err)
+			}
+			w.Write(payload)
+			w.Close()
+		}
+	})
+}
diff --git a/vendor/github.com/klauspost/compress/flate/ya.make b/vendor/github.com/klauspost/compress/flate/ya.make
new file mode 100644
index 0000000000..4f83849018
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/ya.make
@@ -0,0 +1,51 @@
+GO_LIBRARY()
+
+LICENSE(
+    Apache-2.0 AND
+    BSD-3-Clause AND
+    MIT
+)
+
+# Library sources built on every platform.
+SRCS(
+    deflate.go
+    dict_decoder.go
+    fast_encoder.go
+    huffman_bit_writer.go
+    huffman_code.go
+    huffman_sortByFreq.go
+    huffman_sortByLiteral.go
+    inflate.go
+    inflate_gen.go
+    level1.go
+    level2.go
+    level3.go
+    level4.go
+    level5.go
+    level6.go
+    stateless.go
+    token.go
+)
+
+# Test-only sources (compiled into the gotest module below).
+GO_TEST_SRCS(
+    deflate_test.go
+    dict_decoder_test.go
+    flate_test.go
+    fuzz_test.go
+    huffman_bit_writer_test.go
+    inflate_test.go
+    reader_test.go
+    token_test.go
+    writer_test.go
+)
+
+# Architecture-specific source selection (regmask_* files).
+IF (ARCH_X86_64)
+    SRCS(regmask_amd64.go)
+ENDIF()
+
+IF (ARCH_ARM64)
+    SRCS(regmask_other.go)
+ENDIF()
+
+END()
+
+# The test binary lives in a separate recursed module.
+RECURSE(gotest)