path: root/libgo/go/compress
author    Ian Lance Taylor <iant@golang.org>    2017-01-14 00:05:42 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2017-01-14 00:05:42 +0000
commit    c2047754c300b68c05d65faa8dc2925fe67b71b4 (patch)
tree      e183ae81a1f48a02945cb6de463a70c5be1b06f6 /libgo/go/compress
parent    829afb8f05602bb31c9c597b24df7377fed4f059 (diff)
download  gcc-c2047754c300b68c05d65faa8dc2925fe67b71b4.zip
          gcc-c2047754c300b68c05d65faa8dc2925fe67b71b4.tar.gz
          gcc-c2047754c300b68c05d65faa8dc2925fe67b71b4.tar.bz2
libgo: update to Go 1.8 release candidate 1
Compiler changes:
 * Change map assignment to use mapassign and assign value directly.
 * Change string iteration to use decoderune, faster for ASCII strings.
 * Change makeslice to take int, and use makeslice64 for larger values.
 * Add new noverflow field to hmap struct used for maps.

Unresolved problems, to be fixed later:
 * Commented out test in go/types/sizes_test.go that doesn't compile.
 * Commented out reflect.TestStructOf test for padding after zero-sized field.

Reviewed-on: https://go-review.googlesource.com/35231

gotools/:
	Updates for Go 1.8rc1.
	* Makefile.am (go_cmd_go_files): Add bug.go.
	(s-zdefaultcc): Write defaultPkgConfig.
	* Makefile.in: Rebuild.

From-SVN: r244456
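For context, the Go-level constructs affected by the compiler changes above look like the following minimal sketch. The runtime entry points named in the list (mapassign, decoderune, makeslice, makeslice64) are compiler-generated calls rather than user-visible API; the snippet only shows the ordinary source patterns that now lower to them.

package main

import "fmt"

func main() {
	// Map assignment: now compiled to a runtime mapassign call, with the
	// value assigned directly through the returned slot.
	m := map[string]int{}
	m["answer"] = 42

	// Ranging over a string: now compiled to use decoderune, which is
	// faster for ASCII-only strings.
	for i, r := range "go1.8" {
		fmt.Println(i, r)
	}

	// make with an int length calls makeslice; larger values go through
	// makeslice64 instead.
	s := make([]byte, 1<<10)
	fmt.Println(len(m), len(s))
}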
Diffstat (limited to 'libgo/go/compress')
-rw-r--r--  libgo/go/compress/flate/deflate.go            |  20
-rw-r--r--  libgo/go/compress/flate/deflate_test.go       | 183
-rw-r--r--  libgo/go/compress/flate/deflatefast.go        | 186
-rw-r--r--  libgo/go/compress/flate/example_test.go       | 245
-rw-r--r--  libgo/go/compress/flate/flate_test.go         |   1
-rw-r--r--  libgo/go/compress/flate/huffman_bit_writer.go |   2
-rw-r--r--  libgo/go/compress/flate/inflate.go            |   3
-rw-r--r--  libgo/go/compress/flate/inflate_test.go       |  29
-rw-r--r--  libgo/go/compress/flate/writer_test.go        |   9
-rw-r--r--  libgo/go/compress/gzip/example_test.go        | 130
-rw-r--r--  libgo/go/compress/gzip/gunzip.go              |   7
-rw-r--r--  libgo/go/compress/gzip/gunzip_test.go         |  20
-rw-r--r--  libgo/go/compress/gzip/gzip.go                |  23
-rw-r--r--  libgo/go/compress/gzip/gzip_test.go           |   4
-rw-r--r--  libgo/go/compress/gzip/issue14937_test.go     |   9
-rw-r--r--  libgo/go/compress/zlib/reader_test.go         |  18
-rw-r--r--  libgo/go/compress/zlib/writer.go              |  11
-rw-r--r--  libgo/go/compress/zlib/writer_test.go         |   5
18 files changed, 829 insertions, 76 deletions
diff --git a/libgo/go/compress/flate/deflate.go b/libgo/go/compress/flate/deflate.go
index 9f53d51..97265b3 100644
--- a/libgo/go/compress/flate/deflate.go
+++ b/libgo/go/compress/flate/deflate.go
@@ -84,9 +84,10 @@ type compressor struct {
bulkHasher func([]byte, []uint32)
// compression algorithm
- fill func(*compressor, []byte) int // copy data to window
- step func(*compressor) // process window
- sync bool // requesting flush
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+ sync bool // requesting flush
+ bestSpeed *deflateFast // Encoder for BestSpeed
// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
@@ -346,12 +347,13 @@ func (d *compressor) encSpeed() {
d.err = d.w.err
}
d.windowEnd = 0
+ d.bestSpeed.reset()
return
}
}
// Encode the block.
- d.tokens = encodeBestSpeed(d.tokens[:0], d.window[:d.windowEnd])
+ d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd])
// If we removed less than 1/16th, Huffman compress the block.
if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
@@ -519,10 +521,10 @@ func (d *compressor) fillStore(b []byte) int {
}
func (d *compressor) store() {
- if d.windowEnd > 0 {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
}
- d.windowEnd = 0
}
// storeHuff compresses and stores the currently added data
@@ -584,6 +586,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillStore
d.step = (*compressor).encSpeed
+ d.bestSpeed = newDeflateFast()
d.tokens = make([]token, maxStoreBlockSize)
case level == DefaultCompression:
level = 6
@@ -609,6 +612,7 @@ func (d *compressor) reset(w io.Writer) {
case BestSpeed:
d.windowEnd = 0
d.tokens = d.tokens[:0]
+ d.bestSpeed.reset()
default:
d.chainHead = -1
for i := range d.hashHead {
@@ -702,10 +706,12 @@ func (w *Writer) Write(data []byte) (n int, err error) {
return w.d.write(data)
}
-// Flush flushes any pending compressed data to the underlying writer.
+// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
diff --git a/libgo/go/compress/flate/deflate_test.go b/libgo/go/compress/flate/deflate_test.go
index 3322c40..521a260 100644
--- a/libgo/go/compress/flate/deflate_test.go
+++ b/libgo/go/compress/flate/deflate_test.go
@@ -342,6 +342,7 @@ func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int)
}
func TestDeflateInflate(t *testing.T) {
+ t.Parallel()
for i, h := range deflateInflateTests {
testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{})
}
@@ -376,6 +377,7 @@ var deflateInflateStringTests = []deflateInflateStringTest{
}
func TestDeflateInflateString(t *testing.T) {
+ t.Parallel()
if testing.Short() && testenv.Builder() == "" {
t.Skip("skipping in short mode")
}
@@ -463,6 +465,7 @@ func TestRegression2508(t *testing.T) {
}
func TestWriterReset(t *testing.T) {
+ t.Parallel()
for level := 0; level <= 9; level++ {
if testing.Short() && level > 1 {
break
@@ -490,6 +493,7 @@ func TestWriterReset(t *testing.T) {
w.d.fill, wref.d.fill = nil, nil
w.d.step, wref.d.step = nil, nil
w.d.bulkHasher, wref.d.bulkHasher = nil, nil
+ w.d.bestSpeed, wref.d.bestSpeed = nil, nil
// hashMatch is always overwritten when used.
copy(w.d.hashMatch[:], wref.d.hashMatch[:])
if len(w.d.tokens) != 0 {
@@ -558,6 +562,7 @@ func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error))
// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize
// (65535).
func TestBestSpeed(t *testing.T) {
+ t.Parallel()
abc := make([]byte, 128)
for i := range abc {
abc[i] = byte(i)
@@ -647,6 +652,7 @@ func (w *failWriter) Write(b []byte) (int, error) {
}
func TestWriterPersistentError(t *testing.T) {
+ t.Parallel()
d, err := ioutil.ReadFile("../testdata/Mark.Twain-Tom.Sawyer.txt")
if err != nil {
t.Fatalf("ReadFile: %v", err)
@@ -681,3 +687,180 @@ func TestWriterPersistentError(t *testing.T) {
}
}
}
+
+func TestBestSpeedMatch(t *testing.T) {
+ t.Parallel()
+ cases := []struct {
+ previous, current []byte
+ t, s, want int32
+ }{{
+ previous: []byte{0, 0, 0, 1, 2},
+ current: []byte{3, 4, 5, 0, 1, 2, 3, 4, 5},
+ t: -3,
+ s: 3,
+ want: 6,
+ }, {
+ previous: []byte{0, 0, 0, 1, 2},
+ current: []byte{2, 4, 5, 0, 1, 2, 3, 4, 5},
+ t: -3,
+ s: 3,
+ want: 3,
+ }, {
+ previous: []byte{0, 0, 0, 1, 1},
+ current: []byte{3, 4, 5, 0, 1, 2, 3, 4, 5},
+ t: -3,
+ s: 3,
+ want: 2,
+ }, {
+ previous: []byte{0, 0, 0, 1, 2},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -1,
+ s: 0,
+ want: 4,
+ }, {
+ previous: []byte{0, 0, 0, 1, 2, 3, 4, 5, 2, 2},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -7,
+ s: 4,
+ want: 5,
+ }, {
+ previous: []byte{9, 9, 9, 9, 9},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -1,
+ s: 0,
+ want: 0,
+ }, {
+ previous: []byte{9, 9, 9, 9, 9},
+ current: []byte{9, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: 0,
+ s: 1,
+ want: 0,
+ }, {
+ previous: []byte{},
+ current: []byte{9, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -5,
+ s: 1,
+ want: 0,
+ }, {
+ previous: []byte{},
+ current: []byte{9, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: -1,
+ s: 1,
+ want: 0,
+ }, {
+ previous: []byte{},
+ current: []byte{2, 2, 2, 2, 1, 2, 3, 4, 5},
+ t: 0,
+ s: 1,
+ want: 3,
+ }, {
+ previous: []byte{3, 4, 5},
+ current: []byte{3, 4, 5},
+ t: -3,
+ s: 0,
+ want: 3,
+ }, {
+ previous: make([]byte, 1000),
+ current: make([]byte, 1000),
+ t: -1000,
+ s: 0,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, 200),
+ current: make([]byte, 500),
+ t: -200,
+ s: 0,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, 200),
+ current: make([]byte, 500),
+ t: 0,
+ s: 1,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, maxMatchLength-4),
+ current: make([]byte, 500),
+ t: -(maxMatchLength - 4),
+ s: 0,
+ want: maxMatchLength - 4,
+ }, {
+ previous: make([]byte, 200),
+ current: make([]byte, 500),
+ t: -200,
+ s: 400,
+ want: 100,
+ }, {
+ previous: make([]byte, 10),
+ current: make([]byte, 500),
+ t: 200,
+ s: 400,
+ want: 100,
+ }}
+ for i, c := range cases {
+ e := deflateFast{prev: c.previous}
+ got := e.matchLen(c.s, c.t, c.current)
+ if got != c.want {
+ t.Errorf("Test %d: match length, want %d, got %d", i, c.want, got)
+ }
+ }
+}
+
+func TestBestSpeedMaxMatchOffset(t *testing.T) {
+ t.Parallel()
+ const abc, xyz = "abcdefgh", "stuvwxyz"
+ for _, matchBefore := range []bool{false, true} {
+ for _, extra := range []int{0, inputMargin - 1, inputMargin, inputMargin + 1, 2 * inputMargin} {
+ for offsetAdj := -5; offsetAdj <= +5; offsetAdj++ {
+ report := func(desc string, err error) {
+ t.Errorf("matchBefore=%t, extra=%d, offsetAdj=%d: %s%v",
+ matchBefore, extra, offsetAdj, desc, err)
+ }
+
+ offset := maxMatchOffset + offsetAdj
+
+ // Make src to be a []byte of the form
+ // "%s%s%s%s%s" % (abc, zeros0, xyzMaybe, abc, zeros1)
+ // where:
+ // zeros0 is approximately maxMatchOffset zeros.
+ // xyzMaybe is either xyz or the empty string.
+ // zeros1 is between 0 and 30 zeros.
+ // The difference between the two abc's will be offset, which
+ // is maxMatchOffset plus or minus a small adjustment.
+ src := make([]byte, offset+len(abc)+extra)
+ copy(src, abc)
+ if !matchBefore {
+ copy(src[offset-len(xyz):], xyz)
+ }
+ copy(src[offset:], abc)
+
+ buf := new(bytes.Buffer)
+ w, err := NewWriter(buf, BestSpeed)
+ if err != nil {
+ report("NewWriter: ", err)
+ continue
+ }
+ if _, err := w.Write(src); err != nil {
+ report("Write: ", err)
+ continue
+ }
+ if err := w.Close(); err != nil {
+ report("Writer.Close: ", err)
+ continue
+ }
+
+ r := NewReader(buf)
+ dst, err := ioutil.ReadAll(r)
+ r.Close()
+ if err != nil {
+ report("ReadAll: ", err)
+ continue
+ }
+
+ if !bytes.Equal(dst, src) {
+ report("", fmt.Errorf("bytes differ after round-tripping"))
+ continue
+ }
+ }
+ }
+ }
+}
diff --git a/libgo/go/compress/flate/deflatefast.go b/libgo/go/compress/flate/deflatefast.go
index 6b881a4..a1636a3 100644
--- a/libgo/go/compress/flate/deflatefast.go
+++ b/libgo/go/compress/flate/deflatefast.go
@@ -14,12 +14,12 @@ const (
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
)
-func load32(b []byte, i int) uint32 {
+func load32(b []byte, i int32) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
-func load64(b []byte, i int) uint64 {
+func load64(b []byte, i int32) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
@@ -38,31 +38,49 @@ const (
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
-func encodeBestSpeed(dst []token, src []byte) []token {
+type tableEntry struct {
+ val uint32 // Value at destination
+ offset int32
+}
+
+// deflateFast maintains the table for matches,
+// and the previous byte block for cross block matching.
+type deflateFast struct {
+ table [tableSize]tableEntry
+ prev []byte // Previous block, zero length if unknown.
+ cur int32 // Current match offset.
+}
+
+func newDeflateFast() *deflateFast {
+ return &deflateFast{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}
+}
+
+// encode encodes a block given in src and appends tokens
+// to dst and returns the result.
+func (e *deflateFast) encode(dst []token, src []byte) []token {
+ // Ensure that e.cur doesn't wrap.
+ if e.cur > 1<<30 {
+ *e = deflateFast{cur: maxStoreBlockSize, prev: e.prev[:0]}
+ }
+
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
+ e.cur += maxStoreBlockSize
+ e.prev = e.prev[:0]
return emitLiteral(dst, src)
}
- // Initialize the hash table.
- //
- // The table element type is uint16, as s < sLimit and sLimit < len(src)
- // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
- var table [tableSize]uint16
-
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
- sLimit := len(src) - inputMargin
+ sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- nextEmit := 0
-
- // The encoded form must start with a literal, as there are no previous
- // bytes to copy, so we start looking for hash matches at s == 1.
- s := 1
- nextHash := hash(load32(src, s))
+ nextEmit := int32(0)
+ s := int32(0)
+ cv := load32(src, s)
+ nextHash := hash(cv)
for {
// Copied from the C++ snappy implementation:
@@ -80,10 +98,10 @@ func encodeBestSpeed(dst []token, src []byte) []token {
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
- skip := 32
+ skip := int32(32)
nextS := s
- candidate := 0
+ var candidate tableEntry
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
@@ -92,13 +110,18 @@ func encodeBestSpeed(dst []token, src []byte) []token {
if nextS > sLimit {
goto emitRemainder
}
- candidate = int(table[nextHash&tableMask])
- table[nextHash&tableMask] = uint16(s)
- nextHash = hash(load32(src, nextS))
- // TODO: < should be <=, and add a test for that.
- if s-candidate < maxMatchOffset && load32(src, s) == load32(src, candidate) {
- break
+ candidate = e.table[nextHash&tableMask]
+ now := load32(src, nextS)
+ e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
+ nextHash = hash(now)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || cv != candidate.val {
+ // Out of range or not matched.
+ cv = now
+ continue
}
+ break
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
@@ -117,22 +140,16 @@ func encodeBestSpeed(dst []token, src []byte) []token {
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
- base := s
// Extend the 4-byte match as long as possible.
//
- // This is an inlined version of Snappy's:
- // s = extendMatch(src, candidate+4, s+4)
s += 4
- s1 := base + maxMatchLength
- if s1 > len(src) {
- s1 = len(src)
- }
- for i := candidate + 4; s < s1 && src[i] == src[s]; i, s = i+1, s+1 {
- }
+ t := candidate.offset - e.cur + 4
+ l := e.matchLen(s, t, src)
- // matchToken is flate's equivalent of Snappy's emitCopy.
- dst = append(dst, matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)))
+ // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
+ dst = append(dst, matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)))
+ s += l
nextEmit = s
if s >= sLimit {
goto emitRemainder
@@ -145,14 +162,17 @@ func encodeBestSpeed(dst []token, src []byte) []token {
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
- prevHash := hash(uint32(x >> 0))
- table[prevHash&tableMask] = uint16(s - 1)
- currHash := hash(uint32(x >> 8))
- candidate = int(table[currHash&tableMask])
- table[currHash&tableMask] = uint16(s)
- // TODO: >= should be >, and add a test for that.
- if s-candidate >= maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
- nextHash = hash(uint32(x >> 16))
+ prevHash := hash(uint32(x))
+ e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidate = e.table[currHash&tableMask]
+ e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != candidate.val {
+ cv = uint32(x >> 8)
+ nextHash = hash(cv)
s++
break
}
@@ -160,15 +180,91 @@ func encodeBestSpeed(dst []token, src []byte) []token {
}
emitRemainder:
- if nextEmit < len(src) {
+ if int(nextEmit) < len(src) {
dst = emitLiteral(dst, src[nextEmit:])
}
+ e.cur += int32(len(src))
+ e.prev = e.prev[:len(src)]
+ copy(e.prev, src)
return dst
}
func emitLiteral(dst []token, lit []byte) []token {
for _, v := range lit {
- dst = append(dst, token(v))
+ dst = append(dst, literalToken(uint32(v)))
}
return dst
}
+
+// matchLen returns the match length between src[s:] and src[t:].
+// t can be negative to indicate the match is starting in e.prev.
+// We assume that src[s-4:s] and src[t-4:t] already match.
+func (e *deflateFast) matchLen(s, t int32, src []byte) int32 {
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // If we are inside the current block
+ if t >= 0 {
+ b := src[t:]
+ a := src[s:s1]
+ b = b[:len(a)]
+ // Extend the match to be as long as possible.
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i)
+ }
+ }
+ return int32(len(a))
+ }
+
+ // We found a match in the previous block.
+ tp := int32(len(e.prev)) + t
+ if tp < 0 {
+ return 0
+ }
+
+ // Extend the match to be as long as possible.
+ a := src[s:s1]
+ b := e.prev[tp:]
+ if len(b) > len(a) {
+ b = b[:len(a)]
+ }
+ a = a[:len(b)]
+ for i := range b {
+ if a[i] != b[i] {
+ return int32(i)
+ }
+ }
+
+ // If we reached our limit, we matched everything we are
+ // allowed to in the previous block and we return.
+ n := int32(len(b))
+ if int(s+n) == s1 {
+ return n
+ }
+
+ // Continue looking for more matches in the current block.
+ a = src[s+n : s1]
+ b = src[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i) + n
+ }
+ }
+ return int32(len(a)) + n
+}
+
+// Reset resets the encoding history.
+// This ensures that no matches are made to the previous block.
+func (e *deflateFast) reset() {
+ e.prev = e.prev[:0]
+ // Bump the offset, so all matches will fail distance check.
+ e.cur += maxMatchOffset
+
+ // Protect against e.cur wraparound.
+ if e.cur > 1<<30 {
+ *e = deflateFast{cur: maxStoreBlockSize, prev: e.prev[:0]}
+ }
+}
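
The subtlest part of the new encoder is the e.cur bookkeeping: table entries record absolute offsets (s + e.cur at insertion time), so reset only needs to drop e.prev and bump e.cur by maxMatchOffset for every stale entry to fail the distance check in encode. A simplified, standalone illustration of that arithmetic (not the library code itself, just the same constants and comparison):

package main

import "fmt"

func main() {
	const maxMatchOffset = 1 << 15  // flate's 32 KiB match window
	const maxStoreBlockSize = 65535 // initial value of e.cur

	cur := int32(maxStoreBlockSize)
	entry := int32(100) + cur // absolute offset stored for position 100 of a block

	cur += 1000           // encode() advances e.cur past that 1000-byte block
	cur += maxMatchOffset // reset() bumps it by the full window

	s := int32(0)             // first position of the next block
	dist := s - (entry - cur) // the same distance check encode() performs
	fmt.Println(dist, dist > maxMatchOffset) // 33668 true: the stale entry is rejected
}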
diff --git a/libgo/go/compress/flate/example_test.go b/libgo/go/compress/flate/example_test.go
new file mode 100644
index 0000000..3d44dde
--- /dev/null
+++ b/libgo/go/compress/flate/example_test.go
@@ -0,0 +1,245 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package flate_test
+
+import (
+ "bytes"
+ "compress/flate"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strings"
+ "sync"
+)
+
+// In performance critical applications, Reset can be used to discard the
+// current compressor or decompressor state and reinitialize them quickly
+// by taking advantage of previously allocated memory.
+func Example_reset() {
+ proverbs := []string{
+ "Don't communicate by sharing memory, share memory by communicating.\n",
+ "Concurrency is not parallelism.\n",
+ "The bigger the interface, the weaker the abstraction.\n",
+ "Documentation is for users.\n",
+ }
+
+ var r strings.Reader
+ var b bytes.Buffer
+ buf := make([]byte, 32<<10)
+
+ zw, err := flate.NewWriter(nil, flate.DefaultCompression)
+ if err != nil {
+ log.Fatal(err)
+ }
+ zr := flate.NewReader(nil)
+
+ for _, s := range proverbs {
+ r.Reset(s)
+ b.Reset()
+
+ // Reset the compressor and encode from some input stream.
+ zw.Reset(&b)
+ if _, err := io.CopyBuffer(zw, &r, buf); err != nil {
+ log.Fatal(err)
+ }
+ if err := zw.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Reset the decompressor and decode to some output stream.
+ if err := zr.(flate.Resetter).Reset(&b, nil); err != nil {
+ log.Fatal(err)
+ }
+ if _, err := io.CopyBuffer(os.Stdout, zr, buf); err != nil {
+ log.Fatal(err)
+ }
+ if err := zr.Close(); err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ // Output:
+ // Don't communicate by sharing memory, share memory by communicating.
+ // Concurrency is not parallelism.
+ // The bigger the interface, the weaker the abstraction.
+ // Documentation is for users.
+}
+
+// A preset dictionary can be used to improve the compression ratio.
+// The downside to using a dictionary is that the compressor and decompressor
+// must agree in advance what dictionary to use.
+func Example_dictionary() {
+ // The dictionary is a string of bytes. When compressing some input data,
+ // the compressor will attempt to substitute substrings with matches found
+ // in the dictionary. As such, the dictionary should only contain substrings
+ // that are expected to be found in the actual data stream.
+ const dict = `<?xml version="1.0"?>` + `<book>` + `<data>` + `<meta name="` + `" content="`
+
+ // The data to compress should (but is not required to) contain frequent
+ // substrings that match those in the dictionary.
+ const data = `<?xml version="1.0"?>
+<book>
+ <meta name="title" content="The Go Programming Language"/>
+ <meta name="authors" content="Alan Donovan and Brian Kernighan"/>
+ <meta name="published" content="2015-10-26"/>
+ <meta name="isbn" content="978-0134190440"/>
+ <data>...</data>
+</book>
+`
+
+ var b bytes.Buffer
+
+ // Compress the data using the specially crafted dictionary.
+ zw, err := flate.NewWriterDict(&b, flate.DefaultCompression, []byte(dict))
+ if err != nil {
+ log.Fatal(err)
+ }
+ if _, err := io.Copy(zw, strings.NewReader(data)); err != nil {
+ log.Fatal(err)
+ }
+ if err := zw.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // The decompressor must use the same dictionary as the compressor.
+ // Otherwise, the input may appear as corrupted.
+ fmt.Println("Decompressed output using the dictionary:")
+ zr := flate.NewReaderDict(bytes.NewReader(b.Bytes()), []byte(dict))
+ if _, err := io.Copy(os.Stdout, zr); err != nil {
+ log.Fatal(err)
+ }
+ if err := zr.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println()
+
+ // Substitute all of the bytes in the dictionary with a '#' to visually
+ // demonstrate the approximate effectiveness of using a preset dictionary.
+ fmt.Println("Substrings matched by the dictionary are marked with #:")
+ hashDict := []byte(dict)
+ for i := range hashDict {
+ hashDict[i] = '#'
+ }
+ zr = flate.NewReaderDict(&b, hashDict)
+ if _, err := io.Copy(os.Stdout, zr); err != nil {
+ log.Fatal(err)
+ }
+ if err := zr.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+ // Decompressed output using the dictionary:
+ // <?xml version="1.0"?>
+ // <book>
+ // <meta name="title" content="The Go Programming Language"/>
+ // <meta name="authors" content="Alan Donovan and Brian Kernighan"/>
+ // <meta name="published" content="2015-10-26"/>
+ // <meta name="isbn" content="978-0134190440"/>
+ // <data>...</data>
+ // </book>
+ //
+ // Substrings matched by the dictionary are marked with #:
+ // #####################
+ // ######
+ // ############title###########The Go Programming Language"/#
+ // ############authors###########Alan Donovan and Brian Kernighan"/#
+ // ############published###########2015-10-26"/#
+ // ############isbn###########978-0134190440"/#
+ // ######...</#####
+ // </#####
+}
+
+// DEFLATE is suitable for transmitting compressed data across the network.
+func Example_synchronization() {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ // Use io.Pipe to simulate a network connection.
+ // A real network application should take care to properly close the
+ // underlying connection.
+ rp, wp := io.Pipe()
+
+ // Start a goroutine to act as the transmitter.
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ zw, err := flate.NewWriter(wp, flate.BestSpeed)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ b := make([]byte, 256)
+ for _, m := range strings.Fields("A long time ago in a galaxy far, far away...") {
+ // We use a simple framing format where the first byte is the
+ // message length, followed by the message itself.
+ b[0] = uint8(copy(b[1:], m))
+
+ if _, err := zw.Write(b[:1+len(m)]); err != nil {
+ log.Fatal(err)
+ }
+
+ // Flush ensures that the receiver can read all data sent so far.
+ if err := zw.Flush(); err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if err := zw.Close(); err != nil {
+ log.Fatal(err)
+ }
+ }()
+
+ // Start a goroutine to act as the receiver.
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ zr := flate.NewReader(rp)
+
+ b := make([]byte, 256)
+ for {
+ // Read the message length.
+ // This is guaranteed to return for every corresponding
+ // Flush and Close on the transmitter side.
+ if _, err := io.ReadFull(zr, b[:1]); err != nil {
+ if err == io.EOF {
+ break // The transmitter closed the stream
+ }
+ log.Fatal(err)
+ }
+
+ // Read the message content.
+ n := int(b[0])
+ if _, err := io.ReadFull(zr, b[:n]); err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Printf("Received %d bytes: %s\n", n, b[:n])
+ }
+ fmt.Println()
+
+ if err := zr.Close(); err != nil {
+ log.Fatal(err)
+ }
+ }()
+
+ // Output:
+ // Received 1 bytes: A
+ // Received 4 bytes: long
+ // Received 4 bytes: time
+ // Received 3 bytes: ago
+ // Received 2 bytes: in
+ // Received 1 bytes: a
+ // Received 6 bytes: galaxy
+ // Received 4 bytes: far,
+ // Received 3 bytes: far
+ // Received 7 bytes: away...
+}
diff --git a/libgo/go/compress/flate/flate_test.go b/libgo/go/compress/flate/flate_test.go
index 83c20498..1e45077 100644
--- a/libgo/go/compress/flate/flate_test.go
+++ b/libgo/go/compress/flate/flate_test.go
@@ -281,6 +281,7 @@ func TestTruncatedStreams(t *testing.T) {
//
// See https://github.com/google/go-github/pull/317 for background.
func TestReaderEarlyEOF(t *testing.T) {
+ t.Parallel()
testSizes := []int{
1, 2, 3, 4, 5, 6, 7, 8,
100, 1000, 10000, 100000,
diff --git a/libgo/go/compress/flate/huffman_bit_writer.go b/libgo/go/compress/flate/huffman_bit_writer.go
index d8b5a3e..6cd6281 100644
--- a/libgo/go/compress/flate/huffman_bit_writer.go
+++ b/libgo/go/compress/flate/huffman_bit_writer.go
@@ -520,7 +520,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []b
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
- size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
+ size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
// Store bytes, if we don't get a reasonable improvement.
if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
diff --git a/libgo/go/compress/flate/inflate.go b/libgo/go/compress/flate/inflate.go
index 68cc232..9a8c4fc 100644
--- a/libgo/go/compress/flate/inflate.go
+++ b/libgo/go/compress/flate/inflate.go
@@ -344,6 +344,9 @@ func (f *decompressor) Read(b []byte) (int, error) {
return 0, f.err
}
f.step(f)
+ if f.err != nil && len(f.toRead) == 0 {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ }
}
}
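
The Read change above flushes whatever has already been decoded into the window when the decompressor hits an error, so a truncated stream now yields its recoverable prefix before reporting io.ErrUnexpectedEOF. A minimal sketch against the public API, reusing the stored-block vector from the TestReaderTruncated cases added below:

package main

import (
	"compress/flate"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// A stored block announcing 12 bytes of data but delivering only "hello"
	// (the same vector as in TestReaderTruncated).
	truncated := "\x00\x0c\x00\xf3\xffhello"

	zr := flate.NewReader(strings.NewReader(truncated))
	b, err := ioutil.ReadAll(zr)
	if err != io.ErrUnexpectedEOF {
		fmt.Println("unexpected error:", err)
	}
	fmt.Printf("recovered prefix: %q\n", b) // "hello"
}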
diff --git a/libgo/go/compress/flate/inflate_test.go b/libgo/go/compress/flate/inflate_test.go
index e0bce71..951decd 100644
--- a/libgo/go/compress/flate/inflate_test.go
+++ b/libgo/go/compress/flate/inflate_test.go
@@ -7,6 +7,8 @@ package flate
import (
"bytes"
"io"
+ "io/ioutil"
+ "strings"
"testing"
)
@@ -38,6 +40,33 @@ func TestReset(t *testing.T) {
}
}
+func TestReaderTruncated(t *testing.T) {
+ vectors := []struct{ input, output string }{
+ {"\x00", ""},
+ {"\x00\f", ""},
+ {"\x00\f\x00", ""},
+ {"\x00\f\x00\xf3\xff", ""},
+ {"\x00\f\x00\xf3\xffhello", "hello"},
+ {"\x00\f\x00\xf3\xffhello, world", "hello, world"},
+ {"\x02", ""},
+ {"\xf2H\xcd", "He"},
+ {"\xf2H͙0a\u0084\t", "Hel\x90\x90\x90\x90\x90"},
+ {"\xf2H͙0a\u0084\t\x00", "Hel\x90\x90\x90\x90\x90"},
+ }
+
+ for i, v := range vectors {
+ r := strings.NewReader(v.input)
+ zr := NewReader(r)
+ b, err := ioutil.ReadAll(zr)
+ if err != io.ErrUnexpectedEOF {
+ t.Errorf("test %d, error mismatch: got %v, want io.ErrUnexpectedEOF", i, err)
+ }
+ if string(b) != v.output {
+ t.Errorf("test %d, output mismatch: got %q, want %q", i, b, v.output)
+ }
+ }
+}
+
func TestResetDict(t *testing.T) {
dict := []byte("the lorem fox")
ss := []string{
diff --git a/libgo/go/compress/flate/writer_test.go b/libgo/go/compress/flate/writer_test.go
index 21cd0b2..c4d36aa 100644
--- a/libgo/go/compress/flate/writer_test.go
+++ b/libgo/go/compress/flate/writer_test.go
@@ -56,6 +56,7 @@ func (e *errorWriter) Write(b []byte) (int, error) {
// Test if errors from the underlying writer is passed upwards.
func TestWriteError(t *testing.T) {
+ t.Parallel()
buf := new(bytes.Buffer)
n := 65536
if !testing.Short() {
@@ -75,7 +76,7 @@ func TestWriteError(t *testing.T) {
if err != nil {
t.Fatalf("NewWriter: level %d: %v", l, err)
}
- n, err := io.CopyBuffer(w, bytes.NewBuffer(in), copyBuffer)
+ n, err := io.CopyBuffer(w, struct{ io.Reader }{bytes.NewBuffer(in)}, copyBuffer)
if err == nil {
t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew)
}
@@ -113,6 +114,7 @@ func TestWriteError(t *testing.T) {
// Test if two runs produce identical results
// even when writing different sizes to the Writer.
func TestDeterministic(t *testing.T) {
+ t.Parallel()
for i := 0; i <= 9; i++ {
t.Run(fmt.Sprint("L", i), func(t *testing.T) { testDeterministic(i, t) })
}
@@ -120,6 +122,7 @@ func TestDeterministic(t *testing.T) {
}
func testDeterministic(i int, t *testing.T) {
+ t.Parallel()
// Test so much we cross a good number of block boundaries.
var length = maxStoreBlockSize*30 + 500
if testing.Short() {
@@ -142,7 +145,7 @@ func testDeterministic(i int, t *testing.T) {
}
// Use a very small prime sized buffer.
cbuf := make([]byte, 787)
- _, err = io.CopyBuffer(w, br, cbuf)
+ _, err = io.CopyBuffer(w, struct{ io.Reader }{br}, cbuf)
if err != nil {
t.Fatal(err)
}
@@ -157,7 +160,7 @@ func testDeterministic(i int, t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = io.CopyBuffer(w2, br2, cbuf)
+ _, err = io.CopyBuffer(w2, struct{ io.Reader }{br2}, cbuf)
if err != nil {
t.Fatal(err)
}
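
The struct{ io.Reader } wrapping added to these CopyBuffer calls is deliberate: io.CopyBuffer skips the supplied buffer whenever the source implements io.WriterTo or the destination implements io.ReaderFrom, so hiding those methods behind an anonymous struct forces the copy through the small buffer the tests want to exercise. A standalone sketch of the idiom (the flate tests only need to wrap the reader, since *flate.Writer has no ReadFrom; this sketch wraps both sides):

package main

import (
	"bytes"
	"io"
	"log"
	"strings"
)

func main() {
	src := strings.NewReader("data that should really pass through the copy buffer")
	var dst bytes.Buffer
	buf := make([]byte, 787) // small prime-sized buffer, as in testDeterministic

	// *strings.Reader has WriteTo and *bytes.Buffer has ReadFrom; either one
	// would make io.CopyBuffer ignore buf. The anonymous structs hide both
	// methods so the copy genuinely uses the 787-byte buffer.
	n, err := io.CopyBuffer(struct{ io.Writer }{&dst}, struct{ io.Reader }{src}, buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes through buf", n)
}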
diff --git a/libgo/go/compress/gzip/example_test.go b/libgo/go/compress/gzip/example_test.go
new file mode 100644
index 0000000..4764bcb
--- /dev/null
+++ b/libgo/go/compress/gzip/example_test.go
@@ -0,0 +1,130 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package gzip_test
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "time"
+)
+
+func Example_writerReader() {
+ var buf bytes.Buffer
+ zw := gzip.NewWriter(&buf)
+
+ // Setting the Header fields is optional.
+ zw.Name = "a-new-hope.txt"
+ zw.Comment = "an epic space opera by George Lucas"
+ zw.ModTime = time.Date(1977, time.May, 25, 0, 0, 0, 0, time.UTC)
+
+ _, err := zw.Write([]byte("A long time ago in a galaxy far, far away..."))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if err := zw.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ zr, err := gzip.NewReader(&buf)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC())
+
+ if _, err := io.Copy(os.Stdout, zr); err != nil {
+ log.Fatal(err)
+ }
+
+ if err := zr.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+ // Name: a-new-hope.txt
+ // Comment: an epic space opera by George Lucas
+ // ModTime: 1977-05-25 00:00:00 +0000 UTC
+ //
+ // A long time ago in a galaxy far, far away...
+}
+
+func ExampleReader_Multistream() {
+ var buf bytes.Buffer
+ zw := gzip.NewWriter(&buf)
+
+ var files = []struct {
+ name string
+ comment string
+ modTime time.Time
+ data string
+ }{
+ {"file-1.txt", "file-header-1", time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC), "Hello Gophers - 1"},
+ {"file-2.txt", "file-header-2", time.Date(2007, time.March, 2, 4, 5, 6, 1, time.UTC), "Hello Gophers - 2"},
+ }
+
+ for _, file := range files {
+ zw.Name = file.name
+ zw.Comment = file.comment
+ zw.ModTime = file.modTime
+
+ if _, err := zw.Write([]byte(file.data)); err != nil {
+ log.Fatal(err)
+ }
+
+ if err := zw.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ zw.Reset(&buf)
+ }
+
+ zr, err := gzip.NewReader(&buf)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for {
+ zr.Multistream(false)
+ fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC())
+
+ if _, err := io.Copy(os.Stdout, zr); err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Print("\n\n")
+
+ err = zr.Reset(&buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if err := zr.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+ // Name: file-1.txt
+ // Comment: file-header-1
+ // ModTime: 2006-02-01 03:04:05 +0000 UTC
+ //
+ // Hello Gophers - 1
+ //
+ // Name: file-2.txt
+ // Comment: file-header-2
+ // ModTime: 2007-03-02 04:05:06 +0000 UTC
+ //
+ // Hello Gophers - 2
+}
diff --git a/libgo/go/compress/gzip/gunzip.go b/libgo/go/compress/gzip/gunzip.go
index 7e64069..8bd750b 100644
--- a/libgo/go/compress/gzip/gunzip.go
+++ b/libgo/go/compress/gzip/gunzip.go
@@ -186,7 +186,11 @@ func (z *Reader) readHeader() (hdr Header, err error) {
return hdr, ErrHeader
}
flg := z.buf[3]
- hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
+ if t := int64(le.Uint32(z.buf[4:8])); t > 0 {
+ // Section 2.3.1, the zero value for MTIME means that the
+ // modified time is not set.
+ hdr.ModTime = time.Unix(t, 0)
+ }
// z.buf[8] is XFL and is currently ignored.
hdr.OS = z.buf[9]
z.digest = crc32.ChecksumIEEE(z.buf[:10])
@@ -238,6 +242,7 @@ func (z *Reader) readHeader() (hdr Header, err error) {
return hdr, nil
}
+// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
diff --git a/libgo/go/compress/gzip/gunzip_test.go b/libgo/go/compress/gzip/gunzip_test.go
index fdce919..fdea0c5 100644
--- a/libgo/go/compress/gzip/gunzip_test.go
+++ b/libgo/go/compress/gzip/gunzip_test.go
@@ -339,6 +339,26 @@ var gunzipTests = []gunzipTest{
},
nil,
},
+ {
+ "",
+ "truncated gzip file amid raw-block",
+ "hello",
+ []byte{
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
+ 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f,
+ },
+ io.ErrUnexpectedEOF,
+ },
+ {
+ "",
+ "truncated gzip file amid fixed-block",
+ "He",
+ []byte{
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
+ 0xf2, 0x48, 0xcd,
+ },
+ io.ErrUnexpectedEOF,
+ },
}
func TestDecompressor(t *testing.T) {
diff --git a/libgo/go/compress/gzip/gzip.go b/libgo/go/compress/gzip/gzip.go
index c702c49..aafb442 100644
--- a/libgo/go/compress/gzip/gzip.go
+++ b/libgo/go/compress/gzip/gzip.go
@@ -10,6 +10,7 @@ import (
"fmt"
"hash/crc32"
"io"
+ "time"
)
// These constants are copied from the flate package, so that code that imports
@@ -19,6 +20,7 @@ const (
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
+ HuffmanOnly = flate.HuffmanOnly
)
// A Writer is an io.WriteCloser.
@@ -52,11 +54,11 @@ func NewWriter(w io.Writer) *Writer {
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
-// The compression level can be DefaultCompression, NoCompression, or any
-// integer value between BestSpeed and BestCompression inclusive. The error
-// returned will be nil if the level is valid.
+// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
+// or any integer value between BestSpeed and BestCompression inclusive.
+// The error returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
- if level < DefaultCompression || level > BestCompression {
+ if level < HuffmanOnly || level > BestCompression {
return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
}
z := new(Writer)
@@ -142,10 +144,7 @@ func (z *Writer) Write(p []byte) (int, error) {
// Write the GZIP header lazily.
if !z.wroteHeader {
z.wroteHeader = true
- z.buf[0] = gzipID1
- z.buf[1] = gzipID2
- z.buf[2] = gzipDeflate
- z.buf[3] = 0
+ z.buf = [10]byte{0: gzipID1, 1: gzipID2, 2: gzipDeflate}
if z.Extra != nil {
z.buf[3] |= 0x04
}
@@ -155,13 +154,15 @@ func (z *Writer) Write(p []byte) (int, error) {
if z.Comment != "" {
z.buf[3] |= 0x10
}
- le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
+ if z.ModTime.After(time.Unix(0, 0)) {
+ // Section 2.3.1, the zero value for MTIME means that the
+ // modified time is not set.
+ le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
+ }
if z.level == BestCompression {
z.buf[8] = 2
} else if z.level == BestSpeed {
z.buf[8] = 4
- } else {
- z.buf[8] = 0
}
z.buf[9] = z.OS
n, z.err = z.w.Write(z.buf[:10])
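
Taken together with the gunzip.go change earlier, the MTIME field now round-trips the zero time: a Writer whose ModTime is left unset writes MTIME as zero, and the Reader maps a zero MTIME back to the zero time.Time rather than the Unix epoch. A minimal sketch using only the public compress/gzip API:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"log"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf) // ModTime left as the zero time.Time
	if _, err := zw.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	// With this change the header's MTIME is written as 0 and read back as
	// the zero time, so IsZero reports true (previously it was the Unix epoch).
	fmt.Println(zr.ModTime.IsZero())
}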
diff --git a/libgo/go/compress/gzip/gzip_test.go b/libgo/go/compress/gzip/gzip_test.go
index 09271b2..865c529 100644
--- a/libgo/go/compress/gzip/gzip_test.go
+++ b/libgo/go/compress/gzip/gzip_test.go
@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"io/ioutil"
+ "reflect"
"testing"
"time"
)
@@ -24,6 +25,9 @@ func TestEmpty(t *testing.T) {
if err != nil {
t.Fatalf("NewReader: %v", err)
}
+ if want := (Header{OS: 255}); !reflect.DeepEqual(r.Header, want) {
+ t.Errorf("Header mismatch:\ngot %#v\nwant %#v", r.Header, want)
+ }
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll: %v", err)
diff --git a/libgo/go/compress/gzip/issue14937_test.go b/libgo/go/compress/gzip/issue14937_test.go
index 432ad16..e76d47c 100644
--- a/libgo/go/compress/gzip/issue14937_test.go
+++ b/libgo/go/compress/gzip/issue14937_test.go
@@ -7,7 +7,6 @@ import (
"runtime"
"strings"
"testing"
- "time"
)
// Per golang.org/issue/14937, check that every .gz file
@@ -16,8 +15,12 @@ func TestGZIPFilesHaveZeroMTimes(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skip("skipping in short mode")
}
+ goroot, err := filepath.EvalSymlinks(runtime.GOROOT())
+ if err != nil {
+ t.Fatal("error evaluating GOROOT: ", err)
+ }
var files []string
- err := filepath.Walk(runtime.GOROOT(), func(path string, info os.FileInfo, err error) error {
+ err = filepath.Walk(goroot, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
@@ -53,7 +56,7 @@ func checkZeroMTime(t *testing.T, path string) {
return
}
defer gz.Close()
- if !gz.ModTime.Equal(time.Unix(0, 0)) {
+ if !gz.ModTime.IsZero() {
t.Errorf("gzip file %s has non-zero mtime (%s)", path, gz.ModTime)
}
}
diff --git a/libgo/go/compress/zlib/reader_test.go b/libgo/go/compress/zlib/reader_test.go
index f74bff1..7e27aec 100644
--- a/libgo/go/compress/zlib/reader_test.go
+++ b/libgo/go/compress/zlib/reader_test.go
@@ -121,6 +121,24 @@ var zlibTests = []zlibTest{
},
ErrDictionary,
},
+ {
+ "truncated zlib stream amid raw-block",
+ "hello",
+ []byte{
+ 0x78, 0x9c, 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f,
+ },
+ nil,
+ io.ErrUnexpectedEOF,
+ },
+ {
+ "truncated zlib stream amid fixed-block",
+ "He",
+ []byte{
+ 0x78, 0x9c, 0xf2, 0x48, 0xcd,
+ },
+ nil,
+ io.ErrUnexpectedEOF,
+ },
}
func TestDecompressor(t *testing.T) {
diff --git a/libgo/go/compress/zlib/writer.go b/libgo/go/compress/zlib/writer.go
index 3b4313a..1620c00 100644
--- a/libgo/go/compress/zlib/writer.go
+++ b/libgo/go/compress/zlib/writer.go
@@ -19,6 +19,7 @@ const (
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
+ HuffmanOnly = flate.HuffmanOnly
)
// A Writer takes data written to it and writes the compressed
@@ -47,9 +48,9 @@ func NewWriter(w io.Writer) *Writer {
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
-// The compression level can be DefaultCompression, NoCompression, or any
-// integer value between BestSpeed and BestCompression inclusive. The error
-// returned will be nil if the level is valid.
+// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
+// or any integer value between BestSpeed and BestCompression inclusive.
+// The error returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
return NewWriterLevelDict(w, level, nil)
}
@@ -60,7 +61,7 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
// The dictionary may be nil. If not, its contents should not be modified until
// the Writer is closed.
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
- if level < DefaultCompression || level > BestCompression {
+ if level < HuffmanOnly || level > BestCompression {
return nil, fmt.Errorf("zlib: invalid compression level: %d", level)
}
return &Writer{
@@ -99,7 +100,7 @@ func (z *Writer) writeHeader() (err error) {
// The next bit, FDICT, is set if a dictionary is given.
// The final five FCHECK bits form a mod-31 checksum.
switch z.level {
- case 0, 1:
+ case -2, 0, 1:
z.scratch[1] = 0 << 6
case 2, 3, 4, 5:
z.scratch[1] = 1 << 6
diff --git a/libgo/go/compress/zlib/writer_test.go b/libgo/go/compress/zlib/writer_test.go
index dd94165..d501974 100644
--- a/libgo/go/compress/zlib/writer_test.go
+++ b/libgo/go/compress/zlib/writer_test.go
@@ -147,6 +147,7 @@ func TestWriter(t *testing.T) {
tag := fmt.Sprintf("#%d", i)
testLevelDict(t, tag, b, DefaultCompression, "")
testLevelDict(t, tag, b, NoCompression, "")
+ testLevelDict(t, tag, b, HuffmanOnly, "")
for level := BestSpeed; level <= BestCompression; level++ {
testLevelDict(t, tag, b, level, "")
}
@@ -157,6 +158,7 @@ func TestWriterBig(t *testing.T) {
for i, fn := range filenames {
testFileLevelDict(t, fn, DefaultCompression, "")
testFileLevelDict(t, fn, NoCompression, "")
+ testFileLevelDict(t, fn, HuffmanOnly, "")
for level := BestSpeed; level <= BestCompression; level++ {
testFileLevelDict(t, fn, level, "")
if level >= 1 && testing.Short() && testenv.Builder() == "" {
@@ -174,6 +176,7 @@ func TestWriterDict(t *testing.T) {
for i, fn := range filenames {
testFileLevelDict(t, fn, DefaultCompression, dictionary)
testFileLevelDict(t, fn, NoCompression, dictionary)
+ testFileLevelDict(t, fn, HuffmanOnly, dictionary)
for level := BestSpeed; level <= BestCompression; level++ {
testFileLevelDict(t, fn, level, dictionary)
if level >= 1 && testing.Short() && testenv.Builder() == "" {
@@ -191,8 +194,10 @@ func TestWriterReset(t *testing.T) {
for _, fn := range filenames {
testFileLevelDictReset(t, fn, NoCompression, nil)
testFileLevelDictReset(t, fn, DefaultCompression, nil)
+ testFileLevelDictReset(t, fn, HuffmanOnly, nil)
testFileLevelDictReset(t, fn, NoCompression, []byte(dictionary))
testFileLevelDictReset(t, fn, DefaultCompression, []byte(dictionary))
+ testFileLevelDictReset(t, fn, HuffmanOnly, []byte(dictionary))
if testing.Short() {
break
}
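
Both gzip and zlib now re-export flate.HuffmanOnly and accept it in NewWriterLevel, which is what the added HuffmanOnly cases in these tests exercise. A minimal usage sketch for zlib (gzip is analogous; HuffmanOnly disables LZ77 matching and emits Huffman-coded literals only):

package main

import (
	"bytes"
	"compress/zlib"
	"io/ioutil"
	"log"
)

func main() {
	var buf bytes.Buffer
	zw, err := zlib.NewWriterLevel(&buf, zlib.HuffmanOnly) // rejected as invalid before this change
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("entropy coding only, no match search")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr, err := zlib.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d bytes", len(out))
}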