author     Ian Lance Taylor <ian@gcc.gnu.org>    2011-12-03 02:17:34 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>    2011-12-03 02:17:34 +0000
commit     2fd401c8f190f1fe43e51a7f726f6ed6119a1f96 (patch)
tree       7f76eff391f37fe6467ff4ffbc0c582c9959ea30 /libgo/go/compress
parent     02e9018f1616b23f1276151797216717b3564202 (diff)
libgo: Update to weekly.2011-11-02.
From-SVN: r181964
Diffstat (limited to 'libgo/go/compress')
-rw-r--r--  libgo/go/compress/bzip2/bit_reader.go          |  11
-rw-r--r--  libgo/go/compress/bzip2/bzip2.go               |  19
-rw-r--r--  libgo/go/compress/bzip2/bzip2_test.go          |   3
-rw-r--r--  libgo/go/compress/bzip2/huffman.go             |   9
-rw-r--r--  libgo/go/compress/flate/deflate.go             |  23
-rw-r--r--  libgo/go/compress/flate/deflate_test.go        |  11
-rw-r--r--  libgo/go/compress/flate/huffman_bit_writer.go  |   5
-rw-r--r--  libgo/go/compress/flate/inflate.go             |  41
-rw-r--r--  libgo/go/compress/gzip/gunzip.go               |  24
-rw-r--r--  libgo/go/compress/gzip/gunzip_test.go          |   3
-rw-r--r--  libgo/go/compress/gzip/gzip.go                 |  20
-rw-r--r--  libgo/go/compress/lzw/reader.go                |  23
-rw-r--r--  libgo/go/compress/lzw/reader_test.go           |   3
-rw-r--r--  libgo/go/compress/lzw/writer.go                |  31
-rw-r--r--  libgo/go/compress/lzw/writer_test.go           |   4
-rw-r--r--  libgo/go/compress/zlib/reader.go               |  20
-rw-r--r--  libgo/go/compress/zlib/reader_test.go          |   3
-rw-r--r--  libgo/go/compress/zlib/writer.go               |  18
18 files changed, 129 insertions, 142 deletions
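
The visible bulk of this weekly update is the mechanical move from os.Error to the language's built-in error interface: String() methods on error types become Error(), os.NewError becomes errors.New, and os.EOF becomes io.EOF. A minimal sketch of the new style, loosely adapted from the bzip2 hunks below (names and bodies are illustrative, not code from this patch):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// A named error type now implements Error() string instead of String() string.
type StructuralError string

func (s StructuralError) Error() string {
	return "data invalid: " + string(s)
}

// Sentinel errors are created with errors.New rather than os.NewError.
var ErrHeader = errors.New("invalid header")

// End-of-stream checks compare against io.EOF rather than os.EOF.
func countBytes(r io.Reader) (int, error) {
	buf := make([]byte, 64)
	total := 0
	for {
		n, err := r.Read(buf)
		total += n
		if err == io.EOF {
			return total, nil
		}
		if err != nil {
			return total, err
		}
	}
}

func main() {
	n, err := countBytes(strings.NewReader("hello"))
	fmt.Println(n, err, StructuralError("bad magic"), ErrHeader)
}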
diff --git a/libgo/go/compress/bzip2/bit_reader.go b/libgo/go/compress/bzip2/bit_reader.go
index 50f0ec8..d058c14 100644
--- a/libgo/go/compress/bzip2/bit_reader.go
+++ b/libgo/go/compress/bzip2/bit_reader.go
@@ -7,25 +7,24 @@ package bzip2
import (
"bufio"
"io"
- "os"
)
// bitReader wraps an io.Reader and provides the ability to read values,
-// bit-by-bit, from it. Its Read* methods don't return the usual os.Error
+// bit-by-bit, from it. Its Read* methods don't return the usual error
// because the error handling was verbose. Instead, any error is kept and can
// be checked afterwards.
type bitReader struct {
r byteReader
n uint64
bits uint
- err os.Error
+ err error
}
// bitReader needs to read bytes from an io.Reader. We attempt to cast the
// given io.Reader to this interface and, if it doesn't already fit, we wrap in
// a bufio.Reader.
type byteReader interface {
- ReadByte() (byte, os.Error)
+ ReadByte() (byte, error)
}
func newBitReader(r io.Reader) bitReader {
@@ -42,7 +41,7 @@ func newBitReader(r io.Reader) bitReader {
func (br *bitReader) ReadBits64(bits uint) (n uint64) {
for bits > br.bits {
b, err := br.r.ReadByte()
- if err == os.EOF {
+ if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
@@ -83,6 +82,6 @@ func (br *bitReader) ReadBit() bool {
return n != 0
}
-func (br *bitReader) Error() os.Error {
+func (br *bitReader) Error() error {
return br.err
}
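
The comment above describes a cast-or-wrap idiom: try to assert the io.Reader to byteReader, and wrap it in a bufio.Reader when the assertion fails. The body of newBitReader that does this is elided from the hunk, so here is a standalone sketch under an assumed helper name, makeByteReader:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// byteReader mirrors the interface declared in bit_reader.go above.
type byteReader interface {
	ReadByte() (byte, error)
}

// makeByteReader returns r unchanged when it already provides ReadByte,
// and otherwise wraps it in a bufio.Reader, which does.
func makeByteReader(r io.Reader) byteReader {
	if br, ok := r.(byteReader); ok {
		return br
	}
	return bufio.NewReader(r)
}

func main() {
	// *bytes.Buffer already has ReadByte, so it is returned as-is;
	// an os.File or net.Conn would be wrapped instead.
	b := makeByteReader(bytes.NewBufferString("B"))
	c, err := b.ReadByte()
	fmt.Println(c, err)
}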
diff --git a/libgo/go/compress/bzip2/bzip2.go b/libgo/go/compress/bzip2/bzip2.go
index 8b45723..343cca0 100644
--- a/libgo/go/compress/bzip2/bzip2.go
+++ b/libgo/go/compress/bzip2/bzip2.go
@@ -5,10 +5,7 @@
// Package bzip2 implements bzip2 decompression.
package bzip2
-import (
- "io"
- "os"
-)
+import "io"
// There's no RFC for bzip2. I used the Wikipedia page for reference and a lot
// of guessing: http://en.wikipedia.org/wiki/Bzip2
@@ -19,7 +16,7 @@ import (
// syntactically invalid.
type StructuralError string
-func (s StructuralError) String() string {
+func (s StructuralError) Error() string {
return "bzip2 data invalid: " + string(s)
}
@@ -53,7 +50,7 @@ const bzip2BlockMagic = 0x314159265359
const bzip2FinalMagic = 0x177245385090
// setup parses the bzip2 header.
-func (bz2 *reader) setup() os.Error {
+func (bz2 *reader) setup() error {
br := &bz2.br
magic := br.ReadBits(16)
@@ -76,9 +73,9 @@ func (bz2 *reader) setup() os.Error {
return nil
}
-func (bz2 *reader) Read(buf []byte) (n int, err os.Error) {
+func (bz2 *reader) Read(buf []byte) (n int, err error) {
if bz2.eof {
- return 0, os.EOF
+ return 0, io.EOF
}
if !bz2.setupDone {
@@ -101,7 +98,7 @@ func (bz2 *reader) Read(buf []byte) (n int, err os.Error) {
return
}
-func (bz2 *reader) read(buf []byte) (n int, err os.Error) {
+func (bz2 *reader) read(buf []byte) (n int, err error) {
// bzip2 is a block based compressor, except that it has a run-length
// preprocessing step. The block based nature means that we can
// preallocate fixed-size buffers and reuse them. However, the RLE
@@ -162,7 +159,7 @@ func (bz2 *reader) read(buf []byte) (n int, err os.Error) {
if magic == bzip2FinalMagic {
br.ReadBits64(32) // ignored CRC
bz2.eof = true
- return 0, os.EOF
+ return 0, io.EOF
} else if magic != bzip2BlockMagic {
return 0, StructuralError("bad magic value found")
}
@@ -176,7 +173,7 @@ func (bz2 *reader) read(buf []byte) (n int, err os.Error) {
}
// readBlock reads a bzip2 block. The magic number should already have been consumed.
-func (bz2 *reader) readBlock() (err os.Error) {
+func (bz2 *reader) readBlock() (err error) {
br := &bz2.br
br.ReadBits64(32) // skip checksum. TODO: check it if we can figure out what it is.
randomized := br.ReadBits(1)
diff --git a/libgo/go/compress/bzip2/bzip2_test.go b/libgo/go/compress/bzip2/bzip2_test.go
index 156eea8..7b227ac 100644
--- a/libgo/go/compress/bzip2/bzip2_test.go
+++ b/libgo/go/compress/bzip2/bzip2_test.go
@@ -9,7 +9,6 @@ import (
"encoding/hex"
"io"
"io/ioutil"
- "os"
"testing"
)
@@ -46,7 +45,7 @@ func readerFromHex(s string) io.Reader {
return bytes.NewBuffer(data)
}
-func decompressHex(s string) (out []byte, err os.Error) {
+func decompressHex(s string) (out []byte, err error) {
r := NewReader(readerFromHex(s))
return ioutil.ReadAll(r)
}
diff --git a/libgo/go/compress/bzip2/huffman.go b/libgo/go/compress/bzip2/huffman.go
index dc05739..078c1cb 100644
--- a/libgo/go/compress/bzip2/huffman.go
+++ b/libgo/go/compress/bzip2/huffman.go
@@ -4,10 +4,7 @@
package bzip2
-import (
- "os"
- "sort"
-)
+import "sort"
// A huffmanTree is a binary tree which is navigated, bit-by-bit to reach a
// symbol.
@@ -63,7 +60,7 @@ func (t huffmanTree) Decode(br *bitReader) (v uint16) {
// newHuffmanTree builds a Huffman tree from a slice containing the code
// lengths of each symbol. The maximum code length is 32 bits.
-func newHuffmanTree(lengths []uint8) (huffmanTree, os.Error) {
+func newHuffmanTree(lengths []uint8) (huffmanTree, error) {
// There are many possible trees that assign the same code length to
// each symbol (consider reflecting a tree down the middle, for
// example). Since the code length assignments determine the
@@ -176,7 +173,7 @@ func (n huffmanCodes) Swap(i, j int) {
// buildHuffmanNode takes a slice of sorted huffmanCodes and builds a node in
// the Huffman tree at the given level. It returns the index of the newly
// constructed node.
-func buildHuffmanNode(t *huffmanTree, codes []huffmanCode, level uint32) (nodeIndex uint16, err os.Error) {
+func buildHuffmanNode(t *huffmanTree, codes []huffmanCode, level uint32) (nodeIndex uint16, err error) {
test := uint32(1) << (31 - level)
// We have to search the list of codes to find the divide between the left and right sides.
diff --git a/libgo/go/compress/flate/deflate.go b/libgo/go/compress/flate/deflate.go
index b1cee0b..1f659ba 100644
--- a/libgo/go/compress/flate/deflate.go
+++ b/libgo/go/compress/flate/deflate.go
@@ -7,7 +7,6 @@ package flate
import (
"io"
"math"
- "os"
)
const (
@@ -89,7 +88,7 @@ type compressor struct {
offset int
hash int
maxInsertIndex int
- err os.Error
+ err error
}
func (d *compressor) fillDeflate(b []byte) int {
@@ -123,7 +122,7 @@ func (d *compressor) fillDeflate(b []byte) int {
return n
}
-func (d *compressor) writeBlock(tokens []token, index int, eof bool) os.Error {
+func (d *compressor) writeBlock(tokens []token, index int, eof bool) error {
if index > 0 || eof {
var window []byte
if d.blockStart <= index {
@@ -194,7 +193,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
return
}
-func (d *compressor) writeStoredBlock(buf []byte) os.Error {
+func (d *compressor) writeStoredBlock(buf []byte) error {
if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
return d.w.err
}
@@ -365,7 +364,7 @@ func (d *compressor) store() {
d.windowEnd = 0
}
-func (d *compressor) write(b []byte) (n int, err os.Error) {
+func (d *compressor) write(b []byte) (n int, err error) {
n = len(b)
b = b[d.fill(d, b):]
for len(b) > 0 {
@@ -375,7 +374,7 @@ func (d *compressor) write(b []byte) (n int, err os.Error) {
return n, d.err
}
-func (d *compressor) syncFlush() os.Error {
+func (d *compressor) syncFlush() error {
d.sync = true
d.step(d)
if d.err == nil {
@@ -387,7 +386,7 @@ func (d *compressor) syncFlush() os.Error {
return d.err
}
-func (d *compressor) init(w io.Writer, level int) (err os.Error) {
+func (d *compressor) init(w io.Writer, level int) (err error) {
d.w = newHuffmanBitWriter(w)
switch {
@@ -409,7 +408,7 @@ func (d *compressor) init(w io.Writer, level int) (err os.Error) {
return nil
}
-func (d *compressor) close() os.Error {
+func (d *compressor) close() error {
d.sync = true
d.step(d)
if d.err != nil {
@@ -455,7 +454,7 @@ type dictWriter struct {
enabled bool
}
-func (w *dictWriter) Write(b []byte) (n int, err os.Error) {
+func (w *dictWriter) Write(b []byte) (n int, err error) {
if w.enabled {
return w.w.Write(b)
}
@@ -470,7 +469,7 @@ type Writer struct {
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
-func (w *Writer) Write(data []byte) (n int, err os.Error) {
+func (w *Writer) Write(data []byte) (n int, err error) {
return w.d.write(data)
}
@@ -481,13 +480,13 @@ func (w *Writer) Write(data []byte) (n int, err os.Error) {
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
-func (w *Writer) Flush() os.Error {
+func (w *Writer) Flush() error {
// For more about flushing:
// http://www.bolet.org/~pornin/deflate-flush.html
return w.d.syncFlush()
}
// Close flushes and closes the writer.
-func (w *Writer) Close() os.Error {
+func (w *Writer) Close() error {
return w.d.close()
}
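
As the comment above says, Flush plays the role of zlib's Z_SYNC_FLUSH: it pushes everything written so far into the output so a reader can decode it, without terminating the stream. A usage sketch against the current compress/flate API (today's NewWriter also returns an error; the version in this patch returns only a *Writer):

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, "))
	w.Flush() // the bytes written so far are now decodable

	r := flate.NewReader(bytes.NewReader(buf.Bytes()))
	partial, err := io.ReadAll(r)
	// The stream is unfinished, so after returning the flushed data the
	// reader reports an unexpected EOF; that is expected here.
	fmt.Printf("%q %v\n", partial, err) // "hello, " unexpected EOF

	w.Write([]byte("world"))
	w.Close()
}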
diff --git a/libgo/go/compress/flate/deflate_test.go b/libgo/go/compress/flate/deflate_test.go
index 9308236..db2d71d 100644
--- a/libgo/go/compress/flate/deflate_test.go
+++ b/libgo/go/compress/flate/deflate_test.go
@@ -9,7 +9,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "os"
"sync"
"testing"
)
@@ -102,7 +101,7 @@ func newSyncBuffer() *syncBuffer {
return &syncBuffer{ready: make(chan bool, 1)}
}
-func (b *syncBuffer) Read(p []byte) (n int, err os.Error) {
+func (b *syncBuffer) Read(p []byte) (n int, err error) {
for {
b.mu.RLock()
n, err = b.buf.Read(p)
@@ -122,7 +121,7 @@ func (b *syncBuffer) signal() {
}
}
-func (b *syncBuffer) Write(p []byte) (n int, err os.Error) {
+func (b *syncBuffer) Write(p []byte) (n int, err error) {
n, err = b.buf.Write(p)
b.signal()
return
@@ -137,7 +136,7 @@ func (b *syncBuffer) ReadMode() {
b.signal()
}
-func (b *syncBuffer) Close() os.Error {
+func (b *syncBuffer) Close() error {
b.closed = true
b.signal()
return nil
@@ -204,7 +203,7 @@ func testSync(t *testing.T, level int, input []byte, name string) {
}
buf.ReadMode()
out := make([]byte, 10)
- if n, err := r.Read(out); n > 0 || err != os.EOF {
+ if n, err := r.Read(out); n > 0 || err != io.EOF {
t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n])
}
if buf.buf.Len() != 0 {
@@ -225,7 +224,7 @@ func testSync(t *testing.T, level int, input []byte, name string) {
}
}
-func testToFromWithLevel(t *testing.T, level int, input []byte, name string) os.Error {
+func testToFromWithLevel(t *testing.T, level int, input []byte, name string) error {
buffer := bytes.NewBuffer(nil)
w := NewWriter(buffer, level)
w.Write(input)
diff --git a/libgo/go/compress/flate/huffman_bit_writer.go b/libgo/go/compress/flate/huffman_bit_writer.go
index 3981df5..efd99c6 100644
--- a/libgo/go/compress/flate/huffman_bit_writer.go
+++ b/libgo/go/compress/flate/huffman_bit_writer.go
@@ -7,7 +7,6 @@ package flate
import (
"io"
"math"
- "os"
"strconv"
)
@@ -83,7 +82,7 @@ type huffmanBitWriter struct {
literalEncoding *huffmanEncoder
offsetEncoding *huffmanEncoder
codegenEncoding *huffmanEncoder
- err os.Error
+ err error
}
type WrongValueError struct {
@@ -106,7 +105,7 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
}
}
-func (err WrongValueError) String() string {
+func (err WrongValueError) Error() string {
return "huffmanBitWriter: " + err.name + " should belong to [" + strconv.Itoa64(int64(err.from)) + ";" +
strconv.Itoa64(int64(err.to)) + "] but actual value is " + strconv.Itoa64(int64(err.value))
}
diff --git a/libgo/go/compress/flate/inflate.go b/libgo/go/compress/flate/inflate.go
index 3845f12..3f0c948 100644
--- a/libgo/go/compress/flate/inflate.go
+++ b/libgo/go/compress/flate/inflate.go
@@ -10,7 +10,6 @@ package flate
import (
"bufio"
"io"
- "os"
"strconv"
)
@@ -25,33 +24,33 @@ const (
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError int64
-func (e CorruptInputError) String() string {
+func (e CorruptInputError) Error() string {
return "flate: corrupt input before offset " + strconv.Itoa64(int64(e))
}
// An InternalError reports an error in the flate code itself.
type InternalError string
-func (e InternalError) String() string { return "flate: internal error: " + string(e) }
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
// A ReadError reports an error encountered while reading input.
type ReadError struct {
- Offset int64 // byte offset where error occurred
- Error os.Error // error returned by underlying Read
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Read
}
-func (e *ReadError) String() string {
- return "flate: read error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Error.String()
+func (e *ReadError) Error() string {
+ return "flate: read error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Err.Error()
}
// A WriteError reports an error encountered while writing output.
type WriteError struct {
- Offset int64 // byte offset where error occurred
- Error os.Error // error returned by underlying Write
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Write
}
-func (e *WriteError) String() string {
- return "flate: write error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Error.String()
+func (e *WriteError) Error() string {
+ return "flate: write error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Err.Error()
}
// Huffman decoder is based on
@@ -190,7 +189,7 @@ var fixedHuffmanDecoder = huffmanDecoder{
// the NewReader will introduce its own buffering.
type Reader interface {
io.Reader
- ReadByte() (c byte, err os.Error)
+ ReadByte() (c byte, err error)
}
// Decompress state.
@@ -224,7 +223,7 @@ type decompressor struct {
// and decompression state.
step func(*decompressor)
final bool
- err os.Error
+ err error
toRead []byte
hl, hd *huffmanDecoder
copyLen int
@@ -237,7 +236,7 @@ func (f *decompressor) nextBlock() {
f.flush((*decompressor).nextBlock)
return
}
- f.err = os.EOF
+ f.err = io.EOF
return
}
for f.nb < 1+2 {
@@ -272,7 +271,7 @@ func (f *decompressor) nextBlock() {
}
}
-func (f *decompressor) Read(b []byte) (int, os.Error) {
+func (f *decompressor) Read(b []byte) (int, error) {
for {
if len(f.toRead) > 0 {
n := copy(b, f.toRead)
@@ -287,8 +286,8 @@ func (f *decompressor) Read(b []byte) (int, os.Error) {
panic("unreachable")
}
-func (f *decompressor) Close() os.Error {
- if f.err == os.EOF {
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
return nil
}
return f.err
@@ -299,7 +298,7 @@ func (f *decompressor) Close() os.Error {
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-func (f *decompressor) readHuffman() os.Error {
+func (f *decompressor) readHuffman() error {
// HLIT[5], HDIST[5], HCLEN[4].
for f.nb < 5+5+4 {
if err := f.moreBits(); err != nil {
@@ -625,10 +624,10 @@ func (f *decompressor) setDict(dict []byte) {
f.hw = f.hp
}
-func (f *decompressor) moreBits() os.Error {
+func (f *decompressor) moreBits() error {
c, err := f.r.ReadByte()
if err != nil {
- if err == os.EOF {
+ if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
@@ -640,7 +639,7 @@ func (f *decompressor) moreBits() os.Error {
}
// Read the next Huffman-encoded symbol from f according to h.
-func (f *decompressor) huffSym(h *huffmanDecoder) (int, os.Error) {
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
for n := uint(h.min); n <= uint(h.max); n++ {
lim := h.limit[n]
if lim == -1 {
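
The ReadError and WriteError hunks above do more than swap the error type: once the method is named Error() rather than String(), the struct can no longer carry a field that is also named Error (Go forbids a field and a method with the same name), so the field becomes Err. A minimal compilable sketch of the new shape, illustrative only, with strconv.FormatInt standing in for the strconv.Itoa64 of the time:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// A ReadError reports an error encountered while reading input.
type ReadError struct {
	Offset int64 // byte offset where the error occurred
	Err    error // error returned by the underlying Read
}

// Error satisfies the error interface; a field named Error alongside this
// method would not compile.
func (e *ReadError) Error() string {
	return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}

func main() {
	var err error = &ReadError{Offset: 42, Err: errors.New("connection reset")}
	fmt.Println(err)
}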
diff --git a/libgo/go/compress/gzip/gunzip.go b/libgo/go/compress/gzip/gunzip.go
index 6ac9293..a23e515 100644
--- a/libgo/go/compress/gzip/gunzip.go
+++ b/libgo/go/compress/gzip/gunzip.go
@@ -9,10 +9,10 @@ package gzip
import (
"bufio"
"compress/flate"
+ "errors"
"hash"
"hash/crc32"
"io"
- "os"
)
// BUG(nigeltao): Comments and Names don't properly map UTF-8 character codes outside of
@@ -36,8 +36,8 @@ func makeReader(r io.Reader) flate.Reader {
return bufio.NewReader(r)
}
-var HeaderError = os.NewError("invalid gzip header")
-var ChecksumError = os.NewError("gzip checksum error")
+var HeaderError = errors.New("invalid gzip header")
+var ChecksumError = errors.New("gzip checksum error")
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Compressor and Decompressor structs.
@@ -71,13 +71,13 @@ type Decompressor struct {
size uint32
flg byte
buf [512]byte
- err os.Error
+ err error
}
// NewReader creates a new Decompressor reading the given reader.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Decompressor when done.
-func NewReader(r io.Reader) (*Decompressor, os.Error) {
+func NewReader(r io.Reader) (*Decompressor, error) {
z := new(Decompressor)
z.r = makeReader(r)
z.digest = crc32.NewIEEE()
@@ -93,8 +93,8 @@ func get4(p []byte) uint32 {
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}
-func (z *Decompressor) readString() (string, os.Error) {
- var err os.Error
+func (z *Decompressor) readString() (string, error) {
+ var err error
for i := 0; ; i++ {
if i >= len(z.buf) {
return "", HeaderError
@@ -112,7 +112,7 @@ func (z *Decompressor) readString() (string, os.Error) {
panic("not reached")
}
-func (z *Decompressor) read2() (uint32, os.Error) {
+func (z *Decompressor) read2() (uint32, error) {
_, err := io.ReadFull(z.r, z.buf[0:2])
if err != nil {
return 0, err
@@ -120,7 +120,7 @@ func (z *Decompressor) read2() (uint32, os.Error) {
return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil
}
-func (z *Decompressor) readHeader(save bool) os.Error {
+func (z *Decompressor) readHeader(save bool) error {
_, err := io.ReadFull(z.r, z.buf[0:10])
if err != nil {
return err
@@ -186,7 +186,7 @@ func (z *Decompressor) readHeader(save bool) os.Error {
return nil
}
-func (z *Decompressor) Read(p []byte) (n int, err os.Error) {
+func (z *Decompressor) Read(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
}
@@ -197,7 +197,7 @@ func (z *Decompressor) Read(p []byte) (n int, err os.Error) {
n, err = z.decompressor.Read(p)
z.digest.Write(p[0:n])
z.size += uint32(n)
- if n != 0 || err != os.EOF {
+ if n != 0 || err != io.EOF {
z.err = err
return
}
@@ -227,4 +227,4 @@ func (z *Decompressor) Read(p []byte) (n int, err os.Error) {
}
// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
-func (z *Decompressor) Close() os.Error { return z.decompressor.Close() }
+func (z *Decompressor) Close() error { return z.decompressor.Close() }
diff --git a/libgo/go/compress/gzip/gunzip_test.go b/libgo/go/compress/gzip/gunzip_test.go
index 1c08c73..771b0b6 100644
--- a/libgo/go/compress/gzip/gunzip_test.go
+++ b/libgo/go/compress/gzip/gunzip_test.go
@@ -7,7 +7,6 @@ package gzip
import (
"bytes"
"io"
- "os"
"testing"
)
@@ -16,7 +15,7 @@ type gunzipTest struct {
desc string
raw string
gzip []byte
- err os.Error
+ err error
}
var gunzipTests = []gunzipTest{
diff --git a/libgo/go/compress/gzip/gzip.go b/libgo/go/compress/gzip/gzip.go
index 8860d10..94b0f1f 100644
--- a/libgo/go/compress/gzip/gzip.go
+++ b/libgo/go/compress/gzip/gzip.go
@@ -6,10 +6,10 @@ package gzip
import (
"compress/flate"
+ "errors"
"hash"
"hash/crc32"
"io"
- "os"
)
// These constants are copied from the flate package, so that code that imports
@@ -32,11 +32,11 @@ type Compressor struct {
size uint32
closed bool
buf [10]byte
- err os.Error
+ err error
}
// NewWriter calls NewWriterLevel with the default compression level.
-func NewWriter(w io.Writer) (*Compressor, os.Error) {
+func NewWriter(w io.Writer) (*Compressor, error) {
return NewWriterLevel(w, DefaultCompression)
}
@@ -47,7 +47,7 @@ func NewWriter(w io.Writer) (*Compressor, os.Error) {
// It is the caller's responsibility to call Close on the WriteCloser when done.
// level is the compression level, which can be DefaultCompression, NoCompression,
// or any integer value between BestSpeed and BestCompression (inclusive).
-func NewWriterLevel(w io.Writer, level int) (*Compressor, os.Error) {
+func NewWriterLevel(w io.Writer, level int) (*Compressor, error) {
z := new(Compressor)
z.OS = 255 // unknown
z.w = w
@@ -70,9 +70,9 @@ func put4(p []byte, v uint32) {
}
// writeBytes writes a length-prefixed byte slice to z.w.
-func (z *Compressor) writeBytes(b []byte) os.Error {
+func (z *Compressor) writeBytes(b []byte) error {
if len(b) > 0xffff {
- return os.NewError("gzip.Write: Extra data is too large")
+ return errors.New("gzip.Write: Extra data is too large")
}
put2(z.buf[0:2], uint16(len(b)))
_, err := z.w.Write(z.buf[0:2])
@@ -84,12 +84,12 @@ func (z *Compressor) writeBytes(b []byte) os.Error {
}
// writeString writes a string (in ISO 8859-1 (Latin-1) format) to z.w.
-func (z *Compressor) writeString(s string) os.Error {
+func (z *Compressor) writeString(s string) error {
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
// TODO(nigeltao): Convert from UTF-8 to ISO 8859-1 (Latin-1).
for _, v := range s {
if v == 0 || v > 0x7f {
- return os.NewError("gzip.Write: non-ASCII header string")
+ return errors.New("gzip.Write: non-ASCII header string")
}
}
_, err := io.WriteString(z.w, s)
@@ -102,7 +102,7 @@ func (z *Compressor) writeString(s string) os.Error {
return err
}
-func (z *Compressor) Write(p []byte) (int, os.Error) {
+func (z *Compressor) Write(p []byte) (int, error) {
if z.err != nil {
return 0, z.err
}
@@ -162,7 +162,7 @@ func (z *Compressor) Write(p []byte) (int, os.Error) {
}
// Calling Close does not close the wrapped io.Writer originally passed to NewWriter.
-func (z *Compressor) Close() os.Error {
+func (z *Compressor) Close() error {
if z.err != nil {
return z.err
}
diff --git a/libgo/go/compress/lzw/reader.go b/libgo/go/compress/lzw/reader.go
index 21231c8..c787a95 100644
--- a/libgo/go/compress/lzw/reader.go
+++ b/libgo/go/compress/lzw/reader.go
@@ -16,6 +16,7 @@ package lzw
import (
"bufio"
+ "errors"
"fmt"
"io"
"os"
@@ -45,9 +46,9 @@ type decoder struct {
bits uint32
nBits uint
width uint
- read func(*decoder) (uint16, os.Error) // readLSB or readMSB
- litWidth int // width in bits of literal codes
- err os.Error
+ read func(*decoder) (uint16, error) // readLSB or readMSB
+ litWidth int // width in bits of literal codes
+ err error
// The first 1<<litWidth codes are literal codes.
// The next two codes mean clear and EOF.
@@ -78,7 +79,7 @@ type decoder struct {
}
// readLSB returns the next code for "Least Significant Bits first" data.
-func (d *decoder) readLSB() (uint16, os.Error) {
+func (d *decoder) readLSB() (uint16, error) {
for d.nBits < d.width {
x, err := d.r.ReadByte()
if err != nil {
@@ -94,7 +95,7 @@ func (d *decoder) readLSB() (uint16, os.Error) {
}
// readMSB returns the next code for "Most Significant Bits first" data.
-func (d *decoder) readMSB() (uint16, os.Error) {
+func (d *decoder) readMSB() (uint16, error) {
for d.nBits < d.width {
x, err := d.r.ReadByte()
if err != nil {
@@ -109,7 +110,7 @@ func (d *decoder) readMSB() (uint16, os.Error) {
return code, nil
}
-func (d *decoder) Read(b []byte) (int, os.Error) {
+func (d *decoder) Read(b []byte) (int, error) {
for {
if len(d.toRead) > 0 {
n := copy(b, d.toRead)
@@ -132,7 +133,7 @@ func (d *decoder) decode() {
for {
code, err := d.read(d)
if err != nil {
- if err == os.EOF {
+ if err == io.EOF {
err = io.ErrUnexpectedEOF
}
d.err = err
@@ -156,7 +157,7 @@ func (d *decoder) decode() {
continue
case code == d.eof:
d.flush()
- d.err = os.EOF
+ d.err = io.EOF
return
case code <= d.hi:
c, i := code, len(d.output)-1
@@ -186,7 +187,7 @@ func (d *decoder) decode() {
d.prefix[d.hi] = d.last
}
default:
- d.err = os.NewError("lzw: invalid code")
+ d.err = errors.New("lzw: invalid code")
return
}
d.last, d.hi = code, d.hi+1
@@ -211,7 +212,7 @@ func (d *decoder) flush() {
d.o = 0
}
-func (d *decoder) Close() os.Error {
+func (d *decoder) Close() error {
d.err = os.EINVAL // in case any Reads come along
return nil
}
@@ -230,7 +231,7 @@ func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
case MSB:
d.read = (*decoder).readMSB
default:
- d.err = os.NewError("lzw: unknown order")
+ d.err = errors.New("lzw: unknown order")
return d
}
if litWidth < 2 || 8 < litWidth {
diff --git a/libgo/go/compress/lzw/reader_test.go b/libgo/go/compress/lzw/reader_test.go
index f8042b0..0982157 100644
--- a/libgo/go/compress/lzw/reader_test.go
+++ b/libgo/go/compress/lzw/reader_test.go
@@ -8,7 +8,6 @@ import (
"bytes"
"io"
"io/ioutil"
- "os"
"runtime"
"strconv"
"strings"
@@ -19,7 +18,7 @@ type lzwTest struct {
desc string
raw string
compressed string
- err os.Error
+ err error
}
var lzwTests = []lzwTest{
diff --git a/libgo/go/compress/lzw/writer.go b/libgo/go/compress/lzw/writer.go
index 87143b7..3f380fa 100644
--- a/libgo/go/compress/lzw/writer.go
+++ b/libgo/go/compress/lzw/writer.go
@@ -6,6 +6,7 @@ package lzw
import (
"bufio"
+ "errors"
"fmt"
"io"
"os"
@@ -13,20 +14,20 @@ import (
// A writer is a buffered, flushable writer.
type writer interface {
- WriteByte(byte) os.Error
- Flush() os.Error
+ WriteByte(byte) error
+ Flush() error
}
// An errWriteCloser is an io.WriteCloser that always returns a given error.
type errWriteCloser struct {
- err os.Error
+ err error
}
-func (e *errWriteCloser) Write([]byte) (int, os.Error) {
+func (e *errWriteCloser) Write([]byte) (int, error) {
return 0, e.err
}
-func (e *errWriteCloser) Close() os.Error {
+func (e *errWriteCloser) Close() error {
return e.err
}
@@ -50,7 +51,7 @@ type encoder struct {
w writer
// write, bits, nBits and width are the state for converting a code stream
// into a byte stream.
- write func(*encoder, uint32) os.Error
+ write func(*encoder, uint32) error
bits uint32
nBits uint
width uint
@@ -64,7 +65,7 @@ type encoder struct {
savedCode uint32
// err is the first error encountered during writing. Closing the encoder
// will make any future Write calls return os.EINVAL.
- err os.Error
+ err error
// table is the hash table from 20-bit keys to 12-bit values. Each table
// entry contains key<<12|val and collisions resolve by linear probing.
// The keys consist of a 12-bit code prefix and an 8-bit byte suffix.
@@ -73,7 +74,7 @@ type encoder struct {
}
// writeLSB writes the code c for "Least Significant Bits first" data.
-func (e *encoder) writeLSB(c uint32) os.Error {
+func (e *encoder) writeLSB(c uint32) error {
e.bits |= c << e.nBits
e.nBits += e.width
for e.nBits >= 8 {
@@ -87,7 +88,7 @@ func (e *encoder) writeLSB(c uint32) os.Error {
}
// writeMSB writes the code c for "Most Significant Bits first" data.
-func (e *encoder) writeMSB(c uint32) os.Error {
+func (e *encoder) writeMSB(c uint32) error {
e.bits |= c << (32 - e.width - e.nBits)
e.nBits += e.width
for e.nBits >= 8 {
@@ -102,12 +103,12 @@ func (e *encoder) writeMSB(c uint32) os.Error {
// errOutOfCodes is an internal error that means that the encoder has run out
// of unused codes and a clear code needs to be sent next.
-var errOutOfCodes = os.NewError("lzw: out of codes")
+var errOutOfCodes = errors.New("lzw: out of codes")
// incHi increments e.hi and checks for both overflow and running out of
// unused codes. In the latter case, incHi sends a clear code, resets the
// encoder state and returns errOutOfCodes.
-func (e *encoder) incHi() os.Error {
+func (e *encoder) incHi() error {
e.hi++
if e.hi == e.overflow {
e.width++
@@ -130,7 +131,7 @@ func (e *encoder) incHi() os.Error {
}
// Write writes a compressed representation of p to e's underlying writer.
-func (e *encoder) Write(p []byte) (int, os.Error) {
+func (e *encoder) Write(p []byte) (int, error) {
if e.err != nil {
return 0, e.err
}
@@ -188,7 +189,7 @@ loop:
// Close closes the encoder, flushing any pending output. It does not close or
// flush e's underlying writer.
-func (e *encoder) Close() os.Error {
+func (e *encoder) Close() error {
if e.err != nil {
if e.err == os.EINVAL {
return nil
@@ -230,14 +231,14 @@ func (e *encoder) Close() os.Error {
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8.
func NewWriter(w io.Writer, order Order, litWidth int) io.WriteCloser {
- var write func(*encoder, uint32) os.Error
+ var write func(*encoder, uint32) error
switch order {
case LSB:
write = (*encoder).writeLSB
case MSB:
write = (*encoder).writeMSB
default:
- return &errWriteCloser{os.NewError("lzw: unknown order")}
+ return &errWriteCloser{errors.New("lzw: unknown order")}
}
if litWidth < 2 || 8 < litWidth {
return &errWriteCloser{fmt.Errorf("lzw: litWidth %d out of range", litWidth)}
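
For reference, a round-trip usage sketch of this package; the NewWriter and NewReader signatures shown in these hunks are unchanged in current Go (LSB ordering is the GIF convention, MSB the TIFF and PDF one):

package main

import (
	"bytes"
	"compress/lzw"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer

	// Compress with 8-bit literals and least-significant-bits-first order.
	w := lzw.NewWriter(&buf, lzw.LSB, 8)
	w.Write([]byte("to be or not to be, that is the question"))
	w.Close() // flushes pending bits and emits the EOF code

	// Decompress with the same order and literal width.
	r := lzw.NewReader(&buf, lzw.LSB, 8)
	out, err := io.ReadAll(r)
	r.Close()
	fmt.Printf("%q %v\n", out, err)
}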
diff --git a/libgo/go/compress/lzw/writer_test.go b/libgo/go/compress/lzw/writer_test.go
index 4c5e522..154cdf8 100644
--- a/libgo/go/compress/lzw/writer_test.go
+++ b/libgo/go/compress/lzw/writer_test.go
@@ -45,7 +45,7 @@ func testFile(t *testing.T, fn string, order Order, litWidth int) {
var b [4096]byte
for {
n, err0 := raw.Read(b[:])
- if err0 != nil && err0 != os.EOF {
+ if err0 != nil && err0 != io.EOF {
t.Errorf("%s (order=%d litWidth=%d): %v", fn, order, litWidth, err0)
return
}
@@ -58,7 +58,7 @@ func testFile(t *testing.T, fn string, order Order, litWidth int) {
t.Errorf("%s (order=%d litWidth=%d): %v", fn, order, litWidth, err1)
return
}
- if err0 == os.EOF {
+ if err0 == io.EOF {
break
}
}
diff --git a/libgo/go/compress/zlib/reader.go b/libgo/go/compress/zlib/reader.go
index 78dabdf..50a1e6c 100644
--- a/libgo/go/compress/zlib/reader.go
+++ b/libgo/go/compress/zlib/reader.go
@@ -26,36 +26,36 @@ package zlib
import (
"bufio"
"compress/flate"
+ "errors"
"hash"
"hash/adler32"
"io"
- "os"
)
const zlibDeflate = 8
-var ChecksumError = os.NewError("zlib checksum error")
-var HeaderError = os.NewError("invalid zlib header")
-var DictionaryError = os.NewError("invalid zlib dictionary")
+var ChecksumError = errors.New("zlib checksum error")
+var HeaderError = errors.New("invalid zlib header")
+var DictionaryError = errors.New("invalid zlib dictionary")
type reader struct {
r flate.Reader
decompressor io.ReadCloser
digest hash.Hash32
- err os.Error
+ err error
scratch [4]byte
}
// NewReader creates a new io.ReadCloser that satisfies reads by decompressing data read from r.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when done.
-func NewReader(r io.Reader) (io.ReadCloser, os.Error) {
+func NewReader(r io.Reader) (io.ReadCloser, error) {
return NewReaderDict(r, nil)
}
// NewReaderDict is like NewReader but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
-func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, os.Error) {
+func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
z := new(reader)
if fr, ok := r.(flate.Reader); ok {
z.r = fr
@@ -87,7 +87,7 @@ func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, os.Error) {
return z, nil
}
-func (z *reader) Read(p []byte) (n int, err os.Error) {
+func (z *reader) Read(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
}
@@ -97,7 +97,7 @@ func (z *reader) Read(p []byte) (n int, err os.Error) {
n, err = z.decompressor.Read(p)
z.digest.Write(p[0:n])
- if n != 0 || err != os.EOF {
+ if n != 0 || err != io.EOF {
z.err = err
return
}
@@ -117,7 +117,7 @@ func (z *reader) Read(p []byte) (n int, err os.Error) {
}
// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
-func (z *reader) Close() os.Error {
+func (z *reader) Close() error {
if z.err != nil {
return z.err
}
diff --git a/libgo/go/compress/zlib/reader_test.go b/libgo/go/compress/zlib/reader_test.go
index 195db44..d8f9f21 100644
--- a/libgo/go/compress/zlib/reader_test.go
+++ b/libgo/go/compress/zlib/reader_test.go
@@ -7,7 +7,6 @@ package zlib
import (
"bytes"
"io"
- "os"
"testing"
)
@@ -16,7 +15,7 @@ type zlibTest struct {
raw string
compressed []byte
dict []byte
- err os.Error
+ err error
}
// Compare-to-golden test data was generated by the ZLIB example program at
diff --git a/libgo/go/compress/zlib/writer.go b/libgo/go/compress/zlib/writer.go
index 8f86e9c..bbff637 100644
--- a/libgo/go/compress/zlib/writer.go
+++ b/libgo/go/compress/zlib/writer.go
@@ -6,10 +6,10 @@ package zlib
import (
"compress/flate"
+ "errors"
"hash"
"hash/adler32"
"io"
- "os"
)
// These constants are copied from the flate package, so that code that imports
@@ -27,17 +27,17 @@ type Writer struct {
w io.Writer
compressor *flate.Writer
digest hash.Hash32
- err os.Error
+ err error
scratch [4]byte
}
// NewWriter calls NewWriterLevel with the default compression level.
-func NewWriter(w io.Writer) (*Writer, os.Error) {
+func NewWriter(w io.Writer) (*Writer, error) {
return NewWriterLevel(w, DefaultCompression)
}
// NewWriterLevel calls NewWriterDict with no dictionary.
-func NewWriterLevel(w io.Writer, level int) (*Writer, os.Error) {
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
return NewWriterDict(w, level, nil)
}
@@ -46,7 +46,7 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, os.Error) {
// level is the compression level, which can be DefaultCompression, NoCompression,
// or any integer value between BestSpeed and BestCompression (inclusive).
// dict is the preset dictionary to compress with, or nil to use no dictionary.
-func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, os.Error) {
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
z := new(Writer)
// ZLIB has a two-byte header (as documented in RFC 1950).
// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
@@ -66,7 +66,7 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, os.Error) {
case 7, 8, 9:
z.scratch[1] = 3 << 6
default:
- return nil, os.NewError("level out of range")
+ return nil, errors.New("level out of range")
}
if dict != nil {
z.scratch[1] |= 1 << 5
@@ -94,7 +94,7 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, os.Error) {
return z, nil
}
-func (z *Writer) Write(p []byte) (n int, err os.Error) {
+func (z *Writer) Write(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
}
@@ -111,7 +111,7 @@ func (z *Writer) Write(p []byte) (n int, err os.Error) {
}
// Flush flushes the underlying compressor.
-func (z *Writer) Flush() os.Error {
+func (z *Writer) Flush() error {
if z.err != nil {
return z.err
}
@@ -120,7 +120,7 @@ func (z *Writer) Flush() os.Error {
}
// Calling Close does not close the wrapped io.Writer originally passed to NewWriter.
-func (z *Writer) Close() os.Error {
+func (z *Writer) Close() error {
if z.err != nil {
return z.err
}
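
A closing note on the zlib header assembled in NewWriterDict above: per RFC 1950 the first byte (CMF) is 0x78 for deflate with a 32 KiB window, and the second (FLG) carries the compression-level hint in its top two bits (3<<6 for levels 7-9, as in the switch shown), the FDICT bit when a preset dictionary is used, and low FCHECK bits chosen so the two bytes form a multiple of 31. A standalone sketch of that computation with an invented helper name; the hunks above elide the part of NewWriterDict that computes the check bits:

package main

import "fmt"

// zlibHeader builds the RFC 1950 two-byte header: CMF (compression method
// and window size) followed by FLG (level hint, optional FDICT bit, and
// FCHECK bits that make CMF<<8|FLG divisible by 31).
func zlibHeader(flevel byte, hasDict bool) [2]byte {
	cmf := byte(0x78) // CINFO=7 (32 KiB window) in the high nibble, CM=8 (deflate) in the low
	flg := flevel << 6
	if hasDict {
		flg |= 1 << 5 // FDICT: a preset dictionary follows the header
	}
	if rem := (uint16(cmf)<<8 | uint16(flg)) % 31; rem != 0 {
		flg += byte(31 - rem) // FCHECK
	}
	return [2]byte{cmf, flg}
}

func main() {
	h := zlibHeader(3, false) // FLEVEL 3, the value the switch above uses for levels 7-9
	fmt.Printf("%02x %02x\n", h[0], h[1]) // 78 da, a familiar zlib stream prefix
}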