author    | Ian Lance Taylor <ian@gcc.gnu.org> | 2016-07-22 18:15:38 +0000
committer | Ian Lance Taylor <ian@gcc.gnu.org> | 2016-07-22 18:15:38 +0000
commit    | 22b955cca564a9a3a5b8c9d9dd1e295b7943c128 (patch)
tree      | abdbd898676e1f853fca2d7e031d105d7ebcf676 /libgo/go/bytes
parent    | 9d04a3af4c6491536badf6bde9707c907e4d196b (diff)
download  | gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.zip
          | gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.tar.gz
          | gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.tar.bz2
libgo: update to go1.7rc3
Reviewed-on: https://go-review.googlesource.com/25150
From-SVN: r238662
Diffstat (limited to 'libgo/go/bytes')
-rw-r--r-- | libgo/go/bytes/buffer.go       |  18
-rw-r--r-- | libgo/go/bytes/bytes.go        |  28
-rw-r--r-- | libgo/go/bytes/bytes_decl.go   |   2
-rw-r--r-- | libgo/go/bytes/bytes_test.go   | 427
-rw-r--r-- | libgo/go/bytes/compare_test.go |   2
-rw-r--r-- | libgo/go/bytes/equal_test.go   |   4
-rw-r--r-- | libgo/go/bytes/reader.go       |  20
-rw-r--r-- | libgo/go/bytes/reader_test.go  |  50
8 files changed, 310 insertions, 241 deletions
diff --git a/libgo/go/bytes/buffer.go b/libgo/go/bytes/buffer.go
index ddaba3b..9154a1b 100644
--- a/libgo/go/bytes/buffer.go
+++ b/libgo/go/bytes/buffer.go
@@ -17,8 +17,8 @@ import (
 type Buffer struct {
     buf []byte // contents are the bytes buf[off : len(buf)]
     off int    // read at &buf[off], write at &buf[len(buf)]
-    runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
-    bootstrap [64]byte          // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+    runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each call to WriteRune
+    bootstrap [64]byte          // memory to hold first slice; helps small buffers avoid allocation.
     lastRead readOp // last read operation, so that Unread* can work correctly.
 }
@@ -44,7 +44,7 @@ var ErrTooLarge = errors.New("bytes.Buffer: too large")
 func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }

 // String returns the contents of the unread portion of the buffer
-// as a string.  If the Buffer is a nil pointer, it returns "<nil>".
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
 func (b *Buffer) String() string {
     if b == nil {
         // Special case, useful in debugging.
@@ -145,7 +145,7 @@ func (b *Buffer) WriteString(s string) (n int, err error) {
 }

 // MinRead is the minimum slice size passed to a Read call by
-// Buffer.ReadFrom.  As long as the Buffer has at least MinRead bytes beyond
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
 // what is required to hold the contents of r, ReadFrom will not grow the
 // underlying buffer.
 const MinRead = 512
@@ -252,7 +252,7 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
 }

 // Read reads the next len(p) bytes from the buffer or until the buffer
-// is drained.  The return value n is the number of bytes read.  If the
+// is drained. The return value n is the number of bytes read. If the
 // buffer has no data to return, err is io.EOF (unless len(p) is zero);
 // otherwise it is nil.
 func (b *Buffer) Read(p []byte) (n int, err error) {
@@ -293,14 +293,14 @@ func (b *Buffer) Next(n int) []byte {

 // ReadByte reads and returns the next byte from the buffer.
 // If no byte is available, it returns error io.EOF.
-func (b *Buffer) ReadByte() (c byte, err error) {
+func (b *Buffer) ReadByte() (byte, error) {
     b.lastRead = opInvalid
     if b.off >= len(b.buf) {
         // Buffer is empty, reset to recover space.
         b.Truncate(0)
         return 0, io.EOF
     }
-    c = b.buf[b.off]
+    c := b.buf[b.off]
     b.off++
     b.lastRead = opRead
     return c, nil
@@ -347,7 +347,7 @@ func (b *Buffer) UnreadRune() error {
 }

 // UnreadByte unreads the last byte returned by the most recent
-// read operation.  If write has happened since the last read, UnreadByte
+// read operation. If write has happened since the last read, UnreadByte
 // returns an error.
 func (b *Buffer) UnreadByte() error {
     if b.lastRead != opReadRune && b.lastRead != opRead {
@@ -400,7 +400,7 @@ func (b *Buffer) ReadString(delim byte) (line string, err error) {
 }

 // NewBuffer creates and initializes a new Buffer using buf as its initial
-// contents.  It is intended to prepare a Buffer to read existing data.  It
+// contents. It is intended to prepare a Buffer to read existing data. It
 // can also be used to size the internal buffer for writing. To do that,
 // buf should have the desired capacity but a length of zero.
 //
diff --git a/libgo/go/bytes/bytes.go b/libgo/go/bytes/bytes.go
index b868240..305c85d 100644
--- a/libgo/go/bytes/bytes.go
+++ b/libgo/go/bytes/bytes.go
@@ -83,6 +83,16 @@ func Contains(b, subslice []byte) bool {
     return Index(b, subslice) != -1
 }

+// ContainsAny reports whether any of the UTF-8-encoded Unicode code points in chars are within b.
+func ContainsAny(b []byte, chars string) bool {
+    return IndexAny(b, chars) >= 0
+}
+
+// ContainsRune reports whether the Unicode code point r is within b.
+func ContainsRune(b []byte, r rune) bool {
+    return IndexRune(b, r) >= 0
+}
+
 // Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
 func Index(s, sep []byte) int {
     n := len(sep)
@@ -164,7 +174,7 @@ func IndexRune(s []byte, r rune) int {

 // IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
 // It returns the byte index of the first occurrence in s of any of the Unicode
-// code points in chars.  It returns -1 if chars is empty or if there is no code
+// code points in chars. It returns -1 if chars is empty or if there is no code
 // point in common.
 func IndexAny(s []byte, chars string) int {
     if len(chars) > 0 {
@@ -188,8 +198,8 @@ func IndexAny(s []byte, chars string) int {
 }

 // LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
-// points.  It returns the byte index of the last occurrence in s of any of
-// the Unicode code points in chars.  It returns -1 if chars is empty or if
+// points. It returns the byte index of the last occurrence in s of any of
+// the Unicode code points in chars. It returns -1 if chars is empty or if
 // there is no code point in common.
 func LastIndexAny(s []byte, chars string) int {
     if len(chars) > 0 {
@@ -276,7 +286,7 @@ func Fields(s []byte) [][]byte {

 // FieldsFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
 // It splits the slice s at each run of code points c satisfying f(c) and
-// returns a slice of subslices of s.  If all code points in s satisfy f(c), or
+// returns a slice of subslices of s. If all code points in s satisfy f(c), or
 // len(s) == 0, an empty slice is returned.
 // FieldsFunc makes no guarantees about the order in which it calls f(c).
 // If f does not return consistent results for a given c, FieldsFunc may crash.
@@ -352,12 +362,12 @@ func HasSuffix(s, suffix []byte) bool {

 // Map returns a copy of the byte slice s with all its characters modified
 // according to the mapping function. If mapping returns a negative value, the character is
-// dropped from the string with no replacement.  The characters in s and the
+// dropped from the string with no replacement. The characters in s and the
 // output are interpreted as UTF-8-encoded Unicode code points.
 func Map(mapping func(r rune) rune, s []byte) []byte {
     // In the worst case, the slice can grow when mapped, making
-    // things unpleasant.  But it's so rare we barge in assuming it's
-    // fine.  It could also shrink but that falls out naturally.
+    // things unpleasant. But it's so rare we barge in assuming it's
+    // fine. It could also shrink but that falls out naturally.
     maxbytes := len(s) // length of b
     nbytes := 0        // number of bytes encoded in b
     b := make([]byte, maxbytes)
@@ -697,7 +707,7 @@ func EqualFold(s, t []byte) bool {
         return false
     }

-    // General case.  SimpleFold(x) returns the next equivalent rune > x
+    // General case. SimpleFold(x) returns the next equivalent rune > x
     // or wraps around to smaller values.
     r := unicode.SimpleFold(sr)
     for r != sr && r < tr {
@@ -709,6 +719,6 @@
         return false
     }

-    // One string is empty.  Are both?
+    // One string is empty. Are both?
     return len(s) == len(t)
 }
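Note: the two helpers added in the hunk above, ContainsAny and ContainsRune, are thin wrappers over IndexAny and IndexRune and mirror the existing strings-package counterparts. A minimal usage sketch, not part of the patch itself (the data value is illustrative):

package main

import (
    "bytes"
    "fmt"
)

func main() {
    data := []byte("a☺b☻c☹d")

    // ContainsAny reports whether any code point of the chars string occurs in data.
    fmt.Println(bytes.ContainsAny(data, "xyz☻")) // true: '☻' is present
    fmt.Println(bytes.ContainsAny(data, ""))     // false: empty chars never matches

    // ContainsRune reports whether a single code point occurs in data.
    fmt.Println(bytes.ContainsRune(data, '☹')) // true
    fmt.Println(bytes.ContainsRune(data, 'x')) // false
}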
diff --git a/libgo/go/bytes/bytes_decl.go b/libgo/go/bytes/bytes_decl.go
index b453f21..df0614f 100644
--- a/libgo/go/bytes/bytes_decl.go
+++ b/libgo/go/bytes/bytes_decl.go
@@ -1,4 +1,4 @@
-// Copyright 2010 The Go Authors.  All rights reserved.
+// Copyright 2010 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

diff --git a/libgo/go/bytes/bytes_test.go b/libgo/go/bytes/bytes_test.go
index 8df62fc..c48f662 100644
--- a/libgo/go/bytes/bytes_test.go
+++ b/libgo/go/bytes/bytes_test.go
@@ -6,6 +6,7 @@ package bytes_test

 import (
     . "bytes"
+    "fmt"
     "math/rand"
     "reflect"
     "testing"
@@ -47,32 +48,6 @@ type BinOpTest struct {
     i int
 }

-var equalTests = []struct {
-    a, b []byte
-    i    int
-}{
-    {[]byte(""), []byte(""), 0},
-    {[]byte("a"), []byte(""), 1},
-    {[]byte(""), []byte("a"), -1},
-    {[]byte("abc"), []byte("abc"), 0},
-    {[]byte("ab"), []byte("abc"), -1},
-    {[]byte("abc"), []byte("ab"), 1},
-    {[]byte("x"), []byte("ab"), 1},
-    {[]byte("ab"), []byte("x"), -1},
-    {[]byte("x"), []byte("a"), 1},
-    {[]byte("b"), []byte("x"), -1},
-    // test runtime·memeq's chunked implementation
-    {[]byte("abcdefgh"), []byte("abcdefgh"), 0},
-    {[]byte("abcdefghi"), []byte("abcdefghi"), 0},
-    {[]byte("abcdefghi"), []byte("abcdefghj"), -1},
-    // nil tests
-    {nil, nil, 0},
-    {[]byte(""), nil, 0},
-    {nil, []byte(""), 0},
-    {[]byte("a"), nil, 1},
-    {nil, []byte("a"), -1},
-}
-
 func TestEqual(t *testing.T) {
     for _, tt := range compareTests {
         eql := Equal(tt.a, tt.b)
@@ -113,7 +88,7 @@ func TestEqualExhaustive(t *testing.T) {
     }
 }

-// make sure Equal returns false for minimally different strings.  The data
+// make sure Equal returns false for minimally different strings. The data
 // is all zeros except for a single one in one location.
 func TestNotEqual(t *testing.T) {
     var size = 128
@@ -335,6 +310,41 @@ func TestIndexByteBig(t *testing.T) {
     }
 }

+// test a small index across all page offsets
+func TestIndexByteSmall(t *testing.T) {
+    b := make([]byte, 5015) // bigger than a page
+    // Make sure we find the correct byte even when straddling a page.
+    for i := 0; i <= len(b)-15; i++ {
+        for j := 0; j < 15; j++ {
+            b[i+j] = byte(100 + j)
+        }
+        for j := 0; j < 15; j++ {
+            p := IndexByte(b[i:i+15], byte(100+j))
+            if p != j {
+                t.Errorf("IndexByte(%q, %d) = %d", b[i:i+15], 100+j, p)
+            }
+        }
+        for j := 0; j < 15; j++ {
+            b[i+j] = 0
+        }
+    }
+    // Make sure matches outside the slice never trigger.
+    for i := 0; i <= len(b)-15; i++ {
+        for j := 0; j < 15; j++ {
+            b[i+j] = 1
+        }
+        for j := 0; j < 15; j++ {
+            p := IndexByte(b[i:i+15], byte(0))
+            if p != -1 {
+                t.Errorf("IndexByte(%q, %d) = %d", b[i:i+15], 0, p)
+            }
+        }
+        for j := 0; j < 15; j++ {
+            b[i+j] = 0
+        }
+    }
+}
+
 func TestIndexRune(t *testing.T) {
     for _, tt := range indexRuneTests {
         a := []byte(tt.a)
@@ -348,165 +358,152 @@ func TestIndexRune(t *testing.T) {

 var bmbuf []byte

-func BenchmarkIndexByte32(b *testing.B) { bmIndexByte(b, IndexByte, 32) }
-func BenchmarkIndexByte4K(b *testing.B) { bmIndexByte(b, IndexByte, 4<<10) }
-func BenchmarkIndexByte4M(b *testing.B) { bmIndexByte(b, IndexByte, 4<<20) }
-func BenchmarkIndexByte64M(b *testing.B) { bmIndexByte(b, IndexByte, 64<<20) }
-func BenchmarkIndexBytePortable32(b *testing.B) { bmIndexByte(b, IndexBytePortable, 32) }
-func BenchmarkIndexBytePortable4K(b *testing.B) { bmIndexByte(b, IndexBytePortable, 4<<10) }
-func BenchmarkIndexBytePortable4M(b *testing.B) { bmIndexByte(b, IndexBytePortable, 4<<20) }
-func BenchmarkIndexBytePortable64M(b *testing.B) { bmIndexByte(b, IndexBytePortable, 64<<20) }
-
-func bmIndexByte(b *testing.B, index func([]byte, byte) int, n int) {
-    if len(bmbuf) < n {
-        bmbuf = make([]byte, n)
-    }
-    b.SetBytes(int64(n))
-    buf := bmbuf[0:n]
-    buf[n-1] = 'x'
-    for i := 0; i < b.N; i++ {
-        j := index(buf, 'x')
-        if j != n-1 {
-            b.Fatal("bad index", j)
-        }
+func valName(x int) string {
+    if s := x >> 20; s<<20 == x {
+        return fmt.Sprintf("%dM", s)
+    }
+    if s := x >> 10; s<<10 == x {
+        return fmt.Sprintf("%dK", s)
     }
-    buf[n-1] = '\x00'
+    return fmt.Sprint(x)
 }

-func BenchmarkEqual0(b *testing.B) {
-    var buf [4]byte
-    buf1 := buf[0:0]
-    buf2 := buf[1:1]
-    for i := 0; i < b.N; i++ {
-        eq := Equal(buf1, buf2)
-        if !eq {
-            b.Fatal("bad equal")
-        }
-    }
-}
-
-func BenchmarkEqual1(b *testing.B) { bmEqual(b, Equal, 1) }
-func BenchmarkEqual6(b *testing.B) { bmEqual(b, Equal, 6) }
-func BenchmarkEqual9(b *testing.B) { bmEqual(b, Equal, 9) }
-func BenchmarkEqual15(b *testing.B) { bmEqual(b, Equal, 15) }
-func BenchmarkEqual16(b *testing.B) { bmEqual(b, Equal, 16) }
-func BenchmarkEqual20(b *testing.B) { bmEqual(b, Equal, 20) }
-func BenchmarkEqual32(b *testing.B) { bmEqual(b, Equal, 32) }
-func BenchmarkEqual4K(b *testing.B) { bmEqual(b, Equal, 4<<10) }
-func BenchmarkEqual4M(b *testing.B) { bmEqual(b, Equal, 4<<20) }
-func BenchmarkEqual64M(b *testing.B) { bmEqual(b, Equal, 64<<20) }
-func BenchmarkEqualPort1(b *testing.B) { bmEqual(b, EqualPortable, 1) }
-func BenchmarkEqualPort6(b *testing.B) { bmEqual(b, EqualPortable, 6) }
-func BenchmarkEqualPort32(b *testing.B) { bmEqual(b, EqualPortable, 32) }
-func BenchmarkEqualPort4K(b *testing.B) { bmEqual(b, EqualPortable, 4<<10) }
-func BenchmarkEqualPortable4M(b *testing.B) { bmEqual(b, EqualPortable, 4<<20) }
-func BenchmarkEqualPortable64M(b *testing.B) { bmEqual(b, EqualPortable, 64<<20) }
-
-func bmEqual(b *testing.B, equal func([]byte, []byte) bool, n int) {
-    if len(bmbuf) < 2*n {
-        bmbuf = make([]byte, 2*n)
-    }
-    b.SetBytes(int64(n))
-    buf1 := bmbuf[0:n]
-    buf2 := bmbuf[n : 2*n]
-    buf1[n-1] = 'x'
-    buf2[n-1] = 'x'
-    for i := 0; i < b.N; i++ {
-        eq := equal(buf1, buf2)
-        if !eq {
-            b.Fatal("bad equal")
-        }
+func benchBytes(b *testing.B, sizes []int, f func(b *testing.B, n int)) {
+    for _, n := range sizes {
+        b.Run(valName(n), func(b *testing.B) {
+            if len(bmbuf) < n {
+                bmbuf = make([]byte, n)
+            }
+            b.SetBytes(int64(n))
+            f(b, n)
+        })
     }
-    buf1[n-1] = '\x00'
-    buf2[n-1] = '\x00'
 }
-func BenchmarkIndex32(b *testing.B) { bmIndex(b, Index, 32) }
-func BenchmarkIndex4K(b *testing.B) { bmIndex(b, Index, 4<<10) }
-func BenchmarkIndex4M(b *testing.B) { bmIndex(b, Index, 4<<20) }
-func BenchmarkIndex64M(b *testing.B) { bmIndex(b, Index, 64<<20) }
+var indexSizes = []int{10, 32, 4 << 10, 4 << 20, 64 << 20}

-func bmIndex(b *testing.B, index func([]byte, []byte) int, n int) {
-    if len(bmbuf) < n {
-        bmbuf = make([]byte, n)
-    }
-    b.SetBytes(int64(n))
-    buf := bmbuf[0:n]
-    buf[n-1] = 'x'
-    for i := 0; i < b.N; i++ {
-        j := index(buf, buf[n-7:])
-        if j != n-7 {
-            b.Fatal("bad index", j)
-        }
-    }
-    buf[n-1] = '\x00'
+func BenchmarkIndexByte(b *testing.B) {
+    benchBytes(b, indexSizes, bmIndexByte(IndexByte))
 }

-func BenchmarkIndexEasy32(b *testing.B) { bmIndexEasy(b, Index, 32) }
-func BenchmarkIndexEasy4K(b *testing.B) { bmIndexEasy(b, Index, 4<<10) }
-func BenchmarkIndexEasy4M(b *testing.B) { bmIndexEasy(b, Index, 4<<20) }
-func BenchmarkIndexEasy64M(b *testing.B) { bmIndexEasy(b, Index, 64<<20) }
+func BenchmarkIndexBytePortable(b *testing.B) {
+    benchBytes(b, indexSizes, bmIndexByte(IndexBytePortable))
+}

-func bmIndexEasy(b *testing.B, index func([]byte, []byte) int, n int) {
-    if len(bmbuf) < n {
-        bmbuf = make([]byte, n)
-    }
-    b.SetBytes(int64(n))
-    buf := bmbuf[0:n]
-    buf[n-1] = 'x'
-    buf[n-7] = 'x'
-    for i := 0; i < b.N; i++ {
-        j := index(buf, buf[n-7:])
-        if j != n-7 {
-            b.Fatal("bad index", j)
+func bmIndexByte(index func([]byte, byte) int) func(b *testing.B, n int) {
+    return func(b *testing.B, n int) {
+        buf := bmbuf[0:n]
+        buf[n-1] = 'x'
+        for i := 0; i < b.N; i++ {
+            j := index(buf, 'x')
+            if j != n-1 {
+                b.Fatal("bad index", j)
+            }
         }
+        buf[n-1] = '\x00'
     }
-    buf[n-1] = '\x00'
-    buf[n-7] = '\x00'
 }

-func BenchmarkCount32(b *testing.B) { bmCount(b, Count, 32) }
-func BenchmarkCount4K(b *testing.B) { bmCount(b, Count, 4<<10) }
-func BenchmarkCount4M(b *testing.B) { bmCount(b, Count, 4<<20) }
-func BenchmarkCount64M(b *testing.B) { bmCount(b, Count, 64<<20) }
-
-func bmCount(b *testing.B, count func([]byte, []byte) int, n int) {
-    if len(bmbuf) < n {
-        bmbuf = make([]byte, n)
-    }
-    b.SetBytes(int64(n))
-    buf := bmbuf[0:n]
-    buf[n-1] = 'x'
-    for i := 0; i < b.N; i++ {
-        j := count(buf, buf[n-7:])
-        if j != 1 {
-            b.Fatal("bad count", j)
+func BenchmarkEqual(b *testing.B) {
+    b.Run("0", func(b *testing.B) {
+        var buf [4]byte
+        buf1 := buf[0:0]
+        buf2 := buf[1:1]
+        for i := 0; i < b.N; i++ {
+            eq := Equal(buf1, buf2)
+            if !eq {
+                b.Fatal("bad equal")
+            }
         }
-    }
-    buf[n-1] = '\x00'
+    })
+
+    sizes := []int{1, 6, 9, 15, 16, 20, 32, 4 << 10, 4 << 20, 64 << 20}
+    benchBytes(b, sizes, bmEqual(Equal))
 }

-func BenchmarkCountEasy32(b *testing.B) { bmCountEasy(b, Count, 32) }
-func BenchmarkCountEasy4K(b *testing.B) { bmCountEasy(b, Count, 4<<10) }
-func BenchmarkCountEasy4M(b *testing.B) { bmCountEasy(b, Count, 4<<20) }
-func BenchmarkCountEasy64M(b *testing.B) { bmCountEasy(b, Count, 64<<20) }
+func BenchmarkEqualPort(b *testing.B) {
+    sizes := []int{1, 6, 32, 4 << 10, 4 << 20, 64 << 20}
+    benchBytes(b, sizes, bmEqual(EqualPortable))
+}

-func bmCountEasy(b *testing.B, count func([]byte, []byte) int, n int) {
-    if len(bmbuf) < n {
-        bmbuf = make([]byte, n)
-    }
-    b.SetBytes(int64(n))
-    buf := bmbuf[0:n]
-    buf[n-1] = 'x'
-    buf[n-7] = 'x'
-    for i := 0; i < b.N; i++ {
-        j := count(buf, buf[n-7:])
-        if j != 1 {
-            b.Fatal("bad count", j)
+func bmEqual(equal func([]byte, []byte) bool) func(b *testing.B, n int) {
+    return func(b *testing.B, n int) {
+        if len(bmbuf) < 2*n {
+            bmbuf = make([]byte, 2*n)
+        }
+        buf1 := bmbuf[0:n]
+        buf2 := bmbuf[n : 2*n]
+        buf1[n-1] = 'x'
+        buf2[n-1] = 'x'
+        for i := 0; i < b.N; i++ {
+            eq := equal(buf1, buf2)
+            if !eq {
+                b.Fatal("bad equal")
+            }
         }
-    }
-    buf[n-1] = '\x00'
-    buf[n-7] = '\x00'
+        buf1[n-1] = '\x00'
+        buf2[n-1] = '\x00'
+    }
+}
+
+func BenchmarkIndex(b *testing.B) {
+    benchBytes(b, indexSizes, func(b *testing.B, n int) {
+        buf := bmbuf[0:n]
+        buf[n-1] = 'x'
+        for i := 0; i < b.N; i++ {
+            j := Index(buf, buf[n-7:])
+            if j != n-7 {
+                b.Fatal("bad index", j)
+            }
+        }
+        buf[n-1] = '\x00'
+    })
+}
+
+func BenchmarkIndexEasy(b *testing.B) {
+    benchBytes(b, indexSizes, func(b *testing.B, n int) {
+        buf := bmbuf[0:n]
+        buf[n-1] = 'x'
+        buf[n-7] = 'x'
+        for i := 0; i < b.N; i++ {
+            j := Index(buf, buf[n-7:])
+            if j != n-7 {
+                b.Fatal("bad index", j)
+            }
+        }
+        buf[n-1] = '\x00'
+        buf[n-7] = '\x00'
+    })
+}
+
+func BenchmarkCount(b *testing.B) {
+    benchBytes(b, indexSizes, func(b *testing.B, n int) {
+        buf := bmbuf[0:n]
+        buf[n-1] = 'x'
+        for i := 0; i < b.N; i++ {
+            j := Count(buf, buf[n-7:])
+            if j != 1 {
+                b.Fatal("bad count", j)
+            }
+        }
+        buf[n-1] = '\x00'
+    })
+}
+
+func BenchmarkCountEasy(b *testing.B) {
+    benchBytes(b, indexSizes, func(b *testing.B, n int) {
+        buf := bmbuf[0:n]
+        buf[n-1] = 'x'
+        buf[n-7] = 'x'
+        for i := 0; i < b.N; i++ {
+            j := Count(buf, buf[n-7:])
+            if j != 1 {
+                b.Fatal("bad count", j)
+            }
+        }
+        buf[n-1] = '\x00'
+        buf[n-7] = '\x00'
+    })
 }

 type ExplodeTest struct {
@@ -760,7 +757,7 @@ func TestMap(t *testing.T) {

     // Run a couple of awful growth/shrinkage tests
     a := tenRunes('a')
-    // 1. Grow.  This triggers two reallocations in Map.
+    // 1. Grow. This triggers two reallocations in Map.
     maxRune := func(r rune) rune { return unicode.MaxRune }
     m := Map(maxRune, []byte(a))
     expect := tenRunes(unicode.MaxRune)
@@ -1207,6 +1204,57 @@ func TestContains(t *testing.T) {
     }
 }

+var ContainsAnyTests = []struct {
+    b        []byte
+    substr   string
+    expected bool
+}{
+    {[]byte(""), "", false},
+    {[]byte(""), "a", false},
+    {[]byte(""), "abc", false},
+    {[]byte("a"), "", false},
+    {[]byte("a"), "a", true},
+    {[]byte("aaa"), "a", true},
+    {[]byte("abc"), "xyz", false},
+    {[]byte("abc"), "xcz", true},
+    {[]byte("a☺b☻c☹d"), "uvw☻xyz", true},
+    {[]byte("aRegExp*"), ".(|)*+?^$[]", true},
+    {[]byte(dots + dots + dots), " ", false},
+}
+
+func TestContainsAny(t *testing.T) {
+    for _, ct := range ContainsAnyTests {
+        if ContainsAny(ct.b, ct.substr) != ct.expected {
+            t.Errorf("ContainsAny(%s, %s) = %v, want %v",
+                ct.b, ct.substr, !ct.expected, ct.expected)
+        }
+    }
+}
+
+var ContainsRuneTests = []struct {
+    b        []byte
+    r        rune
+    expected bool
+}{
+    {[]byte(""), 'a', false},
+    {[]byte("a"), 'a', true},
+    {[]byte("aaa"), 'a', true},
+    {[]byte("abc"), 'y', false},
+    {[]byte("abc"), 'c', true},
+    {[]byte("a☺b☻c☹d"), 'x', false},
+    {[]byte("a☺b☻c☹d"), '☻', true},
+    {[]byte("aRegExp*"), '*', true},
+}
+
+func TestContainsRune(t *testing.T) {
+    for _, ct := range ContainsRuneTests {
+        if ContainsRune(ct.b, ct.r) != ct.expected {
+            t.Errorf("ContainsRune(%q, %q) = %v, want %v",
+                ct.b, ct.r, !ct.expected, ct.expected)
+        }
+    }
+}
+
 var makeFieldsInput = func() []byte {
     x := make([]byte, 1<<20)
     // Input is ~10% space, ~10% 2-byte UTF-8, rest ASCII non-space.
@@ -1256,33 +1304,24 @@ func BenchmarkRepeat(b *testing.B) {
     }
 }

-func benchmarkBytesCompare(b *testing.B, n int) {
-    var x = make([]byte, n)
-    var y = make([]byte, n)
+func BenchmarkBytesCompare(b *testing.B) {
+    for n := 1; n <= 2048; n <<= 1 {
+        b.Run(fmt.Sprint(n), func(b *testing.B) {
+            var x = make([]byte, n)
+            var y = make([]byte, n)

-    for i := 0; i < n; i++ {
-        x[i] = 'a'
-    }
+            for i := 0; i < n; i++ {
+                x[i] = 'a'
+            }

-    for i := 0; i < n; i++ {
-        y[i] = 'a'
-    }
+            for i := 0; i < n; i++ {
+                y[i] = 'a'
+            }

-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
-        Compare(x, y)
-    }
-}
-
-func BenchmarkBytesCompare1(b *testing.B) { benchmarkBytesCompare(b, 1) }
-func BenchmarkBytesCompare2(b *testing.B) { benchmarkBytesCompare(b, 2) }
-func BenchmarkBytesCompare4(b *testing.B) { benchmarkBytesCompare(b, 4) }
-func BenchmarkBytesCompare8(b *testing.B) { benchmarkBytesCompare(b, 8) }
-func BenchmarkBytesCompare16(b *testing.B) { benchmarkBytesCompare(b, 16) }
-func BenchmarkBytesCompare32(b *testing.B) { benchmarkBytesCompare(b, 32) }
-func BenchmarkBytesCompare64(b *testing.B) { benchmarkBytesCompare(b, 64) }
-func BenchmarkBytesCompare128(b *testing.B) { benchmarkBytesCompare(b, 128) }
-func BenchmarkBytesCompare256(b *testing.B) { benchmarkBytesCompare(b, 256) }
-func BenchmarkBytesCompare512(b *testing.B) { benchmarkBytesCompare(b, 512) }
-func BenchmarkBytesCompare1024(b *testing.B) { benchmarkBytesCompare(b, 1024) }
-func BenchmarkBytesCompare2048(b *testing.B) { benchmarkBytesCompare(b, 2048) }
+            b.ResetTimer()
+            for i := 0; i < b.N; i++ {
+                Compare(x, y)
+            }
+        })
+    }
+}
diff --git a/libgo/go/bytes/compare_test.go b/libgo/go/bytes/compare_test.go
index f2d81d5..35088a1 100644
--- a/libgo/go/bytes/compare_test.go
+++ b/libgo/go/bytes/compare_test.go
@@ -62,7 +62,7 @@ func TestCompareBytes(t *testing.T) {
     a := make([]byte, n+1)
     b := make([]byte, n+1)
     for len := 0; len < 128; len++ {
-        // randomish but deterministic data.  No 0 or 255.
+        // randomish but deterministic data. No 0 or 255.
         for i := 0; i < len; i++ {
             a[i] = byte(1 + 31*i%254)
             b[i] = byte(1 + 31*i%254)
diff --git a/libgo/go/bytes/equal_test.go b/libgo/go/bytes/equal_test.go
index 1bf19a7..9fdead8 100644
--- a/libgo/go/bytes/equal_test.go
+++ b/libgo/go/bytes/equal_test.go
@@ -14,11 +14,11 @@ import (
 )

 // This file tests the situation where memeq is checking
-// data very near to a page boundary.  We want to make sure
+// data very near to a page boundary. We want to make sure
 // equal does not read across the boundary and cause a page
 // fault where it shouldn't.

-// This test runs only on linux.  The code being tested is
+// This test runs only on linux. The code being tested is
 // not OS-specific, so it does not need to be tested on all
 // operating systems.
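Note: the bytes_test.go rewrite above leans on sub-benchmarks (testing.B.Run), new in Go 1.7: a single driver function such as BenchmarkIndexByte or BenchmarkBytesCompare now loops over the sizes and reports one named result per size, instead of one top-level Benchmark function per size. A rough sketch of the same pattern, with a helper name and sizes that are illustrative rather than taken from the patch:

package bench_test

import (
    "bytes"
    "fmt"
    "testing"
)

// benchSizes runs f once per buffer size as a named sub-benchmark,
// so results appear as BenchmarkIndexByte/32, BenchmarkIndexByte/4096, ...
func benchSizes(b *testing.B, sizes []int, f func(b *testing.B, buf []byte)) {
    for _, n := range sizes {
        b.Run(fmt.Sprint(n), func(b *testing.B) {
            buf := make([]byte, n)
            b.SetBytes(int64(n))
            f(b, buf)
        })
    }
}

func BenchmarkIndexByte(b *testing.B) {
    benchSizes(b, []int{32, 4 << 10}, func(b *testing.B, buf []byte) {
        buf[len(buf)-1] = 'x'
        for i := 0; i < b.N; i++ {
            if j := bytes.IndexByte(buf, 'x'); j != len(buf)-1 {
                b.Fatal("bad index", j)
            }
        }
    })
}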
diff --git a/libgo/go/bytes/reader.go b/libgo/go/bytes/reader.go
index b89d154..28cfc7a 100644
--- a/libgo/go/bytes/reader.go
+++ b/libgo/go/bytes/reader.go
@@ -36,9 +36,6 @@ func (r *Reader) Len() int {
 func (r *Reader) Size() int64 { return int64(len(r.s)) }

 func (r *Reader) Read(b []byte) (n int, err error) {
-    if len(b) == 0 {
-        return 0, nil
-    }
     if r.i >= int64(len(r.s)) {
         return 0, io.EOF
     }
@@ -63,14 +60,14 @@ func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
     return
 }

-func (r *Reader) ReadByte() (b byte, err error) {
+func (r *Reader) ReadByte() (byte, error) {
     r.prevRune = -1
     if r.i >= int64(len(r.s)) {
         return 0, io.EOF
     }
-    b = r.s[r.i]
+    b := r.s[r.i]
     r.i++
-    return
+    return b, nil
 }

 func (r *Reader) UnreadByte() error {
@@ -111,11 +108,11 @@ func (r *Reader) Seek(offset int64, whence int) (int64, error) {
     r.prevRune = -1
     var abs int64
     switch whence {
-    case 0:
+    case io.SeekStart:
         abs = offset
-    case 1:
-        abs = int64(r.i) + offset
-    case 2:
+    case io.SeekCurrent:
+        abs = r.i + offset
+    case io.SeekEnd:
         abs = int64(len(r.s)) + offset
     default:
         return 0, errors.New("bytes.Reader.Seek: invalid whence")
@@ -146,5 +143,8 @@ func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
     return
 }

+// Reset resets the Reader to be reading from b.
+func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
+
 // NewReader returns a new Reader reading from b.
 func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
diff --git a/libgo/go/bytes/reader_test.go b/libgo/go/bytes/reader_test.go
index b929a28..7b3034d 100644
--- a/libgo/go/bytes/reader_test.go
+++ b/libgo/go/bytes/reader_test.go
@@ -9,7 +9,6 @@ import (
     "fmt"
     "io"
     "io/ioutil"
-    "os"
     "sync"
     "testing"
 )
@@ -22,17 +21,18 @@ func TestReader(t *testing.T) {
     n       int
     want    string
     wantpos int64
+    readerr error
     seekerr string
 }{
-    {seek: os.SEEK_SET, off: 0, n: 20, want: "0123456789"},
-    {seek: os.SEEK_SET, off: 1, n: 1, want: "1"},
-    {seek: os.SEEK_CUR, off: 1, wantpos: 3, n: 2, want: "34"},
-    {seek: os.SEEK_SET, off: -1, seekerr: "bytes.Reader.Seek: negative position"},
-    {seek: os.SEEK_SET, off: 1 << 33, wantpos: 1 << 33},
-    {seek: os.SEEK_CUR, off: 1, wantpos: 1<<33 + 1},
-    {seek: os.SEEK_SET, n: 5, want: "01234"},
-    {seek: os.SEEK_CUR, n: 5, want: "56789"},
-    {seek: os.SEEK_END, off: -1, n: 1, wantpos: 9, want: "9"},
+    {seek: io.SeekStart, off: 0, n: 20, want: "0123456789"},
+    {seek: io.SeekStart, off: 1, n: 1, want: "1"},
+    {seek: io.SeekCurrent, off: 1, wantpos: 3, n: 2, want: "34"},
+    {seek: io.SeekStart, off: -1, seekerr: "bytes.Reader.Seek: negative position"},
+    {seek: io.SeekStart, off: 1 << 33, wantpos: 1 << 33, readerr: io.EOF},
+    {seek: io.SeekCurrent, off: 1, wantpos: 1<<33 + 1, readerr: io.EOF},
+    {seek: io.SeekStart, n: 5, want: "01234"},
+    {seek: io.SeekCurrent, n: 5, want: "56789"},
+    {seek: io.SeekEnd, off: -1, n: 1, wantpos: 9, want: "9"},
 }

 for i, tt := range tests {
@@ -50,8 +50,8 @@ func TestReader(t *testing.T) {
     }
     buf := make([]byte, tt.n)
     n, err := r.Read(buf)
-    if err != nil {
-        t.Errorf("%d. read = %v", i, err)
+    if err != tt.readerr {
+        t.Errorf("%d. read = %v; want %v", i, err, tt.readerr)
         continue
     }
     got := string(buf[:n])
@@ -63,7 +63,7 @@ func TestReader(t *testing.T) {

 func TestReadAfterBigSeek(t *testing.T) {
     r := NewReader([]byte("0123456789"))
-    if _, err := r.Seek(1<<31+5, os.SEEK_SET); err != nil {
+    if _, err := r.Seek(1<<31+5, io.SeekStart); err != nil {
         t.Fatal(err)
     }
     if n, err := r.Read(make([]byte, 10)); n != 0 || err != io.EOF {
@@ -174,7 +174,7 @@ func TestReaderLen(t *testing.T) {
         t.Errorf("r.Len(): got %d, want %d", got, want)
     }
     if n, err := r.Read(make([]byte, 1)); err != nil || n != 1 {
-        t.Errorf("Read failed: read %d %v", n, err)
+        t.Errorf("Read failed: read %d %v; want 1, nil", n, err)
     }
     if got, want := r.Len(), 0; got != want {
         t.Errorf("r.Len(): got %d, want %d", got, want)
@@ -188,7 +188,7 @@ var UnreadRuneErrorTests = []struct {
     {"Read", func(r *Reader) { r.Read([]byte{0}) }},
     {"ReadByte", func(r *Reader) { r.ReadByte() }},
     {"UnreadRune", func(r *Reader) { r.UnreadRune() }},
-    {"Seek", func(r *Reader) { r.Seek(0, 1) }},
+    {"Seek", func(r *Reader) { r.Seek(0, io.SeekCurrent) }},
     {"WriteTo", func(r *Reader) { r.WriteTo(&Buffer{}) }},
 }
@@ -256,3 +256,23 @@ func TestReaderLenSize(t *testing.T) {
         t.Errorf("Size = %d; want 3", r.Size())
     }
 }
+
+func TestReaderReset(t *testing.T) {
+    r := NewReader([]byte("世界"))
+    if _, _, err := r.ReadRune(); err != nil {
+        t.Errorf("ReadRune: unexpected error: %v", err)
+    }
+
+    const want = "abcdef"
+    r.Reset([]byte(want))
+    if err := r.UnreadRune(); err == nil {
+        t.Errorf("UnreadRune: expected error, got nil")
+    }
+    buf, err := ioutil.ReadAll(r)
+    if err != nil {
+        t.Errorf("ReadAll: unexpected error: %v", err)
+    }
+    if got := string(buf); got != want {
+        t.Errorf("ReadAll: got %q, want %q", got, want)
+    }
+}
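Note: the reader.go changes above track two Go 1.7 additions: the io.SeekStart, io.SeekCurrent and io.SeekEnd constants that replace the older os.SEEK_* values in Seek calls, and the new Reader.Reset method for pointing an existing Reader at a different slice. A small illustrative example, not part of the patch (input strings are made up):

package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    r := bytes.NewReader([]byte("0123456789"))

    // Seek relative to the end using the io constants rather than os.SEEK_END.
    if _, err := r.Seek(-3, io.SeekEnd); err != nil {
        panic(err)
    }
    tail, _ := ioutil.ReadAll(r)
    fmt.Printf("%s\n", tail) // 789

    // Reset reuses the same Reader for new input without allocating another one.
    r.Reset([]byte("abcdef"))
    head := make([]byte, 3)
    if _, err := io.ReadFull(r, head); err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", head) // abc
}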