author     Ian Lance Taylor <iant@golang.org>   2017-09-14 17:11:35 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2017-09-14 17:11:35 +0000
commit     bc998d034f45d1828a8663b2eed928faf22a7d01 (patch)
tree       8d262a22ca7318f4bcd64269fe8fe9e45bcf8d0f /libgo/go/bytes
parent     a41a6142df74219f596e612d3a7775f68ca6e96f (diff)
download   gcc-bc998d034f45d1828a8663b2eed928faf22a7d01.zip
           gcc-bc998d034f45d1828a8663b2eed928faf22a7d01.tar.gz
           gcc-bc998d034f45d1828a8663b2eed928faf22a7d01.tar.bz2
libgo: update to go1.9
Reviewed-on: https://go-review.googlesource.com/63753
From-SVN: r252767
Diffstat (limited to 'libgo/go/bytes')
-rw-r--r--  libgo/go/bytes/buffer.go        | 137
-rw-r--r--  libgo/go/bytes/buffer_test.go   |  76
-rw-r--r--  libgo/go/bytes/bytes.go         |  60
-rw-r--r--  libgo/go/bytes/bytes_amd64.go   |  17
-rw-r--r--  libgo/go/bytes/bytes_generic.go |   6
-rw-r--r--  libgo/go/bytes/bytes_s390x.go   |   6
-rw-r--r--  libgo/go/bytes/bytes_test.go    | 146
-rw-r--r--  libgo/go/bytes/example_test.go  |   9
-rw-r--r--  libgo/go/bytes/export_test.go   |   1
9 files changed, 364 insertions, 94 deletions
diff --git a/libgo/go/bytes/buffer.go b/libgo/go/bytes/buffer.go
index 196419d..20e42bb 100644
--- a/libgo/go/bytes/buffer.go
+++ b/libgo/go/bytes/buffer.go
@@ -15,10 +15,15 @@ import (
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
- buf []byte // contents are the bytes buf[off : len(buf)]
- off int // read at &buf[off], write at &buf[len(buf)]
- bootstrap [64]byte // memory to hold first slice; helps small buffers avoid allocation.
- lastRead readOp // last read operation, so that Unread* can work correctly.
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ lastRead readOp // last read operation, so that Unread* can work correctly.
+ // FIXME: lastRead can fit in a single byte
+
+ // memory to hold first slice; helps small buffers avoid allocation.
+ // FIXME: it would be advisable to align Buffer to cachelines to avoid false
+ // sharing.
+ bootstrap [64]byte
}
// The readOp constants describe the last action performed on
@@ -68,13 +73,13 @@ func (b *Buffer) Cap() int { return cap(b.buf) }
// but continues to use the same allocated storage.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
+ if n == 0 {
+ b.Reset()
+ return
+ }
b.lastRead = opInvalid
- switch {
- case n < 0 || n > b.Len():
+ if n < 0 || n > b.Len() {
panic("bytes.Buffer: truncation out of range")
- case n == 0:
- // Reuse buffer space.
- b.off = 0
}
b.buf = b.buf[0 : b.off+n]
}
@@ -82,7 +87,22 @@ func (b *Buffer) Truncate(n int) {
// Reset resets the buffer to be empty,
// but it retains the underlying storage for use by future writes.
// Reset is the same as Truncate(0).
-func (b *Buffer) Reset() { b.Truncate(0) }
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.off = 0
+ b.lastRead = opInvalid
+}
+
+// tryGrowByReslice is an inlineable version of grow for the fast case where the
+// internal buffer only needs to be resliced.
+// It returns the index where bytes should be written and whether it succeeded.
+func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
+ if l := len(b.buf); l+n <= cap(b.buf) {
+ b.buf = b.buf[:l+n]
+ return l, true
+ }
+ return 0, false
+}
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
@@ -91,29 +111,33 @@ func (b *Buffer) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
if m == 0 && b.off != 0 {
- b.Truncate(0)
+ b.Reset()
}
- if len(b.buf)+n > cap(b.buf) {
- var buf []byte
- if b.buf == nil && n <= len(b.bootstrap) {
- buf = b.bootstrap[0:]
- } else if m+n <= cap(b.buf)/2 {
- // We can slide things down instead of allocating a new
- // slice. We only need m+n <= cap(b.buf) to slide, but
- // we instead let capacity get twice as large so we
- // don't spend all our time copying.
- copy(b.buf[:], b.buf[b.off:])
- buf = b.buf[:m]
- } else {
- // not enough space anywhere
- buf = makeSlice(2*cap(b.buf) + n)
- copy(buf, b.buf[b.off:])
- }
+ // Try to grow by means of a reslice.
+ if i, ok := b.tryGrowByReslice(n); ok {
+ return i
+ }
+ // Check if we can make use of bootstrap array.
+ if b.buf == nil && n <= len(b.bootstrap) {
+ b.buf = b.bootstrap[:n]
+ return 0
+ }
+ if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ } else {
+ // Not enough space anywhere, we need to allocate.
+ buf := makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
b.buf = buf
- b.off = 0
}
- b.buf = b.buf[0 : b.off+m+n]
- return b.off + m
+ // Restore b.off and len(b.buf).
+ b.off = 0
+ b.buf = b.buf[:m+n]
+ return m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
@@ -134,7 +158,10 @@ func (b *Buffer) Grow(n int) {
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
- m := b.grow(len(p))
+ m, ok := b.tryGrowByReslice(len(p))
+ if !ok {
+ m = b.grow(len(p))
+ }
return copy(b.buf[m:], p), nil
}
@@ -143,7 +170,10 @@ func (b *Buffer) Write(p []byte) (n int, err error) {
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
- m := b.grow(len(s))
+ m, ok := b.tryGrowByReslice(len(s))
+ if !ok {
+ m = b.grow(len(s))
+ }
return copy(b.buf[m:], s), nil
}
@@ -161,7 +191,7 @@ func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
// If buffer is empty, reset to recover space.
if b.off >= len(b.buf) {
- b.Truncate(0)
+ b.Reset()
}
for {
if free := cap(b.buf) - len(b.buf); free < MinRead {
@@ -225,7 +255,7 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
}
}
// Buffer is now empty; reset.
- b.Truncate(0)
+ b.Reset()
return
}
@@ -235,7 +265,10 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
- m := b.grow(1)
+ m, ok := b.tryGrowByReslice(1)
+ if !ok {
+ m = b.grow(1)
+ }
b.buf[m] = c
return nil
}
@@ -250,7 +283,10 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
return 1, nil
}
b.lastRead = opInvalid
- m := b.grow(utf8.UTFMax)
+ m, ok := b.tryGrowByReslice(utf8.UTFMax)
+ if !ok {
+ m = b.grow(utf8.UTFMax)
+ }
n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
b.buf = b.buf[:m+n]
return n, nil
@@ -264,7 +300,7 @@ func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
- b.Truncate(0)
+ b.Reset()
if len(p) == 0 {
return
}
@@ -302,7 +338,7 @@ func (b *Buffer) ReadByte() (byte, error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
- b.Truncate(0)
+ b.Reset()
return 0, io.EOF
}
c := b.buf[b.off]
@@ -320,7 +356,7 @@ func (b *Buffer) ReadRune() (r rune, size int, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
- b.Truncate(0)
+ b.Reset()
return 0, 0, io.EOF
}
c := b.buf[b.off]
@@ -337,12 +373,12 @@ func (b *Buffer) ReadRune() (r rune, size int, err error) {
// UnreadRune unreads the last rune returned by ReadRune.
// If the most recent read or write operation on the buffer was
-// not a ReadRune, UnreadRune returns an error. (In this regard
+// not a successful ReadRune, UnreadRune returns an error. (In this regard
// it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
if b.lastRead <= opInvalid {
- return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
+ return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
}
if b.off >= int(b.lastRead) {
b.off -= int(b.lastRead)
@@ -351,12 +387,13 @@ func (b *Buffer) UnreadRune() error {
return nil
}
-// UnreadByte unreads the last byte returned by the most recent
-// read operation. If write has happened since the last read, UnreadByte
-// returns an error.
+// UnreadByte unreads the last byte returned by the most recent successful
+// read operation that read at least one byte. If a write has happened since
+// the last read, if the last read returned an error, or if the read read zero
+// bytes, UnreadByte returns an error.
func (b *Buffer) UnreadByte() error {
if b.lastRead == opInvalid {
- return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
+ return errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
}
b.lastRead = opInvalid
if b.off > 0 {
@@ -404,10 +441,12 @@ func (b *Buffer) ReadString(delim byte) (line string, err error) {
return string(slice), err
}
-// NewBuffer creates and initializes a new Buffer using buf as its initial
-// contents. It is intended to prepare a Buffer to read existing data. It
-// can also be used to size the internal buffer for writing. To do that,
-// buf should have the desired capacity but a length of zero.
+// NewBuffer creates and initializes a new Buffer using buf as its
+// initial contents. The new Buffer takes ownership of buf, and the
+// caller should not use buf after this call. NewBuffer is intended to
+// prepare a Buffer to read existing data. It can also be used to size
+// the internal buffer for writing. To do that, buf should have the
+// desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
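
The buffer.go changes above replace the single grow path with a two-tier scheme: writers first try an inlineable reslice of the existing capacity and fall back to the allocating grow only when that fails. A minimal self-contained sketch of the pattern (hypothetical names, not the real unexported API):

    package main

    import "fmt"

    type sketchBuffer struct {
        buf []byte
    }

    // tryGrow mirrors tryGrowByReslice: extend in place if capacity allows.
    func (b *sketchBuffer) tryGrow(n int) (int, bool) {
        if l := len(b.buf); l+n <= cap(b.buf) {
            b.buf = b.buf[:l+n]
            return l, true
        }
        return 0, false
    }

    // grow is the slow path: allocate a larger array and copy.
    func (b *sketchBuffer) grow(n int) int {
        m := len(b.buf)
        nb := make([]byte, m+n, 2*cap(b.buf)+n)
        copy(nb, b.buf)
        b.buf = nb
        return m
    }

    func (b *sketchBuffer) Write(p []byte) (int, error) {
        m, ok := b.tryGrow(len(p)) // fast path, small enough to inline
        if !ok {
            m = b.grow(len(p)) // slow path
        }
        return copy(b.buf[m:], p), nil
    }

    func main() {
        var b sketchBuffer
        b.Write([]byte("hello, "))
        b.Write([]byte("world"))
        fmt.Println(string(b.buf)) // hello, world
    }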
diff --git a/libgo/go/bytes/buffer_test.go b/libgo/go/bytes/buffer_test.go
index b1b85f9..ce2f01a 100644
--- a/libgo/go/bytes/buffer_test.go
+++ b/libgo/go/bytes/buffer_test.go
@@ -6,8 +6,10 @@ package bytes_test
import (
. "bytes"
+ "internal/testenv"
"io"
"math/rand"
+ "os/exec"
"runtime"
"testing"
"unicode/utf8"
@@ -311,6 +313,19 @@ func TestRuneIO(t *testing.T) {
// Check that UnreadRune works
buf.Reset()
+
+ // check at EOF
+ if err := buf.UnreadRune(); err == nil {
+ t.Fatal("UnreadRune at EOF: got no error")
+ }
+ if _, _, err := buf.ReadRune(); err == nil {
+ t.Fatal("ReadRune at EOF: got no error")
+ }
+ if err := buf.UnreadRune(); err == nil {
+ t.Fatal("UnreadRune after ReadRune at EOF: got no error")
+ }
+
+ // check not at EOF
buf.Write(b)
for r := rune(0); r < NRune; r++ {
r1, size, _ := buf.ReadRune()
@@ -473,15 +488,34 @@ func TestReadEmptyAtEOF(t *testing.T) {
func TestUnreadByte(t *testing.T) {
b := new(Buffer)
+
+ // check at EOF
+ if err := b.UnreadByte(); err == nil {
+ t.Fatal("UnreadByte at EOF: got no error")
+ }
+ if _, err := b.ReadByte(); err == nil {
+ t.Fatal("ReadByte at EOF: got no error")
+ }
+ if err := b.UnreadByte(); err == nil {
+ t.Fatal("UnreadByte after ReadByte at EOF: got no error")
+ }
+
+ // check not at EOF
b.WriteString("abcdefghijklmnopqrstuvwxyz")
- _, err := b.ReadBytes('m')
- if err != nil {
- t.Fatalf("ReadBytes: %v", err)
+ // after unsuccessful read
+ if n, err := b.Read(nil); n != 0 || err != nil {
+ t.Fatalf("Read(nil) = %d,%v; want 0,nil", n, err)
+ }
+ if err := b.UnreadByte(); err == nil {
+ t.Fatal("UnreadByte after Read(nil): got no error")
}
- err = b.UnreadByte()
- if err != nil {
+ // after successful read
+ if _, err := b.ReadBytes('m'); err != nil {
+ t.Fatalf("ReadBytes: %v", err)
+ }
+ if err := b.UnreadByte(); err != nil {
t.Fatalf("UnreadByte: %v", err)
}
c, err := b.ReadByte()
@@ -514,6 +548,38 @@ func TestBufferGrowth(t *testing.T) {
}
}
+// Test that tryGrowByReslice is inlined.
+// Only execute on "linux-amd64" builder in order to avoid breakage.
+func TestTryGrowByResliceInlined(t *testing.T) {
+ targetBuilder := "linux-amd64"
+ if testenv.Builder() != targetBuilder {
+ t.Skipf("%q gets executed on %q builder only", t.Name(), targetBuilder)
+ }
+ t.Parallel()
+ goBin := testenv.GoToolPath(t)
+ out, err := exec.Command(goBin, "tool", "nm", goBin).CombinedOutput()
+ if err != nil {
+ t.Fatalf("go tool nm: %v: %s", err, out)
+ }
+ // Verify this doesn't exist:
+ sym := "bytes.(*Buffer).tryGrowByReslice"
+ if Contains(out, []byte(sym)) {
+ t.Errorf("found symbol %q in cmd/go, but should be inlined", sym)
+ }
+}
+
+func BenchmarkWriteByte(b *testing.B) {
+ const n = 4 << 10
+ b.SetBytes(n)
+ buf := NewBuffer(make([]byte, n))
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ for i := 0; i < n; i++ {
+ buf.WriteByte('x')
+ }
+ }
+}
+
func BenchmarkWriteRune(b *testing.B) {
const n = 4 << 10
const r = '☺'
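
The new buffer_test.go cases pin down UnreadByte's stricter contract: a read that returns zero bytes does not arm it. A small usage sketch against the standard bytes package:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        var b bytes.Buffer
        b.WriteString("ab")
        b.Read(nil)                 // succeeds but reads zero bytes
        fmt.Println(b.UnreadByte()) // non-nil error: nothing to unread
        b.ReadByte()                // reads 'a'
        fmt.Println(b.UnreadByte()) // <nil>; 'a' will be read again
    }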
diff --git a/libgo/go/bytes/bytes.go b/libgo/go/bytes/bytes.go
index 406a3825..7c878af 100644
--- a/libgo/go/bytes/bytes.go
+++ b/libgo/go/bytes/bytes.go
@@ -46,36 +46,21 @@ func explode(s []byte, n int) [][]byte {
return a[0:na]
}
-// Count counts the number of non-overlapping instances of sep in s.
-// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
-func Count(s, sep []byte) int {
- n := len(sep)
- if n == 0 {
+// countGeneric actually implements Count
+func countGeneric(s, sep []byte) int {
+ // special case
+ if len(sep) == 0 {
return utf8.RuneCount(s) + 1
}
- if n > len(s) {
- return 0
- }
- count := 0
- c := sep[0]
- i := 0
- t := s[:len(s)-n+1]
- for i < len(t) {
- if t[i] != c {
- o := IndexByte(t[i:], c)
- if o < 0 {
- break
- }
- i += o
- }
- if n == 1 || Equal(s[i:i+n], sep) {
- count++
- i += n
- continue
+ n := 0
+ for {
+ i := Index(s, sep)
+ if i == -1 {
+ return n
}
- i++
+ n++
+ s = s[i+len(sep):]
}
- return count
}
// Contains reports whether subslice is within b.
@@ -229,20 +214,21 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
if n < 0 {
n = Count(s, sep) + 1
}
- c := sep[0]
- start := 0
+
a := make([][]byte, n)
- na := 0
- for i := 0; i+len(sep) <= len(s) && na+1 < n; i++ {
- if s[i] == c && (len(sep) == 1 || Equal(s[i:i+len(sep)], sep)) {
- a[na] = s[start : i+sepSave]
- na++
- start = i + len(sep)
- i += len(sep) - 1
+ n--
+ i := 0
+ for i < n {
+ m := Index(s, sep)
+ if m < 0 {
+ break
}
+ a[i] = s[:m+sepSave]
+ s = s[m+len(sep):]
+ i++
}
- a[na] = s[start:]
- return a[0 : na+1]
+ a[i] = s
+ return a[:i+1]
}
// SplitN slices s into subslices separated by sep and returns a slice of
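
Both countGeneric and genSplit now delegate the inner scan to Index instead of open-coding a byte search. The documented Count semantics are unchanged; a quick check against the standard bytes package:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        fmt.Println(bytes.Count([]byte("cheese"), []byte("e"))) // 3
        fmt.Println(bytes.Count([]byte("aaaa"), []byte("aa")))  // 2: matches do not overlap
        fmt.Println(bytes.Count([]byte("héllo"), nil))          // 6: 1 + number of runes
    }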
diff --git a/libgo/go/bytes/bytes_amd64.go b/libgo/go/bytes/bytes_amd64.go
index 58a07ef..d40c744 100644
--- a/libgo/go/bytes/bytes_amd64.go
+++ b/libgo/go/bytes/bytes_amd64.go
@@ -6,17 +6,19 @@
package bytes
+import "internal/cpu"
+
//go:noescape
// indexShortStr returns the index of the first instance of c in s, or -1 if c is not present in s.
// indexShortStr requires 2 <= len(c) <= shortStringLen
-func indexShortStr(s, c []byte) int // ../runtime/asm_$GOARCH.s
-func supportAVX2() bool // ../runtime/asm_$GOARCH.s
+func indexShortStr(s, c []byte) int // ../runtime/asm_amd64.s
+func countByte(s []byte, c byte) int // ../runtime/asm_amd64.s
var shortStringLen int
func init() {
- if supportAVX2() {
+ if cpu.X86.HasAVX2 {
shortStringLen = 63
} else {
shortStringLen = 31
@@ -96,6 +98,15 @@ func Index(s, sep []byte) int {
return -1
}
+// Count counts the number of non-overlapping instances of sep in s.
+// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
+func Count(s, sep []byte) int {
+ if len(sep) == 1 && cpu.X86.HasPOPCNT {
+ return countByte(s, sep[0])
+ }
+ return countGeneric(s, sep)
+}
+
// primeRK is the prime base used in Rabin-Karp algorithm.
const primeRK = 16777619
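
On amd64, Count now dispatches on CPU features detected at init time. Since internal/cpu cannot be imported outside the standard library, the sketch below fakes the feature flag and substitutes a plain loop for the assembly countByte; all names are illustrative:

    package main

    import (
        "bytes"
        "fmt"
        "unicode/utf8"
    )

    var hasPOPCNT = false // stands in for cpu.X86.HasPOPCNT

    // countByteFast stands in for the vectorized assembly routine.
    func countByteFast(s []byte, c byte) int {
        n := 0
        for _, b := range s {
            if b == c {
                n++
            }
        }
        return n
    }

    // countGeneric is the portable fallback, as in the patch.
    func countGeneric(s, sep []byte) int {
        if len(sep) == 0 {
            return utf8.RuneCount(s) + 1
        }
        n := 0
        for {
            i := bytes.Index(s, sep)
            if i == -1 {
                return n
            }
            n++
            s = s[i+len(sep):]
        }
    }

    func count(s, sep []byte) int {
        if len(sep) == 1 && hasPOPCNT {
            return countByteFast(s, sep[0])
        }
        return countGeneric(s, sep)
    }

    func main() {
        fmt.Println(count([]byte("abracadabra"), []byte("ab"))) // 2
    }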
diff --git a/libgo/go/bytes/bytes_generic.go b/libgo/go/bytes/bytes_generic.go
index 91baa22..75a9c36 100644
--- a/libgo/go/bytes/bytes_generic.go
+++ b/libgo/go/bytes/bytes_generic.go
@@ -39,3 +39,9 @@ func Index(s, sep []byte) int {
}
return -1
}
+
+// Count counts the number of non-overlapping instances of sep in s.
+// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
+func Count(s, sep []byte) int {
+ return countGeneric(s, sep)
+}
diff --git a/libgo/go/bytes/bytes_s390x.go b/libgo/go/bytes/bytes_s390x.go
index a05ca47..54e013e 100644
--- a/libgo/go/bytes/bytes_s390x.go
+++ b/libgo/go/bytes/bytes_s390x.go
@@ -99,6 +99,12 @@ func Index(s, sep []byte) int {
return -1
}
+// Count counts the number of non-overlapping instances of sep in s.
+// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
+func Count(s, sep []byte) int {
+ return countGeneric(s, sep)
+}
+
// primeRK is the prime base used in Rabin-Karp algorithm.
const primeRK = 16777619
diff --git a/libgo/go/bytes/bytes_test.go b/libgo/go/bytes/bytes_test.go
index ad01952..d571eb3 100644
--- a/libgo/go/bytes/bytes_test.go
+++ b/libgo/go/bytes/bytes_test.go
@@ -401,6 +401,79 @@ func TestIndexRune(t *testing.T) {
}
}
+// test count of a single byte across page offsets
+func TestCountByte(t *testing.T) {
+ b := make([]byte, 5015) // bigger than a page
+ windows := []int{1, 2, 3, 4, 15, 16, 17, 31, 32, 33, 63, 64, 65, 128}
+ testCountWindow := func(i, window int) {
+ for j := 0; j < window; j++ {
+ b[i+j] = byte(100)
+ p := Count(b[i:i+window], []byte{100})
+ if p != j+1 {
+ t.Errorf("TestCountByte.Count(%q, 100) = %d", b[i:i+window], p)
+ }
+ pGeneric := CountGeneric(b[i:i+window], []byte{100})
+ if pGeneric != j+1 {
+ t.Errorf("TestCountByte.CountGeneric(%q, 100) = %d", b[i:i+window], p)
+ }
+ }
+ }
+
+ maxWnd := windows[len(windows)-1]
+
+ for i := 0; i <= 2*maxWnd; i++ {
+ for _, window := range windows {
+ if window > len(b[i:]) {
+ window = len(b[i:])
+ }
+ testCountWindow(i, window)
+ for j := 0; j < window; j++ {
+ b[i+j] = byte(0)
+ }
+ }
+ }
+ for i := 4096 - (maxWnd + 1); i < len(b); i++ {
+ for _, window := range windows {
+ if window > len(b[i:]) {
+ window = len(b[i:])
+ }
+ testCountWindow(i, window)
+ for j := 0; j < window; j++ {
+ b[i+j] = byte(0)
+ }
+ }
+ }
+}
+
+// Make sure we don't count bytes outside our window
+func TestCountByteNoMatch(t *testing.T) {
+ b := make([]byte, 5015)
+ windows := []int{1, 2, 3, 4, 15, 16, 17, 31, 32, 33, 63, 64, 65, 128}
+ for i := 0; i <= len(b); i++ {
+ for _, window := range windows {
+ if window > len(b[i:]) {
+ window = len(b[i:])
+ }
+ // Fill the window with non-match
+ for j := 0; j < window; j++ {
+ b[i+j] = byte(100)
+ }
+ // Try to find something that doesn't exist
+ p := Count(b[i:i+window], []byte{0})
+ if p != 0 {
+ t.Errorf("TestCountByteNoMatch(%q, 0) = %d", b[i:i+window], p)
+ }
+ pGeneric := CountGeneric(b[i:i+window], []byte{0})
+ if pGeneric != 0 {
+ t.Errorf("TestCountByteNoMatch.CountGeneric(%q, 100) = %d", b[i:i+window], p)
+ }
+ for j := 0; j < window; j++ {
+ b[i+j] = byte(0)
+ }
+ }
+ }
+}
+
var bmbuf []byte
func valName(x int) string {
@@ -594,6 +667,26 @@ func BenchmarkCountEasy(b *testing.B) {
})
}
+func BenchmarkCountSingle(b *testing.B) {
+ benchBytes(b, indexSizes, func(b *testing.B, n int) {
+ buf := bmbuf[0:n]
+ step := 8
+ for i := 0; i < len(buf); i += step {
+ buf[i] = 1
+ }
+ expect := (len(buf) + (step - 1)) / step
+ for i := 0; i < b.N; i++ {
+ j := Count(buf, []byte{1})
+ if j != expect {
+ b.Fatal("bad count", j, expect)
+ }
+ }
+ for i := 0; i < len(buf); i++ {
+ buf[i] = 0
+ }
+ })
+}
+
type ExplodeTest struct {
s string
n int
@@ -1437,6 +1530,59 @@ func BenchmarkTrimSpace(b *testing.B) {
}
}
+func makeBenchInputHard() []byte {
+ tokens := [...]string{
+ "<a>", "<p>", "<b>", "<strong>",
+ "</a>", "</p>", "</b>", "</strong>",
+ "hello", "world",
+ }
+ x := make([]byte, 0, 1<<20)
+ for {
+ i := rand.Intn(len(tokens))
+ if len(x)+len(tokens[i]) >= 1<<20 {
+ break
+ }
+ x = append(x, tokens[i]...)
+ }
+ return x
+}
+
+var benchInputHard = makeBenchInputHard()
+
+func BenchmarkSplitEmptySeparator(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Split(benchInputHard, nil)
+ }
+}
+
+func BenchmarkSplitSingleByteSeparator(b *testing.B) {
+ sep := []byte("/")
+ for i := 0; i < b.N; i++ {
+ Split(benchInputHard, sep)
+ }
+}
+
+func BenchmarkSplitMultiByteSeparator(b *testing.B) {
+ sep := []byte("hello")
+ for i := 0; i < b.N; i++ {
+ Split(benchInputHard, sep)
+ }
+}
+
+func BenchmarkSplitNSingleByteSeparator(b *testing.B) {
+ sep := []byte("/")
+ for i := 0; i < b.N; i++ {
+ SplitN(benchInputHard, sep, 10)
+ }
+}
+
+func BenchmarkSplitNMultiByteSeparator(b *testing.B) {
+ sep := []byte("hello")
+ for i := 0; i < b.N; i++ {
+ SplitN(benchInputHard, sep, 10)
+ }
+}
+
func BenchmarkRepeat(b *testing.B) {
for i := 0; i < b.N; i++ {
Repeat([]byte("-"), 80)
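
The added Split benchmarks cover empty, single-byte, and multi-byte separators over a ~1 MiB input. For reference, the behavior being measured (standard bytes package):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        in := []byte("hello/world/again")
        fmt.Printf("%q\n", bytes.Split(in, []byte("/")))      // ["hello" "world" "again"]
        fmt.Printf("%q\n", bytes.SplitN(in, []byte("/"), 2))  // ["hello" "world/again"]
        fmt.Printf("%q\n", bytes.Split([]byte("héllo"), nil)) // per rune: ["h" "é" "l" "l" "o"]
    }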
diff --git a/libgo/go/bytes/example_test.go b/libgo/go/bytes/example_test.go
index 0d35a0d..9397277 100644
--- a/libgo/go/bytes/example_test.go
+++ b/libgo/go/bytes/example_test.go
@@ -30,6 +30,15 @@ func ExampleBuffer_reader() {
// Output: Gophers rule!
}
+func ExampleBuffer_Grow() {
+ var b bytes.Buffer
+ b.Grow(64)
+ bb := b.Bytes()
+ b.Write([]byte("64 bytes or fewer"))
+ fmt.Printf("%q", bb[:b.Len()])
+ // Output: "64 bytes or fewer"
+}
+
func ExampleCompare() {
// Interpret Compare's result by comparing it to zero.
var a, b []byte
diff --git a/libgo/go/bytes/export_test.go b/libgo/go/bytes/export_test.go
index f61523e..823c8b0 100644
--- a/libgo/go/bytes/export_test.go
+++ b/libgo/go/bytes/export_test.go
@@ -7,3 +7,4 @@ package bytes
// Export func for testing
var IndexBytePortable = indexBytePortable
var EqualPortable = equalPortable
+var CountGeneric = countGeneric
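
export_test.go uses the usual standard-library trick for testing unexported code: a test-only file inside the package re-exports the identifier so the black-box bytes_test package can reach it. A generic sketch of the pattern with hypothetical names (module path assumed):

    // mylib/mylib.go
    package mylib

    func helper() int { return 42 } // unexported

    // mylib/export_test.go -- compiled only when testing mylib
    package mylib

    var Helper = helper

    // mylib/mylib_test.go -- external (black-box) test package
    package mylib_test

    import (
        "testing"

        "example.com/mylib"
    )

    func TestHelper(t *testing.T) {
        if mylib.Helper() != 42 {
            t.Fatal("unexpected value from helper")
        }
    }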