diff options
author | Ian Lance Taylor <iant@golang.org> | 2018-01-09 01:23:08 +0000 |
---|---|---|
committer | Ian Lance Taylor <ian@gcc.gnu.org> | 2018-01-09 01:23:08 +0000 |
commit | 1a2f01efa63036a5104f203a4789e682c0e0915d (patch) | |
tree | 373e15778dc8295354584e1f86915ae493b604ff /libgo/go/io | |
parent | 8799df67f2dab88f9fda11739c501780a85575e2 (diff) | |
download | gcc-1a2f01efa63036a5104f203a4789e682c0e0915d.zip gcc-1a2f01efa63036a5104f203a4789e682c0e0915d.tar.gz gcc-1a2f01efa63036a5104f203a4789e682c0e0915d.tar.bz2 |
libgo: update to Go1.10beta1
Update the Go library to the 1.10beta1 release.
Requires a few changes to the compiler for modifications to the map
runtime code, and to handle some nowritebarrier cases in the runtime.
Reviewed-on: https://go-review.googlesource.com/86455
gotools/:
* Makefile.am (go_cmd_vet_files): New variable.
(go_cmd_buildid_files, go_cmd_test2json_files): New variables.
(s-zdefaultcc): Change from constants to functions.
(noinst_PROGRAMS): Add vet, buildid, and test2json.
(cgo$(EXEEXT)): Link against $(LIBGOTOOL).
(vet$(EXEEXT)): New target.
(buildid$(EXEEXT)): New target.
(test2json$(EXEEXT)): New target.
(install-exec-local): Install all $(noinst_PROGRAMS).
(uninstall-local): Uninstall all $(noinst_PROGRAMS).
(check-go-tool): Depend on $(noinst_PROGRAMS). Copy down
objabi.go.
(check-runtime): Depend on $(noinst_PROGRAMS).
(check-cgo-test, check-carchive-test): Likewise.
(check-vet): New target.
(check): Depend on check-vet. Look at cmd_vet-testlog.
(.PHONY): Add check-vet.
* Makefile.in: Rebuild.
From-SVN: r256365
Diffstat (limited to 'libgo/go/io')
-rw-r--r-- | libgo/go/io/example_test.go | 16 | ||||
-rw-r--r-- | libgo/go/io/io.go | 10 | ||||
-rw-r--r-- | libgo/go/io/io_test.go | 39 | ||||
-rw-r--r-- | libgo/go/io/ioutil/ioutil.go | 23 | ||||
-rw-r--r-- | libgo/go/io/multi.go | 16 | ||||
-rw-r--r-- | libgo/go/io/multi_test.go | 49 | ||||
-rw-r--r-- | libgo/go/io/pipe.go | 177 | ||||
-rw-r--r-- | libgo/go/io/pipe_test.go | 109 |
8 files changed, 334 insertions, 105 deletions
diff --git a/libgo/go/io/example_test.go b/libgo/go/io/example_test.go index af47853..edcd008 100644 --- a/libgo/go/io/example_test.go +++ b/libgo/go/io/example_test.go @@ -243,3 +243,19 @@ func ExampleMultiWriter() { // some io.Reader stream to be read // some io.Reader stream to be read } + +func ExamplePipe() { + r, w := io.Pipe() + + go func() { + fmt.Fprint(w, "some text to be read\n") + w.Close() + }() + + buf := new(bytes.Buffer) + buf.ReadFrom(r) + fmt.Print(buf.String()) + + // Output: + // some text to be read +} diff --git a/libgo/go/io/io.go b/libgo/go/io/io.go index 28dab08..27482de 100644 --- a/libgo/go/io/io.go +++ b/libgo/go/io/io.go @@ -385,8 +385,16 @@ func copyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) { if rt, ok := dst.(ReaderFrom); ok { return rt.ReadFrom(src) } + size := 32 * 1024 + if l, ok := src.(*LimitedReader); ok && int64(size) > l.N { + if l.N < 1 { + size = 1 + } else { + size = int(l.N) + } + } if buf == nil { - buf = make([]byte, 32*1024) + buf = make([]byte, size) } for { nr, er := src.Read(buf) diff --git a/libgo/go/io/io_test.go b/libgo/go/io/io_test.go index 877e839..0e4ce61 100644 --- a/libgo/go/io/io_test.go +++ b/libgo/go/io/io_test.go @@ -32,6 +32,21 @@ func TestCopy(t *testing.T) { } } +func TestCopyNegative(t *testing.T) { + rb := new(Buffer) + wb := new(Buffer) + rb.WriteString("hello") + Copy(wb, &LimitedReader{R: rb, N: -1}) + if wb.String() != "" { + t.Errorf("Copy on LimitedReader with N<0 copied data") + } + + CopyN(wb, rb, -1) + if wb.String() != "" { + t.Errorf("CopyN with N<0 copied data") + } +} + func TestCopyBuffer(t *testing.T) { rb := new(Buffer) wb := new(Buffer) @@ -156,6 +171,30 @@ func TestCopyNWriteTo(t *testing.T) { } } +func BenchmarkCopyNSmall(b *testing.B) { + bs := bytes.Repeat([]byte{0}, 512+1) + rd := bytes.NewReader(bs) + buf := new(Buffer) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + CopyN(buf, rd, 512) + rd.Reset(bs) + } +} + +func BenchmarkCopyNLarge(b 
*testing.B) { + bs := bytes.Repeat([]byte{0}, (32*1024)+1) + rd := bytes.NewReader(bs) + buf := new(Buffer) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + CopyN(buf, rd, 32*1024) + rd.Reset(bs) + } +} + type noReadFrom struct { w Writer } diff --git a/libgo/go/io/ioutil/ioutil.go b/libgo/go/io/ioutil/ioutil.go index f0da616..674b270 100644 --- a/libgo/go/io/ioutil/ioutil.go +++ b/libgo/go/io/ioutil/ioutil.go @@ -16,7 +16,7 @@ import ( // readAll reads from r until an error or EOF and returns the data it read // from the internal buffer allocated with a specified capacity. func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) + var buf bytes.Buffer // If the buffer overflows, we will get bytes.ErrTooLarge. // Return that as an error. Any other panic remains. defer func() { @@ -30,6 +30,9 @@ func readAll(r io.Reader, capacity int64) (b []byte, err error) { panic(e) } }() + if int64(int(capacity)) == capacity { + buf.Grow(int(capacity)) + } _, err = buf.ReadFrom(r) return buf.Bytes(), err } @@ -54,20 +57,20 @@ func ReadFile(filename string) ([]byte, error) { defer f.Close() // It's a good but not certain bet that FileInfo will tell us exactly how much to // read, so let's try it but be prepared for the answer to be wrong. - var n int64 + var n int64 = bytes.MinRead if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { + // As initial capacity for readAll, use Size + a little extra in case Size + // is zero, and to avoid another allocation after Read has filled the + // buffer. The readAll call will read into its allocated internal buffer + // cheaply. If the size was wrong, we'll either waste some space off the end + // or reallocate as needed, but in the overwhelmingly common case we'll get + // it just right. 
+ if size := fi.Size() + bytes.MinRead; size > n { n = size } } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) + return readAll(f, n) } // WriteFile writes data to a file named by filename. diff --git a/libgo/go/io/multi.go b/libgo/go/io/multi.go index d784846..65f9909 100644 --- a/libgo/go/io/multi.go +++ b/libgo/go/io/multi.go @@ -95,8 +95,18 @@ func (t *multiWriter) WriteString(s string) (n int, err error) { // MultiWriter creates a writer that duplicates its writes to all the // provided writers, similar to the Unix tee(1) command. +// +// Each write is written to each listed writer, one at a time. +// If a listed writer returns an error, that overall write operation +// stops and returns the error; it does not continue down the list. func MultiWriter(writers ...Writer) Writer { - w := make([]Writer, len(writers)) - copy(w, writers) - return &multiWriter{w} + allWriters := make([]Writer, 0, len(writers)) + for _, w := range writers { + if mw, ok := w.(*multiWriter); ok { + allWriters = append(allWriters, mw.writers...) + } else { + allWriters = append(allWriters, w) + } + } + return &multiWriter{allWriters} } diff --git a/libgo/go/io/multi_test.go b/libgo/go/io/multi_test.go index fef3e00..4acc51a 100644 --- a/libgo/go/io/multi_test.go +++ b/libgo/go/io/multi_test.go @@ -143,6 +143,55 @@ func testMultiWriter(t *testing.T, sink interface { } } +// writerFunc is an io.Writer implemented by the underlying func. 
+type writerFunc func(p []byte) (int, error) + +func (f writerFunc) Write(p []byte) (int, error) { + return f(p) +} + +// Test that MultiWriter properly flattens chained multiWriters, +func TestMultiWriterSingleChainFlatten(t *testing.T) { + pc := make([]uintptr, 1000) // 1000 should fit the full stack + n := runtime.Callers(0, pc) + var myDepth = callDepth(pc[:n]) + var writeDepth int // will contain the depth from which writerFunc.Writer was called + var w Writer = MultiWriter(writerFunc(func(p []byte) (int, error) { + n := runtime.Callers(1, pc) + writeDepth += callDepth(pc[:n]) + return 0, nil + })) + + mw := w + // chain a bunch of multiWriters + for i := 0; i < 100; i++ { + mw = MultiWriter(w) + } + + mw = MultiWriter(w, mw, w, mw) + mw.Write(nil) // don't care about errors, just want to check the call-depth for Write + + if writeDepth != 4*(myDepth+2) { // 2 should be multiWriter.Write and writerFunc.Write + t.Errorf("multiWriter did not flatten chained multiWriters: expected writeDepth %d, got %d", + 4*(myDepth+2), writeDepth) + } +} + +func TestMultiWriterError(t *testing.T) { + f1 := writerFunc(func(p []byte) (int, error) { + return len(p) / 2, ErrShortWrite + }) + f2 := writerFunc(func(p []byte) (int, error) { + t.Errorf("MultiWriter called f2.Write") + return len(p), nil + }) + w := MultiWriter(f1, f2) + n, err := w.Write(make([]byte, 100)) + if n != 50 || err != ErrShortWrite { + t.Errorf("Write = %d, %v, want 50, ErrShortWrite", n, err) + } +} + // Test that MultiReader copies the input slice and is insulated from future modification. func TestMultiReaderCopy(t *testing.T) { slice := []Reader{strings.NewReader("hello world")} diff --git a/libgo/go/io/pipe.go b/libgo/go/io/pipe.go index b6e7755..4efaf2f 100644 --- a/libgo/go/io/pipe.go +++ b/libgo/go/io/pipe.go @@ -10,110 +10,107 @@ package io import ( "errors" "sync" + "sync/atomic" ) +// atomicError is a type-safe atomic value for errors. 
+// We use a struct{ error } to ensure consistent use of a concrete type. +type atomicError struct{ v atomic.Value } + +func (a *atomicError) Store(err error) { + a.v.Store(struct{ error }{err}) +} +func (a *atomicError) Load() error { + err, _ := a.v.Load().(struct{ error }) + return err.error +} + // ErrClosedPipe is the error used for read or write operations on a closed pipe. var ErrClosedPipe = errors.New("io: read/write on closed pipe") // A pipe is the shared pipe structure underlying PipeReader and PipeWriter. type pipe struct { - rl sync.Mutex // gates readers one at a time - wl sync.Mutex // gates writers one at a time - l sync.Mutex // protects remaining fields - data []byte // data remaining in pending write - rwait sync.Cond // waiting reader - wwait sync.Cond // waiting writer - rerr error // if reader closed, error to give writes - werr error // if writer closed, error to give reads -} - -func (p *pipe) read(b []byte) (n int, err error) { - // One reader at a time. - p.rl.Lock() - defer p.rl.Unlock() - - p.l.Lock() - defer p.l.Unlock() - for { - if p.rerr != nil { - return 0, ErrClosedPipe - } - if p.data != nil { - break - } - if p.werr != nil { - return 0, p.werr - } - p.rwait.Wait() - } - n = copy(b, p.data) - p.data = p.data[n:] - if len(p.data) == 0 { - p.data = nil - p.wwait.Signal() - } - return + wrMu sync.Mutex // Serializes Write operations + wrCh chan []byte + rdCh chan int + + once sync.Once // Protects closing done + done chan struct{} + rerr atomicError + werr atomicError } -var zero [0]byte +func (p *pipe) Read(b []byte) (n int, err error) { + select { + case <-p.done: + return 0, p.readCloseError() + default: + } -func (p *pipe) write(b []byte) (n int, err error) { - // pipe uses nil to mean not available - if b == nil { - b = zero[:] + select { + case bw := <-p.wrCh: + nr := copy(b, bw) + p.rdCh <- nr + return nr, nil + case <-p.done: + return 0, p.readCloseError() } +} - // One writer at a time. 
- p.wl.Lock() - defer p.wl.Unlock() +func (p *pipe) readCloseError() error { + rerr := p.rerr.Load() + if werr := p.werr.Load(); rerr == nil && werr != nil { + return werr + } + return ErrClosedPipe +} - p.l.Lock() - defer p.l.Unlock() - if p.werr != nil { +func (p *pipe) CloseRead(err error) error { + if err == nil { err = ErrClosedPipe - return } - p.data = b - p.rwait.Signal() - for { - if p.data == nil { - break - } - if p.rerr != nil { - err = p.rerr - break - } - if p.werr != nil { - err = ErrClosedPipe - break + p.rerr.Store(err) + p.once.Do(func() { close(p.done) }) + return nil +} + +func (p *pipe) Write(b []byte) (n int, err error) { + select { + case <-p.done: + return 0, p.writeCloseError() + default: + p.wrMu.Lock() + defer p.wrMu.Unlock() + } + + for once := true; once || len(b) > 0; once = false { + select { + case p.wrCh <- b: + nw := <-p.rdCh + b = b[nw:] + n += nw + case <-p.done: + return n, p.writeCloseError() } - p.wwait.Wait() } - n = len(b) - len(p.data) - p.data = nil // in case of rerr or werr - return + return n, nil } -func (p *pipe) rclose(err error) { - if err == nil { - err = ErrClosedPipe +func (p *pipe) writeCloseError() error { + werr := p.werr.Load() + if rerr := p.rerr.Load(); werr == nil && rerr != nil { + return rerr } - p.l.Lock() - defer p.l.Unlock() - p.rerr = err - p.rwait.Signal() - p.wwait.Signal() + return ErrClosedPipe } -func (p *pipe) wclose(err error) { +func (p *pipe) CloseWrite(err error) error { if err == nil { err = EOF } - p.l.Lock() - defer p.l.Unlock() - p.werr = err - p.rwait.Signal() - p.wwait.Signal() + p.werr.Store(err) + p.once.Do(func() { close(p.done) }) + return nil } // A PipeReader is the read half of a pipe. @@ -127,7 +124,7 @@ type PipeReader struct { // If the write end is closed with an error, that error is // returned as err; otherwise err is EOF. 
func (r *PipeReader) Read(data []byte) (n int, err error) { - return r.p.read(data) + return r.p.Read(data) } // Close closes the reader; subsequent writes to the @@ -139,8 +136,7 @@ func (r *PipeReader) Close() error { // CloseWithError closes the reader; subsequent writes // to the write half of the pipe will return the error err. func (r *PipeReader) CloseWithError(err error) error { - r.p.rclose(err) - return nil + return r.p.CloseRead(err) } // A PipeWriter is the write half of a pipe. @@ -154,7 +150,7 @@ type PipeWriter struct { // If the read end is closed with an error, that err is // returned as err; otherwise err is ErrClosedPipe. func (w *PipeWriter) Write(data []byte) (n int, err error) { - return w.p.write(data) + return w.p.Write(data) } // Close closes the writer; subsequent reads from the @@ -169,8 +165,7 @@ func (w *PipeWriter) Close() error { // // CloseWithError always returns nil. func (w *PipeWriter) CloseWithError(err error) error { - w.p.wclose(err) - return nil + return w.p.CloseWrite(err) } // Pipe creates a synchronous in-memory pipe. @@ -189,10 +184,10 @@ func (w *PipeWriter) CloseWithError(err error) error { // Parallel calls to Read and parallel calls to Write are also safe: // the individual calls will be gated sequentially. func Pipe() (*PipeReader, *PipeWriter) { - p := new(pipe) - p.rwait.L = &p.l - p.wwait.L = &p.l - r := &PipeReader{p} - w := &PipeWriter{p} - return r, w + p := &pipe{ + wrCh: make(chan []byte), + rdCh: make(chan int), + done: make(chan struct{}), + } + return &PipeReader{p}, &PipeWriter{p} } diff --git a/libgo/go/io/pipe_test.go b/libgo/go/io/pipe_test.go index 95930e8..f18b1c4 100644 --- a/libgo/go/io/pipe_test.go +++ b/libgo/go/io/pipe_test.go @@ -5,8 +5,11 @@ package io_test import ( + "bytes" "fmt" . 
"io" + "sort" + "strings" "testing" "time" ) @@ -312,3 +315,109 @@ func TestWriteAfterWriterClose(t *testing.T) { t.Errorf("got: %q; want: %q", writeErr, ErrClosedPipe) } } + +func TestPipeCloseError(t *testing.T) { + type testError1 struct{ error } + type testError2 struct{ error } + + r, w := Pipe() + r.CloseWithError(testError1{}) + if _, err := w.Write(nil); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + r.CloseWithError(testError2{}) + if _, err := w.Write(nil); err != (testError2{}) { + t.Errorf("Write error: got %T, want testError2", err) + } + + r, w = Pipe() + w.CloseWithError(testError1{}) + if _, err := r.Read(nil); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + w.CloseWithError(testError2{}) + if _, err := r.Read(nil); err != (testError2{}) { + t.Errorf("Read error: got %T, want testError2", err) + } +} + +func TestPipeConcurrent(t *testing.T) { + const ( + input = "0123456789abcdef" + count = 8 + readSize = 2 + ) + + t.Run("Write", func(t *testing.T) { + r, w := Pipe() + + for i := 0; i < count; i++ { + go func() { + time.Sleep(time.Millisecond) // Increase probability of race + if n, err := w.Write([]byte(input)); n != len(input) || err != nil { + t.Errorf("Write() = (%d, %v); want (%d, nil)", n, err, len(input)) + } + }() + } + + buf := make([]byte, count*len(input)) + for i := 0; i < len(buf); i += readSize { + if n, err := r.Read(buf[i : i+readSize]); n != readSize || err != nil { + t.Errorf("Read() = (%d, %v); want (%d, nil)", n, err, readSize) + } + } + + // Since each Write is fully gated, if multiple Read calls were needed, + // the contents of Write should still appear together in the output. 
+ got := string(buf) + want := strings.Repeat(input, count) + if got != want { + t.Errorf("got: %q; want: %q", got, want) + } + }) + + t.Run("Read", func(t *testing.T) { + r, w := Pipe() + + c := make(chan []byte, count*len(input)/readSize) + for i := 0; i < cap(c); i++ { + go func() { + time.Sleep(time.Millisecond) // Increase probability of race + buf := make([]byte, readSize) + if n, err := r.Read(buf); n != readSize || err != nil { + t.Errorf("Read() = (%d, %v); want (%d, nil)", n, err, readSize) + } + c <- buf + }() + } + + for i := 0; i < count; i++ { + if n, err := w.Write([]byte(input)); n != len(input) || err != nil { + t.Errorf("Write() = (%d, %v); want (%d, nil)", n, err, len(input)) + } + } + + // Since each read is independent, the only guarantee about the output + // is that it is a permutation of the input in readSized groups. + got := make([]byte, 0, count*len(input)) + for i := 0; i < cap(c); i++ { + got = append(got, (<-c)...) + } + got = sortBytesInGroups(got, readSize) + want := bytes.Repeat([]byte(input), count) + want = sortBytesInGroups(want, readSize) + if string(got) != string(want) { + t.Errorf("got: %q; want: %q", got, want) + } + }) +} + +func sortBytesInGroups(b []byte, n int) []byte { + var groups [][]byte + for len(b) > 0 { + groups = append(groups, b[:n]) + b = b[n:] + } + sort.Slice(groups, func(i, j int) bool { return bytes.Compare(groups[i], groups[j]) < 0 }) + return bytes.Join(groups, nil) +} |