author     Ian Lance Taylor <iant@golang.org>  2020-01-02 15:05:27 -0800
committer  Ian Lance Taylor <iant@golang.org>  2020-01-21 23:53:22 -0800
commit     5a8ea165926cb0737ab03bc48c18dc5198ab5305 (patch)
tree       962dc3357c57f019f85658f99e2e753e30201c27 /libgo/go/golang.org/x/mod
parent     6ac6529e155c9baa0aaaed7aca06bd38ebda5b43 (diff)
download   gcc-5a8ea165926cb0737ab03bc48c18dc5198ab5305.zip
           gcc-5a8ea165926cb0737ab03bc48c18dc5198ab5305.tar.gz
           gcc-5a8ea165926cb0737ab03bc48c18dc5198ab5305.tar.bz2
libgo: update to Go1.14beta1
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/214297
Diffstat (limited to 'libgo/go/golang.org/x/mod')
-rw-r--r--  libgo/go/golang.org/x/mod/LICENSE                        |  27
-rw-r--r--  libgo/go/golang.org/x/mod/PATENTS                        |  22
-rw-r--r--  libgo/go/golang.org/x/mod/internal/lazyregexp/lazyre.go  |  78
-rw-r--r--  libgo/go/golang.org/x/mod/modfile/print.go               | 165
-rw-r--r--  libgo/go/golang.org/x/mod/modfile/read.go                | 909
-rw-r--r--  libgo/go/golang.org/x/mod/modfile/rule.go                | 776
-rw-r--r--  libgo/go/golang.org/x/mod/module/module.go               | 718
-rw-r--r--  libgo/go/golang.org/x/mod/semver/semver.go               | 388
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/cache.go                 |  59
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/client.go                | 671
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/dirhash/hash.go          | 132
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/note/note.go             | 681
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/server.go                | 181
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/test.go                  | 124
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/tlog/note.go             | 135
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/tlog/tile.go             | 435
-rw-r--r--  libgo/go/golang.org/x/mod/sumdb/tlog/tlog.go             | 598
-rw-r--r--  libgo/go/golang.org/x/mod/zip/zip.go                     | 570
18 files changed, 6669 insertions, 0 deletions
diff --git a/libgo/go/golang.org/x/mod/LICENSE b/libgo/go/golang.org/x/mod/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/libgo/go/golang.org/x/mod/PATENTS b/libgo/go/golang.org/x/mod/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/libgo/go/golang.org/x/mod/internal/lazyregexp/lazyre.go b/libgo/go/golang.org/x/mod/internal/lazyregexp/lazyre.go
new file mode 100644
index 0000000..2681af3
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/internal/lazyregexp/lazyre.go
@@ -0,0 +1,78 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
+// regexp variables without forcing them to be compiled at init.
+package lazyregexp
+
+import (
+ "os"
+ "regexp"
+ "strings"
+ "sync"
+)
+
+// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
+// compiled the first time it is needed.
+type Regexp struct {
+ str string
+ once sync.Once
+ rx *regexp.Regexp
+}
+
+func (r *Regexp) re() *regexp.Regexp {
+ r.once.Do(r.build)
+ return r.rx
+}
+
+func (r *Regexp) build() {
+ r.rx = regexp.MustCompile(r.str)
+ r.str = ""
+}
+
+func (r *Regexp) FindSubmatch(s []byte) [][]byte {
+ return r.re().FindSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatch(s string) []string {
+ return r.re().FindStringSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatchIndex(s string) []int {
+ return r.re().FindStringSubmatchIndex(s)
+}
+
+func (r *Regexp) ReplaceAllString(src, repl string) string {
+ return r.re().ReplaceAllString(src, repl)
+}
+
+func (r *Regexp) FindString(s string) string {
+ return r.re().FindString(s)
+}
+
+func (r *Regexp) FindAllString(s string, n int) []string {
+ return r.re().FindAllString(s, n)
+}
+
+func (r *Regexp) MatchString(s string) bool {
+ return r.re().MatchString(s)
+}
+
+func (r *Regexp) SubexpNames() []string {
+ return r.re().SubexpNames()
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy regexp, delaying the compiling work until it is first
+// needed. If the code is being run as part of tests, the regexp compiling will
+// happen immediately.
+func New(str string) *Regexp {
+ lr := &Regexp{str: str}
+ if inTest {
+ // In tests, always compile the regexps early.
+ lr.re()
+ }
+ return lr
+}
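
A quick usage sketch for the wrapper above (illustrative only: the variable name and pattern are invented, and because internal/lazyregexp is an internal package it can only be imported from within golang.org/x/mod itself). A package-level lazy regexp costs nothing at init and is compiled on first use, except under "go test", where New compiles eagerly so bad patterns fail fast:

package main

import (
	"fmt"

	"golang.org/x/mod/internal/lazyregexp"
)

// pathRE is not compiled at init; regexp.MustCompile runs on first use.
var pathRE = lazyregexp.New(`^[a-z0-9.\-]+(/[a-zA-Z0-9.\-_~]+)*$`)

func main() {
	fmt.Println(pathRE.MatchString("golang.org/x/mod")) // true
	fmt.Println(pathRE.MatchString("bad path"))         // false
}
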
diff --git a/libgo/go/golang.org/x/mod/modfile/print.go b/libgo/go/golang.org/x/mod/modfile/print.go
new file mode 100644
index 0000000..3bbea38
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/modfile/print.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Module file printer.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// Format returns a go.mod file as a byte slice, formatted in standard style.
+func Format(f *FileSyntax) []byte {
+ pr := &printer{}
+ pr.file(f)
+ return pr.Bytes()
+}
+
+// A printer collects the state during printing of a file or expression.
+type printer struct {
+ bytes.Buffer // output buffer
+ comment []Comment // pending end-of-line comments
+ margin int // left margin (indent), a number of tabs
+}
+
+// printf prints to the buffer.
+func (p *printer) printf(format string, args ...interface{}) {
+ fmt.Fprintf(p, format, args...)
+}
+
+// indent returns the position on the current line, in bytes, 0-indexed.
+func (p *printer) indent() int {
+ b := p.Bytes()
+ n := 0
+ for n < len(b) && b[len(b)-1-n] != '\n' {
+ n++
+ }
+ return n
+}
+
+// newline ends the current line, flushing end-of-line comments.
+func (p *printer) newline() {
+ if len(p.comment) > 0 {
+ p.printf(" ")
+ for i, com := range p.comment {
+ if i > 0 {
+ p.trim()
+ p.printf("\n")
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+ }
+ p.printf("%s", strings.TrimSpace(com.Token))
+ }
+ p.comment = p.comment[:0]
+ }
+
+ p.trim()
+ p.printf("\n")
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+}
+
+// trim removes trailing spaces and tabs from the current line.
+func (p *printer) trim() {
+ // Remove trailing spaces and tabs from line we're about to end.
+ b := p.Bytes()
+ n := len(b)
+ for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
+ n--
+ }
+ p.Truncate(n)
+}
+
+// file formats the given file into the print buffer.
+func (p *printer) file(f *FileSyntax) {
+ for _, com := range f.Before {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+
+ for i, stmt := range f.Stmt {
+ switch x := stmt.(type) {
+ case *CommentBlock:
+ // comments already handled
+ p.expr(x)
+
+ default:
+ p.expr(x)
+ p.newline()
+ }
+
+ for _, com := range stmt.Comment().After {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+
+ if i+1 < len(f.Stmt) {
+ p.newline()
+ }
+ }
+}
+
+func (p *printer) expr(x Expr) {
+ // Emit line-comments preceding this expression.
+ if before := x.Comment().Before; len(before) > 0 {
+ // Want to print a line comment.
+ // Line comments must be at the current margin.
+ p.trim()
+ if p.indent() > 0 {
+ // There's other text on the line. Start a new line.
+ p.printf("\n")
+ }
+ // Re-indent to margin.
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+ for _, com := range before {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+ }
+
+ switch x := x.(type) {
+ default:
+ panic(fmt.Errorf("printer: unexpected type %T", x))
+
+ case *CommentBlock:
+ // done
+
+ case *LParen:
+ p.printf("(")
+ case *RParen:
+ p.printf(")")
+
+ case *Line:
+ sep := ""
+ for _, tok := range x.Token {
+ p.printf("%s%s", sep, tok)
+ sep = " "
+ }
+
+ case *LineBlock:
+ for _, tok := range x.Token {
+ p.printf("%s ", tok)
+ }
+ p.expr(&x.LParen)
+ p.margin++
+ for _, l := range x.Line {
+ p.newline()
+ p.expr(l)
+ }
+ p.margin--
+ p.newline()
+ p.expr(&x.RParen)
+ }
+
+ // Queue end-of-line comments for printing when we
+ // reach the end of the line.
+ p.comment = append(p.comment, x.Comment().Suffix...)
+}
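
Format above is the inverse of the parser in read.go: parsing produces a FileSyntax, and Format prints it back in canonical style. A minimal round-trip sketch, assuming the exported modfile.Parse defined in rule.go further down (the file contents and module path are placeholders):

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("module   example.com/m\n\ngo   1.14\n")
	f, err := modfile.Parse("go.mod", data, nil) // nil: no VersionFixer
	if err != nil {
		log.Fatal(err)
	}
	out, _ := f.Format() // (*File).Format wraps Format(f.Syntax)
	fmt.Printf("%s", out)
	// Prints the file with normalized spacing:
	//
	//	module example.com/m
	//
	//	go 1.14
}
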
diff --git a/libgo/go/golang.org/x/mod/modfile/read.go b/libgo/go/golang.org/x/mod/modfile/read.go
new file mode 100644
index 0000000..616d00e
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/modfile/read.go
@@ -0,0 +1,909 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Module file parser.
+// This is a simplified copy of Google's buildifier parser.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Position describes an arbitrary source position in a file, including the
+// file, line, column, and byte offset.
+type Position struct {
+ Line int // line in input (starting at 1)
+ LineRune int // rune in line (starting at 1)
+ Byte int // byte in input (starting at 0)
+}
+
+// add returns the position at the end of s, assuming it starts at p.
+func (p Position) add(s string) Position {
+ p.Byte += len(s)
+ if n := strings.Count(s, "\n"); n > 0 {
+ p.Line += n
+ s = s[strings.LastIndex(s, "\n")+1:]
+ p.LineRune = 1
+ }
+ p.LineRune += utf8.RuneCountInString(s)
+ return p
+}
+
+// An Expr represents an input element.
+type Expr interface {
+ // Span returns the start and end position of the expression,
+ // excluding leading or trailing comments.
+ Span() (start, end Position)
+
+ // Comment returns the comments attached to the expression.
+ // This method would normally be named 'Comments' but that
+ // would interfere with embedding a type of the same name.
+ Comment() *Comments
+}
+
+// A Comment represents a single // comment.
+type Comment struct {
+ Start Position
+ Token string // without trailing newline
+ Suffix bool // an end of line (not whole line) comment
+}
+
+// Comments collects the comments associated with an expression.
+type Comments struct {
+ Before []Comment // whole-line comments before this expression
+ Suffix []Comment // end-of-line comments after this expression
+
+ // For top-level expressions only, After lists whole-line
+ // comments following the expression.
+ After []Comment
+}
+
+// Comment returns the receiver. This isn't useful by itself, but
+// a Comments struct is embedded into all the expression
+// implementation types, and this gives each of those a Comment
+// method to satisfy the Expr interface.
+func (c *Comments) Comment() *Comments {
+ return c
+}
+
+// A FileSyntax represents an entire go.mod file.
+type FileSyntax struct {
+ Name string // file path
+ Comments
+ Stmt []Expr
+}
+
+func (x *FileSyntax) Span() (start, end Position) {
+ if len(x.Stmt) == 0 {
+ return
+ }
+ start, _ = x.Stmt[0].Span()
+ _, end = x.Stmt[len(x.Stmt)-1].Span()
+ return start, end
+}
+
+// addLine adds a line containing the given tokens to the file.
+//
+// If the first token of the hint matches the first token of the
+// line, the new line is added at the end of the block containing hint,
+// extracting hint into a new block if it is not yet in one.
+//
+// If the hint is non-nil but its first token does not match,
+// the new line is added after the block containing hint
+// (or hint itself, if not in a block).
+//
+// If no hint is provided, addLine appends the line to the end of
+// the last block with a matching first token,
+// or to the end of the file if no such block exists.
+func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
+ if hint == nil {
+ // If no hint given, add to the last statement of the given type.
+ Loop:
+ for i := len(x.Stmt) - 1; i >= 0; i-- {
+ stmt := x.Stmt[i]
+ switch stmt := stmt.(type) {
+ case *Line:
+ if stmt.Token != nil && stmt.Token[0] == tokens[0] {
+ hint = stmt
+ break Loop
+ }
+ case *LineBlock:
+ if stmt.Token[0] == tokens[0] {
+ hint = stmt
+ break Loop
+ }
+ }
+ }
+ }
+
+ newLineAfter := func(i int) *Line {
+ new := &Line{Token: tokens}
+ if i == len(x.Stmt) {
+ x.Stmt = append(x.Stmt, new)
+ } else {
+ x.Stmt = append(x.Stmt, nil)
+ copy(x.Stmt[i+2:], x.Stmt[i+1:])
+ x.Stmt[i+1] = new
+ }
+ return new
+ }
+
+ if hint != nil {
+ for i, stmt := range x.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if stmt == hint {
+ if stmt.Token == nil || stmt.Token[0] != tokens[0] {
+ return newLineAfter(i)
+ }
+
+ // Convert line to line block.
+ stmt.InBlock = true
+ block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
+ stmt.Token = stmt.Token[1:]
+ x.Stmt[i] = block
+ new := &Line{Token: tokens[1:], InBlock: true}
+ block.Line = append(block.Line, new)
+ return new
+ }
+
+ case *LineBlock:
+ if stmt == hint {
+ if stmt.Token[0] != tokens[0] {
+ return newLineAfter(i)
+ }
+
+ new := &Line{Token: tokens[1:], InBlock: true}
+ stmt.Line = append(stmt.Line, new)
+ return new
+ }
+
+ for j, line := range stmt.Line {
+ if line == hint {
+ if stmt.Token[0] != tokens[0] {
+ return newLineAfter(i)
+ }
+
+ // Add new line after hint within the block.
+ stmt.Line = append(stmt.Line, nil)
+ copy(stmt.Line[j+2:], stmt.Line[j+1:])
+ new := &Line{Token: tokens[1:], InBlock: true}
+ stmt.Line[j+1] = new
+ return new
+ }
+ }
+ }
+ }
+ }
+
+ new := &Line{Token: tokens}
+ x.Stmt = append(x.Stmt, new)
+ return new
+}
+
+func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
+ if line.InBlock {
+ tokens = tokens[1:]
+ }
+ line.Token = tokens
+}
+
+func (x *FileSyntax) removeLine(line *Line) {
+ line.Token = nil
+}
+
+// Cleanup cleans up the file syntax x after any edit operations.
+// To avoid quadratic behavior, removeLine marks the line as dead
+// by setting line.Token = nil but does not remove it from the slice
+// in which it appears. After edits have all been indicated,
+// calling Cleanup cleans out the dead lines.
+func (x *FileSyntax) Cleanup() {
+ w := 0
+ for _, stmt := range x.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if stmt.Token == nil {
+ continue
+ }
+ case *LineBlock:
+ ww := 0
+ for _, line := range stmt.Line {
+ if line.Token != nil {
+ stmt.Line[ww] = line
+ ww++
+ }
+ }
+ if ww == 0 {
+ continue
+ }
+ if ww == 1 {
+ // Collapse block into single line.
+ line := &Line{
+ Comments: Comments{
+ Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
+ Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
+ After: commentsAdd(stmt.Line[0].After, stmt.After),
+ },
+ Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
+ }
+ x.Stmt[w] = line
+ w++
+ continue
+ }
+ stmt.Line = stmt.Line[:ww]
+ }
+ x.Stmt[w] = stmt
+ w++
+ }
+ x.Stmt = x.Stmt[:w]
+}
+
+func commentsAdd(x, y []Comment) []Comment {
+ return append(x[:len(x):len(x)], y...)
+}
+
+func stringsAdd(x, y []string) []string {
+ return append(x[:len(x):len(x)], y...)
+}
+
+// A CommentBlock represents a top-level block of comments separate
+// from any rule.
+type CommentBlock struct {
+ Comments
+ Start Position
+}
+
+func (x *CommentBlock) Span() (start, end Position) {
+ return x.Start, x.Start
+}
+
+// A Line is a single line of tokens.
+type Line struct {
+ Comments
+ Start Position
+ Token []string
+ InBlock bool
+ End Position
+}
+
+func (x *Line) Span() (start, end Position) {
+ return x.Start, x.End
+}
+
+// A LineBlock is a factored block of lines, like
+//
+// require (
+// "x"
+// "y"
+// )
+//
+type LineBlock struct {
+ Comments
+ Start Position
+ LParen LParen
+ Token []string
+ Line []*Line
+ RParen RParen
+}
+
+func (x *LineBlock) Span() (start, end Position) {
+ return x.Start, x.RParen.Pos.add(")")
+}
+
+// An LParen represents the beginning of a parenthesized line block.
+// It is a place to store suffix comments.
+type LParen struct {
+ Comments
+ Pos Position
+}
+
+func (x *LParen) Span() (start, end Position) {
+ return x.Pos, x.Pos.add(")")
+}
+
+// An RParen represents the end of a parenthesized line block.
+// It is a place to store whole-line (before) comments.
+type RParen struct {
+ Comments
+ Pos Position
+}
+
+func (x *RParen) Span() (start, end Position) {
+ return x.Pos, x.Pos.add(")")
+}
+
+// An input represents a single input file being parsed.
+type input struct {
+ // Lexing state.
+ filename string // name of input file, for errors
+ complete []byte // entire input
+ remaining []byte // remaining input
+ token []byte // token being scanned
+ lastToken string // most recently returned token, for error messages
+ pos Position // current input position
+ comments []Comment // accumulated comments
+ endRule int // position of end of current rule
+
+ // Parser state.
+ file *FileSyntax // returned top-level syntax tree
+ parseError error // error encountered during parsing
+
+ // Comment assignment state.
+ pre []Expr // all expressions, in preorder traversal
+ post []Expr // all expressions, in postorder traversal
+}
+
+func newInput(filename string, data []byte) *input {
+ return &input{
+ filename: filename,
+ complete: data,
+ remaining: data,
+ pos: Position{Line: 1, LineRune: 1, Byte: 0},
+ }
+}
+
+// parse parses the input file.
+func parse(file string, data []byte) (f *FileSyntax, err error) {
+ in := newInput(file, data)
+ // The parser panics for both routine errors like syntax errors
+ // and for programmer bugs like array index errors.
+ // Turn both into error returns. Catching bug panics is
+ // especially important when processing many files.
+ defer func() {
+ if e := recover(); e != nil {
+ if e == in.parseError {
+ err = in.parseError
+ } else {
+ err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
+ }
+ }
+ }()
+
+ // Invoke the parser.
+ in.parseFile()
+ if in.parseError != nil {
+ return nil, in.parseError
+ }
+ in.file.Name = in.filename
+
+ // Assign comments to nearby syntax.
+ in.assignComments()
+
+ return in.file, nil
+}
+
+// Error is called to report an error.
+// The reason s is often "syntax error".
+// Error does not return: it panics.
+func (in *input) Error(s string) {
+ if s == "syntax error" && in.lastToken != "" {
+ s += " near " + in.lastToken
+ }
+ in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
+ panic(in.parseError)
+}
+
+// eof reports whether the input has reached end of file.
+func (in *input) eof() bool {
+ return len(in.remaining) == 0
+}
+
+// peekRune returns the next rune in the input without consuming it.
+func (in *input) peekRune() int {
+ if len(in.remaining) == 0 {
+ return 0
+ }
+ r, _ := utf8.DecodeRune(in.remaining)
+ return int(r)
+}
+
+// peekPrefix reports whether the remaining input begins with the given prefix.
+func (in *input) peekPrefix(prefix string) bool {
+ // This is like bytes.HasPrefix(in.remaining, []byte(prefix))
+ // but without the allocation of the []byte copy of prefix.
+ for i := 0; i < len(prefix); i++ {
+ if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// readRune consumes and returns the next rune in the input.
+func (in *input) readRune() int {
+ if len(in.remaining) == 0 {
+ in.Error("internal lexer error: readRune at EOF")
+ }
+ r, size := utf8.DecodeRune(in.remaining)
+ in.remaining = in.remaining[size:]
+ if r == '\n' {
+ in.pos.Line++
+ in.pos.LineRune = 1
+ } else {
+ in.pos.LineRune++
+ }
+ in.pos.Byte += size
+ return int(r)
+}
+
+type symType struct {
+ pos Position
+ endPos Position
+ text string
+}
+
+// startToken marks the beginning of the next input token.
+// It must be followed by a call to endToken, once the token has
+// been consumed using readRune.
+func (in *input) startToken(sym *symType) {
+ in.token = in.remaining
+ sym.text = ""
+ sym.pos = in.pos
+}
+
+// endToken marks the end of an input token.
+// It records the actual token string in sym.text if the caller
+// has not done that already.
+func (in *input) endToken(sym *symType) {
+ if sym.text == "" {
+ tok := string(in.token[:len(in.token)-len(in.remaining)])
+ sym.text = tok
+ in.lastToken = sym.text
+ }
+ sym.endPos = in.pos
+}
+
+// lex is called from the parser to obtain the next input token.
+// It returns the token value (either a rune like '+' or a symbolic token
+// such as _IDENT) and sets sym to the data associated with the token.
+// For all our input tokens, the associated data is
+// sym.pos (the position where the token begins)
+// and sym.text (the input string corresponding to the token).
+func (in *input) lex(sym *symType) int {
+ // Skip past spaces, stopping at non-space or EOF.
+ countNL := 0 // number of newlines we've skipped past
+ for !in.eof() {
+ // Skip over spaces. Count newlines so we can give the parser
+ // information about where top-level blank lines are,
+ // for top-level comment assignment.
+ c := in.peekRune()
+ if c == ' ' || c == '\t' || c == '\r' {
+ in.readRune()
+ continue
+ }
+
+ // Comment runs to end of line.
+ if in.peekPrefix("//") {
+ in.startToken(sym)
+
+ // Is this comment the only thing on its line?
+ // Find the last \n before this // and see if it's all
+ // spaces from there to here.
+ i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
+ suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
+ in.readRune()
+ in.readRune()
+
+ // Consume comment.
+ for len(in.remaining) > 0 && in.readRune() != '\n' {
+ }
+ in.endToken(sym)
+
+ sym.text = strings.TrimRight(sym.text, "\n")
+ in.lastToken = "comment"
+
+ // If we are at top level (not in a statement), hand the comment to
+ // the parser as a _COMMENT token. The grammar is written
+ // to handle top-level comments itself.
+ if !suffix {
+ // Not in a statement. Tell parser about top-level comment.
+ return _COMMENT
+ }
+
+ // Otherwise, save comment for later attachment to syntax tree.
+ if countNL > 1 {
+ in.comments = append(in.comments, Comment{sym.pos, "", false})
+ }
+ in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix})
+ countNL = 1
+ return _EOL
+ }
+
+ if in.peekPrefix("/*") {
+ in.Error("mod files must use // comments (not /* */ comments)")
+ }
+
+ // Found non-space non-comment.
+ break
+ }
+
+ // Found the beginning of the next token.
+ in.startToken(sym)
+ defer in.endToken(sym)
+
+ // End of file.
+ if in.eof() {
+ in.lastToken = "EOF"
+ return _EOF
+ }
+
+ // Punctuation tokens.
+ switch c := in.peekRune(); c {
+ case '\n':
+ in.readRune()
+ return c
+
+ case '(':
+ in.readRune()
+ return c
+
+ case ')':
+ in.readRune()
+ return c
+
+ case '"', '`': // quoted string
+ quote := c
+ in.readRune()
+ for {
+ if in.eof() {
+ in.pos = sym.pos
+ in.Error("unexpected EOF in string")
+ }
+ if in.peekRune() == '\n' {
+ in.Error("unexpected newline in string")
+ }
+ c := in.readRune()
+ if c == quote {
+ break
+ }
+ if c == '\\' && quote != '`' {
+ if in.eof() {
+ in.pos = sym.pos
+ in.Error("unexpected EOF in string")
+ }
+ in.readRune()
+ }
+ }
+ in.endToken(sym)
+ return _STRING
+ }
+
+ // Checked all punctuation. Must be identifier token.
+ if c := in.peekRune(); !isIdent(c) {
+ in.Error(fmt.Sprintf("unexpected input character %#q", c))
+ }
+
+ // Scan over identifier.
+ for isIdent(in.peekRune()) {
+ if in.peekPrefix("//") {
+ break
+ }
+ if in.peekPrefix("/*") {
+ in.Error("mod files must use // comments (not /* */ comments)")
+ }
+ in.readRune()
+ }
+ return _IDENT
+}
+
+// isIdent reports whether c is an identifier rune.
+// We treat nearly all runes as identifier runes.
+func isIdent(c int) bool {
+ return c != 0 && !unicode.IsSpace(rune(c))
+}
+
+// Comment assignment.
+// We build two lists of all subexpressions, preorder and postorder.
+// The preorder list is ordered by start location, with outer expressions first.
+// The postorder list is ordered by end location, with outer expressions last.
+// We use the preorder list to assign each whole-line comment to the syntax
+// immediately following it, and we use the postorder list to assign each
+// end-of-line comment to the syntax immediately preceding it.
+
+// order walks the expression adding it and its subexpressions to the
+// preorder and postorder lists.
+func (in *input) order(x Expr) {
+ if x != nil {
+ in.pre = append(in.pre, x)
+ }
+ switch x := x.(type) {
+ default:
+ panic(fmt.Errorf("order: unexpected type %T", x))
+ case nil:
+ // nothing
+ case *LParen, *RParen:
+ // nothing
+ case *CommentBlock:
+ // nothing
+ case *Line:
+ // nothing
+ case *FileSyntax:
+ for _, stmt := range x.Stmt {
+ in.order(stmt)
+ }
+ case *LineBlock:
+ in.order(&x.LParen)
+ for _, l := range x.Line {
+ in.order(l)
+ }
+ in.order(&x.RParen)
+ }
+ if x != nil {
+ in.post = append(in.post, x)
+ }
+}
+
+// assignComments attaches comments to nearby syntax.
+func (in *input) assignComments() {
+ const debug = false
+
+ // Generate preorder and postorder lists.
+ in.order(in.file)
+
+ // Split into whole-line comments and suffix comments.
+ var line, suffix []Comment
+ for _, com := range in.comments {
+ if com.Suffix {
+ suffix = append(suffix, com)
+ } else {
+ line = append(line, com)
+ }
+ }
+
+ if debug {
+ for _, c := range line {
+ fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+ }
+ }
+
+ // Assign line comments to syntax immediately following.
+ for _, x := range in.pre {
+ start, _ := x.Span()
+ if debug {
+ fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
+ }
+ xcom := x.Comment()
+ for len(line) > 0 && start.Byte >= line[0].Start.Byte {
+ if debug {
+ fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
+ }
+ xcom.Before = append(xcom.Before, line[0])
+ line = line[1:]
+ }
+ }
+
+ // Remaining line comments go at end of file.
+ in.file.After = append(in.file.After, line...)
+
+ if debug {
+ for _, c := range suffix {
+ fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+ }
+ }
+
+ // Assign suffix comments to syntax immediately before.
+ for i := len(in.post) - 1; i >= 0; i-- {
+ x := in.post[i]
+
+ start, end := x.Span()
+ if debug {
+ fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
+ }
+
+ // Do not assign suffix comments to end of line block or whole file.
+ // Instead assign them to the last element inside.
+ switch x.(type) {
+ case *FileSyntax:
+ continue
+ }
+
+ // Do not assign suffix comments to something that starts
+ // on an earlier line, so that in
+ //
+ // x ( y
+ // z ) // comment
+ //
+ // we assign the comment to z and not to x ( ... ).
+ if start.Line != end.Line {
+ continue
+ }
+ xcom := x.Comment()
+ for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
+ if debug {
+ fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
+ }
+ xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
+ suffix = suffix[:len(suffix)-1]
+ }
+ }
+
+ // We assigned suffix comments in reverse.
+ // If multiple suffix comments were appended to the same
+ // expression node, they are now in reverse. Fix that.
+ for _, x := range in.post {
+ reverseComments(x.Comment().Suffix)
+ }
+
+ // Remaining suffix comments go at beginning of file.
+ in.file.Before = append(in.file.Before, suffix...)
+}
+
+// reverseComments reverses the []Comment list.
+func reverseComments(list []Comment) {
+ for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+ list[i], list[j] = list[j], list[i]
+ }
+}
+
+func (in *input) parseFile() {
+ in.file = new(FileSyntax)
+ var sym symType
+ var cb *CommentBlock
+ for {
+ tok := in.lex(&sym)
+ switch tok {
+ case '\n':
+ if cb != nil {
+ in.file.Stmt = append(in.file.Stmt, cb)
+ cb = nil
+ }
+ case _COMMENT:
+ if cb == nil {
+ cb = &CommentBlock{Start: sym.pos}
+ }
+ com := cb.Comment()
+ com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text})
+ case _EOF:
+ if cb != nil {
+ in.file.Stmt = append(in.file.Stmt, cb)
+ }
+ return
+ default:
+ in.parseStmt(&sym)
+ if cb != nil {
+ in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
+ cb = nil
+ }
+ }
+ }
+}
+
+func (in *input) parseStmt(sym *symType) {
+ start := sym.pos
+ end := sym.endPos
+ token := []string{sym.text}
+ for {
+ tok := in.lex(sym)
+ switch tok {
+ case '\n', _EOF, _EOL:
+ in.file.Stmt = append(in.file.Stmt, &Line{
+ Start: start,
+ Token: token,
+ End: end,
+ })
+ return
+ case '(':
+ in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym))
+ return
+ default:
+ token = append(token, sym.text)
+ end = sym.endPos
+ }
+ }
+}
+
+func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock {
+ x := &LineBlock{
+ Start: start,
+ Token: token,
+ LParen: LParen{Pos: sym.pos},
+ }
+ var comments []Comment
+ for {
+ tok := in.lex(sym)
+ switch tok {
+ case _EOL:
+ // ignore
+ case '\n':
+ if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
+ comments = append(comments, Comment{})
+ }
+ case _COMMENT:
+ comments = append(comments, Comment{Start: sym.pos, Token: sym.text})
+ case _EOF:
+ in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
+ case ')':
+ x.RParen.Before = comments
+ x.RParen.Pos = sym.pos
+ tok = in.lex(sym)
+ if tok != '\n' && tok != _EOF && tok != _EOL {
+ in.Error("syntax error (expected newline after closing paren)")
+ }
+ return x
+ default:
+ l := in.parseLine(sym)
+ x.Line = append(x.Line, l)
+ l.Comment().Before = comments
+ comments = nil
+ }
+ }
+}
+
+func (in *input) parseLine(sym *symType) *Line {
+ start := sym.pos
+ end := sym.endPos
+ token := []string{sym.text}
+ for {
+ tok := in.lex(sym)
+ switch tok {
+ case '\n', _EOF, _EOL:
+ return &Line{
+ Start: start,
+ Token: token,
+ End: end,
+ InBlock: true,
+ }
+ default:
+ token = append(token, sym.text)
+ end = sym.endPos
+ }
+ }
+}
+
+const (
+ _EOF = -(1 + iota)
+ _EOL
+ _IDENT
+ _STRING
+ _COMMENT
+)
+
+var (
+ slashSlash = []byte("//")
+ moduleStr = []byte("module")
+)
+
+// ModulePath returns the module path from the go.mod file text.
+// If it cannot find a module path, it returns an empty string.
+// It is tolerant of unrelated problems in the go.mod file.
+func ModulePath(mod []byte) string {
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+ return "" // missing module path
+}
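
ModulePath is the one exported helper here that avoids a full parse: it scans the text line by line for a module directive. A small sketch with invented input:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	mod := []byte("// sample\nmodule \"example.com/hello\" // main module\n\ngo 1.14\n")
	// Comments are stripped and a quoted path is unquoted;
	// the empty string means no module line was found.
	fmt.Println(modfile.ModulePath(mod))                       // example.com/hello
	fmt.Println(modfile.ModulePath([]byte("go 1.14\n")) == "") // true
}
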
diff --git a/libgo/go/golang.org/x/mod/modfile/rule.go b/libgo/go/golang.org/x/mod/modfile/rule.go
new file mode 100644
index 0000000..62af068
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/modfile/rule.go
@@ -0,0 +1,776 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "golang.org/x/mod/internal/lazyregexp"
+ "golang.org/x/mod/module"
+)
+
+// A File is the parsed, interpreted form of a go.mod file.
+type File struct {
+ Module *Module
+ Go *Go
+ Require []*Require
+ Exclude []*Exclude
+ Replace []*Replace
+
+ Syntax *FileSyntax
+}
+
+// A Module is the module statement.
+type Module struct {
+ Mod module.Version
+ Syntax *Line
+}
+
+// A Go is the go statement.
+type Go struct {
+ Version string // "1.23"
+ Syntax *Line
+}
+
+// A Require is a single require statement.
+type Require struct {
+ Mod module.Version
+ Indirect bool // has "// indirect" comment
+ Syntax *Line
+}
+
+// An Exclude is a single exclude statement.
+type Exclude struct {
+ Mod module.Version
+ Syntax *Line
+}
+
+// A Replace is a single replace statement.
+type Replace struct {
+ Old module.Version
+ New module.Version
+ Syntax *Line
+}
+
+func (f *File) AddModuleStmt(path string) error {
+ if f.Syntax == nil {
+ f.Syntax = new(FileSyntax)
+ }
+ if f.Module == nil {
+ f.Module = &Module{
+ Mod: module.Version{Path: path},
+ Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)),
+ }
+ } else {
+ f.Module.Mod.Path = path
+ f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path))
+ }
+ return nil
+}
+
+func (f *File) AddComment(text string) {
+ if f.Syntax == nil {
+ f.Syntax = new(FileSyntax)
+ }
+ f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{
+ Comments: Comments{
+ Before: []Comment{
+ {
+ Token: text,
+ },
+ },
+ },
+ })
+}
+
+type VersionFixer func(path, version string) (string, error)
+
+// Parse parses the data, reported in errors as being from file,
+// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found.
+func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
+ return parseToFile(file, data, fix, true)
+}
+
+// ParseLax is like Parse but ignores unknown statements.
+// It is used when parsing go.mod files other than the main module,
+// under the theory that most statement types we add in the future will
+// only apply in the main module, like exclude and replace,
+// and so we get better gradual deployments if old go commands
+// simply ignore those statements when found in go.mod files
+// in dependencies.
+func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) {
+ return parseToFile(file, data, fix, false)
+}
+
+func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) {
+ fs, err := parse(file, data)
+ if err != nil {
+ return nil, err
+ }
+ f := &File{
+ Syntax: fs,
+ }
+
+ var errs bytes.Buffer
+ for _, x := range fs.Stmt {
+ switch x := x.(type) {
+ case *Line:
+ f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict)
+
+ case *LineBlock:
+ if len(x.Token) > 1 {
+ if strict {
+ fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
+ }
+ continue
+ }
+ switch x.Token[0] {
+ default:
+ if strict {
+ fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
+ }
+ continue
+ case "module", "require", "exclude", "replace":
+ for _, l := range x.Line {
+ f.add(&errs, l, x.Token[0], l.Token, fix, strict)
+ }
+ }
+ }
+ }
+
+ if errs.Len() > 0 {
+ return nil, errors.New(strings.TrimRight(errs.String(), "\n"))
+ }
+ return f, nil
+}
+
+var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
+
+func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
+ // If strict is false, this module is a dependency.
+ // We ignore all unknown directives as well as main-module-only
+ // directives like replace and exclude. It will work better for
+ // forward compatibility if we can depend on modules that have unknown
+ // statements (presumed relevant only when acting as the main module)
+ // and simply ignore those statements.
+ if !strict {
+ switch verb {
+ case "module", "require", "go":
+ // want these even for dependency go.mods
+ default:
+ return
+ }
+ }
+
+ switch verb {
+ default:
+ fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb)
+
+ case "go":
+ if f.Go != nil {
+ fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line)
+ return
+ }
+ if len(args) != 1 || !GoVersionRE.MatchString(args[0]) {
+ fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line)
+ return
+ }
+ f.Go = &Go{Syntax: line}
+ f.Go.Version = args[0]
+ case "module":
+ if f.Module != nil {
+ fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line)
+ return
+ }
+ f.Module = &Module{Syntax: line}
+ if len(args) != 1 {
+ fmt.Fprintf(errs, "%s:%d: usage: module module/path\n", f.Syntax.Name, line.Start.Line)
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ f.Module.Mod = module.Version{Path: s}
+ case "require", "exclude":
+ if len(args) != 2 {
+ fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb)
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ v, err := parseVersion(verb, s, &args[1], fix)
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ pathMajor, err := modulePathMajor(s)
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ if err := module.CheckPathMajor(v, pathMajor); err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
+ return
+ }
+ if verb == "require" {
+ f.Require = append(f.Require, &Require{
+ Mod: module.Version{Path: s, Version: v},
+ Syntax: line,
+ Indirect: isIndirect(line),
+ })
+ } else {
+ f.Exclude = append(f.Exclude, &Exclude{
+ Mod: module.Version{Path: s, Version: v},
+ Syntax: line,
+ })
+ }
+ case "replace":
+ arrow := 2
+ if len(args) >= 2 && args[1] == "=>" {
+ arrow = 1
+ }
+ if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
+ fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb)
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ pathMajor, err := modulePathMajor(s)
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ var v string
+ if arrow == 2 {
+ v, err = parseVersion(verb, s, &args[1], fix)
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ if err := module.CheckPathMajor(v, pathMajor); err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
+ return
+ }
+ }
+ ns, err := parseString(&args[arrow+1])
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ nv := ""
+ if len(args) == arrow+2 {
+ if !IsDirectoryPath(ns) {
+ fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line)
+ return
+ }
+ if filepath.Separator == '/' && strings.Contains(ns, `\`) {
+ fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line)
+ return
+ }
+ }
+ if len(args) == arrow+3 {
+ nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
+ if err != nil {
+ fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+ return
+ }
+ if IsDirectoryPath(ns) {
+ fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns)
+ return
+ }
+ }
+ f.Replace = append(f.Replace, &Replace{
+ Old: module.Version{Path: s, Version: v},
+ New: module.Version{Path: ns, Version: nv},
+ Syntax: line,
+ })
+ }
+}
+
+// isIndirect reports whether line has a "// indirect" comment,
+// meaning it is in go.mod only for its effect on indirect dependencies,
+// so that it can be dropped entirely once the effective version of the
+// indirect dependency reaches the given minimum version.
+func isIndirect(line *Line) bool {
+ if len(line.Suffix) == 0 {
+ return false
+ }
+ f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash)))
+ return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;")
+}
+
+// setIndirect sets line to have (or not have) a "// indirect" comment.
+func setIndirect(line *Line, indirect bool) {
+ if isIndirect(line) == indirect {
+ return
+ }
+ if indirect {
+ // Adding comment.
+ if len(line.Suffix) == 0 {
+ // New comment.
+ line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
+ return
+ }
+
+ com := &line.Suffix[0]
+ text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash)))
+ if text == "" {
+ // Empty comment.
+ com.Token = "// indirect"
+ return
+ }
+
+ // Insert at beginning of existing comment.
+ com.Token = "// indirect; " + text
+ return
+ }
+
+ // Removing comment.
+ f := strings.Fields(line.Suffix[0].Token)
+ if len(f) == 2 {
+ // Remove whole comment.
+ line.Suffix = nil
+ return
+ }
+
+ // Remove comment prefix.
+ com := &line.Suffix[0]
+ i := strings.Index(com.Token, "indirect;")
+ com.Token = "//" + com.Token[i+len("indirect;"):]
+}
+
+// IsDirectoryPath reports whether the given path should be interpreted
+// as a directory path. Just like on the go command line, relative paths
+// and rooted paths are directory paths; the rest are module paths.
+func IsDirectoryPath(ns string) bool {
+ // Because go.mod files can move from one system to another,
+ // we check all known path syntaxes, both Unix and Windows.
+ return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
+ strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
+ len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
+}
+
+// MustQuote reports whether s must be quoted in order to appear as
+// a single token in a go.mod line.
+func MustQuote(s string) bool {
+ for _, r := range s {
+ if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' {
+ return true
+ }
+ }
+ return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*")
+}
+
+// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
+// the quotation of s.
+func AutoQuote(s string) string {
+ if MustQuote(s) {
+ return strconv.Quote(s)
+ }
+ return s
+}
+
+func parseString(s *string) (string, error) {
+ t := *s
+ if strings.HasPrefix(t, `"`) {
+ var err error
+ if t, err = strconv.Unquote(t); err != nil {
+ return "", err
+ }
+ } else if strings.ContainsAny(t, "\"'`") {
+ // Other quotes are reserved both for possible future expansion
+ // and to avoid confusion. For example if someone types 'x'
+ // we want that to be a syntax error and not a literal x in literal quotation marks.
+ return "", fmt.Errorf("unquoted string cannot contain quote")
+ }
+ *s = AutoQuote(t)
+ return t, nil
+}
+
+type Error struct {
+ Verb string
+ ModPath string
+ Err error
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("%s %s: %v", e.Verb, e.ModPath, e.Err)
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+
+func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) {
+ t, err := parseString(s)
+ if err != nil {
+ return "", &Error{
+ Verb: verb,
+ ModPath: path,
+ Err: &module.InvalidVersionError{
+ Version: *s,
+ Err: err,
+ },
+ }
+ }
+ if fix != nil {
+ var err error
+ t, err = fix(path, t)
+ if err != nil {
+ if err, ok := err.(*module.ModuleError); ok {
+ return "", &Error{
+ Verb: verb,
+ ModPath: path,
+ Err: err.Err,
+ }
+ }
+ return "", err
+ }
+ }
+ if v := module.CanonicalVersion(t); v != "" {
+ *s = v
+ return *s, nil
+ }
+ return "", &Error{
+ Verb: verb,
+ ModPath: path,
+ Err: &module.InvalidVersionError{
+ Version: t,
+ Err: errors.New("must be of the form v1.2.3"),
+ },
+ }
+}
+
+func modulePathMajor(path string) (string, error) {
+ _, major, ok := module.SplitPathVersion(path)
+ if !ok {
+ return "", fmt.Errorf("invalid module path")
+ }
+ return major, nil
+}
+
+func (f *File) Format() ([]byte, error) {
+ return Format(f.Syntax), nil
+}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropRequire
+// clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *File) Cleanup() {
+ w := 0
+ for _, r := range f.Require {
+ if r.Mod.Path != "" {
+ f.Require[w] = r
+ w++
+ }
+ }
+ f.Require = f.Require[:w]
+
+ w = 0
+ for _, x := range f.Exclude {
+ if x.Mod.Path != "" {
+ f.Exclude[w] = x
+ w++
+ }
+ }
+ f.Exclude = f.Exclude[:w]
+
+ w = 0
+ for _, r := range f.Replace {
+ if r.Old.Path != "" {
+ f.Replace[w] = r
+ w++
+ }
+ }
+ f.Replace = f.Replace[:w]
+
+ f.Syntax.Cleanup()
+}
+
+func (f *File) AddGoStmt(version string) error {
+ if !GoVersionRE.MatchString(version) {
+ return fmt.Errorf("invalid language version string %q", version)
+ }
+ if f.Go == nil {
+ var hint Expr
+ if f.Module != nil && f.Module.Syntax != nil {
+ hint = f.Module.Syntax
+ }
+ f.Go = &Go{
+ Version: version,
+ Syntax: f.Syntax.addLine(hint, "go", version),
+ }
+ } else {
+ f.Go.Version = version
+ f.Syntax.updateLine(f.Go.Syntax, "go", version)
+ }
+ return nil
+}
+
+func (f *File) AddRequire(path, vers string) error {
+ need := true
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ if need {
+ r.Mod.Version = vers
+ f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
+ need = false
+ } else {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Require{}
+ }
+ }
+ }
+
+ if need {
+ f.AddNewRequire(path, vers, false)
+ }
+ return nil
+}
+
+func (f *File) AddNewRequire(path, vers string, indirect bool) {
+ line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers)
+ setIndirect(line, indirect)
+ f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line})
+}
+
+func (f *File) SetRequire(req []*Require) {
+ need := make(map[string]string)
+ indirect := make(map[string]bool)
+ for _, r := range req {
+ need[r.Mod.Path] = r.Mod.Version
+ indirect[r.Mod.Path] = r.Indirect
+ }
+
+ for _, r := range f.Require {
+ if v, ok := need[r.Mod.Path]; ok {
+ r.Mod.Version = v
+ r.Indirect = indirect[r.Mod.Path]
+ } else {
+ *r = Require{}
+ }
+ }
+
+ var newStmts []Expr
+ for _, stmt := range f.Syntax.Stmt {
+ switch stmt := stmt.(type) {
+ case *LineBlock:
+ if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
+ var newLines []*Line
+ for _, line := range stmt.Line {
+ if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" {
+ if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 {
+ line.Comments.Before = line.Comments.Before[:0]
+ }
+ line.Token[1] = need[p]
+ delete(need, p)
+ setIndirect(line, indirect[p])
+ newLines = append(newLines, line)
+ }
+ }
+ if len(newLines) == 0 {
+ continue // drop stmt
+ }
+ stmt.Line = newLines
+ }
+
+ case *Line:
+ if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
+ if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" {
+ stmt.Token[2] = need[p]
+ delete(need, p)
+ setIndirect(stmt, indirect[p])
+ } else {
+ continue // drop stmt
+ }
+ }
+ }
+ newStmts = append(newStmts, stmt)
+ }
+ f.Syntax.Stmt = newStmts
+
+ for path, vers := range need {
+ f.AddNewRequire(path, vers, indirect[path])
+ }
+ f.SortBlocks()
+}
+
+func (f *File) DropRequire(path string) error {
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Require{}
+ }
+ }
+ return nil
+}
+
+func (f *File) AddExclude(path, vers string) error {
+ var hint *Line
+ for _, x := range f.Exclude {
+ if x.Mod.Path == path && x.Mod.Version == vers {
+ return nil
+ }
+ if x.Mod.Path == path {
+ hint = x.Syntax
+ }
+ }
+
+ f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
+ return nil
+}
+
+func (f *File) DropExclude(path, vers string) error {
+ for _, x := range f.Exclude {
+ if x.Mod.Path == path && x.Mod.Version == vers {
+ f.Syntax.removeLine(x.Syntax)
+ *x = Exclude{}
+ }
+ }
+ return nil
+}
+
+func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+ need := true
+ old := module.Version{Path: oldPath, Version: oldVers}
+ new := module.Version{Path: newPath, Version: newVers}
+ tokens := []string{"replace", AutoQuote(oldPath)}
+ if oldVers != "" {
+ tokens = append(tokens, oldVers)
+ }
+ tokens = append(tokens, "=>", AutoQuote(newPath))
+ if newVers != "" {
+ tokens = append(tokens, newVers)
+ }
+
+ var hint *Line
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
+ if need {
+ // Found replacement for old; update to use new.
+ r.New = new
+ f.Syntax.updateLine(r.Syntax, tokens...)
+ need = false
+ continue
+ }
+ // Already added; delete other replacements for same.
+ f.Syntax.removeLine(r.Syntax)
+ *r = Replace{}
+ }
+ if r.Old.Path == oldPath {
+ hint = r.Syntax
+ }
+ }
+ if need {
+ f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)})
+ }
+ return nil
+}
+
+func (f *File) DropReplace(oldPath, oldVers string) error {
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && r.Old.Version == oldVers {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Replace{}
+ }
+ }
+ return nil
+}
+
+func (f *File) SortBlocks() {
+ f.removeDups() // otherwise sorting is unsafe
+
+ for _, stmt := range f.Syntax.Stmt {
+ block, ok := stmt.(*LineBlock)
+ if !ok {
+ continue
+ }
+ sort.Slice(block.Line, func(i, j int) bool {
+ li := block.Line[i]
+ lj := block.Line[j]
+ for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
+ if li.Token[k] != lj.Token[k] {
+ return li.Token[k] < lj.Token[k]
+ }
+ }
+ return len(li.Token) < len(lj.Token)
+ })
+ }
+}
+
+func (f *File) removeDups() {
+ have := make(map[module.Version]bool)
+ kill := make(map[*Line]bool)
+ for _, x := range f.Exclude {
+ if have[x.Mod] {
+ kill[x.Syntax] = true
+ continue
+ }
+ have[x.Mod] = true
+ }
+ var excl []*Exclude
+ for _, x := range f.Exclude {
+ if !kill[x.Syntax] {
+ excl = append(excl, x)
+ }
+ }
+ f.Exclude = excl
+
+ have = make(map[module.Version]bool)
+ // Later replacements take priority over earlier ones.
+ for i := len(f.Replace) - 1; i >= 0; i-- {
+ x := f.Replace[i]
+ if have[x.Old] {
+ kill[x.Syntax] = true
+ continue
+ }
+ have[x.Old] = true
+ }
+ var repl []*Replace
+ for _, x := range f.Replace {
+ if !kill[x.Syntax] {
+ repl = append(repl, x)
+ }
+ }
+ f.Replace = repl
+
+ var stmts []Expr
+ for _, stmt := range f.Syntax.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if kill[stmt] {
+ continue
+ }
+ case *LineBlock:
+ var lines []*Line
+ for _, line := range stmt.Line {
+ if !kill[line] {
+ lines = append(lines, line)
+ }
+ }
+ stmt.Line = lines
+ if len(lines) == 0 {
+ continue
+ }
+ }
+ stmts = append(stmts, stmt)
+ }
+ f.Syntax.Stmt = stmts
+}
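
The editing methods above (AddRequire, DropRequire, AddReplace, and friends) mark removed lines dead rather than splicing slices, so a batch of edits stays linear as long as Cleanup runs once at the end. A minimal edit-session sketch (paths and versions are placeholders):

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("module example.com/m\n\ngo 1.14\n\nrequire example.com/old v1.0.0\n")
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		log.Fatal(err)
	}
	f.AddRequire("example.com/new", "v1.2.3") // appends, or updates an existing line
	f.DropRequire("example.com/old")          // marks the line dead (Token = nil)
	f.Cleanup()                               // compacts dead lines once, at the end
	out, _ := f.Format()
	fmt.Printf("%s", out)
}
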
diff --git a/libgo/go/golang.org/x/mod/module/module.go b/libgo/go/golang.org/x/mod/module/module.go
new file mode 100644
index 0000000..6cd3728
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/module/module.go
@@ -0,0 +1,718 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package module defines the module.Version type along with support code.
+//
+// The module.Version type is a simple Path, Version pair:
+//
+// type Version struct {
+// Path string
+// Version string
+// }
+//
+// There are no restrictions imposed directly by use of this structure,
+// but additional checking functions, most notably Check, verify that
+// a particular path, version pair is valid.
+//
+// Escaped Paths
+//
+// Module paths appear as substrings of file system paths
+// (in the download cache) and of web server URLs in the proxy protocol.
+// In general we cannot rely on file systems to be case-sensitive,
+// nor can we rely on web servers, since they read from file systems.
+// That is, we cannot rely on the file system to keep rsc.io/QUOTE
+// and rsc.io/quote separate. Windows and macOS don't.
+// Instead, we must never require two different casings of a file path.
+// Because we want the download cache to match the proxy protocol,
+// and because we want the proxy protocol to be possible to serve
+// from a tree of static files (which might be stored on a case-insensitive
+// file system), the proxy protocol must never require two different casings
+// of a URL path either.
+//
+// One possibility would be to make the escaped form be the lowercase
+// hexadecimal encoding of the actual path bytes. This would avoid ever
+// needing different casings of a file path, but it would be fairly illegible
+// to most programmers when those paths appeared in the file system
+// (including in file paths in compiler errors and stack traces)
+// in web server logs, and so on. Instead, we want a safe escaped form that
+// leaves most paths unaltered.
+//
+// The safe escaped form is to replace every uppercase letter
+// with an exclamation mark followed by the letter's lowercase equivalent.
+//
+// For example,
+//
+// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
+// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus
+//
+// Import paths that avoid upper-case letters are left unchanged.
+// Note that because import paths are ASCII-only and avoid various
+// problematic punctuation (like : < and >), the escaped form is also ASCII-only
+// and avoids the same problematic punctuation.
+//
+// Import paths have never allowed exclamation marks, so there is no
+// need to define how to escape a literal !.
+//
+// Unicode Restrictions
+//
+// Today, paths are disallowed from using Unicode.
+//
+// Although paths are currently disallowed from using Unicode,
+// we would like at some point to allow Unicode letters as well, to assume that
+// file systems and URLs are Unicode-safe (storing UTF-8), and apply
+// the !-for-uppercase convention for escaping them in the file system.
+// But there are at least two subtle considerations.
+//
+// First, note that not all case-fold equivalent distinct runes
+// form an upper/lower pair.
+// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
+// are three distinct runes that case-fold to each other.
+// When we do add Unicode letters, we must not assume that upper/lower
+// are the only case-equivalent pairs.
+// Perhaps the Kelvin symbol would be disallowed entirely, for example.
+// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
+//
+// Second, it would be nice to allow Unicode marks as well as letters,
+// but marks include combining marks, and then we must deal not
+// only with case folding but also normalization: both U+00E9 ('é')
+// and U+0065 U+0301 ('e' followed by combining acute accent)
+// look the same on the page and are treated by some file systems
+// as the same path. If we do allow Unicode marks in paths, there
+// must be some kind of normalization to allow only one canonical
+// encoding of any character used in an import path.
+package module
+
+// IMPORTANT NOTE
+//
+// This file essentially defines the set of valid import paths for the go command.
+// There are many subtle considerations, including Unicode ambiguity,
+// security, network, and file system representations.
+//
+// This file also defines the set of valid module path and version combinations,
+// another topic with many subtle considerations.
+//
+// Changes to the semantics in this file require approval from rsc.
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/mod/semver"
+ errors "golang.org/x/xerrors"
+)
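+
+// exampleEscaping is an editorial sketch, not part of the upstream file:
+// it demonstrates the !-for-uppercase convention described in the package
+// documentation, using the EscapePath and UnescapePath helpers defined
+// later in this file.
+func exampleEscaping() {
+	e, _ := EscapePath("github.com/Azure/azure-sdk-for-go")
+	// e == "github.com/!azure/azure-sdk-for-go"
+	p, _ := UnescapePath(e)
+	// p == "github.com/Azure/azure-sdk-for-go"
+	_ = p
+}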
+
+// A Version (for clients, a module.Version) is defined by a module path and version pair.
+// These are stored in their plain (unescaped) form.
+type Version struct {
+ // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
+ Path string
+
+ // Version is usually a semantic version in canonical form.
+ // There are three exceptions to this general rule.
+ // First, the top-level target of a build has no specific version
+ // and uses Version = "".
+ // Second, during MVS calculations the version "none" is used
+ // to represent the decision to take no version of a given module.
+ // Third, filesystem paths found in "replace" directives are
+ // represented by a path with an empty version.
+ Version string `json:",omitempty"`
+}
+
+// String returns a representation of the Version suitable for logging
+// (Path@Version, or just Path if Version is empty).
+func (m Version) String() string {
+ if m.Version == "" {
+ return m.Path
+ }
+ return m.Path + "@" + m.Version
+}
+
+// A ModuleError indicates an error specific to a module.
+type ModuleError struct {
+ Path string
+ Version string
+ Err error
+}
+
+// VersionError returns a ModuleError derived from a Version and error,
+// or err itself if it is already such an error.
+func VersionError(v Version, err error) error {
+ var mErr *ModuleError
+ if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
+ return err
+ }
+ return &ModuleError{
+ Path: v.Path,
+ Version: v.Version,
+ Err: err,
+ }
+}
+
+func (e *ModuleError) Error() string {
+ if v, ok := e.Err.(*InvalidVersionError); ok {
+ return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
+ }
+ if e.Version != "" {
+ return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
+ }
+ return fmt.Sprintf("module %s: %v", e.Path, e.Err)
+}
+
+func (e *ModuleError) Unwrap() error { return e.Err }
+
+// An InvalidVersionError indicates an error specific to a version, with the
+// module path unknown or specified externally.
+//
+// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
+// must not wrap a ModuleError.
+type InvalidVersionError struct {
+ Version string
+ Pseudo bool
+ Err error
+}
+
+// noun returns either "version" or "pseudo-version", depending on whether
+// e.Version is a pseudo-version.
+func (e *InvalidVersionError) noun() string {
+ if e.Pseudo {
+ return "pseudo-version"
+ }
+ return "version"
+}
+
+func (e *InvalidVersionError) Error() string {
+ return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
+}
+
+func (e *InvalidVersionError) Unwrap() error { return e.Err }
+
+// Check checks that a given module path, version pair is valid.
+// In addition to the path being a valid module path
+// and the version being a valid semantic version,
+// the two must correspond.
+// For example, the path "yaml/v2" only corresponds to
+// semantic versions beginning with "v2.".
+func Check(path, version string) error {
+ if err := CheckPath(path); err != nil {
+ return err
+ }
+ if !semver.IsValid(version) {
+ return &ModuleError{
+ Path: path,
+ Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
+ }
+ }
+ _, pathMajor, _ := SplitPathVersion(path)
+ if err := CheckPathMajor(version, pathMajor); err != nil {
+ return &ModuleError{Path: path, Err: err}
+ }
+ return nil
+}
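+
+// exampleCheck is an editorial sketch, not part of the upstream file:
+// it shows the path/version correspondence that Check enforces.
+func exampleCheck() {
+	_ = Check("example.com/m", "v1.2.3") // nil: v0/v1 versions match an unsuffixed path
+	err := Check("example.com/m", "v2.0.0")
+	// err is non-nil: a v2 version requires a path ending in /v2.
+	_ = err
+}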
+
+// firstPathOK reports whether r can appear in the first element of a module path.
+// The first element of the path must be an LDH domain name, at least for now.
+// To avoid case ambiguity, the domain name must be entirely lower case.
+func firstPathOK(r rune) bool {
+ return r == '-' || r == '.' ||
+ '0' <= r && r <= '9' ||
+ 'a' <= r && r <= 'z'
+}
+
+// pathOK reports whether r can appear in an import path element.
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
+// This matches what "go get" has historically recognized in import paths.
+// TODO(rsc): We would like to allow Unicode letters, but that requires additional
+// care in the safe encoding (see "escaped paths" above).
+func pathOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
+ '0' <= r && r <= '9' ||
+ 'A' <= r && r <= 'Z' ||
+ 'a' <= r && r <= 'z'
+ }
+ return false
+}
+
+// fileNameOK reports whether r can appear in a file name.
+// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
+// If we expand the set of allowed characters here, we have to
+// work harder at detecting potential case-folding and normalization collisions.
+// See note about "escaped paths" above.
+func fileNameOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ // Entire set of ASCII punctuation, from which we remove characters:
+ // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
+ // We disallow some shell special characters: " ' * < > ? ` |
+ // (Note that some of those are disallowed by the Windows file system as well.)
+ // We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
+ // We allow spaces (U+0020) in file names.
+ const allowed = "!#$%&()+,-.=@[]^_{}~ "
+ if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
+ return true
+ }
+ for i := 0; i < len(allowed); i++ {
+ if rune(allowed[i]) == r {
+ return true
+ }
+ }
+ return false
+ }
+ // It may be OK to add more ASCII punctuation here, but only carefully.
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
+ return unicode.IsLetter(r)
+}
+
+// CheckPath checks that a module path is valid.
+// A valid module path is a valid import path, as checked by CheckImportPath,
+// with two additional constraints.
+// First, the leading path element (up to the first slash, if any),
+// by convention a domain name, must contain only lower-case ASCII letters,
+// ASCII digits, dots (U+002E), and dashes (U+002D);
+// it must contain at least one dot and cannot start with a dash.
+// Second, a final path element of the form /vN, where N looks numeric
+// (ASCII digits and dots), must not begin with a leading zero, must not be /v1,
+// and must not contain any dots. For paths beginning with "gopkg.in/",
+// this second requirement is replaced by a requirement that the path
+// follow the gopkg.in server's conventions.
+func CheckPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed module path %q: %v", path, err)
+ }
+ i := strings.Index(path, "/")
+ if i < 0 {
+ i = len(path)
+ }
+ if i == 0 {
+ return fmt.Errorf("malformed module path %q: leading slash", path)
+ }
+ if !strings.Contains(path[:i], ".") {
+ return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
+ }
+ if path[0] == '-' {
+ return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
+ }
+ for _, r := range path[:i] {
+ if !firstPathOK(r) {
+ return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
+ }
+ }
+ if _, _, ok := SplitPathVersion(path); !ok {
+ return fmt.Errorf("malformed module path %q: invalid version", path)
+ }
+ return nil
+}
+
+// CheckImportPath checks that an import path is valid.
+//
+// A valid import path consists of one or more valid path elements
+// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
+//
+// A valid path element is a non-empty string made up of
+// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
+// It must not begin or end with a dot (U+002E), nor contain two dots in a row.
+//
+// The element prefix up to the first dot must not be a reserved file name
+// on Windows, regardless of case (CON, com1, NuL, and so on).
+//
+// CheckImportPath may be less restrictive in the future, but see the
+// top-level package documentation for additional information about
+// subtleties of Unicode.
+func CheckImportPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed import path %q: %v", path, err)
+ }
+ return nil
+}
+
+// checkPath checks that a general path is valid.
+// It returns an error describing why but not mentioning path.
+// Because these checks apply to both module paths and import paths,
+// the caller is expected to add the "malformed ___ path %q: " prefix.
+// fileName indicates whether the final element of the path is a file name
+// (as opposed to a directory name).
+func checkPath(path string, fileName bool) error {
+ if !utf8.ValidString(path) {
+ return fmt.Errorf("invalid UTF-8")
+ }
+ if path == "" {
+ return fmt.Errorf("empty string")
+ }
+ if path[0] == '-' {
+ return fmt.Errorf("leading dash")
+ }
+ if strings.Contains(path, "//") {
+ return fmt.Errorf("double slash")
+ }
+ if path[len(path)-1] == '/' {
+ return fmt.Errorf("trailing slash")
+ }
+ elemStart := 0
+ for i, r := range path {
+ if r == '/' {
+ if err := checkElem(path[elemStart:i], fileName); err != nil {
+ return err
+ }
+ elemStart = i + 1
+ }
+ }
+ if err := checkElem(path[elemStart:], fileName); err != nil {
+ return err
+ }
+ return nil
+}
+
+// checkElem checks whether an individual path element is valid.
+// fileName indicates whether the element is a file name (not a directory name).
+func checkElem(elem string, fileName bool) error {
+ if elem == "" {
+ return fmt.Errorf("empty path element")
+ }
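+	// Reject elements made up entirely of dots, such as "." and "..".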
+ if strings.Count(elem, ".") == len(elem) {
+ return fmt.Errorf("invalid path element %q", elem)
+ }
+ if elem[0] == '.' && !fileName {
+ return fmt.Errorf("leading dot in path element")
+ }
+ if elem[len(elem)-1] == '.' {
+ return fmt.Errorf("trailing dot in path element")
+ }
+ charOK := pathOK
+ if fileName {
+ charOK = fileNameOK
+ }
+ for _, r := range elem {
+ if !charOK(r) {
+ return fmt.Errorf("invalid char %q", r)
+ }
+ }
+
+ // Windows disallows a bunch of path elements, sadly.
+ // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+ short := elem
+ if i := strings.Index(short, "."); i >= 0 {
+ short = short[:i]
+ }
+ for _, bad := range badWindowsNames {
+ if strings.EqualFold(bad, short) {
+ return fmt.Errorf("%q disallowed as path element component on Windows", short)
+ }
+ }
+ return nil
+}
+
+// CheckFilePath checks that a slash-separated file path is valid.
+// The definition of a valid file path is the same as the definition
+// of a valid import path except that the set of allowed characters is larger:
+// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
+// and the ASCII punctuation characters
+// “!#$%&()+,-.=@[]^_{}~”.
+// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
+// have special meanings in certain shells or operating systems.)
+//
+// CheckFilePath may be less restrictive in the future, but see the
+// top-level package documentation for additional information about
+// subtleties of Unicode.
+func CheckFilePath(path string) error {
+ if err := checkPath(path, true); err != nil {
+ return fmt.Errorf("malformed file path %q: %v", path, err)
+ }
+ return nil
+}
+
+// badWindowsNames are the reserved file path elements on Windows.
+// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+var badWindowsNames = []string{
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+}
+
+// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
+// and pathMajor is either empty or "/vN" for N >= 2.
+// As a special case, gopkg.in paths are recognized directly;
+// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
+// SplitPathVersion returns with ok = false when presented with
+// a path whose last path element does not satisfy the constraints
+// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
+func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
+ if strings.HasPrefix(path, "gopkg.in/") {
+ return splitGopkgIn(path)
+ }
+
+ i := len(path)
+ dot := false
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
+ if path[i-1] == '.' {
+ dot = true
+ }
+ i--
+ }
+ if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
+ return path, "", true
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
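+
+// exampleSplitPathVersion is an editorial sketch, not part of the upstream
+// file: a few representative splits.
+func exampleSplitPathVersion() {
+	prefix, pathMajor, ok := SplitPathVersion("example.com/m/v2")
+	// prefix == "example.com/m", pathMajor == "/v2", ok == true
+	_, _, _ = prefix, pathMajor, ok
+
+	_, _, ok = SplitPathVersion("example.com/m/v1") // ok == false: /v1 is disallowed
+	_ = ok
+	_, pathMajor, _ = SplitPathVersion("gopkg.in/yaml.v2") // pathMajor == ".v2"
+	_ = pathMajor
+}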
+
+// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
+func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
+ if !strings.HasPrefix(path, "gopkg.in/") {
+ return path, "", false
+ }
+ i := len(path)
+ if strings.HasSuffix(path, "-unstable") {
+ i -= len("-unstable")
+ }
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
+ i--
+ }
+ if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
+ // All gopkg.in paths must end in vN for some N.
+ return path, "", false
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// MatchPathMajor reports whether the semantic version v
+// matches the path major version pathMajor.
+//
+// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
+func MatchPathMajor(v, pathMajor string) bool {
+ return CheckPathMajor(v, pathMajor) == nil
+}
+
+// CheckPathMajor returns a non-nil error if the semantic version v
+// does not match the path major version pathMajor.
+func CheckPathMajor(v, pathMajor string) error {
+ // TODO(jayconrod): return errors or panic for invalid inputs. This function
+ // (and others) was covered by integration tests for cmd/go, and surrounding
+ // code protected against invalid inputs like non-canonical versions.
+ if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+ pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+ }
+ if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
+		// Allow an old bug in pseudo-versions that generated v0.0.0- pseudo-versions for gopkg.in .v1 modules.
+ // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
+ return nil
+ }
+ m := semver.Major(v)
+ if pathMajor == "" {
+ if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
+ return nil
+ }
+ pathMajor = "v0 or v1"
+ } else if pathMajor[0] == '/' || pathMajor[0] == '.' {
+ if m == pathMajor[1:] {
+ return nil
+ }
+ pathMajor = pathMajor[1:]
+ }
+ return &InvalidVersionError{
+ Version: v,
+ Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
+ }
+}
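+
+// exampleCheckPathMajor is an editorial sketch, not part of the upstream
+// file: it illustrates the major-version agreement rules.
+func exampleCheckPathMajor() {
+	_ = CheckPathMajor("v1.5.2", "")              // nil: v0 and v1 match an empty pathMajor
+	_ = CheckPathMajor("v2.0.0", "/v2")           // nil: the suffix and version agree
+	_ = CheckPathMajor("v2.0.0+incompatible", "") // nil: +incompatible bypasses the rule
+	err := CheckPathMajor("v3.0.0", "/v2")
+	// err: version "v3.0.0" invalid: should be v2, not v3
+	_ = err
+}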
+
+// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
+// An empty PathMajorPrefix allows either v0 or v1.
+//
+// Note that MatchPathMajor may accept some versions that do not actually begin
+// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
+// pathMajor, even though that pathMajor implies 'v1' tagging.
+func PathMajorPrefix(pathMajor string) string {
+ if pathMajor == "" {
+ return ""
+ }
+ if pathMajor[0] != '/' && pathMajor[0] != '.' {
+ panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
+ }
+ if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+ pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+ }
+ m := pathMajor[1:]
+ if m != semver.Major(m) {
+		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix is not a valid major version")
+ }
+ return m
+}
+
+// CanonicalVersion returns the canonical form of the version string v.
+// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
+func CanonicalVersion(v string) string {
+ cv := semver.Canonical(v)
+ if semver.Build(v) == "+incompatible" {
+ cv += "+incompatible"
+ }
+ return cv
+}
+
+// Sort sorts the list by Path, breaking ties by comparing Version fields.
+// The Version fields are interpreted as semantic versions (using semver.Compare)
+// optionally followed by a tie-breaking suffix introduced by a slash character,
+// like in "v0.0.1/go.mod".
+func Sort(list []Version) {
+ sort.Slice(list, func(i, j int) bool {
+ mi := list[i]
+ mj := list[j]
+ if mi.Path != mj.Path {
+ return mi.Path < mj.Path
+ }
+ // To help go.sum formatting, allow version/file.
+ // Compare semver prefix by semver rules,
+ // file by string order.
+ vi := mi.Version
+ vj := mj.Version
+ var fi, fj string
+ if k := strings.Index(vi, "/"); k >= 0 {
+ vi, fi = vi[:k], vi[k:]
+ }
+ if k := strings.Index(vj, "/"); k >= 0 {
+ vj, fj = vj[:k], vj[k:]
+ }
+ if vi != vj {
+ return semver.Compare(vi, vj) < 0
+ }
+ return fi < fj
+ })
+}
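+
+// exampleSort is an editorial sketch, not part of the upstream file:
+// Sort orders by path, then by semantic version, then by the optional
+// "/file" suffix used in go.sum formatting.
+func exampleSort() []Version {
+	list := []Version{
+		{Path: "rsc.io/quote", Version: "v1.5.2/go.mod"},
+		{Path: "rsc.io/quote", Version: "v1.5.2"},
+		{Path: "golang.org/x/text", Version: "v0.3.0"},
+	}
+	Sort(list)
+	// Result: golang.org/x/text v0.3.0, rsc.io/quote v1.5.2, rsc.io/quote v1.5.2/go.mod
+	return list
+}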
+
+// EscapePath returns the escaped form of the given module path.
+// It fails if the module path is invalid.
+func EscapePath(path string) (escaped string, err error) {
+ if err := CheckPath(path); err != nil {
+ return "", err
+ }
+
+ return escapeString(path)
+}
+
+// EscapeVersion returns the escaped form of the given module version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func EscapeVersion(v string) (escaped string, err error) {
+ if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
+ return "", &InvalidVersionError{
+ Version: v,
+ Err: fmt.Errorf("disallowed version string"),
+ }
+ }
+ return escapeString(v)
+}
+
+func escapeString(s string) (escaped string, err error) {
+ haveUpper := false
+ for _, r := range s {
+ if r == '!' || r >= utf8.RuneSelf {
+ // This should be disallowed by CheckPath, but diagnose anyway.
+ // The correctness of the escaping loop below depends on it.
+ return "", fmt.Errorf("internal error: inconsistency in EscapePath")
+ }
+ if 'A' <= r && r <= 'Z' {
+ haveUpper = true
+ }
+ }
+
+ if !haveUpper {
+ return s, nil
+ }
+
+ var buf []byte
+ for _, r := range s {
+ if 'A' <= r && r <= 'Z' {
+ buf = append(buf, '!', byte(r+'a'-'A'))
+ } else {
+ buf = append(buf, byte(r))
+ }
+ }
+ return string(buf), nil
+}
+
+// UnescapePath returns the module path for the given escaped path.
+// It fails if the escaped path is invalid or describes an invalid path.
+func UnescapePath(escaped string) (path string, err error) {
+ path, ok := unescapeString(escaped)
+ if !ok {
+ return "", fmt.Errorf("invalid escaped module path %q", escaped)
+ }
+ if err := CheckPath(path); err != nil {
+ return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
+ }
+ return path, nil
+}
+
+// UnescapeVersion returns the version string for the given escaped version.
+// It fails if the escaped form is invalid or describes an invalid version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func UnescapeVersion(escaped string) (v string, err error) {
+ v, ok := unescapeString(escaped)
+ if !ok {
+ return "", fmt.Errorf("invalid escaped version %q", escaped)
+ }
+ if err := checkElem(v, true); err != nil {
+ return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
+ }
+ return v, nil
+}
+
+func unescapeString(escaped string) (string, bool) {
+ var buf []byte
+
+ bang := false
+ for _, r := range escaped {
+ if r >= utf8.RuneSelf {
+ return "", false
+ }
+ if bang {
+ bang = false
+ if r < 'a' || 'z' < r {
+ return "", false
+ }
+ buf = append(buf, byte(r+'A'-'a'))
+ continue
+ }
+ if r == '!' {
+ bang = true
+ continue
+ }
+ if 'A' <= r && r <= 'Z' {
+ return "", false
+ }
+ buf = append(buf, byte(r))
+ }
+ if bang {
+ return "", false
+ }
+ return string(buf), true
+}
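+
+// exampleUnescape is an editorial sketch, not part of the upstream file:
+// unescaping rejects forms that the escaping loop above can never produce.
+func exampleUnescape() {
+	v, err := UnescapeVersion("v1.0.0-!rc") // "!r" decodes to 'R'
+	// v == "v1.0.0-Rc", err == nil
+	_, _ = v, err
+
+	_, ok := unescapeString("has!0bang") // ok == false: '!' must precede a lowercase letter
+	_ = ok
+}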
diff --git a/libgo/go/golang.org/x/mod/semver/semver.go b/libgo/go/golang.org/x/mod/semver/semver.go
new file mode 100644
index 0000000..2988e3c
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/semver/semver.go
@@ -0,0 +1,388 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+// parsed is the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+ err string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+ pv, ok1 := parse(v)
+ pw, ok2 := parse(w)
+ if !ok1 && !ok2 {
+ return 0
+ }
+ if !ok1 {
+ return -1
+ }
+ if !ok2 {
+ return +1
+ }
+ if c := compareInt(pv.major, pw.major); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.minor, pw.minor); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.patch, pw.patch); c != 0 {
+ return c
+ }
+ return comparePrerelease(pv.prerelease, pw.prerelease)
+}
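+
+// exampleCompare is an editorial sketch, not part of the upstream file:
+// it shows precedence ordering, including the rule that an invalid
+// version sorts below any valid one.
+func exampleCompare() {
+	_ = Compare("v1.0.0-alpha", "v1.0.0") // -1: a prerelease precedes the release
+	_ = Compare("v1.2", "v1.2.0")         // 0: v1.2 is shorthand for v1.2.0
+	_ = Compare("1.2.3", "v0.0.1")        // -1: "1.2.3" lacks the "v" prefix, so it is invalid
+}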
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+func Max(v, w string) string {
+ v = Canonical(v)
+ w = Canonical(w)
+ if Compare(v, w) > 0 {
+ return v
+ }
+ return w
+}
+
+func parse(v string) (p parsed, ok bool) {
+ if v == "" || v[0] != 'v' {
+ p.err = "missing v prefix"
+ return
+ }
+ p.major, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad major version"
+ return
+ }
+ if v == "" {
+ p.minor = "0"
+ p.patch = "0"
+ p.short = ".0.0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad minor prefix"
+ ok = false
+ return
+ }
+ p.minor, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad minor version"
+ return
+ }
+ if v == "" {
+ p.patch = "0"
+ p.short = ".0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad patch prefix"
+ ok = false
+ return
+ }
+ p.patch, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad patch version"
+ return
+ }
+ if len(v) > 0 && v[0] == '-' {
+ p.prerelease, v, ok = parsePrerelease(v)
+ if !ok {
+ p.err = "bad prerelease"
+ return
+ }
+ }
+ if len(v) > 0 && v[0] == '+' {
+ p.build, v, ok = parseBuild(v)
+ if !ok {
+ p.err = "bad build"
+ return
+ }
+ }
+ if v != "" {
+ p.err = "junk on end"
+ ok = false
+ return
+ }
+ ok = true
+ return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+ if v == "" {
+ return
+ }
+ if v[0] < '0' || '9' < v[0] {
+ return
+ }
+ i := 1
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ if v[0] == '0' && i != 1 {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+ // "A pre-release version MAY be denoted by appending a hyphen and
+ // a series of dot separated identifiers immediately following the patch version.
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+ if v == "" || v[0] != '-' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) && v[i] != '+' {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+ if v == "" || v[0] != '+' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v)
+}
+
+func compareInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func comparePrerelease(x, y string) int {
+ // "When major, minor, and patch are equal, a pre-release version has
+ // lower precedence than a normal version.
+ // Example: 1.0.0-alpha < 1.0.0.
+ // Precedence for two pre-release versions with the same major, minor,
+ // and patch version MUST be determined by comparing each dot separated
+ // identifier from left to right until a difference is found as follows:
+ // identifiers consisting of only digits are compared numerically and
+ // identifiers with letters or hyphens are compared lexically in ASCII
+ // sort order. Numeric identifiers always have lower precedence than
+ // non-numeric identifiers. A larger set of pre-release fields has a
+ // higher precedence than a smaller set, if all of the preceding
+ // identifiers are equal.
+ // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+ // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+ if x == y {
+ return 0
+ }
+ if x == "" {
+ return +1
+ }
+ if y == "" {
+ return -1
+ }
+ for x != "" && y != "" {
+ x = x[1:] // skip - or .
+ y = y[1:] // skip - or .
+ var dx, dy string
+ dx, x = nextIdent(x)
+ dy, y = nextIdent(y)
+ if dx != dy {
+ ix := isNum(dx)
+ iy := isNum(dy)
+ if ix != iy {
+ if ix {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if ix {
+ if len(dx) < len(dy) {
+ return -1
+ }
+ if len(dx) > len(dy) {
+ return +1
+ }
+ }
+ if dx < dy {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ }
+ if x == "" {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func nextIdent(x string) (dx, rest string) {
+ i := 0
+ for i < len(x) && x[i] != '.' {
+ i++
+ }
+ return x[:i], x[i:]
+}
diff --git a/libgo/go/golang.org/x/mod/sumdb/cache.go b/libgo/go/golang.org/x/mod/sumdb/cache.go
new file mode 100644
index 0000000..629e591
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/cache.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parallel cache.
+// This file is copied from cmd/go/internal/par.
+
+package sumdb
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// parCache runs an action once per key and caches the result.
+type parCache struct {
+ m sync.Map
+}
+
+type cacheEntry struct {
+ done uint32
+ mu sync.Mutex
+ result interface{}
+}
+
+// Do calls the function f if and only if Do is being called for the first time with this key.
+// No call to Do with a given key returns until the one call to f returns.
+// Do returns the value returned by the one call to f.
+func (c *parCache) Do(key interface{}, f func() interface{}) interface{} {
+ entryIface, ok := c.m.Load(key)
+ if !ok {
+ entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry))
+ }
+ e := entryIface.(*cacheEntry)
+ if atomic.LoadUint32(&e.done) == 0 {
+ e.mu.Lock()
+ if atomic.LoadUint32(&e.done) == 0 {
+ e.result = f()
+ atomic.StoreUint32(&e.done, 1)
+ }
+ e.mu.Unlock()
+ }
+ return e.result
+}
+
+// Get returns the cached result associated with key.
+// It returns nil if there is no such result.
+// If the result for key is being computed, Get does not wait for the computation to finish.
+func (c *parCache) Get(key interface{}) interface{} {
+ entryIface, ok := c.m.Load(key)
+ if !ok {
+ return nil
+ }
+ e := entryIface.(*cacheEntry)
+ if atomic.LoadUint32(&e.done) == 0 {
+ return nil
+ }
+ return e.result
+}
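+
+// exampleParCache is an editorial sketch, not part of the upstream file:
+// Do deduplicates work for a key, and Get peeks at a finished result
+// without blocking.
+func exampleParCache() {
+	var c parCache
+	v := c.Do("k", func() interface{} { return 42 }) // runs f once for "k"
+	w := c.Do("k", func() interface{} { return 99 }) // reuses the cached 42
+	r := c.Get("k")                                  // 42, since the computation is done
+	_, _, _ = v, w, r
+}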
diff --git a/libgo/go/golang.org/x/mod/sumdb/client.go b/libgo/go/golang.org/x/mod/sumdb/client.go
new file mode 100644
index 0000000..70dd56f
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/client.go
@@ -0,0 +1,671 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sumdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb/note"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+// A ClientOps provides the external operations
+// (file caching, HTTP fetches, and so on) needed by the Client.
+// The methods must be safe for concurrent use by multiple goroutines.
+type ClientOps interface {
+ // ReadRemote reads and returns the content served at the given path
+ // on the remote database server. The path begins with "/lookup" or "/tile/",
+ // and there is no need to parse the path in any way.
+ // It is the implementation's responsibility to turn that path into a full URL
+ // and make the HTTP request. ReadRemote should return an error for
+ // any non-200 HTTP response status.
+ ReadRemote(path string) ([]byte, error)
+
+ // ReadConfig reads and returns the content of the named configuration file.
+ // There are only a fixed set of configuration files.
+ //
+ // "key" returns a file containing the verifier key for the server.
+ //
+ // serverName + "/latest" returns a file containing the latest known
+ // signed tree from the server.
+ // To signal that the client wishes to start with an "empty" signed tree,
+ // ReadConfig can return a successful empty result (0 bytes of data).
+ ReadConfig(file string) ([]byte, error)
+
+ // WriteConfig updates the content of the named configuration file,
+ // changing it from the old []byte to the new []byte.
+ // If the old []byte does not match the stored configuration,
+ // WriteConfig must return ErrWriteConflict.
+ // Otherwise, WriteConfig should atomically replace old with new.
+ // The "key" configuration file is never written using WriteConfig.
+ WriteConfig(file string, old, new []byte) error
+
+ // ReadCache reads and returns the content of the named cache file.
+ // Any returned error will be treated as equivalent to the file not existing.
+ // There can be arbitrarily many cache files, such as:
+ // serverName/lookup/pkg@version
+ // serverName/tile/8/1/x123/456
+ ReadCache(file string) ([]byte, error)
+
+ // WriteCache writes the named cache file.
+ WriteCache(file string, data []byte)
+
+	// Log prints the given log message (such as with log.Print).
+ Log(msg string)
+
+ // SecurityError prints the given security error log message.
+ // The Client returns ErrSecurity from any operation that invokes SecurityError,
+ // but the return value is mainly for testing. In a real program,
+ // SecurityError should typically print the message and call log.Fatal or os.Exit.
+ SecurityError(msg string)
+}
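+
+// memOps is an editorial sketch, not part of the upstream file: a minimal
+// in-memory ClientOps that makes the contract above concrete. A real
+// implementation would back ReadRemote with HTTP requests and the
+// config/cache methods with files on disk.
+type memOps struct {
+	mu                    sync.Mutex
+	remote, config, cache map[string][]byte
+}
+
+func newMemOps() *memOps {
+	return &memOps{
+		remote: make(map[string][]byte),
+		config: make(map[string][]byte),
+		cache:  make(map[string][]byte),
+	}
+}
+
+func (m *memOps) ReadRemote(path string) ([]byte, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if data, ok := m.remote[path]; ok {
+		return data, nil
+	}
+	return nil, fmt.Errorf("remote %s: not found", path)
+}
+
+func (m *memOps) ReadConfig(file string) ([]byte, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.config[file], nil // an empty result means "start from an empty tree"
+}
+
+func (m *memOps) WriteConfig(file string, old, new []byte) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if !bytes.Equal(m.config[file], old) {
+		return ErrWriteConflict // the compare-and-swap failed
+	}
+	m.config[file] = new
+	return nil
+}
+
+func (m *memOps) ReadCache(file string) ([]byte, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if data, ok := m.cache[file]; ok {
+		return data, nil
+	}
+	return nil, errors.New("cache miss")
+}
+
+func (m *memOps) WriteCache(file string, data []byte) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.cache[file] = data
+}
+
+func (m *memOps) Log(msg string) {}
+
+func (m *memOps) SecurityError(msg string) { panic(msg) }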
+
+// ErrWriteConflict signals a write conflict during Client.WriteConfig.
+var ErrWriteConflict = errors.New("write conflict")
+
+// ErrSecurity is returned by Client operations that invoke Client.SecurityError.
+var ErrSecurity = errors.New("security error: misbehaving server")
+
+// A Client is a client connection to a checksum database.
+// All the methods are safe for simultaneous use by multiple goroutines.
+type Client struct {
+ ops ClientOps // access to operations in the external world
+
+ didLookup uint32
+
+ // one-time initialized data
+ initOnce sync.Once
+ initErr error // init error, if any
+ name string // name of accepted verifier
+ verifiers note.Verifiers // accepted verifiers (just one, but Verifiers for note.Open)
+ tileReader tileReader
+ tileHeight int
+ nosumdb string
+
+ record parCache // cache of record lookup, keyed by path@vers
+ tileCache parCache // cache of c.readTile, keyed by tile
+
+ latestMu sync.Mutex
+ latest tlog.Tree // latest known tree head
+ latestMsg []byte // encoded signed note for latest
+
+ tileSavedMu sync.Mutex
+ tileSaved map[tlog.Tile]bool // which tiles have been saved using c.ops.WriteCache already
+}
+
+// NewClient returns a new Client using the given ClientOps.
+func NewClient(ops ClientOps) *Client {
+ return &Client{
+ ops: ops,
+ }
+}
+
+// init initializes the client (if not already initialized)
+// and returns any initialization error.
+func (c *Client) init() error {
+ c.initOnce.Do(c.initWork)
+ return c.initErr
+}
+
+// initWork does the actual initialization work.
+func (c *Client) initWork() {
+ defer func() {
+ if c.initErr != nil {
+ c.initErr = fmt.Errorf("initializing sumdb.Client: %v", c.initErr)
+ }
+ }()
+
+ c.tileReader.c = c
+ if c.tileHeight == 0 {
+ c.tileHeight = 8
+ }
+ c.tileSaved = make(map[tlog.Tile]bool)
+
+ vkey, err := c.ops.ReadConfig("key")
+ if err != nil {
+ c.initErr = err
+ return
+ }
+ verifier, err := note.NewVerifier(strings.TrimSpace(string(vkey)))
+ if err != nil {
+ c.initErr = err
+ return
+ }
+ c.verifiers = note.VerifierList(verifier)
+ c.name = verifier.Name()
+
+ data, err := c.ops.ReadConfig(c.name + "/latest")
+ if err != nil {
+ c.initErr = err
+ return
+ }
+ if err := c.mergeLatest(data); err != nil {
+ c.initErr = err
+ return
+ }
+}
+
+// SetTileHeight sets the tile height for the Client.
+// If SetTileHeight is not called, the Client defaults to tile height 8.
+// SetTileHeight can be called at most once,
+// and it must be called before the first call to Lookup.
+func (c *Client) SetTileHeight(height int) {
+ if atomic.LoadUint32(&c.didLookup) != 0 {
+ panic("SetTileHeight used after Lookup")
+ }
+ if height <= 0 {
+ panic("invalid call to SetTileHeight")
+ }
+ if c.tileHeight != 0 {
+ panic("multiple calls to SetTileHeight")
+ }
+ c.tileHeight = height
+}
+
+// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client.
+// For any module path matching one of the patterns,
+// Lookup will return ErrGONOSUMDB.
+// SetGONOSUMDB can be called at most once,
+// and if so it must be called before the first call to Lookup.
+func (c *Client) SetGONOSUMDB(list string) {
+ if atomic.LoadUint32(&c.didLookup) != 0 {
+ panic("SetGONOSUMDB used after Lookup")
+ }
+ if c.nosumdb != "" {
+ panic("multiple calls to SetGONOSUMDB")
+ }
+ c.nosumdb = list
+}
+
+// ErrGONOSUMDB is returned by Lookup for paths that match
+// a pattern listed in the GONOSUMDB list (set by SetGONOSUMDB,
+// usually from the environment variable).
+var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)")
+
+func (c *Client) skip(target string) bool {
+ return globsMatchPath(c.nosumdb, target)
+}
+
+// globsMatchPath reports whether any path prefix of target
+// matches one of the glob patterns (as defined by path.Match)
+// in the comma-separated globs list.
+// It ignores any empty or malformed patterns in the list.
+func globsMatchPath(globs, target string) bool {
+ for globs != "" {
+ // Extract next non-empty glob in comma-separated list.
+ var glob string
+ if i := strings.Index(globs, ","); i >= 0 {
+ glob, globs = globs[:i], globs[i+1:]
+ } else {
+ glob, globs = globs, ""
+ }
+ if glob == "" {
+ continue
+ }
+
+ // A glob with N+1 path elements (N slashes) needs to be matched
+ // against the first N+1 path elements of target,
+ // which end just before the N+1'th slash.
+ n := strings.Count(glob, "/")
+ prefix := target
+ // Walk target, counting slashes, truncating at the N+1'th slash.
+ for i := 0; i < len(target); i++ {
+ if target[i] == '/' {
+ if n == 0 {
+ prefix = target[:i]
+ break
+ }
+ n--
+ }
+ }
+ if n > 0 {
+ // Not enough prefix elements.
+ continue
+ }
+ matched, _ := path.Match(glob, prefix)
+ if matched {
+ return true
+ }
+ }
+ return false
+}
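+
+// exampleGlobs is an editorial sketch, not part of the upstream file:
+// pattern matching as used for the GONOSUMDB list.
+func exampleGlobs() {
+	_ = globsMatchPath("*.corp.example.com,rsc.io/private", "rsc.io/private/quote") // true
+	_ = globsMatchPath("rsc.io/private", "rsc.io/quote")                            // false
+}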
+
+// Lookup returns the go.sum lines for the given module path and version.
+// The version may end in a /go.mod suffix, in which case Lookup returns
+// the go.sum lines for the module's go.mod-only hash.
+func (c *Client) Lookup(path, vers string) (lines []string, err error) {
+ atomic.StoreUint32(&c.didLookup, 1)
+
+ if c.skip(path) {
+ return nil, ErrGONOSUMDB
+ }
+
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("%s@%s: %v", path, vers, err)
+ }
+ }()
+
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+
+ // Prepare encoded cache filename / URL.
+ epath, err := module.EscapePath(path)
+ if err != nil {
+ return nil, err
+ }
+ evers, err := module.EscapeVersion(strings.TrimSuffix(vers, "/go.mod"))
+ if err != nil {
+ return nil, err
+ }
+ remotePath := "/lookup/" + epath + "@" + evers
+ file := c.name + remotePath
+
+ // Fetch the data.
+	// The record cache avoids redundant ReadCache/ReadRemote operations
+ // (especially since go.sum lines tend to come in pairs for a given
+ // path and version) and also avoids having multiple of the same
+ // request in flight at once.
+ type cached struct {
+ data []byte
+ err error
+ }
+ result := c.record.Do(file, func() interface{} {
+ // Try the on-disk cache, or else get from web.
+ writeCache := false
+ data, err := c.ops.ReadCache(file)
+ if err != nil {
+ data, err = c.ops.ReadRemote(remotePath)
+ if err != nil {
+ return cached{nil, err}
+ }
+ writeCache = true
+ }
+
+ // Validate the record before using it for anything.
+ id, text, treeMsg, err := tlog.ParseRecord(data)
+ if err != nil {
+ return cached{nil, err}
+ }
+ if err := c.mergeLatest(treeMsg); err != nil {
+ return cached{nil, err}
+ }
+ if err := c.checkRecord(id, text); err != nil {
+ return cached{nil, err}
+ }
+
+ // Now that we've validated the record,
+ // save it to the on-disk cache (unless that's where it came from).
+ if writeCache {
+ c.ops.WriteCache(file, data)
+ }
+
+ return cached{data, nil}
+ }).(cached)
+ if result.err != nil {
+ return nil, result.err
+ }
+
+ // Extract the lines for the specific version we want
+ // (with or without /go.mod).
+ prefix := path + " " + vers + " "
+ var hashes []string
+ for _, line := range strings.Split(string(result.data), "\n") {
+ if strings.HasPrefix(line, prefix) {
+ hashes = append(hashes, line)
+ }
+ }
+ return hashes, nil
+}
+
+// mergeLatest merges the tree head in msg
+// with the Client's current latest tree head,
+// ensuring the result is a consistent timeline.
+// If the result is inconsistent, mergeLatest calls c.ops.SecurityError
+// with a detailed security error message and then
+// (only if c.ops.SecurityError does not exit the program) returns ErrSecurity.
+// If the Client's current latest tree head moves forward,
+// mergeLatest updates the underlying configuration file as well,
+// taking care to merge any independent updates to that configuration.
+func (c *Client) mergeLatest(msg []byte) error {
+ // Merge msg into our in-memory copy of the latest tree head.
+ when, err := c.mergeLatestMem(msg)
+ if err != nil {
+ return err
+ }
+ if when != msgFuture {
+ // msg matched our present or was in the past.
+ // No change to our present, so no update of config file.
+ return nil
+ }
+
+ // Flush our extended timeline back out to the configuration file.
+ // If the configuration file has been updated in the interim,
+ // we need to merge any updates made there as well.
+ // Note that writeConfig is an atomic compare-and-swap.
+ for {
+ msg, err := c.ops.ReadConfig(c.name + "/latest")
+ if err != nil {
+ return err
+ }
+ when, err := c.mergeLatestMem(msg)
+ if err != nil {
+ return err
+ }
+ if when != msgPast {
+ // msg matched our present or was from the future,
+ // and now our in-memory copy matches.
+ return nil
+ }
+
+ // msg (== config) is in the past, so we need to update it.
+ c.latestMu.Lock()
+ latestMsg := c.latestMsg
+ c.latestMu.Unlock()
+ if err := c.ops.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict {
+ // Success or a non-write-conflict error.
+ return err
+ }
+ }
+}
+
+const (
+ msgPast = 1 + iota
+ msgNow
+ msgFuture
+)
+
+// mergeLatestMem is like mergeLatest but is only concerned with
+// updating the in-memory copy of the latest tree head (c.latest)
+// not the configuration file.
+// The when result explains when msg happened relative to our
+// previous idea of c.latest:
+// msgPast means msg was from before c.latest,
+// msgNow means msg was exactly c.latest, and
+// msgFuture means msg was from after c.latest, which has now been updated.
+func (c *Client) mergeLatestMem(msg []byte) (when int, err error) {
+ if len(msg) == 0 {
+ // Accept empty msg as the unsigned, empty timeline.
+ c.latestMu.Lock()
+ latest := c.latest
+ c.latestMu.Unlock()
+ if latest.N == 0 {
+ return msgNow, nil
+ }
+ return msgPast, nil
+ }
+
+ note, err := note.Open(msg, c.verifiers)
+ if err != nil {
+ return 0, fmt.Errorf("reading tree note: %v\nnote:\n%s", err, msg)
+ }
+ tree, err := tlog.ParseTree([]byte(note.Text))
+ if err != nil {
+ return 0, fmt.Errorf("reading tree: %v\ntree:\n%s", err, note.Text)
+ }
+
+ // Other lookups may be calling mergeLatest with other heads,
+ // so c.latest is changing underfoot. We don't want to hold the
+	// c.latestMu lock during tile fetches, so loop trying to update c.latest.
+ c.latestMu.Lock()
+ latest := c.latest
+ latestMsg := c.latestMsg
+ c.latestMu.Unlock()
+
+ for {
+ // If the tree head looks old, check that it is on our timeline.
+ if tree.N <= latest.N {
+ if err := c.checkTrees(tree, msg, latest, latestMsg); err != nil {
+ return 0, err
+ }
+ if tree.N < latest.N {
+ return msgPast, nil
+ }
+ return msgNow, nil
+ }
+
+ // The tree head looks new. Check that we are on its timeline and try to move our timeline forward.
+ if err := c.checkTrees(latest, latestMsg, tree, msg); err != nil {
+ return 0, err
+ }
+
+ // Install our msg if possible.
+ // Otherwise we will go around again.
+ c.latestMu.Lock()
+ installed := false
+ if c.latest == latest {
+ installed = true
+ c.latest = tree
+ c.latestMsg = msg
+ } else {
+ latest = c.latest
+ latestMsg = c.latestMsg
+ }
+ c.latestMu.Unlock()
+
+ if installed {
+ return msgFuture, nil
+ }
+ }
+}
+
+// checkTrees checks that older (from olderNote) is contained in newer (from newerNote).
+// If an error occurs, such as malformed data or a network problem, checkTrees returns that error.
+// If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed
+// message, passes it to c.ops.SecurityError, and returns ErrSecurity.
+func (c *Client) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error {
+ thr := tlog.TileHashReader(newer, &c.tileReader)
+ h, err := tlog.TreeHash(older.N, thr)
+ if err != nil {
+ if older.N == newer.N {
+ return fmt.Errorf("checking tree#%d: %v", older.N, err)
+ }
+ return fmt.Errorf("checking tree#%d against tree#%d: %v", older.N, newer.N, err)
+ }
+ if h == older.Hash {
+ return nil
+ }
+
+ // Detected a fork in the tree timeline.
+ // Start by reporting the inconsistent signed tree notes.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "SECURITY ERROR\n")
+ fmt.Fprintf(&buf, "go.sum database server misbehavior detected!\n\n")
+ indent := func(b []byte) []byte {
+ return bytes.Replace(b, []byte("\n"), []byte("\n\t"), -1)
+ }
+ fmt.Fprintf(&buf, "old database:\n\t%s\n", indent(olderNote))
+ fmt.Fprintf(&buf, "new database:\n\t%s\n", indent(newerNote))
+
+ // The notes alone are not enough to prove the inconsistency.
+ // We also need to show that the newer note's tree hash for older.N
+ // does not match older.Hash. The consumer of this report could
+ // of course consult the server to try to verify the inconsistency,
+ // but we are holding all the bits we need to prove it right now,
+ // so we might as well print them and make the report not depend
+ // on the continued availability of the misbehaving server.
+ // Preparing this data only reuses the tiled hashes needed for
+ // tlog.TreeHash(older.N, thr) above, so assuming thr is caching tiles,
+// there are no new accesses to the server here, and these operations cannot fail.
+ fmt.Fprintf(&buf, "proof of misbehavior:\n\t%v", h)
+ if p, err := tlog.ProveTree(newer.N, older.N, thr); err != nil {
+ fmt.Fprintf(&buf, "\tinternal error: %v\n", err)
+ } else if err := tlog.CheckTree(p, newer.N, newer.Hash, older.N, h); err != nil {
+ fmt.Fprintf(&buf, "\tinternal error: generated inconsistent proof\n")
+ } else {
+ for _, h := range p {
+ fmt.Fprintf(&buf, "\n\t%v", h)
+ }
+ }
+ c.ops.SecurityError(buf.String())
+ return ErrSecurity
+}
+
+// checkRecord checks that record #id's hash matches data.
+func (c *Client) checkRecord(id int64, data []byte) error {
+ c.latestMu.Lock()
+ latest := c.latest
+ c.latestMu.Unlock()
+
+ if id >= latest.N {
+ return fmt.Errorf("cannot validate record %d in tree of size %d", id, latest.N)
+ }
+ hashes, err := tlog.TileHashReader(latest, &c.tileReader).ReadHashes([]int64{tlog.StoredHashIndex(0, id)})
+ if err != nil {
+ return err
+ }
+ if hashes[0] == tlog.RecordHash(data) {
+ return nil
+ }
+ return fmt.Errorf("cannot authenticate record data in server response")
+}
+
+// tileReader is a *Client wrapper that implements tlog.TileReader.
+// The separate type avoids exposing the ReadTiles and SaveTiles
+// methods on Client itself.
+type tileReader struct {
+ c *Client
+}
+
+func (r *tileReader) Height() int {
+ return r.c.tileHeight
+}
+
+// ReadTiles reads and returns the requested tiles,
+// either from the on-disk cache or the server.
+func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) {
+ // Read all the tiles in parallel.
+ data := make([][]byte, len(tiles))
+ errs := make([]error, len(tiles))
+ var wg sync.WaitGroup
+ for i, tile := range tiles {
+ wg.Add(1)
+ go func(i int, tile tlog.Tile) {
+ defer wg.Done()
+ data[i], errs[i] = r.c.readTile(tile)
+ }(i, tile)
+ }
+ wg.Wait()
+
+ for _, err := range errs {
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return data, nil
+}
+
+// tileCacheKey returns the cache key for the tile.
+func (c *Client) tileCacheKey(tile tlog.Tile) string {
+ return c.name + "/" + tile.Path()
+}
+
+// tileRemotePath returns the remote path for the tile.
+func (c *Client) tileRemotePath(tile tlog.Tile) string {
+ return "/" + tile.Path()
+}
+
+// readTile reads a single tile, either from the on-disk cache or the server.
+func (c *Client) readTile(tile tlog.Tile) ([]byte, error) {
+ type cached struct {
+ data []byte
+ err error
+ }
+
+ result := c.tileCache.Do(tile, func() interface{} {
+ // Try the requested tile in on-disk cache.
+ data, err := c.ops.ReadCache(c.tileCacheKey(tile))
+ if err == nil {
+ c.markTileSaved(tile)
+ return cached{data, nil}
+ }
+
+ // Try the full tile in on-disk cache (if requested tile not already full).
+ // We only save authenticated tiles to the on-disk cache,
+ // so the recreated prefix is equally authenticated.
+ full := tile
+ full.W = 1 << uint(tile.H)
+ if tile != full {
+ data, err := c.ops.ReadCache(c.tileCacheKey(full))
+ if err == nil {
+ c.markTileSaved(tile) // don't save tile later; we already have full
+ return cached{data[:len(data)/full.W*tile.W], nil}
+ }
+ }
+
+ // Try requested tile from server.
+ data, err = c.ops.ReadRemote(c.tileRemotePath(tile))
+ if err == nil {
+ return cached{data, nil}
+ }
+
+ // Try full tile on server.
+ // If the partial tile does not exist, it should be because
+ // the tile has been completed and only the complete one
+ // is available.
+ if tile != full {
+ data, err := c.ops.ReadRemote(c.tileRemotePath(full))
+ if err == nil {
+ // Note: We could save the full tile in the on-disk cache here,
+ // but we don't know if it is valid yet, and we will only find out
+ // about the partial data, not the full data. So let SaveTiles
+ // save the partial tile, and we'll just refetch the full tile later
+ // once we can validate more (or all) of it.
+ return cached{data[:len(data)/full.W*tile.W], nil}
+ }
+ }
+
+ // Nothing worked.
+ // Return the error from the server fetch for the requested (not full) tile.
+ return cached{nil, err}
+ }).(cached)
+
+ return result.data, result.err
+}
+
+// markTileSaved records that tile is already present in the on-disk cache,
+// so that a future SaveTiles for that tile can be ignored.
+func (c *Client) markTileSaved(tile tlog.Tile) {
+ c.tileSavedMu.Lock()
+ c.tileSaved[tile] = true
+ c.tileSavedMu.Unlock()
+}
+
+// SaveTiles saves the now validated tiles.
+func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) {
+ c := r.c
+
+ // Determine which tiles need saving.
+ // (Tiles that came from the cache need not be saved back.)
+ save := make([]bool, len(tiles))
+ c.tileSavedMu.Lock()
+ for i, tile := range tiles {
+ if !c.tileSaved[tile] {
+ save[i] = true
+ c.tileSaved[tile] = true
+ }
+ }
+ c.tileSavedMu.Unlock()
+
+ for i, tile := range tiles {
+ if save[i] {
+ // If WriteCache fails here (out of disk space? i/o error?),
+ // c.tileSaved[tile] is still true and we will not try to write it again.
+			// The next run may redownload the tile and succeed in writing it.
+ c.ops.WriteCache(c.name+"/"+tile.Path(), data[i])
+ }
+ }
+}
diff --git a/libgo/go/golang.org/x/mod/sumdb/dirhash/hash.go b/libgo/go/golang.org/x/mod/sumdb/dirhash/hash.go
new file mode 100644
index 0000000..ef5df6f
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/dirhash/hash.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dirhash defines hashes over directory trees.
+// These hashes are recorded in go.sum files and in the Go checksum database,
+// to allow verifying that a newly-downloaded module has the expected content.
+package dirhash
+
+import (
+ "archive/zip"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// DefaultHash is the default hash function used in new go.sum entries.
+var DefaultHash Hash = Hash1
+
+// A Hash is a directory hash function.
+// It accepts a list of files along with a function that opens the content of each file.
+// It opens, reads, hashes, and closes each file and returns the overall directory hash.
+type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error)
+
+// Hash1 is the "h1:" directory hash function, using SHA-256.
+//
+// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary
+// prepared as if by the Unix command:
+//
+// find . -type f | sort | sha256sum
+//
+// More precisely, the hashed summary contains a single line for each file in the list,
+// ordered by sort.Strings applied to the file names, where each line consists of
+// the hexadecimal SHA-256 hash of the file content,
+// two spaces (U+0020), the file name, and a newline (U+000A).
+//
+// File names with newlines (U+000A) are disallowed.
+func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) {
+ h := sha256.New()
+ files = append([]string(nil), files...)
+ sort.Strings(files)
+ for _, file := range files {
+ if strings.Contains(file, "\n") {
+ return "", errors.New("dirhash: filenames with newlines are not supported")
+ }
+ r, err := open(file)
+ if err != nil {
+ return "", err
+ }
+ hf := sha256.New()
+ _, err = io.Copy(hf, r)
+ r.Close()
+ if err != nil {
+ return "", err
+ }
+ fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file)
+ }
+ return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
+}
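+
+// Editorial sketch, not part of the upstream file: hashing two in-memory
+// files with Hash1. The nopCloser helper exists only for this sketch.
+type nopCloser struct{ io.Reader }
+
+func (nopCloser) Close() error { return nil }
+
+func exampleHash1() (string, error) {
+	content := map[string]string{
+		"m@v1.0.0/go.mod": "module m\n",
+		"m@v1.0.0/m.go":   "package m\n",
+	}
+	open := func(name string) (io.ReadCloser, error) {
+		return nopCloser{strings.NewReader(content[name])}, nil
+	}
+	// The hashed summary is one line per file, sorted by name:
+	// "<sha256(content)>  <name>\n" for each of the two files.
+	return Hash1([]string{"m@v1.0.0/go.mod", "m@v1.0.0/m.go"}, open)
+}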
+
+// HashDir returns the hash of the local file system directory dir,
+// replacing the directory name itself with prefix in the file names
+// used in the hash function.
+func HashDir(dir, prefix string, hash Hash) (string, error) {
+ files, err := DirFiles(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ osOpen := func(name string) (io.ReadCloser, error) {
+ return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix)))
+ }
+ return hash(files, osOpen)
+}
+
+// DirFiles returns the list of files in the tree rooted at dir,
+// replacing the directory name dir with prefix in each name.
+// The resulting names always use forward slashes.
+func DirFiles(dir, prefix string) ([]string, error) {
+ var files []string
+ dir = filepath.Clean(dir)
+ err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ rel := file
+ if dir != "." {
+ rel = file[len(dir)+1:]
+ }
+ f := filepath.Join(prefix, rel)
+ files = append(files, filepath.ToSlash(f))
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return files, nil
+}
+
+// HashZip returns the hash of the file content in the named zip file.
+// Only the file names and their contents are included in the hash:
+// the exact zip file format encoding, compression method,
+// per-file modification times, and other metadata are ignored.
+func HashZip(zipfile string, hash Hash) (string, error) {
+ z, err := zip.OpenReader(zipfile)
+ if err != nil {
+ return "", err
+ }
+ defer z.Close()
+ var files []string
+ zfiles := make(map[string]*zip.File)
+ for _, file := range z.File {
+ files = append(files, file.Name)
+ zfiles[file.Name] = file
+ }
+ zipOpen := func(name string) (io.ReadCloser, error) {
+ f := zfiles[name]
+ if f == nil {
+ return nil, fmt.Errorf("file %q not found in zip", name) // should never happen
+ }
+ return f.Open()
+ }
+ return hash(files, zipOpen)
+}
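As a sketch of typical use (the zip path and directory names below are placeholders), HashZip and HashDir should agree when the zip and the extracted tree hold the same content under the same prefix:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/mod/sumdb/dirhash"
    )

    func main() {
        hz, err := dirhash.HashZip("example.com_m@v1.0.0.zip", dirhash.DefaultHash)
        if err != nil {
            log.Fatal(err)
        }
        hd, err := dirhash.HashDir("m@v1.0.0", "example.com/m@v1.0.0", dirhash.DefaultHash)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(hz == hd) // true when the contents match
    }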
diff --git a/libgo/go/golang.org/x/mod/sumdb/note/note.go b/libgo/go/golang.org/x/mod/sumdb/note/note.go
new file mode 100644
index 0000000..3c8e67b
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/note/note.go
@@ -0,0 +1,681 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package note defines the notes signed by the Go module database server.
+//
+// This package is part of a DRAFT of what the Go module database server will look like.
+// Do not assume the details here are final!
+//
+// A note is text signed by one or more server keys.
+// The text should be ignored unless the note is signed by
+// a trusted server key and the signature has been verified
+// using the server's public key.
+//
+// A server's public key is identified by a name, typically the "host[/path]"
+// giving the base URL of the server's transparency log.
+// The syntactic restrictions on a name are that it be non-empty,
+// well-formed UTF-8 containing neither Unicode spaces nor plus signs (U+002B).
+//
+// A Go module database server signs texts using public key cryptography.
+// A given server may have multiple public keys, each
+// identified by the first 32 bits of the SHA-256 hash of
+// the concatenation of the server name, a newline, and
+// the encoded public key.
+//
+// Verifying Notes
+//
+// A Verifier allows verification of signatures by one server public key.
+// It can report the name of the server and the uint32 hash of the key,
+// and it can verify a purported signature by that key.
+//
+// The standard implementation of a Verifier is constructed
+// by NewVerifier starting from a verifier key, which is a
+// plain text string of the form "<name>+<hash>+<keydata>".
+//
+// A Verifiers allows looking up a Verifier by the combination
+// of server name and key hash.
+//
+// The standard implementation of a Verifiers is constructed
+// by VerifierList from a list of known verifiers.
+//
+// A Note represents a text with one or more signatures.
+// An implementation can reject a note with too many signatures
+// (for example, more than 100 signatures).
+//
+// A Signature represents a signature on a note, verified or not.
+//
+// The Open function takes as input a signed message
+// and a set of known verifiers. It decodes and verifies
+// the message signatures and returns a Note structure
+// containing the message text and (verified or unverified) signatures.
+//
+// Signing Notes
+//
+// A Signer allows signing a text with a given key.
+// It can report the name of the server and the hash of the key
+// and can sign a raw text using that key.
+//
+// The standard implementation of a Signer is constructed
+// by NewSigner starting from an encoded signer key, which is a
+// plain text string of the form "PRIVATE+KEY+<name>+<hash>+<keydata>".
+// Anyone with an encoded signer key can sign messages using that key,
+// so it must be kept secret. The encoding begins with the literal text
+// "PRIVATE+KEY" to avoid confusion with the public server key.
+//
+// The Sign function takes as input a Note and a list of Signers
+// and returns an encoded, signed message.
+//
+// Signed Note Format
+//
+// A signed note consists of a text ending in newline (U+000A),
+// followed by a blank line (only a newline),
+// followed by one or more signature lines of this form:
+// em dash (U+2014), space (U+0020),
+// server name, space, base64-encoded signature, newline.
+//
+// Signed notes must be valid UTF-8 and must not contain any
+// ASCII control characters (those below U+0020) other than newline.
+//
+// A signature is a base64 encoding of 4+n bytes.
+//
+// The first four bytes in the signature are the uint32 key hash
+// stored in big-endian order, which is to say they are the first
+// four bytes of the truncated SHA-256 used to derive the key hash
+// in the first place.
+//
+// The remaining n bytes are the result of using the specified key
+// to sign the note text (including the final newline but not the
+// separating blank line).
+//
+// Generating Keys
+//
+// There is only one key type, Ed25519 with algorithm identifier 1.
+// New key types may be introduced in the future as needed,
+// although doing so will require deploying the new algorithms to all clients
+// before starting to depend on them for signatures.
+//
+// The GenerateKey function generates and returns a new signer
+// and corresponding verifier.
+//
+// Example
+//
+// Here is a well-formed signed note:
+//
+// If you think cryptography is the answer to your problem,
+// then you don't know what your problem is.
+//
+// — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=
+//
+// It can be constructed and displayed using:
+//
+// skey := "PRIVATE+KEY+PeterNeumann+c74f20a3+AYEKFALVFGyNhPJEMzD1QIDr+Y7hfZx09iUvxdXHKDFz"
+// text := "If you think cryptography is the answer to your problem,\n" +
+// "then you don't know what your problem is.\n"
+//
+// signer, err := note.NewSigner(skey)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// msg, err := note.Sign(&note.Note{Text: text}, signer)
+// if err != nil {
+// log.Fatal(err)
+// }
+// os.Stdout.Write(msg)
+//
+// The note's text is two lines, including the final newline,
+// and the text is purportedly signed by a server named
+// "PeterNeumann". (Although server names are canonically
+// base URLs, the only syntactic requirement is that they
+// not contain spaces or newlines).
+//
+// If Open is given access to a Verifiers including the
+// Verifier for this key, then it will succeed at verifying
+// the encoded message and returning the parsed Note:
+//
+// vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW"
+// msg := []byte("If you think cryptography is the answer to your problem,\n" +
+// "then you don't know what your problem is.\n" +
+// "\n" +
+// "— PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=\n")
+//
+// verifier, err := note.NewVerifier(vkey)
+// if err != nil {
+// log.Fatal(err)
+// }
+// verifiers := note.VerifierList(verifier)
+//
+// n, err := note.Open([]byte(msg), verifiers)
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Printf("%s (%08x):\n%s", n.Sigs[0].Name, n.Sigs[0].Hash, n.Text)
+//
+// You can add your own signature to this message by re-signing the note:
+//
+// skey, vkey, err := note.GenerateKey(rand.Reader, "EnochRoot")
+// if err != nil {
+// log.Fatal(err)
+// }
+// _ = vkey // give to verifiers
+//
+// me, err := note.NewSigner(skey)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// msg, err := note.Sign(n, me)
+// if err != nil {
+// log.Fatal(err)
+// }
+// os.Stdout.Write(msg)
+//
+// This will print a doubly-signed message, like:
+//
+// If you think cryptography is the answer to your problem,
+// then you don't know what your problem is.
+//
+// — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=
+// — EnochRoot rwz+eBzmZa0SO3NbfRGzPCpDckykFXSdeX+MNtCOXm2/5n2tiOHp+vAF1aGrQ5ovTG01oOTGwnWLox33WWd1RvMc+QQ=
+//
+package note
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/crypto/ed25519"
+)
+
+// A Verifier verifies messages signed with a specific key.
+type Verifier interface {
+ // Name returns the server name associated with the key.
+ Name() string
+
+ // KeyHash returns the key hash.
+ KeyHash() uint32
+
+ // Verify reports whether sig is a valid signature of msg.
+ Verify(msg, sig []byte) bool
+}
+
+// A Signer signs messages using a specific key.
+type Signer interface {
+ // Name returns the server name associated with the key.
+ Name() string
+
+ // KeyHash returns the key hash.
+ KeyHash() uint32
+
+ // Sign returns a signature for the given message.
+ Sign(msg []byte) ([]byte, error)
+}
+
+// keyHash computes the key hash for the given server name and encoded public key.
+func keyHash(name string, key []byte) uint32 {
+ h := sha256.New()
+ h.Write([]byte(name))
+ h.Write([]byte("\n"))
+ h.Write(key)
+ sum := h.Sum(nil)
+ return binary.BigEndian.Uint32(sum)
+}
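Written out standalone against the example key from the package comment, the derivation looks like this (the expected output follows from the doc example above):

    package main

    import (
        "crypto/sha256"
        "encoding/base64"
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Name and key data from the "PeterNeumann" example key.
        name := "PeterNeumann"
        key, _ := base64.StdEncoding.DecodeString("ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW")
        sum := sha256.Sum256(append(append([]byte(name), '\n'), key...))
        fmt.Printf("%08x\n", binary.BigEndian.Uint32(sum[:4])) // c74f20a3
    }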
+
+var (
+ errVerifierID = errors.New("malformed verifier id")
+ errVerifierAlg = errors.New("unknown verifier algorithm")
+ errVerifierHash = errors.New("invalid verifier hash")
+)
+
+const (
+ algEd25519 = 1
+)
+
+// isValidName reports whether name is valid.
+// It must be non-empty and not have any Unicode spaces or pluses.
+func isValidName(name string) bool {
+ return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+")
+}
+
+// NewVerifier constructs a new Verifier from an encoded verifier key.
+func NewVerifier(vkey string) (Verifier, error) {
+ name, vkey := chop(vkey, "+")
+ hash16, key64 := chop(vkey, "+")
+ hash, err1 := strconv.ParseUint(hash16, 16, 32)
+ key, err2 := base64.StdEncoding.DecodeString(key64)
+ if len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 {
+ return nil, errVerifierID
+ }
+ if uint32(hash) != keyHash(name, key) {
+ return nil, errVerifierHash
+ }
+
+ v := &verifier{
+ name: name,
+ hash: uint32(hash),
+ }
+
+ alg, key := key[0], key[1:]
+ switch alg {
+ default:
+ return nil, errVerifierAlg
+
+ case algEd25519:
+ if len(key) != 32 {
+ return nil, errVerifierID
+ }
+ v.verify = func(msg, sig []byte) bool {
+ return ed25519.Verify(key, msg, sig)
+ }
+ }
+
+ return v, nil
+}
+
+// chop chops s at the first instance of sep, if any,
+// and returns the text before and after sep.
+// If sep is not present, chop returns before = s and after = "".
+func chop(s, sep string) (before, after string) {
+ i := strings.Index(s, sep)
+ if i < 0 {
+ return s, ""
+ }
+ return s[:i], s[i+len(sep):]
+}
+
+// verifier is a trivial Verifier implementation.
+type verifier struct {
+ name string
+ hash uint32
+ verify func([]byte, []byte) bool
+}
+
+func (v *verifier) Name() string { return v.name }
+func (v *verifier) KeyHash() uint32 { return v.hash }
+func (v *verifier) Verify(msg, sig []byte) bool { return v.verify(msg, sig) }
+
+// NewSigner constructs a new Signer from an encoded signer key.
+func NewSigner(skey string) (Signer, error) {
+ priv1, skey := chop(skey, "+")
+ priv2, skey := chop(skey, "+")
+ name, skey := chop(skey, "+")
+ hash16, key64 := chop(skey, "+")
+ hash, err1 := strconv.ParseUint(hash16, 16, 32)
+ key, err2 := base64.StdEncoding.DecodeString(key64)
+ if priv1 != "PRIVATE" || priv2 != "KEY" || len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 {
+ return nil, errSignerID
+ }
+
+ // Note: hash is the hash of the public key and we have the private key.
+ // Must verify hash after deriving public key.
+
+ s := &signer{
+ name: name,
+ hash: uint32(hash),
+ }
+
+ var pubkey []byte
+
+ alg, key := key[0], key[1:]
+ switch alg {
+ default:
+ return nil, errSignerAlg
+
+ case algEd25519:
+ if len(key) != 32 {
+ return nil, errSignerID
+ }
+ key = ed25519.NewKeyFromSeed(key)
+ pubkey = append([]byte{algEd25519}, key[32:]...)
+ s.sign = func(msg []byte) ([]byte, error) {
+ return ed25519.Sign(key, msg), nil
+ }
+ }
+
+ if uint32(hash) != keyHash(name, pubkey) {
+ return nil, errSignerHash
+ }
+
+ return s, nil
+}
+
+var (
+ errSignerID = errors.New("malformed signer id")
+ errSignerAlg = errors.New("unknown signer algorithm")
+ errSignerHash = errors.New("invalid signer hash")
+)
+
+// signer is a trivial Signer implementation.
+type signer struct {
+ name string
+ hash uint32
+ sign func([]byte) ([]byte, error)
+}
+
+func (s *signer) Name() string { return s.name }
+func (s *signer) KeyHash() uint32 { return s.hash }
+func (s *signer) Sign(msg []byte) ([]byte, error) { return s.sign(msg) }
+
+// GenerateKey generates a signer and verifier key pair for a named server.
+// The signer key skey is private and must be kept secret.
+func GenerateKey(rand io.Reader, name string) (skey, vkey string, err error) {
+ pub, priv, err := ed25519.GenerateKey(rand)
+ if err != nil {
+ return "", "", err
+ }
+ pubkey := append([]byte{algEd25519}, pub...)
+ privkey := append([]byte{algEd25519}, priv.Seed()...)
+ h := keyHash(name, pubkey)
+
+ skey = fmt.Sprintf("PRIVATE+KEY+%s+%08x+%s", name, h, base64.StdEncoding.EncodeToString(privkey))
+ vkey = fmt.Sprintf("%s+%08x+%s", name, h, base64.StdEncoding.EncodeToString(pubkey))
+ return skey, vkey, nil
+}
+
+// NewEd25519VerifierKey returns an encoded verifier key using the given name
+// and Ed25519 public key.
+func NewEd25519VerifierKey(name string, key ed25519.PublicKey) (string, error) {
+ if len(key) != ed25519.PublicKeySize {
+ return "", fmt.Errorf("invalid public key size %d, expected %d", len(key), ed25519.PublicKeySize)
+ }
+
+ pubkey := append([]byte{algEd25519}, key...)
+ hash := keyHash(name, pubkey)
+
+ b64Key := base64.StdEncoding.EncodeToString(pubkey)
+ return fmt.Sprintf("%s+%08x+%s", name, hash, b64Key), nil
+}
+
+// A Verifiers is a collection of known verifier keys.
+type Verifiers interface {
+ // Verifier returns the Verifier associated with the key
+ // identified by the name and hash.
+ // If the name, hash pair is unknown, Verifier should return
+ // an UnknownVerifierError.
+ Verifier(name string, hash uint32) (Verifier, error)
+}
+
+// An UnknownVerifierError indicates that the given key is not known.
+// The Open function records signatures without associated verifiers as
+// unverified signatures.
+type UnknownVerifierError struct {
+ Name string
+ KeyHash uint32
+}
+
+func (e *UnknownVerifierError) Error() string {
+ return fmt.Sprintf("unknown key %s+%08x", e.Name, e.KeyHash)
+}
+
+// An ambiguousVerifierError indicates that the given name and hash
+// match multiple keys passed to VerifierList.
+// (If this happens, some malicious actor has taken control of the
+// verifier list, at which point we may as well give up entirely,
+// but we diagnose the problem instead.)
+type ambiguousVerifierError struct {
+ name string
+ hash uint32
+}
+
+func (e *ambiguousVerifierError) Error() string {
+ return fmt.Sprintf("ambiguous key %s+%08x", e.name, e.hash)
+}
+
+// VerifierList returns a Verifiers implementation that uses the given list of verifiers.
+func VerifierList(list ...Verifier) Verifiers {
+ m := make(verifierMap)
+ for _, v := range list {
+ k := nameHash{v.Name(), v.KeyHash()}
+ m[k] = append(m[k], v)
+ }
+ return m
+}
+
+type nameHash struct {
+ name string
+ hash uint32
+}
+
+type verifierMap map[nameHash][]Verifier
+
+func (m verifierMap) Verifier(name string, hash uint32) (Verifier, error) {
+ v, ok := m[nameHash{name, hash}]
+ if !ok {
+ return nil, &UnknownVerifierError{name, hash}
+ }
+ if len(v) > 1 {
+ return nil, &ambiguousVerifierError{name, hash}
+ }
+ return v[0], nil
+}
+
+// A Note is a text and signatures.
+type Note struct {
+ Text string // text of note
+ Sigs []Signature // verified signatures
+ UnverifiedSigs []Signature // unverified signatures
+}
+
+// A Signature is a single signature found in a note.
+type Signature struct {
+ // Name and Hash give the name and key hash
+ // for the key that generated the signature.
+ Name string
+ Hash uint32
+
+ // Base64 records the base64-encoded signature bytes.
+ Base64 string
+}
+
+// An UnverifiedNoteError indicates that the note
+// successfully parsed but had no verifiable signatures.
+type UnverifiedNoteError struct {
+ Note *Note
+}
+
+func (e *UnverifiedNoteError) Error() string {
+ return "note has no verifiable signatures"
+}
+
+// An InvalidSignatureError indicates that the given key was known
+// and the associated Verifier rejected the signature.
+type InvalidSignatureError struct {
+ Name string
+ Hash uint32
+}
+
+func (e *InvalidSignatureError) Error() string {
+ return fmt.Sprintf("invalid signature for key %s+%08x", e.Name, e.Hash)
+}
+
+var (
+ errMalformedNote = errors.New("malformed note")
+ errInvalidSigner = errors.New("invalid signer")
+
+ sigSplit = []byte("\n\n")
+ sigPrefix = []byte("— ")
+)
+
+// Open opens and parses the message msg, checking signatures from the known verifiers.
+//
+// For each signature in the message, Open calls known.Verifier to find a verifier.
+// If known.Verifier returns a verifier and the verifier accepts the signature,
+// Open records the signature in the returned note's Sigs field.
+// If known.Verifier returns a verifier but the verifier rejects the signature,
+// Open returns an InvalidSignatureError.
+// If known.Verifier returns an UnknownVerifierError,
+// Open records the signature in the returned note's UnverifiedSigs field.
+// If known.Verifier returns any other error, Open returns that error.
+//
+// If no known verifier has signed an otherwise valid note,
+// Open returns an UnverifiedNoteError.
+// In this case, the unverified note can be fetched from inside the error.
+func Open(msg []byte, known Verifiers) (*Note, error) {
+ if known == nil {
+ // Treat nil Verifiers as empty list, to produce useful error instead of crash.
+ known = VerifierList()
+ }
+
+ // Must have valid UTF-8 with no non-newline ASCII control characters.
+ for i := 0; i < len(msg); {
+ r, size := utf8.DecodeRune(msg[i:])
+ if r < 0x20 && r != '\n' || r == utf8.RuneError && size == 1 {
+ return nil, errMalformedNote
+ }
+ i += size
+ }
+
+ // Must end with signature block preceded by blank line.
+ split := bytes.LastIndex(msg, sigSplit)
+ if split < 0 {
+ return nil, errMalformedNote
+ }
+ text, sigs := msg[:split+1], msg[split+2:]
+ if len(sigs) == 0 || sigs[len(sigs)-1] != '\n' {
+ return nil, errMalformedNote
+ }
+
+ n := &Note{
+ Text: string(text),
+ }
+
+ // Parse and verify signatures.
+ // Ignore duplicate signatures.
+ seen := make(map[nameHash]bool)
+ seenUnverified := make(map[string]bool)
+ numSig := 0
+ for len(sigs) > 0 {
+ // Pull out next signature line.
+ // We know sigs[len(sigs)-1] == '\n', so IndexByte always finds one.
+ i := bytes.IndexByte(sigs, '\n')
+ line := sigs[:i]
+ sigs = sigs[i+1:]
+
+ if !bytes.HasPrefix(line, sigPrefix) {
+ return nil, errMalformedNote
+ }
+ line = line[len(sigPrefix):]
+ name, b64 := chop(string(line), " ")
+ sig, err := base64.StdEncoding.DecodeString(b64)
+ if err != nil || !isValidName(name) || b64 == "" || len(sig) < 5 {
+ return nil, errMalformedNote
+ }
+ hash := binary.BigEndian.Uint32(sig[0:4])
+ sig = sig[4:]
+
+ if numSig++; numSig > 100 {
+ // Avoid spending forever parsing a note with many signatures.
+ return nil, errMalformedNote
+ }
+
+ v, err := known.Verifier(name, hash)
+ if _, ok := err.(*UnknownVerifierError); ok {
+ // Drop repeated identical unverified signatures.
+ if seenUnverified[string(line)] {
+ continue
+ }
+ seenUnverified[string(line)] = true
+ n.UnverifiedSigs = append(n.UnverifiedSigs, Signature{Name: name, Hash: hash, Base64: b64})
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Drop repeated signatures by a single verifier.
+ if seen[nameHash{name, hash}] {
+ continue
+ }
+ seen[nameHash{name, hash}] = true
+
+ ok := v.Verify(text, sig)
+ if !ok {
+ return nil, &InvalidSignatureError{name, hash}
+ }
+
+ n.Sigs = append(n.Sigs, Signature{Name: name, Hash: hash, Base64: b64})
+ }
+
+ // Parsed and verified all the signatures.
+ if len(n.Sigs) == 0 {
+ return nil, &UnverifiedNoteError{n}
+ }
+ return n, nil
+}
+
+// Sign signs the note with the given signers and returns the encoded message.
+// The new signatures from signers are listed in the encoded message after
+// the existing signatures already present in n.Sigs.
+// If any signer uses the same key as an existing signature,
+// the existing signature is elided from the output.
+func Sign(n *Note, signers ...Signer) ([]byte, error) {
+ var buf bytes.Buffer
+ if !strings.HasSuffix(n.Text, "\n") {
+ return nil, errMalformedNote
+ }
+ buf.WriteString(n.Text)
+
+ // Prepare signatures.
+ var sigs bytes.Buffer
+ have := make(map[nameHash]bool)
+ for _, s := range signers {
+ name := s.Name()
+ hash := s.KeyHash()
+ have[nameHash{name, hash}] = true
+ if !isValidName(name) {
+ return nil, errInvalidSigner
+ }
+
+ sig, err := s.Sign(buf.Bytes()) // buf holds n.Text
+ if err != nil {
+ return nil, err
+ }
+
+ var hbuf [4]byte
+ binary.BigEndian.PutUint32(hbuf[:], hash)
+ b64 := base64.StdEncoding.EncodeToString(append(hbuf[:], sig...))
+ sigs.WriteString("— ")
+ sigs.WriteString(name)
+ sigs.WriteString(" ")
+ sigs.WriteString(b64)
+ sigs.WriteString("\n")
+ }
+
+ buf.WriteString("\n")
+
+ // Emit existing signatures not replaced by new ones.
+ for _, list := range [][]Signature{n.Sigs, n.UnverifiedSigs} {
+ for _, sig := range list {
+ name, hash := sig.Name, sig.Hash
+ if !isValidName(name) {
+ return nil, errMalformedNote
+ }
+ if have[nameHash{name, hash}] {
+ continue
+ }
+ // Double-check hash against base64.
+ raw, err := base64.StdEncoding.DecodeString(sig.Base64)
+ if err != nil || len(raw) < 4 || binary.BigEndian.Uint32(raw) != hash {
+ return nil, errMalformedNote
+ }
+ buf.WriteString("— ")
+ buf.WriteString(sig.Name)
+ buf.WriteString(" ")
+ buf.WriteString(sig.Base64)
+ buf.WriteString("\n")
+ }
+ }
+ buf.Write(sigs.Bytes())
+
+ return buf.Bytes(), nil
+}
diff --git a/libgo/go/golang.org/x/mod/sumdb/server.go b/libgo/go/golang.org/x/mod/sumdb/server.go
new file mode 100644
index 0000000..28866f1
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/server.go
@@ -0,0 +1,181 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sumdb implements the HTTP protocols for serving or accessing a module checksum database.
+package sumdb
+
+import (
+ "context"
+ "net/http"
+ "os"
+ "strings"
+
+ "golang.org/x/mod/internal/lazyregexp"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+// A ServerOps provides the external operations
+// (underlying database access and so on) needed by the Server.
+type ServerOps interface {
+ // Signed returns the signed hash of the latest tree.
+ Signed(ctx context.Context) ([]byte, error)
+
+ // ReadRecords returns the content for the n records id through id+n-1.
+ ReadRecords(ctx context.Context, id, n int64) ([][]byte, error)
+
+ // Lookup looks up a record for the given module,
+ // returning the record ID.
+ Lookup(ctx context.Context, m module.Version) (int64, error)
+
+ // ReadTileData reads the content of tile t.
+ // It is only invoked for hash tiles (t.L ≥ 0).
+ ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error)
+}
+
+// A Server is the checksum database HTTP server,
+// which implements http.Handler and should be invoked
+// to serve the paths listed in ServerPaths.
+type Server struct {
+ ops ServerOps
+}
+
+// NewServer returns a new Server using the given operations.
+func NewServer(ops ServerOps) *Server {
+ return &Server{ops: ops}
+}
+
+// ServerPaths are the URL paths the Server can (and should) serve.
+//
+// Typically a server will do:
+//
+// srv := sumdb.NewServer(ops)
+// for _, path := range sumdb.ServerPaths {
+// http.Handle(path, srv)
+// }
+//
+var ServerPaths = []string{
+ "/lookup/",
+ "/latest",
+ "/tile/",
+}
+
+var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`)
+
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ switch {
+ default:
+ http.NotFound(w, r)
+
+ case strings.HasPrefix(r.URL.Path, "/lookup/"):
+ mod := strings.TrimPrefix(r.URL.Path, "/lookup/")
+ if !modVerRE.MatchString(mod) {
+ http.Error(w, "invalid module@version syntax", http.StatusBadRequest)
+ return
+ }
+ i := strings.Index(mod, "@")
+ escPath, escVers := mod[:i], mod[i+1:]
+ path, err := module.UnescapePath(escPath)
+ if err != nil {
+ reportError(w, err)
+ return
+ }
+ vers, err := module.UnescapeVersion(escVers)
+ if err != nil {
+ reportError(w, err)
+ return
+ }
+ id, err := s.ops.Lookup(ctx, module.Version{Path: path, Version: vers})
+ if err != nil {
+ reportError(w, err)
+ return
+ }
+ records, err := s.ops.ReadRecords(ctx, id, 1)
+ if err != nil {
+ // This should never happen - the lookup says the record exists.
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if len(records) != 1 {
+ http.Error(w, "invalid record count returned by ReadRecords", http.StatusInternalServerError)
+ return
+ }
+ msg, err := tlog.FormatRecord(id, records[0])
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ signed, err := s.ops.Signed(ctx)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
+ w.Write(msg)
+ w.Write(signed)
+
+ case r.URL.Path == "/latest":
+ data, err := s.ops.Signed(ctx)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
+ w.Write(data)
+
+ case strings.HasPrefix(r.URL.Path, "/tile/"):
+ t, err := tlog.ParseTilePath(r.URL.Path[1:])
+ if err != nil {
+ http.Error(w, "invalid tile syntax", http.StatusBadRequest)
+ return
+ }
+ if t.L == -1 {
+ // Record data.
+ start := t.N << uint(t.H)
+ records, err := s.ops.ReadRecords(ctx, start, int64(t.W))
+ if err != nil {
+ reportError(w, err)
+ return
+ }
+ if len(records) != t.W {
+ http.Error(w, "invalid record count returned by ReadRecords", http.StatusInternalServerError)
+ return
+ }
+ var data []byte
+ for i, text := range records {
+ msg, err := tlog.FormatRecord(start+int64(i), text)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ data = append(data, msg...)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
+ w.Write(data)
+ return
+ }
+
+ data, err := s.ops.ReadTileData(ctx, t)
+ if err != nil {
+ reportError(w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Write(data)
+ }
+}
+
+// reportError reports err to w.
+// If it's a not-found, the reported error is 404.
+// Otherwise it is an internal server error.
+// The caller must only call reportError in contexts where
+// a not-found err should be reported as 404.
+func reportError(w http.ResponseWriter, err error) {
+ if os.IsNotExist(err) {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+}
diff --git a/libgo/go/golang.org/x/mod/sumdb/test.go b/libgo/go/golang.org/x/mod/sumdb/test.go
new file mode 100644
index 0000000..e4c166d
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/test.go
@@ -0,0 +1,124 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sumdb
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb/note"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+// NewTestServer constructs a new TestServer
+// that will sign its tree with the given signer key
+// (see golang.org/x/mod/sumdb/note)
+// and fetch new records as needed by calling gosum.
+func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer {
+ return &TestServer{signer: signer, gosum: gosum}
+}
+
+// A TestServer is an in-memory implementation of Server for testing.
+type TestServer struct {
+ signer string
+ gosum func(path, vers string) ([]byte, error)
+
+ mu sync.Mutex
+ hashes testHashes
+ records [][]byte
+ lookup map[string]int64
+}
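A sketch of exercising a TestServer directly, without HTTP; the signer name, module path, and canned go.sum line are placeholders:

    package main

    import (
        "context"
        "crypto/rand"
        "fmt"
        "log"

        "golang.org/x/mod/module"
        "golang.org/x/mod/sumdb"
        "golang.org/x/mod/sumdb/note"
    )

    func main() {
        skey, _, err := note.GenerateKey(rand.Reader, "example.test/sumdb")
        if err != nil {
            log.Fatal(err)
        }
        gosum := func(path, vers string) ([]byte, error) {
            // Canned go.sum lines; a real server would fetch and hash the module.
            return []byte(fmt.Sprintf("%s %s h1:FAKE=\n", path, vers)), nil
        }
        s := sumdb.NewTestServer(skey, gosum)

        ctx := context.Background()
        id, err := s.Lookup(ctx, module.Version{Path: "example.com/m", Version: "v1.0.0"})
        if err != nil {
            log.Fatal(err)
        }
        signed, err := s.Signed(ctx)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("record %d\n%s", id, signed)
    }

The same TestServer can be wrapped in NewServer and registered on the ServerPaths to serve the HTTP protocol end to end.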
+
+// testHashes implements tlog.HashReader, reading from a slice.
+type testHashes []tlog.Hash
+
+func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) {
+ var list []tlog.Hash
+ for _, id := range indexes {
+ list = append(list, h[id])
+ }
+ return list, nil
+}
+
+func (s *TestServer) Signed(ctx context.Context) ([]byte, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ size := int64(len(s.records))
+ h, err := tlog.TreeHash(size, s.hashes)
+ if err != nil {
+ return nil, err
+ }
+ text := tlog.FormatTree(tlog.Tree{N: size, Hash: h})
+ signer, err := note.NewSigner(s.signer)
+ if err != nil {
+ return nil, err
+ }
+ return note.Sign(&note.Note{Text: string(text)}, signer)
+}
+
+func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var list [][]byte
+ for i := int64(0); i < n; i++ {
+ if id+i >= int64(len(s.records)) {
+ return nil, fmt.Errorf("missing records")
+ }
+ list = append(list, s.records[id+i])
+ }
+ return list, nil
+}
+
+func (s *TestServer) Lookup(ctx context.Context, m module.Version) (int64, error) {
+ key := m.String()
+ s.mu.Lock()
+ id, ok := s.lookup[key]
+ s.mu.Unlock()
+ if ok {
+ return id, nil
+ }
+
+ // Look up module and compute go.sum lines.
+ data, err := s.gosum(m.Path, m.Version)
+ if err != nil {
+ return 0, err
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // We ran the fetch without the lock.
+ // If another fetch happened and committed, use it instead.
+ id, ok = s.lookup[key]
+ if ok {
+ return id, nil
+ }
+
+ // Add record.
+ id = int64(len(s.records))
+ s.records = append(s.records, data)
+ if s.lookup == nil {
+ s.lookup = make(map[string]int64)
+ }
+ s.lookup[key] = id
+ hashes, err := tlog.StoredHashesForRecordHash(id, tlog.RecordHash([]byte(data)), s.hashes)
+ if err != nil {
+ panic(err)
+ }
+ s.hashes = append(s.hashes, hashes...)
+
+ return id, nil
+}
+
+func (s *TestServer) ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return tlog.ReadTileData(t, s.hashes)
+}
diff --git a/libgo/go/golang.org/x/mod/sumdb/tlog/note.go b/libgo/go/golang.org/x/mod/sumdb/tlog/note.go
new file mode 100644
index 0000000..ce5353e
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/tlog/note.go
@@ -0,0 +1,135 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tlog
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Tree is a tree description, to be signed by a go.sum database server.
+type Tree struct {
+ N int64
+ Hash Hash
+}
+
+// FormatTree formats a tree description for inclusion in a note.
+//
+// The encoded form is three lines, each ending in a newline (U+000A):
+//
+// go.sum database tree
+// N
+// Hash
+//
+// where N is in decimal and Hash is in base64.
+//
+// A future backwards-compatible encoding may add additional lines,
+// which the parser can ignore.
+// A future backwards-incompatible encoding would use a different
+// first line (for example, "go.sum database tree v2").
+func FormatTree(tree Tree) []byte {
+ return []byte(fmt.Sprintf("go.sum database tree\n%d\n%s\n", tree.N, tree.Hash))
+}
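A round trip through FormatTree and ParseTree (defined next), using the zero hash purely for illustration:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/mod/sumdb/tlog"
    )

    func main() {
        tree := tlog.Tree{N: 2, Hash: tlog.Hash{}} // zero hash, illustration only
        text := tlog.FormatTree(tree)
        fmt.Printf("%s", text)
        // go.sum database tree
        // 2
        // AAAA... (base64 of the 32-byte zero hash)

        back, err := tlog.ParseTree(text)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(back == tree) // true
    }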
+
+var errMalformedTree = errors.New("malformed tree note")
+var treePrefix = []byte("go.sum database tree\n")
+
+// ParseTree parses a formatted tree root description.
+func ParseTree(text []byte) (tree Tree, err error) {
+ // The message looks like:
+ //
+ // go.sum database tree
+ // 2
+ // nND/nri/U0xuHUrYSy0HtMeal2vzD9V4k/BO79C+QeI=
+ //
+ // For forwards compatibility, extra text lines after the encoding are ignored.
+ if !bytes.HasPrefix(text, treePrefix) || bytes.Count(text, []byte("\n")) < 3 || len(text) > 1e6 {
+ return Tree{}, errMalformedTree
+ }
+
+ lines := strings.SplitN(string(text), "\n", 4)
+ n, err := strconv.ParseInt(lines[1], 10, 64)
+ if err != nil || n < 0 || lines[1] != strconv.FormatInt(n, 10) {
+ return Tree{}, errMalformedTree
+ }
+
+ h, err := base64.StdEncoding.DecodeString(lines[2])
+ if err != nil || len(h) != HashSize {
+ return Tree{}, errMalformedTree
+ }
+
+ var hash Hash
+ copy(hash[:], h)
+ return Tree{n, hash}, nil
+}
+
+var errMalformedRecord = errors.New("malformed record data")
+
+// FormatRecord formats a record for serving to a client
+// in a lookup response or data tile.
+//
+// The encoded form is the record ID as a single number,
+// then the text of the record, and then a terminating blank line.
+// Record text must be valid UTF-8 and must not contain any ASCII control
+// characters (those below U+0020) other than newline (U+000A).
+// It must end in a terminating newline and not contain any blank lines.
+func FormatRecord(id int64, text []byte) (msg []byte, err error) {
+ if !isValidRecordText(text) {
+ return nil, errMalformedRecord
+ }
+ msg = []byte(fmt.Sprintf("%d\n", id))
+ msg = append(msg, text...)
+ msg = append(msg, '\n')
+ return msg, nil
+}
+
+// isValidRecordText reports whether text is syntactically valid record text.
+func isValidRecordText(text []byte) bool {
+ var last rune
+ for i := 0; i < len(text); {
+ r, size := utf8.DecodeRune(text[i:])
+ if r < 0x20 && r != '\n' || r == utf8.RuneError && size == 1 || last == '\n' && r == '\n' {
+ return false
+ }
+ i += size
+ last = r
+ }
+ if last != '\n' {
+ return false
+ }
+ return true
+}
+
+// ParseRecord parses a record description at the start of msg,
+// stopping immediately after the terminating blank line.
+// It returns the record id, the record text, and the remainder of msg.
+func ParseRecord(msg []byte) (id int64, text, rest []byte, err error) {
+ // Leading record id.
+ i := bytes.IndexByte(msg, '\n')
+ if i < 0 {
+ return 0, nil, nil, errMalformedRecord
+ }
+ id, err = strconv.ParseInt(string(msg[:i]), 10, 64)
+ if err != nil {
+ return 0, nil, nil, errMalformedRecord
+ }
+ msg = msg[i+1:]
+
+ // Record text.
+ i = bytes.Index(msg, []byte("\n\n"))
+ if i < 0 {
+ return 0, nil, nil, errMalformedRecord
+ }
+ text, rest = msg[:i+1], msg[i+2:]
+ if !isValidRecordText(text) {
+ return 0, nil, nil, errMalformedRecord
+ }
+ return id, text, rest, nil
+}
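A round-trip sketch through FormatRecord and ParseRecord; the record text is an invented go.sum line:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/mod/sumdb/tlog"
    )

    func main() {
        msg, err := tlog.FormatRecord(7, []byte("example.com/m v1.0.0 h1:FAKE=\n"))
        if err != nil {
            log.Fatal(err)
        }
        id, text, rest, err := tlog.ParseRecord(msg)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("id=%d text=%q rest=%q\n", id, text, rest)
        // id=7 text="example.com/m v1.0.0 h1:FAKE=\n" rest=""
    }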
diff --git a/libgo/go/golang.org/x/mod/sumdb/tlog/tile.go b/libgo/go/golang.org/x/mod/sumdb/tlog/tile.go
new file mode 100644
index 0000000..e4aeb14
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/tlog/tile.go
@@ -0,0 +1,435 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tlog
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// A Tile is a description of a transparency log tile.
+// A tile of height H at level L offset N lists W consecutive hashes
+// at level H*L of the tree starting at offset N*(2**H).
+// A complete tile lists 2**H hashes; a partial tile lists fewer.
+// Note that a tile represents the entire subtree of height H
+// with those hashes as the leaves. The levels above H*L
+// can be reconstructed by hashing the leaves.
+//
+// Each Tile can be encoded as a “tile coordinate path”
+// of the form tile/H/L/NNN[.p/W].
+// The .p/W suffix is present only for partial tiles, meaning W < 2**H.
+// The NNN element is an encoding of N into 3-digit path elements.
+// Each path element except the last begins with an "x".
+// For example,
+// Tile{H: 3, L: 4, N: 1234067, W: 1}'s path
+// is tile/3/4/x001/x234/067.p/1, and
+// Tile{H: 3, L: 4, N: 1234067, W: 8}'s path
+// is tile/3/4/x001/x234/067.
+// See Tile's Path method and the ParseTilePath function.
+//
+// The special level L=-1 holds raw record data instead of hashes.
+// In this case, the level encodes into a tile path as the path element
+// "data" instead of "-1".
+//
+// See also https://golang.org/design/25530-sumdb#checksum-database
+// and https://research.swtch.com/tlog#tiling_a_log.
+type Tile struct {
+ H int // height of tile (1 ≤ H ≤ 30)
+ L int // level in tiling (-1 ≤ L ≤ 63)
+ N int64 // number within level (0 ≤ N, unbounded)
+ W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile)
+}
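The examples in the comment above can be checked directly with Path and ParseTilePath:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/mod/sumdb/tlog"
    )

    func main() {
        t := tlog.Tile{H: 3, L: 4, N: 1234067, W: 1}
        fmt.Println(t.Path()) // tile/3/4/x001/x234/067.p/1

        back, err := tlog.ParseTilePath("tile/3/4/x001/x234/067")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", back) // {H:3 L:4 N:1234067 W:8}
    }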
+
+// TileForIndex returns the tile of fixed height h ≥ 1
+// and least width storing the given hash storage index.
+//
+// If h ≤ 0, TileForIndex panics.
+func TileForIndex(h int, index int64) Tile {
+ if h <= 0 {
+ panic(fmt.Sprintf("TileForIndex: invalid height %d", h))
+ }
+ t, _, _ := tileForIndex(h, index)
+ return t
+}
+
+// tileForIndex returns the tile of height h ≥ 1
+// storing the given hash index, which can be
+// reconstructed using tileHash(data[start:end]).
+func tileForIndex(h int, index int64) (t Tile, start, end int) {
+ level, n := SplitStoredHashIndex(index)
+ t.H = h
+ t.L = level / h
+ level -= t.L * h // now level within tile
+ t.N = n << uint(level) >> uint(t.H)
+ n -= t.N << uint(t.H) >> uint(level) // now n within tile at level
+ t.W = int((n + 1) << uint(level))
+ return t, int(n<<uint(level)) * HashSize, int((n+1)<<uint(level)) * HashSize
+}
+
+// HashFromTile returns the hash at the given storage index,
+// provided that t == TileForIndex(t.H, index) or a wider version,
+// and data is t's tile data (of length at least t.W*HashSize).
+func HashFromTile(t Tile, data []byte, index int64) (Hash, error) {
+ if t.H < 1 || t.H > 30 || t.L < 0 || t.L >= 64 || t.W < 1 || t.W > 1<<uint(t.H) {
+ return Hash{}, fmt.Errorf("invalid tile %v", t.Path())
+ }
+ if len(data) < t.W*HashSize {
+ return Hash{}, fmt.Errorf("data len %d too short for tile %v", len(data), t.Path())
+ }
+ t1, start, end := tileForIndex(t.H, index)
+ if t.L != t1.L || t.N != t1.N || t.W < t1.W {
+ return Hash{}, fmt.Errorf("index %v is in %v not %v", index, t1.Path(), t.Path())
+ }
+ return tileHash(data[start:end]), nil
+}
+
+// tileHash computes the subtree hash corresponding to the 2^K hashes in data.
+func tileHash(data []byte) Hash {
+ if len(data) == 0 {
+ panic("bad math in tileHash")
+ }
+ if len(data) == HashSize {
+ var h Hash
+ copy(h[:], data)
+ return h
+ }
+ n := len(data) / 2
+ return NodeHash(tileHash(data[:n]), tileHash(data[n:]))
+}
+
+// NewTiles returns the coordinates of the tiles of height h ≥ 1
+// that must be published when publishing from a tree of
+// size newTreeSize to replace a tree of size oldTreeSize.
+// (No tiles need to be published for a tree of size zero.)
+//
+// If h ≤ 0, NewTiles panics.
+func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile {
+ if h <= 0 {
+ panic(fmt.Sprintf("NewTiles: invalid height %d", h))
+ }
+ H := uint(h)
+ var tiles []Tile
+ for level := uint(0); newTreeSize>>(H*level) > 0; level++ {
+ oldN := oldTreeSize >> (H * level)
+ newN := newTreeSize >> (H * level)
+ for n := oldN >> H; n < newN>>H; n++ {
+ tiles = append(tiles, Tile{H: h, L: int(level), N: n, W: 1 << H})
+ }
+ n := newN >> H
+ maxW := int(newN - n<<H)
+ minW := 1
+ if oldN > n<<H {
+ minW = int(oldN - n<<H)
+ }
+ for w := minW; w <= maxW; w++ {
+ tiles = append(tiles, Tile{H: h, L: int(level), N: n, W: w})
+ }
+ }
+ return tiles
+}
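For instance, growing a tree from 0 to 10 records with height-2 tiles publishes two complete level-0 tiles, the trailing partial level-0 tile at each width, and the partial level-1 tile at each width:

    package main

    import (
        "fmt"

        "golang.org/x/mod/sumdb/tlog"
    )

    func main() {
        for _, t := range tlog.NewTiles(2, 0, 10) {
            fmt.Println(t.Path())
        }
        // tile/2/0/000
        // tile/2/0/001
        // tile/2/0/002.p/1
        // tile/2/0/002.p/2
        // tile/2/1/000.p/1
        // tile/2/1/000.p/2
    }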
+
+// ReadTileData reads the hashes for tile t from r
+// and returns the corresponding tile data.
+func ReadTileData(t Tile, r HashReader) ([]byte, error) {
+ size := t.W
+ if size == 0 {
+ size = 1 << uint(t.H)
+ }
+ start := t.N << uint(t.H)
+ indexes := make([]int64, size)
+ for i := 0; i < size; i++ {
+ indexes[i] = StoredHashIndex(t.H*t.L, start+int64(i))
+ }
+
+ hashes, err := r.ReadHashes(indexes)
+ if err != nil {
+ return nil, err
+ }
+ if len(hashes) != len(indexes) {
+ return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes))
+ }
+
+ tile := make([]byte, size*HashSize)
+ for i := 0; i < size; i++ {
+ copy(tile[i*HashSize:], hashes[i][:])
+ }
+ return tile, nil
+}
+
+// To limit the size of any particular directory listing,
+// we encode the (possibly very large) number N
+// by encoding three digits at a time.
+// For example, 123456789 encodes as x123/x456/789.
+// Each directory has at most 1000 each of xNNN, NNN, and NNN.p children,
+// so there are at most 3000 entries in any one directory.
+const pathBase = 1000
+
+// Path returns a tile coordinate path describing t.
+func (t Tile) Path() string {
+ n := t.N
+ nStr := fmt.Sprintf("%03d", n%pathBase)
+ for n >= pathBase {
+ n /= pathBase
+ nStr = fmt.Sprintf("x%03d/%s", n%pathBase, nStr)
+ }
+ pStr := ""
+ if t.W != 1<<uint(t.H) {
+ pStr = fmt.Sprintf(".p/%d", t.W)
+ }
+ var L string
+ if t.L == -1 {
+ L = "data"
+ } else {
+ L = fmt.Sprintf("%d", t.L)
+ }
+ return fmt.Sprintf("tile/%d/%s/%s%s", t.H, L, nStr, pStr)
+}
+
+// ParseTilePath parses a tile coordinate path.
+func ParseTilePath(path string) (Tile, error) {
+ f := strings.Split(path, "/")
+ if len(f) < 4 || f[0] != "tile" {
+ return Tile{}, &badPathError{path}
+ }
+ h, err1 := strconv.Atoi(f[1])
+ isData := false
+ if f[2] == "data" {
+ isData = true
+ f[2] = "0"
+ }
+ l, err2 := strconv.Atoi(f[2])
+ if err1 != nil || err2 != nil || h < 1 || l < 0 || h > 30 {
+ return Tile{}, &badPathError{path}
+ }
+ w := 1 << uint(h)
+ if dotP := f[len(f)-2]; strings.HasSuffix(dotP, ".p") {
+ ww, err := strconv.Atoi(f[len(f)-1])
+ if err != nil || ww <= 0 || ww >= w {
+ return Tile{}, &badPathError{path}
+ }
+ w = ww
+ f[len(f)-2] = dotP[:len(dotP)-len(".p")]
+ f = f[:len(f)-1]
+ }
+ f = f[3:]
+ n := int64(0)
+ for _, s := range f {
+ nn, err := strconv.Atoi(strings.TrimPrefix(s, "x"))
+ if err != nil || nn < 0 || nn >= pathBase {
+ return Tile{}, &badPathError{path}
+ }
+ n = n*pathBase + int64(nn)
+ }
+ if isData {
+ l = -1
+ }
+ t := Tile{H: h, L: l, N: n, W: w}
+ if path != t.Path() {
+ return Tile{}, &badPathError{path}
+ }
+ return t, nil
+}
+
+type badPathError struct {
+ path string
+}
+
+func (e *badPathError) Error() string {
+ return fmt.Sprintf("malformed tile path %q", e.path)
+}
+
+// A TileReader reads tiles from a go.sum database log.
+type TileReader interface {
+ // Height returns the height of the available tiles.
+ Height() int
+
+ // ReadTiles returns the data for each requested tile.
+ // If ReadTiles returns err == nil, it must also return
+ // a data record for each tile (len(data) == len(tiles))
+ // and each data record must be the correct length
+ // (len(data[i]) == tiles[i].W*HashSize).
+ //
+ // An implementation of ReadTiles typically reads
+ // them from an on-disk cache or else from a remote
+ // tile server. Tile data downloaded from a server should
+ // be considered suspect and not saved into a persistent
+ // on-disk cache before returning from ReadTiles.
+ // When the client confirms the validity of the tile data,
+ // it will call SaveTiles to signal that they can be safely
+ // written to persistent storage.
+ // See also https://research.swtch.com/tlog#authenticating_tiles.
+ ReadTiles(tiles []Tile) (data [][]byte, err error)
+
+ // SaveTiles informs the TileReader that the tile data
+ // returned by ReadTiles has been confirmed as valid
+ // and can be saved in persistent storage (on disk).
+ SaveTiles(tiles []Tile, data [][]byte)
+}
+
+// TileHashReader returns a HashReader that satisfies requests
+// by loading tiles of the given tree.
+//
+// The returned HashReader checks that loaded tiles are
+// valid for the given tree. Therefore, any hashes returned
+// by the HashReader are already proven to be in the tree.
+func TileHashReader(tree Tree, tr TileReader) HashReader {
+ return &tileHashReader{tree: tree, tr: tr}
+}
+
+type tileHashReader struct {
+ tree Tree
+ tr TileReader
+}
+
+// tileParent returns t's k'th tile parent in the tiles for a tree of size n.
+// If there is no such parent, tileParent returns Tile{}.
+func tileParent(t Tile, k int, n int64) Tile {
+ t.L += k
+ t.N >>= uint(k * t.H)
+ t.W = 1 << uint(t.H)
+ if max := n >> uint(t.L*t.H); t.N<<uint(t.H)+int64(t.W) >= max {
+ if t.N<<uint(t.H) >= max {
+ return Tile{}
+ }
+ t.W = int(max - t.N<<uint(t.H))
+ }
+ return t
+}
+
+func (r *tileHashReader) ReadHashes(indexes []int64) ([]Hash, error) {
+ h := r.tr.Height()
+
+ tileOrder := make(map[Tile]int) // tileOrder[tiles[i]] = i
+ var tiles []Tile
+
+ // Plan to fetch tiles necessary to recompute tree hash.
+ // If it matches, those tiles are authenticated.
+ stx := subTreeIndex(0, r.tree.N, nil)
+ stxTileOrder := make([]int, len(stx))
+ for i, x := range stx {
+ tile, _, _ := tileForIndex(h, x)
+ tile = tileParent(tile, 0, r.tree.N)
+ if j, ok := tileOrder[tile]; ok {
+ stxTileOrder[i] = j
+ continue
+ }
+ stxTileOrder[i] = len(tiles)
+ tileOrder[tile] = len(tiles)
+ tiles = append(tiles, tile)
+ }
+
+ // Plan to fetch tiles containing the indexes,
+ // along with any parent tiles needed
+ // for authentication. For most calls,
+ // the parents are being fetched anyway.
+ indexTileOrder := make([]int, len(indexes))
+ for i, x := range indexes {
+ if x >= StoredHashIndex(0, r.tree.N) {
+ return nil, fmt.Errorf("indexes not in tree")
+ }
+
+ tile, _, _ := tileForIndex(h, x)
+
+ // Walk up parent tiles until we find one we've requested.
+ // That one will be authenticated.
+ k := 0
+ for ; ; k++ {
+ p := tileParent(tile, k, r.tree.N)
+ if j, ok := tileOrder[p]; ok {
+ if k == 0 {
+ indexTileOrder[i] = j
+ }
+ break
+ }
+ }
+
+ // Walk back down recording child tiles after parents.
+ // This loop ends by revisiting the tile for this index
+ // (tileParent(tile, 0, r.tree.N)) unless k == 0, in which
+ // case the previous loop did it.
+ for k--; k >= 0; k-- {
+ p := tileParent(tile, k, r.tree.N)
+ if p.W != 1<<uint(p.H) {
+ // Only full tiles have parents.
+ // This tile has a parent, so it must be full.
+ return nil, fmt.Errorf("bad math in tileHashReader: %d %d %v", r.tree.N, x, p)
+ }
+ tileOrder[p] = len(tiles)
+ if k == 0 {
+ indexTileOrder[i] = len(tiles)
+ }
+ tiles = append(tiles, p)
+ }
+ }
+
+ // Fetch all the tile data.
+ data, err := r.tr.ReadTiles(tiles)
+ if err != nil {
+ return nil, err
+ }
+ if len(data) != len(tiles) {
+ return nil, fmt.Errorf("TileReader returned bad result slice (len=%d, want %d)", len(data), len(tiles))
+ }
+ for i, tile := range tiles {
+ if len(data[i]) != tile.W*HashSize {
+ return nil, fmt.Errorf("TileReader returned bad result slice (%v len=%d, want %d)", tile.Path(), len(data[i]), tile.W*HashSize)
+ }
+ }
+
+ // Authenticate the initial tiles against the tree hash.
+ // They are arranged so that parents are authenticated before children.
+ // First the tiles needed for the tree hash.
+ th, err := HashFromTile(tiles[stxTileOrder[len(stx)-1]], data[stxTileOrder[len(stx)-1]], stx[len(stx)-1])
+ if err != nil {
+ return nil, err
+ }
+ for i := len(stx) - 2; i >= 0; i-- {
+ h, err := HashFromTile(tiles[stxTileOrder[i]], data[stxTileOrder[i]], stx[i])
+ if err != nil {
+ return nil, err
+ }
+ th = NodeHash(h, th)
+ }
+ if th != r.tree.Hash {
+ // The tiles do not support the tree hash.
+ // We know at least one is wrong, but not which one.
+ return nil, fmt.Errorf("downloaded inconsistent tile")
+ }
+
+ // Authenticate full tiles against their parents.
+ for i := len(stx); i < len(tiles); i++ {
+ tile := tiles[i]
+ p := tileParent(tile, 1, r.tree.N)
+ j, ok := tileOrder[p]
+ if !ok {
+ return nil, fmt.Errorf("bad math in tileHashReader %d %v: lost parent of %v", r.tree.N, indexes, tile)
+ }
+ h, err := HashFromTile(p, data[j], StoredHashIndex(p.L*p.H, tile.N))
+ if err != nil {
+ return nil, fmt.Errorf("bad math in tileHashReader %d %v: lost hash of %v: %v", r.tree.N, indexes, tile, err)
+ }
+ if h != tileHash(data[i]) {
+ return nil, fmt.Errorf("downloaded inconsistent tile")
+ }
+ }
+
+ // Now we have all the tiles needed for the requested hashes,
+ // and we've authenticated the full tile set against the trusted tree hash.
+ r.tr.SaveTiles(tiles, data)
+
+ // Pull out the requested hashes.
+ hashes := make([]Hash, len(indexes))
+ for i, x := range indexes {
+ j := indexTileOrder[i]
+ h, err := HashFromTile(tiles[j], data[j], x)
+ if err != nil {
+ return nil, fmt.Errorf("bad math in tileHashReader %d %v: lost hash %v: %v", r.tree.N, indexes, x, err)
+ }
+ hashes[i] = h
+ }
+
+ return hashes, nil
+}
diff --git a/libgo/go/golang.org/x/mod/sumdb/tlog/tlog.go b/libgo/go/golang.org/x/mod/sumdb/tlog/tlog.go
new file mode 100644
index 0000000..01d06c4
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/sumdb/tlog/tlog.go
@@ -0,0 +1,598 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tlog implements a tamper-evident log
+// used in the Go module go.sum database server.
+//
+// This package follows the design of Certificate Transparency (RFC 6962)
+// and its proofs are compatible with that system.
+// See TestCertificateTransparency.
+//
+package tlog
+
+import (
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math/bits"
+)
+
+// A Hash is a hash identifying a log record or tree root.
+type Hash [HashSize]byte
+
+// HashSize is the size of a Hash in bytes.
+const HashSize = 32
+
+// String returns a base64 representation of the hash for printing.
+func (h Hash) String() string {
+ return base64.StdEncoding.EncodeToString(h[:])
+}
+
+// MarshalJSON marshals the hash as a JSON string containing the base64-encoded hash.
+func (h Hash) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + h.String() + `"`), nil
+}
+
+// UnmarshalJSON unmarshals a hash from a JSON string containing the base64-encoded hash.
+func (h *Hash) UnmarshalJSON(data []byte) error {
+ if len(data) != 1+44+1 || data[0] != '"' || data[len(data)-2] != '=' || data[len(data)-1] != '"' {
+ return errors.New("cannot decode hash")
+ }
+
+ // As of Go 1.12, base64.StdEncoding.Decode insists on
+ // slicing into target[33:] even when it only writes 32 bytes.
+ // Since we already checked that the hash ends in = above,
+ // we can use base64.RawStdEncoding with the = removed;
+ // RawStdEncoding does not exhibit the same bug.
+ // We decode into a temporary to avoid writing anything to *h
+ // unless the entire input is well-formed.
+ var tmp Hash
+ n, err := base64.RawStdEncoding.Decode(tmp[:], data[1:len(data)-2])
+ if err != nil || n != HashSize {
+ return errors.New("cannot decode hash")
+ }
+ *h = tmp
+ return nil
+}
+
+// ParseHash parses the base64-encoded string form of a hash.
+func ParseHash(s string) (Hash, error) {
+ data, err := base64.StdEncoding.DecodeString(s)
+ if err != nil || len(data) != HashSize {
+ return Hash{}, fmt.Errorf("malformed hash")
+ }
+ var h Hash
+ copy(h[:], data)
+ return h, nil
+}
+
+// maxpow2 returns k, the maximum power of 2 smaller than n,
+// as well as l = log₂ k (so k = 1<<l).
+func maxpow2(n int64) (k int64, l int) {
+ l = 0
+ for 1<<uint(l+1) < n {
+ l++
+ }
+ return 1 << uint(l), l
+}
+
+var zeroPrefix = []byte{0x00}
+
+// RecordHash returns the content hash for the given record data.
+func RecordHash(data []byte) Hash {
+ // SHA256(0x00 || data)
+ // https://tools.ietf.org/html/rfc6962#section-2.1
+ h := sha256.New()
+ h.Write(zeroPrefix)
+ h.Write(data)
+ var h1 Hash
+ h.Sum(h1[:0])
+ return h1
+}
+
+// NodeHash returns the hash for an interior tree node with the given left and right hashes.
+func NodeHash(left, right Hash) Hash {
+ // SHA256(0x01 || left || right)
+ // https://tools.ietf.org/html/rfc6962#section-2.1
+ // We use a stack buffer to assemble the hash input
+ // to avoid allocating a hash struct with sha256.New.
+ var buf [1 + HashSize + HashSize]byte
+ buf[0] = 0x01
+ copy(buf[1:], left[:])
+ copy(buf[1+HashSize:], right[:])
+ return sha256.Sum256(buf[:])
+}
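These two primitives are RFC 6962's leaf and interior hashes, so the root of a two-record tree can be computed by hand:

    package main

    import (
        "fmt"

        "golang.org/x/mod/sumdb/tlog"
    )

    func main() {
        l0 := tlog.RecordHash([]byte("record 0\n"))
        l1 := tlog.RecordHash([]byte("record 1\n"))
        fmt.Println(tlog.NodeHash(l0, l1)) // root hash of the two-record tree
    }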
+
+// For information about the stored hash index ordering,
+// see section 3.3 of Crosby and Wallach's paper
+// "Efficient Data Structures for Tamper-Evident Logging".
+// https://www.usenix.org/legacy/event/sec09/tech/full_papers/crosby.pdf
+
+// StoredHashIndex maps the tree coordinates (level, n)
+// to a dense linear ordering that can be used for hash storage.
+// Hash storage implementations that store hashes in sequential
+// storage can use this function to compute where to read or write
+// a given hash.
+func StoredHashIndex(level int, n int64) int64 {
+ // Level L's n'th hash is written right after level L-1's 2n+1'th hash.
+ // Work our way down to the level 0 ordering.
+ // We'll add back the original level count at the end.
+ for l := level; l > 0; l-- {
+ n = 2*n + 1
+ }
+
+ // Level 0's n'th hash is written at n+n/2+n/4+... (eventually n/2ⁱ hits zero).
+ i := int64(0)
+ for ; n > 0; n >>= 1 {
+ i += n
+ }
+
+ return i + int64(level)
+}
+
+// SplitStoredHashIndex is the inverse of StoredHashIndex.
+// That is, SplitStoredHashIndex(StoredHashIndex(level, n)) == level, n.
+func SplitStoredHashIndex(index int64) (level int, n int64) {
+ // Determine level 0 record before index.
+ // StoredHashIndex(0, n) < 2*n,
+ // so the n we want is in [index/2, index/2+log₂(index)].
+ n = index / 2
+ indexN := StoredHashIndex(0, n)
+ if indexN > index {
+ panic("bad math")
+ }
+ for {
+ // Each new record n adds 1 + trailingZeros(n) hashes.
+ x := indexN + 1 + int64(bits.TrailingZeros64(uint64(n+1)))
+ if x > index {
+ break
+ }
+ n++
+ indexN = x
+ }
+ // The hash we want was committed with record n,
+ // meaning it is one of (0, n), (1, n/2), (2, n/4), ...
+ level = int(index - indexN)
+ return level, n >> uint(level)
+}
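The first few values make the interleaved ordering concrete, and SplitStoredHashIndex inverts each one:

    package main

    import (
        "fmt"

        "golang.org/x/mod/sumdb/tlog"
    )

    func main() {
        // Start of the ordering: (0,0) (0,1) (1,0) (0,2) (0,3) (1,1) (2,0) ...
        coords := []struct {
            level int
            n     int64
        }{{0, 0}, {0, 1}, {1, 0}, {0, 2}, {0, 3}, {1, 1}, {2, 0}}
        for _, c := range coords {
            i := tlog.StoredHashIndex(c.level, c.n)
            level, n := tlog.SplitStoredHashIndex(i)
            fmt.Printf("(%d,%d) -> index %d -> (%d,%d)\n", c.level, c.n, i, level, n)
        }
        // The indexes printed are 0 through 6 in order.
    }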
+
+// StoredHashCount returns the number of stored hashes
+// that are expected for a tree with n records.
+func StoredHashCount(n int64) int64 {
+ if n == 0 {
+ return 0
+ }
+ // The tree will have the hashes up to the last leaf hash.
+ numHash := StoredHashIndex(0, n-1) + 1
+ // And it will have any hashes for subtrees completed by that leaf.
+ for i := uint64(n - 1); i&1 != 0; i >>= 1 {
+ numHash++
+ }
+ return numHash
+}
+
+// StoredHashes returns the hashes that must be stored when writing
+// record n with the given data. The hashes should be stored starting
+// at StoredHashIndex(0, n). The result will have at most 1 + log₂ n hashes,
+// but it will average just under two per call for a sequence of calls for n=1..k.
+//
+// StoredHashes may read up to log n earlier hashes from r
+// in order to compute hashes for completed subtrees.
+func StoredHashes(n int64, data []byte, r HashReader) ([]Hash, error) {
+ return StoredHashesForRecordHash(n, RecordHash(data), r)
+}
+
+// StoredHashesForRecordHash is like StoredHashes but takes
+// as its second argument RecordHash(data) instead of data itself.
+func StoredHashesForRecordHash(n int64, h Hash, r HashReader) ([]Hash, error) {
+ // Start with the record hash.
+ hashes := []Hash{h}
+
+ // Build list of indexes needed for hashes for completed subtrees.
+ // Each trailing 1 bit in the binary representation of n completes a subtree
+ // and consumes a hash from an adjacent subtree.
+ m := int(bits.TrailingZeros64(uint64(n + 1)))
+ indexes := make([]int64, m)
+ for i := 0; i < m; i++ {
+ // We arrange indexes in sorted order.
+ // Note that n>>i is always odd.
+ indexes[m-1-i] = StoredHashIndex(i, n>>uint(i)-1)
+ }
+
+ // Fetch hashes.
+ old, err := r.ReadHashes(indexes)
+ if err != nil {
+ return nil, err
+ }
+ if len(old) != len(indexes) {
+ return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(old))
+ }
+
+ // Build new hashes.
+ for i := 0; i < m; i++ {
+ h = NodeHash(old[m-1-i], h)
+ hashes = append(hashes, h)
+ }
+ return hashes, nil
+}
+
+// A HashReader can read hashes for nodes in the log's tree structure.
+type HashReader interface {
+ // ReadHashes returns the hashes with the given stored hash indexes
+ // (see StoredHashIndex and SplitStoredHashIndex).
+ // ReadHashes must return a slice of hashes the same length as indexes,
+ // or else it must return a non-nil error.
+ // ReadHashes may run faster if indexes is sorted in increasing order.
+ ReadHashes(indexes []int64) ([]Hash, error)
+}
+
+// A HashReaderFunc is a function implementing HashReader.
+type HashReaderFunc func([]int64) ([]Hash, error)
+
+func (f HashReaderFunc) ReadHashes(indexes []int64) ([]Hash, error) {
+ return f(indexes)
+}
+
+// TreeHash computes the hash for the root of the tree with n records,
+// using the HashReader to obtain previously stored hashes
+// (those returned by StoredHashes during the writes of those n records).
+// TreeHash makes a single call to ReadHashes requesting at most 1 + log₂ n hashes.
+// The tree of size zero is defined to have an all-zero Hash.
+func TreeHash(n int64, r HashReader) (Hash, error) {
+ if n == 0 {
+ return Hash{}, nil
+ }
+ indexes := subTreeIndex(0, n, nil)
+ hashes, err := r.ReadHashes(indexes)
+ if err != nil {
+ return Hash{}, err
+ }
+ if len(hashes) != len(indexes) {
+ return Hash{}, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes))
+ }
+ hash, hashes := subTreeHash(0, n, hashes)
+ if len(hashes) != 0 {
+ panic("tlog: bad index math in TreeHash")
+ }
+ return hash, nil
+}
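+
+// A sketch of appending records and then computing the root, reusing the
+// hypothetical in-memory reader above (records is a hypothetical [][]byte):
+//
+//	var stored []Hash
+//	for i, data := range records {
+//		hashes, err := StoredHashes(int64(i), data, reader)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		stored = append(stored, hashes...)
+//	}
+//	root, err := TreeHash(int64(len(records)), reader)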
+
+// subTreeIndex returns the storage indexes needed to compute
+// the hash for the subtree containing records [lo, hi),
+// appending them to need and returning the result.
+// See https://tools.ietf.org/html/rfc6962#section-2.1
+func subTreeIndex(lo, hi int64, need []int64) []int64 {
+ // See subTreeHash below for commentary.
+ for lo < hi {
+ k, level := maxpow2(hi - lo + 1)
+ if lo&(k-1) != 0 {
+ panic("tlog: bad math in subTreeIndex")
+ }
+ need = append(need, StoredHashIndex(level, lo>>uint(level)))
+ lo += k
+ }
+ return need
+}
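+
+// For example, the subtree [0, 6) decomposes into the complete subtrees
+// [0, 4) and [4, 6), so subTreeIndex(0, 6, nil) returns the stored indexes
+// of h(2,0) and h(1,2), and subTreeHash below combines them right to left,
+// with the left hash as the first argument of NodeHash.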
+
+// subTreeHash computes the hash for the subtree containing records [lo, hi),
+// assuming that hashes are the hashes corresponding to the indexes
+// returned by subTreeIndex(lo, hi).
+// It returns any leftover hashes.
+func subTreeHash(lo, hi int64, hashes []Hash) (Hash, []Hash) {
+ // Repeatedly partition the tree into a left side with 2^level nodes,
+ // for as large a level as possible, and a right side with the fringe.
+ // The left hash is stored directly and can be read from storage.
+ // The right side needs further computation.
+ numTree := 0
+ for lo < hi {
+ k, _ := maxpow2(hi - lo + 1)
+ if lo&(k-1) != 0 || lo >= hi {
+ panic("tlog: bad math in subTreeHash")
+ }
+ numTree++
+ lo += k
+ }
+
+ if len(hashes) < numTree {
+ panic("tlog: bad index math in subTreeHash")
+ }
+
+ // Reconstruct hash.
+ h := hashes[numTree-1]
+ for i := numTree - 2; i >= 0; i-- {
+ h = NodeHash(hashes[i], h)
+ }
+ return h, hashes[numTree:]
+}
+
+// A RecordProof is a verifiable proof that a particular log root contains a particular record.
+// RFC 6962 calls this a “Merkle audit path.”
+type RecordProof []Hash
+
+// ProveRecord returns the proof that the tree of size t contains the record with index n.
+func ProveRecord(t, n int64, r HashReader) (RecordProof, error) {
+ if t < 0 || n < 0 || n >= t {
+ return nil, fmt.Errorf("tlog: invalid inputs in ProveRecord")
+ }
+ indexes := leafProofIndex(0, t, n, nil)
+ if len(indexes) == 0 {
+ return RecordProof{}, nil
+ }
+ hashes, err := r.ReadHashes(indexes)
+ if err != nil {
+ return nil, err
+ }
+ if len(hashes) != len(indexes) {
+ return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes))
+ }
+
+ p, hashes := leafProof(0, t, n, hashes)
+ if len(hashes) != 0 {
+ panic("tlog: bad index math in ProveRecord")
+ }
+ return p, nil
+}
+
+// leafProofIndex builds the list of indexes needed to construct the proof
+// that leaf n is contained in the subtree with leaves [lo, hi).
+// It appends those indexes to need and returns the result.
+// See https://tools.ietf.org/html/rfc6962#section-2.1.1
+func leafProofIndex(lo, hi, n int64, need []int64) []int64 {
+ // See leafProof below for commentary.
+ if !(lo <= n && n < hi) {
+ panic("tlog: bad math in leafProofIndex")
+ }
+ if lo+1 == hi {
+ return need
+ }
+ if k, _ := maxpow2(hi - lo); n < lo+k {
+ need = leafProofIndex(lo, lo+k, n, need)
+ need = subTreeIndex(lo+k, hi, need)
+ } else {
+ need = subTreeIndex(lo, lo+k, need)
+ need = leafProofIndex(lo+k, hi, n, need)
+ }
+ return need
+}
+
+// leafProof constructs the proof that leaf n is contained in the subtree with leaves [lo, hi).
+// It returns any leftover hashes as well.
+// See https://tools.ietf.org/html/rfc6962#section-2.1.1
+func leafProof(lo, hi, n int64, hashes []Hash) (RecordProof, []Hash) {
+ // We must have lo <= n < hi or else the code here has a bug.
+ if !(lo <= n && n < hi) {
+ panic("tlog: bad math in leafProof")
+ }
+
+ if lo+1 == hi { // n == lo
+ // Reached the leaf node.
+ // The verifier knows what the leaf hash is, so we don't need to send it.
+ return RecordProof{}, hashes
+ }
+
+ // Walk down the tree toward n.
+ // Record the hash of the path not taken (needed for verifying the proof).
+ var p RecordProof
+ var th Hash
+ if k, _ := maxpow2(hi - lo); n < lo+k {
+ // n is on left side
+ p, hashes = leafProof(lo, lo+k, n, hashes)
+ th, hashes = subTreeHash(lo+k, hi, hashes)
+ } else {
+ // n is on right side
+ th, hashes = subTreeHash(lo, lo+k, hashes)
+ p, hashes = leafProof(lo+k, hi, n, hashes)
+ }
+ return append(p, th), hashes
+}
+
+var errProofFailed = errors.New("invalid transparency proof")
+
+// CheckRecord verifies that p is a valid proof that the tree of size t
+// with hash th has an n'th record with hash h.
+func CheckRecord(p RecordProof, t int64, th Hash, n int64, h Hash) error {
+ if t < 0 || n < 0 || n >= t {
+ return fmt.Errorf("tlog: invalid inputs in CheckRecord")
+ }
+ th2, err := runRecordProof(p, 0, t, n, h)
+ if err != nil {
+ return err
+ }
+ if th2 == th {
+ return nil
+ }
+ return errProofFailed
+}
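+
+// A typical prove-and-verify round trip, reusing the hypothetical reader
+// above (t is the tree size, n the record index, data the record contents,
+// and root the TreeHash result for size t):
+//
+//	p, err := ProveRecord(t, n, reader)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := CheckRecord(p, t, root, n, RecordHash(data)); err != nil {
+//		log.Fatal(err)
+//	}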
+
+// runRecordProof runs the proof p that leaf n is contained in the subtree with leaves [lo, hi).
+// Running the proof means constructing and returning the implied hash of that
+// subtree.
+func runRecordProof(p RecordProof, lo, hi, n int64, leafHash Hash) (Hash, error) {
+ // We must have lo <= n < hi or else the code here has a bug.
+ if !(lo <= n && n < hi) {
+ panic("tlog: bad math in runRecordProof")
+ }
+
+	if lo+1 == hi { // n == lo
+ // Reached the leaf node.
+ // The proof must not have any unnecessary hashes.
+ if len(p) != 0 {
+ return Hash{}, errProofFailed
+ }
+ return leafHash, nil
+ }
+
+ if len(p) == 0 {
+ return Hash{}, errProofFailed
+ }
+
+ k, _ := maxpow2(hi - lo)
+ if n < lo+k {
+ th, err := runRecordProof(p[:len(p)-1], lo, lo+k, n, leafHash)
+ if err != nil {
+ return Hash{}, err
+ }
+ return NodeHash(th, p[len(p)-1]), nil
+ } else {
+ th, err := runRecordProof(p[:len(p)-1], lo+k, hi, n, leafHash)
+ if err != nil {
+ return Hash{}, err
+ }
+ return NodeHash(p[len(p)-1], th), nil
+ }
+}
+
+// A TreeProof is a verifiable proof that a particular log tree contains
+// as a prefix all records present in an earlier tree.
+// RFC 6962 calls this a “Merkle consistency proof.”
+type TreeProof []Hash
+
+// ProveTree returns the proof that the tree of size t contains
+// as a prefix all the records from the tree of smaller size n.
+func ProveTree(t, n int64, h HashReader) (TreeProof, error) {
+ if t < 1 || n < 1 || n > t {
+ return nil, fmt.Errorf("tlog: invalid inputs in ProveTree")
+ }
+ indexes := treeProofIndex(0, t, n, nil)
+ if len(indexes) == 0 {
+ return TreeProof{}, nil
+ }
+ hashes, err := h.ReadHashes(indexes)
+ if err != nil {
+ return nil, err
+ }
+ if len(hashes) != len(indexes) {
+ return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes))
+ }
+
+ p, hashes := treeProof(0, t, n, hashes)
+ if len(hashes) != 0 {
+ panic("tlog: bad index math in ProveTree")
+ }
+ return p, nil
+}
+
+// treeProofIndex builds the list of indexes needed to construct
+// the sub-proof related to the subtree containing records [lo, hi).
+// See https://tools.ietf.org/html/rfc6962#section-2.1.2.
+func treeProofIndex(lo, hi, n int64, need []int64) []int64 {
+ // See treeProof below for commentary.
+ if !(lo < n && n <= hi) {
+ panic("tlog: bad math in treeProofIndex")
+ }
+
+ if n == hi {
+ if lo == 0 {
+ return need
+ }
+ return subTreeIndex(lo, hi, need)
+ }
+
+ if k, _ := maxpow2(hi - lo); n <= lo+k {
+ need = treeProofIndex(lo, lo+k, n, need)
+ need = subTreeIndex(lo+k, hi, need)
+ } else {
+ need = subTreeIndex(lo, lo+k, need)
+ need = treeProofIndex(lo+k, hi, n, need)
+ }
+ return need
+}
+
+// treeProof constructs the sub-proof related to the subtree containing records [lo, hi).
+// It returns any leftover hashes as well.
+// See https://tools.ietf.org/html/rfc6962#section-2.1.2.
+func treeProof(lo, hi, n int64, hashes []Hash) (TreeProof, []Hash) {
+ // We must have lo < n <= hi or else the code here has a bug.
+ if !(lo < n && n <= hi) {
+ panic("tlog: bad math in treeProof")
+ }
+
+ // Reached common ground.
+ if n == hi {
+ if lo == 0 {
+ // This subtree corresponds exactly to the old tree.
+ // The verifier knows that hash, so we don't need to send it.
+ return TreeProof{}, hashes
+ }
+ th, hashes := subTreeHash(lo, hi, hashes)
+ return TreeProof{th}, hashes
+ }
+
+ // Interior node for the proof.
+ // Decide whether to walk down the left or right side.
+ var p TreeProof
+ var th Hash
+ if k, _ := maxpow2(hi - lo); n <= lo+k {
+		// n is on left side
+ p, hashes = treeProof(lo, lo+k, n, hashes)
+ th, hashes = subTreeHash(lo+k, hi, hashes)
+ } else {
+		// n is on right side
+ th, hashes = subTreeHash(lo, lo+k, hashes)
+ p, hashes = treeProof(lo+k, hi, n, hashes)
+ }
+ return append(p, th), hashes
+}
+
+// CheckTree verifies that p is a valid proof that the tree of size t with hash th
+// contains as a prefix the tree of size n with hash h.
+func CheckTree(p TreeProof, t int64, th Hash, n int64, h Hash) error {
+ if t < 1 || n < 1 || n > t {
+ return fmt.Errorf("tlog: invalid inputs in CheckTree")
+ }
+ h2, th2, err := runTreeProof(p, 0, t, n, h)
+ if err != nil {
+ return err
+ }
+ if th2 == th && h2 == h {
+ return nil
+ }
+ return errProofFailed
+}
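+
+// The consistency analogue of the record round trip above (oldRoot and
+// newRoot are hypothetical tree hashes for the trees of sizes n and t):
+//
+//	p, err := ProveTree(t, n, reader)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := CheckTree(p, t, newRoot, n, oldRoot); err != nil {
+//		log.Fatal(err)
+//	}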
+
+// runTreeProof runs the sub-proof p related to the subtree containing records [lo, hi),
+// where old is the hash of the old tree with n records.
+// Running the proof means constructing and returning the implied hashes of that
+// subtree in both the old and new tree.
+func runTreeProof(p TreeProof, lo, hi, n int64, old Hash) (Hash, Hash, error) {
+ // We must have lo < n <= hi or else the code here has a bug.
+ if !(lo < n && n <= hi) {
+ panic("tlog: bad math in runTreeProof")
+ }
+
+ // Reached common ground.
+ if n == hi {
+ if lo == 0 {
+ if len(p) != 0 {
+ return Hash{}, Hash{}, errProofFailed
+ }
+ return old, old, nil
+ }
+ if len(p) != 1 {
+ return Hash{}, Hash{}, errProofFailed
+ }
+ return p[0], p[0], nil
+ }
+
+ if len(p) == 0 {
+ return Hash{}, Hash{}, errProofFailed
+ }
+
+ // Interior node for the proof.
+ k, _ := maxpow2(hi - lo)
+ if n <= lo+k {
+ oh, th, err := runTreeProof(p[:len(p)-1], lo, lo+k, n, old)
+ if err != nil {
+ return Hash{}, Hash{}, err
+ }
+ return oh, NodeHash(th, p[len(p)-1]), nil
+ } else {
+ oh, th, err := runTreeProof(p[:len(p)-1], lo+k, hi, n, old)
+ if err != nil {
+ return Hash{}, Hash{}, err
+ }
+ return NodeHash(p[len(p)-1], oh), NodeHash(p[len(p)-1], th), nil
+ }
+}
diff --git a/libgo/go/golang.org/x/mod/zip/zip.go b/libgo/go/golang.org/x/mod/zip/zip.go
new file mode 100644
index 0000000..37c7642
--- /dev/null
+++ b/libgo/go/golang.org/x/mod/zip/zip.go
@@ -0,0 +1,570 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package zip provides functions for creating and extracting module zip files.
+//
+// Module zip files have several restrictions listed below. These are necessary
+// to ensure that module zip files can be extracted consistently on supported
+// platforms and file systems.
+//
+// • All file paths within a zip file must start with "<module>@<version>/",
+// where "<module>" is the module path and "<version>" is the version.
+// The module path must be valid (see golang.org/x/mod/module.CheckPath).
+// The version must be valid and canonical (see
+// golang.org/x/mod/module.CanonicalVersion). The path must have a major
+// version suffix consistent with the version (see
+// golang.org/x/mod/module.Check). The part of the file path after the
+// "<module>@<version>/" prefix must be valid (see
+// golang.org/x/mod/module.CheckFilePath).
+//
+// • No two file paths may be equal under Unicode case-folding (see
+// strings.EqualFold).
+//
+// • A go.mod file may or may not appear in the top-level directory. If present,
+// it must be named "go.mod", not any other case. Files named "go.mod"
+// are not allowed in any other directory.
+//
+// • The total size in bytes of a module zip file may be at most MaxZipFile
+// bytes (500 MiB). The total uncompressed size of the files within the
+// zip may also be at most MaxZipFile bytes.
+//
+// • Each file's uncompressed size must match its declared 64-bit uncompressed
+// size in the zip file header.
+//
+// • If the zip contains files named "<module>@<version>/go.mod" or
+// "<module>@<version>/LICENSE", their sizes in bytes may be at most
+// MaxGoMod or MaxLICENSE, respectively (both are 16 MiB).
+//
+// • Empty directories are ignored. File permissions and timestamps are also
+// ignored.
+//
+// • Symbolic links and other irregular files are not allowed.
+//
+// Note that this package does not provide hashing functionality. See
+// golang.org/x/mod/sumdb/dirhash.
+package zip
+
+import (
+ "archive/zip"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/mod/module"
+)
+
+const (
+ // MaxZipFile is the maximum size in bytes of a module zip file. The
+ // go command will report an error if either the zip file or its extracted
+ // content is larger than this.
+ MaxZipFile = 500 << 20
+
+ // MaxGoMod is the maximum size in bytes of a go.mod file within a
+ // module zip file.
+ MaxGoMod = 16 << 20
+
+ // MaxLICENSE is the maximum size in bytes of a LICENSE file within a
+ // module zip file.
+ MaxLICENSE = 16 << 20
+)
+
+// File provides an abstraction for a file in a directory, zip, or anything
+// else that looks like a file.
+type File interface {
+ // Path returns a clean slash-separated relative path from the module root
+ // directory to the file.
+ Path() string
+
+ // Lstat returns information about the file. If the file is a symbolic link,
+ // Lstat returns information about the link itself, not the file it points to.
+ Lstat() (os.FileInfo, error)
+
+ // Open provides access to the data within a regular file. Open may return
+ // an error if called on a directory or symbolic link.
+ Open() (io.ReadCloser, error)
+}
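+
+// The dirFile type below is one File implementation, backed by a file on
+// disk; CreateFromDir uses it when walking a directory tree.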
+
+// Create builds a zip archive for module m from an abstract list of files
+// and writes it to w.
+//
+// Create verifies the restrictions described in the package documentation
+// and should not produce an archive that Unzip cannot extract. Create does not
+// include files in the output archive if they don't belong in the module zip.
+// In particular, Create will not include files in modules found in
+// subdirectories, most files in vendor directories, or irregular files (such
+// as symbolic links) in the output archive.
+func Create(w io.Writer, m module.Version, files []File) (err error) {
+ defer func() {
+ if err != nil {
+ err = &zipError{verb: "create zip", err: err}
+ }
+ }()
+
+ // Check that the version is canonical, the module path is well-formed, and
+ // the major version suffix matches the major version.
+ if vers := module.CanonicalVersion(m.Version); vers != m.Version {
+ return fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers)
+ }
+ if err := module.Check(m.Path, m.Version); err != nil {
+ return err
+ }
+
+ // Find directories containing go.mod files (other than the root).
+ // These directories will not be included in the output zip.
+ haveGoMod := make(map[string]bool)
+ for _, f := range files {
+ dir, base := path.Split(f.Path())
+ if strings.EqualFold(base, "go.mod") {
+ info, err := f.Lstat()
+ if err != nil {
+ return err
+ }
+ if info.Mode().IsRegular() {
+ haveGoMod[dir] = true
+ }
+ }
+ }
+
+ inSubmodule := func(p string) bool {
+ for {
+ dir, _ := path.Split(p)
+ if dir == "" {
+ return false
+ }
+ if haveGoMod[dir] {
+ return true
+ }
+ p = dir[:len(dir)-1]
+ }
+ }
+
+ // Create the module zip file.
+ zw := zip.NewWriter(w)
+ prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version)
+
+ addFile := func(f File, path string, size int64) error {
+ rc, err := f.Open()
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+ w, err := zw.Create(prefix + path)
+ if err != nil {
+ return err
+ }
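+		// Copy at most size+1 bytes so that an overlong file is detected:
+		// if the limit is fully consumed below (lr.N <= 0), the file was
+		// larger than its declared size.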
+ lr := &io.LimitedReader{R: rc, N: size + 1}
+ if _, err := io.Copy(w, lr); err != nil {
+ return err
+ }
+ if lr.N <= 0 {
+ return fmt.Errorf("file %q is larger than declared size", path)
+ }
+ return nil
+ }
+
+ collisions := make(collisionChecker)
+ maxSize := int64(MaxZipFile)
+ for _, f := range files {
+ p := f.Path()
+ if p != path.Clean(p) {
+ return fmt.Errorf("file path %s is not clean", p)
+ }
+ if path.IsAbs(p) {
+ return fmt.Errorf("file path %s is not relative", p)
+ }
+ if isVendoredPackage(p) || inSubmodule(p) {
+ continue
+ }
+ if p == ".hg_archival.txt" {
+ // Inserted by hg archive.
+ // The go command drops this regardless of the VCS being used.
+ continue
+ }
+ if err := module.CheckFilePath(p); err != nil {
+ return err
+ }
+ if strings.ToLower(p) == "go.mod" && p != "go.mod" {
+ return fmt.Errorf("found file named %s, want all lower-case go.mod", p)
+ }
+ info, err := f.Lstat()
+ if err != nil {
+ return err
+ }
+ if err := collisions.check(p, info.IsDir()); err != nil {
+ return err
+ }
+ if !info.Mode().IsRegular() {
+ // Skip symbolic links (golang.org/issue/27093).
+ continue
+ }
+ size := info.Size()
+ if size < 0 || maxSize < size {
+ return fmt.Errorf("module source tree too large (max size is %d bytes)", MaxZipFile)
+ }
+ maxSize -= size
+ if p == "go.mod" && size > MaxGoMod {
+ return fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod)
+ }
+ if p == "LICENSE" && size > MaxLICENSE {
+ return fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE)
+ }
+
+ if err := addFile(f, p, size); err != nil {
+ return err
+ }
+ }
+
+ return zw.Close()
+}
+
+// CreateFromDir creates a module zip file for module m from the contents of
+// a directory, dir. The zip content is written to w.
+//
+// CreateFromDir verifies the restrictions described in the package
+// documentation and should not produce an archive that Unzip cannot extract.
+// CreateFromDir does not include files in the output archive if they don't
+// belong in the module zip. In particular, CreateFromDir will not include
+// files in modules found in subdirectories, most files in vendor directories,
+// or irregular files (such as symbolic links) in the output archive.
+// Additionally, unlike Create, CreateFromDir will not include directories
+// named ".bzr", ".git", ".hg", or ".svn".
+func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) {
+ defer func() {
+ if zerr, ok := err.(*zipError); ok {
+ zerr.path = dir
+ } else if err != nil {
+ err = &zipError{verb: "create zip", path: dir, err: err}
+ }
+ }()
+
+ var files []File
+ err = filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error {
+ relPath, err := filepath.Rel(dir, filePath)
+ if err != nil {
+ return err
+ }
+ slashPath := filepath.ToSlash(relPath)
+
+ if info.IsDir() {
+ if filePath == dir {
+ // Don't skip the top-level directory.
+ return nil
+ }
+
+ // Skip VCS directories.
+ // fossil repos are regular files with arbitrary names, so we don't try
+ // to exclude them.
+ switch filepath.Base(filePath) {
+ case ".bzr", ".git", ".hg", ".svn":
+ return filepath.SkipDir
+ }
+
+			// Skip some subdirectories inside vendor, but deliberately preserve
+			// the bug golang.org/issue/31562, described in isVendoredPackage.
+ // We would like Create and CreateFromDir to produce the same result
+ // for a set of files, whether expressed as a directory tree or zip.
+ if isVendoredPackage(slashPath) {
+ return filepath.SkipDir
+ }
+
+ // Skip submodules (directories containing go.mod files).
+ if goModInfo, err := os.Lstat(filepath.Join(filePath, "go.mod")); err == nil && !goModInfo.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if info.Mode().IsRegular() {
+ if !isVendoredPackage(slashPath) {
+ files = append(files, dirFile{
+ filePath: filePath,
+ slashPath: slashPath,
+ info: info,
+ })
+ }
+ return nil
+ }
+
+ // Not a regular file or a directory. Probably a symbolic link.
+ // Irregular files are ignored, so skip it.
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return Create(w, m, files)
+}
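+
+// A minimal round-trip sketch (the module path, version, directory, and
+// file names here are illustrative assumptions):
+//
+//	m := module.Version{Path: "example.com/m", Version: "v1.0.0"}
+//	f, err := os.Create("m.zip")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := CreateFromDir(f, m, "./m"); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := f.Close(); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := Unzip("extracted", m, "m.zip"); err != nil {
+//		log.Fatal(err)
+//	}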
+
+type dirFile struct {
+ filePath, slashPath string
+ info os.FileInfo
+}
+
+func (f dirFile) Path() string { return f.slashPath }
+func (f dirFile) Lstat() (os.FileInfo, error) { return f.info, nil }
+func (f dirFile) Open() (io.ReadCloser, error) { return os.Open(f.filePath) }
+
+func isVendoredPackage(name string) bool {
+ var i int
+ if strings.HasPrefix(name, "vendor/") {
+ i += len("vendor/")
+ } else if j := strings.Index(name, "/vendor/"); j >= 0 {
+ // This offset looks incorrect; this should probably be
+ //
+ // i = j + len("/vendor/")
+ //
+ // (See https://golang.org/issue/31562.)
+ //
+ // Unfortunately, we can't fix it without invalidating checksums.
+ // Fortunately, the error appears to be strictly conservative: we'll retain
+ // vendored packages that we should have pruned, but we won't prune
+ // non-vendored packages that we should have retained.
+ //
+ // Since this defect doesn't seem to break anything, it's not worth fixing
+ // for now.
+ i += len("/vendor/")
+ } else {
+ return false
+ }
+ return strings.Contains(name[i:], "/")
+}
+
+// Unzip extracts the contents of a module zip file to a directory.
+//
+// Unzip checks all restrictions listed in the package documentation and returns
+// an error if the zip archive is not valid. In some cases, files may be written
+// to dir before an error is returned (for example, if a file's uncompressed
+// size does not match its declared size).
+//
+// dir may or may not exist: Unzip will create it and any missing parent
+// directories if it doesn't exist. If dir exists, it must be empty.
+func Unzip(dir string, m module.Version, zipFile string) (err error) {
+ defer func() {
+ if err != nil {
+ err = &zipError{verb: "unzip", path: zipFile, err: err}
+ }
+ }()
+
+ if vers := module.CanonicalVersion(m.Version); vers != m.Version {
+ return fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers)
+ }
+ if err := module.Check(m.Path, m.Version); err != nil {
+ return err
+ }
+
+ // Check that the directory is empty. Don't create it yet in case there's
+ // an error reading the zip.
+ files, _ := ioutil.ReadDir(dir)
+ if len(files) > 0 {
+ return fmt.Errorf("target directory %v exists and is not empty", dir)
+ }
+
+ // Open the zip file and ensure it's under the size limit.
+ f, err := os.Open(zipFile)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ zipSize := info.Size()
+ if zipSize > MaxZipFile {
+ return fmt.Errorf("module zip file is too large (%d bytes; limit is %d bytes)", zipSize, MaxZipFile)
+ }
+
+ z, err := zip.NewReader(f, zipSize)
+ if err != nil {
+ return err
+ }
+
+ // Check total size, valid file names.
+ collisions := make(collisionChecker)
+ prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version)
+ var size int64
+ for _, zf := range z.File {
+ if !strings.HasPrefix(zf.Name, prefix) {
+ return fmt.Errorf("unexpected file name %s", zf.Name)
+ }
+ name := zf.Name[len(prefix):]
+ if name == "" {
+ continue
+ }
+ isDir := strings.HasSuffix(name, "/")
+ if isDir {
+ name = name[:len(name)-1]
+ }
+ if path.Clean(name) != name {
+ return fmt.Errorf("invalid file name %s", zf.Name)
+ }
+ if err := module.CheckFilePath(name); err != nil {
+ return err
+ }
+ if err := collisions.check(name, isDir); err != nil {
+ return err
+ }
+ if isDir {
+ continue
+ }
+ if base := path.Base(name); strings.EqualFold(base, "go.mod") {
+ if base != name {
+ return fmt.Errorf("found go.mod file not in module root directory (%s)", zf.Name)
+ } else if name != "go.mod" {
+ return fmt.Errorf("found file named %s, want all lower-case go.mod", zf.Name)
+ }
+ }
+ s := int64(zf.UncompressedSize64)
+ if s < 0 || MaxZipFile-size < s {
+ return fmt.Errorf("total uncompressed size of module contents too large (max size is %d bytes)", MaxZipFile)
+ }
+ size += s
+ if name == "go.mod" && s > MaxGoMod {
+ return fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod)
+ }
+ if name == "LICENSE" && s > MaxLICENSE {
+ return fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE)
+ }
+ }
+
+ // Unzip, enforcing sizes checked earlier.
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ return err
+ }
+ for _, zf := range z.File {
+ name := zf.Name[len(prefix):]
+ if name == "" || strings.HasSuffix(name, "/") {
+ continue
+ }
+ dst := filepath.Join(dir, name)
+ if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil {
+ return err
+ }
+ w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0444)
+ if err != nil {
+ return err
+ }
+ r, err := zf.Open()
+ if err != nil {
+ w.Close()
+ return err
+ }
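+		// As in Create, read at most the declared size plus one byte;
+		// a fully consumed limit means the entry is larger than declared.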
+ lr := &io.LimitedReader{R: r, N: int64(zf.UncompressedSize64) + 1}
+ _, err = io.Copy(w, lr)
+ r.Close()
+ if err != nil {
+ w.Close()
+ return err
+ }
+ if err := w.Close(); err != nil {
+ return err
+ }
+ if lr.N <= 0 {
+ return fmt.Errorf("uncompressed size of file %s is larger than declared size (%d bytes)", zf.Name, zf.UncompressedSize64)
+ }
+ }
+
+ return nil
+}
+
+// collisionChecker finds case-insensitive name collisions and paths that
+// are listed as both files and directories.
+//
+// The keys of this map are processed with strToFold. pathInfo has the original
+// path for each folded path.
+type collisionChecker map[string]pathInfo
+
+type pathInfo struct {
+ path string
+ isDir bool
+}
+
+func (cc collisionChecker) check(p string, isDir bool) error {
+ fold := strToFold(p)
+ if other, ok := cc[fold]; ok {
+ if p != other.path {
+ return fmt.Errorf("case-insensitive file name collision: %q and %q", other.path, p)
+ }
+ if isDir != other.isDir {
+ return fmt.Errorf("entry %q is both a file and a directory", p)
+ }
+ if !isDir {
+ return fmt.Errorf("multiple entries for file %q", p)
+ }
+ // It's not an error if check is called with the same directory multiple
+ // times. check is called recursively on parent directories, so check
+ // may be called on the same directory many times.
+ } else {
+ cc[fold] = pathInfo{path: p, isDir: isDir}
+ }
+
+ if parent := path.Dir(p); parent != "." {
+ return cc.check(parent, true)
+ }
+ return nil
+}
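+
+// For example, "README.md" and "ReadMe.MD" fold to the same key and are
+// reported as a case-insensitive collision, and a file "a/b" combined with
+// a file "a/b/c" is reported as a file/directory conflict, since checking
+// "a/b/c" registers its parent "a/b" as a directory.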
+
+type zipError struct {
+ verb, path string
+ err error
+}
+
+func (e *zipError) Error() string {
+ if e.path == "" {
+ return fmt.Sprintf("%s: %v", e.verb, e.err)
+ } else {
+ return fmt.Sprintf("%s %s: %v", e.verb, e.path, e.err)
+ }
+}
+
+func (e *zipError) Unwrap() error {
+ return e.err
+}
+
+// strToFold returns a string with the property that
+// strings.EqualFold(s, t) iff strToFold(s) == strToFold(t)
+// This lets us test a large set of strings for fold-equivalent
+// duplicates without making a quadratic number of calls
+// to EqualFold. Note that strings.ToUpper and strings.ToLower
+// do not have the desired property in some corner cases.
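+// For example, strToFold("GoMod") == strToFold("gomod"), matching
+// strings.EqualFold("GoMod", "gomod").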
+func strToFold(s string) string {
+ // Fast path: all ASCII, no upper case.
+ // Most paths look like this already.
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf || 'A' <= c && c <= 'Z' {
+ goto Slow
+ }
+ }
+ return s
+
+Slow:
+ var buf bytes.Buffer
+ for _, r := range s {
+ // SimpleFold(x) cycles to the next equivalent rune > x
+ // or wraps around to smaller values. Iterate until it wraps,
+ // and we've found the minimum value.
+ for {
+ r0 := r
+ r = unicode.SimpleFold(r0)
+ if r <= r0 {
+ break
+ }
+ }
+ // Exception to allow fast path above: A-Z => a-z
+ if 'A' <= r && r <= 'Z' {
+ r += 'a' - 'A'
+ }
+ buf.WriteRune(r)
+ }
+ return buf.String()
+}