Diffstat (limited to 'libgo/go/text')
-rw-r--r--  libgo/go/text/scanner/scanner.go            670
-rw-r--r--  libgo/go/text/scanner/scanner_test.go       563
-rw-r--r--  libgo/go/text/tabwriter/tabwriter.go        561
-rw-r--r--  libgo/go/text/tabwriter/tabwriter_test.go   614
-rw-r--r--  libgo/go/text/template/doc.go               313
-rw-r--r--  libgo/go/text/template/exec.go              673
-rw-r--r--  libgo/go/text/template/exec_test.go         683
-rw-r--r--  libgo/go/text/template/funcs.go             367
-rw-r--r--  libgo/go/text/template/helper.go            241
-rw-r--r--  libgo/go/text/template/parse.go              89
-rw-r--r--  libgo/go/text/template/parse/lex.go         482
-rw-r--r--  libgo/go/text/template/parse/lex_test.go    257
-rw-r--r--  libgo/go/text/template/parse/node.go        463
-rw-r--r--  libgo/go/text/template/parse/parse.go       436
-rw-r--r--  libgo/go/text/template/parse/parse_test.go  259
-rw-r--r--  libgo/go/text/template/parse/set.go          49
-rw-r--r--  libgo/go/text/template/set.go               120
-rw-r--r--  libgo/go/text/template/set_test.go          239
-rw-r--r--  libgo/go/text/template/testdata/file1.tmpl    2
-rw-r--r--  libgo/go/text/template/testdata/file2.tmpl    2
-rw-r--r--  libgo/go/text/template/testdata/tmpl1.tmpl    1
-rw-r--r--  libgo/go/text/template/testdata/tmpl2.tmpl    1
22 files changed, 7085 insertions, 0 deletions
diff --git a/libgo/go/text/scanner/scanner.go b/libgo/go/text/scanner/scanner.go
new file mode 100644
index 0000000..f46f63d
--- /dev/null
+++ b/libgo/go/text/scanner/scanner.go
@@ -0,0 +1,670 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
+// It takes an io.Reader providing the source, which can then be tokenized
+// through repeated calls to the Scan function. For compatibility with
+// existing tools, the NUL character is not allowed (implementation
+// restriction).
+//
+// By default, a Scanner skips white space and Go comments and recognizes all
+// literals as defined by the Go language specification. It may be
+// customized to recognize only a subset of those literals and to recognize
+// different white space characters.
+//
+// Basic usage pattern:
+//
+// var s scanner.Scanner
+// s.Init(src)
+// tok := s.Scan()
+// for tok != scanner.EOF {
+// // do something with tok
+// tok = s.Scan()
+// }
+//
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "unicode"
+ "unicode/utf8"
+)
+
+// TODO(gri): Consider changing this to use the new (token) Position package.
+
+// A source position is represented by a Position value.
+// A position is valid if Line > 0.
+type Position struct {
+ Filename string // filename, if any
+ Offset int // byte offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count per line)
+}
+
+// IsValid returns true if the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "???"
+ }
+ return s
+}
+
+// Predefined mode bits to control recognition of tokens. For instance,
+// to configure a Scanner such that it recognizes only (Go) identifiers
+// and integers and skips comments, set the Scanner's Mode field to:
+//
+// ScanIdents | ScanInts | SkipComments
+//
+const (
+ ScanIdents = 1 << -Ident
+ ScanInts = 1 << -Int
+ ScanFloats = 1 << -Float // includes Ints
+ ScanChars = 1 << -Char
+ ScanStrings = 1 << -String
+ ScanRawStrings = 1 << -RawString
+ ScanComments = 1 << -Comment
+ SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
+ GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
+)
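
A minimal client-side sketch of the Mode configuration described above (the input string is arbitrary); per the SkipComments comment, ScanComments is set together with SkipComments so that comments become white space:

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader("sum := a + 42 // total"))
	// Recognize only identifiers and integers; treat comments as white space.
	s.Mode = scanner.ScanIdents | scanner.ScanInts |
		scanner.ScanComments | scanner.SkipComments
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%-6s %q\n", scanner.TokenString(tok), s.TokenText())
	}
}
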
+
+// The result of Scan is one of the following tokens or a Unicode character.
+const (
+ EOF = -(iota + 1)
+ Ident
+ Int
+ Float
+ Char
+ String
+ RawString
+ Comment
+ skipComment
+)
+
+var tokenString = map[rune]string{
+ EOF: "EOF",
+ Ident: "Ident",
+ Int: "Int",
+ Float: "Float",
+ Char: "Char",
+ String: "String",
+ RawString: "RawString",
+ Comment: "Comment",
+}
+
+// TokenString returns a (visible) string for a token or Unicode character.
+func TokenString(tok rune) string {
+ if s, found := tokenString[tok]; found {
+ return s
+ }
+ return fmt.Sprintf("%q", string(tok))
+}
+
+// GoWhitespace is the default value for the Scanner's Whitespace field.
+// Its value selects Go's white space characters.
+const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
+
+const bufLen = 1024 // at least utf8.UTFMax
+
+// A Scanner implements reading of Unicode characters and tokens from an io.Reader.
+type Scanner struct {
+ // Input
+ src io.Reader
+
+ // Source buffer
+ srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
+ srcPos int // reading position (srcBuf index)
+ srcEnd int // source end (srcBuf index)
+
+ // Source position
+ srcBufOffset int // byte offset of srcBuf[0] in source
+ line int // line count
+ column int // character count
+ lastLineLen int // length of last line in characters (for correct column reporting)
+ lastCharLen int // length of last character in bytes
+
+ // Token text buffer
+ // Typically, token text is stored completely in srcBuf, but in general
+ // the token text's head may be buffered in tokBuf while the token text's
+ // tail is stored in srcBuf.
+ tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
+ tokPos int // token text tail position (srcBuf index); valid if >= 0
+ tokEnd int // token text tail end (srcBuf index)
+
+ // One character look-ahead
+ ch rune // character before current srcPos
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(s *Scanner, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // The Mode field controls which tokens are recognized. For instance,
+ // to recognize Ints, set the ScanInts bit in Mode. The field may be
+ // changed at any time.
+ Mode uint
+
+ // The Whitespace field controls which characters are recognized
+ // as white space. To recognize a character ch <= ' ' as white space,
+ // set the ch'th bit in Whitespace (the Scanner's behavior is undefined
+ // for values ch > ' '). The field may be changed at any time.
+ Whitespace uint64
+
+ // Start position of most recently scanned token; set by Scan.
+ // Calling Init or Next invalidates the position (Line == 0).
+ // The Filename field is always left untouched by the Scanner.
+ // If an error is reported (via Error) and Position is invalid,
+ // the scanner is not inside a token. Call Pos to obtain an error
+ // position in that case.
+ Position
+}
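
A minimal sketch of a custom Whitespace mask as described for the field above: only blanks and tabs count as white space here, so newlines come back as ordinary character tokens (the input is chosen for illustration):

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader("a b\nc"))
	// Set only the bits for ' ' and '\t'; '\n' is no longer white space.
	s.Whitespace = 1<<' ' | 1<<'\t'
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%-6s %q\n", scanner.TokenString(tok), s.TokenText())
	}
}
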
+
+// Init initializes a Scanner with a new source and returns s.
+// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens,
+// and Whitespace is set to GoWhitespace.
+func (s *Scanner) Init(src io.Reader) *Scanner {
+ s.src = src
+
+ // initialize source buffer
+ // (the first call to next() will fill it by calling src.Read)
+ s.srcBuf[0] = utf8.RuneSelf // sentinel
+ s.srcPos = 0
+ s.srcEnd = 0
+
+ // initialize source position
+ s.srcBufOffset = 0
+ s.line = 1
+ s.column = 0
+ s.lastLineLen = 0
+ s.lastCharLen = 0
+
+ // initialize token text buffer
+ // (required for first call to next()).
+ s.tokPos = -1
+
+ // initialize one character look-ahead
+ s.ch = -1 // no char read yet
+
+ // initialize public fields
+ s.Error = nil
+ s.ErrorCount = 0
+ s.Mode = GoTokens
+ s.Whitespace = GoWhitespace
+ s.Line = 0 // invalidate token position
+
+ return s
+}
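
A minimal sketch of installing an Error handler: it must be set after Init (which resets Error to nil) and uses the same Position/Pos fallback as the scanner's own error method further below. The unterminated string literal is deliberate:

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader(`"unterminated`))
	s.Error = func(s *scanner.Scanner, msg string) {
		pos := s.Position
		if !pos.IsValid() {
			pos = s.Pos()
		}
		fmt.Printf("%s: %s\n", pos, msg)
	}
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		// tokens are discarded; only the error reporting matters here
	}
	fmt.Println("errors:", s.ErrorCount)
}
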
+
+// TODO(gri): The code for next() and the internal scanner state could benefit
+// from a rethink. While next() is optimized for the common ASCII
+// case, the "corrections" needed for proper position tracking undo
+// some of the attempts for fast-path optimization.
+
+// next reads and returns the next Unicode character. It is designed such
+// that only a minimal amount of work needs to be done in the common ASCII
+// case (one test to check for both ASCII and end-of-buffer, and one test
+// to check for newlines).
+func (s *Scanner) next() rune {
+ ch, width := rune(s.srcBuf[s.srcPos]), 1
+
+ if ch >= utf8.RuneSelf {
+ // uncommon case: not ASCII or not enough bytes
+ for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
+ // not enough bytes: read some more, but first
+ // save away token text if any
+ if s.tokPos >= 0 {
+ s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
+ s.tokPos = 0
+ // s.tokEnd is set by Scan()
+ }
+ // move unread bytes to beginning of buffer
+ copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
+ s.srcBufOffset += s.srcPos
+ // read more bytes
+ // (an io.Reader must return io.EOF when it reaches
+ // the end of what it is reading - simply returning
+ // n == 0 will make this loop retry forever; but the
+ // error is in the reader implementation in that case)
+ i := s.srcEnd - s.srcPos
+ n, err := s.src.Read(s.srcBuf[i:bufLen])
+ s.srcPos = 0
+ s.srcEnd = i + n
+ s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
+ if err != nil {
+ if s.srcEnd == 0 {
+ if s.lastCharLen > 0 {
+ // previous character was not EOF
+ s.column++
+ }
+ s.lastCharLen = 0
+ return EOF
+ }
+ if err != io.EOF {
+ s.error(err.Error())
+ }
+ // If err == EOF, we won't be getting more
+ // bytes; break to avoid infinite loop. If
+ // err is something else, we don't know if
+ // we can get more bytes; thus also break.
+ break
+ }
+ }
+ // at least one byte
+ ch = rune(s.srcBuf[s.srcPos])
+ if ch >= utf8.RuneSelf {
+ // uncommon case: not ASCII
+ ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
+ if ch == utf8.RuneError && width == 1 {
+ // advance for correct error position
+ s.srcPos += width
+ s.lastCharLen = width
+ s.column++
+ s.error("illegal UTF-8 encoding")
+ return ch
+ }
+ }
+ }
+
+ // advance
+ s.srcPos += width
+ s.lastCharLen = width
+ s.column++
+
+ // special situations
+ switch ch {
+ case 0:
+ // implementation restriction for compatibility with other tools
+ s.error("illegal character NUL")
+ case '\n':
+ s.line++
+ s.lastLineLen = s.column
+ s.column = 0
+ }
+
+ return ch
+}
+
+// Next reads and returns the next Unicode character.
+// It returns EOF at the end of the source. It reports
+// a read error by calling s.Error, if not nil; otherwise
+// it prints an error message to os.Stderr. Next does not
+// update the Scanner's Position field; use Pos() to
+// get the current position.
+func (s *Scanner) Next() rune {
+ s.tokPos = -1 // don't collect token text
+ s.Line = 0 // invalidate token position
+ ch := s.Peek()
+ s.ch = s.next()
+ return ch
+}
+
+// Peek returns the next Unicode character in the source without advancing
+// the scanner. It returns EOF if the scanner's position is at the last
+// character of the source.
+func (s *Scanner) Peek() rune {
+ if s.ch < 0 {
+ s.ch = s.next()
+ }
+ return s.ch
+}
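
A minimal sketch of plain rune reading with Peek and Next, as opposed to token scanning with Scan; Pos reports the position immediately after the character just read (the input is chosen for illustration):

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader("日本語"))
	for s.Peek() != scanner.EOF {
		ch := s.Next() // consume the rune Peek looked at
		fmt.Printf("read %c, now at %s\n", ch, s.Pos())
	}
}
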
+
+func (s *Scanner) error(msg string) {
+ s.ErrorCount++
+ if s.Error != nil {
+ s.Error(s, msg)
+ return
+ }
+ pos := s.Position
+ if !pos.IsValid() {
+ pos = s.Pos()
+ }
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+func (s *Scanner) scanIdentifier() rune {
+ ch := s.next() // read character after first '_' or letter
+ for ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) {
+ ch = s.next()
+ }
+ return ch
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+
+func (s *Scanner) scanMantissa(ch rune) rune {
+ for isDecimal(ch) {
+ ch = s.next()
+ }
+ return ch
+}
+
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.scanMantissa(s.next())
+ }
+ return ch
+}
+
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+func (s *Scanner) scanNumber(ch rune) (rune, rune) {
+ // isDecimal(ch)
+ if ch == '0' {
+ // int or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal int
+ ch = s.next()
+ for digitVal(ch) < 16 {
+ ch = s.next()
+ }
+ } else {
+ // octal int or float
+ seenDecimalDigit := false
+ for isDecimal(ch) {
+ if ch > '7' {
+ seenDecimalDigit = true
+ }
+ ch = s.next()
+ }
+ if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
+ // float
+ ch = s.scanFraction(ch)
+ ch = s.scanExponent(ch)
+ return Float, ch
+ }
+ // octal int
+ if seenDecimalDigit {
+ s.error("illegal octal number")
+ }
+ }
+ return Int, ch
+ }
+ // decimal int or float
+ ch = s.scanMantissa(ch)
+ if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
+ // float
+ ch = s.scanFraction(ch)
+ ch = s.scanExponent(ch)
+ return Float, ch
+ }
+ return Int, ch
+}
+
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.error("illegal char escape")
+ }
+ return ch
+}
+
+func (s *Scanner) scanEscape(quote rune) rune {
+ ch := s.next() // read character after '\\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ // nothing to do
+ ch = s.next()
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.error("illegal char escape")
+ }
+ return ch
+}
+
+func (s *Scanner) scanString(quote rune) (n int) {
+ ch := s.next() // read character after quote
+ for ch != quote {
+ if ch == '\n' || ch < 0 {
+ s.error("literal not terminated")
+ return
+ }
+ if ch == '\\' {
+ ch = s.scanEscape(quote)
+ } else {
+ ch = s.next()
+ }
+ n++
+ }
+ return
+}
+
+func (s *Scanner) scanRawString() {
+ ch := s.next() // read character after '`'
+ for ch != '`' {
+ if ch < 0 {
+ s.error("literal not terminated")
+ return
+ }
+ ch = s.next()
+ }
+}
+
+func (s *Scanner) scanChar() {
+ if s.scanString('\'') != 1 {
+ s.error("illegal char literal")
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) rune {
+ // ch == '/' || ch == '*'
+ if ch == '/' {
+ // line comment
+ ch = s.next() // read character after "//"
+ for ch != '\n' && ch >= 0 {
+ ch = s.next()
+ }
+ return ch
+ }
+
+ // general comment
+ ch = s.next() // read character after "/*"
+ for {
+ if ch < 0 {
+ s.error("comment not terminated")
+ break
+ }
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ ch = s.next()
+ break
+ }
+ }
+ return ch
+}
+
+// Scan reads the next token or Unicode character from source and returns it.
+// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set.
+// It returns EOF at the end of the source. It reports scanner errors (read and
+// token errors) by calling s.Error, if not nil; otherwise it prints an error
+// message to os.Stderr.
+func (s *Scanner) Scan() rune {
+ ch := s.Peek()
+
+ // reset token text position
+ s.tokPos = -1
+ s.Line = 0
+
+redo:
+ // skip white space
+ for s.Whitespace&(1<<uint(ch)) != 0 {
+ ch = s.next()
+ }
+
+ // start collecting token text
+ s.tokBuf.Reset()
+ s.tokPos = s.srcPos - s.lastCharLen
+
+ // set token position
+ // (this is a slightly optimized version of the code in Pos())
+ s.Offset = s.srcBufOffset + s.tokPos
+ if s.column > 0 {
+ // common case: last character was not a '\n'
+ s.Line = s.line
+ s.Column = s.column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.Line = s.line - 1
+ s.Column = s.lastLineLen
+ }
+
+ // determine token value
+ tok := ch
+ switch {
+ case unicode.IsLetter(ch) || ch == '_':
+ if s.Mode&ScanIdents != 0 {
+ tok = Ident
+ ch = s.scanIdentifier()
+ } else {
+ ch = s.next()
+ }
+ case isDecimal(ch):
+ if s.Mode&(ScanInts|ScanFloats) != 0 {
+ tok, ch = s.scanNumber(ch)
+ } else {
+ ch = s.next()
+ }
+ default:
+ switch ch {
+ case '"':
+ if s.Mode&ScanStrings != 0 {
+ s.scanString('"')
+ tok = String
+ }
+ ch = s.next()
+ case '\'':
+ if s.Mode&ScanChars != 0 {
+ s.scanChar()
+ tok = Char
+ }
+ ch = s.next()
+ case '.':
+ ch = s.next()
+ if isDecimal(ch) && s.Mode&ScanFloats != 0 {
+ tok = Float
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '/':
+ ch = s.next()
+ if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
+ if s.Mode&SkipComments != 0 {
+ s.tokPos = -1 // don't collect token text
+ ch = s.scanComment(ch)
+ goto redo
+ }
+ ch = s.scanComment(ch)
+ tok = Comment
+ }
+ case '`':
+ if s.Mode&ScanRawStrings != 0 {
+ s.scanRawString()
+ tok = String
+ }
+ ch = s.next()
+ default:
+ ch = s.next()
+ }
+ }
+
+ // end of token text
+ s.tokEnd = s.srcPos - s.lastCharLen
+
+ s.ch = ch
+ return tok
+}
+
+// Pos returns the position of the character immediately after
+// the character or token returned by the last call to Next or Scan.
+func (s *Scanner) Pos() (pos Position) {
+ pos.Filename = s.Filename
+ pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
+ switch {
+ case s.column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.line
+ pos.Column = s.column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ pos.Line = s.line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// TokenText returns the string corresponding to the most recently scanned token.
+// Valid after calling Scan().
+func (s *Scanner) TokenText() string {
+ if s.tokPos < 0 {
+ // no token text
+ return ""
+ }
+
+ if s.tokEnd < 0 {
+ // if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
+ s.tokEnd = s.tokPos
+ }
+
+ if s.tokBuf.Len() == 0 {
+ // common case: the entire token text is still in srcBuf
+ return string(s.srcBuf[s.tokPos:s.tokEnd])
+ }
+
+ // part of the token text was saved in tokBuf: save the rest in
+ // tokBuf as well and return its content
+ s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
+ s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
+ return s.tokBuf.String()
+}
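
Tying the pieces of this file together, a minimal sketch of a Scan loop that prints the token position (the embedded Position is set by Scan; Filename is only ever set by the caller), the token class, and the token text:

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	const src = `x := 3.14 /* pi */ + y`
	var s scanner.Scanner
	s.Init(strings.NewReader(src))
	s.Filename = "example" // reported as part of each position
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s\t%s\t%q\n", s.Position, scanner.TokenString(tok), s.TokenText())
	}
}
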
diff --git a/libgo/go/text/scanner/scanner_test.go b/libgo/go/text/scanner/scanner_test.go
new file mode 100644
index 0000000..bb3adb5
--- /dev/null
+++ b/libgo/go/text/scanner/scanner_test.go
@@ -0,0 +1,563 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+// A StringReader delivers its data one string segment at a time via Read.
+type StringReader struct {
+ data []string
+ step int
+}
+
+func (r *StringReader) Read(p []byte) (n int, err error) {
+ if r.step < len(r.data) {
+ s := r.data[r.step]
+ n = copy(p, s)
+ r.step++
+ } else {
+ err = io.EOF
+ }
+ return
+}
+
+func readRuneSegments(t *testing.T, segments []string) {
+ got := ""
+ want := strings.Join(segments, "")
+ s := new(Scanner).Init(&StringReader{data: segments})
+ for {
+ ch := s.Next()
+ if ch == EOF {
+ break
+ }
+ got += string(ch)
+ }
+ if got != want {
+ t.Errorf("segments=%v got=%s want=%s", segments, got, want)
+ }
+}
+
+var segmentList = [][]string{
+ {},
+ {""},
+ {"日", "本語"},
+ {"\u65e5", "\u672c", "\u8a9e"},
+ {"\U000065e5", " ", "\U0000672c", "\U00008a9e"},
+ {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"},
+ {"Hello", ", ", "World", "!"},
+ {"Hello", ", ", "", "World", "!"},
+}
+
+func TestNext(t *testing.T) {
+ for _, s := range segmentList {
+ readRuneSegments(t, s)
+ }
+}
+
+type token struct {
+ tok rune
+ text string
+}
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+var tokenList = []token{
+ {Comment, "// line comments"},
+ {Comment, "//"},
+ {Comment, "////"},
+ {Comment, "// comment"},
+ {Comment, "// /* comment */"},
+ {Comment, "// // comment //"},
+ {Comment, "//" + f100},
+
+ {Comment, "// general comments"},
+ {Comment, "/**/"},
+ {Comment, "/***/"},
+ {Comment, "/* comment */"},
+ {Comment, "/* // comment */"},
+ {Comment, "/* /* comment */"},
+ {Comment, "/*\n comment\n*/"},
+ {Comment, "/*" + f100 + "*/"},
+
+ {Comment, "// identifiers"},
+ {Ident, "a"},
+ {Ident, "a0"},
+ {Ident, "foobar"},
+ {Ident, "abc123"},
+ {Ident, "LGTM"},
+ {Ident, "_"},
+ {Ident, "_abc123"},
+ {Ident, "abc123_"},
+ {Ident, "_abc_123_"},
+ {Ident, "_äöü"},
+ {Ident, "_本"},
+ {Ident, "äöü"},
+ {Ident, "本"},
+ {Ident, "a۰۱۸"},
+ {Ident, "foo६४"},
+ {Ident, "bar9876"},
+ {Ident, f100},
+
+ {Comment, "// decimal ints"},
+ {Int, "0"},
+ {Int, "1"},
+ {Int, "9"},
+ {Int, "42"},
+ {Int, "1234567890"},
+
+ {Comment, "// octal ints"},
+ {Int, "00"},
+ {Int, "01"},
+ {Int, "07"},
+ {Int, "042"},
+ {Int, "01234567"},
+
+ {Comment, "// hexadecimal ints"},
+ {Int, "0x0"},
+ {Int, "0x1"},
+ {Int, "0xf"},
+ {Int, "0x42"},
+ {Int, "0x123456789abcDEF"},
+ {Int, "0x" + f100},
+ {Int, "0X0"},
+ {Int, "0X1"},
+ {Int, "0XF"},
+ {Int, "0X42"},
+ {Int, "0X123456789abcDEF"},
+ {Int, "0X" + f100},
+
+ {Comment, "// floats"},
+ {Float, "0."},
+ {Float, "1."},
+ {Float, "42."},
+ {Float, "01234567890."},
+ {Float, ".0"},
+ {Float, ".1"},
+ {Float, ".42"},
+ {Float, ".0123456789"},
+ {Float, "0.0"},
+ {Float, "1.0"},
+ {Float, "42.0"},
+ {Float, "01234567890.0"},
+ {Float, "0e0"},
+ {Float, "1e0"},
+ {Float, "42e0"},
+ {Float, "01234567890e0"},
+ {Float, "0E0"},
+ {Float, "1E0"},
+ {Float, "42E0"},
+ {Float, "01234567890E0"},
+ {Float, "0e+10"},
+ {Float, "1e-10"},
+ {Float, "42e+10"},
+ {Float, "01234567890e-10"},
+ {Float, "0E+10"},
+ {Float, "1E-10"},
+ {Float, "42E+10"},
+ {Float, "01234567890E-10"},
+
+ {Comment, "// chars"},
+ {Char, `' '`},
+ {Char, `'a'`},
+ {Char, `'本'`},
+ {Char, `'\a'`},
+ {Char, `'\b'`},
+ {Char, `'\f'`},
+ {Char, `'\n'`},
+ {Char, `'\r'`},
+ {Char, `'\t'`},
+ {Char, `'\v'`},
+ {Char, `'\''`},
+ {Char, `'\000'`},
+ {Char, `'\777'`},
+ {Char, `'\x00'`},
+ {Char, `'\xff'`},
+ {Char, `'\u0000'`},
+ {Char, `'\ufA16'`},
+ {Char, `'\U00000000'`},
+ {Char, `'\U0000ffAB'`},
+
+ {Comment, "// strings"},
+ {String, `" "`},
+ {String, `"a"`},
+ {String, `"本"`},
+ {String, `"\a"`},
+ {String, `"\b"`},
+ {String, `"\f"`},
+ {String, `"\n"`},
+ {String, `"\r"`},
+ {String, `"\t"`},
+ {String, `"\v"`},
+ {String, `"\""`},
+ {String, `"\000"`},
+ {String, `"\777"`},
+ {String, `"\x00"`},
+ {String, `"\xff"`},
+ {String, `"\u0000"`},
+ {String, `"\ufA16"`},
+ {String, `"\U00000000"`},
+ {String, `"\U0000ffAB"`},
+ {String, `"` + f100 + `"`},
+
+ {Comment, "// raw strings"},
+ {String, "``"},
+ {String, "`\\`"},
+ {String, "`" + "\n\n/* foobar */\n\n" + "`"},
+ {String, "`" + f100 + "`"},
+
+ {Comment, "// individual characters"},
+ // NUL character is not allowed
+ {'\x01', "\x01"},
+ {' ' - 1, string(' ' - 1)},
+ {'+', "+"},
+ {'/', "/"},
+ {'.', "."},
+ {'~', "~"},
+ {'(', "("},
+}
+
+func makeSource(pattern string) *bytes.Buffer {
+ var buf bytes.Buffer
+ for _, k := range tokenList {
+ fmt.Fprintf(&buf, pattern, k.text)
+ }
+ return &buf
+}
+
+func checkTok(t *testing.T, s *Scanner, line int, got, want rune, text string) {
+ if got != want {
+ t.Fatalf("tok = %s, want %s for %q", TokenString(got), TokenString(want), text)
+ }
+ if s.Line != line {
+ t.Errorf("line = %d, want %d for %q", s.Line, line, text)
+ }
+ stext := s.TokenText()
+ if stext != text {
+ t.Errorf("text = %q, want %q", stext, text)
+ } else {
+ // check idempotency of TokenText() call
+ stext = s.TokenText()
+ if stext != text {
+ t.Errorf("text = %q, want %q (idempotency check)", stext, text)
+ }
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
+
+func testScan(t *testing.T, mode uint) {
+ s := new(Scanner).Init(makeSource(" \t%s\n"))
+ s.Mode = mode
+ tok := s.Scan()
+ line := 1
+ for _, k := range tokenList {
+ if mode&SkipComments == 0 || k.tok != Comment {
+ checkTok(t, s, line, tok, k.tok, k.text)
+ tok = s.Scan()
+ }
+ line += countNewlines(k.text) + 1 // each token is on a new line
+ }
+ checkTok(t, s, line, tok, EOF, "")
+}
+
+func TestScan(t *testing.T) {
+ testScan(t, GoTokens)
+ testScan(t, GoTokens&^SkipComments)
+}
+
+func TestPosition(t *testing.T) {
+ src := makeSource("\t\t\t\t%s\n")
+ s := new(Scanner).Init(src)
+ s.Mode = GoTokens &^ SkipComments
+ s.Scan()
+ pos := Position{"", 4, 1, 5}
+ for _, k := range tokenList {
+ if s.Offset != pos.Offset {
+ t.Errorf("offset = %d, want %d for %q", s.Offset, pos.Offset, k.text)
+ }
+ if s.Line != pos.Line {
+ t.Errorf("line = %d, want %d for %q", s.Line, pos.Line, k.text)
+ }
+ if s.Column != pos.Column {
+ t.Errorf("column = %d, want %d for %q", s.Column, pos.Column, k.text)
+ }
+ pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
+ pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+ s.Scan()
+ }
+ // make sure there were no token-internal errors reported by scanner
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanZeroMode(t *testing.T) {
+ src := makeSource("%s\n")
+ str := src.String()
+ s := new(Scanner).Init(src)
+ s.Mode = 0 // don't recognize any token classes
+ s.Whitespace = 0 // don't skip any whitespace
+ tok := s.Scan()
+ for i, ch := range str {
+ if tok != ch {
+ t.Fatalf("%d. tok = %s, want %s", i, TokenString(tok), TokenString(ch))
+ }
+ tok = s.Scan()
+ }
+ if tok != EOF {
+ t.Fatalf("tok = %s, want EOF", TokenString(tok))
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func testScanSelectedMode(t *testing.T, mode uint, class rune) {
+ src := makeSource("%s\n")
+ s := new(Scanner).Init(src)
+ s.Mode = mode
+ tok := s.Scan()
+ for tok != EOF {
+ if tok < 0 && tok != class {
+ t.Fatalf("tok = %s, want %s", TokenString(tok), TokenString(class))
+ }
+ tok = s.Scan()
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanSelectedMask(t *testing.T) {
+ testScanSelectedMode(t, 0, 0)
+ testScanSelectedMode(t, ScanIdents, Ident)
+ // Don't test ScanInts and ScanNumbers since some parts of
+ // the floats in the source look like (illegal) octal ints
+ // and ScanNumbers may return either Int or Float.
+ testScanSelectedMode(t, ScanChars, Char)
+ testScanSelectedMode(t, ScanStrings, String)
+ testScanSelectedMode(t, SkipComments, 0)
+ testScanSelectedMode(t, ScanComments, Comment)
+}
+
+func TestScanNext(t *testing.T) {
+ s := new(Scanner).Init(bytes.NewBufferString("if a == bcd /* comment */ {\n\ta += c\n} // line comment ending in eof"))
+ checkTok(t, s, 1, s.Scan(), Ident, "if")
+ checkTok(t, s, 1, s.Scan(), Ident, "a")
+ checkTok(t, s, 1, s.Scan(), '=', "=")
+ checkTok(t, s, 0, s.Next(), '=', "")
+ checkTok(t, s, 0, s.Next(), ' ', "")
+ checkTok(t, s, 0, s.Next(), 'b', "")
+ checkTok(t, s, 1, s.Scan(), Ident, "cd")
+ checkTok(t, s, 1, s.Scan(), '{', "{")
+ checkTok(t, s, 2, s.Scan(), Ident, "a")
+ checkTok(t, s, 2, s.Scan(), '+', "+")
+ checkTok(t, s, 0, s.Next(), '=', "")
+ checkTok(t, s, 2, s.Scan(), Ident, "c")
+ checkTok(t, s, 3, s.Scan(), '}', "}")
+ checkTok(t, s, 3, s.Scan(), -1, "")
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanWhitespace(t *testing.T) {
+ var buf bytes.Buffer
+ var ws uint64
+ // start at 1, NUL character is not allowed
+ for ch := byte(1); ch < ' '; ch++ {
+ buf.WriteByte(ch)
+ ws |= 1 << ch
+ }
+ const orig = 'x'
+ buf.WriteByte(orig)
+
+ s := new(Scanner).Init(&buf)
+ s.Mode = 0
+ s.Whitespace = ws
+ tok := s.Scan()
+ if tok != orig {
+ t.Errorf("tok = %s, want %s", TokenString(tok), TokenString(orig))
+ }
+}
+
+func testError(t *testing.T, src, pos, msg string, tok rune) {
+ s := new(Scanner).Init(bytes.NewBufferString(src))
+ errorCalled := false
+ s.Error = func(s *Scanner, m string) {
+ if !errorCalled {
+ // only look at first error
+ if p := s.Pos().String(); p != pos {
+ t.Errorf("pos = %q, want %q for %q", p, pos, src)
+ }
+ if m != msg {
+ t.Errorf("msg = %q, want %q for %q", m, msg, src)
+ }
+ errorCalled = true
+ }
+ }
+ tk := s.Scan()
+ if tk != tok {
+ t.Errorf("tok = %s, want %s for %q", TokenString(tk), TokenString(tok), src)
+ }
+ if !errorCalled {
+ t.Errorf("error handler not called for %q", src)
+ }
+ if s.ErrorCount == 0 {
+ t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func TestError(t *testing.T) {
+ testError(t, "\x00", "1:1", "illegal character NUL", 0)
+ testError(t, "\x80", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
+ testError(t, "\xff", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
+
+ testError(t, "a\x00", "1:2", "illegal character NUL", Ident)
+ testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", Ident)
+ testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", Ident)
+
+ testError(t, `"a`+"\x00", "1:3", "illegal character NUL", String)
+ testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", String)
+ testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", String)
+
+ testError(t, "`a"+"\x00", "1:3", "illegal character NUL", String)
+ testError(t, "`ab"+"\x80", "1:4", "illegal UTF-8 encoding", String)
+ testError(t, "`abc"+"\xff", "1:5", "illegal UTF-8 encoding", String)
+
+ testError(t, `'\"'`, "1:3", "illegal char escape", Char)
+ testError(t, `"\'"`, "1:3", "illegal char escape", String)
+
+ testError(t, `01238`, "1:6", "illegal octal number", Int)
+ testError(t, `'aa'`, "1:4", "illegal char literal", Char)
+
+ testError(t, `'`, "1:2", "literal not terminated", Char)
+ testError(t, `'`+"\n", "1:2", "literal not terminated", Char)
+ testError(t, `"abc`, "1:5", "literal not terminated", String)
+ testError(t, `"abc`+"\n", "1:5", "literal not terminated", String)
+ testError(t, "`abc\n", "2:1", "literal not terminated", String)
+ testError(t, `/*/`, "1:4", "comment not terminated", EOF)
+}
+
+func checkPos(t *testing.T, got, want Position) {
+ if got.Offset != want.Offset || got.Line != want.Line || got.Column != want.Column {
+ t.Errorf("got offset, line, column = %d, %d, %d; want %d, %d, %d",
+ got.Offset, got.Line, got.Column, want.Offset, want.Line, want.Column)
+ }
+}
+
+func checkNextPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
+ if ch := s.Next(); ch != char {
+ t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
+ }
+ want := Position{Offset: offset, Line: line, Column: column}
+ checkPos(t, s.Pos(), want)
+}
+
+func checkScanPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
+ want := Position{Offset: offset, Line: line, Column: column}
+ checkPos(t, s.Pos(), want)
+ if ch := s.Scan(); ch != char {
+ t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
+ if string(ch) != s.TokenText() {
+ t.Errorf("tok = %q, want %q", s.TokenText(), string(ch))
+ }
+ }
+ checkPos(t, s.Position, want)
+}
+
+func TestPos(t *testing.T) {
+ // corner case: empty source
+ s := new(Scanner).Init(bytes.NewBufferString(""))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ s.Peek() // peek doesn't affect the position
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+
+ // corner case: source with only a newline
+ s = new(Scanner).Init(bytes.NewBufferString("\n"))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ checkNextPos(t, s, 1, 2, 1, '\n')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 1, 2, 1, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // corner case: source with only a single character
+ s = new(Scanner).Init(bytes.NewBufferString("本"))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ checkNextPos(t, s, 3, 1, 2, '本')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 3, 1, 2, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // positions after calling Next
+ s = new(Scanner).Init(bytes.NewBufferString(" foo६४ \n\n本語\n"))
+ checkNextPos(t, s, 1, 1, 2, ' ')
+ s.Peek() // peek doesn't affect the position
+ checkNextPos(t, s, 2, 1, 3, ' ')
+ checkNextPos(t, s, 3, 1, 4, 'f')
+ checkNextPos(t, s, 4, 1, 5, 'o')
+ checkNextPos(t, s, 5, 1, 6, 'o')
+ checkNextPos(t, s, 8, 1, 7, '६')
+ checkNextPos(t, s, 11, 1, 8, '४')
+ checkNextPos(t, s, 12, 1, 9, ' ')
+ checkNextPos(t, s, 13, 1, 10, ' ')
+ checkNextPos(t, s, 14, 2, 1, '\n')
+ checkNextPos(t, s, 15, 3, 1, '\n')
+ checkNextPos(t, s, 18, 3, 2, '本')
+ checkNextPos(t, s, 21, 3, 3, '語')
+ checkNextPos(t, s, 22, 4, 1, '\n')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 22, 4, 1, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // positions after calling Scan
+ s = new(Scanner).Init(bytes.NewBufferString("abc\n本語\n\nx"))
+ s.Mode = 0
+ s.Whitespace = 0
+ checkScanPos(t, s, 0, 1, 1, 'a')
+ s.Peek() // peek doesn't affect the position
+ checkScanPos(t, s, 1, 1, 2, 'b')
+ checkScanPos(t, s, 2, 1, 3, 'c')
+ checkScanPos(t, s, 3, 1, 4, '\n')
+ checkScanPos(t, s, 4, 2, 1, '本')
+ checkScanPos(t, s, 7, 2, 2, '語')
+ checkScanPos(t, s, 10, 2, 3, '\n')
+ checkScanPos(t, s, 11, 3, 1, '\n')
+ checkScanPos(t, s, 12, 4, 1, 'x')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 13, 4, 2, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
diff --git a/libgo/go/text/tabwriter/tabwriter.go b/libgo/go/text/tabwriter/tabwriter.go
new file mode 100644
index 0000000..c136ca2
--- /dev/null
+++ b/libgo/go/text/tabwriter/tabwriter.go
@@ -0,0 +1,561 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package uses the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "unicode/utf8"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+ size int // cell size in bytes
+ width int // cell width in runes
+ htab bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are simply passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+ // configuration
+ output io.Writer
+ minwidth int
+ tabwidth int
+ padding int
+ padbytes [8]byte
+ flags uint
+
+ // current state
+ buf bytes.Buffer // collected text excluding tabs or line breaks
+ pos int // buffer position up to which cell.width of incomplete cell has been computed
+ cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+ endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+ lines [][]cell // list of lines; each line is a list of cells
+ widths []int // list of column widths in runes - re-used during formatting
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+ b.buf.Reset()
+ b.pos = 0
+ b.cell = cell{}
+ b.endChar = 0
+ b.lines = b.lines[0:0]
+ b.widths = b.widths[0:0]
+ b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+// (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+// position pos; html tags and entities are excluded from this width if html
+// filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+// which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+// formatting; it is kept in Writer because it's re-used
+//
+// |<---------- size ---------->|
+// | |
+// |<- width ->|<- ignored ->| |
+// | | | |
+// [---processed---tab------------<tag>...</tag>...]
+// ^ ^ ^
+// | | |
+// buf start of incomplete cell pos
+
+// Formatting can be controlled with these flags.
+const (
+ // Ignore html tags and treat entities (starting with '&'
+ // and ending in ';') as single characters (width = 1).
+ FilterHTML uint = 1 << iota
+
+ // Strip Escape characters bracketing escaped text segments
+ // instead of passing them through unchanged with the text.
+ StripEscape
+
+ // Force right-alignment of cell content.
+ // Default is left-alignment.
+ AlignRight
+
+ // Handle empty columns as if they were not present in
+ // the input in the first place.
+ DiscardEmptyColumns
+
+ // Always use tabs for indentation columns (i.e., padding of
+ // leading empty cells on the left) independent of padchar.
+ TabIndent
+
+ // Print a vertical bar ('|') between columns (after formatting).
+ // Discarded columns appear as zero-width columns ("||").
+ Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+// and cells are left-aligned independent of align_left
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+// To format in tab-separated columns with a tab stop of 8:
+// b.Init(w, 8, 1, 8, '\t', 0);
+//
+// To format in space-separated columns with at least 4 spaces between columns:
+// b.Init(w, 0, 4, 8, ' ', 0);
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ if minwidth < 0 || tabwidth < 0 || padding < 0 {
+ panic("negative minwidth, tabwidth, or padding")
+ }
+ b.output = output
+ b.minwidth = minwidth
+ b.tabwidth = tabwidth
+ b.padding = padding
+ for i := range b.padbytes {
+ b.padbytes[i] = padchar
+ }
+ if padchar == '\t' {
+ // tab padding enforces left-alignment
+ flags &^= AlignRight
+ }
+ b.flags = flags
+
+ b.reset()
+
+ return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+ pos := 0
+ for i, line := range b.lines {
+ print("(", i, ") ")
+ for _, c := range line {
+ print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+ pos += c.size
+ }
+ print("\n")
+ }
+ print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+ err error
+}
+
+func (b *Writer) write0(buf []byte) {
+ n, err := b.output.Write(buf)
+ if n != len(buf) && err == nil {
+ err = os.EIO
+ }
+ if err != nil {
+ panic(osError{err})
+ }
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+ for n > len(src) {
+ b.write0(src)
+ n -= len(src)
+ }
+ b.write0(src[0:n])
+}
+
+var (
+ newline = []byte{'\n'}
+ tabs = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+ if b.padbytes[0] == '\t' || useTabs {
+ // padding is done with tabs
+ if b.tabwidth == 0 {
+ return // tabs have no width - can't do any padding
+ }
+ // make cellw the smallest multiple of b.tabwidth
+ cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+ n := cellw - textw // amount of padding
+ if n < 0 {
+ panic("internal error")
+ }
+ b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+ return
+ }
+
+ // padding is done with non-tab characters
+ b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ for i := line0; i < line1; i++ {
+ line := b.lines[i]
+
+ // if TabIndent is set, use tabs to pad leading empty cells
+ useTabs := b.flags&TabIndent != 0
+
+ for j, c := range line {
+ if j > 0 && b.flags&Debug != 0 {
+ // indicate column break
+ b.write0(vbar)
+ }
+
+ if c.size == 0 {
+ // empty cell
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], useTabs)
+ }
+ } else {
+ // non-empty cell
+ useTabs = false
+ if b.flags&AlignRight == 0 { // align left
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ } else { // align right
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ }
+ }
+ }
+
+ if i+1 == len(b.lines) {
+ // last buffered line - we don't have a newline, so just write
+ // any outstanding buffered data
+ b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+ pos += b.cell.size
+ } else {
+ // not the last line - write newline
+ b.write0(newline)
+ }
+ }
+ return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ column := len(b.widths)
+ for this := line0; this < line1; this++ {
+ line := b.lines[this]
+
+ if column < len(line)-1 {
+ // cell exists in this column => this line
+ // has more cells than the previous line
+ // (the last cell per line is ignored because cells are
+ // tab-terminated; the last cell per line describes the
+ // text before the newline/formfeed and does not belong
+ // to a column)
+
+ // print unprinted lines until beginning of block
+ pos = b.writeLines(pos, line0, this)
+ line0 = this
+
+ // column block begin
+ width := b.minwidth // minimal column width
+ discardable := true // true if all cells in this column are empty and "soft"
+ for ; this < line1; this++ {
+ line = b.lines[this]
+ if column < len(line)-1 {
+ // cell exists in this column
+ c := line[column]
+ // update width
+ if w := c.width + b.padding; w > width {
+ width = w
+ }
+ // update discardable
+ if c.width > 0 || c.htab {
+ discardable = false
+ }
+ } else {
+ break
+ }
+ }
+ // column block end
+
+ // discard empty columns if necessary
+ if discardable && b.flags&DiscardEmptyColumns != 0 {
+ width = 0
+ }
+
+ // format and print all columns to the right of this column
+ // (we know the widths of this column and all columns to the left)
+ b.widths = append(b.widths, width) // push width
+ pos = b.format(pos, line0, this)
+ b.widths = b.widths[0 : len(b.widths)-1] // pop width
+ line0 = this
+ }
+ }
+
+ // print unprinted lines until end
+ return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+ b.buf.Write(text)
+ b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+ b.cell.width += utf8.RuneCount(b.buf.Bytes()[b.pos:b.buf.Len()])
+ b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
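
A minimal sketch of the Escape mechanism described above, assuming StripEscape: the escaped tab does not terminate its cell and the bracketing 0xff bytes are stripped from the output:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '.', tabwriter.StripEscape)
	// "\xff" is the Escape byte; the tab between the two Escapes is literal text.
	fmt.Fprint(w, "no\xff\t\xffbreak\there\tend\n")
	w.Flush()
}
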
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+ switch ch {
+ case Escape:
+ b.endChar = Escape
+ case '<':
+ b.endChar = '>'
+ case '&':
+ b.endChar = ';'
+ }
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+ switch b.endChar {
+ case Escape:
+ b.updateWidth()
+ if b.flags&StripEscape == 0 {
+ b.cell.width -= 2 // don't count the Escape chars
+ }
+ case '>': // tag of zero width
+ case ';':
+ b.cell.width++ // entity, count as one rune
+ }
+ b.pos = b.buf.Len()
+ b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+ b.cell.htab = htab
+ line := &b.lines[len(b.lines)-1]
+ *line = append(*line, b.cell)
+ b.cell = cell{}
+ return len(*line)
+}
+
+func handlePanic(err *error) {
+ if e := recover(); e != nil {
+ *err = e.(osError).err // re-panics if it's not a local osError
+ }
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is simply considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+ defer b.reset() // even in the presence of errors
+ defer handlePanic(&err)
+
+ // add current cell if not empty
+ if b.cell.size > 0 {
+ if b.endChar != 0 {
+ // inside escape - terminate it even if incomplete
+ b.endEscape()
+ }
+ b.terminateCell(false)
+ }
+
+ // format contents of buffer
+ b.format(0, 0, len(b.lines))
+
+ return
+}
+
+var hbar = []byte("---\n")
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+ defer handlePanic(&err)
+
+ // split text into cells
+ n = 0
+ for i, ch := range buf {
+ if b.endChar == 0 {
+ // outside escape
+ switch ch {
+ case '\t', '\v', '\n', '\f':
+ // end of cell
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i + 1 // ch consumed
+ ncells := b.terminateCell(ch == '\t')
+ if ch == '\n' || ch == '\f' {
+ // terminate line
+ b.addLine()
+ if ch == '\f' || ncells == 1 {
+ // A '\f' always forces a flush. Otherwise, the previous
+ // line has only one cell, which has no impact on the
+ // formatting of the following lines (the last cell per
+ // line is ignored by format()), so we can flush the
+ // Writer contents.
+ if err = b.Flush(); err != nil {
+ return
+ }
+ if ch == '\f' && b.flags&Debug != 0 {
+ // indicate section break
+ b.write0(hbar)
+ }
+ }
+ }
+
+ case Escape:
+ // start of escaped sequence
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ if b.flags&StripEscape != 0 {
+ n++ // strip Escape
+ }
+ b.startEscape(Escape)
+
+ case '<', '&':
+ // possibly an html tag/entity
+ if b.flags&FilterHTML != 0 {
+ // begin of tag/entity
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ b.startEscape(ch)
+ }
+ }
+
+ } else {
+ // inside escape
+ if ch == b.endChar {
+ // end of tag/entity
+ j := i + 1
+ if ch == Escape && b.flags&StripEscape != 0 {
+ j = i // strip Escape
+ }
+ b.append(buf[n:j])
+ n = i + 1 // ch consumed
+ b.endEscape()
+ }
+ }
+ }
+
+ // append leftover text
+ b.append(buf[n:])
+ n = len(buf)
+ return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
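
Rounding off the tabwriter API, a minimal end-to-end sketch: NewWriter with blank padding, tab-terminated cells, and a final Flush (the column contents are illustrative, borrowed from the diffstat above):

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// minwidth 0, tabwidth 8, padding 2, pad with blanks, no flags.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintln(w, "file\tlines\t")
	fmt.Fprintln(w, "scanner.go\t670\t")
	fmt.Fprintln(w, "tabwriter.go\t561\t")
	w.Flush() // buffered lines are only written out on Flush
}

Because cells are tab-terminated rather than tab-separated, the trailing "\t" before each newline is what makes the last column participate in the alignment.
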
diff --git a/libgo/go/text/tabwriter/tabwriter_test.go b/libgo/go/text/tabwriter/tabwriter_test.go
new file mode 100644
index 0000000..1ffb330
--- /dev/null
+++ b/libgo/go/text/tabwriter/tabwriter_test.go
@@ -0,0 +1,614 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tabwriter
+
+import (
+ "io"
+ "testing"
+)
+
+type buffer struct {
+ a []byte
+}
+
+func (b *buffer) init(n int) { b.a = make([]byte, n)[0:0] }
+
+func (b *buffer) clear() { b.a = b.a[0:0] }
+
+func (b *buffer) Write(buf []byte) (written int, err error) {
+ n := len(b.a)
+ m := len(buf)
+ if n+m <= cap(b.a) {
+ b.a = b.a[0 : n+m]
+ for i := 0; i < m; i++ {
+ b.a[n+i] = buf[i]
+ }
+ } else {
+ panic("buffer.Write: buffer too small")
+ }
+ return len(buf), nil
+}
+
+func (b *buffer) String() string { return string(b.a) }
+
+func write(t *testing.T, testname string, w *Writer, src string) {
+ written, err := io.WriteString(w, src)
+ if err != nil {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err)
+ }
+ if written != len(src) {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- written = %d, len(src) = %d\n", testname, src, written, len(src))
+ }
+}
+
+func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
+ err := w.Flush()
+ if err != nil {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err)
+ }
+
+ res := b.String()
+ if res != expected {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- found:\n%q\n--- expected:\n%q\n", testname, src, res, expected)
+ }
+}
+
+func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
+ var b buffer
+ b.init(1000)
+
+ var w Writer
+ w.Init(&b, minwidth, tabwidth, padding, padchar, flags)
+
+ // write all at once
+ title := testname + " (written all at once)"
+ b.clear()
+ write(t, title, &w, src)
+ verify(t, title, &w, &b, src, expected)
+
+ // write byte-by-byte
+ title = testname + " (written byte-by-byte)"
+ b.clear()
+ for i := 0; i < len(src); i++ {
+ write(t, title, &w, src[i:i+1])
+ }
+ verify(t, title, &w, &b, src, expected)
+
+ // write using Fibonacci slice sizes
+ title = testname + " (written in fibonacci slices)"
+ b.clear()
+ for i, d := 0, 0; i < len(src); {
+ write(t, title, &w, src[i:i+d])
+ i, d = i+d, d+1
+ if i+d > len(src) {
+ d = len(src) - i
+ }
+ }
+ verify(t, title, &w, &b, src, expected)
+}
+
+var tests = []struct {
+ testname string
+ minwidth, tabwidth, padding int
+ padchar byte
+ flags uint
+ src, expected string
+}{
+ {
+ "1a",
+ 8, 0, 1, '.', 0,
+ "",
+ "",
+ },
+
+ {
+ "1a debug",
+ 8, 0, 1, '.', Debug,
+ "",
+ "",
+ },
+
+ {
+ "1b esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\xff",
+ "",
+ },
+
+ {
+ "1b esc",
+ 8, 0, 1, '.', 0,
+ "\xff\xff",
+ "\xff\xff",
+ },
+
+ {
+ "1c esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\t\xff",
+ "\t",
+ },
+
+ {
+ "1c esc",
+ 8, 0, 1, '.', 0,
+ "\xff\t\xff",
+ "\xff\t\xff",
+ },
+
+ {
+ "1d esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\"foo\t\n\tbar\"\xff",
+ "\"foo\t\n\tbar\"",
+ },
+
+ {
+ "1d esc",
+ 8, 0, 1, '.', 0,
+ "\xff\"foo\t\n\tbar\"\xff",
+ "\xff\"foo\t\n\tbar\"\xff",
+ },
+
+ {
+ "1e esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "abc\xff\tdef", // unterminated escape
+ "abc\tdef",
+ },
+
+ {
+ "1e esc",
+ 8, 0, 1, '.', 0,
+ "abc\xff\tdef", // unterminated escape
+ "abc\xff\tdef",
+ },
+
+ {
+ "2",
+ 8, 0, 1, '.', 0,
+ "\n\n\n",
+ "\n\n\n",
+ },
+
+ {
+ "3",
+ 8, 0, 1, '.', 0,
+ "a\nb\nc",
+ "a\nb\nc",
+ },
+
+ {
+ "4a",
+ 8, 0, 1, '.', 0,
+ "\t", // '\t' terminates an empty cell on last line - nothing to print
+ "",
+ },
+
+ {
+ "4b",
+ 8, 0, 1, '.', AlignRight,
+ "\t", // '\t' terminates an empty cell on last line - nothing to print
+ "",
+ },
+
+ {
+ "5",
+ 8, 0, 1, '.', 0,
+ "*\t*",
+ "*.......*",
+ },
+
+ {
+ "5b",
+ 8, 0, 1, '.', 0,
+ "*\t*\n",
+ "*.......*\n",
+ },
+
+ {
+ "5c",
+ 8, 0, 1, '.', 0,
+ "*\t*\t",
+ "*.......*",
+ },
+
+ {
+ "5c debug",
+ 8, 0, 1, '.', Debug,
+ "*\t*\t",
+ "*.......|*",
+ },
+
+ {
+ "5d",
+ 8, 0, 1, '.', AlignRight,
+ "*\t*\t",
+ ".......**",
+ },
+
+ {
+ "6",
+ 8, 0, 1, '.', 0,
+ "\t\n",
+ "........\n",
+ },
+
+ {
+ "7a",
+ 8, 0, 1, '.', 0,
+ "a) foo",
+ "a) foo",
+ },
+
+ {
+ "7b",
+ 8, 0, 1, ' ', 0,
+ "b) foo\tbar",
+ "b) foo bar",
+ },
+
+ {
+ "7c",
+ 8, 0, 1, '.', 0,
+ "c) foo\tbar\t",
+ "c) foo..bar",
+ },
+
+ {
+ "7d",
+ 8, 0, 1, '.', 0,
+ "d) foo\tbar\n",
+ "d) foo..bar\n",
+ },
+
+ {
+ "7e",
+ 8, 0, 1, '.', 0,
+ "e) foo\tbar\t\n",
+ "e) foo..bar.....\n",
+ },
+
+ {
+ "7f",
+ 8, 0, 1, '.', FilterHTML,
+ "f) f&lt;o\t<b>bar</b>\t\n",
+ "f) f&lt;o..<b>bar</b>.....\n",
+ },
+
+ {
+ "7g",
+ 8, 0, 1, '.', FilterHTML,
+ "g) f&lt;o\t<b>bar</b>\t non-terminated entity &amp",
+ "g) f&lt;o..<b>bar</b>..... non-terminated entity &amp",
+ },
+
+ {
+ "7g debug",
+ 8, 0, 1, '.', FilterHTML | Debug,
+ "g) f&lt;o\t<b>bar</b>\t non-terminated entity &amp",
+ "g) f&lt;o..|<b>bar</b>.....| non-terminated entity &amp",
+ },
+
+ {
+ "8",
+ 8, 0, 1, '*', 0,
+ "Hello, world!\n",
+ "Hello, world!\n",
+ },
+
+ {
+ "9a",
+ 1, 0, 0, '.', 0,
+ "1\t2\t3\t4\n" +
+ "11\t222\t3333\t44444\n",
+
+ "1.2..3...4\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9b",
+ 1, 0, 0, '.', FilterHTML,
+ "1\t2<!---\f--->\t3\t4\n" + // \f inside HTML is ignored
+ "11\t222\t3333\t44444\n",
+
+ "1.2<!---\f--->..3...4\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9c",
+ 1, 0, 0, '.', 0,
+ "1\t2\t3\t4\f" + // \f causes a newline and flush
+ "11\t222\t3333\t44444\n",
+
+ "1234\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9c debug",
+ 1, 0, 0, '.', Debug,
+ "1\t2\t3\t4\f" + // \f causes a newline and flush
+ "11\t222\t3333\t44444\n",
+
+ "1|2|3|4\n" +
+ "---\n" +
+ "11|222|3333|44444\n",
+ },
+
+ {
+ "10a",
+ 5, 0, 0, '.', 0,
+ "1\t2\t3\t4\n",
+ "1....2....3....4\n",
+ },
+
+ {
+ "10b",
+ 5, 0, 0, '.', 0,
+ "1\t2\t3\t4\t\n",
+ "1....2....3....4....\n",
+ },
+
+ {
+ "11",
+ 8, 0, 1, '.', 0,
+ "本\tb\tc\n" +
+ "aa\t\u672c\u672c\u672c\tcccc\tddddd\n" +
+ "aaa\tbbbb\n",
+
+ "本.......b.......c\n" +
+ "aa......本本本.....cccc....ddddd\n" +
+ "aaa.....bbbb\n",
+ },
+
+ {
+ "12a",
+ 8, 0, 1, ' ', AlignRight,
+ "a\tè\tc\t\n" +
+ "aa\tèèè\tcccc\tddddd\t\n" +
+ "aaa\tèèèè\t\n",
+
+ " a è c\n" +
+ " aa èèè cccc ddddd\n" +
+ " aaa èèèè\n",
+ },
+
+ {
+ "12b",
+ 2, 0, 0, ' ', 0,
+ "a\tb\tc\n" +
+ "aa\tbbb\tcccc\n" +
+ "aaa\tbbbb\n",
+
+ "a b c\n" +
+ "aa bbbcccc\n" +
+ "aaabbbb\n",
+ },
+
+ {
+ "12c",
+ 8, 0, 1, '_', 0,
+ "a\tb\tc\n" +
+ "aa\tbbb\tcccc\n" +
+ "aaa\tbbbb\n",
+
+ "a_______b_______c\n" +
+ "aa______bbb_____cccc\n" +
+ "aaa_____bbbb\n",
+ },
+
+ {
+ "13a",
+ 4, 0, 1, '-', 0,
+ "4444\t日本語\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t999999999\t0000000000\n",
+
+ "4444------日本語-22--1---333\n" +
+ "999999999-22\n" +
+ "7---------22\n" +
+ "------------------88888888\n" +
+ "\n" +
+ "666666-666666-666666----4444\n" +
+ "1------1------999999999-0000000000\n",
+ },
+
+ {
+ "13b",
+ 4, 0, 3, '.', 0,
+ "4444\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t999999999\t0000000000\n",
+
+ "4444........333...22...1...333\n" +
+ "999999999...22\n" +
+ "7...........22\n" +
+ "....................88888888\n" +
+ "\n" +
+ "666666...666666...666666......4444\n" +
+ "1........1........999999999...0000000000\n",
+ },
+
+ {
+ "13c",
+ 8, 8, 1, '\t', FilterHTML,
+ "4444\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
+
+ "4444\t\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t\t22\n" +
+ "\t\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t\t4444\n" +
+ "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
+ },
+
+ {
+ "14",
+ 1, 0, 2, ' ', AlignRight,
+ ".0\t.3\t2.4\t-5.1\t\n" +
+ "23.0\t12345678.9\t2.4\t-989.4\t\n" +
+ "5.1\t12.0\t2.4\t-7.0\t\n" +
+ ".0\t0.0\t332.0\t8908.0\t\n" +
+ ".0\t-.3\t456.4\t22.1\t\n" +
+ ".0\t1.2\t44.4\t-13.3\t\t",
+
+ " .0 .3 2.4 -5.1\n" +
+ " 23.0 12345678.9 2.4 -989.4\n" +
+ " 5.1 12.0 2.4 -7.0\n" +
+ " .0 0.0 332.0 8908.0\n" +
+ " .0 -.3 456.4 22.1\n" +
+ " .0 1.2 44.4 -13.3",
+ },
+
+ {
+ "14 debug",
+ 1, 0, 2, ' ', AlignRight | Debug,
+ ".0\t.3\t2.4\t-5.1\t\n" +
+ "23.0\t12345678.9\t2.4\t-989.4\t\n" +
+ "5.1\t12.0\t2.4\t-7.0\t\n" +
+ ".0\t0.0\t332.0\t8908.0\t\n" +
+ ".0\t-.3\t456.4\t22.1\t\n" +
+ ".0\t1.2\t44.4\t-13.3\t\t",
+
+ " .0| .3| 2.4| -5.1|\n" +
+ " 23.0| 12345678.9| 2.4| -989.4|\n" +
+ " 5.1| 12.0| 2.4| -7.0|\n" +
+ " .0| 0.0| 332.0| 8908.0|\n" +
+ " .0| -.3| 456.4| 22.1|\n" +
+ " .0| 1.2| 44.4| -13.3|",
+ },
+
+ {
+ "15a",
+ 4, 0, 0, '.', 0,
+ "a\t\tb",
+ "a.......b",
+ },
+
+ {
+ "15b",
+ 4, 0, 0, '.', DiscardEmptyColumns,
+ "a\t\tb", // htabs - do not discard column
+ "a.......b",
+ },
+
+ {
+ "15c",
+ 4, 0, 0, '.', DiscardEmptyColumns,
+ "a\v\vb",
+ "a...b",
+ },
+
+ {
+ "15d",
+ 4, 0, 0, '.', AlignRight | DiscardEmptyColumns,
+ "a\v\vb",
+ "...ab",
+ },
+
+ {
+ "16a",
+ 100, 100, 0, '\t', 0,
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16b",
+ 100, 100, 0, '\t', DiscardEmptyColumns,
+ "a\vb\v\vd\n" +
+ "a\vb\v\vd\ve\n" +
+ "a\n" +
+ "a\vb\vc\vd\n" +
+ "a\vb\vc\vd\ve\n",
+
+ "a\tb\td\n" +
+ "a\tb\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16b debug",
+ 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
+ "a\vb\v\vd\n" +
+ "a\vb\v\vd\ve\n" +
+ "a\n" +
+ "a\vb\vc\vd\n" +
+ "a\vb\vc\vd\ve\n",
+
+ "a\t|b\t||d\n" +
+ "a\t|b\t||d\t|e\n" +
+ "a\n" +
+ "a\t|b\t|c\t|d\n" +
+ "a\t|b\t|c\t|d\t|e\n",
+ },
+
+ {
+ "16c",
+ 100, 100, 0, '\t', DiscardEmptyColumns,
+ "a\tb\t\td\n" + // hard tabs - do not discard column
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16c debug",
+ 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
+ "a\tb\t\td\n" + // hard tabs - do not discard column
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\t|b\t|\t|d\n" +
+ "a\t|b\t|\t|d\t|e\n" +
+ "a\n" +
+ "a\t|b\t|c\t|d\n" +
+ "a\t|b\t|c\t|d\t|e\n",
+ },
+}
+
+func Test(t *testing.T) {
+ for _, e := range tests {
+ check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected)
+ }
+}
diff --git a/libgo/go/text/template/doc.go b/libgo/go/text/template/doc.go
new file mode 100644
index 0000000..42f9e56
--- /dev/null
+++ b/libgo/go/text/template/doc.go
@@ -0,0 +1,313 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output
+such as HTML.
+
+Templates are executed by applying them to a data structure. Annotations in the
+template refer to elements of the data structure (typically a field of a struct
+or a key in a map) to control execution and derive values to be displayed.
+Execution of the template walks the structure and sets the cursor, represented
+by a period '.' and called "dot", to the value at the current location in the
+structure as execution proceeds.
+
+The input text for a template is UTF-8-encoded text in any format.
+"Actions"--data evaluations or control structures--are delimited by
+"{{" and "}}"; all text outside actions is copied to the output unchanged.
+Actions may not span newlines, although comments can.
+
+Once constructed, templates and template sets can be executed safely in
+parallel.
+
+Actions
+
+Here is the list of actions. "Arguments" and "pipelines" are evaluations of
+data, defined in detail below.
+
+*/
+// {{/* a comment */}}
+// A comment; discarded. May contain newlines.
+// Comments do not nest.
+/*
+
+ {{pipeline}}
+ The default textual representation of the value of the pipeline
+ is copied to the output.
+
+ {{if pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, T1 is executed. The empty values are false, 0, any
+ nil pointer or interface value, and any array, slice, map, or
+ string of length zero.
+ Dot is unaffected.
+
+ {{if pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, T0 is executed;
+ otherwise, T1 is executed. Dot is unaffected.
+
+ {{range pipeline}} T1 {{end}}
+ The value of the pipeline must be an array, slice, or map. If
+ the value of the pipeline has length zero, nothing is output;
+ otherwise, dot is set to the successive elements of the array,
+ slice, or map and T1 is executed.
+
+ {{range pipeline}} T1 {{else}} T0 {{end}}
+ The value of the pipeline must be an array, slice, or map. If
+ the value of the pipeline has length zero, dot is unaffected and
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+ {{template "name" pipeline}}
+ The template with the specified name is executed with dot set
+ to the value of the pipeline.
+
+ {{with pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, dot is set to the value of the pipeline and T1 is
+ executed.
+
+ {{with pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, dot is unaffected and T0
+ is executed; otherwise, dot is set to the value of the pipeline
+ and T1 is executed.
+
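+A small template might combine several of these actions as in the following
+sketch, where .Name and .Items are assumed fields of the data passed to
+Execute:
+
+ Hello, {{.Name}}.
+ {{if .Items}}Items:
+ {{range .Items}}- {{.}}
+ {{end}}{{else}}No items.{{end}}
+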
+Arguments
+
+An argument is a simple value, denoted by one of the following.
+
+ - A boolean, string, character, integer, floating-point, imaginary
+ or complex constant in Go syntax. These behave like Go's untyped
+ constants, although raw strings may not span newlines.
+ - The character '.' (period):
+ .
+ The result is the value of dot.
+ - A variable name, which is a (possibly empty) alphanumeric string
+ preceded by a dollar sign, such as
+ $piOver2
+ or
+ $
+ The result is the value of the variable.
+ Variables are described below.
+ - The name of a field of the data, which must be a struct, preceded
+ by a period, such as
+ .Field
+ The result is the value of the field. Field invocations may be
+ chained:
+ .Field1.Field2
+ Fields can also be evaluated on variables, including chaining:
+ $x.Field1.Field2
+ - The name of a key of the data, which must be a map, preceded
+ by a period, such as
+ .Key
+ The result is the map element value indexed by the key.
+ Key invocations may be chained and combined with fields to any
+ depth:
+ .Field1.Key1.Field2.Key2
+ Although the key must be an alphanumeric identifier, unlike field
+ names it need not start with an upper case letter.
+ Keys can also be evaluated on variables, including chaining:
+ $x.key1.key2
+ - The name of a niladic method of the data, preceded by a period,
+ such as
+ .Method
+ The result is the value of invoking the method with dot as the
+ receiver, dot.Method(). Such a method must have one return value (of
+ any type) or two return values, the second of which is an error.
+ If it has two and the returned error is non-nil, execution terminates
+ and an error is returned to the caller as the value of Execute.
+ Method invocations may be chained and combined with fields and keys
+ to any depth:
+ .Field1.Key1.Method1.Field2.Key2.Method2
+ Methods can also be evaluated on variables, including chaining:
+ $x.Method1.Field
+ - The name of a niladic function, such as
+ fun
+ The result is the value of invoking the function, fun(). The return
+ types and values behave as in methods. Functions and function
+ names are described below.
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+ Argument
+ The result is the value of evaluating the argument.
+ .Method [Argument...]
+ The method can be alone or the last element of a chain but,
+ unlike methods in the middle of a chain, it can take arguments.
+ The result is the value of calling the method with the
+ arguments:
+ dot.Method(Argument1, etc.)
+ functionName [Argument...]
+ The result is the value of calling the function associated
+ with the name:
+ function(Argument1, etc.)
+ Functions and function names are described below.
+
+Pipelines
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+ $variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+ $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
+
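+For instance, a range over a slice could be written as follows (a minimal
+sketch; .Items is an assumed slice-valued field):
+
+ {{range $i, $v := .Items}}{{$i}}: {{$v}}
+ {{end}}
+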
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
+
+Examples
+
+Here are some example one-line templates demonstrating pipelines and variables.
+All produce the quoted word "output":
+
+ {{"\"output\""}}
+ A string constant.
+ {{`"output"`}}
+ A raw string constant.
+ {{printf "%q" "output"}}
+ A function call.
+ {{"output" | printf "%q"}}
+ A function call whose final argument comes from the previous
+ command.
+ {{"put" | printf "%s%s" "out" | printf "%q"}}
+ A more elaborate call.
+ {{"output" | printf "%s" | printf "%q"}}
+ A longer chain.
+ {{with "output"}}{{printf "%q" .}}{{end}}
+ A with action using dot.
+ {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
+ A with action that creates and uses a variable.
+ {{with $x := "output"}}{{printf "%q" $x}}{{end}}
+ A with action that uses the variable in another action.
+ {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
+ The same, but pipelined.
+
+Functions
+
+During execution functions are found in three function maps: first in the
+template, then in the "template set" (described below), and finally in the
+global function map. By default, no functions are defined in the template or
+the set but the Funcs methods can be used to add them.
+
+Predefined global functions are named as follows.
+
+ and
+ Returns the boolean AND of its arguments by returning the
+ first empty argument or the last argument, that is,
+ "and x y" behaves as "if x then y else x". All the
+ arguments are evaluated.
+ html
+ Returns the escaped HTML equivalent of the textual
+ representation of its arguments.
+ index
+ Returns the result of indexing its first argument by the
+ following arguments. Thus "index x 1 2 3" is, in Go syntax,
+ x[1][2][3]. Each indexed item must be a map, slice, or array.
+ js
+ Returns the escaped JavaScript equivalent of the textual
+ representation of its arguments.
+ len
+ Returns the integer length of its argument.
+ not
+ Returns the boolean negation of its single argument.
+ or
+ Returns the boolean OR of its arguments by returning the
+ first non-empty argument or the last argument, that is,
+ "or x y" behaves as "if x then x else y". All the
+ arguments are evaluated.
+ print
+ An alias for fmt.Sprint
+ printf
+ An alias for fmt.Sprintf
+ println
+ An alias for fmt.Sprintln
+ urlquery
+ Returns the escaped value of the textual representation of
+ its arguments in a form suitable for embedding in a URL query.
+
+The boolean functions take any zero value to be false and a non-zero value to
+be true.
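+
+For example, a custom function might be installed with the Funcs method before
+parsing (a sketch; the function name "upper" and the template text are
+illustrative):
+
+ t := New("example").Funcs(FuncMap{"upper": strings.ToUpper})
+ t = Must(t.Parse(`{{upper "hello, world"}}`))
+ // Executing t writes "HELLO, WORLD".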
+
+Template sets
+
+Each template is named by a string specified when it is created. A template may
+use a template invocation to instantiate another template directly or by its
+name; see the explanation of the template action above. The name is looked up
+in the template set associated with the template.
+
+If a template contains no template invocation actions, template sets can be
+ignored. If it does contain invocations, though, the template must be part of
+a set in which the invoked names can be looked up.
+
+There are two ways to construct template sets.
+
+The first is to use a Set's Parse method to create a set of named templates from
+a single input defining multiple templates. The syntax of the definitions is to
+surround each template declaration with a define and end action.
+
+The define action names the template being created by providing a string
+constant. Here is a simple example of input to Set.Parse:
+
+ `{{define "T1"}} definition of template T1 {{end}}
+ {{define "T2"}} definition of template T2 {{end}}
+ {{define "T3"}} {{template "T1"}} {{template "T2"}} {{end}}`
+
+This defines two templates, T1 and T2, and a third T3 that invokes the other two
+when it is executed.
+
+The second way to build a template set is to use Set's Add method to add a
+parsed template to a set. A template may be bound to at most one set. If it's
+necessary to have a template in multiple sets, the template definition must be
+parsed multiple times to create distinct *Template values.
+
+Set.Parse may be called multiple times on different inputs to construct the set.
+Two sets can therefore share a common base of templates and then, through a
+further Parse call on each, add their own specializations.
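+
+As a sketch, two sets sharing a base template could be built like this (the
+template names and contents are illustrative; error handling is omitted):
+
+ base := `{{define "common"}}shared text{{end}}`
+ s1, s2 := new(Set), new(Set)
+ s1.Parse(base)
+ s1.Parse(`{{define "page"}}{{template "common"}} for set one{{end}}`)
+ s2.Parse(base)
+ s2.Parse(`{{define "page"}}{{template "common"}} for set two{{end}}`)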
+
+A template may be executed directly or through Set.Execute, which executes a
+named template from the set. To invoke our example above, we might write,
+
+ err := set.Execute(os.Stdout, "T3", "no data needed")
+ if err != nil {
+ log.Fatalf("execution failed: %s", err)
+ }
+*/
+package template
diff --git a/libgo/go/text/template/exec.go b/libgo/go/text/template/exec.go
new file mode 100644
index 0000000..1910882
--- /dev/null
+++ b/libgo/go/text/template/exec.go
@@ -0,0 +1,673 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "strings"
+ "text/template/parse"
+)
+
+// state represents the state of an execution. It's not part of the
+// template so that multiple executions of the same template
+// can execute in parallel.
+type state struct {
+ tmpl *Template
+ wr io.Writer
+ line int // line number for errors
+ vars []variable // push-down stack of variable values.
+}
+
+// variable holds the dynamic value of a variable such as $, $x etc.
+type variable struct {
+ name string
+ value reflect.Value
+}
+
+// push pushes a new variable on the stack.
+func (s *state) push(name string, value reflect.Value) {
+ s.vars = append(s.vars, variable{name, value})
+}
+
+// mark returns the length of the variable stack.
+func (s *state) mark() int {
+ return len(s.vars)
+}
+
+// pop pops the variable stack up to the mark.
+func (s *state) pop(mark int) {
+ s.vars = s.vars[0:mark]
+}
+
+// setVar overwrites the top-nth variable on the stack. Used by range iterations.
+func (s *state) setVar(n int, value reflect.Value) {
+ s.vars[len(s.vars)-n].value = value
+}
+
+// varValue returns the value of the named variable.
+func (s *state) varValue(name string) reflect.Value {
+ for i := s.mark() - 1; i >= 0; i-- {
+ if s.vars[i].name == name {
+ return s.vars[i].value
+ }
+ }
+ s.errorf("undefined variable: %s", name)
+ return zero
+}
+
+var zero reflect.Value
+
+// errorf formats the error and terminates processing.
+func (s *state) errorf(format string, args ...interface{}) {
+ format = fmt.Sprintf("template: %s:%d: %s", s.tmpl.Name(), s.line, format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (s *state) error(err error) {
+ s.errorf("%s", err)
+}
+
+// errRecover is the handler that turns panics into returns from the top
+// level of Execute.
+func errRecover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ *errp = e.(error)
+ }
+}
+
+// Execute applies a parsed template to the specified data object,
+// writing the output to wr.
+func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
+ defer errRecover(&err)
+ value := reflect.ValueOf(data)
+ state := &state{
+ tmpl: t,
+ wr: wr,
+ line: 1,
+ vars: []variable{{"$", value}},
+ }
+ if t.Tree == nil || t.Root == nil {
+ state.errorf("must be parsed before execution")
+ }
+ state.walk(value, t.Root)
+ return
+}
+
+// Walk functions step through the major pieces of the template structure,
+// generating output as they go.
+func (s *state) walk(dot reflect.Value, n parse.Node) {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ s.line = n.Line
+ // Do not pop variables so they persist until next end.
+ // Also, if the action declares variables, don't print the result.
+ val := s.evalPipeline(dot, n.Pipe)
+ if len(n.Pipe.Decl) == 0 {
+ s.printValue(n, val)
+ }
+ case *parse.IfNode:
+ s.line = n.Line
+ s.walkIfOrWith(parse.NodeIf, dot, n.Pipe, n.List, n.ElseList)
+ case *parse.ListNode:
+ for _, node := range n.Nodes {
+ s.walk(dot, node)
+ }
+ case *parse.RangeNode:
+ s.line = n.Line
+ s.walkRange(dot, n)
+ case *parse.TemplateNode:
+ s.line = n.Line
+ s.walkTemplate(dot, n)
+ case *parse.TextNode:
+ if _, err := s.wr.Write(n.Text); err != nil {
+ s.error(err)
+ }
+ case *parse.WithNode:
+ s.line = n.Line
+ s.walkIfOrWith(parse.NodeWith, dot, n.Pipe, n.List, n.ElseList)
+ default:
+ s.errorf("unknown node: %s", n)
+ }
+}
+
+// walkIfOrWith walks an 'if' or 'with' node. The two control structures
+// are identical in behavior except that 'with' sets dot.
+func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
+ defer s.pop(s.mark())
+ val := s.evalPipeline(dot, pipe)
+ truth, ok := isTrue(val)
+ if !ok {
+ s.errorf("if/with can't use %v", val)
+ }
+ if truth {
+ if typ == parse.NodeWith {
+ s.walk(val, list)
+ } else {
+ s.walk(dot, list)
+ }
+ } else if elseList != nil {
+ s.walk(dot, elseList)
+ }
+}
+
+// isTrue returns whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value.
+func isTrue(val reflect.Value) (truth, ok bool) {
+ if !val.IsValid() {
+ // Something like var x interface{}, never set. It's a form of nil.
+ return false, true
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ truth = val.Len() > 0
+ case reflect.Bool:
+ truth = val.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ truth = val.Complex() != 0
+ case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+ truth = !val.IsNil()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ truth = val.Int() != 0
+ case reflect.Float32, reflect.Float64:
+ truth = val.Float() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ truth = val.Uint() != 0
+ case reflect.Struct:
+ truth = true // Struct values are always true.
+ default:
+ return
+ }
+ return truth, true
+}
+
+func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ defer s.pop(s.mark())
+ val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+ // mark top of stack before any variables in the body are pushed.
+ mark := s.mark()
+ oneIteration := func(index, elem reflect.Value) {
+ // Set top var (lexically the second if there are two) to the element.
+ if len(r.Pipe.Decl) > 0 {
+ s.setVar(1, elem)
+ }
+ // Set next var (lexically the first if there are two) to the index.
+ if len(r.Pipe.Decl) > 1 {
+ s.setVar(2, index)
+ }
+ s.walk(elem, r.List)
+ s.pop(mark)
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ if val.Len() == 0 {
+ break
+ }
+ for i := 0; i < val.Len(); i++ {
+ oneIteration(reflect.ValueOf(i), val.Index(i))
+ }
+ return
+ case reflect.Map:
+ if val.Len() == 0 {
+ break
+ }
+ for _, key := range val.MapKeys() {
+ oneIteration(key, val.MapIndex(key))
+ }
+ return
+ case reflect.Chan:
+ if val.IsNil() {
+ break
+ }
+ i := 0
+ for ; ; i++ {
+ elem, ok := val.Recv()
+ if !ok {
+ break
+ }
+ oneIteration(reflect.ValueOf(i), elem)
+ }
+ if i == 0 {
+ break
+ }
+ return
+ case reflect.Invalid:
+ break // An invalid value is likely a nil map, etc. and acts like an empty map.
+ default:
+ s.errorf("range can't iterate over %v", val)
+ }
+ if r.ElseList != nil {
+ s.walk(dot, r.ElseList)
+ }
+}
+
+func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
+ set := s.tmpl.set
+ if set == nil {
+ s.errorf("no set defined in which to invoke template named %q", t.Name)
+ }
+ tmpl := set.tmpl[t.Name]
+ if tmpl == nil {
+ s.errorf("template %q not in set", t.Name)
+ }
+ // Variables declared by the pipeline persist.
+ dot = s.evalPipeline(dot, t.Pipe)
+ newState := *s
+ newState.tmpl = tmpl
+ // No dynamic scoping: template invocations inherit no variables.
+ newState.vars = []variable{{"$", dot}}
+ newState.walk(dot, tmpl.Root)
+}
+
+// Eval functions evaluate pipelines, commands, and their elements and extract
+// values from the data structure by examining fields, calling methods, and so on.
+// The printing of those values happens only through walk functions.
+
+// evalPipeline returns the value acquired by evaluating a pipeline. If the
+// pipeline has a variable declaration, the variable will be pushed on the
+// stack. Callers should therefore pop the stack after they are finished
+// executing commands depending on the pipeline value.
+func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
+ if pipe == nil {
+ return
+ }
+ for _, cmd := range pipe.Cmds {
+ value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
+ // If the object has type interface{}, dig down one level to the thing inside.
+ if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
+ value = reflect.ValueOf(value.Interface()) // lovely!
+ }
+ }
+ for _, variable := range pipe.Decl {
+ s.push(variable.Ident[0], value)
+ }
+ return value
+}
+
+func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
+ if len(args) > 1 || final.IsValid() {
+ s.errorf("can't give argument to non-function %s", args[0])
+ }
+}
+
+func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
+ firstWord := cmd.Args[0]
+ switch n := firstWord.(type) {
+ case *parse.FieldNode:
+ return s.evalFieldNode(dot, n, cmd.Args, final)
+ case *parse.IdentifierNode:
+ // Must be a function.
+ return s.evalFunction(dot, n.Ident, cmd.Args, final)
+ case *parse.VariableNode:
+ return s.evalVariableNode(dot, n, cmd.Args, final)
+ }
+ s.notAFunction(cmd.Args, final)
+ switch word := firstWord.(type) {
+ case *parse.BoolNode:
+ return reflect.ValueOf(word.True)
+ case *parse.DotNode:
+ return dot
+ case *parse.NumberNode:
+ return s.idealConstant(word)
+ case *parse.StringNode:
+ return reflect.ValueOf(word.Text)
+ }
+ s.errorf("can't evaluate command %q", firstWord)
+ panic("not reached")
+}
+
+// idealConstant is called to return the value of a number in a context where
+// we don't know the type. In that case, the syntax of the number tells us
+// its type, and we use Go rules to resolve. Note there is no such thing as
+// a uint ideal constant in this situation - the value must be of int type.
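+// For example, under these rules "2i" yields a complex128, "1.5" or "1e3"
+// yields a float64, and "7" yields an int (or an error if it overflows int).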
+func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
+ // These are ideal constants but we don't know the type
+ // and we have no context. (If it was a method argument,
+ // we'd know what we need.) The syntax guides us to some extent.
+ switch {
+ case constant.IsComplex:
+ return reflect.ValueOf(constant.Complex128) // incontrovertible.
+ case constant.IsFloat && strings.IndexAny(constant.Text, ".eE") >= 0:
+ return reflect.ValueOf(constant.Float64)
+ case constant.IsInt:
+ n := int(constant.Int64)
+ if int64(n) != constant.Int64 {
+ s.errorf("%s overflows int", constant.Text)
+ }
+ return reflect.ValueOf(n)
+ case constant.IsUint:
+ s.errorf("%s overflows int", constant.Text)
+ }
+ return zero
+}
+
+func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
+ return s.evalFieldChain(dot, dot, field.Ident, args, final)
+}
+
+func (s *state) evalVariableNode(dot reflect.Value, v *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
+ // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
+ value := s.varValue(v.Ident[0])
+ if len(v.Ident) == 1 {
+ return value
+ }
+ return s.evalFieldChain(dot, value, v.Ident[1:], args, final)
+}
+
+// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
+// dot is the environment in which to evaluate arguments, while
+// receiver is the value being walked along the chain.
+func (s *state) evalFieldChain(dot, receiver reflect.Value, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
+ n := len(ident)
+ for i := 0; i < n-1; i++ {
+ receiver = s.evalField(dot, ident[i], nil, zero, receiver)
+ }
+ // Now if it's a method, it gets the arguments.
+ return s.evalField(dot, ident[n-1], args, final, receiver)
+}
+
+func (s *state) evalFunction(dot reflect.Value, name string, args []parse.Node, final reflect.Value) reflect.Value {
+ function, ok := findFunction(name, s.tmpl, s.tmpl.set)
+ if !ok {
+ s.errorf("%q is not a defined function", name)
+ }
+ return s.evalCall(dot, function, name, args, final)
+}
+
+// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
+// The 'final' argument represents the return value from the preceding
+// value of the pipeline, if any.
+func (s *state) evalField(dot reflect.Value, fieldName string, args []parse.Node, final, receiver reflect.Value) reflect.Value {
+ if !receiver.IsValid() {
+ return zero
+ }
+ typ := receiver.Type()
+ receiver, _ = indirect(receiver)
+ // Unless it's an interface, need to get to a value of type *T to guarantee
+ // we see all methods of T and *T.
+ ptr := receiver
+ if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
+ ptr = ptr.Addr()
+ }
+ if method, ok := methodByName(ptr, fieldName); ok {
+ return s.evalCall(dot, method, fieldName, args, final)
+ }
+ hasArgs := len(args) > 1 || final.IsValid()
+ // It's not a method; is it a field of a struct?
+ receiver, isNil := indirect(receiver)
+ if receiver.Kind() == reflect.Struct {
+ tField, ok := receiver.Type().FieldByName(fieldName)
+ if ok {
+ field := receiver.FieldByIndex(tField.Index)
+ if hasArgs {
+ s.errorf("%s is not a method but has arguments", fieldName)
+ }
+ if tField.PkgPath == "" { // field is exported
+ return field
+ }
+ }
+ }
+ // If it's a map, attempt to use the field name as a key.
+ if receiver.Kind() == reflect.Map {
+ nameVal := reflect.ValueOf(fieldName)
+ if nameVal.Type().AssignableTo(receiver.Type().Key()) {
+ if hasArgs {
+ s.errorf("%s is not a method but has arguments", fieldName)
+ }
+ return receiver.MapIndex(nameVal)
+ }
+ }
+ if isNil {
+ s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
+ }
+ s.errorf("can't evaluate field %s in type %s", fieldName, typ)
+ panic("not reached")
+}
+
+// TODO: delete when reflect's own MethodByName is released.
+func methodByName(receiver reflect.Value, name string) (reflect.Value, bool) {
+ typ := receiver.Type()
+ for i := 0; i < typ.NumMethod(); i++ {
+ if typ.Method(i).Name == name {
+ return receiver.Method(i), true // This value includes the receiver.
+ }
+ }
+ return zero, false
+}
+
+var (
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+ fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
+// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell) arg[0]
+// as the function itself.
+func (s *state) evalCall(dot, fun reflect.Value, name string, args []parse.Node, final reflect.Value) reflect.Value {
+ if args != nil {
+ args = args[1:] // Zeroth arg is function name/node; not passed to function.
+ }
+ typ := fun.Type()
+ numIn := len(args)
+ if final.IsValid() {
+ numIn++
+ }
+ numFixed := len(args)
+ if typ.IsVariadic() {
+ numFixed = typ.NumIn() - 1 // last arg is the variadic one.
+ if numIn < numFixed {
+ s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
+ }
+ } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
+ s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
+ }
+ if !goodFunc(typ) {
+ s.errorf("can't handle multiple results from method/function %q", name)
+ }
+ // Build the arg list.
+ argv := make([]reflect.Value, numIn)
+ // Args must be evaluated. Fixed args first.
+ i := 0
+ for ; i < numFixed; i++ {
+ argv[i] = s.evalArg(dot, typ.In(i), args[i])
+ }
+ // Now the ... args.
+ if typ.IsVariadic() {
+ argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
+ for ; i < len(args); i++ {
+ argv[i] = s.evalArg(dot, argType, args[i])
+ }
+ }
+ // Add final value if necessary.
+ if final.IsValid() {
+ argv[i] = final
+ }
+ result := fun.Call(argv)
+ // If we have an error that is not nil, stop execution and return that error to the caller.
+ if len(result) == 2 && !result[1].IsNil() {
+ s.errorf("error calling %s: %s", name, result[1].Interface().(error))
+ }
+ return result[0]
+}
+
+// validateType guarantees that the value is valid and assignable to the type.
+func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
+ if !value.IsValid() {
+ s.errorf("invalid value; expected %s", typ)
+ }
+ if !value.Type().AssignableTo(typ) {
+ // Does one dereference or indirection work? We could do more, as we
+ // do with method receivers, but that gets messy and method receivers
+ // are much more constrained, so it makes more sense there than here.
+ // Besides, one is almost always all you need.
+ switch {
+ case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
+ value = value.Elem()
+ case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
+ value = value.Addr()
+ default:
+ s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
+ }
+ }
+ return value
+}
+
+func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
+ switch arg := n.(type) {
+ case *parse.DotNode:
+ return s.validateType(dot, typ)
+ case *parse.FieldNode:
+ return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
+ case *parse.VariableNode:
+ return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
+ }
+ switch typ.Kind() {
+ case reflect.Bool:
+ return s.evalBool(typ, n)
+ case reflect.Complex64, reflect.Complex128:
+ return s.evalComplex(typ, n)
+ case reflect.Float32, reflect.Float64:
+ return s.evalFloat(typ, n)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return s.evalInteger(typ, n)
+ case reflect.Interface:
+ if typ.NumMethod() == 0 {
+ return s.evalEmptyInterface(dot, n)
+ }
+ case reflect.String:
+ return s.evalString(typ, n)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return s.evalUnsignedInteger(typ, n)
+ }
+ s.errorf("can't handle %s for arg of type %s", n, typ)
+ panic("not reached")
+}
+
+func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.BoolNode); ok {
+ value := reflect.New(typ).Elem()
+ value.SetBool(n.True)
+ return value
+ }
+ s.errorf("expected bool; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.StringNode); ok {
+ value := reflect.New(typ).Elem()
+ value.SetString(n.Text)
+ return value
+ }
+ s.errorf("expected string; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
+ value := reflect.New(typ).Elem()
+ value.SetInt(n.Int64)
+ return value
+ }
+ s.errorf("expected integer; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
+ value := reflect.New(typ).Elem()
+ value.SetUint(n.Uint64)
+ return value
+ }
+ s.errorf("expected unsigned integer; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
+ value := reflect.New(typ).Elem()
+ value.SetFloat(n.Float64)
+ return value
+ }
+ s.errorf("expected float; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
+ value := reflect.New(typ).Elem()
+ value.SetComplex(n.Complex128)
+ return value
+ }
+ s.errorf("expected complex; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
+ switch n := n.(type) {
+ case *parse.BoolNode:
+ return reflect.ValueOf(n.True)
+ case *parse.DotNode:
+ return dot
+ case *parse.FieldNode:
+ return s.evalFieldNode(dot, n, nil, zero)
+ case *parse.IdentifierNode:
+ return s.evalFunction(dot, n.Ident, nil, zero)
+ case *parse.NumberNode:
+ return s.idealConstant(n)
+ case *parse.StringNode:
+ return reflect.ValueOf(n.Text)
+ case *parse.VariableNode:
+ return s.evalVariableNode(dot, n, nil, zero)
+ }
+ s.errorf("can't handle assignment of %s to empty interface argument", n)
+ panic("not reached")
+}
+
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+ for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+ if v.IsNil() {
+ return v, true
+ }
+ if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+ break
+ }
+ }
+ return v, false
+}
+
+// printValue writes the textual representation of the value to the output of
+// the template.
+func (s *state) printValue(n parse.Node, v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ v, _ = indirect(v) // fmt.Fprint handles nil.
+ }
+ if !v.IsValid() {
+ fmt.Fprint(s.wr, "<no value>")
+ return
+ }
+
+ if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+ if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+ v = v.Addr()
+ } else {
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func:
+ s.errorf("can't print %s of type %s", n, v.Type())
+ }
+ }
+ }
+ fmt.Fprint(s.wr, v.Interface())
+}
diff --git a/libgo/go/text/template/exec_test.go b/libgo/go/text/template/exec_test.go
new file mode 100644
index 0000000..5721667
--- /dev/null
+++ b/libgo/go/text/template/exec_test.go
@@ -0,0 +1,683 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+// T has lots of interesting pieces to use to test execution.
+type T struct {
+ // Basics
+ True bool
+ I int
+ U16 uint16
+ X string
+ FloatZero float64
+ ComplexZero float64
+ // Nested structs.
+ U *U
+ // Struct with String method.
+ V0 V
+ V1, V2 *V
+ // Struct with Error method.
+ W0 W
+ W1, W2 *W
+ // Slices
+ SI []int
+ SIEmpty []int
+ SB []bool
+ // Maps
+ MSI map[string]int
+ MSIone map[string]int // one element, for deterministic output
+ MSIEmpty map[string]int
+ MXI map[interface{}]int
+ MII map[int]int
+ SMSI []map[string]int
+ // Empty interfaces; used to see if we can dig inside one.
+ Empty0 interface{} // nil
+ Empty1 interface{}
+ Empty2 interface{}
+ Empty3 interface{}
+ Empty4 interface{}
+ // Non-empty interface.
+ NonEmptyInterface I
+ // Stringer.
+ Str fmt.Stringer
+ Err error
+ // Pointers
+ PI *int
+ PSI *[]int
+ NIL *int
+ // Template to test evaluation of templates.
+ Tmpl *Template
+}
+
+type U struct {
+ V string
+}
+
+type V struct {
+ j int
+}
+
+func (v *V) String() string {
+ if v == nil {
+ return "nilV"
+ }
+ return fmt.Sprintf("<%d>", v.j)
+}
+
+type W struct {
+ k int
+}
+
+func (w *W) Error() string {
+ if w == nil {
+ return "nilW"
+ }
+ return fmt.Sprintf("[%d]", w.k)
+}
+
+var tVal = &T{
+ True: true,
+ I: 17,
+ U16: 16,
+ X: "x",
+ U: &U{"v"},
+ V0: V{6666},
+ V1: &V{7777}, // leave V2 as nil
+ W0: W{888},
+ W1: &W{999}, // leave W2 as nil
+ SI: []int{3, 4, 5},
+ SB: []bool{true, false},
+ MSI: map[string]int{"one": 1, "two": 2, "three": 3},
+ MSIone: map[string]int{"one": 1},
+ MXI: map[interface{}]int{"one": 1},
+ MII: map[int]int{1: 1},
+ SMSI: []map[string]int{
+ {"one": 1, "two": 2},
+ {"eleven": 11, "twelve": 12},
+ },
+ Empty1: 3,
+ Empty2: "empty2",
+ Empty3: []int{7, 8},
+ Empty4: &U{"UinEmpty"},
+ NonEmptyInterface: new(T),
+ Str: bytes.NewBuffer([]byte("foozle")),
+ Err: errors.New("erroozle"),
+ PI: newInt(23),
+ PSI: newIntSlice(21, 22, 23),
+ Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X
+}
+
+// A non-empty interface.
+type I interface {
+ Method0() string
+}
+
+var iVal I = tVal
+
+// Helpers for creation.
+func newInt(n int) *int {
+ p := new(int)
+ *p = n
+ return p
+}
+
+func newIntSlice(n ...int) *[]int {
+ p := new([]int)
+ *p = make([]int, len(n))
+ copy(*p, n)
+ return p
+}
+
+// Simple methods with and without arguments.
+func (t *T) Method0() string {
+ return "M0"
+}
+
+func (t *T) Method1(a int) int {
+ return a
+}
+
+func (t *T) Method2(a uint16, b string) string {
+ return fmt.Sprintf("Method2: %d %s", a, b)
+}
+
+func (t *T) MAdd(a int, b []int) []int {
+ v := make([]int, len(b))
+ for i, x := range b {
+ v[i] = x + a
+ }
+ return v
+}
+
+// MSort is used to sort map keys for stable output. (Nice trick!)
+func (t *T) MSort(m map[string]int) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// EPERM returns a value and an error according to its argument.
+func (t *T) EPERM(error bool) (bool, error) {
+ if error {
+ return true, os.EPERM
+ }
+ return false, nil
+}
+
+// A few methods to test chaining.
+func (t *T) GetU() *U {
+ return t.U
+}
+
+func (u *U) TrueFalse(b bool) string {
+ if b {
+ return "true"
+ }
+ return ""
+}
+
+func typeOf(arg interface{}) string {
+ return fmt.Sprintf("%T", arg)
+}
+
+type execTest struct {
+ name string
+ input string
+ output string
+ data interface{}
+ ok bool
+}
+
+// bigInt and bigUint are hex strings representing numbers on either side
+// of the max int boundary.
+// We do it this way so the test doesn't depend on ints being 32 bits.
+var (
+ bigInt = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
+ bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+)
+
+var execTests = []execTest{
+ // Trivial cases.
+ {"empty", "", "", nil, true},
+ {"text", "some text", "some text", nil, true},
+
+ // Ideal constants.
+ {"ideal int", "{{typeOf 3}}", "int", 0, true},
+ {"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
+ {"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
+ {"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
+ {"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
+ {"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
+
+ // Fields of structs.
+ {".X", "-{{.X}}-", "-x-", tVal, true},
+ {".U.V", "-{{.U.V}}-", "-v-", tVal, true},
+
+ // Fields on maps.
+ {"map .one", "{{.MSI.one}}", "1", tVal, true},
+ {"map .two", "{{.MSI.two}}", "2", tVal, true},
+ {"map .NO", "{{.MSI.NO}}", "<no value>", tVal, true},
+ {"map .one interface", "{{.MXI.one}}", "1", tVal, true},
+ {"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
+ {"map .WRONG type", "{{.MII.one}}", "", tVal, false},
+
+ // Dots of all kinds to test basic evaluation.
+ {"dot int", "<{{.}}>", "<13>", 13, true},
+ {"dot uint", "<{{.}}>", "<14>", uint(14), true},
+ {"dot float", "<{{.}}>", "<15.1>", 15.1, true},
+ {"dot bool", "<{{.}}>", "<true>", true, true},
+ {"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
+ {"dot string", "<{{.}}>", "<hello>", "hello", true},
+ {"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
+ {"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
+ {"dot struct", "<{{.}}>", "<{7 seven}>", struct {
+ a int
+ b string
+ }{7, "seven"}, true},
+
+ // Variables.
+ {"$ int", "{{$}}", "123", 123, true},
+ {"$.I", "{{$.I}}", "17", tVal, true},
+ {"$.U.V", "{{$.U.V}}", "v", tVal, true},
+ {"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
+
+ // Type with String method.
+ {"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
+ {"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
+ {"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
+
+ // Type with Error method.
+ {"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true},
+ {"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
+ {"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
+
+ // Pointers.
+ {"*int", "{{.PI}}", "23", tVal, true},
+ {"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
+ {"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
+ {"NIL", "{{.NIL}}", "<nil>", tVal, true},
+
+ // Empty interfaces holding values.
+ {"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
+ {"empty with int", "{{.Empty1}}", "3", tVal, true},
+ {"empty with string", "{{.Empty2}}", "empty2", tVal, true},
+ {"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
+ {"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
+ {"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
+
+ // Method calls.
+ {".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
+ {".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
+ {".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
+ {".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
+ {".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
+ {".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
+ {"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
+ {"method on chained var",
+ "{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+ "true", tVal, true},
+ {"chained method",
+ "{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+ "true", tVal, true},
+ {"chained method on variable",
+ "{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
+ "true", tVal, true},
+
+ // Pipelines.
+ {"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
+
+ // If.
+ {"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
+ {"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
+ {"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
+ {"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
+
+ // Print etc.
+ {"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
+ {"print", `{{print 1 2 3}}`, "1 2 3", tVal, true},
+ {"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
+ {"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
+ {"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
+ {"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
+ {"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
+ {"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
+ {"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
+ {"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
+ {"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
+ {"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
+ {"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
+
+ // HTML.
+ {"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
+ "&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+ {"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
+ "&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+
+ // JavaScript.
+ {"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
+
+ // URL query.
+ {"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
+
+ // Booleans
+ {"not", "{{not true}} {{not false}}", "false true", nil, true},
+ {"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
+ {"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
+ {"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
+ {"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
+
+ // Indexing.
+ {"slice[0]", "{{index .SI 0}}", "3", tVal, true},
+ {"slice[1]", "{{index .SI 1}}", "4", tVal, true},
+ {"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
+ {"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
+ {"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
+ {"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
+ {"map[NO]", "{{index .MSI `XXX`}}", "", tVal, true},
+ {"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
+ {"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
+
+ // Len.
+ {"slice", "{{len .SI}}", "3", tVal, true},
+ {"map", "{{len .MSI }}", "3", tVal, true},
+ {"len of int", "{{len 3}}", "", tVal, false},
+ {"len of nothing", "{{len .Empty0}}", "", tVal, false},
+
+ // With.
+ {"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
+ {"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
+ {"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
+ {"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
+ {"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true},
+ {"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
+ {"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
+ {"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
+ {"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
+ {"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
+ {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
+ {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
+
+ // Range.
+ {"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI | .MSort}}-{{.}}-{{end}}", "-one--three--two-", tVal, true},
+ {"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range map else", "{{range .MSI | .MSort}}-{{.}}-{{else}}EMPTY{{end}}", "-one--three--two-", tVal, true},
+ {"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
+ {"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
+ {"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
+ {"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
+ {"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
+ {"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
+ {"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+ {"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+ {"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
+ {"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
+
+ // Cute examples.
+ {"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
+ {"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
+
+ // Error handling.
+ {"error method, error", "{{.EPERM true}}", "", tVal, false},
+ {"error method, no error", "{{.EPERM false}}", "false", tVal, true},
+
+ // Fixed bugs.
+ // Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
+ {"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
+ // Do not loop endlessly in indirect for non-empty interfaces.
+ // The bug appears with *interface only; looped forever.
+ {"bug1", "{{.Method0}}", "M0", &iVal, true},
+ // Was taking address of interface field, so method set was empty.
+ {"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
+ // Struct values were not legal in with - mere oversight.
+ {"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
+ // Nil interface values in if.
+ {"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
+ // Stringer.
+ {"bug5", "{{.Str}}", "foozle", tVal, true},
+ {"bug5a", "{{.Err}}", "erroozle", tVal, true},
+ // Args need to be indirected and dereferenced sometimes.
+ {"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
+ {"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
+ {"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
+ {"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
+}
+
+func zeroArgs() string {
+ return "zeroArgs"
+}
+
+func oneArg(a string) string {
+ return "oneArg=" + a
+}
+
+// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
+func count(n int) chan string {
+ if n == 0 {
+ return nil
+ }
+ c := make(chan string)
+ go func() {
+ for i := 0; i < n; i++ {
+ c <- "abcdefghijklmnop"[i : i+1]
+ }
+ close(c)
+ }()
+ return c
+}
+
+// vfunc takes a V and a *V.
+func vfunc(V, *V) string {
+ return "vfunc"
+}
+
+func testExecute(execTests []execTest, set *Set, t *testing.T) {
+ b := new(bytes.Buffer)
+ funcs := FuncMap{
+ "count": count,
+ "oneArg": oneArg,
+ "typeOf": typeOf,
+ "vfunc": vfunc,
+ "zeroArgs": zeroArgs,
+ }
+ for _, test := range execTests {
+ tmpl := New(test.name).Funcs(funcs)
+ _, err := tmpl.ParseInSet(test.input, set)
+ if err != nil {
+ t.Errorf("%s: parse error: %s", test.name, err)
+ continue
+ }
+ b.Reset()
+ err = tmpl.Execute(b, test.data)
+ switch {
+ case !test.ok && err == nil:
+ t.Errorf("%s: expected error; got none", test.name)
+ continue
+ case test.ok && err != nil:
+ t.Errorf("%s: unexpected execute error: %s", test.name, err)
+ continue
+ case !test.ok && err != nil:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ }
+ result := b.String()
+ if result != test.output {
+ t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
+ }
+ }
+}
+
+func TestExecute(t *testing.T) {
+ testExecute(execTests, nil, t)
+}
+
+var delimPairs = []string{
+ "", "", // default
+ "{{", "}}", // same as default
+ "<<", ">>", // distinct
+ "|", "|", // same
+ "(日)", "(本)", // peculiar
+}
+
+func TestDelims(t *testing.T) {
+ const hello = "Hello, world"
+ var value = struct{ Str string }{hello}
+ for i := 0; i < len(delimPairs); i += 2 {
+ text := ".Str"
+ left := delimPairs[i+0]
+ trueLeft := left
+ right := delimPairs[i+1]
+ trueRight := right
+ if left == "" { // default case
+ trueLeft = "{{"
+ }
+ if right == "" { // default case
+ trueRight = "}}"
+ }
+ text = trueLeft + text + trueRight
+ // Now add a comment
+ text += trueLeft + "/*comment*/" + trueRight
+ // Now add an action containing a string.
+ text += trueLeft + `"` + trueLeft + `"` + trueRight
+ // At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
+ tmpl, err := New("delims").Delims(left, right).Parse(text)
+ if err != nil {
+ t.Fatalf("delim %q text %q parse err %s", left, text, err)
+ }
+ var b = new(bytes.Buffer)
+ err = tmpl.Execute(b, value)
+ if err != nil {
+ t.Fatalf("delim %q exec err %s", left, err)
+ }
+ if b.String() != hello+trueLeft {
+ t.Errorf("expected %q got %q", hello+trueLeft, b.String())
+ }
+ }
+}
+
+// Check that an error from a method flows back to the top.
+func TestExecuteError(t *testing.T) {
+ b := new(bytes.Buffer)
+ tmpl := New("error")
+ _, err := tmpl.Parse("{{.EPERM true}}")
+ if err != nil {
+ t.Fatalf("parse error: %s", err)
+ }
+ err = tmpl.Execute(b, tVal)
+ if err == nil {
+ t.Errorf("expected error; got none")
+ } else if !strings.Contains(err.Error(), os.EPERM.Error()) {
+ if *debug {
+ fmt.Printf("test execute error: %s\n", err)
+ }
+ t.Errorf("expected os.EPERM; got %s", err)
+ }
+}
+
+func TestJSEscaping(t *testing.T) {
+ testCases := []struct {
+ in, exp string
+ }{
+ {`a`, `a`},
+ {`'foo`, `\'foo`},
+ {`Go "jump" \`, `Go \"jump\" \\`},
+ {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+ {"unprintable \uFDFF", `unprintable \uFDFF`},
+ {`<html>`, `\x3Chtml\x3E`},
+ }
+ for _, tc := range testCases {
+ s := JSEscapeString(tc.in)
+ if s != tc.exp {
+ t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
+ }
+ }
+}
+
+// A nice example: walk a binary tree.
+
+type Tree struct {
+ Val int
+ Left, Right *Tree
+}
+
+// Use different delimiters to test Set.Delims.
+const treeTemplate = `
+ (define "tree")
+ [
+ (.Val)
+ (with .Left)
+ (template "tree" .)
+ (end)
+ (with .Right)
+ (template "tree" .)
+ (end)
+ ]
+ (end)
+`
+
+func TestTree(t *testing.T) {
+ var tree = &Tree{
+ 1,
+ &Tree{
+ 2, &Tree{
+ 3,
+ &Tree{
+ 4, nil, nil,
+ },
+ nil,
+ },
+ &Tree{
+ 5,
+ &Tree{
+ 6, nil, nil,
+ },
+ nil,
+ },
+ },
+ &Tree{
+ 7,
+ &Tree{
+ 8,
+ &Tree{
+ 9, nil, nil,
+ },
+ nil,
+ },
+ &Tree{
+ 10,
+ &Tree{
+ 11, nil, nil,
+ },
+ nil,
+ },
+ },
+ }
+ set := new(Set)
+ _, err := set.Delims("(", ")").Parse(treeTemplate)
+ if err != nil {
+ t.Fatal("parse error:", err)
+ }
+ var b bytes.Buffer
+ err = set.Execute(&b, "tree", tree)
+ if err != nil {
+ t.Fatal("exec error:", err)
+ }
+ stripSpace := func(r rune) rune {
+ if r == '\t' || r == '\n' {
+ return -1
+ }
+ return r
+ }
+ result := strings.Map(stripSpace, b.String())
+ const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
+ if result != expect {
+ t.Errorf("expected %q got %q", expect, result)
+ }
+}
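
The table-driven tests above exercise the public execution path (New, Funcs, Parse, Execute). For readers who skip the test table, here is a minimal standalone sketch of that path; the template text, the data struct, and the "shout" helper are illustrative only and are not part of this commit.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// A tiny custom function, analogous to the oneArg helper in the tests above.
	funcs := template.FuncMap{
		"shout": func(s string) string { return s + "!" },
	}
	tmpl, err := template.New("demo").Funcs(funcs).Parse(`{{shout .Name}} has {{len .Items}} items`)
	if err != nil {
		panic(err)
	}
	data := struct {
		Name  string
		Items []int
	}{"gopher", []int{1, 2, 3}}
	var b bytes.Buffer
	if err := tmpl.Execute(&b, data); err != nil {
		panic(err)
	}
	fmt.Println(b.String()) // prints: gopher! has 3 items
}
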
diff --git a/libgo/go/text/template/funcs.go b/libgo/go/text/template/funcs.go
new file mode 100644
index 0000000..2ca09a7
--- /dev/null
+++ b/libgo/go/text/template/funcs.go
@@ -0,0 +1,367 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. If the second return value evaluates to
+// non-nil during execution, execution terminates and Execute returns an error.
+type FuncMap map[string]interface{}
+
+var builtins = FuncMap{
+ "and": and,
+ "html": HTMLEscaper,
+ "index": index,
+ "js": JSEscaper,
+ "len": length,
+ "not": not,
+ "or": or,
+ "print": fmt.Sprint,
+ "printf": fmt.Sprintf,
+ "println": fmt.Sprintln,
+ "urlquery": URLQueryEscaper,
+}
+
+var builtinFuncs = createValueFuncs(builtins)
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+ m := make(map[string]reflect.Value)
+ addValueFuncs(m, funcMap)
+ return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+ for name, fn := range in {
+ v := reflect.ValueOf(fn)
+ if v.Kind() != reflect.Func {
+ panic("value for " + name + " not a function")
+ }
+ if !goodFunc(v.Type()) {
+ panic(fmt.Errorf("can't handle multiple results from method/function %q", name))
+ }
+ out[name] = v
+ }
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+ for name, fn := range in {
+ out[name] = fn
+ }
+}
+
+// goodFunc checks that the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+ // We allow functions with 1 result or 2 results where the second is an error.
+ switch {
+ case typ.NumOut() == 1:
+ return true
+ case typ.NumOut() == 2 && typ.Out(1) == errorType:
+ return true
+ }
+ return false
+}
+
+// findFunction looks for a function in the template, set, and global map.
+func findFunction(name string, tmpl *Template, set *Set) (reflect.Value, bool) {
+ if tmpl != nil {
+ if fn := tmpl.execFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ }
+ if set != nil {
+ if fn := set.execFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ }
+ if fn := builtinFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ return reflect.Value{}, false
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item interface{}, indices ...interface{}) (interface{}, error) {
+ v := reflect.ValueOf(item)
+ for _, i := range indices {
+ index := reflect.ValueOf(i)
+ var isNil bool
+ if v, isNil = indirect(v); isNil {
+ return nil, fmt.Errorf("index of nil pointer")
+ }
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice:
+ var x int64
+ switch index.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x = index.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x = int64(index.Uint())
+ default:
+ return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+ }
+ if x < 0 || x >= int64(v.Len()) {
+ return nil, fmt.Errorf("index out of range: %d", x)
+ }
+ v = v.Index(int(x))
+ case reflect.Map:
+ if !index.Type().AssignableTo(v.Type().Key()) {
+ return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
+ }
+ if x := v.MapIndex(index); x.IsValid() {
+ v = x
+ } else {
+   v = reflect.Zero(v.Type().Elem())
+ }
+ default:
+ return nil, fmt.Errorf("can't index item of type %s", index.Type())
+ }
+ }
+ return v.Interface(), nil
+}
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item interface{}) (int, error) {
+ v, isNil := indirect(reflect.ValueOf(item))
+ if isNil {
+ return 0, fmt.Errorf("len of nil pointer")
+ }
+ switch v.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len(), nil
+ }
+ return 0, fmt.Errorf("len of type %s", v.Type())
+}
+
+// Boolean logic.
+
+func truth(a interface{}) bool {
+ t, _ := isTrue(reflect.ValueOf(a))
+ return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 interface{}, args ...interface{}) interface{} {
+ if !truth(arg0) {
+ return arg0
+ }
+ for i := range args {
+ arg0 = args[i]
+ if !truth(arg0) {
+ break
+ }
+ }
+ return arg0
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 interface{}, args ...interface{}) interface{} {
+ if truth(arg0) {
+ return arg0
+ }
+ for i := range args {
+ arg0 = args[i]
+ if truth(arg0) {
+ break
+ }
+ }
+ return arg0
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg interface{}) (truth bool) {
+ truth, _ = isTrue(reflect.ValueOf(arg))
+ return !truth
+}
+
+// HTML escaping.
+
+var (
+ htmlQuot = []byte("&#34;") // shorter than "&quot;"
+ htmlApos = []byte("&#39;") // shorter than "&apos;"
+ htmlAmp = []byte("&amp;")
+ htmlLt = []byte("&lt;")
+ htmlGt = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+ last := 0
+ for i, c := range b {
+ var html []byte
+ switch c {
+ case '"':
+ html = htmlQuot
+ case '\'':
+ html = htmlApos
+ case '&':
+ html = htmlAmp
+ case '<':
+ html = htmlLt
+ case '>':
+ html = htmlGt
+ default:
+ continue
+ }
+ w.Write(b[last:i])
+ w.Write(html)
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexAny(s, `'"&<>`) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ HTMLEscape(&b, []byte(s))
+ return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+ ok := false
+ var s string
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ s = fmt.Sprint(args...)
+ }
+ return HTMLEscapeString(s)
+}
+
+// JavaScript escaping.
+
+var (
+ jsLowUni = []byte(`\u00`)
+ hex = []byte("0123456789ABCDEF")
+
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+ jsLt = []byte(`\x3C`)
+ jsGt = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+ last := 0
+ for i := 0; i < len(b); i++ {
+ c := b[i]
+
+ if !jsIsSpecial(rune(c)) {
+ // fast path: nothing to do
+ continue
+ }
+ w.Write(b[last:i])
+
+ if c < utf8.RuneSelf {
+ // Quotes, slashes and angle brackets get quoted.
+ // Control characters get written as \u00XX.
+ switch c {
+ case '\\':
+ w.Write(jsBackslash)
+ case '\'':
+ w.Write(jsApos)
+ case '"':
+ w.Write(jsQuot)
+ case '<':
+ w.Write(jsLt)
+ case '>':
+ w.Write(jsGt)
+ default:
+ w.Write(jsLowUni)
+ t, b := c>>4, c&0x0f
+ w.Write(hex[t : t+1])
+ w.Write(hex[b : b+1])
+ }
+ } else {
+ // Unicode rune.
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.IsPrint(r) {
+ w.Write(b[i : i+size])
+ } else {
+ // TODO(dsymonds): Do this without fmt?
+ fmt.Fprintf(w, "\\u%04X", r)
+ }
+ i += size - 1
+ }
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexFunc(s, jsIsSpecial) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ JSEscape(&b, []byte(s))
+ return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+ switch r {
+ case '\\', '\'', '"', '<', '>':
+ return true
+ }
+ return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+ ok := false
+ var s string
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ s = fmt.Sprint(args...)
+ }
+ return JSEscapeString(s)
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+ s, ok := "", false
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ s = fmt.Sprint(args...)
+ }
+ return url.QueryEscape(s)
+}
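
funcs.go above defines the built-in functions (and, or, not, index, len, print/printf/println and the HTML/JS/URL escapers) and accepts user functions whose optional second result must be an error. A small sketch of how those pieces are typically combined follows; the "head" helper and the sample data are made up for illustration.

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// The exported escapers can be called directly, outside any template.
	fmt.Println(template.HTMLEscapeString(`<a href="x">'hi' & bye</a>`))
	fmt.Println(template.JSEscapeString(`Go "jump" \`))

	// A user function with the (value, error) shape that goodFunc accepts;
	// a non-nil error aborts execution and surfaces from Execute.
	funcs := template.FuncMap{
		"head": func(s []string) (string, error) {
			if len(s) == 0 {
				return "", fmt.Errorf("head of empty slice")
			}
			return s[0], nil
		},
	}
	const text = `first={{head .}} second={{index . 1}} n={{len .}}`
	t := template.Must(template.New("builtins").Funcs(funcs).Parse(text))
	if err := t.Execute(os.Stdout, []string{"a", "b", "c"}); err != nil {
		panic(err)
	}
}
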
diff --git a/libgo/go/text/template/helper.go b/libgo/go/text/template/helper.go
new file mode 100644
index 0000000..a743a83
--- /dev/null
+++ b/libgo/go/text/template/helper.go
@@ -0,0 +1,241 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates and sets easier.
+
+package template
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+)
+
+// Functions and methods to parse a single template.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable initializations
+// such as
+// var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// ParseFile creates a new Template and parses the template definition from
+// the named file. The template name is the base name of the file.
+func ParseFile(filename string) (*Template, error) {
+ t := New(filepath.Base(filename))
+ return t.ParseFile(filename)
+}
+
+// parseFileInSet creates a new Template and parses the template
+// definition from the named file. The template name is the base name
+// of the file. It also adds the template to the set. Function bindings are
+// checked against those in the set.
+func parseFileInSet(filename string, set *Set) (*Template, error) {
+ t := New(filepath.Base(filename))
+ return t.parseFileInSet(filename, set)
+}
+
+// ParseFile reads the template definition from a file and parses it to
+// construct an internal representation of the template for execution.
+// The returned template will be nil if an error occurs.
+func (t *Template) ParseFile(filename string) (*Template, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return t.Parse(string(b))
+}
+
+// parseFileInSet is the same as ParseFile except that function bindings
+// are checked against those in the set and the template is added
+// to the set.
+// The returned template will be nil if an error occurs.
+func (t *Template) parseFileInSet(filename string, set *Set) (*Template, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return t.ParseInSet(string(b), set)
+}
+
+// Functions and methods to parse a set.
+
+// SetMust is a helper that wraps a call to a function returning (*Set, error)
+// and panics if the error is non-nil. It is intended for use in variable initializations
+// such as
+// var s = template.SetMust(template.ParseSetFiles("file"))
+func SetMust(s *Set, err error) *Set {
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// ParseFiles parses the named files into a set of named templates.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseFiles(filenames ...string) (*Set, error) {
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ _, err = s.Parse(string(b))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseSetFiles creates a new Set and parses the set definition from the
+// named files. Each file must be individually parseable.
+func ParseSetFiles(filenames ...string) (*Set, error) {
+ s := new(Set)
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ _, err = s.Parse(string(b))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseGlob parses the set definition from the files identified by the
+// pattern. The pattern is processed by filepath.Glob and must match at
+// least one file.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseGlob(pattern string) (*Set, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
+ }
+ return s.ParseFiles(filenames...)
+}
+
+// ParseSetGlob creates a new Set and parses the set definition from the
+// files identified by the pattern. The pattern is processed by filepath.Glob
+// and must match at least one file.
+func ParseSetGlob(pattern string) (*Set, error) {
+ set, err := new(Set).ParseGlob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ return set, nil
+}
+
+// Functions and methods to parse stand-alone template files into a set.
+
+// ParseTemplateFiles parses the named template files and adds
+// them to the set. Each template will be named the base name of
+// its file.
+// Unlike with ParseFiles, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateFiles is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseTemplateFiles(filenames ...string) (*Set, error) {
+ for _, filename := range filenames {
+ _, err := parseFileInSet(filename, s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseTemplateGlob parses the template files matched by the
+// pattern and adds them to the set. Each template will be named
+// the base name of its file.
+// Unlike with ParseGlob, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateGlob is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself.
+// If an error occurs, parsing stops and the returned set is nil.
+func (s *Set) ParseTemplateGlob(pattern string) (*Set, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ for _, filename := range filenames {
+ _, err := parseFileInSet(filename, s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// ParseTemplateFiles creates a set by parsing the named files,
+// each of which defines a single template. Each template will be
+// named the base name of its file.
+// Unlike with ParseFiles, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateFiles is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself. Parsing stops if an error is
+// encountered.
+func ParseTemplateFiles(filenames ...string) (*Set, error) {
+ set := new(Set)
+ set.init()
+ for _, filename := range filenames {
+ t, err := ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ if err := set.add(t); err != nil {
+ return nil, err
+ }
+ }
+ return set, nil
+}
+
+// ParseTemplateGlob creates a set by parsing the files matched
+// by the pattern, each of which defines a single template. The pattern
+// is processed by filepath.Glob and must match at least one file. Each
+// template will be named the base name of its file.
+// Unlike with ParseGlob, each file should be a stand-alone template
+// definition suitable for Template.Parse (not Set.Parse); that is, the
+// file does not contain {{define}} clauses. ParseTemplateGlob is
+// therefore equivalent to calling the ParseFile function to create
+// individual templates, which are then added to the set.
+// Each file must be parseable by itself. Parsing stops if an error is
+// encountered.
+func ParseTemplateGlob(pattern string) (*Set, error) {
+ set := new(Set)
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("pattern matches no files: %#q", pattern)
+ }
+ for _, filename := range filenames {
+ t, err := ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ if err := set.add(t); err != nil {
+ return nil, err
+ }
+ }
+ return set, nil
+}
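
helper.go adds the Must/SetMust wrappers plus the file- and glob-based parsing helpers for the pre-Go 1 Set API. A hedged sketch of the intended usage, assuming template files exist at the hypothetical path templates/*.tmpl and that one of them contains a {{define "page"}}...{{end}} block:

package main

import (
	"os"
	"text/template"
)

func main() {
	// SetMust + ParseSetGlob: load every file matching the (hypothetical)
	// pattern and panic at startup if anything fails to parse.
	set := template.SetMust(template.ParseSetGlob("templates/*.tmpl"))

	// Set.Execute (defined in set.go, elsewhere in this commit) runs the
	// named template from the set: Execute(writer, name, data).
	data := map[string]string{"Title": "home"}
	if err := set.Execute(os.Stdout, "page", data); err != nil {
		panic(err)
	}
}
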
diff --git a/libgo/go/text/template/parse.go b/libgo/go/text/template/parse.go
new file mode 100644
index 0000000..fa56214
--- /dev/null
+++ b/libgo/go/text/template/parse.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "reflect"
+ "text/template/parse"
+)
+
+// Template is the representation of a parsed template.
+type Template struct {
+ name string
+ *parse.Tree
+ leftDelim string
+ rightDelim string
+ // We use two maps, one for parsing and one for execution.
+ // This separation makes the API cleaner since it doesn't
+ // expose reflection to the client.
+ parseFuncs FuncMap
+ execFuncs map[string]reflect.Value
+ set *Set // can be nil.
+}
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+ return t.name
+}
+
+// Parsing.
+
+// New allocates a new template with the given name.
+func New(name string) *Template {
+ return &Template{
+ name: name,
+ parseFuncs: make(FuncMap),
+ execFuncs: make(map[string]reflect.Value),
+ }
+}
+
+// Delims sets the action delimiters, to be used in a subsequent
+// parse, to the specified strings.
+// An empty delimiter stands for the corresponding default: {{ or }}.
+// The return value is the template, so calls can be chained.
+func (t *Template) Delims(left, right string) *Template {
+ t.leftDelim = left
+ t.rightDelim = right
+ return t
+}
+
+// Funcs adds the elements of the argument map to the template's function
+// map. It panics if a value in the map is not a function with appropriate
+// return type.
+// The return value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+ addValueFuncs(t.execFuncs, funcMap)
+ addFuncs(t.parseFuncs, funcMap)
+ return t
+}
+
+// Parse parses the template definition string to construct an internal
+// representation of the template for execution.
+func (t *Template) Parse(s string) (tmpl *Template, err error) {
+ t.Tree, err = parse.New(t.name).Parse(s, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+}
+
+// ParseInSet parses the template definition string to construct an internal
+// representation of the template for execution. It also adds the template
+// to the set. It is an error if a template with the same name is already defined in the set.
+// Function bindings are checked against those in the set.
+func (t *Template) ParseInSet(s string, set *Set) (tmpl *Template, err error) {
+ var setFuncs FuncMap
+ if set != nil {
+ setFuncs = set.parseFuncs
+ }
+ t.Tree, err = parse.New(t.name).Parse(s, t.leftDelim, t.rightDelim, t.parseFuncs, setFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ if set != nil {
+ err = set.add(t)
+ }
+ return t, err
+}
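
parse.go wires Template to the parse package and exposes the chainable Delims and Funcs setters. Since Delims only takes effect on a subsequent Parse, the usual pattern is to chain the calls, as in this small sketch (the delimiters and template text are arbitrary):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Delims must be called before Parse; both setters return the template,
	// so the calls chain naturally.
	t, err := template.New("alt").Delims("<<", ">>").Parse("Hello, <<.>>!\n")
	if err != nil {
		panic(err)
	}
	if err := t.Execute(os.Stdout, "world"); err != nil {
		panic(err)
	}
	// prints: Hello, world!
}
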
diff --git a/libgo/go/text/template/parse/lex.go b/libgo/go/text/template/parse/lex.go
new file mode 100644
index 0000000..97c19a1
--- /dev/null
+++ b/libgo/go/text/template/parse/lex.go
@@ -0,0 +1,482 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType
+ val string
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case i.typ > itemKeyword:
+ return fmt.Sprintf("<%s>", i.val)
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemColonEquals // colon-equals (':=') introducing a declaration
+ itemEOF
+ itemField // alphanumeric identifier, starting with '.', possibly chained ('.x.y')
+ itemIdentifier // alphanumeric identifier
+ itemLeftDelim // left action delimiter
+ itemNumber // simple number, including imaginary
+ itemPipe // pipe symbol
+ itemRawString // raw quoted string (includes quotes)
+ itemRightDelim // right action delimiter
+ itemString // quoted string (includes quotes)
+ itemText // plain text
+ itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'.
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemDot // the cursor, spelled '.'.
+ itemDefine // define keyword
+ itemElse // else keyword
+ itemEnd // end keyword
+ itemIf // if keyword
+ itemRange // range keyword
+ itemTemplate // template keyword
+ itemWith // with keyword
+)
+
+// Make the types prettyprint.
+var itemName = map[itemType]string{
+ itemError: "error",
+ itemBool: "bool",
+ itemChar: "char",
+ itemCharConstant: "charconst",
+ itemComplex: "complex",
+ itemColonEquals: ":=",
+ itemEOF: "EOF",
+ itemField: "field",
+ itemIdentifier: "identifier",
+ itemLeftDelim: "left delim",
+ itemNumber: "number",
+ itemPipe: "pipe",
+ itemRawString: "raw string",
+ itemRightDelim: "right delim",
+ itemString: "string",
+ itemVariable: "variable",
+ // keywords
+ itemDot: ".",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+ itemEnd: "end",
+ itemRange: "range",
+ itemTemplate: "template",
+ itemWith: "with",
+}
+
+func (i itemType) String() string {
+ s := itemName[i]
+ if s == "" {
+ return fmt.Sprintf("item%d", int(i))
+ }
+ return s
+}
+
+var key = map[string]itemType{
+ ".": itemDot,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+ "if": itemIf,
+ "range": itemRange,
+ "template": itemTemplate,
+ "with": itemWith,
+}
+
+const eof = -1
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ name string // the name of the input; used only for error reports.
+ input string // the string being scanned.
+ leftDelim string // start of action.
+ rightDelim string // end of action.
+ state stateFn // the next lexing function to enter.
+ pos int // current position in the input.
+ start int // start position of this item.
+ width int // width of last rune read from input.
+ items chan item // channel of scanned items.
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() (r rune) {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ l.items <- item{t, l.input[l.start:l.pos]}
+ l.start = l.pos
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.IndexRune(valid, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+// lineNumber reports which line we're on. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.pos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-l.items:
+ return item
+ default:
+ l.state = l.state(l)
+ }
+ }
+ panic("not reached")
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input, left, right string) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+ if right == "" {
+ right = rightDelim
+ }
+ l := &lexer{
+ name: name,
+ input: input,
+ leftDelim: left,
+ rightDelim: right,
+ state: lexText,
+ items: make(chan item, 2), // Two items of buffering is sufficient for all state functions
+ }
+ return l
+}
+
+// state functions
+
+const (
+ leftDelim = "{{"
+ rightDelim = "}}"
+ leftComment = "/*"
+ rightComment = "*/"
+)
+
+// lexText scans until an opening action delimiter, "{{".
+func lexText(l *lexer) stateFn {
+ for {
+ if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ return lexLeftDelim
+ }
+ if l.next() == eof {
+ break
+ }
+ }
+ // Correctly reached EOF.
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ l.emit(itemEOF)
+ return nil
+}
+
+// lexLeftDelim scans the left delimiter, which is known to be present.
+func lexLeftDelim(l *lexer) stateFn {
+ if strings.HasPrefix(l.input[l.pos:], l.leftDelim+leftComment) {
+ return lexComment
+ }
+ l.pos += len(l.leftDelim)
+ l.emit(itemLeftDelim)
+ return lexInsideAction
+}
+
+// lexComment scans a comment. The left comment marker is known to be present.
+func lexComment(l *lexer) stateFn {
+ i := strings.Index(l.input[l.pos:], rightComment+l.rightDelim)
+ if i < 0 {
+ return l.errorf("unclosed comment")
+ }
+ l.pos += i + len(rightComment) + len(l.rightDelim)
+ l.ignore()
+ return lexText
+}
+
+// lexRightDelim scans the right delimiter, which is known to be present.
+func lexRightDelim(l *lexer) stateFn {
+ l.pos += len(l.rightDelim)
+ l.emit(itemRightDelim)
+ return lexText
+}
+
+// lexInsideAction scans the elements inside action delimiters.
+func lexInsideAction(l *lexer) stateFn {
+ // Either number, quoted string, or identifier.
+ // Spaces separate and are ignored.
+ // Pipe symbols separate and are emitted.
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+ return lexRightDelim
+ }
+ switch r := l.next(); {
+ case r == eof || r == '\n':
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ l.ignore()
+ case r == ':':
+ if l.next() != '=' {
+ return l.errorf("expected :=")
+ }
+ l.emit(itemColonEquals)
+ case r == '|':
+ l.emit(itemPipe)
+ case r == '"':
+ return lexQuote
+ case r == '`':
+ return lexRawQuote
+ case r == '$':
+ return lexIdentifier
+ case r == '\'':
+ return lexChar
+ case r == '.':
+ // special look-ahead for ".field" so we don't break l.backup().
+ if l.pos < len(l.input) {
+ r := l.input[l.pos]
+ if r < '0' || '9' < r {
+ return lexIdentifier // itemDot comes from the keyword table.
+ }
+ }
+ fallthrough // '.' can start a number.
+ case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+ l.backup()
+ return lexNumber
+ case isAlphaNumeric(r):
+ l.backup()
+ return lexIdentifier
+ case r <= unicode.MaxASCII && unicode.IsPrint(r):
+ l.emit(itemChar)
+ return lexInsideAction
+ default:
+ return l.errorf("unrecognized character in action: %#U", r)
+ }
+ return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric or field.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case isAlphaNumeric(r):
+ // absorb.
+ case r == '.' && (l.input[l.start] == '.' || l.input[l.start] == '$'):
+ // field chaining; absorb into one token.
+ default:
+ l.backup()
+ word := l.input[l.start:l.pos]
+ switch {
+ case key[word] > itemKeyword:
+ l.emit(key[word])
+ case word[0] == '.':
+ l.emit(itemField)
+ case word[0] == '$':
+ l.emit(itemVariable)
+ case word == "true", word == "false":
+ l.emit(itemBool)
+ default:
+ l.emit(itemIdentifier)
+ }
+ break Loop
+ }
+ }
+ return lexInsideAction
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated character constant")
+ case '\'':
+ break Loop
+ }
+ }
+ l.emit(itemCharConstant)
+ return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ if sign := l.peek(); sign == '+' || sign == '-' {
+ // Complex: 1+2i. No spaces, must end in 'i'.
+ if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(itemComplex)
+ } else {
+ l.emit(itemNumber)
+ }
+ return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+ // Optional leading sign.
+ l.accept("+-")
+ // Is it hex?
+ digits := "0123456789"
+ if l.accept("0") && l.accept("xX") {
+ digits = "0123456789abcdefABCDEF"
+ }
+ l.acceptRun(digits)
+ if l.accept(".") {
+ l.acceptRun(digits)
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun("0123456789")
+ }
+ // Is it imaginary?
+ l.accept("i")
+ // Next thing mustn't be alphanumeric.
+ if isAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated quoted string")
+ case '"':
+ break Loop
+ }
+ }
+ l.emit(itemString)
+ return lexInsideAction
+}
+
+// lexRawQuote scans a raw quoted string.
+func lexRawQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case eof, '\n':
+ return l.errorf("unterminated raw quoted string")
+ case '`':
+ break Loop
+ }
+ }
+ l.emit(itemRawString)
+ return lexInsideAction
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+ switch r {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
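
The lexer above is a synchronous state machine: each stateFn consumes input, emits items on a buffered channel, and returns the next state, while nextItem drives the machine until an item is available. Because the lexer type itself is unexported, the following is a stripped-down standalone illustration of the same pattern (a toy word/space lexer, not the template lexer):

package main

import (
	"fmt"
	"strings"
)

type tokType int

const (
	tokEOF tokType = iota
	tokWord
	tokSpace
)

type token struct {
	typ tokType
	val string
}

// stateFn mirrors the design above: a state returns the next state, or nil to stop.
type stateFn func(*miniLexer) stateFn

type miniLexer struct {
	input  string
	start  int
	pos    int
	tokens chan token
}

// emit passes a token back to the caller, as lexer.emit does above.
func (l *miniLexer) emit(t tokType) {
	l.tokens <- token{t, l.input[l.start:l.pos]}
	l.start = l.pos
}

func lexWord(l *miniLexer) stateFn {
	for l.pos < len(l.input) && !strings.ContainsRune(" \t", rune(l.input[l.pos])) {
		l.pos++
	}
	if l.pos > l.start {
		l.emit(tokWord)
	}
	if l.pos >= len(l.input) {
		l.emit(tokEOF)
		return nil
	}
	return lexSpace
}

func lexSpace(l *miniLexer) stateFn {
	for l.pos < len(l.input) && strings.ContainsRune(" \t", rune(l.input[l.pos])) {
		l.pos++
	}
	l.emit(tokSpace)
	return lexWord
}

func main() {
	l := &miniLexer{input: "state machines are  functions", tokens: make(chan token, 2)}
	// Drive the state machine exactly as lexer.nextItem does: advance states
	// until a token is available on the channel, then hand it to the caller.
	state := stateFn(lexWord)
	for {
		select {
		case tok := <-l.tokens:
			fmt.Printf("%d %q\n", tok.typ, tok.val)
			if tok.typ == tokEOF {
				return
			}
		default:
			state = state(l)
		}
	}
}
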
diff --git a/libgo/go/text/template/parse/lex_test.go b/libgo/go/text/template/parse/lex_test.go
new file mode 100644
index 0000000..6ee1b47
--- /dev/null
+++ b/libgo/go/text/template/parse/lex_test.go
@@ -0,0 +1,257 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "reflect"
+ "testing"
+)
+
+type lexTest struct {
+ name string
+ input string
+ items []item
+}
+
+var (
+ tEOF = item{itemEOF, ""}
+ tLeft = item{itemLeftDelim, "{{"}
+ tRight = item{itemRightDelim, "}}"}
+ tRange = item{itemRange, "range"}
+ tPipe = item{itemPipe, "|"}
+ tFor = item{itemIdentifier, "for"}
+ tQuote = item{itemString, `"abc \n\t\" "`}
+ raw = "`" + `abc\n\t\" ` + "`"
+ tRawQuote = item{itemRawString, raw}
+)
+
+var lexTests = []lexTest{
+ {"empty", "", []item{tEOF}},
+ {"spaces", " \t\n", []item{{itemText, " \t\n"}, tEOF}},
+ {"text", `now is the time`, []item{{itemText, "now is the time"}, tEOF}},
+ {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+ {itemText, "hello-"},
+ {itemText, "-world"},
+ tEOF,
+ }},
+ {"punctuation", "{{,@%}}", []item{
+ tLeft,
+ {itemChar, ","},
+ {itemChar, "@"},
+ {itemChar, "%"},
+ tRight,
+ tEOF,
+ }},
+ {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
+ {"for", `{{for }}`, []item{tLeft, tFor, tRight, tEOF}},
+ {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
+ {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
+ {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{
+ tLeft,
+ {itemNumber, "1"},
+ {itemNumber, "02"},
+ {itemNumber, "0x14"},
+ {itemNumber, "-7.2i"},
+ {itemNumber, "1e3"},
+ {itemNumber, "+1.2e-4"},
+ {itemNumber, "4.2i"},
+ {itemComplex, "1+2i"},
+ tRight,
+ tEOF,
+ }},
+ {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
+ tLeft,
+ {itemCharConstant, `'a'`},
+ {itemCharConstant, `'\n'`},
+ {itemCharConstant, `'\''`},
+ {itemCharConstant, `'\\'`},
+ {itemCharConstant, `'\u00FF'`},
+ {itemCharConstant, `'\xFF'`},
+ {itemCharConstant, `'本'`},
+ tRight,
+ tEOF,
+ }},
+ {"bools", "{{true false}}", []item{
+ tLeft,
+ {itemBool, "true"},
+ {itemBool, "false"},
+ tRight,
+ tEOF,
+ }},
+ {"dot", "{{.}}", []item{
+ tLeft,
+ {itemDot, "."},
+ tRight,
+ tEOF,
+ }},
+ {"dots", "{{.x . .2 .x.y}}", []item{
+ tLeft,
+ {itemField, ".x"},
+ {itemDot, "."},
+ {itemNumber, ".2"},
+ {itemField, ".x.y"},
+ tRight,
+ tEOF,
+ }},
+ {"keywords", "{{range if else end with}}", []item{
+ tLeft,
+ {itemRange, "range"},
+ {itemIf, "if"},
+ {itemElse, "else"},
+ {itemEnd, "end"},
+ {itemWith, "with"},
+ tRight,
+ tEOF,
+ }},
+ {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
+ tLeft,
+ {itemVariable, "$c"},
+ {itemColonEquals, ":="},
+ {itemIdentifier, "printf"},
+ {itemVariable, "$"},
+ {itemVariable, "$hello"},
+ {itemVariable, "$23"},
+ {itemVariable, "$"},
+ {itemVariable, "$var.Field"},
+ {itemField, ".Method"},
+ tRight,
+ tEOF,
+ }},
+ {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
+ {itemText, "intro "},
+ tLeft,
+ {itemIdentifier, "echo"},
+ {itemIdentifier, "hi"},
+ {itemNumber, "1.2"},
+ tPipe,
+ {itemIdentifier, "noargs"},
+ tPipe,
+ {itemIdentifier, "args"},
+ {itemNumber, "1"},
+ {itemString, `"hi"`},
+ tRight,
+ {itemText, " outro"},
+ tEOF,
+ }},
+ {"declaration", "{{$v := 3}}", []item{
+ tLeft,
+ {itemVariable, "$v"},
+ {itemColonEquals, ":="},
+ {itemNumber, "3"},
+ tRight,
+ tEOF,
+ }},
+ {"2 declarations", "{{$v , $w := 3}}", []item{
+ tLeft,
+ {itemVariable, "$v"},
+ {itemChar, ","},
+ {itemVariable, "$w"},
+ {itemColonEquals, ":="},
+ {itemNumber, "3"},
+ tRight,
+ tEOF,
+ }},
+ // errors
+ {"badchar", "#{{\x01}}", []item{
+ {itemText, "#"},
+ tLeft,
+ {itemError, "unrecognized character in action: U+0001"},
+ }},
+ {"unclosed action", "{{\n}}", []item{
+ tLeft,
+ {itemError, "unclosed action"},
+ }},
+ {"EOF in action", "{{range", []item{
+ tLeft,
+ tRange,
+ {itemError, "unclosed action"},
+ }},
+ {"unclosed quote", "{{\"\n\"}}", []item{
+ tLeft,
+ {itemError, "unterminated quoted string"},
+ }},
+ {"unclosed raw quote", "{{`xx\n`}}", []item{
+ tLeft,
+ {itemError, "unterminated raw quoted string"},
+ }},
+ {"unclosed char constant", "{{'\n}}", []item{
+ tLeft,
+ {itemError, "unterminated character constant"},
+ }},
+ {"bad number", "{{3k}}", []item{
+ tLeft,
+ {itemError, `bad number syntax: "3k"`},
+ }},
+
+ // Fixed bugs
+ // Many elements in an action blew the lookahead until
+ // we made lexInsideAction not loop.
+ {"long pipeline deadlock", "{{|||||}}", []item{
+ tLeft,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tRight,
+ tEOF,
+ }},
+}
+
+// collect gathers the emitted items into a slice.
+func collect(t *lexTest, left, right string) (items []item) {
+ l := lex(t.name, t.input, left, right)
+ for {
+ item := l.nextItem()
+ items = append(items, item)
+ if item.typ == itemEOF || item.typ == itemError {
+ break
+ }
+ }
+ return
+}
+
+func TestLex(t *testing.T) {
+ for _, test := range lexTests {
+ items := collect(&test, "", "")
+ if !reflect.DeepEqual(items, test.items) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
+
+// Some easy cases from above, but with delimiters $$ and @@
+var lexDelimTests = []lexTest{
+ {"punctuation", "$$,@%{{}}@@", []item{
+ tLeftDelim,
+ {itemChar, ","},
+ {itemChar, "@"},
+ {itemChar, "%"},
+ {itemChar, "{"},
+ {itemChar, "{"},
+ {itemChar, "}"},
+ {itemChar, "}"},
+ tRightDelim,
+ tEOF,
+ }},
+ {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
+ {"for", `$$for @@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
+ {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
+ {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
+}
+
+var (
+ tLeftDelim = item{itemLeftDelim, "$$"}
+ tRightDelim = item{itemRightDelim, "@@"}
+)
+
+func TestDelims(t *testing.T) {
+ for _, test := range lexDelimTests {
+ items := collect(&test, "$$", "@@")
+ if !reflect.DeepEqual(items, test.items) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
diff --git a/libgo/go/text/template/parse/node.go b/libgo/go/text/template/parse/node.go
new file mode 100644
index 0000000..a4e5514
--- /dev/null
+++ b/libgo/go/text/template/parse/node.go
@@ -0,0 +1,463 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parse nodes.
+
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// A node is an element in the parse tree. The interface is trivial.
+type Node interface {
+ Type() NodeType
+ String() string
+}
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+ return t
+}
+
+const (
+ NodeText NodeType = iota // Plain text.
+ NodeAction // A simple action such as field evaluation.
+ NodeBool // A boolean constant.
+ NodeCommand // An element of a pipeline.
+ NodeDot // The cursor, dot.
+ nodeElse // An else action. Not added to tree.
+ nodeEnd // An end action. Not added to tree.
+ NodeField // A field or method name.
+ NodeIdentifier // An identifier; always a function name.
+ NodeIf // An if action.
+ NodeList // A list of Nodes.
+ NodeNumber // A numerical constant.
+ NodePipe // A pipeline of commands.
+ NodeRange // A range action.
+ NodeString // A string constant.
+ NodeTemplate // A template invocation action.
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+)
+
+// Nodes.
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+ NodeType
+ Nodes []Node // The element nodes in lexical order.
+}
+
+func newList() *ListNode {
+ return &ListNode{NodeType: NodeList}
+}
+
+func (l *ListNode) append(n Node) {
+ l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) String() string {
+ b := new(bytes.Buffer)
+ fmt.Fprint(b, "[")
+ for _, n := range l.Nodes {
+ fmt.Fprint(b, n)
+ }
+ fmt.Fprint(b, "]")
+ return b.String()
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+ NodeType
+ Text []byte // The text; may span newlines.
+}
+
+func newText(text string) *TextNode {
+ return &TextNode{NodeType: NodeText, Text: []byte(text)}
+}
+
+func (t *TextNode) String() string {
+ return fmt.Sprintf("(text: %q)", t.Text)
+}
+
+// PipeNode holds a pipeline with optional declaration
+type PipeNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Decl []*VariableNode // Variable declarations in lexical order.
+ Cmds []*CommandNode // The commands in lexical order.
+}
+
+func newPipeline(line int, decl []*VariableNode) *PipeNode {
+ return &PipeNode{NodeType: NodePipe, Line: line, Decl: decl}
+}
+
+func (p *PipeNode) append(command *CommandNode) {
+ p.Cmds = append(p.Cmds, command)
+}
+
+func (p *PipeNode) String() string {
+ if p.Decl != nil {
+ return fmt.Sprintf("%v := %v", p.Decl, p.Cmds)
+ }
+ return fmt.Sprintf("%v", p.Cmds)
+}
+
+// ActionNode holds an action (something bounded by delimiters).
+// Control actions have their own nodes; ActionNode represents simple
+// ones such as field evaluations.
+type ActionNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Pipe *PipeNode // The pipeline in the action.
+}
+
+func newAction(line int, pipe *PipeNode) *ActionNode {
+ return &ActionNode{NodeType: NodeAction, Line: line, Pipe: pipe}
+}
+
+func (a *ActionNode) String() string {
+ return fmt.Sprintf("(action: %v)", a.Pipe)
+}
+
+// CommandNode holds a command (a pipeline inside an evaluating action).
+type CommandNode struct {
+ NodeType
+ Args []Node // Arguments in lexical order: Identifier, field, or constant.
+}
+
+func newCommand() *CommandNode {
+ return &CommandNode{NodeType: NodeCommand}
+}
+
+func (c *CommandNode) append(arg Node) {
+ c.Args = append(c.Args, arg)
+}
+
+func (c *CommandNode) String() string {
+ return fmt.Sprintf("(command: %v)", c.Args)
+}
+
+// IdentifierNode holds an identifier.
+type IdentifierNode struct {
+ NodeType
+ Ident string // The identifier's name.
+}
+
+// NewIdentifier returns a new IdentifierNode with the given identifier name.
+func NewIdentifier(ident string) *IdentifierNode {
+ return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
+}
+
+func (i *IdentifierNode) String() string {
+ return fmt.Sprintf("I=%s", i.Ident)
+}
+
+// VariableNode holds a list of variable names. The dollar sign is
+// part of the name.
+type VariableNode struct {
+ NodeType
+ Ident []string // Variable names in lexical order.
+}
+
+func newVariable(ident string) *VariableNode {
+ return &VariableNode{NodeType: NodeVariable, Ident: strings.Split(ident, ".")}
+}
+
+func (v *VariableNode) String() string {
+ return fmt.Sprintf("V=%s", v.Ident)
+}
+
+// DotNode holds the special identifier '.'. It is represented by a nil pointer.
+type DotNode bool
+
+func newDot() *DotNode {
+ return nil
+}
+
+func (d *DotNode) Type() NodeType {
+ return NodeDot
+}
+
+func (d *DotNode) String() string {
+ return "{{<.>}}"
+}
+
+// FieldNode holds a field (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The period is dropped from each ident.
+type FieldNode struct {
+ NodeType
+ Ident []string // The identifiers in lexical order.
+}
+
+func newField(ident string) *FieldNode {
+ return &FieldNode{NodeType: NodeField, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
+}
+
+func (f *FieldNode) String() string {
+ return fmt.Sprintf("F=%s", f.Ident)
+}
+
+// BoolNode holds a boolean constant.
+type BoolNode struct {
+ NodeType
+ True bool // The value of the boolean constant.
+}
+
+func newBool(t bool) *BoolNode {
+ return &BoolNode{NodeType: NodeBool, True: t}
+}
+
+func (b *BoolNode) String() string {
+ return fmt.Sprintf("B=%t", b.True)
+}
+
+// NumberNode holds a number: signed or unsigned integer, float, or complex.
+// The value is parsed and stored under all the types that can represent the value.
+// This simulates in a small amount of code the behavior of Go's ideal constants.
+type NumberNode struct {
+ NodeType
+ IsInt bool // Number has an integral value.
+ IsUint bool // Number has an unsigned integral value.
+ IsFloat bool // Number has a floating-point value.
+ IsComplex bool // Number is complex.
+ Int64 int64 // The signed integer value.
+ Uint64 uint64 // The unsigned integer value.
+ Float64 float64 // The floating-point value.
+ Complex128 complex128 // The complex value.
+ Text string // The original textual representation from the input.
+}
+
+func newNumber(text string, typ itemType) (*NumberNode, error) {
+ n := &NumberNode{NodeType: NodeNumber, Text: text}
+ switch typ {
+ case itemCharConstant:
+ rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
+ if err != nil {
+ return nil, err
+ }
+ if tail != "'" {
+ return nil, fmt.Errorf("malformed character constant: %s", text)
+ }
+ n.Int64 = int64(rune)
+ n.IsInt = true
+ n.Uint64 = uint64(rune)
+ n.IsUint = true
+ n.Float64 = float64(rune) // odd but those are the rules.
+ n.IsFloat = true
+ return n, nil
+ case itemComplex:
+ // fmt.Sscan can parse the pair, so let it do the work.
+ if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
+ return nil, err
+ }
+ n.IsComplex = true
+ n.simplifyComplex()
+ return n, nil
+ }
+ // Imaginary constants can only be complex unless they are zero.
+ if len(text) > 0 && text[len(text)-1] == 'i' {
+ f, err := strconv.Atof64(text[:len(text)-1])
+ if err == nil {
+ n.IsComplex = true
+ n.Complex128 = complex(0, f)
+ n.simplifyComplex()
+ return n, nil
+ }
+ }
+ // Do integer test first so we get 0x123 etc.
+ u, err := strconv.Btoui64(text, 0) // will fail for -0; fixed below.
+ if err == nil {
+ n.IsUint = true
+ n.Uint64 = u
+ }
+ i, err := strconv.Btoi64(text, 0)
+ if err == nil {
+ n.IsInt = true
+ n.Int64 = i
+ if i == 0 {
+ n.IsUint = true // in case of -0.
+ n.Uint64 = u
+ }
+ }
+ // If an integer extraction succeeded, promote the float.
+ if n.IsInt {
+ n.IsFloat = true
+ n.Float64 = float64(n.Int64)
+ } else if n.IsUint {
+ n.IsFloat = true
+ n.Float64 = float64(n.Uint64)
+ } else {
+ f, err := strconv.Atof64(text)
+ if err == nil {
+ n.IsFloat = true
+ n.Float64 = f
+ // If a floating-point extraction succeeded, extract the int if needed.
+ if !n.IsInt && float64(int64(f)) == f {
+ n.IsInt = true
+ n.Int64 = int64(f)
+ }
+ if !n.IsUint && float64(uint64(f)) == f {
+ n.IsUint = true
+ n.Uint64 = uint64(f)
+ }
+ }
+ }
+ if !n.IsInt && !n.IsUint && !n.IsFloat {
+ return nil, fmt.Errorf("illegal number syntax: %q", text)
+ }
+ return n, nil
+}
+
+// simplifyComplex pulls out any other types that are represented by the complex number.
+// These all require that the imaginary part be zero.
+func (n *NumberNode) simplifyComplex() {
+ n.IsFloat = imag(n.Complex128) == 0
+ if n.IsFloat {
+ n.Float64 = real(n.Complex128)
+ n.IsInt = float64(int64(n.Float64)) == n.Float64
+ if n.IsInt {
+ n.Int64 = int64(n.Float64)
+ }
+ n.IsUint = float64(uint64(n.Float64)) == n.Float64
+ if n.IsUint {
+ n.Uint64 = uint64(n.Float64)
+ }
+ }
+}
+
+func (n *NumberNode) String() string {
+ return fmt.Sprintf("N=%s", n.Text)
+}
+
+// StringNode holds a string constant. The value has been "unquoted".
+type StringNode struct {
+ NodeType
+ Quoted string // The original text of the string, with quotes.
+ Text string // The string, after quote processing.
+}
+
+func newString(orig, text string) *StringNode {
+ return &StringNode{NodeType: NodeString, Quoted: orig, Text: text}
+}
+
+func (s *StringNode) String() string {
+ return fmt.Sprintf("S=%#q", s.Text)
+}
+
+// endNode represents an {{end}} action. It is represented by a nil pointer.
+// It does not appear in the final parse tree.
+type endNode bool
+
+func newEnd() *endNode {
+ return nil
+}
+
+func (e *endNode) Type() NodeType {
+ return nodeEnd
+}
+
+func (e *endNode) String() string {
+ return "{{end}}"
+}
+
+// elseNode represents an {{else}} action. Does not appear in the final tree.
+type elseNode struct {
+ NodeType
+ Line int // The line number in the input.
+}
+
+func newElse(line int) *elseNode {
+ return &elseNode{NodeType: nodeElse, Line: line}
+}
+
+func (e *elseNode) Type() NodeType {
+ return nodeElse
+}
+
+func (e *elseNode) String() string {
+ return "{{else}}"
+}
+
+// BranchNode is the common representation of if, range, and with.
+type BranchNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Pipe *PipeNode // The pipeline to be evaluated.
+ List *ListNode // What to execute if the value is non-empty.
+ ElseList *ListNode // What to execute if the value is empty (nil if absent).
+}
+
+func (b *BranchNode) String() string {
+ name := ""
+ switch b.NodeType {
+ case NodeIf:
+ name = "if"
+ case NodeRange:
+ name = "range"
+ case NodeWith:
+ name = "with"
+ default:
+ panic("unknown branch type")
+ }
+ if b.ElseList != nil {
+ return fmt.Sprintf("({{%s %s}} %s {{else}} %s)", name, b.Pipe, b.List, b.ElseList)
+ }
+ return fmt.Sprintf("({{%s %s}} %s)", name, b.Pipe, b.List)
+}
+
+// IfNode represents an {{if}} action and its commands.
+type IfNode struct {
+ BranchNode
+}
+
+func newIf(line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
+ return &IfNode{BranchNode{NodeType: NodeIf, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+// RangeNode represents a {{range}} action and its commands.
+type RangeNode struct {
+ BranchNode
+}
+
+func newRange(line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
+ return &RangeNode{BranchNode{NodeType: NodeRange, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+// WithNode represents a {{with}} action and its commands.
+type WithNode struct {
+ BranchNode
+}
+
+func newWith(line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
+ return &WithNode{BranchNode{NodeType: NodeWith, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+// TemplateNode represents a {{template}} action.
+type TemplateNode struct {
+ NodeType
+ Line int // The line number in the input.
+ Name string // The name of the template (unquoted).
+ Pipe *PipeNode // The command to evaluate as dot for the template.
+}
+
+func newTemplate(line int, name string, pipe *PipeNode) *TemplateNode {
+ return &TemplateNode{NodeType: NodeTemplate, Line: line, Name: name, Pipe: pipe}
+}
+
+func (t *TemplateNode) String() string {
+ if t.Pipe == nil {
+ return fmt.Sprintf("{{template %q}}", t.Name)
+ }
+ return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
+}
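
newNumber above stores a single literal under every representation that fits (int64, uint64, float64, complex128), mimicking Go's ideal constants. The sketch below shows the same idea in isolation; it uses the current strconv names (ParseUint/ParseInt/ParseFloat) rather than the pre-Go 1 Btoui64/Btoi64/Atof64 spellings used in this file, and the sample literals are arbitrary.

package main

import (
	"fmt"
	"strconv"
)

// classify reports, in miniature, which representations accept a literal,
// much as newNumber sets IsInt/IsUint/IsFloat for the same text.
func classify(text string) {
	if u, err := strconv.ParseUint(text, 0, 64); err == nil {
		fmt.Printf("%q as uint64:  %d\n", text, u)
	}
	if i, err := strconv.ParseInt(text, 0, 64); err == nil {
		fmt.Printf("%q as int64:   %d\n", text, i)
	}
	if f, err := strconv.ParseFloat(text, 64); err == nil {
		fmt.Printf("%q as float64: %g\n", text, f)
	}
}

func main() {
	classify("0x14") // both integer forms; newNumber would then promote a float from them
	classify("-7.2") // float only
	classify("1e3")  // float only here; newNumber would also extract the int 1000
}
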
diff --git a/libgo/go/text/template/parse/parse.go b/libgo/go/text/template/parse/parse.go
new file mode 100644
index 0000000..1b6ab3a
--- /dev/null
+++ b/libgo/go/text/template/parse/parse.go
@@ -0,0 +1,436 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse builds parse trees for templates. The grammar is defined
+// in the documentation for the template package.
+package parse
+
+import (
+ "fmt"
+ "runtime"
+ "strconv"
+ "unicode"
+)
+
+// Tree is the representation of a parsed template.
+type Tree struct {
+ Name string // Name is the name of the template.
+ Root *ListNode // Root is the top-level root of the parse tree.
+ // Parsing only; cleared after parse.
+ funcs []map[string]interface{}
+ lex *lexer
+ token [2]item // two-token lookahead for parser.
+ peekCount int
+ vars []string // variables defined at the moment.
+}
+
+// next returns the next token.
+func (t *Tree) next() item {
+ if t.peekCount > 0 {
+ t.peekCount--
+ } else {
+ t.token[0] = t.lex.nextItem()
+ }
+ return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+ t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens
+func (t *Tree) backup2(t1 item) {
+ t.token[1] = t1
+ t.peekCount = 2
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+ if t.peekCount > 0 {
+ return t.token[t.peekCount-1]
+ }
+ t.peekCount = 1
+ t.token[0] = t.lex.nextItem()
+ return t.token[0]
+}
+
+// Parsing.
+
+// New allocates a new template with the given name.
+func New(name string, funcs ...map[string]interface{}) *Tree {
+ return &Tree{
+ Name: name,
+ funcs: funcs,
+ }
+}
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...interface{}) {
+ t.Root = nil
+ format = fmt.Sprintf("template: %s:%d: %s", t.Name, t.lex.lineNumber(), format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (t *Tree) error(err error) {
+ t.errorf("%s", err)
+}
+
+// expect consumes the next token and guarantees it has the required type.
+func (t *Tree) expect(expected itemType, context string) item {
+ token := t.next()
+ if token.typ != expected {
+ t.errorf("expected %s in %s; got %s", expected, context, token)
+ }
+ return token
+}
+
+// unexpected complains about the token and terminates processing.
+func (t *Tree) unexpected(token item, context string) {
+ t.errorf("unexpected %s in %s", token, context)
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ if t != nil {
+ t.stopParse()
+ }
+ *errp = e.(error)
+ }
+ return
+}
+
+// startParse starts the template parsing from the lexer.
+func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
+ t.Root = nil
+ t.lex = lex
+ t.vars = []string{"$"}
+ t.funcs = funcs
+}
+
+// stopParse terminates parsing.
+func (t *Tree) stopParse() {
+ t.lex = nil
+ t.vars = nil
+ t.funcs = nil
+}
+
+// atEOF returns true if, possibly after spaces, we're at EOF.
+func (t *Tree) atEOF() bool {
+ for {
+ token := t.peek()
+ switch token.typ {
+ case itemEOF:
+ return true
+ case itemText:
+ for _, r := range token.val {
+ if !unicode.IsSpace(r) {
+ return false
+ }
+ }
+ t.next() // skip spaces.
+ continue
+ }
+ break
+ }
+ return false
+}
+
+// Parse parses the template definition string to construct an internal
+// representation of the template for execution. If either action delimiter
+// string is empty, the default ("{{" or "}}") is used.
+func (t *Tree) Parse(s, leftDelim, rightDelim string, funcs ...map[string]interface{}) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.startParse(funcs, lex(t.Name, s, leftDelim, rightDelim))
+ t.parse(true)
+ t.stopParse()
+ return t, nil
+}
+
+// parse is the helper for Parse.
+// It triggers an error if we expect EOF but don't reach it.
+func (t *Tree) parse(toEOF bool) (next Node) {
+ t.Root, next = t.itemList(true)
+ if toEOF && next != nil {
+ t.errorf("unexpected %s", next)
+ }
+ return next
+}
+
+// itemList:
+// textOrAction*
+// Terminates at EOF and at {{end}} or {{else}}, which is returned separately.
+// The toEOF flag tells whether we expect to reach EOF.
+func (t *Tree) itemList(toEOF bool) (list *ListNode, next Node) {
+ list = newList()
+ for t.peek().typ != itemEOF {
+ n := t.textOrAction()
+ switch n.Type() {
+ case nodeEnd, nodeElse:
+ return list, n
+ }
+ list.append(n)
+ }
+ if !toEOF {
+ t.unexpected(t.next(), "input")
+ }
+ return list, nil
+}
+
+// textOrAction:
+// text | action
+func (t *Tree) textOrAction() Node {
+ switch token := t.next(); token.typ {
+ case itemText:
+ return newText(token.val)
+ case itemLeftDelim:
+ return t.action()
+ default:
+ t.unexpected(token, "input")
+ }
+ return nil
+}
+
+// Action:
+// control
+// command ("|" command)*
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+ switch token := t.next(); token.typ {
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+ return t.endControl()
+ case itemIf:
+ return t.ifControl()
+ case itemRange:
+ return t.rangeControl()
+ case itemTemplate:
+ return t.templateControl()
+ case itemWith:
+ return t.withControl()
+ }
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ return newAction(t.lex.lineNumber(), t.pipeline("command"))
+}
+
+// Pipeline:
+// field or command
+// pipeline "|" pipeline
+func (t *Tree) pipeline(context string) (pipe *PipeNode) {
+ var decl []*VariableNode
+ // Are there declarations?
+ for {
+ if v := t.peek(); v.typ == itemVariable {
+ t.next()
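+ // A ':=' token, or a single-character token (itemChar, such as the ','
+ // separating variables in a range declaration), marks a declaration;
+ // otherwise the variable is an ordinary operand and both tokens are put back.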
+ if next := t.peek(); next.typ == itemColonEquals || next.typ == itemChar {
+ t.next()
+ variable := newVariable(v.val)
+ if len(variable.Ident) != 1 {
+ t.errorf("illegal variable in declaration: %s", v.val)
+ }
+ decl = append(decl, variable)
+ t.vars = append(t.vars, v.val)
+ if next.typ == itemChar && next.val == "," {
+ if context == "range" && len(decl) < 2 {
+ continue
+ }
+ t.errorf("too many declarations in %s", context)
+ }
+ } else {
+ t.backup2(v)
+ }
+ }
+ break
+ }
+ pipe = newPipeline(t.lex.lineNumber(), decl)
+ for {
+ switch token := t.next(); token.typ {
+ case itemRightDelim:
+ if len(pipe.Cmds) == 0 {
+ t.errorf("missing value for %s", context)
+ }
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemVariable, itemNumber, itemRawString, itemString:
+ t.backup()
+ pipe.append(t.command())
+ default:
+ t.unexpected(token, context)
+ }
+ }
+ return
+}
+
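+// parseControl does the shared work for if, range, and with: it parses the
+// pipeline, the body, and an optional {{else}} body, and requires a closing
+// {{end}}.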
+func (t *Tree) parseControl(context string) (lineNum int, pipe *PipeNode, list, elseList *ListNode) {
+ lineNum = t.lex.lineNumber()
+ defer t.popVars(len(t.vars))
+ pipe = t.pipeline(context)
+ var next Node
+ list, next = t.itemList(false)
+ switch next.Type() {
+ case nodeEnd: //done
+ case nodeElse:
+ elseList, next = t.itemList(false)
+ if next.Type() != nodeEnd {
+ t.errorf("expected end; found %s", next)
+ }
+ }
+ return lineNum, pipe, list, elseList
+}
+
+// If:
+// {{if pipeline}} itemList {{end}}
+// {{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+ return newIf(t.parseControl("if"))
+}
+
+// Range:
+// {{range pipeline}} itemList {{end}}
+// {{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+ return newRange(t.parseControl("range"))
+}
+
+// With:
+// {{with pipeline}} itemList {{end}}
+// {{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+ return newWith(t.parseControl("with"))
+}
+
+// End:
+// {{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+ t.expect(itemRightDelim, "end")
+ return newEnd()
+}
+
+// Else:
+// {{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+ t.expect(itemRightDelim, "else")
+ return newElse(t.lex.lineNumber())
+}
+
+// Template:
+// {{template stringValue pipeline}}
+// Template keyword is past. The name must be a quoted or raw string
+// constant; it is unquoted before use.
+func (t *Tree) templateControl() Node {
+ var name string
+ switch token := t.next(); token.typ {
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ name = s
+ default:
+ t.unexpected(token, "template invocation")
+ }
+ var pipe *PipeNode
+ if t.next().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ pipe = t.pipeline("template")
+ }
+ return newTemplate(t.lex.lineNumber(), name, pipe)
+}
+
+// command:
+// space-separated arguments up to a pipeline character or right delimiter.
+// We consume the pipe character but leave the right delim to terminate the action.
+func (t *Tree) command() *CommandNode {
+ cmd := newCommand()
+Loop:
+ for {
+ switch token := t.next(); token.typ {
+ case itemRightDelim:
+ t.backup()
+ break Loop
+ case itemPipe:
+ break Loop
+ case itemError:
+ t.errorf("%s", token.val)
+ case itemIdentifier:
+ if !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+ }
+ cmd.append(NewIdentifier(token.val))
+ case itemDot:
+ cmd.append(newDot())
+ case itemVariable:
+ cmd.append(t.useVar(token.val))
+ case itemField:
+ cmd.append(newField(token.val))
+ case itemBool:
+ cmd.append(newBool(token.val == "true"))
+ case itemCharConstant, itemComplex, itemNumber:
+ number, err := newNumber(token.val, token.typ)
+ if err != nil {
+ t.error(err)
+ }
+ cmd.append(number)
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ cmd.append(newString(token.val, s))
+ default:
+ t.unexpected(token, "command")
+ }
+ }
+ if len(cmd.Args) == 0 {
+ t.errorf("empty command")
+ }
+ return cmd
+}
+
+// hasFunction reports if a function name exists in the Tree's maps.
+func (t *Tree) hasFunction(name string) bool {
+ for _, funcMap := range t.funcs {
+ if funcMap == nil {
+ continue
+ }
+ if funcMap[name] != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// popVars trims the variable list to the specified length.
+func (t *Tree) popVars(n int) {
+ t.vars = t.vars[:n]
+}
+
+// useVar returns a node for a variable reference. It errors if the
+// variable is not defined.
+func (t *Tree) useVar(name string) Node {
+ v := newVariable(name)
+ for _, varName := range t.vars {
+ if varName == v.Ident[0] {
+ return v
+ }
+ }
+ t.errorf("undefined variable %q", v.Ident[0])
+ return nil
+}
diff --git a/libgo/go/text/template/parse/parse_test.go b/libgo/go/text/template/parse/parse_test.go
new file mode 100644
index 0000000..f05f6e3
--- /dev/null
+++ b/libgo/go/text/template/parse/parse_test.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "flag"
+ "fmt"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+type numberTest struct {
+ text string
+ isInt bool
+ isUint bool
+ isFloat bool
+ isComplex bool
+ int64
+ uint64
+ float64
+ complex128
+}
+
+var numberTests = []numberTest{
+ // basics
+ {"0", true, true, true, false, 0, 0, 0, 0},
+ {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
+ {"73", true, true, true, false, 73, 73, 73, 0},
+ {"073", true, true, true, false, 073, 073, 073, 0},
+ {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
+ {"-73", true, false, true, false, -73, 0, -73, 0},
+ {"+73", true, false, true, false, 73, 0, 73, 0},
+ {"100", true, true, true, false, 100, 100, 100, 0},
+ {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
+ {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
+ {"-1.2", false, false, true, false, 0, 0, -1.2, 0},
+ {"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
+ {"-1e19", false, false, true, false, 0, 0, -1e19, 0},
+ {"4i", false, false, false, true, 0, 0, 0, 4i},
+ {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
+ {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
+ // complex with 0 imaginary are float (and maybe integer)
+ {"0i", true, true, true, true, 0, 0, 0, 0},
+ {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
+ {"-12+0i", true, false, true, true, -12, 0, -12, -12},
+ {"13+0i", true, true, true, true, 13, 13, 13, 13},
+ // funny bases
+ {"0123", true, true, true, false, 0123, 0123, 0123, 0},
+ {"-0x0", true, true, true, false, 0, 0, 0, 0},
+ {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
+ // character constants
+ {`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
+ {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
+ {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
+ {`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
+ {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
+ {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ // some broken syntax
+ {text: "+-2"},
+ {text: "0x123."},
+ {text: "1e."},
+ {text: "0xi."},
+ {text: "1+2."},
+ {text: "'x"},
+ {text: "'xx'"},
+}
+
+func TestNumberParse(t *testing.T) {
+ for _, test := range numberTests {
+ // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
+ // because imaginary comes out as a number.
+ var c complex128
+ typ := itemNumber
+ if test.text[0] == '\'' {
+ typ = itemCharConstant
+ } else {
+ _, err := fmt.Sscan(test.text, &c)
+ if err == nil {
+ typ = itemComplex
+ }
+ }
+ n, err := newNumber(test.text, typ)
+ ok := test.isInt || test.isUint || test.isFloat || test.isComplex
+ if ok && err != nil {
+ t.Errorf("unexpected error for %q: %s", test.text, err)
+ continue
+ }
+ if !ok && err == nil {
+ t.Errorf("expected error for %q", test.text)
+ continue
+ }
+ if !ok {
+ if *debug {
+ fmt.Printf("%s\n\t%s\n", test.text, err)
+ }
+ continue
+ }
+ if n.IsComplex != test.isComplex {
+ t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
+ }
+ if test.isInt {
+ if !n.IsInt {
+ t.Errorf("expected integer for %q", test.text)
+ }
+ if n.Int64 != test.int64 {
+ t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
+ }
+ } else if n.IsInt {
+ t.Errorf("did not expect integer for %q", test.text)
+ }
+ if test.isUint {
+ if !n.IsUint {
+ t.Errorf("expected unsigned integer for %q", test.text)
+ }
+ if n.Uint64 != test.uint64 {
+ t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
+ }
+ } else if n.IsUint {
+ t.Errorf("did not expect unsigned integer for %q", test.text)
+ }
+ if test.isFloat {
+ if !n.IsFloat {
+ t.Errorf("expected float for %q", test.text)
+ }
+ if n.Float64 != test.float64 {
+ t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
+ }
+ } else if n.IsFloat {
+ t.Errorf("did not expect float for %q", test.text)
+ }
+ if test.isComplex {
+ if !n.IsComplex {
+ t.Errorf("expected complex for %q", test.text)
+ }
+ if n.Complex128 != test.complex128 {
+ t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
+ }
+ } else if n.IsComplex {
+ t.Errorf("did not expect complex for %q", test.text)
+ }
+ }
+}
+
+type parseTest struct {
+ name string
+ input string
+ ok bool
+ result string
+}
+
+const (
+ noError = true
+ hasError = false
+)
+
+var parseTests = []parseTest{
+ {"empty", "", noError,
+ `[]`},
+ {"comment", "{{/*\n\n\n*/}}", noError,
+ `[]`},
+ {"spaces", " \t\n", noError,
+ `[(text: " \t\n")]`},
+ {"text", "some text", noError,
+ `[(text: "some text")]`},
+ {"emptyAction", "{{}}", hasError,
+ `[(action: [])]`},
+ {"field", "{{.X}}", noError,
+ `[(action: [(command: [F=[X]])])]`},
+ {"simple command", "{{printf}}", noError,
+ `[(action: [(command: [I=printf])])]`},
+ {"$ invocation", "{{$}}", noError,
+ "[(action: [(command: [V=[$]])])]"},
+ {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
+ "[({{with [V=[$x]] := [(command: [N=3])]}} [(action: [(command: [V=[$x] N=23])])])]"},
+ {"variable with fields", "{{$.I}}", noError,
+ "[(action: [(command: [V=[$ I]])])]"},
+ {"multi-word command", "{{printf `%d` 23}}", noError,
+ "[(action: [(command: [I=printf S=`%d` N=23])])]"},
+ {"pipeline", "{{.X|.Y}}", noError,
+ `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ {"pipeline with decl", "{{$x := .X|.Y}}", noError,
+ `[(action: [V=[$x]] := [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ {"declaration", "{{.X|.Y}}", noError,
+ `[(action: [(command: [F=[X]]) (command: [F=[Y]])])]`},
+ {"simple if", "{{if .X}}hello{{end}}", noError,
+ `[({{if [(command: [F=[X]])]}} [(text: "hello")])]`},
+ {"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
+ `[({{if [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ {"simple range", "{{range .X}}hello{{end}}", noError,
+ `[({{range [(command: [F=[X]])]}} [(text: "hello")])]`},
+ {"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
+ `[({{range [(command: [F=[X Y Z]])]}} [(text: "hello")])]`},
+ {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
+ `[({{range [(command: [F=[X]])]}} [(text: "hello")({{range [(command: [F=[Y]])]}} [(text: "goodbye")])])]`},
+ {"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
+ `[({{range [(command: [F=[X]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
+ `[({{range [(command: [F=[X]]) (command: [F=[M]])]}} [(text: "true")] {{else}} [(text: "false")])]`},
+ {"range []int", "{{range .SI}}{{.}}{{end}}", noError,
+ `[({{range [(command: [F=[SI]])]}} [(action: [(command: [{{<.>}}])])])]`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a'}}{{end}}", noError,
+ `[({{range [(command: [F=[SI] N=1 N=-3.2i B=true B=false N='a'])]}} [])]`},
+ {"template", "{{template `x`}}", noError,
+ `[{{template "x"}}]`},
+ {"template with arg", "{{template `x` .Y}}", noError,
+ `[{{template "x" [(command: [F=[Y]])]}}]`},
+ {"with", "{{with .X}}hello{{end}}", noError,
+ `[({{with [(command: [F=[X]])]}} [(text: "hello")])]`},
+ {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
+ `[({{with [(command: [F=[X]])]}} [(text: "hello")] {{else}} [(text: "goodbye")])]`},
+ // Errors.
+ {"unclosed action", "hello{{range", hasError, ""},
+ {"unmatched end", "{{end}}", hasError, ""},
+ {"missing end", "hello{{range .x}}", hasError, ""},
+ {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
+ {"undefined function", "hello{{undefined}}", hasError, ""},
+ {"undefined variable", "{{$x}}", hasError, ""},
+ {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
+ {"variable undefined in template", "{{template $v}}", hasError, ""},
+ {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
+ {"template with field ref", "{{template .X}}", hasError, ""},
+ {"template with var", "{{template $v}}", hasError, ""},
+ {"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
+ {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
+ {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
+}
+
+var builtins = map[string]interface{}{
+ "printf": fmt.Sprintf,
+}
+
+func TestParse(t *testing.T) {
+ for _, test := range parseTests {
+ tmpl, err := New(test.name).Parse(test.input, "", "", builtins)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ result := tmpl.Root.String()
+ if result != test.result {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
+ }
+ }
+}
diff --git a/libgo/go/text/template/parse/set.go b/libgo/go/text/template/parse/set.go
new file mode 100644
index 0000000..d363eef
--- /dev/null
+++ b/libgo/go/text/template/parse/set.go
@@ -0,0 +1,49 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Set returns a map from template name to parse tree, created by parsing
+// the template set definition in the argument string. If an error is
+// encountered, parsing stops and the error is returned along with a nil
+// or incomplete map.
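+//
+// For example, the definition
+// {{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}
+// yields a map with entries for "foo" and "bar".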
+func Set(text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (tree map[string]*Tree, err error) {
+ tree = make(map[string]*Tree)
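+ // recover may be called on a nil *Tree: it only dereferences the
+ // receiver (to stop parsing) when the receiver is non-nil.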
+ defer (*Tree)(nil).recover(&err)
+ lex := lex("set", text, leftDelim, rightDelim)
+ const context = "define clause"
+ for {
+ t := New("set") // name will be updated once we know it.
+ t.startParse(funcs, lex)
+ // Expect EOF or "{{ define name }}".
+ if t.atEOF() {
+ break
+ }
+ t.expect(itemLeftDelim, context)
+ t.expect(itemDefine, context)
+ name := t.expect(itemString, context)
+ t.Name, err = strconv.Unquote(name.val)
+ if err != nil {
+ t.error(err)
+ }
+ t.expect(itemRightDelim, context)
+ end := t.parse(false)
+ if end == nil {
+ t.errorf("unexpected EOF in %s", context)
+ }
+ if end.Type() != nodeEnd {
+ t.errorf("unexpected %s in %s", end, context)
+ }
+ t.stopParse()
+ if _, present := tree[t.Name]; present {
+ return nil, fmt.Errorf("template: %q multiply defined", t.Name)
+ }
+ tree[t.Name] = t
+ }
+ return
+}
diff --git a/libgo/go/text/template/set.go b/libgo/go/text/template/set.go
new file mode 100644
index 0000000..747cc78
--- /dev/null
+++ b/libgo/go/text/template/set.go
@@ -0,0 +1,120 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "text/template/parse"
+)
+
+// Set holds a set of related templates that can refer to one another by name.
+// The zero value represents an empty set.
+// A template may belong to at most one set.
+type Set struct {
+ tmpl map[string]*Template
+ leftDelim string
+ rightDelim string
+ parseFuncs FuncMap
+ execFuncs map[string]reflect.Value
+}
+
+func (s *Set) init() {
+ if s.tmpl == nil {
+ s.tmpl = make(map[string]*Template)
+ s.parseFuncs = make(FuncMap)
+ s.execFuncs = make(map[string]reflect.Value)
+ }
+}
+
+// Delims sets the action delimiters, to be used in a subsequent
+// parse, to the specified strings.
+// An empty delimiter stands for the corresponding default: {{ or }}.
+// The return value is the set, so calls can be chained.
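+//
+// For example (the delimiter strings shown are arbitrary):
+// set := new(Set).Delims("<<", ">>")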
+func (s *Set) Delims(left, right string) *Set {
+ s.leftDelim = left
+ s.rightDelim = right
+ return s
+}
+
+// Funcs adds the elements of the argument map to the set's function map. It
+// panics if a value in the map is not a function with appropriate return
+// type.
+// The return value is the set, so calls can be chained.
+func (s *Set) Funcs(funcMap FuncMap) *Set {
+ s.init()
+ addValueFuncs(s.execFuncs, funcMap)
+ addFuncs(s.parseFuncs, funcMap)
+ return s
+}
+
+// Add adds the argument templates to the set. It panics if two templates
+// with the same name are added or if a template is already a member of
+// a set.
+// The return value is the set, so calls can be chained.
+func (s *Set) Add(templates ...*Template) *Set {
+ for _, t := range templates {
+ if err := s.add(t); err != nil {
+ panic(err)
+ }
+ }
+ return s
+}
+
+// add adds the argument template to the set.
+func (s *Set) add(t *Template) error {
+ s.init()
+ if t.set != nil {
+ return fmt.Errorf("template: %q already in a set", t.name)
+ }
+ if _, ok := s.tmpl[t.name]; ok {
+ return fmt.Errorf("template: %q already defined in set", t.name)
+ }
+ s.tmpl[t.name] = t
+ t.set = s
+ return nil
+}
+
+// Template returns the template with the given name in the set,
+// or nil if there is no such template.
+func (s *Set) Template(name string) *Template {
+ return s.tmpl[name]
+}
+
+// FuncMap returns the set's function map.
+func (s *Set) FuncMap() FuncMap {
+ return s.parseFuncs
+}
+
+// Execute applies the named template to the specified data object, writing
+// the output to wr.
+func (s *Set) Execute(wr io.Writer, name string, data interface{}) error {
+ tmpl := s.tmpl[name]
+ if tmpl == nil {
+ return fmt.Errorf("template: no template %q in set", name)
+ }
+ return tmpl.Execute(wr, data)
+}
+
+// Parse parses a string into a set of named templates. Parse may be called
+// multiple times for a given set, adding the templates defined in the string
+// to the set. It is an error if a template has a name already defined in the set.
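+//
+// An illustrative sketch (the template name "greeting" is arbitrary):
+//
+// set, err := new(Set).Parse(`{{define "greeting"}}Hello{{end}}`)
+// if err != nil {
+// // handle the parse error
+// }
+// err = set.Execute(os.Stdout, "greeting", nil)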
+func (s *Set) Parse(text string) (*Set, error) {
+ trees, err := parse.Set(text, s.leftDelim, s.rightDelim, s.parseFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ s.init()
+ for name, tree := range trees {
+ tmpl := New(name)
+ tmpl.Tree = tree
+ err = s.add(tmpl)
+ if err != nil {
+ return s, err
+ }
+ }
+ return s, nil
+}
diff --git a/libgo/go/text/template/set_test.go b/libgo/go/text/template/set_test.go
new file mode 100644
index 0000000..f437bc7
--- /dev/null
+++ b/libgo/go/text/template/set_test.go
@@ -0,0 +1,239 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "testing"
+)
+
+const (
+ noError = true
+ hasError = false
+)
+
+type setParseTest struct {
+ name string
+ input string
+ ok bool
+ names []string
+ results []string
+}
+
+var setParseTests = []setParseTest{
+ {"empty", "", noError,
+ nil,
+ nil},
+ {"one", `{{define "foo"}} FOO {{end}}`, noError,
+ []string{"foo"},
+ []string{`[(text: " FOO ")]`}},
+ {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
+ []string{"foo", "bar"},
+ []string{`[(text: " FOO ")]`, `[(text: " BAR ")]`}},
+ // errors
+ {"missing end", `{{define "foo"}} FOO `, hasError,
+ nil,
+ nil},
+ {"malformed name", `{{define "foo}} FOO `, hasError,
+ nil,
+ nil},
+}
+
+func TestSetParse(t *testing.T) {
+ for _, test := range setParseTests {
+ set, err := new(Set).Parse(test.input)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ if set == nil {
+ continue
+ }
+ if len(set.tmpl) != len(test.names) {
+ t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(set.tmpl))
+ continue
+ }
+ for i, name := range test.names {
+ tmpl, ok := set.tmpl[name]
+ if !ok {
+ t.Errorf("%s: can't find template %q", test.name, name)
+ continue
+ }
+ result := tmpl.Root.String()
+ if result != test.results[i] {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
+ }
+ }
+ }
+}
+
+var setExecTests = []execTest{
+ {"empty", "", "", nil, true},
+ {"text", "some text", "some text", nil, true},
+ {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
+ {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
+ {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
+ {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
+ {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
+ {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
+ {"variable declared by template", `{{template "nested" $x=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
+
+ // User-defined function: test argument evaluator.
+ {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
+ {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
+}
+
+// These strings are also in testdata/*.
+const setText1 = `
+ {{define "x"}}TEXT{{end}}
+ {{define "dotV"}}{{.V}}{{end}}
+`
+
+const setText2 = `
+ {{define "dot"}}{{.}}{{end}}
+ {{define "nested"}}{{template "dot" .}}{{end}}
+`
+
+func TestSetExecute(t *testing.T) {
+ // Declare a set with a couple of templates first.
+ set := new(Set)
+ _, err := set.Parse(setText1)
+ if err != nil {
+ t.Fatalf("error parsing set: %s", err)
+ }
+ _, err = set.Parse(setText2)
+ if err != nil {
+ t.Fatalf("error parsing set: %s", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestSetParseFiles(t *testing.T) {
+ set := new(Set)
+ _, err := set.ParseFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = set.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestParseSetFiles(t *testing.T) {
+ set := new(Set)
+ _, err := ParseSetFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ set, err = ParseSetFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestSetParseGlob(t *testing.T) {
+ _, err := new(Set).ParseGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = new(Set).ParseGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := new(Set).ParseGlob("testdata/file*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+func TestParseSetGlob(t *testing.T) {
+ _, err := ParseSetGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = ParseSetGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := ParseSetGlob("testdata/file*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(setExecTests, set, t)
+}
+
+var templateFileExecTests = []execTest{
+ {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\ntemplate2\n", 0, true},
+}
+
+func TestSetParseTemplateFiles(t *testing.T) {
+ _, err := new(Set).ParseTemplateFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ set, err := new(Set).ParseTemplateFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
+
+func TestParseTemplateFiles(t *testing.T) {
+ _, err := ParseTemplateFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ set, err := ParseTemplateFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
+
+func TestSetParseTemplateGlob(t *testing.T) {
+ _, err := new(Set).ParseTemplateGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = new(Set).ParseTemplateGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := new(Set).ParseTemplateGlob("testdata/tmpl*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
+
+func TestParseTemplateGlob(t *testing.T) {
+ _, err := ParseTemplateGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = ParseTemplateGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ set, err := ParseTemplateGlob("testdata/tmpl*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, set, t)
+}
diff --git a/libgo/go/text/template/testdata/file1.tmpl b/libgo/go/text/template/testdata/file1.tmpl
new file mode 100644
index 0000000..febf9d9f
--- /dev/null
+++ b/libgo/go/text/template/testdata/file1.tmpl
@@ -0,0 +1,2 @@
+{{define "x"}}TEXT{{end}}
+{{define "dotV"}}{{.V}}{{end}}
diff --git a/libgo/go/text/template/testdata/file2.tmpl b/libgo/go/text/template/testdata/file2.tmpl
new file mode 100644
index 0000000..39bf6fb
--- /dev/null
+++ b/libgo/go/text/template/testdata/file2.tmpl
@@ -0,0 +1,2 @@
+{{define "dot"}}{{.}}{{end}}
+{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/libgo/go/text/template/testdata/tmpl1.tmpl b/libgo/go/text/template/testdata/tmpl1.tmpl
new file mode 100644
index 0000000..3d15b81
--- /dev/null
+++ b/libgo/go/text/template/testdata/tmpl1.tmpl
@@ -0,0 +1 @@
+template1
diff --git a/libgo/go/text/template/testdata/tmpl2.tmpl b/libgo/go/text/template/testdata/tmpl2.tmpl
new file mode 100644
index 0000000..a374d2f
--- /dev/null
+++ b/libgo/go/text/template/testdata/tmpl2.tmpl
@@ -0,0 +1 @@
+template2