path: root/libgo/go/html
author    Ian Lance Taylor <ian@gcc.gnu.org>    2011-10-26 23:57:58 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2011-10-26 23:57:58 +0000
commit    d8f412571f8768df2d3239e72392dfeabbad1559 (patch)
tree      19d182df05ead7ff8ba7ee00a7d57555e1383fdf /libgo/go/html
parent    e0c39d66d4f0607177b1cf8995dda56a667e07b3 (diff)
Update Go library to last weekly.
From-SVN: r180552
Diffstat (limited to 'libgo/go/html')
-rw-r--r--  libgo/go/html/escape.go       |   24
-rw-r--r--  libgo/go/html/parse.go        |   94
-rw-r--r--  libgo/go/html/parse_test.go   |   33
-rw-r--r--  libgo/go/html/render.go       |  169
-rw-r--r--  libgo/go/html/render_test.go  |  111
-rw-r--r--  libgo/go/html/token.go        |  707
-rw-r--r--  libgo/go/html/token_test.go   |  285
7 files changed, 1088 insertions, 335 deletions
diff --git a/libgo/go/html/escape.go b/libgo/go/html/escape.go
index 0de97c5..e9edc47 100644
--- a/libgo/go/html/escape.go
+++ b/libgo/go/html/escape.go
@@ -6,6 +6,7 @@ package html
import (
"bytes"
+ "os"
"strings"
"utf8"
)
@@ -182,12 +183,24 @@ func unescape(b []byte) []byte {
return b
}
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+ for i, c := range b {
+ if 'A' <= c && c <= 'Z' {
+ b[i] = c + 'a' - 'A'
+ }
+ }
+ return b
+}
+
const escapedChars = `&'<>"`
-func escape(buf *bytes.Buffer, s string) {
+func escape(w writer, s string) os.Error {
i := strings.IndexAny(s, escapedChars)
for i != -1 {
- buf.WriteString(s[0:i])
+ if _, err := w.WriteString(s[:i]); err != nil {
+ return err
+ }
var esc string
switch s[i] {
case '&':
@@ -204,10 +217,13 @@ func escape(buf *bytes.Buffer, s string) {
panic("unrecognized escape character")
}
s = s[i+1:]
- buf.WriteString(esc)
+ if _, err := w.WriteString(esc); err != nil {
+ return err
+ }
i = strings.IndexAny(s, escapedChars)
}
- buf.WriteString(s)
+ _, err := w.WriteString(s)
+ return err
}
// EscapeString escapes special characters like "<" to become "&lt;". It
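A minimal usage sketch of the exported entry points built on the escaping code above, EscapeString and its inverse UnescapeString, assuming the plain "html" import path used by this libgo tree:

package main

import (
	"fmt"
	"html" // import path assumed for this libgo tree
)

func main() {
	s := `a < b && c > "d"`
	e := html.EscapeString(s) // e.g. "<" becomes "&lt;"
	fmt.Println(e)
	fmt.Println(html.UnescapeString(e) == s) // the round trip preserves s
}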
diff --git a/libgo/go/html/parse.go b/libgo/go/html/parse.go
index 519ebe5..582437f 100644
--- a/libgo/go/html/parse.go
+++ b/libgo/go/html/parse.go
@@ -29,6 +29,9 @@ type parser struct {
head, form *Node
// Other parsing state flags (section 11.2.3.5).
scripting, framesetOK bool
+ // originalIM is the insertion mode to go back to after completing a text
+ // or inTableText insertion mode.
+ originalIM insertionMode
}
func (p *parser) top() *Node {
@@ -64,21 +67,37 @@ var (
// popUntil([]string{"html", "table"}, "table") would return true and leave:
// ["html", "body", "font"]
func (p *parser) popUntil(stopTags []string, matchTags ...string) bool {
+ if i := p.indexOfElementInScope(stopTags, matchTags...); i != -1 {
+ p.oe = p.oe[:i]
+ return true
+ }
+ return false
+}
+
+// indexOfElementInScope returns the index in p.oe of the highest element
+// whose tag is in matchTags that is in scope according to stopTags.
+// If no matching element is in scope, it returns -1.
+func (p *parser) indexOfElementInScope(stopTags []string, matchTags ...string) int {
for i := len(p.oe) - 1; i >= 0; i-- {
tag := p.oe[i].Data
for _, t := range matchTags {
if t == tag {
- p.oe = p.oe[:i]
- return true
+ return i
}
}
for _, t := range stopTags {
if t == tag {
- return false
+ return -1
}
}
}
- return false
+ return -1
+}
+
+// elementInScope is like popUntil, except that it doesn't modify the stack of
+// open elements.
+func (p *parser) elementInScope(stopTags []string, matchTags ...string) bool {
+ return p.indexOfElementInScope(stopTags, matchTags...) != -1
}
// addChild adds a child node n to the top element, and pushes n onto the stack
@@ -198,12 +217,23 @@ type insertionMode func(*parser) (insertionMode, bool)
// Section 11.2.3.1, "using the rules for".
func useTheRulesFor(p *parser, actual, delegate insertionMode) (insertionMode, bool) {
im, consumed := delegate(p)
+ // TODO: do we need to update p.originalIM if it equals delegate?
if im != delegate {
return im, consumed
}
return actual, consumed
}
+// setOriginalIM sets the insertion mode to return to after completing a text or
+// inTableText insertion mode.
+// Section 11.2.3.1, "using the rules for".
+func (p *parser) setOriginalIM(im insertionMode) {
+ if p.originalIM != nil {
+ panic("html: bad parser state: originalIM was set twice")
+ }
+ p.originalIM = im
+}
+
// Section 11.2.5.4.1.
func initialIM(p *parser) (insertionMode, bool) {
if p.tok.Type == DoctypeToken {
@@ -302,8 +332,10 @@ func inHeadIM(p *parser) (insertionMode, bool) {
switch p.tok.Data {
case "meta":
// TODO.
- case "script":
- // TODO.
+ case "script", "title":
+ p.addElement(p.tok.Data, p.tok.Attr)
+ p.setOriginalIM(inHeadIM)
+ return textIM, true
default:
implied = true
}
@@ -365,7 +397,6 @@ func afterHeadIM(p *parser) (insertionMode, bool) {
// Section 11.2.5.4.7.
func inBodyIM(p *parser) (insertionMode, bool) {
- var endP bool
switch p.tok.Type {
case TextToken:
p.reconstructActiveFormattingElements()
@@ -374,15 +405,10 @@ func inBodyIM(p *parser) (insertionMode, bool) {
case StartTagToken:
switch p.tok.Data {
case "address", "article", "aside", "blockquote", "center", "details", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "menu", "nav", "ol", "p", "section", "summary", "ul":
- // TODO: Do the proper "does the stack of open elements has a p element in button scope" algorithm in section 11.2.3.2.
- n := p.top()
- if n.Type == ElementNode && n.Data == "p" {
- endP = true
- } else {
- p.addElement(p.tok.Data, p.tok.Attr)
- }
+ p.popUntil(buttonScopeStopTags, "p")
+ p.addElement(p.tok.Data, p.tok.Attr)
case "h1", "h2", "h3", "h4", "h5", "h6":
- // TODO: auto-insert </p> if necessary.
+ p.popUntil(buttonScopeStopTags, "p")
switch n := p.top(); n.Data {
case "h1", "h2", "h3", "h4", "h5", "h6":
p.oe.pop()
@@ -399,6 +425,11 @@ func inBodyIM(p *parser) (insertionMode, bool) {
case "b", "big", "code", "em", "font", "i", "s", "small", "strike", "strong", "tt", "u":
p.reconstructActiveFormattingElements()
p.addFormattingElement(p.tok.Data, p.tok.Attr)
+ case "applet", "marquee", "object":
+ p.reconstructActiveFormattingElements()
+ p.addElement(p.tok.Data, p.tok.Attr)
+ p.afe = append(p.afe, &scopeMarker)
+ p.framesetOK = false
case "area", "br", "embed", "img", "input", "keygen", "wbr":
p.reconstructActiveFormattingElements()
p.addElement(p.tok.Data, p.tok.Attr)
@@ -406,12 +437,12 @@ func inBodyIM(p *parser) (insertionMode, bool) {
p.acknowledgeSelfClosingTag()
p.framesetOK = false
case "table":
- // TODO: auto-insert </p> if necessary, depending on quirks mode.
+ p.popUntil(buttonScopeStopTags, "p") // TODO: skip this step in quirks mode.
p.addElement(p.tok.Data, p.tok.Attr)
p.framesetOK = false
return inTableIM, true
case "hr":
- // TODO: auto-insert </p> if necessary.
+ p.popUntil(buttonScopeStopTags, "p")
p.addElement(p.tok.Data, p.tok.Attr)
p.oe.pop()
p.acknowledgeSelfClosingTag()
@@ -425,6 +456,11 @@ func inBodyIM(p *parser) (insertionMode, bool) {
case "body":
// TODO: autoclose the stack of open elements.
return afterBodyIM, true
+ case "p":
+ if !p.elementInScope(buttonScopeStopTags, "p") {
+ p.addElement("p", nil)
+ }
+ p.popUntil(buttonScopeStopTags, "p")
case "a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", "strike", "strong", "tt", "u":
p.inBodyEndTagFormatting(p.tok.Data)
default:
@@ -434,14 +470,8 @@ func inBodyIM(p *parser) (insertionMode, bool) {
}
}
}
- if endP {
- // TODO: do the proper algorithm.
- n := p.oe.pop()
- if n.Type != ElementNode || n.Data != "p" {
- panic("unreachable")
- }
- }
- return inBodyIM, !endP
+
+ return inBodyIM, true
}
func (p *parser) inBodyEndTagFormatting(tag string) {
@@ -560,6 +590,20 @@ func (p *parser) inBodyEndTagFormatting(tag string) {
}
}
+// Section 11.2.5.4.8.
+func textIM(p *parser) (insertionMode, bool) {
+ switch p.tok.Type {
+ case TextToken:
+ p.addText(p.tok.Data)
+ return textIM, true
+ case EndTagToken:
+ p.oe.pop()
+ }
+ o := p.originalIM
+ p.originalIM = nil
+ return o, p.tok.Type == EndTagToken
+}
+
// Section 11.2.5.4.9.
func inTableIM(p *parser) (insertionMode, bool) {
var (
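The effect of the new button-scope handling in inBodyIM can be seen by parsing a fragment with an unclosed <p>; a minimal sketch, assuming the same "html" import path and the exported Parse and Node API that appear elsewhere in this commit:

package main

import (
	"fmt"
	"html" // import path assumed for this libgo tree
	"strings"
)

// dump prints each element's tag name, indented by depth, to show how the
// parser nests the elements it builds.
func dump(n *html.Node, depth int) {
	if n.Type == html.ElementNode {
		fmt.Printf("%s<%s>\n", strings.Repeat("  ", depth), n.Data)
	}
	for _, c := range n.Child {
		dump(c, depth+1)
	}
}

func main() {
	// The open <p> is implicitly closed before the <div>, via the
	// popUntil(buttonScopeStopTags, "p") calls added above.
	doc, err := html.Parse(strings.NewReader("<p>one<div>two</div>"))
	if err != nil {
		panic(err)
	}
	dump(doc, 0)
}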
diff --git a/libgo/go/html/parse_test.go b/libgo/go/html/parse_test.go
index 7d918d2..564580c 100644
--- a/libgo/go/html/parse_test.go
+++ b/libgo/go/html/parse_test.go
@@ -80,13 +80,13 @@ func dumpLevel(w io.Writer, n *Node, level int) os.Error {
case DocumentNode:
return os.NewError("unexpected DocumentNode")
case ElementNode:
- fmt.Fprintf(w, "<%s>", EscapeString(n.Data))
+ fmt.Fprintf(w, "<%s>", n.Data)
case TextNode:
- fmt.Fprintf(w, "%q", EscapeString(n.Data))
+ fmt.Fprintf(w, "%q", n.Data)
case CommentNode:
return os.NewError("COMMENT")
case DoctypeNode:
- fmt.Fprintf(w, "<!DOCTYPE %s>", EscapeString(n.Data))
+ fmt.Fprintf(w, "<!DOCTYPE %s>", n.Data)
case scopeMarkerNode:
return os.NewError("unexpected scopeMarkerNode")
default:
@@ -123,7 +123,7 @@ func TestParser(t *testing.T) {
rc := make(chan io.Reader)
go readDat(filename, rc)
// TODO(nigeltao): Process all test cases, not just a subset.
- for i := 0; i < 25; i++ {
+ for i := 0; i < 27; i++ {
// Parse the #data section.
b, err := ioutil.ReadAll(<-rc)
if err != nil {
@@ -134,7 +134,7 @@ func TestParser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- actual, err := dump(doc)
+ got, err := dump(doc)
if err != nil {
t.Fatal(err)
}
@@ -147,9 +147,26 @@ func TestParser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- expected := string(b)
- if actual != expected {
- t.Errorf("%s test #%d %q, actual vs expected:\n----\n%s----\n%s----", filename, i, text, actual, expected)
+ if want := string(b); got != want {
+ t.Errorf("%s test #%d %q, got vs want:\n----\n%s----\n%s----", filename, i, text, got, want)
+ continue
+ }
+ // Check that rendering and re-parsing results in an identical tree.
+ pr, pw := io.Pipe()
+ go func() {
+ pw.CloseWithError(Render(pw, doc))
+ }()
+ doc1, err := Parse(pr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got1, err := dump(doc1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != got1 {
+ t.Errorf("%s test #%d %q, got vs got1:\n----\n%s----\n%s----", filename, i, text, got, got1)
+ continue
}
}
}
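The pipe-based round trip in the test above can be factored into a small helper; a sketch under the same import-path assumption, with an illustrative (non-package) name:

package htmltest // illustrative package name

import (
	"html" // import path assumed for this libgo tree
	"io"
	"os"
)

// reparse renders n and parses the rendered output back, mirroring the
// io.Pipe pattern used in TestParser above.
func reparse(n *html.Node) (*html.Node, os.Error) {
	pr, pw := io.Pipe()
	go func() {
		// CloseWithError hands any Render failure (or nil on success) to the
		// reading end, so Parse sees either a clean EOF or the error.
		pw.CloseWithError(html.Render(pw, n))
	}()
	return html.Parse(pr)
}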
diff --git a/libgo/go/html/render.go b/libgo/go/html/render.go
new file mode 100644
index 0000000..e1ec66f
--- /dev/null
+++ b/libgo/go/html/render.go
@@ -0,0 +1,169 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) os.Error
+ WriteString(string) (int, os.Error)
+}
+
+// Render renders the parse tree n to the given writer.
+//
+// For 'well-formed' parse trees, calling Parse on the output of Render will
+// result in a clone of the original tree.
+//
+// 'Well-formed' is not formally specified, but calling Parse on arbitrary
+// input results in a 'well-formed' parse tree if Parse does not return an
+// error. Programmatically constructed trees are typically also 'well-formed',
+// but it is possible to construct a tree that, when rendered and re-parsed,
+// results in a different tree. A simple example is that a solitary text node
+// would become a tree containing <html>, <head> and <body> elements. Another
+// example is that the programmatic equivalent of "a<head>b</head>c" becomes
+// "<html><head><head/><body>abc</body></html>".
+//
+// Comment nodes are elided from the output, analogous to Parse skipping over
+// any <!--comment--> input.
+func Render(w io.Writer, n *Node) os.Error {
+ if x, ok := w.(writer); ok {
+ return render(x, n)
+ }
+ buf := bufio.NewWriter(w)
+ if err := render(buf, n); err != nil {
+ return err
+ }
+ return buf.Flush()
+}
+
+func render(w writer, n *Node) os.Error {
+ // Render non-element nodes; these are the easy cases.
+ switch n.Type {
+ case ErrorNode:
+ return os.NewError("html: cannot render an ErrorNode node")
+ case TextNode:
+ return escape(w, n.Data)
+ case DocumentNode:
+ for _, c := range n.Child {
+ if err := render(w, c); err != nil {
+ return err
+ }
+ }
+ return nil
+ case ElementNode:
+ // No-op.
+ case CommentNode:
+ return nil
+ case DoctypeNode:
+ if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+ default:
+ return os.NewError("html: unknown node type")
+ }
+
+ // Render the <xxx> opening tag.
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ for _, a := range n.Attr {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(a.Key); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(`="`); err != nil {
+ return err
+ }
+ if err := escape(w, a.Val); err != nil {
+ return err
+ }
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ }
+ if voidElements[n.Data] {
+ if len(n.Child) != 0 {
+ return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+ }
+ _, err := w.WriteString("/>")
+ return err
+ }
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+
+ // Render any child nodes.
+ switch n.Data {
+ case "noembed", "noframes", "noscript", "script", "style":
+ for _, c := range n.Child {
+ if c.Type != TextNode {
+ return fmt.Errorf("html: raw text element <%s> has non-text child node", n.Data)
+ }
+ if _, err := w.WriteString(c.Data); err != nil {
+ return err
+ }
+ }
+ case "textarea", "title":
+ for _, c := range n.Child {
+ if c.Type != TextNode {
+ return fmt.Errorf("html: RCDATA element <%s> has non-text child node", n.Data)
+ }
+ if err := render(w, c); err != nil {
+ return err
+ }
+ }
+ default:
+ for _, c := range n.Child {
+ if err := render(w, c); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render the </xxx> closing tag.
+ if _, err := w.WriteString("</"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+}
+
+// Section 13.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents.
+var voidElements = map[string]bool{
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "command": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true,
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
+}
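A minimal sketch of calling the new Render entry point on a hand-built tree, assuming the same "html" import path; bytes.Buffer already provides WriteByte and WriteString, so Render writes to it directly rather than wrapping it in a bufio.Writer:

package main

import (
	"bytes"
	"fmt"
	"html" // import path assumed for this libgo tree
)

func main() {
	// Build <p class="x"> with a single text child by hand.
	n := &html.Node{
		Type: html.ElementNode,
		Data: "p",
		Attr: []html.Attribute{
			html.Attribute{Key: "class", Val: "x"},
		},
		Child: []*html.Node{
			&html.Node{Type: html.TextNode, Data: "a & b"},
		},
	}
	var b bytes.Buffer // satisfies the package's writer interface
	if err := html.Render(&b, n); err != nil {
		panic(err)
	}
	fmt.Println(b.String()) // <p class="x">a &amp; b</p>
}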
diff --git a/libgo/go/html/render_test.go b/libgo/go/html/render_test.go
new file mode 100644
index 0000000..d166a3b
--- /dev/null
+++ b/libgo/go/html/render_test.go
@@ -0,0 +1,111 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestRenderer(t *testing.T) {
+ n := &Node{
+ Type: ElementNode,
+ Data: "html",
+ Child: []*Node{
+ &Node{
+ Type: ElementNode,
+ Data: "head",
+ },
+ &Node{
+ Type: ElementNode,
+ Data: "body",
+ Child: []*Node{
+ &Node{
+ Type: TextNode,
+ Data: "0<1",
+ },
+ &Node{
+ Type: ElementNode,
+ Data: "p",
+ Attr: []Attribute{
+ Attribute{
+ Key: "id",
+ Val: "A",
+ },
+ Attribute{
+ Key: "foo",
+ Val: `abc"def`,
+ },
+ },
+ Child: []*Node{
+ &Node{
+ Type: TextNode,
+ Data: "2",
+ },
+ &Node{
+ Type: ElementNode,
+ Data: "b",
+ Attr: []Attribute{
+ Attribute{
+ Key: "empty",
+ Val: "",
+ },
+ },
+ Child: []*Node{
+ &Node{
+ Type: TextNode,
+ Data: "3",
+ },
+ },
+ },
+ &Node{
+ Type: ElementNode,
+ Data: "i",
+ Attr: []Attribute{
+ Attribute{
+ Key: "backslash",
+ Val: `\`,
+ },
+ },
+ Child: []*Node{
+ &Node{
+ Type: TextNode,
+ Data: "&4",
+ },
+ },
+ },
+ },
+ },
+ &Node{
+ Type: TextNode,
+ Data: "5",
+ },
+ &Node{
+ Type: ElementNode,
+ Data: "blockquote",
+ },
+ &Node{
+ Type: ElementNode,
+ Data: "br",
+ },
+ &Node{
+ Type: TextNode,
+ Data: "6",
+ },
+ },
+ },
+ },
+ }
+ want := `<html><head></head><body>0&lt;1<p id="A" foo="abc&quot;def">` +
+ `2<b empty="">3</b><i backslash="\">&amp;4</i></p>` +
+ `5<blockquote></blockquote><br/>6</body></html>`
+ b := new(bytes.Buffer)
+ if err := Render(b, n); err != nil {
+ t.Fatal(err)
+ }
+ if got := b.String(); got != want {
+ t.Errorf("got vs want:\n%s\n%s\n", got, want)
+ }
+}
diff --git a/libgo/go/html/token.go b/libgo/go/html/token.go
index d266b3a..2826f95 100644
--- a/libgo/go/html/token.go
+++ b/libgo/go/html/token.go
@@ -9,6 +9,7 @@ import (
"io"
"os"
"strconv"
+ "strings"
)
// A TokenType is the type of a Token.
@@ -100,13 +101,19 @@ func (t Token) String() string {
case SelfClosingTagToken:
return "<" + t.tagString() + "/>"
case CommentToken:
- return "<!--" + EscapeString(t.Data) + "-->"
+ return "<!--" + t.Data + "-->"
case DoctypeToken:
- return "<!DOCTYPE " + EscapeString(t.Data) + ">"
+ return "<!DOCTYPE " + t.Data + ">"
}
return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+ start, end int
+}
+
// A Tokenizer returns a stream of HTML Tokens.
type Tokenizer struct {
// If ReturnComments is set, Next returns comment tokens;
@@ -115,7 +122,7 @@ type Tokenizer struct {
// r is the source of the HTML text.
r io.Reader
- // tt is the TokenType of the most recently read token.
+ // tt is the TokenType of the current token.
tt TokenType
// err is the first error encountered during tokenization. It is possible
// for tt != Error && err != nil to hold: this means that Next returned a
@@ -125,10 +132,26 @@ type Tokenizer struct {
// subsequent Next calls would return an ErrorToken.
// err is never reset. Once it becomes non-nil, it stays non-nil.
err os.Error
- // buf[p0:p1] holds the raw data of the most recent token.
- // buf[p1:] is buffered input that will yield future tokens.
- p0, p1 int
- buf []byte
+ // buf[raw.start:raw.end] holds the raw bytes of the current token.
+ // buf[raw.end:] is buffered input that will yield future tokens.
+ raw span
+ buf []byte
+ // buf[data.start:data.end] holds the raw bytes of the current token's data:
+ // a text token's text, a tag token's tag name, etc.
+ data span
+ // pendingAttr is the attribute key and value currently being tokenized.
+ // When complete, pendingAttr is pushed onto attr. nAttrReturned is
+ // incremented on each call to TagAttr.
+ pendingAttr [2]span
+ attr [][2]span
+ nAttrReturned int
+ // rawTag is the "script" in "</script>" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "<p>" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
}
// Error returns the error associated with the most recent ErrorToken token.
@@ -140,33 +163,42 @@ func (z *Tokenizer) Error() os.Error {
return z.err
}
-// Raw returns the unmodified text of the current token. Calling Next, Token,
-// Text, TagName or TagAttr may change the contents of the returned slice.
-func (z *Tokenizer) Raw() []byte {
- return z.buf[z.p0:z.p1]
-}
-
// readByte returns the next byte from the input stream, doing a buffered read
-// from z.r into z.buf if necessary. z.buf[z.p0:z.p1] remains a contiguous byte
+// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
// slice that holds all the bytes read so far for the current token.
// It sets z.err if the underlying reader returns an error.
// Pre-condition: z.err == nil.
func (z *Tokenizer) readByte() byte {
- if z.p1 >= len(z.buf) {
+ if z.raw.end >= len(z.buf) {
// Our buffer is exhausted and we have to read from z.r.
- // We copy z.buf[z.p0:z.p1] to the beginning of z.buf. If the length
- // z.p1 - z.p0 is more than half the capacity of z.buf, then we
+ // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
+ // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
// allocate a new buffer before the copy.
c := cap(z.buf)
- d := z.p1 - z.p0
+ d := z.raw.end - z.raw.start
var buf1 []byte
if 2*d > c {
buf1 = make([]byte, d, 2*c)
} else {
buf1 = z.buf[:d]
}
- copy(buf1, z.buf[z.p0:z.p1])
- z.p0, z.p1, z.buf = 0, d, buf1[:d]
+ copy(buf1, z.buf[z.raw.start:z.raw.end])
+ if x := z.raw.start; x != 0 {
+ // Adjust the data/attr spans to refer to the same contents after the copy.
+ z.data.start -= x
+ z.data.end -= x
+ z.pendingAttr[0].start -= x
+ z.pendingAttr[0].end -= x
+ z.pendingAttr[1].start -= x
+ z.pendingAttr[1].end -= x
+ for i := range z.attr {
+ z.attr[i][0].start -= x
+ z.attr[i][0].end -= x
+ z.attr[i][1].start -= x
+ z.attr[i][1].end -= x
+ }
+ }
+ z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
// Now that we have copied the live bytes to the start of the buffer,
// we read from z.r into the remainder.
n, err := z.r.Read(buf1[d:cap(buf1)])
@@ -176,297 +208,467 @@ func (z *Tokenizer) readByte() byte {
}
z.buf = buf1[:d+n]
}
- x := z.buf[z.p1]
- z.p1++
+ x := z.buf[z.raw.end]
+ z.raw.end++
return x
}
-// readTo keeps reading bytes until x is found or a read error occurs. If an
-// error does occur, z.err is set to that error.
-// Pre-condition: z.err == nil.
-func (z *Tokenizer) readTo(x uint8) {
+// skipWhiteSpace skips past any white space.
+func (z *Tokenizer) skipWhiteSpace() {
+ if z.err != nil {
+ return
+ }
for {
c := z.readByte()
if z.err != nil {
return
}
switch c {
- case x:
+ case ' ', '\n', '\r', '\t', '\f':
+ // No-op.
+ default:
+ z.raw.end--
return
- case '\\':
- z.readByte()
+ }
+ }
+}
+
+// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
+// is typically something like "script" or "textarea".
+func (z *Tokenizer) readRawOrRCDATA() {
+loop:
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '<' {
+ continue loop
+ }
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '/' {
+ continue loop
+ }
+ for i := 0; i < len(z.rawTag); i++ {
+ c = z.readByte()
if z.err != nil {
- return
+ break loop
+ }
+ if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
+ continue loop
}
}
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ // The 3 is 2 for the leading "</" plus 1 for the trailing character c.
+ z.raw.end -= 3 + len(z.rawTag)
+ break loop
+ case '<':
+ // Step back one, to catch "</foo</foo>".
+ z.raw.end--
+ }
}
+ z.data.end = z.raw.end
+ // A textarea's or title's RCDATA can contain escaped entities.
+ z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
+ z.rawTag = ""
}
-// nextComment reads the next token starting with "<!--".
-// The opening "<!--" has already been consumed.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 4 <= z.p1.
-func (z *Tokenizer) nextComment() {
- // <!--> is a valid comment.
+// readComment reads the next comment token starting with "<!--". The opening
+// "<!--" has already been consumed.
+func (z *Tokenizer) readComment() {
+ z.data.start = z.raw.end
+ defer func() {
+ if z.data.end < z.data.start {
+ // It's a comment with no data, like <!-->.
+ z.data.end = z.data.start
+ }
+ }()
for dashCount := 2; ; {
c := z.readByte()
if z.err != nil {
+ z.data.end = z.raw.end
return
}
switch c {
case '-':
dashCount++
+ continue
case '>':
if dashCount >= 2 {
- z.tt = CommentToken
+ z.data.end = z.raw.end - len("-->")
return
}
- dashCount = 0
- default:
- dashCount = 0
+ case '!':
+ if dashCount >= 2 {
+ c = z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len("--!>")
+ return
+ }
+ }
}
+ dashCount = 0
}
}
-// nextMarkupDeclaration reads the next token starting with "<!".
-// It might be a "<!--comment-->", a "<!DOCTYPE foo>", or "<!malformed text".
-// The opening "<!" has already been consumed.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 2 <= z.p1.
-func (z *Tokenizer) nextMarkupDeclaration() {
+// readUntilCloseAngle reads until the next ">".
+func (z *Tokenizer) readUntilCloseAngle() {
+ z.data.start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len(">")
+ return
+ }
+ }
+}
+
+// readMarkupDeclaration reads the next token starting with "<!". It might be
+// a "<!--comment-->", a "<!DOCTYPE foo>", or "<!a bogus comment". The opening
+// "<!" has already been consumed.
+func (z *Tokenizer) readMarkupDeclaration() TokenType {
+ z.data.start = z.raw.end
var c [2]byte
for i := 0; i < 2; i++ {
c[i] = z.readByte()
if z.err != nil {
- return
+ z.data.end = z.raw.end
+ return CommentToken
}
}
if c[0] == '-' && c[1] == '-' {
- z.nextComment()
- return
+ z.readComment()
+ return CommentToken
}
- z.p1 -= 2
- const s = "DOCTYPE "
- for i := 0; ; i++ {
+ z.raw.end -= 2
+ const s = "DOCTYPE"
+ for i := 0; i < len(s); i++ {
c := z.readByte()
if z.err != nil {
- return
+ z.data.end = z.raw.end
+ return CommentToken
}
- // Capitalize c.
- if 'a' <= c && c <= 'z' {
- c = 'A' + (c - 'a')
- }
- if i < len(s) && c != s[i] {
- z.nextText()
- return
- }
- if c == '>' {
- if i >= len(s) {
- z.tt = DoctypeToken
- }
- return
+ if c != s[i] && c != s[i]+('a'-'A') {
+ // Back up to read the fragment of "DOCTYPE" again.
+ z.raw.end = z.data.start
+ z.readUntilCloseAngle()
+ return CommentToken
}
}
+ if z.skipWhiteSpace(); z.err != nil {
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return DoctypeToken
+ }
+ z.readUntilCloseAngle()
+ return DoctypeToken
}
-// nextTag reads the next token starting with "<". It might be a "<startTag>",
-// an "</endTag>", a "<!markup declaration>", or "<malformed text".
-// The opening "<" has already been consumed.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
-func (z *Tokenizer) nextTag() {
- c := z.readByte()
- if z.err != nil {
- return
- }
- switch {
- case c == '/':
- z.tt = EndTagToken
- // Lower-cased characters are more common in tag names, so we check for them first.
- case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
- z.tt = StartTagToken
- case c == '!':
- z.nextMarkupDeclaration()
- return
- case c == '?':
- z.tt, z.err = ErrorToken, os.NewError("html: TODO: implement XML processing instructions")
- return
- default:
- z.tt, z.err = ErrorToken, os.NewError("html: TODO: handle malformed tags")
- return
+// readStartTag reads the next start tag token. The opening "<a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readStartTag() TokenType {
+ z.attr = z.attr[:0]
+ z.nAttrReturned = 0
+ // Read the tag name and attribute key/value pairs.
+ z.readTagName()
+ if z.skipWhiteSpace(); z.err != nil {
+ return ErrorToken
}
for {
c := z.readByte()
- if z.err != nil {
- return
+ if z.err != nil || c == '>' {
+ break
}
- switch c {
- case '"', '\'':
- z.readTo(c)
- if z.err != nil {
- return
- }
- case '>':
- if z.buf[z.p1-2] == '/' && z.tt == StartTagToken {
- z.tt = SelfClosingTagToken
+ z.raw.end--
+ z.readTagAttrKey()
+ z.readTagAttrVal()
+ // Save pendingAttr if it has a non-empty key.
+ if z.pendingAttr[0].start != z.pendingAttr[0].end {
+ z.attr = append(z.attr, z.pendingAttr)
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ break
+ }
+ }
+ // Any "<noembed>", "<noframes>", "<noscript>", "<script>", "<style>",
+ // "<textarea>" or "<title>" tag flags the tokenizer's next token as raw.
+ // The tag name lengths of these special cases are in the range [5, 8].
+ if x := z.data.end - z.data.start; 5 <= x && x <= 8 {
+ switch z.buf[z.data.start] {
+ case 'n', 's', 't', 'N', 'S', 'T':
+ switch s := strings.ToLower(string(z.buf[z.data.start:z.data.end])); s {
+ case "noembed", "noframes", "noscript", "script", "style", "textarea", "title":
+ z.rawTag = s
}
+ }
+ }
+ // Look for a self-closing token like "<br/>".
+ if z.err == nil && z.buf[z.raw.end-2] == '/' {
+ return SelfClosingTagToken
+ }
+ return StartTagToken
+}
+
+// readEndTag reads the next end tag token. The opening "</a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readEndTag() {
+ z.attr = z.attr[:0]
+ z.nAttrReturned = 0
+ z.readTagName()
+ for {
+ c := z.readByte()
+ if z.err != nil || c == '>' {
return
}
}
}
-// nextText reads all text up until an '<'.
-// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
-func (z *Tokenizer) nextText() {
+// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
+// is positioned such that the first byte of the tag name (the "d" in "<div")
+// has already been consumed.
+func (z *Tokenizer) readTagName() {
+ z.data.start = z.raw.end - 1
for {
c := z.readByte()
if z.err != nil {
+ z.data.end = z.raw.end
return
}
- if c == '<' {
- z.p1--
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ z.data.end = z.raw.end - 1
+ return
+ case '/', '>':
+ z.raw.end--
+ z.data.end = z.raw.end
return
}
}
}
-// Next scans the next token and returns its type.
-func (z *Tokenizer) Next() TokenType {
+// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
+// Precondition: z.err == nil.
+func (z *Tokenizer) readTagAttrKey() {
+ z.pendingAttr[0].start = z.raw.end
for {
- if z.err != nil {
- z.tt = ErrorToken
- return z.tt
- }
- z.p0 = z.p1
c := z.readByte()
if z.err != nil {
- z.tt = ErrorToken
- return z.tt
+ z.pendingAttr[0].end = z.raw.end
+ return
}
- // We assume that the next token is text unless proven otherwise.
- z.tt = TextToken
- if c != '<' {
- z.nextText()
- } else {
- z.nextTag()
- if z.tt == CommentToken && !z.ReturnComments {
- continue
- }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/':
+ z.pendingAttr[0].end = z.raw.end - 1
+ return
+ case '=', '>':
+ z.raw.end--
+ z.pendingAttr[0].end = z.raw.end
+ return
}
- return z.tt
}
- panic("unreachable")
}
-// trim returns the largest j such that z.buf[i:j] contains only white space,
-// or only white space plus the final ">" or "/>" of the raw data.
-func (z *Tokenizer) trim(i int) int {
- k := z.p1
- for ; i < k; i++ {
- switch z.buf[i] {
- case ' ', '\n', '\t', '\f':
- continue
- case '>':
- if i == k-1 {
- return k
+// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
+func (z *Tokenizer) readTagAttrVal() {
+ z.pendingAttr[1].start = z.raw.end
+ z.pendingAttr[1].end = z.raw.end
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ c := z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c != '=' {
+ z.raw.end--
+ return
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ quote := z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch quote {
+ case '>':
+ z.raw.end--
+ return
+
+ case '\'', '"':
+ z.pendingAttr[1].start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[1].end = z.raw.end
+ return
}
- case '/':
- if i == k-2 {
- return k
+ if c == quote {
+ z.pendingAttr[1].end = z.raw.end - 1
+ return
}
}
- return i
- }
- return k
-}
-// tagName finds the tag name at the start of z.buf[i:] and returns that name
-// lower-cased, as well as the trimmed cursor location afterwards.
-func (z *Tokenizer) tagName(i int) ([]byte, int) {
- i0 := i
-loop:
- for ; i < z.p1; i++ {
- c := z.buf[i]
- switch c {
- case ' ', '\n', '\t', '\f', '/', '>':
- break loop
- }
- if 'A' <= c && c <= 'Z' {
- z.buf[i] = c + 'a' - 'A'
+ default:
+ z.pendingAttr[1].start = z.raw.end - 1
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ z.pendingAttr[1].end = z.raw.end - 1
+ return
+ case '>':
+ z.raw.end--
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
}
}
- return z.buf[i0:i], z.trim(i)
}
-// unquotedAttrVal finds the unquoted attribute value at the start of z.buf[i:]
-// and returns that value, as well as the trimmed cursor location afterwards.
-func (z *Tokenizer) unquotedAttrVal(i int) ([]byte, int) {
- i0 := i
+// next scans the next token and returns its type.
+func (z *Tokenizer) next() TokenType {
+ if z.err != nil {
+ return ErrorToken
+ }
+ z.raw.start = z.raw.end
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ if z.rawTag != "" {
+ z.readRawOrRCDATA()
+ return TextToken
+ }
+ z.textIsRaw = false
+
loop:
- for ; i < z.p1; i++ {
- switch z.buf[i] {
- case ' ', '\n', '\t', '\f', '>':
+ for {
+ c := z.readByte()
+ if z.err != nil {
break loop
- case '&':
- // TODO: unescape the entity.
}
- }
- return z.buf[i0:i], z.trim(i)
-}
-
-// attrName finds the largest attribute name at the start
-// of z.buf[i:] and returns it lower-cased, as well
-// as the trimmed cursor location after that name.
-//
-// http://dev.w3.org/html5/spec/Overview.html#syntax-attribute-name
-// TODO: unicode characters
-func (z *Tokenizer) attrName(i int) ([]byte, int) {
- for z.buf[i] == '/' {
- i++
- if z.buf[i] == '>' {
- return nil, z.trim(i)
+ if c != '<' {
+ continue loop
}
- }
- i0 := i
-loop:
- for ; i < z.p1; i++ {
- c := z.buf[i]
- switch c {
- case '>', '/', '=':
+
+ // Check if the '<' we have just read is part of a tag, comment
+ // or doctype. If not, it's part of the accumulated text token.
+ c = z.readByte()
+ if z.err != nil {
break loop
}
+ var tokenType TokenType
switch {
- case 'A' <= c && c <= 'Z':
- z.buf[i] = c + 'a' - 'A'
- case c > ' ' && c < 0x7f:
- // No-op.
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ tokenType = StartTagToken
+ case c == '/':
+ tokenType = EndTagToken
+ case c == '!' || c == '?':
+ // We use CommentToken to mean any of "<!--actual comments-->",
+ // "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
+ tokenType = CommentToken
default:
- break loop
+ continue
+ }
+
+ // We have a non-text token, but we might have accumulated some text
+ // before that. If so, we return the text first, and return the non-
+ // text token on the subsequent call to Next.
+ if x := z.raw.end - len("<a"); z.raw.start < x {
+ z.raw.end = x
+ z.data.end = x
+ return TextToken
+ }
+ switch tokenType {
+ case StartTagToken:
+ return z.readStartTag()
+ case EndTagToken:
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c == '>' {
+ // "</>" does not generate a token at all.
+ // Reset the tokenizer state and start again.
+ z.raw.start = z.raw.end
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ continue loop
+ }
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ z.readEndTag()
+ return EndTagToken
+ }
+ z.raw.end--
+ z.readUntilCloseAngle()
+ return CommentToken
+ case CommentToken:
+ if c == '!' {
+ return z.readMarkupDeclaration()
+ }
+ z.raw.end--
+ z.readUntilCloseAngle()
+ return CommentToken
+ }
+ }
+ if z.raw.start < z.raw.end {
+ z.data.end = z.raw.end
+ return TextToken
+ }
+ return ErrorToken
+}
+
+// Next scans the next token and returns its type.
+func (z *Tokenizer) Next() TokenType {
+ for {
+ z.tt = z.next()
+ // TODO: remove the ReturnComments option. A tokenizer should
+ // always return comment tags.
+ if z.tt == CommentToken && !z.ReturnComments {
+ continue
}
+ return z.tt
}
- return z.buf[i0:i], z.trim(i)
+ panic("unreachable")
+}
+
+// Raw returns the unmodified text of the current token. Calling Next, Token,
+// Text, TagName or TagAttr may change the contents of the returned slice.
+func (z *Tokenizer) Raw() []byte {
+ return z.buf[z.raw.start:z.raw.end]
}
// Text returns the unescaped text of a text, comment or doctype token. The
// contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) Text() []byte {
- var i0, i1 int
switch z.tt {
- case TextToken:
- i0 = z.p0
- i1 = z.p1
- case CommentToken:
- // Trim the "<!--" from the left and the "-->" from the right.
- // "<!-->" is a valid comment, so the adjusted endpoints might overlap.
- i0 = z.p0 + 4
- i1 = z.p1 - 3
- case DoctypeToken:
- // Trim the "<!DOCTYPE " from the left and the ">" from the right.
- i0 = z.p0 + 10
- i1 = z.p1 - 1
- default:
- return nil
- }
- z.p0 = z.p1
- if i0 < i1 {
- return unescape(z.buf[i0:i1])
+ case TextToken, CommentToken, DoctypeToken:
+ s := z.buf[z.data.start:z.data.end]
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ if !z.textIsRaw {
+ s = unescape(s)
+ }
+ return s
}
return nil
}
@@ -475,73 +677,33 @@ func (z *Tokenizer) Text() []byte {
// `<IMG SRC="foo">`) and whether the tag has attributes.
// The contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
- i := z.p0 + 1
- if i >= z.p1 {
- z.p0 = z.p1
- return nil, false
- }
- if z.buf[i] == '/' {
- i++
+ if z.data.start < z.data.end {
+ switch z.tt {
+ case StartTagToken, EndTagToken, SelfClosingTagToken:
+ s := z.buf[z.data.start:z.data.end]
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return lower(s), z.nAttrReturned < len(z.attr)
+ }
}
- name, z.p0 = z.tagName(i)
- hasAttr = z.p0 != z.p1
- return
+ return nil, false
}
// TagAttr returns the lower-cased key and unescaped value of the next unparsed
// attribute for the current tag token and whether there are more attributes.
// The contents of the returned slices may change on the next call to Next.
func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
- key, i := z.attrName(z.p0)
- // Check for an empty attribute value.
- if i == z.p1 {
- z.p0 = i
- return
- }
- // Get past the equals and quote characters.
- if z.buf[i] != '=' {
- z.p0, moreAttr = i, true
- return
- }
- i = z.trim(i + 1)
- if i == z.p1 {
- z.p0 = i
- return
- }
- closeQuote := z.buf[i]
- if closeQuote != '\'' && closeQuote != '"' {
- val, z.p0 = z.unquotedAttrVal(i)
- moreAttr = z.p0 != z.p1
- return
- }
- i = z.trim(i + 1)
- // Copy and unescape everything up to the closing quote.
- dst, src := i, i
-loop:
- for src < z.p1 {
- c := z.buf[src]
- switch c {
- case closeQuote:
- src++
- break loop
- case '&':
- dst, src = unescapeEntity(z.buf, dst, src, true)
- case '\\':
- if src == z.p1 {
- z.buf[dst] = '\\'
- dst++
- } else {
- z.buf[dst] = z.buf[src+1]
- dst, src = dst+1, src+2
- }
- default:
- z.buf[dst] = c
- dst, src = dst+1, src+1
+ if z.nAttrReturned < len(z.attr) {
+ switch z.tt {
+ case StartTagToken, SelfClosingTagToken:
+ x := z.attr[z.nAttrReturned]
+ z.nAttrReturned++
+ key = z.buf[x[0].start:x[0].end]
+ val = z.buf[x[1].start:x[1].end]
+ return lower(key), unescape(val), z.nAttrReturned < len(z.attr)
}
}
- val, z.p0 = z.buf[i:dst], z.trim(src)
- moreAttr = z.p0 != z.p1
- return
+ return nil, nil, false
}
// Token returns the next Token. The result's Data and Attr values remain valid
@@ -551,7 +713,7 @@ func (z *Tokenizer) Token() Token {
switch z.tt {
case TextToken, CommentToken, DoctypeToken:
t.Data = string(z.Text())
- case StartTagToken, EndTagToken, SelfClosingTagToken:
+ case StartTagToken, SelfClosingTagToken:
var attr []Attribute
name, moreAttr := z.TagName()
for moreAttr {
@@ -561,6 +723,9 @@ func (z *Tokenizer) Token() Token {
}
t.Data = string(name)
t.Attr = attr
+ case EndTagToken:
+ name, _ := z.TagName()
+ t.Data = string(name)
}
return t
}
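A minimal sketch of driving the reworked tokenizer, under the same import-path assumption: Next advances to the next token, Token materializes it, and an ErrorToken whose Error is os.EOF marks the end of input. Setting ReturnComments keeps comment tokens in the stream, as the tests below exercise:

package main

import (
	"fmt"
	"html" // import path assumed for this libgo tree
	"os"
	"strings"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p class="x">a &amp; b<!--note--></p>`))
	z.ReturnComments = true // otherwise Next skips comment tokens
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			if err := z.Error(); err != os.EOF {
				panic(err) // a genuine read error, not end of input
			}
			return
		}
		fmt.Println(z.Token().String())
	}
}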
diff --git a/libgo/go/html/token_test.go b/libgo/go/html/token_test.go
index 0a0beb2..310cd97 100644
--- a/libgo/go/html/token_test.go
+++ b/libgo/go/html/token_test.go
@@ -21,6 +21,11 @@ type tokenTest struct {
}
var tokenTests = []tokenTest{
+ {
+ "empty",
+ "",
+ "",
+ },
// A single text node. The tokenizer should not break text nodes on whitespace,
// nor should it normalize whitespace within a text node.
{
@@ -41,6 +46,88 @@ var tokenTests = []tokenTest{
"<a>b<c/>d</e>",
"<a>$b$<c/>$d$</e>",
},
+ // Angle brackets that aren't a tag.
+ {
+ "not a tag #0",
+ "<",
+ "&lt;",
+ },
+ {
+ "not a tag #1",
+ "</",
+ "&lt;/",
+ },
+ {
+ "not a tag #2",
+ "</>",
+ "",
+ },
+ {
+ "not a tag #3",
+ "a</>b",
+ "a$b",
+ },
+ {
+ "not a tag #4",
+ "</ >",
+ "<!-- -->",
+ },
+ {
+ "not a tag #5",
+ "</.",
+ "<!--.-->",
+ },
+ {
+ "not a tag #6",
+ "</.>",
+ "<!--.-->",
+ },
+ {
+ "not a tag #7",
+ "a < b",
+ "a &lt; b",
+ },
+ {
+ "not a tag #8",
+ "<.>",
+ "&lt;.&gt;",
+ },
+ {
+ "not a tag #9",
+ "a<<<b>>>c",
+ "a&lt;&lt;$<b>$&gt;&gt;c",
+ },
+ {
+ "not a tag #10",
+ "if x<0 and y < 0 then x*y>0",
+ "if x&lt;0 and y &lt; 0 then x*y&gt;0",
+ },
+ // EOF in a tag name.
+ {
+ "tag name eof #0",
+ "<a",
+ "",
+ },
+ {
+ "tag name eof #1",
+ "<a ",
+ "",
+ },
+ {
+ "tag name eof #2",
+ "a<b",
+ "a",
+ },
+ {
+ "tag name eof #3",
+ "<a><b",
+ "<a>",
+ },
+ {
+ "tag name eof #4",
+ `<a x`,
+ `<a x="">`,
+ },
// Some malformed tags that are missing a '>'.
{
"malformed tag #0",
@@ -54,70 +141,198 @@ var tokenTests = []tokenTest{
},
{
"malformed tag #2",
+ `<p id`,
+ `<p id="">`,
+ },
+ {
+ "malformed tag #3",
+ `<p id=`,
+ `<p id="">`,
+ },
+ {
+ "malformed tag #4",
+ `<p id=>`,
+ `<p id="">`,
+ },
+ {
+ "malformed tag #5",
+ `<p id=0`,
+ `<p id="0">`,
+ },
+ {
+ "malformed tag #6",
`<p id=0</p>`,
`<p id="0&lt;/p">`,
},
{
- "malformed tag #3",
+ "malformed tag #7",
`<p id="0</p>`,
`<p id="0&lt;/p&gt;">`,
},
{
- "malformed tag #4",
+ "malformed tag #8",
`<p id="0"</p>`,
`<p id="0" <="" p="">`,
},
+ // Raw text and RCDATA.
+ {
+ "basic raw text",
+ "<script><a></b></script>",
+ "<script>$&lt;a&gt;&lt;/b&gt;$</script>",
+ },
+ {
+ "unfinished script end tag",
+ "<SCRIPT>a</SCR",
+ "<script>$a&lt;/SCR",
+ },
+ {
+ "broken script end tag",
+ "<SCRIPT>a</SCR ipt>",
+ "<script>$a&lt;/SCR ipt&gt;",
+ },
+ {
+ "EOF in script end tag",
+ "<SCRIPT>a</SCRipt",
+ "<script>$a&lt;/SCRipt",
+ },
+ {
+ "scriptx end tag",
+ "<SCRIPT>a</SCRiptx",
+ "<script>$a&lt;/SCRiptx",
+ },
+ {
+ "' ' completes script end tag",
+ "<SCRIPT>a</SCRipt ",
+ "<script>$a$</script>",
+ },
+ {
+ "'>' completes script end tag",
+ "<SCRIPT>a</SCRipt>",
+ "<script>$a$</script>",
+ },
+ {
+ "self-closing script end tag",
+ "<SCRIPT>a</SCRipt/>",
+ "<script>$a$</script>",
+ },
+ {
+ "nested script tag",
+ "<SCRIPT>a</SCRipt<script>",
+ "<script>$a&lt;/SCRipt&lt;script&gt;",
+ },
+ {
+ "script end tag after unfinished",
+ "<SCRIPT>a</SCRipt</script>",
+ "<script>$a&lt;/SCRipt$</script>",
+ },
+ {
+ "script/style mismatched tags",
+ "<script>a</style>",
+ "<script>$a&lt;/style&gt;",
+ },
+ {
+ "style element with entity",
+ "<style>&apos;",
+ "<style>$&amp;apos;",
+ },
+ {
+ "textarea with tag",
+ "<textarea><div></textarea>",
+ "<textarea>$&lt;div&gt;$</textarea>",
+ },
+ {
+ "title with tag and entity",
+ "<title><b>K&amp;R C</b></title>",
+ "<title>$&lt;b&gt;K&amp;R C&lt;/b&gt;$</title>",
+ },
+ // DOCTYPE tests.
+ {
+ "Proper DOCTYPE",
+ "<!DOCTYPE html>",
+ "<!DOCTYPE html>",
+ },
+ {
+ "DOCTYPE with no space",
+ "<!doctypehtml>",
+ "<!DOCTYPE html>",
+ },
+ {
+ "DOCTYPE with two spaces",
+ "<!doctype html>",
+ "<!DOCTYPE html>",
+ },
+ {
+ "looks like DOCTYPE but isn't",
+ "<!DOCUMENT html>",
+ "<!--DOCUMENT html-->",
+ },
+ {
+ "DOCTYPE at EOF",
+ "<!DOCtype",
+ "<!DOCTYPE >",
+ },
+ // XML processing instructions.
+ {
+ "XML processing instruction",
+ "<?xml?>",
+ "<!--?xml?-->",
+ },
// Comments.
{
"comment0",
"abc<b><!-- skipme --></b>def",
- "abc$<b>$</b>$def",
+ "abc$<b>$<!-- skipme -->$</b>$def",
},
{
"comment1",
"a<!-->z",
- "a$z",
+ "a$<!---->$z",
},
{
"comment2",
"a<!--->z",
- "a$z",
+ "a$<!---->$z",
},
{
"comment3",
"a<!--x>-->z",
- "a$z",
+ "a$<!--x>-->$z",
},
{
"comment4",
"a<!--x->-->z",
- "a$z",
+ "a$<!--x->-->$z",
},
{
"comment5",
"a<!>z",
- "a$&lt;!&gt;z",
+ "a$<!---->$z",
},
{
"comment6",
"a<!->z",
- "a$&lt;!-&gt;z",
+ "a$<!----->$z",
},
{
"comment7",
"a<!---<>z",
- "a$&lt;!---&lt;&gt;z",
+ "a$<!---<>z-->",
},
{
"comment8",
"a<!--z",
- "a$&lt;!--z",
+ "a$<!--z-->",
+ },
+ {
+ "comment9",
+ "a<!--x--!>z",
+ "a$<!--x-->$z",
},
// An attribute with a backslash.
{
"backslash",
`<p id="a\"b">`,
- `<p id="a&quot;b">`,
+ `<p id="a\" b"="">`,
},
// Entities, tag name and attribute key lower-casing, and whitespace
// normalization within a tag.
@@ -133,11 +348,14 @@ var tokenTests = []tokenTest{
`<a b="c&noSuchEntity;d">&lt;&alsoDoesntExist;&`,
`<a b="c&amp;noSuchEntity;d">$&lt;&amp;alsoDoesntExist;&amp;`,
},
- {
- "entity without semicolon",
- `&notit;&notin;<a b="q=z&amp=5&notice=hello&not;=world">`,
- `¬it;∉$<a b="q=z&amp;amp=5&amp;notice=hello¬=world">`,
- },
+ /*
+ // TODO: re-enable this test when it works. This input/output matches html5lib's behavior.
+ {
+ "entity without semicolon",
+ `&notit;&notin;<a b="q=z&amp=5&notice=hello&not;=world">`,
+ `¬it;∉$<a b="q=z&amp;amp=5&amp;notice=hello¬=world">`,
+ },
+ */
{
"entity with digits",
"&frac12;",
@@ -190,21 +408,34 @@ var tokenTests = []tokenTest{
`<meta http-equiv="content-type">`,
`<meta http-equiv="content-type">`,
},
+ {
+ "Mixed attributes",
+ `a<P V="0 1" w='2' X=3 y>z`,
+ `a$<p v="0 1" w="2" x="3" y="">$z`,
+ },
+ {
+ "Attributes with a solitary single quote",
+ `<p id=can't><p id=won't>`,
+ `<p id="can&apos;t">$<p id="won&apos;t">`,
+ },
}
func TestTokenizer(t *testing.T) {
loop:
for _, tt := range tokenTests {
- z := NewTokenizer(bytes.NewBuffer([]byte(tt.html)))
- for i, s := range strings.Split(tt.golden, "$") {
- if z.Next() == ErrorToken {
- t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Error())
- continue loop
- }
- actual := z.Token().String()
- if s != actual {
- t.Errorf("%s token %d: want %q got %q", tt.desc, i, s, actual)
- continue loop
+ z := NewTokenizer(strings.NewReader(tt.html))
+ z.ReturnComments = true
+ if tt.golden != "" {
+ for i, s := range strings.Split(tt.golden, "$") {
+ if z.Next() == ErrorToken {
+ t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Error())
+ continue loop
+ }
+ actual := z.Token().String()
+ if s != actual {
+ t.Errorf("%s token %d: want %q got %q", tt.desc, i, s, actual)
+ continue loop
+ }
}
}
z.Next()