about summary refs log tree commit diff
path: root/gcc/rust/parse/rust-parse-impl.h
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/rust/parse/rust-parse-impl.h')
-rw-r--r-- gcc/rust/parse/rust-parse-impl.h | 41
1 file changed, 0 insertions(+), 41 deletions(-)
diff --git a/gcc/rust/parse/rust-parse-impl.h b/gcc/rust/parse/rust-parse-impl.h
index 5642990..864fb86 100644
--- a/gcc/rust/parse/rust-parse-impl.h
+++ b/gcc/rust/parse/rust-parse-impl.h
@@ -14888,47 +14888,6 @@ Parser<ManagedTokenSource>::done_end ()
return (t->get_id () == RIGHT_CURLY || t->get_id () == END_OF_FILE);
}
-// Dumps lexer output to stderr.
-template <typename ManagedTokenSource>
-void
-Parser<ManagedTokenSource>::debug_dump_lex_output (std::ostream &out)
-{
- /* TODO: a better implementation of "lexer dump" (as in dump what was
- * actually tokenised) would actually be to "write" a token to a file every
- * time skip_token() here was called. This would reflect the parser
- * modifications to the token stream, such as fixing the template angle
- * brackets. */
-
- const_TokenPtr tok = lexer.peek_token ();
-
- while (true)
- {
- if (tok->get_id () == Rust::END_OF_FILE)
- break;
-
- bool has_text = tok->get_id () == Rust::IDENTIFIER
- || tok->get_id () == Rust::INT_LITERAL
- || tok->get_id () == Rust::FLOAT_LITERAL
- || tok->get_id () == Rust::STRING_LITERAL
- || tok->get_id () == Rust::CHAR_LITERAL
- || tok->get_id () == Rust::BYTE_STRING_LITERAL
- || tok->get_id () == Rust::BYTE_CHAR_LITERAL;
-
- Location loc = tok->get_locus ();
-
- out << "<id=";
- out << tok->token_id_to_str ();
- out << has_text ? (std::string (", text=") + tok->get_str ()
- + std::string (", typehint=")
- + std::string (tok->get_type_hint_str ()))
- : "";
- out << lexer.get_line_map ()->to_string (loc);
-
- lexer.skip_token ();
- tok = lexer.peek_token ();
- }
-}
-
// Parses crate and dumps AST to stderr, recursively.
template <typename ManagedTokenSource>
void