go/parser: use (*Scanner).End instead of scannerhooks

Also drop the scannerhooks code, as it is now unused.

Updates #74958
Updates #76031

Change-Id: I5a0cb8a7fe954e40ce40b8406aed0d3f6a6a6964
Reviewed-on: https://go-review.googlesource.com/c/go/+/738701
Reviewed-by: Alan Donovan <adonovan@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Mateusz Poliwczak 2026-01-23 10:28:32 +01:00
parent 15882523a1
commit 74c909b2c5
4 changed files with 14 additions and 44 deletions
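
For orientation, a minimal standalone sketch of the pattern the parser switches to: ask the scanner for the end position of the token it just returned, instead of reaching into scanner internals through go/internal/scannerhooks. The demo program below is not part of the change and assumes a toolchain that already ships the (*Scanner).End method this CL relies on; older Go releases do not have it.

    package main

    import (
        "fmt"
        "go/scanner"
        "go/token"
    )

    func main() {
        src := []byte("package p; var v = `raw\nstring`")
        fset := token.NewFileSet()
        file := fset.AddFile("demo.go", fset.Base(), len(src))

        var s scanner.Scanner
        s.Init(file, src, nil /* no error handler */, 0)

        for {
            pos, tok, lit := s.Scan()
            if tok == token.EOF {
                break
            }
            // End reports the position immediately after the token just
            // scanned; go/parser now uses it to set ast.BasicLit.ValueEnd
            // instead of the removed scannerhooks backdoor.
            fmt.Println(tok, lit, fset.Position(pos), "to", fset.Position(s.End()))
        }
    }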

src/go/build/deps_test.go

@@ -342,7 +342,6 @@ var depsRules = `
< internal/gover
< go/version
< go/token
-< go/internal/scannerhooks
< go/scanner
< go/ast
< go/internal/typeparams;

src/go/internal/scannerhooks/scannerhooks.go (deleted)

@@ -1,11 +0,0 @@
-// Copyright 2025 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package scannerhooks defines nonexported channels between parser and scanner.
-// Ideally this package could be eliminated by adding API to scanner.
-package scannerhooks
-
-import "go/token"
-
-var StringEnd func(scanner any) token.Pos
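
The "nonexported channel" the deleted doc comment describes is the exported function variable StringEnd above: go/scanner assigned it in an init function (see the scanner.go hunk at the bottom of this page) and go/parser called it to read an otherwise unexported scanner field. A self-contained sketch of that hook pattern, with illustrative names rather than the real packages:

    package main

    import "fmt"

    // The "hook": an exported function variable that a shared internal
    // package would normally hold.
    var stringEnd func(sc any) int

    // The "scanner" side owns an unexported field and publishes access to it
    // by filling in the hook at init time.
    type scn struct{ end int }

    func init() {
        stringEnd = func(s any) int { return s.(*scn).end }
    }

    // The "parser" side reads the field through the hook instead of a method.
    func main() {
        s := &scn{end: 42}
        fmt.Println(stringEnd(s)) // 42
    }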

src/go/parser/parser.go

@@ -28,7 +28,6 @@ import (
"fmt"
"go/ast"
"go/build/constraint"
-"go/internal/scannerhooks"
"go/scanner"
"go/token"
"strings"
@@ -53,10 +52,9 @@ type parser struct {
goVersion string // minimum Go version found in //go:build comment

// Next token
-pos token.Pos // token position
-tok token.Token // one token look-ahead
-lit string // token literal
-stringEnd token.Pos // position immediately after token; STRING only
+pos token.Pos // token position
+tok token.Token // one token look-ahead
+lit string // token literal

// Error recovery
// (used to limit the number of calls to parser.advance
@@ -87,6 +85,11 @@ func (p *parser) init(file *token.File, src []byte, mode Mode) {
p.next()
}

+// end returns the end position of the current token
+func (p *parser) end() token.Pos {
+return p.scanner.End()
+}
+
// ----------------------------------------------------------------------------
// Parsing support
@@ -165,10 +168,6 @@ func (p *parser) next0() {
continue
}
} else {
-if p.tok == token.STRING {
-p.stringEnd = scannerhooks.StringEnd(&p.scanner)
-}
-
// Found a non-comment; top of file is over.
p.top = false
}
@@ -726,7 +725,7 @@ func (p *parser) parseFieldDecl() *ast.Field {
var tag *ast.BasicLit
if p.tok == token.STRING {
-tag = &ast.BasicLit{ValuePos: p.pos, ValueEnd: p.stringEnd, Kind: p.tok, Value: p.lit}
+tag = &ast.BasicLit{ValuePos: p.pos, ValueEnd: p.end(), Kind: p.tok, Value: p.lit}
p.next()
}
@@ -1480,11 +1479,7 @@ func (p *parser) parseOperand() ast.Expr {
return x

case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
-end := p.pos + token.Pos(len(p.lit))
-if p.tok == token.STRING {
-end = p.stringEnd
-}
-x := &ast.BasicLit{ValuePos: p.pos, ValueEnd: end, Kind: p.tok, Value: p.lit}
+x := &ast.BasicLit{ValuePos: p.pos, ValueEnd: p.end(), Kind: p.tok, Value: p.lit}
p.next()
return x
@@ -2525,7 +2520,7 @@ func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) as
var path string
if p.tok == token.STRING {
path = p.lit
-end = p.stringEnd
+end = p.end()
p.next()
} else if p.tok.IsLiteral() {
p.error(pos, "import path must be a string")
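
Aside, inferred from the stripCR handling visible in the scanner.go hunks below rather than stated by the CL: for raw string literals the scanner strips carriage returns from the literal it returns, so pos + len(lit) can fall short of the literal's real end in the source. That is why the end position has to come from the scanner (previously via stringEnd, now via End) instead of being recomputed in the parser. A tiny illustration of the length mismatch:

    package main

    import "fmt"

    func main() {
        // Source text of a raw string literal containing a carriage return:
        // 6 bytes in the file.
        source := "`a\r\nb`"
        // The scanner returns the literal with the \r stripped, so its length
        // no longer matches the literal's extent in the source.
        lit := "`a\nb`"
        fmt.Println(len(source), len(lit)) // 6 5
    }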

src/go/scanner/scanner.go

@@ -10,7 +10,6 @@ package scanner
import (
"bytes"
"fmt"
-"go/internal/scannerhooks"
"go/token"
"path/filepath"
"strconv"
@@ -42,7 +41,6 @@ type Scanner struct {
lineOffset int // current line offset
insertSemi bool // insert a semicolon before next newline
nlPos token.Pos // position of newline in preceding comment
-stringEnd token.Pos // end position; defined only for STRING tokens
endPosValid bool
endPos token.Pos // overrides the offset as the default end position
@@ -51,13 +49,6 @@ type Scanner struct {
ErrorCount int // number of errors encountered
}

-// Provide go/parser with backdoor access to the StringEnd information.
-func init() {
-scannerhooks.StringEnd = func(scanner any) token.Pos {
-return scanner.(*Scanner).stringEnd
-}
-}
-
const (
bom = 0xFEFF // byte order mark, only permitted as very first character
eof = -1 // end of file
@@ -705,7 +696,7 @@ func stripCR(b []byte, comment bool) []byte {
return c[:i]
}

-func (s *Scanner) scanRawString() (string, int) {
+func (s *Scanner) scanRawString() string {
// '`' opening already consumed
offs := s.offset - 1
@@ -726,12 +717,11 @@ func (s *Scanner) scanRawString() (string, int) {
}

lit := s.src[offs:s.offset]
-rawLen := len(lit)
if hasCR {
lit = stripCR(lit, false)
}

-return string(lit), rawLen
+return string(lit)
}
func (s *Scanner) skipWhitespace() {
@@ -883,7 +873,6 @@ scanAgain:
insertSemi = true
tok = token.STRING
lit = s.scanString()
-s.stringEnd = pos + token.Pos(len(lit))
case '\'':
insertSemi = true
tok = token.CHAR
@@ -891,9 +880,7 @@ scanAgain:
case '`':
insertSemi = true
tok = token.STRING
-var rawLen int
-lit, rawLen = s.scanRawString()
-s.stringEnd = pos + token.Pos(rawLen)
+lit = s.scanRawString()
case ':':
tok = s.switch2(token.COLON, token.DEFINE)
case '.':