[dev.link] all: merge branch 'master' into dev.link

Clean merge.

Change-Id: I94d5e621b98cd5b3e1f2007db83d52293edbd9ec
This commit is contained in:
Cherry Zhang 2019-10-18 14:44:05 -04:00
commit c3459eaab0
181 changed files with 2883 additions and 1832 deletions

View file

@ -39,6 +39,21 @@ See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.13.1">Go
1.13.1 milestone</a> on our issue tracker for details.
</p>
<p>
go1.13.2 (released 2019/10/17) includes security fixes to the
<code>crypto/dsa</code> package and the compiler.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.13.2">Go
1.13.2 milestone</a> on our issue tracker for details.
</p>
<p>
go1.13.3 (released 2019/10/17) includes fixes to the go command,
the toolchain, the runtime, <code>syscall</code>, <code>net</code>,
<code>net/http</code>, and <code>crypto/ecdsa</code> packages.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.13.3">Go
1.13.3 milestone</a> on our issue tracker for details.
</p>
<h2 id="go1.12">go1.12 (released 2019/02/25)</h2>
<p>
@ -121,6 +136,20 @@ See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.12.10">Go
1.12.10 milestone</a> on our issue tracker for details.
</p>
<p>
go1.12.11 (released 2019/10/17) includes security fixes to the
<code>crypto/dsa</code> package.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.12.11">Go
1.12.11 milestone</a> on our issue tracker for details.
</p>
<p>
go1.12.12 (released 2019/10/17) includes fixes to the go command,
runtime, <code>syscall</code> and <code>net</code> packages.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.12.12">Go
1.12.12 milestone</a> on our issue tracker for details.
</p>
<h2 id="go1.11">go1.11 (released 2018/08/24)</h2>
<p>

View file

@ -33,7 +33,29 @@ TODO
<h2 id="ports">Ports</h2>
<p>
TODO
TODO: is Dragonfly passing? On both Dragonfly release & tip? (ABI
change happened) Does the net package's interface APIs work on both?
https://golang.org/issue/34368.
</p>
<p>
TODO: is Illumos up with a builder and passing?
https://golang.org/issue/15581.
</p>
<p>
TODO: announce something about the Go Solaris port? Solaris itself
is unmaintained? The builder is still running at Oracle, but the
employee who set it up left the company and we have no way to
maintain it.
</p>
<h3 id="darwin">Darwin</h3>
<p><!-- golang.org/issue/34749 -->
Go 1.14 is the last Go release to support 32-bit binaries on
macOS (the <code>darwin/386</code> port). They are no longer
supported by macOS, starting with macOS 10.15 (Catalina).
</p>
<h3 id="nacl">Native Client (NaCl)</h3>

View file

@ -9,7 +9,6 @@
package arch
import (
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
@ -59,26 +58,6 @@ func jumpS390x(word string) bool {
return false
}
// IsS390xCMP reports whether the op (as defined by an s390x.A* constant) is
// one of the CMP instructions that require special handling.
func IsS390xCMP(op obj.As) bool {
switch op {
case s390x.ACMP, s390x.ACMPU, s390x.ACMPW, s390x.ACMPWU:
return true
}
return false
}
// IsS390xNEG reports whether the op (as defined by an s390x.A* constant) is
// one of the NEG-like instructions that require special handling.
func IsS390xNEG(op obj.As) bool {
switch op {
case s390x.ANEG, s390x.ANEGW:
return true
}
return false
}
func s390xRegisterNumber(name string, n int16) (int16, bool) {
switch name {
case "AR":

View file

@ -1049,5 +1049,11 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
VADDPD Z2, Z9, Z21 // 62e1b54858ea
VADDPD Z21, Z2, Z9 // 6231ed4858cd
VADDPD Z9, Z21, Z2 // 62d1d54058d1
CLWB (BX) // 660fae33
CLDEMOTE (BX) // 0f1c03
TPAUSE BX // 660faef3
UMONITOR BX // f30faef3
UMWAIT BX // f20faef3
// End of tests.
RET

View file

@ -132,4 +132,12 @@ TEXT errors(SB),$0
VADDPD.BCST X3, X2, K1, X1 // ERROR "illegal broadcast without memory argument"
VADDPD.BCST X3, X2, K1, X1 // ERROR "illegal broadcast without memory argument"
VADDPD.BCST X3, X2, K1, X1 // ERROR "illegal broadcast without memory argument"
// CLWB instuctions:
CLWB BX // ERROR "invalid instruction"
// CLDEMOTE instructions:
CLDEMOTE BX // ERROR "invalid instruction"
// WAITPKG instructions:
TPAUSE (BX) // ERROR "invalid instruction"
UMONITOR (BX) // ERROR "invalid instruction"
UMWAIT (BX) // ERROR "invalid instruction"
RET

View file

@ -1038,6 +1038,8 @@ again:
FSTPS (F3, F4), 1024(RSP) // fb0310916313002d
FSTPS (F3, F4), x(SB)
FSTPS (F3, F4), x+8(SB)
NOOP // 1f2003d5
HINT $0 // 1f2003d5
// System Register
MSR $1, SPSel // bf4100d5

View file

@ -152,6 +152,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
EXTR $35, R22, R12, R8 // 888dd693
SEVL // bf2003d5
HINT $6 // df2003d5
HINT $0 // 1f2003d5
HLT $65509 // a0fc5fd4
HVC $61428 // 82fe1dd4
ISB $1 // df3103d5
@ -281,7 +282,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
NGC R2, R7 // e70302da
NGCSW R10, R5 // e5030a7a
NGCS R24, R16 // f00318fa
//TODO NOP // 1f2003d5
NOOP // 1f2003d5
ORNW R4@>11, R16, R3 // 032ee42a
ORN R22@>19, R3, R3 // 634cf6aa
ORRW $4294443071, R15, R24 // f8490d32

View file

@ -237,3 +237,33 @@ start:
// Arbitrary bytes (entered in little-endian mode)
WORD $0x12345678 // WORD $305419896 // 78563412
WORD $0x9abcdef0 // WORD $2596069104 // f0debc9a
// MOV pseudo-instructions
MOV X5, X6 // 13830200
MOV $2047, X5 // 9b02f07f
MOV $-2048, X5 // 9b020080
MOV (X5), X6 // 03b30200
MOV 4(X5), X6 // 03b34200
MOVB (X5), X6 // 03830200
MOVB 4(X5), X6 // 03834200
MOVH (X5), X6 // 03930200
MOVH 4(X5), X6 // 03934200
MOVW (X5), X6 // 03a30200
MOVW 4(X5), X6 // 03a34200
MOV X5, (X6) // 23305300
MOV X5, 4(X6) // 23325300
MOVB X5, (X6) // 23005300
MOVB X5, 4(X6) // 23025300
MOVH X5, (X6) // 23105300
MOVH X5, 4(X6) // 23125300
MOVW X5, (X6) // 23205300
MOVW X5, 4(X6) // 23225300
MOVF 4(X5), F0 // 07a04200
MOVF F0, 4(X5) // 27a20200
MOVF F0, F1 // d3000020
MOVD 4(X5), F0 // 07b04200
MOVD F0, 4(X5) // 27b20200
MOVD F0, F1 // d3000022

View file

@ -171,6 +171,7 @@ var knownFormats = map[string]string{
"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
"map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
"rune %#U": "",

View file

@ -947,13 +947,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
case ssa.OpAMD64LoweredPanicExtendA, ssa.OpAMD64LoweredPanicExtendB, ssa.OpAMD64LoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
s.UseArgs(int64(3 * gc.Widthptr)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:

View file

@ -181,13 +181,15 @@ var runtimeDecls = [...]struct {
{"racewriterange", funcTag, 119},
{"msanread", funcTag, 119},
{"msanwrite", funcTag, 119},
{"checkptrAlignment", funcTag, 120},
{"checkptrArithmetic", funcTag, 122},
{"x86HasPOPCNT", varTag, 15},
{"x86HasSSE41", varTag, 15},
{"arm64HasATOMICS", varTag, 15},
}
func runtimeTypes() []*types.Type {
var typs [120]*types.Type
var typs [123]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
@ -308,5 +310,8 @@ func runtimeTypes() []*types.Type {
typs[117] = functype(nil, []*Node{anonfield(typs[23]), anonfield(typs[23])}, []*Node{anonfield(typs[23])})
typs[118] = functype(nil, []*Node{anonfield(typs[50])}, nil)
typs[119] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil)
typs[120] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[1])}, nil)
typs[121] = types.NewSlice(typs[56])
typs[122] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[121])}, nil)
return typs[:]
}

View file

@ -235,6 +235,9 @@ func racewriterange(addr, size uintptr)
func msanread(addr, size uintptr)
func msanwrite(addr, size uintptr)
func checkptrAlignment(unsafe.Pointer, *byte)
func checkptrArithmetic(unsafe.Pointer, []unsafe.Pointer)
// architecture variants
var x86HasPOPCNT bool
var x86HasSSE41 bool

View file

@ -15,12 +15,11 @@ func _() {
_ = x[PPARAM-4]
_ = x[PPARAMOUT-5]
_ = x[PFUNC-6]
_ = x[PDISCARD-7]
}
const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPFUNCPDISCARD"
const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPFUNC"
var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 45, 53}
var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 45}
func (i Class) String() string {
if i >= Class(len(_Class_index)-1) {

View file

@ -101,7 +101,7 @@ func typecheckclosure(clo *Node, top int) {
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
if n.Name.Decldepth == decldepth {
n.SetAssigned(false)
n.Name.SetAssigned(false)
}
}
}
@ -192,10 +192,10 @@ func capturevars(xfunc *Node) {
outermost := v.Name.Defn
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type.Width <= 128 {
if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
v.Name.SetByval(true)
} else {
outermost.SetAddrtaken(true)
outermost.Name.SetAddrtaken(true)
outer = nod(OADDR, outer, nil)
}
@ -208,7 +208,7 @@ func capturevars(xfunc *Node) {
if v.Name.Byval() {
how = "value"
}
Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Addrtaken(), outermost.Assigned(), int32(v.Type.Width))
Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
}
outer = typecheck(outer, ctxExpr)
@ -345,7 +345,7 @@ func closuredebugruntimecheck(clo *Node) {
}
}
if compiling_runtime && clo.Esc == EscHeap {
yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime.")
yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
}
}

View file

@ -552,7 +552,7 @@ func tostr(v Val) Val {
func consttype(n *Node) Ctype {
if n == nil || n.Op != OLITERAL {
return 0
return CTxxx
}
return n.Val().Ctype()
}

View file

@ -60,10 +60,6 @@ var declare_typegen int
// declare records that Node n declares symbol n.Sym in the specified
// declaration context.
func declare(n *Node, ctxt Class) {
if ctxt == PDISCARD {
return
}
if n.isBlank() {
return
}
@ -207,7 +203,6 @@ func newnoname(s *types.Sym) *Node {
}
n := nod(ONONAME, nil, nil)
n.Sym = s
n.SetAddable(true)
n.Xoffset = 0
return n
}
@ -283,10 +278,9 @@ func oldname(s *types.Sym) *Node {
// Do not have a closure var for the active closure yet; make one.
c = newname(s)
c.SetClass(PAUTOHEAP)
c.SetIsClosureVar(true)
c.Name.SetIsClosureVar(true)
c.SetIsDDD(n.IsDDD())
c.Name.Defn = n
c.SetAddable(false)
// Link into list of active closure variables.
// Popped from list in func closurebody.

View file

@ -201,7 +201,7 @@ func addrescapes(n *Node) {
}
// If a closure reference escapes, mark the outer variable as escaping.
if n.IsClosureVar() {
if n.Name.IsClosureVar() {
addrescapes(n.Name.Defn)
break
}
@ -251,7 +251,7 @@ func moveToHeap(n *Node) {
Dump("MOVE", n)
}
if compiling_runtime {
yyerror("%v escapes to heap, not allowed in runtime.", n)
yyerror("%v escapes to heap, not allowed in runtime", n)
}
if n.Class() == PAUTOHEAP {
Dump("n", n)
@ -283,7 +283,6 @@ func moveToHeap(n *Node) {
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
stackcopy := newname(n.Sym)
stackcopy.SetAddable(false)
stackcopy.Type = n.Type
stackcopy.Xoffset = n.Xoffset
stackcopy.SetClass(n.Class())
@ -294,7 +293,7 @@ func moveToHeap(n *Node) {
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
heapaddr.SetIsOutputParamHeapAddr(true)
heapaddr.Name.SetIsOutputParamHeapAddr(true)
}
n.Name.Param.Stackcopy = stackcopy

View file

@ -471,7 +471,15 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
e.discard(max)
case OCONV, OCONVNOP:
if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR {
if checkPtr(e.curfn) && n.Type.Etype == TUNSAFEPTR && n.Left.Type.IsPtr() {
// When -d=checkptr is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
} else if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR {
e.unsafeValue(k, n.Left)
} else {
e.expr(k, n.Left)
@ -995,9 +1003,9 @@ func (e *Escape) later(k EscHole) EscHole {
// canonicalNode returns the canonical *Node that n logically
// represents.
func canonicalNode(n *Node) *Node {
if n != nil && n.IsClosureVar() {
if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
n = n.Name.Defn
if n.IsClosureVar() {
if n.Name.IsClosureVar() {
Fatalf("still closure var")
}
}

View file

@ -417,10 +417,6 @@ func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
func (n *Node) jconv(s fmt.State, flag FmtFlag) {
c := flag & FmtShort
if c == 0 && n.Addable() {
fmt.Fprintf(s, " a(%v)", n.Addable())
}
if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
}
@ -487,13 +483,14 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
fmt.Fprintf(s, " embedded")
}
if n.Addrtaken() {
if n.Op == ONAME {
if n.Name.Addrtaken() {
fmt.Fprint(s, " addrtaken")
}
if n.Assigned() {
if n.Name.Assigned() {
fmt.Fprint(s, " assigned")
}
}
if n.Bounded() {
fmt.Fprint(s, " bounded")
}

View file

@ -60,17 +60,10 @@ const (
PPARAMOUT // output results
PFUNC // global function
PDISCARD // discard during parse of duplicate import
// Careful: Class is stored in three bits in Node.flags.
// Adding a new Class will overflow that.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)
func init() {
if PDISCARD != 7 {
panic("PDISCARD changed; does all Class values still fit in three bits?")
}
}
// note this is the runtime representation
// of the compilers arrays.
//

View file

@ -73,6 +73,14 @@ func fninit(n []*Node) {
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
fn := asNode(s.Def).Name.Defn
// Skip init functions with empty bodies.
// noder.go doesn't allow external init functions, and
// order.go has already removed any OEMPTY nodes, so
// checking Len() == 0 is sufficient here.
if fn.Nbody.Len() == 0 {
continue
}
fns = append(fns, s.Linksym())
}

View file

@ -135,6 +135,12 @@ func caninl(fn *Node) {
return
}
// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
if Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 {
reason = "marked go:nocheckptr"
return
}
// If marked "go:cgo_unsafe_args", don't inline, since the
// function makes assumptions about its argument frame layout.
if fn.Func.Pragma&CgoUnsafeArgs != 0 {
@ -655,7 +661,7 @@ func inlnode(n *Node, maxCost int32) *Node {
// NB: this check is necessary to prevent indirect re-assignment of the variable
// having the address taken after the invocation or only used for reads is actually fine
// but we have no easy way to distinguish the safe cases
if d.Left.Addrtaken() {
if d.Left.Name.Addrtaken() {
if Debug['m'] > 1 {
fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left)
}
@ -919,9 +925,9 @@ func mkinlcall(n, fn *Node, maxCost int32) *Node {
if genDwarfInline > 0 {
inlf := inlvars[ln]
if ln.Class() == PPARAM {
inlf.SetInlFormal(true)
inlf.Name.SetInlFormal(true)
} else {
inlf.SetInlLocal(true)
inlf.Name.SetInlLocal(true)
}
inlf.Pos = ln.Pos
inlfvars = append(inlfvars, inlf)
@ -947,7 +953,7 @@ func mkinlcall(n, fn *Node, maxCost int32) *Node {
// was manufactured by the inliner (e.g. "~R2"); such vars
// were not part of the original callee.
if !strings.HasPrefix(m.Sym.Name, "~R") {
m.SetInlFormal(true)
m.Name.SetInlFormal(true)
m.Pos = mpos
inlfvars = append(inlfvars, m)
}
@ -1125,7 +1131,7 @@ func inlvar(var_ *Node) *Node {
n.SetClass(PAUTO)
n.Name.SetUsed(true)
n.Name.Curfn = Curfn // the calling function, not the called one
n.SetAddrtaken(var_.Addrtaken())
n.Name.SetAddrtaken(var_.Name.Addrtaken())
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
return n

View file

@ -35,6 +35,7 @@ const (
Norace // func must not have race detector annotations
Nosplit // func should not execute on separate stack
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
UintptrEscapes // pointers converted to uintptr escape
@ -63,6 +64,8 @@ func pragmaValue(verb string) syntax.Pragma {
return Nosplit
case "go:noinline":
return Noinline
case "go:nocheckptr":
return NoCheckPtr
case "go:systemstack":
return Systemstack
case "go:nowritebarrier":
@ -72,7 +75,7 @@ func pragmaValue(verb string) syntax.Pragma {
case "go:yeswritebarrierrec":
return Yeswritebarrierrec
case "go:cgo_unsafe_args":
return CgoUnsafeArgs
return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
case "go:uintptrescapes":
// For the next function declared in the file
// any uintptr arguments may be pointer values

View file

@ -40,6 +40,7 @@ var (
var (
Debug_append int
Debug_checkptr int
Debug_closure int
Debug_compilelater int
debug_dclstack int
@ -65,6 +66,7 @@ var debugtab = []struct {
val interface{} // must be *int or *string
}{
{"append", "print information about append compilation", &Debug_append},
{"checkptr", "instrument unsafe pointer conversions", &Debug_checkptr},
{"closure", "print information about closure compilation", &Debug_closure},
{"compilelater", "compile functions as late as possible", &Debug_compilelater},
{"disablenil", "disable nil checks", &disable_checknil},
@ -435,6 +437,11 @@ func Main(archInit func(*Arch)) {
}
}
// Runtime can't use -d=checkptr, at least not yet.
if compiling_runtime {
Debug_checkptr = 0
}
// set via a -d flag
Ctxt.Debugpcln = Debug_pctab
if flagDWARF {

View file

@ -25,7 +25,7 @@ import (
// and its root represented by *Node is appended to xtop.
// Returns the total count of parsed lines.
func parseFiles(filenames []string) uint {
var noders []*noder
noders := make([]*noder, 0, len(filenames))
// Limit the number of simultaneously open files.
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
@ -398,7 +398,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
typ, values = cs.typ, cs.values
}
var nn []*Node
nn := make([]*Node, 0, len(names))
for i, n := range names {
if i >= len(values) {
yyerror("missing value in const declaration")
@ -453,7 +453,7 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
}
func (p *noder) declNames(names []*syntax.Name) []*Node {
var nodes []*Node
nodes := make([]*Node, 0, len(names))
for _, name := range names {
nodes = append(nodes, p.declName(name))
}
@ -540,7 +540,7 @@ func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node {
}
func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
var nodes []*Node
nodes := make([]*Node, 0, len(params))
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
@ -590,7 +590,7 @@ func (p *noder) exprList(expr syntax.Expr) []*Node {
}
func (p *noder) exprs(exprs []syntax.Expr) []*Node {
var nodes []*Node
nodes := make([]*Node, 0, len(exprs))
for _, expr := range exprs {
nodes = append(nodes, p.expr(expr))
}
@ -809,7 +809,7 @@ func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
}
func (p *noder) structType(expr *syntax.StructType) *Node {
var l []*Node
l := make([]*Node, 0, len(expr.FieldList))
for i, field := range expr.FieldList {
p.setlineno(field)
var n *Node
@ -831,7 +831,7 @@ func (p *noder) structType(expr *syntax.StructType) *Node {
}
func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
var l []*Node
l := make([]*Node, 0, len(expr.MethodList))
for _, method := range expr.MethodList {
p.setlineno(method)
var n *Node
@ -1170,7 +1170,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
}
func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace syntax.Pos) []*Node {
var nodes []*Node
nodes := make([]*Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
@ -1226,7 +1226,7 @@ func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node {
}
func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*Node {
var nodes []*Node
nodes := make([]*Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {

View file

@ -202,7 +202,7 @@ func isaddrokay(n *Node) bool {
// The result of addrTemp MUST be assigned back to n, e.g.
// n.Left = o.addrTemp(n.Left)
func (o *Order) addrTemp(n *Node) *Node {
if consttype(n) > 0 {
if consttype(n) != CTxxx {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
dowidth(n.Type)
@ -298,7 +298,7 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
n := o.temp[i]
if n.Name.Keepalive() {
n.Name.SetKeepalive(false)
n.SetAddrtaken(true) // ensure SSA keeps the n variable
n.Name.SetAddrtaken(true) // ensure SSA keeps the n variable
live := nod(OVARLIVE, n, nil)
live = typecheck(live, ctxStmt)
out = append(out, live)
@ -513,26 +513,6 @@ func (o *Order) stmt(n *Node) {
o.mapAssign(n)
o.cleanTemp(t)
case OAS2,
OCLOSE,
OCOPY,
OPRINT,
OPRINTN,
ORECOVER,
ORECV:
t := o.markTemp()
n.Left = o.expr(n.Left, nil)
n.Right = o.expr(n.Right, nil)
o.exprList(n.List)
o.exprList(n.Rlist)
switch n.Op {
case OAS2:
o.mapAssign(n)
default:
o.out = append(o.out, n)
}
o.cleanTemp(t)
case OASOP:
t := o.markTemp()
n.Left = o.expr(n.Left, nil)
@ -562,23 +542,14 @@ func (o *Order) stmt(n *Node) {
o.mapAssign(n)
o.cleanTemp(t)
// Special: make sure key is addressable if needed,
// and make sure OINDEXMAP is not copied out.
case OAS2MAPR:
case OAS2:
t := o.markTemp()
o.exprList(n.List)
r := n.Right
r.Left = o.expr(r.Left, nil)
r.Right = o.expr(r.Right, nil)
// See similar conversion for OINDEXMAP below.
_ = mapKeyReplaceStrConv(r.Right)
r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
o.okAs2(n)
o.exprList(n.Rlist)
o.mapAssign(n)
o.cleanTemp(t)
// Special: avoid copy of func call n.Rlist.First().
// Special: avoid copy of func call n.Right
case OAS2FUNC:
t := o.markTemp()
o.exprList(n.List)
@ -588,32 +559,29 @@ func (o *Order) stmt(n *Node) {
o.cleanTemp(t)
// Special: use temporary variables to hold result,
// so that assertI2Tetc can take address of temporary.
// so that runtime can take address of temporary.
// No temporary for blank assignment.
case OAS2DOTTYPE:
//
// OAS2MAPR: make sure key is addressable if needed,
// and make sure OINDEXMAP is not copied out.
case OAS2DOTTYPE, OAS2RECV, OAS2MAPR:
t := o.markTemp()
o.exprList(n.List)
n.Right.Left = o.expr(n.Right.Left, nil) // i in i.(T)
o.okAs2(n)
o.cleanTemp(t)
// Special: use temporary variables to hold result,
// so that chanrecv can take address of temporary.
case OAS2RECV:
t := o.markTemp()
o.exprList(n.List)
n.Right.Left = o.expr(n.Right.Left, nil) // arg to recv
ch := n.Right.Left.Type
tmp1 := o.newTemp(ch.Elem(), types.Haspointers(ch.Elem()))
tmp2 := o.newTemp(types.Types[TBOOL], false)
o.out = append(o.out, n)
r := nod(OAS, n.List.First(), tmp1)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
r = okas(n.List.Second(), tmp2)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
n.List.Set2(tmp1, tmp2)
switch r := n.Right; r.Op {
case ODOTTYPE2, ORECV:
r.Left = o.expr(r.Left, nil)
case OINDEXMAP:
r.Left = o.expr(r.Left, nil)
r.Right = o.expr(r.Right, nil)
// See similar conversion for OINDEXMAP below.
_ = mapKeyReplaceStrConv(r.Right)
r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
default:
Fatalf("order.stmt: %v", r.Op)
}
o.okAs2(n)
o.cleanTemp(t)
// Special: does not save n onto out.
@ -639,6 +607,20 @@ func (o *Order) stmt(n *Node) {
o.out = append(o.out, n)
o.cleanTemp(t)
case OCLOSE,
OCOPY,
OPRINT,
OPRINTN,
ORECOVER,
ORECV:
t := o.markTemp()
n.Left = o.expr(n.Left, nil)
n.Right = o.expr(n.Right, nil)
o.exprList(n.List)
o.exprList(n.Rlist)
o.out = append(o.out, n)
o.cleanTemp(t)
// Special: order arguments to inner call but not call itself.
case ODEFER, OGO:
t := o.markTemp()
@ -1310,7 +1292,7 @@ func okas(ok, val *Node) *Node {
}
// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
// The caller should order the right-hand side of the assignment before calling orderas2.
// The caller should order the right-hand side of the assignment before calling order.as2.
// It rewrites,
// a, b, a = ...
// as
@ -1338,7 +1320,7 @@ func (o *Order) as2(n *Node) {
o.stmt(as)
}
// okAs2 orders OAS2 with ok.
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
func (o *Order) okAs2(n *Node) {
var tmp1, tmp2 *Node

View file

@ -262,7 +262,7 @@ func compile(fn *Node) {
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT, PAUTO:
if livenessShouldTrack(n) && n.Addrtaken() {
if livenessShouldTrack(n) && n.Name.Addrtaken() {
dtypesym(n.Type)
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
@ -498,9 +498,9 @@ func createSimpleVar(n *Node) *dwarf.Var {
typename := dwarf.InfoPrefix + typesymname(n.Type)
inlIndex := 0
if genDwarfInline > 1 {
if n.InlFormal() || n.InlLocal() {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.InlFormal() {
if n.Name.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
@ -509,7 +509,7 @@ func createSimpleVar(n *Node) *dwarf.Var {
return &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: n.Class() == PPARAMOUT,
IsInlFormal: n.InlFormal(),
IsInlFormal: n.Name.InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: Ctxt.Lookup(typename),
@ -619,9 +619,9 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
}
inlIndex := 0
if genDwarfInline > 1 {
if n.InlFormal() || n.InlLocal() {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.InlFormal() {
if n.Name.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
@ -707,9 +707,9 @@ func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if genDwarfInline > 1 {
if n.InlFormal() || n.InlLocal() {
if n.Name.InlFormal() || n.Name.InlLocal() {
inlIndex = posInlIndex(n.Pos) + 1
if n.InlFormal() {
if n.Name.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
@ -718,7 +718,7 @@ func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
dvar := &dwarf.Var{
Name: n.Sym.Name,
IsReturnValue: n.Class() == PPARAMOUT,
IsInlFormal: n.InlFormal(),
IsInlFormal: n.Name.InlFormal(),
Abbrev: abbrev,
Type: Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed

View file

@ -908,7 +908,7 @@ func (lv *Liveness) epilogue() {
if lv.fn.Func.HasDefer() {
for i, n := range lv.vars {
if n.Class() == PPARAMOUT {
if n.IsOutputParamHeapAddr() {
if n.Name.IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
Fatalf("variable %v both output param and heap output param", n)
}
@ -920,7 +920,7 @@ func (lv *Liveness) epilogue() {
// Note: zeroing is handled by zeroResults in walk.go.
livedefer.Set(int32(i))
}
if n.IsOutputParamHeapAddr() {
if n.Name.IsOutputParamHeapAddr() {
// This variable will be overwritten early in the function
// prologue (from the result of a mallocgc) but we need to
// zero it in case that malloc causes a stack scan.

View file

@ -1024,7 +1024,6 @@ func typename(t *types.Type) *Node {
n := nod(OADDR, asNode(s.Def), nil)
n.Type = types.NewPtr(asNode(s.Def).Type)
n.SetAddable(true)
n.SetTypecheck(1)
return n
}
@ -1045,7 +1044,6 @@ func itabname(t, itype *types.Type) *Node {
n := nod(OADDR, asNode(s.Def), nil)
n.Type = types.NewPtr(asNode(s.Def).Type)
n.SetAddable(true)
n.SetTypecheck(1)
return n
}
@ -1886,7 +1884,6 @@ func zeroaddr(size int64) *Node {
}
z := nod(OADDR, asNode(s.Def), nil)
z.Type = types.NewPtr(types.Types[TUINT8])
z.SetAddable(true)
z.SetTypecheck(1)
return z
}

View file

@ -388,7 +388,7 @@ func isLiteral(n *Node) bool {
}
func (n *Node) isSimpleName() bool {
return n.Op == ONAME && n.Addable() && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
}
func litas(l *Node, r *Node, init *Nodes) {
@ -1018,7 +1018,7 @@ func stataddr(nam *Node, n *Node) bool {
switch n.Op {
case ONAME:
*nam = *n
return n.Addable()
return true
case ODOT:
if !stataddr(nam, n.Left) {

View file

@ -1264,7 +1264,7 @@ func (s *state) stmt(n *Node) {
case OVARLIVE:
// Insert a varlive op to record that a variable is still live.
if !n.Left.Addrtaken() {
if !n.Left.Name.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
}
switch n.Left.Class() {
@ -3600,8 +3600,8 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
@ -3779,7 +3779,8 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
break
}
closure = s.expr(fn)
if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
if k != callDefer && k != callDeferStack && (thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo) {
// Deferred nil function needs to panic when the function is invoked, not the point of defer statement.
// On AIX, the closure needs to be verified as fn can be nil, except if it's a call go. This needs to be handled by the runtime to have the "go of nil func value" error.
// TODO(neelance): On other architectures this should be eliminated by the optimization steps
s.nilCheck(closure)
@ -4090,7 +4091,7 @@ func (s *state) canSSA(n *Node) bool {
if n.Op != ONAME {
return false
}
if n.Addrtaken() {
if n.Name.Addrtaken() {
return false
}
if n.isParamHeapCopy() {
@ -5257,7 +5258,7 @@ func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
var vars []*Node
for _, n := range e.curfn.Func.Dcl {
if livenessShouldTrack(n) && n.Addrtaken() {
if livenessShouldTrack(n) && n.Name.Addrtaken() {
vars = append(vars, n)
}
}
@ -6015,7 +6016,7 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this string up into two separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
@ -6029,7 +6030,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
n := name.N.(*Node)
u := types.Types[TUINTPTR]
t := types.NewPtr(types.Types[TUINT8])
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this interface up into two separate variables.
f := ".itab"
if n.Type.IsEmptyInterface() {
@ -6047,7 +6048,7 @@ func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ss
n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.Elem())
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this slice up into three separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
@ -6069,7 +6070,7 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
} else {
t = types.Types[TFLOAT32]
}
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this complex up into two separate variables.
r := e.splitSlot(&name, ".real", 0, t)
i := e.splitSlot(&name, ".imag", t.Size(), t)
@ -6087,7 +6088,7 @@ func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
} else {
t = types.Types[TUINT32]
}
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this int64 up into two separate variables.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
@ -6109,7 +6110,7 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
for f := 0; f < i; f++ {
offset += st.FieldType(f).Size()
}
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Note: the _ field may appear several times. But
// have no fear, identically-named but distinct Autos are
// ok, albeit maybe confusing for a debugger.
@ -6125,7 +6126,7 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
e.Fatalf(n.Pos, "bad array size")
}
et := at.Elem()
if n.Class() == PAUTO && !n.Addrtaken() {
if n.Class() == PAUTO && !n.Name.Addrtaken() {
return e.splitSlot(&name, "[0]", 0, et)
}
return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
@ -6151,7 +6152,6 @@ func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t
n.Sym = s
n.Type = t
n.SetClass(PAUTO)
n.SetAddable(true)
n.Esc = EscNever
n.Name.Curfn = e.curfn
e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)

View file

@ -370,7 +370,6 @@ func newnamel(pos src.XPos, s *types.Sym) *Node {
n.Orig = n
n.Sym = s
n.SetAddable(true)
return n
}

View file

@ -141,11 +141,7 @@ const (
nodeInitorder, _ // tracks state during init1; two bits
_, _ // second nodeInitorder bit
_, nodeHasBreak
_, nodeIsClosureVar
_, nodeIsOutputParamHeapAddr
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
_, nodeAssigned // is the variable ever assigned to
_, nodeAddrtaken // address taken, even if not moved to heap
_, nodeImplicit
_, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this
@ -153,14 +149,11 @@ const (
_, nodeNonNil // guaranteed to be non-nil
_, nodeTransient // storage can be reused immediately after this statement
_, nodeBounded // bounds check unnecessary
_, nodeAddable // addressable
_, nodeHasCall // expression contains a function call
_, nodeLikely // if statement condition likely
_, nodeHasVal // node.E contains a Val
_, nodeHasOpt // node.E contains an Opt
_, nodeEmbedded // ODCLFIELD embedded type
_, nodeInlFormal // OPAUTO created by inliner, derived from callee formal
_, nodeInlLocal // OPAUTO created by inliner, derived from callee local
)
func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
@ -169,11 +162,7 @@ func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
func (n *Node) IsClosureVar() bool { return n.flags&nodeIsClosureVar != 0 }
func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 }
func (n *Node) IsOutputParamHeapAddr() bool { return n.flags&nodeIsOutputParamHeapAddr != 0 }
func (n *Node) Assigned() bool { return n.flags&nodeAssigned != 0 }
func (n *Node) Addrtaken() bool { return n.flags&nodeAddrtaken != 0 }
func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 }
func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
@ -181,14 +170,11 @@ func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
func (n *Node) Addable() bool { return n.flags&nodeAddable != 0 }
func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 }
func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 }
func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
func (n *Node) InlFormal() bool { return n.flags&nodeInlFormal != 0 }
func (n *Node) InlLocal() bool { return n.flags&nodeInlLocal != 0 }
func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
@ -196,11 +182,7 @@ func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
func (n *Node) SetIsClosureVar(b bool) { n.flags.set(nodeIsClosureVar, b) }
func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
func (n *Node) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nodeIsOutputParamHeapAddr, b) }
func (n *Node) SetAssigned(b bool) { n.flags.set(nodeAssigned, b) }
func (n *Node) SetAddrtaken(b bool) { n.flags.set(nodeAddrtaken, b) }
func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
@ -208,14 +190,11 @@ func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
func (n *Node) SetNonNil(b bool) { n.flags.set(nodeNonNil, b) }
func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
func (n *Node) SetBounded(b bool) { n.flags.set(nodeBounded, b) }
func (n *Node) SetAddable(b bool) { n.flags.set(nodeAddable, b) }
func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) }
func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
func (n *Node) SetInlFormal(b bool) { n.flags.set(nodeInlFormal, b) }
func (n *Node) SetInlLocal(b bool) { n.flags.set(nodeInlLocal, b) }
// Val returns the Val for the node.
func (n *Node) Val() Val {
@ -299,7 +278,7 @@ type Name struct {
Param *Param // additional fields for ONAME, OTYPE
Decldepth int32 // declaration loop depth, increased for every loop or label
Vargen int32 // unique name for ONAME within a function. Function outputs are numbered starting at one.
flags bitset8
flags bitset16
}
const (
@ -310,6 +289,12 @@ const (
nameKeepalive // mark value live across unknown assembly call
nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
nameUsed // for variable declared and not used error
nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
nameAssigned // is the variable ever assigned to
nameAddrtaken // address taken, even if not moved to heap
nameInlFormal // OPAUTO created by inliner, derived from callee formal
nameInlLocal // OPAUTO created by inliner, derived from callee local
)
func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 }
@ -319,6 +304,12 @@ func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
func (n *Name) Keepalive() bool { return n.flags&nameKeepalive != 0 }
func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 }
func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) }
func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) }
@ -327,6 +318,12 @@ func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
func (n *Name) SetKeepalive(b bool) { n.flags.set(nameKeepalive, b) }
func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) }
func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
type Param struct {
Ntype *Node

View file

@ -828,20 +828,17 @@ func typecheck1(n *Node, top int) (res *Node) {
default:
checklvalue(n.Left, "take the address of")
r := outervalue(n.Left)
if r.Orig != r && r.Op == ONAME {
if r.Op == ONAME {
if r.Orig != r {
Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
}
for l := n.Left; ; l = l.Left {
l.SetAddrtaken(true)
if l.IsClosureVar() && !capturevarscomplete {
r.Name.SetAddrtaken(true)
if r.Name.IsClosureVar() && !capturevarscomplete {
// Mark the original variable as Addrtaken so that capturevars
// knows not to pass it by value.
// But if the capturevars phase is complete, don't touch it,
// in case l.Name's containing function has not yet been compiled.
l.Name.Defn.SetAddrtaken(true)
}
if l == r {
break
r.Name.Defn.Name.SetAddrtaken(true)
}
}
n.Left = defaultlit(n.Left, nil)
@ -3061,18 +3058,12 @@ func checkassign(stmt *Node, n *Node) {
// Variables declared in ORANGE are assigned on every iteration.
if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
r := outervalue(n)
var l *Node
for l = n; l != r; l = l.Left {
l.SetAssigned(true)
if l.IsClosureVar() {
l.Name.Defn.SetAssigned(true)
if r.Op == ONAME {
r.Name.SetAssigned(true)
if r.Name.IsClosureVar() {
r.Name.Defn.Name.SetAssigned(true)
}
}
l.SetAssigned(true)
if l.IsClosureVar() {
l.Name.Defn.SetAssigned(true)
}
}
if islvalue(n) {
@ -3821,6 +3812,33 @@ func checkreturn(fn *Node) {
func deadcode(fn *Node) {
deadcodeslice(fn.Nbody)
deadcodefn(fn)
}
func deadcodefn(fn *Node) {
if fn.Nbody.Len() == 0 {
return
}
for _, n := range fn.Nbody.Slice() {
if n.Ninit.Len() > 0 {
return
}
switch n.Op {
case OIF:
if !Isconst(n.Left, CTBOOL) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 {
return
}
case OFOR:
if !Isconst(n.Left, CTBOOL) || n.Left.Bool() {
return
}
default:
return
}
}
fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)})
}
func deadcodeslice(nn Nodes) {

View file

@ -97,7 +97,7 @@ func paramoutheap(fn *Node) bool {
for _, ln := range fn.Func.Dcl {
switch ln.Class() {
case PPARAMOUT:
if ln.isParamStackCopy() || ln.Addrtaken() {
if ln.isParamStackCopy() || ln.Name.Addrtaken() {
return true
}
@ -195,7 +195,7 @@ func walkstmt(n *Node) *Node {
v := n.Left
if v.Class() == PAUTOHEAP {
if compiling_runtime {
yyerror("%v escapes to heap, not allowed in runtime.", v)
yyerror("%v escapes to heap, not allowed in runtime", v)
}
if prealloc[v] == nil {
prealloc[v] = callnew(v.Type)
@ -564,7 +564,6 @@ opswitch:
n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
case OCLOSUREVAR, OCFUNC:
n.SetAddable(true)
case OCALLINTER, OCALLFUNC, OCALLMETH:
if n.Op == OCALLINTER {
@ -705,7 +704,7 @@ opswitch:
n = liststmt(ll)
// x, y = <-c
// orderstmt made sure x is addressable.
// order.stmt made sure x is addressable or blank.
case OAS2RECV:
init.AppendNodes(&n.Ninit)
@ -720,7 +719,7 @@ opswitch:
}
fn := chanfn("chanrecv2", 2, r.Left.Type)
ok := n.List.Second()
call := mkcall1(fn, ok.Type, init, r.Left, n1)
call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1)
n = nod(OAS, ok, call)
n = typecheck(n, ctxStmt)
@ -952,6 +951,16 @@ opswitch:
case OCONV, OCONVNOP:
n.Left = walkexpr(n.Left, init)
if n.Op == OCONVNOP && checkPtr(Curfn) {
if n.Type.IsPtr() && n.Left.Type.Etype == TUNSAFEPTR { // unsafe.Pointer to *T
n = walkCheckPtrAlignment(n, init)
break
}
if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR { // uintptr to unsafe.Pointer
n = walkCheckPtrArithmetic(n, init)
break
}
}
param, result := rtconvfn(n.Left.Type, n.Type)
if param == Txxx {
break
@ -2098,7 +2107,7 @@ func aliased(n *Node, all []*Node, i int) bool {
continue
case PAUTO, PPARAM, PPARAMOUT:
if n.Addrtaken() {
if n.Name.Addrtaken() {
varwrite = true
continue
}
@ -2146,7 +2155,7 @@ func varexpr(n *Node) bool {
case ONAME:
switch n.Class() {
case PAUTO, PPARAM, PPARAMOUT:
if !n.Addrtaken() {
if !n.Name.Addrtaken() {
return true
}
}
@ -2383,6 +2392,9 @@ func conv(n *Node, t *types.Type) *Node {
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
func convnop(n *Node, t *types.Type) *Node {
if types.Identical(n.Type, t) {
return n
}
n = nod(OCONVNOP, n, nil)
n.Type = t
n = typecheck(n, ctxExpr)
@ -3899,3 +3911,72 @@ func canMergeLoads() bool {
func isRuneCount(n *Node) bool {
return Debug['N'] == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
}
func walkCheckPtrAlignment(n *Node, init *Nodes) *Node {
if n.Type.Elem().Alignment() == 1 && n.Type.Elem().Size() == 1 {
return n
}
n.Left = cheapexpr(n.Left, init)
init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(n.Type.Elem())))
return n
}
var walkCheckPtrArithmeticMarker byte
func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
// Calling cheapexpr(n, init) below leads to a recursive call
// to walkexpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
if n.Opt() == &walkCheckPtrArithmeticMarker {
return n
}
n.SetOpt(&walkCheckPtrArithmeticMarker)
defer n.SetOpt(nil)
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
switch n.Left.Op {
case OCALLFUNC, OCALLMETH, OCALLINTER:
return n
}
// Find original unsafe.Pointer operands involved in this
// arithmetic expression.
//
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
var originals []*Node
var walk func(n *Node)
walk = func(n *Node) {
switch n.Op {
case OADD:
walk(n.Left)
walk(n.Right)
case OSUB, OANDNOT:
walk(n.Left)
case OCONVNOP:
if n.Left.Type.Etype == TUNSAFEPTR {
n.Left = cheapexpr(n.Left, init)
originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
}
}
}
walk(n.Left)
n = cheapexpr(n, init)
slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals, init, nil)
slice.Esc = EscNone
slice.SetTransient(true)
init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
return n
}
// checkPtr reports whether pointer checking should be enabled for
// function fn.
func checkPtr(fn *Node) bool {
return Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr == 0
}

View file

@ -286,10 +286,12 @@ commas. For example:
if phase == "check" && flag == "on" {
checkEnabled = val != 0
debugPoset = checkEnabled // also turn on advanced self-checking in prove's datastructure
return ""
}
if phase == "check" && flag == "off" {
checkEnabled = val == 0
debugPoset = checkEnabled
return ""
}

View file

@ -567,11 +567,6 @@
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
// For amd64p32
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 -> (LoweredPanicExtendA [kind] hi lo y mem)
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 -> (LoweredPanicExtendB [kind] hi lo y mem)
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 -> (LoweredPanicExtendC [kind] hi lo y mem)
// ***************************
// Above: lowering rules
// Below: optimizations

View file

@ -94,7 +94,6 @@ func init() {
cx = buildReg("CX")
dx = buildReg("DX")
bx = buildReg("BX")
si = buildReg("SI")
gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15")
fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15")
gpsp = gp | buildReg("SP")
@ -718,12 +717,6 @@ func init() {
{name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
{name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
// amd64p32 only: PanicBounds ops take 32-bit indexes.
// The Extend ops are the same as the Bounds ops except the indexes are 64-bit.
{name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, dx, bx}}, typ: "Mem"}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
{name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, cx, dx}}, typ: "Mem"}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
{name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, ax, cx}}, typ: "Mem"}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
// Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the
// three from the unsigned total order. The == cases overlap.

View file

@ -154,7 +154,7 @@ func init() {
reg: regInfo{
inputs: []regMask{buildReg("R1"), buildReg("R0")},
outputs: []regMask{buildReg("R0"), buildReg("R1")},
clobbers: buildReg("R2 R3 R14"), // also clobbers R12 on NaCl (modified in ../config.go)
clobbers: buildReg("R2 R3 R14"),
},
clobberFlags: true,
typ: "(UInt32,UInt32)",

View file

@ -10,6 +10,7 @@
(Mul(64|32|16|8) x y) -> (Select1 (MULVU x y))
(Mul(32|64)F x y) -> (MUL(F|D) x y)
(Mul64uhilo x y) -> (MULVU x y)
(Hmul64 x y) -> (Select0 (MULV x y))
(Hmul64u x y) -> (Select0 (MULVU x y))

View file

@ -199,8 +199,8 @@ var faultOnLoad = objabi.GOOS != "aix"
// nilcheckelim2 eliminates unnecessary nil checks.
// Runs after lowering and scheduling.
func nilcheckelim2(f *Func) {
unnecessary := f.newSparseSet(f.NumValues())
defer f.retSparseSet(unnecessary)
unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[]
defer f.retSparseMap(unnecessary)
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
@ -218,9 +218,21 @@ func nilcheckelim2(f *Func) {
if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
f.Warnl(v.Pos, "removed nil check")
}
if v.Pos.IsStmt() == src.PosIsStmt {
// For bug 33724, policy is that we might choose to bump an existing position
// off the faulting load/store in favor of the one from the nil check.
// Iteration order means that first nilcheck in the chain wins, others
// are bumped into the ordinary statement preservation algorithm.
u := b.Values[unnecessary.get(v.Args[0].ID)]
if !u.Pos.SameFileAndLine(v.Pos) {
if u.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(u.Pos)
}
u.Pos = v.Pos
} else if v.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(v.Pos)
}
v.reset(OpUnknown)
firstToRemove = i
continue
@ -294,7 +306,7 @@ func nilcheckelim2(f *Func) {
}
// This instruction is guaranteed to fault if ptr is nil.
// Any previous nil check op is unnecessary.
unnecessary.add(ptr.ID)
unnecessary.set(ptr.ID, int32(i), src.NoXPos)
}
}
// Remove values we've clobbered with OpUnknown.
@ -302,7 +314,7 @@ func nilcheckelim2(f *Func) {
for j := i; j < len(b.Values); j++ {
v := b.Values[j]
if v.Op != OpUnknown {
if v.Pos.IsStmt() != src.PosNotStmt && pendingLines.contains(v.Pos) {
if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now.
v.Pos = v.Pos.WithIsStmt()
pendingLines.remove(v.Pos)
}

View file

@ -74,7 +74,7 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int {
// rewrite.
func notStmtBoundary(op Op) bool {
switch op {
case OpCopy, OpPhi, OpVarKill, OpVarDef, OpUnknown, OpFwdRef, OpArg:
case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg:
return true
}
return false

View file

@ -868,9 +868,6 @@ const (
OpAMD64LoweredPanicBoundsA
OpAMD64LoweredPanicBoundsB
OpAMD64LoweredPanicBoundsC
OpAMD64LoweredPanicExtendA
OpAMD64LoweredPanicExtendB
OpAMD64LoweredPanicExtendC
OpAMD64FlagEQ
OpAMD64FlagLT_ULT
OpAMD64FlagLT_UGT
@ -11317,42 +11314,6 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "LoweredPanicExtendA",
auxType: auxInt64,
argLen: 4,
reg: regInfo{
inputs: []inputInfo{
{0, 64}, // SI
{1, 4}, // DX
{2, 8}, // BX
},
},
},
{
name: "LoweredPanicExtendB",
auxType: auxInt64,
argLen: 4,
reg: regInfo{
inputs: []inputInfo{
{0, 64}, // SI
{1, 2}, // CX
{2, 4}, // DX
},
},
},
{
name: "LoweredPanicExtendC",
auxType: auxInt64,
argLen: 4,
reg: regInfo{
inputs: []inputInfo{
{0, 64}, // SI
{1, 1}, // AX
{2, 2}, // CX
},
},
},
{
name: "FlagEQ",
argLen: 0,

View file

@ -9,6 +9,9 @@ import (
"os"
)
// If true, check poset integrity after every mutation
var debugPoset = false
const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64
// bitset is a bit array for dense indexes.
@ -45,6 +48,7 @@ const (
undoSetChr // change back right child of undo.idx to undo.edge
undoNonEqual // forget that SSA value undo.ID is non-equal to undo.idx (another ID)
undoNewNode // remove new node created for SSA value undo.ID
undoNewConstant // remove the constant node idx from the constants map
undoAliasNode // unalias SSA value undo.ID so that it points back to node index undo.idx
undoNewRoot // remove node undo.idx from root list
undoChangeRoot // remove node undo.idx from root list, and put back undo.edge.Target instead
@ -146,20 +150,20 @@ type poset struct {
lastidx uint32 // last generated dense index
flags uint8 // internal flags
values map[ID]uint32 // map SSA values to dense indexes
constants []*Value // record SSA constants together with their value
constants map[int64]uint32 // record SSA constants together with their value
nodes []posetNode // nodes (in all DAGs)
roots []uint32 // list of root nodes (forest)
noneq map[ID]bitset // non-equal relations
noneq map[uint32]bitset // non-equal relations
undo []posetUndo // undo chain
}
func newPoset() *poset {
return &poset{
values: make(map[ID]uint32),
constants: make([]*Value, 0, 8),
constants: make(map[int64]uint32, 8),
nodes: make([]posetNode, 1, 16),
roots: make([]uint32, 0, 4),
noneq: make(map[ID]bitset),
noneq: make(map[uint32]bitset),
undo: make([]posetUndo, 0, 4),
}
}
@ -193,8 +197,8 @@ func (po *poset) upushnew(id ID, idx uint32) {
}
// upushneq pushes a new undo pass for a nonequal relation
func (po *poset) upushneq(id1 ID, id2 ID) {
po.undo = append(po.undo, posetUndo{typ: undoNonEqual, ID: id1, idx: uint32(id2)})
func (po *poset) upushneq(idx1 uint32, idx2 uint32) {
po.undo = append(po.undo, posetUndo{typ: undoNonEqual, ID: ID(idx1), idx: idx2})
}
// upushalias pushes a new undo pass for aliasing two nodes
@ -202,6 +206,11 @@ func (po *poset) upushalias(id ID, i2 uint32) {
po.undo = append(po.undo, posetUndo{typ: undoAliasNode, ID: id, idx: i2})
}
// upushconst pushes a new undo pass for a new constant
func (po *poset) upushconst(idx uint32, old uint32) {
po.undo = append(po.undo, posetUndo{typ: undoNewConstant, idx: idx, ID: ID(old)})
}
// addchild adds i2 as direct child of i1.
func (po *poset) addchild(i1, i2 uint32, strict bool) {
i1l, i1r := po.children(i1)
@ -278,18 +287,33 @@ func (po *poset) newconst(n *Value) {
panic("newconst on non-constant")
}
// If this is the first constant, put it into a new root, as
// If the same constant is already present in the poset through a different
// Value, just alias to it without allocating a new node.
val := n.AuxInt
if po.flags&posetFlagUnsigned != 0 {
val = int64(n.AuxUnsigned())
}
if c, found := po.constants[val]; found {
po.values[n.ID] = c
po.upushalias(n.ID, 0)
return
}
// Create the new node for this constant
i := po.newnode(n)
// If this is the first constant, put it as a new root, as
// we can't record an existing connection so we don't have
// a specific DAG to add it to. Notice that we want all
// constants to be in root #0, so make sure the new root
// goes there.
if len(po.constants) == 0 {
idx := len(po.roots)
i := po.newnode(n)
po.roots = append(po.roots, i)
po.roots[0], po.roots[idx] = po.roots[idx], po.roots[0]
po.upush(undoNewRoot, i, 0)
po.constants = append(po.constants, n)
po.constants[val] = i
po.upushconst(i, 0)
return
}
@ -298,21 +322,20 @@ func (po *poset) newconst(n *Value) {
// and the lower constant that is higher.
// The loop is duplicated to handle signed and unsigned comparison,
// depending on how the poset was configured.
var lowerptr, higherptr *Value
var lowerptr, higherptr uint32
if po.flags&posetFlagUnsigned != 0 {
var lower, higher uint64
val1 := n.AuxUnsigned()
for _, ptr := range po.constants {
val2 := ptr.AuxUnsigned()
for val2, ptr := range po.constants {
val2 := uint64(val2)
if val1 == val2 {
po.aliasnode(ptr, n)
return
panic("unreachable")
}
if val2 < val1 && (lowerptr == nil || val2 > lower) {
if val2 < val1 && (lowerptr == 0 || val2 > lower) {
lower = val2
lowerptr = ptr
} else if val2 > val1 && (higherptr == nil || val2 < higher) {
} else if val2 > val1 && (higherptr == 0 || val2 < higher) {
higher = val2
higherptr = ptr
}
@ -320,23 +343,21 @@ func (po *poset) newconst(n *Value) {
} else {
var lower, higher int64
val1 := n.AuxInt
for _, ptr := range po.constants {
val2 := ptr.AuxInt
for val2, ptr := range po.constants {
if val1 == val2 {
po.aliasnode(ptr, n)
return
panic("unreachable")
}
if val2 < val1 && (lowerptr == nil || val2 > lower) {
if val2 < val1 && (lowerptr == 0 || val2 > lower) {
lower = val2
lowerptr = ptr
} else if val2 > val1 && (higherptr == nil || val2 < higher) {
} else if val2 > val1 && (higherptr == 0 || val2 < higher) {
higher = val2
higherptr = ptr
}
}
}
if lowerptr == nil && higherptr == nil {
if lowerptr == 0 && higherptr == 0 {
// This should not happen, as at least one
// other constant must exist if we get here.
panic("no constant found")
@ -347,18 +368,17 @@ func (po *poset) newconst(n *Value) {
// of them, depending on what other constants are present in the poset.
// Notice that we always link constants together, so they
// are always part of the same DAG.
i := po.newnode(n)
switch {
case lowerptr != nil && higherptr != nil:
case lowerptr != 0 && higherptr != 0:
// Both bounds are present, record lower < n < higher.
po.addchild(po.values[lowerptr.ID], i, true)
po.addchild(i, po.values[higherptr.ID], true)
po.addchild(lowerptr, i, true)
po.addchild(i, higherptr, true)
case lowerptr != nil:
case lowerptr != 0:
// Lower bound only, record lower < n.
po.addchild(po.values[lowerptr.ID], i, true)
po.addchild(lowerptr, i, true)
case higherptr != nil:
case higherptr != 0:
// Higher bound only. To record n < higher, we need
// a dummy root:
//
@ -370,7 +390,7 @@ func (po *poset) newconst(n *Value) {
// \ /
// higher
//
i2 := po.values[higherptr.ID]
i2 := higherptr
r2 := po.findroot(i2)
if r2 != po.roots[0] { // all constants should be in root #0
panic("constant not in root #0")
@ -383,7 +403,8 @@ func (po *poset) newconst(n *Value) {
po.addchild(i, i2, true)
}
po.constants = append(po.constants, n)
po.constants[val] = i
po.upushconst(i, 0)
}
// aliasnode records that n2 is an alias of n1
@ -419,6 +440,19 @@ func (po *poset) aliasnode(n1, n2 *Value) {
po.upushalias(k, i2)
}
}
if n2.isGenericIntConst() {
val := n2.AuxInt
if po.flags&posetFlagUnsigned != 0 {
val = int64(n2.AuxUnsigned())
}
if po.constants[val] != i2 {
panic("aliasing constant which is not registered")
}
po.constants[val] = i1
po.upushconst(i1, i2)
}
} else {
// n2.ID wasn't seen before, so record it as alias to i1
po.values[n2.ID] = i1
@ -588,38 +622,61 @@ func (po *poset) collapsepath(n1, n2 *Value) bool {
return true
}
// Check whether it is recorded that id1!=id2
func (po *poset) isnoneq(id1, id2 ID) bool {
if id1 < id2 {
id1, id2 = id2, id1
// Check whether it is recorded that i1!=i2
func (po *poset) isnoneq(i1, i2 uint32) bool {
if i1 == i2 {
return false
}
if i1 < i2 {
i1, i2 = i2, i1
}
// Check if we recorded a non-equal relation before
if bs, ok := po.noneq[id1]; ok && bs.Test(uint32(id2)) {
if bs, ok := po.noneq[i1]; ok && bs.Test(i2) {
return true
}
return false
}
// Record that id1!=id2
func (po *poset) setnoneq(id1, id2 ID) {
if id1 < id2 {
id1, id2 = id2, id1
// Record that i1!=i2
func (po *poset) setnoneq(n1, n2 *Value) {
i1, f1 := po.lookup(n1)
i2, f2 := po.lookup(n2)
// If any of the nodes do not exist in the poset, allocate them. Since
// we don't know any relation (in the partial order) about them, they must
// become independent roots.
if !f1 {
i1 = po.newnode(n1)
po.roots = append(po.roots, i1)
po.upush(undoNewRoot, i1, 0)
}
bs := po.noneq[id1]
if !f2 {
i2 = po.newnode(n2)
po.roots = append(po.roots, i2)
po.upush(undoNewRoot, i2, 0)
}
if i1 == i2 {
panic("setnoneq on same node")
}
if i1 < i2 {
i1, i2 = i2, i1
}
bs := po.noneq[i1]
if bs == nil {
// Given that we record non-equality relations using the
// higher ID as a key, the bitsize will never change size.
// higher index as a key, the bitsize will never change size.
// TODO(rasky): if memory is a problem, consider allocating
// a small bitset and lazily grow it when higher IDs arrive.
bs = newBitset(int(id1))
po.noneq[id1] = bs
} else if bs.Test(uint32(id2)) {
// a small bitset and lazily grow it when higher indices arrive.
bs = newBitset(int(i1))
po.noneq[i1] = bs
} else if bs.Test(i2) {
// Already recorded
return
}
bs.Set(uint32(id2))
po.upushneq(id1, id2)
bs.Set(i2)
po.upushneq(i1, i2)
}
// CheckIntegrity verifies internal integrity of a poset. It is intended
@ -628,11 +685,7 @@ func (po *poset) CheckIntegrity() {
// Record which index is a constant
constants := newBitset(int(po.lastidx + 1))
for _, c := range po.constants {
if idx, ok := po.values[c.ID]; !ok {
panic("node missing for constant")
} else {
constants.Set(idx)
}
constants.Set(c)
}
// Verify that each node appears in a single DAG, and that
@ -729,15 +782,10 @@ func (po *poset) DotDump(fn string, title string) error {
names[i] = s
}
// Create constant mapping
// Create reverse constant mapping
consts := make(map[uint32]int64)
for _, v := range po.constants {
idx := po.values[v.ID]
if po.flags&posetFlagUnsigned != 0 {
consts[idx] = int64(v.AuxUnsigned())
} else {
consts[idx] = v.AuxInt
}
for val, idx := range po.constants {
consts[idx] = val
}
fmt.Fprintf(f, "digraph poset {\n")
@ -785,6 +833,9 @@ func (po *poset) DotDump(fn string, title string) error {
// to tell.
// Complexity is O(n).
func (po *poset) Ordered(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call Ordered with n1==n2")
}
@ -803,6 +854,9 @@ func (po *poset) Ordered(n1, n2 *Value) bool {
// to tell.
// Complexity is O(n).
func (po *poset) OrderedOrEqual(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call Ordered with n1==n2")
}
@ -813,8 +867,7 @@ func (po *poset) OrderedOrEqual(n1, n2 *Value) bool {
return false
}
return i1 == i2 || po.reaches(i1, i2, false) ||
(po.reaches(i2, i1, false) && !po.reaches(i2, i1, true))
return i1 == i2 || po.reaches(i1, i2, false)
}
// Equal reports whether n1==n2. It returns false either when it is
@ -822,6 +875,9 @@ func (po *poset) OrderedOrEqual(n1, n2 *Value) bool {
// to tell.
// Complexity is O(1).
func (po *poset) Equal(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call Equal with n1==n2")
}
@ -837,10 +893,23 @@ func (po *poset) Equal(n1, n2 *Value) bool {
// Complexity is O(n) (because it internally calls Ordered to see if we
// can infer n1!=n2 from n1<n2 or n2<n1).
func (po *poset) NonEqual(n1, n2 *Value) bool {
if n1.ID == n2.ID {
panic("should not call Equal with n1==n2")
if debugPoset {
defer po.CheckIntegrity()
}
if po.isnoneq(n1.ID, n2.ID) {
if n1.ID == n2.ID {
panic("should not call NonEqual with n1==n2")
}
// If we never saw the nodes before, we don't
// have a recorded non-equality.
i1, f1 := po.lookup(n1)
i2, f2 := po.lookup(n2)
if !f1 || !f2 {
return false
}
// Check if we recorded inequality
if po.isnoneq(i1, i2) {
return true
}
@ -852,15 +921,10 @@ func (po *poset) NonEqual(n1, n2 *Value) bool {
return false
}
// setOrder records that n1<n2 or n1<=n2 (depending on strict).
// setOrder records that n1<n2 or n1<=n2 (depending on strict). Returns false
// if this is a contradiction.
// Implements SetOrder() and SetOrderOrEqual()
func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
// If we are trying to record n1<=n2 but we learned that n1!=n2,
// record n1<n2, as it provides more information.
if !strict && po.isnoneq(n1.ID, n2.ID) {
strict = true
}
i1, f1 := po.lookup(n1)
i2, f2 := po.lookup(n2)
@ -919,6 +983,12 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
return !strict
}
// If we are trying to record n1<=n2 but we learned that n1!=n2,
// record n1<n2, as it provides more information.
if !strict && po.isnoneq(i1, i2) {
strict = true
}
// Both n1 and n2 are in the poset. This is the complex part of the algorithm
// as we need to find many different cases and DAG shapes.
@ -983,6 +1053,9 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
// SetOrder records that n1<n2. Returns false if this is a contradiction
// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
func (po *poset) SetOrder(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call SetOrder with n1==n2")
}
@ -992,6 +1065,9 @@ func (po *poset) SetOrder(n1, n2 *Value) bool {
// SetOrderOrEqual records that n1<=n2. Returns false if this is a contradiction
// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
func (po *poset) SetOrderOrEqual(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call SetOrder with n1==n2")
}
@ -1002,15 +1078,13 @@ func (po *poset) SetOrderOrEqual(n1, n2 *Value) bool {
// (that is, if it is already recorded that n1<n2 or n2<n1).
// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
func (po *poset) SetEqual(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call Add with n1==n2")
}
// If we recorded that n1!=n2, this is a contradiction.
if po.isnoneq(n1.ID, n2.ID) {
return false
}
i1, f1 := po.lookup(n1)
i2, f2 := po.lookup(n2)
@ -1030,6 +1104,11 @@ func (po *poset) SetEqual(n1, n2 *Value) bool {
return true
}
// If we recorded that n1!=n2, this is a contradiction.
if po.isnoneq(i1, i2) {
return false
}
// If we already knew that n1<=n2, we can collapse the path to
// record n1==n2 (and viceversa).
if po.reaches(i1, i2, false) {
@ -1061,35 +1140,47 @@ func (po *poset) SetEqual(n1, n2 *Value) bool {
// (that is, if it is already recorded that n1==n2).
// Complexity is O(n).
func (po *poset) SetNonEqual(n1, n2 *Value) bool {
if debugPoset {
defer po.CheckIntegrity()
}
if n1.ID == n2.ID {
panic("should not call Equal with n1==n2")
panic("should not call SetNonEqual with n1==n2")
}
// See if we already know this
if po.isnoneq(n1.ID, n2.ID) {
// Check whether the nodes are already in the poset
i1, f1 := po.lookup(n1)
i2, f2 := po.lookup(n2)
// If either node wasn't present, we just record the new relation
// and exit.
if !f1 || !f2 {
po.setnoneq(n1, n2)
return true
}
// Check if we're contradicting an existing relation
// See if we already know this, in which case there's nothing to do.
if po.isnoneq(i1, i2) {
return true
}
// Check if we're contradicting an existing equality relation
if po.Equal(n1, n2) {
return false
}
// Record non-equality
po.setnoneq(n1.ID, n2.ID)
po.setnoneq(n1, n2)
// If we know that i1<=i2 but not i1<i2, learn that as we
// now know that they are not equal. Do the same for i2<=i1.
i1, f1 := po.lookup(n1)
i2, f2 := po.lookup(n2)
if f1 && f2 {
// Do this check only if both nodes were already in the DAG,
// otherwise there cannot be an existing relation.
if po.reaches(i1, i2, false) && !po.reaches(i1, i2, true) {
po.addchild(i1, i2, true)
}
if po.reaches(i2, i1, false) && !po.reaches(i2, i1, true) {
po.addchild(i2, i1, true)
}
}
return true
}
@ -1109,6 +1200,9 @@ func (po *poset) Undo() {
if len(po.undo) == 0 {
panic("empty undo stack")
}
if debugPoset {
defer po.CheckIntegrity()
}
for len(po.undo) > 0 {
pass := po.undo[len(po.undo)-1]
@ -1125,7 +1219,7 @@ func (po *poset) Undo() {
po.setchr(pass.idx, pass.edge)
case undoNonEqual:
po.noneq[pass.ID].Clear(pass.idx)
po.noneq[uint32(pass.ID)].Clear(pass.idx)
case undoNewNode:
if pass.idx != po.lastidx {
@ -1142,10 +1236,25 @@ func (po *poset) Undo() {
po.nodes = po.nodes[:pass.idx]
po.lastidx--
// If it was the last inserted constant, remove it
nc := len(po.constants)
if nc > 0 && po.constants[nc-1].ID == pass.ID {
po.constants = po.constants[:nc-1]
case undoNewConstant:
// FIXME: remove this O(n) loop
var val int64
var i uint32
for val, i = range po.constants {
if i == pass.idx {
break
}
}
if i != pass.idx {
panic("constant not found in undo pass")
}
if pass.ID == 0 {
delete(po.constants, val)
} else {
// Restore previous index as constant node
// (also restoring the invariant on correct bounds)
oldidx := uint32(pass.ID)
po.constants[val] = oldidx
}
case undoAliasNode:
@ -1188,4 +1297,8 @@ func (po *poset) Undo() {
panic(pass.typ)
}
}
if debugPoset && po.CheckEmpty() != nil {
panic("poset not empty at the end of undo")
}
}

View file

@ -184,7 +184,7 @@ func TestPoset(t *testing.T) {
{OrderedOrEqual, 4, 12},
{OrderedOrEqual_Fail, 12, 4},
{OrderedOrEqual, 4, 7},
{OrderedOrEqual, 7, 4},
{OrderedOrEqual_Fail, 7, 4},
// Dag #1: 1<4<=7<12
{Checkpoint, 0, 0},
@ -448,7 +448,7 @@ func TestSetEqual(t *testing.T) {
{SetOrderOrEqual, 20, 100},
{SetOrder, 100, 110},
{OrderedOrEqual, 10, 30},
{OrderedOrEqual, 30, 10},
{OrderedOrEqual_Fail, 30, 10},
{Ordered_Fail, 10, 30},
{Ordered_Fail, 30, 10},
{Ordered, 10, 40},

View file

@ -990,8 +990,6 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpOrB_0(v)
case OpPanicBounds:
return rewriteValueAMD64_OpPanicBounds_0(v)
case OpPanicExtend:
return rewriteValueAMD64_OpPanicExtend_0(v)
case OpPopCount16:
return rewriteValueAMD64_OpPopCount16_0(v)
case OpPopCount32:
@ -55176,69 +55174,6 @@ func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool {
}
return false
}
// rewriteValueAMD64_OpPanicExtend_0 lowers the generic PanicExtend op to one
// of the AMD64-specific LoweredPanicExtendA/B/C ops, selected by
// boundsABI(kind). The AuxInt (bounds-check kind) and the four arguments
// (hi, lo, y, mem) are carried over to the lowered op unchanged.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpPanicExtend_0(v *Value) bool {
	// match: (PanicExtend [kind] hi lo y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicExtendA [kind] hi lo y mem)
	for {
		kind := v.AuxInt
		mem := v.Args[3]
		hi := v.Args[0]
		lo := v.Args[1]
		y := v.Args[2]
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicExtendA)
		v.AuxInt = kind
		v.AddArg(hi)
		v.AddArg(lo)
		v.AddArg(y)
		v.AddArg(mem)
		return true
	}
	// match: (PanicExtend [kind] hi lo y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicExtendB [kind] hi lo y mem)
	for {
		kind := v.AuxInt
		mem := v.Args[3]
		hi := v.Args[0]
		lo := v.Args[1]
		y := v.Args[2]
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicExtendB)
		v.AuxInt = kind
		v.AddArg(hi)
		v.AddArg(lo)
		v.AddArg(y)
		v.AddArg(mem)
		return true
	}
	// match: (PanicExtend [kind] hi lo y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicExtendC [kind] hi lo y mem)
	for {
		kind := v.AuxInt
		mem := v.Args[3]
		hi := v.Args[0]
		lo := v.Args[1]
		y := v.Args[2]
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicExtendC)
		v.AuxInt = kind
		v.AddArg(hi)
		v.AddArg(lo)
		v.AddArg(y)
		v.AddArg(mem)
		return true
	}
	// No rule matched: boundsABI(kind) returned something other than 0, 1, or 2.
	return false
}
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types

View file

@ -415,6 +415,8 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMul64_0(v)
case OpMul64F:
return rewriteValueMIPS64_OpMul64F_0(v)
case OpMul64uhilo:
return rewriteValueMIPS64_OpMul64uhilo_0(v)
case OpMul8:
return rewriteValueMIPS64_OpMul8_0(v)
case OpNeg16:
@ -6796,6 +6798,18 @@ func rewriteValueMIPS64_OpMul64F_0(v *Value) bool {
return true
}
}
// rewriteValueMIPS64_OpMul64uhilo_0 lowers the generic Mul64uhilo op
// to the MIPS64 MULVU op with the same two operands.
// The rule is unconditional, so this always rewrites and returns true.
func rewriteValueMIPS64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// result: (MULVU x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpMIPS64MULVU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueMIPS64_OpMul8_0(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types

View file

@ -3,6 +3,8 @@
// license that can be found in the LICENSE file.
// Package dirhash defines hashes over directory trees.
// These hashes are recorded in go.sum files and in the Go checksum database,
// to allow verifying that a newly-downloaded module has the expected content.
package dirhash
import (
@ -18,17 +20,34 @@ import (
"strings"
)
var DefaultHash = Hash1
// DefaultHash is the default hash function used in new go.sum entries.
var DefaultHash Hash = Hash1
// A Hash is a directory hash function.
// It accepts a list of files along with a function that opens the content of each file.
// It opens, reads, hashes, and closes each file and returns the overall directory hash.
type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error)
// Hash1 is the "h1:" directory hash function, using SHA-256.
//
// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary
// prepared as if by the Unix command:
//
// find . -type f | sort | sha256sum
//
// More precisely, the hashed summary contains a single line for each file in the list,
// ordered by sort.Strings applied to the file names, where each line consists of
// the hexadecimal SHA-256 hash of the file content,
// two spaces (U+0020), the file name, and a newline (U+000A).
//
// File names with newlines (U+000A) are disallowed.
func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) {
h := sha256.New()
files = append([]string(nil), files...)
sort.Strings(files)
for _, file := range files {
if strings.Contains(file, "\n") {
return "", errors.New("filenames with newlines are not supported")
return "", errors.New("dirhash: filenames with newlines are not supported")
}
r, err := open(file)
if err != nil {
@ -45,6 +64,9 @@ func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, er
return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}
// HashDir returns the hash of the local file system directory dir,
// replacing the directory name itself with prefix in the file names
// used in the hash function.
func HashDir(dir, prefix string, hash Hash) (string, error) {
files, err := DirFiles(dir, prefix)
if err != nil {
@ -56,6 +78,9 @@ func HashDir(dir, prefix string, hash Hash) (string, error) {
return hash(files, osOpen)
}
// DirFiles returns the list of files in the tree rooted at dir,
// replacing the directory name dir with prefix in each name.
// The resulting names always use forward slashes.
func DirFiles(dir, prefix string) ([]string, error) {
var files []string
dir = filepath.Clean(dir)
@ -80,6 +105,10 @@ func DirFiles(dir, prefix string) ([]string, error) {
return files, nil
}
// HashZip returns the hash of the file content in the named zip file.
// Only the file names and their contents are included in the hash:
// the exact zip file format encoding, compression method,
// per-file modification times, and other metadata are ignored.
func HashZip(zipfile string, hash Hash) (string, error) {
z, err := zip.OpenReader(zipfile)
if err != nil {

View file

@ -343,6 +343,18 @@ func checkEnvWrite(key, val string, env []cfg.EnvVar) error {
return fmt.Errorf("unknown go command variable %s", key)
}
// Some variables can only have one of a few valid values. If set to an
// invalid value, the next cmd/go invocation might fail immediately,
// even 'go env -w' itself.
switch key {
case "GO111MODULE":
switch val {
case "", "auto", "on", "off":
default:
return fmt.Errorf("invalid %s value %q", key, val)
}
}
if !utf8.ValidString(val) {
return fmt.Errorf("invalid UTF-8 in %s=... value", key)
}

View file

@ -536,7 +536,7 @@ type vcsPath struct {
repo string // repository to use (expand with match of re)
vcs string // version control system to use (expand with match of re)
check func(match map[string]string) error // additional checks
ping bool // ping for scheme to use to download repo
schemelessRepo bool // if true, the repo pattern lacks a scheme
}
// vcsFromDir inspects dir and its parents to determine the
@ -657,7 +657,7 @@ const (
// RepoRootForImportPath analyzes importPath to determine the
// version control system, and code repository to use.
func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) {
rr, err := repoRootFromVCSPaths(importPath, "", security, vcsPaths)
rr, err := repoRootFromVCSPaths(importPath, security, vcsPaths)
if err == errUnknownSite {
rr, err = repoRootForImportDynamic(importPath, mod, security)
if err != nil {
@ -665,7 +665,7 @@ func RepoRootForImportPath(importPath string, mod ModuleMode, security web.Secur
}
}
if err != nil {
rr1, err1 := repoRootFromVCSPaths(importPath, "", security, vcsPathsAfterDynamic)
rr1, err1 := repoRootFromVCSPaths(importPath, security, vcsPathsAfterDynamic)
if err1 == nil {
rr = rr1
err = nil
@ -685,8 +685,7 @@ var errUnknownSite = errors.New("dynamic lookup required to find mapping")
// repoRootFromVCSPaths attempts to map importPath to a repoRoot
// using the mappings defined in vcsPaths.
// If scheme is non-empty, that scheme is forced.
func repoRootFromVCSPaths(importPath, scheme string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) {
func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) {
// A common error is to use https://packagepath because that's what
// hg and git require. Diagnose this helpfully.
if prefix := httpPrefix(importPath); prefix != "" {
@ -731,26 +730,28 @@ func repoRootFromVCSPaths(importPath, scheme string, security web.SecurityMode,
if vcs == nil {
return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
}
if srv.ping {
if scheme != "" {
match["repo"] = scheme + "://" + match["repo"]
var repoURL string
if !srv.schemelessRepo {
repoURL = match["repo"]
} else {
for _, scheme := range vcs.scheme {
if security == web.SecureOnly && !vcs.isSecureScheme(scheme) {
scheme := vcs.scheme[0] // default to first scheme
repo := match["repo"]
if vcs.pingCmd != "" {
// If we know how to test schemes, scan to find one.
for _, s := range vcs.scheme {
if security == web.SecureOnly && !vcs.isSecureScheme(s) {
continue
}
if vcs.pingCmd != "" && vcs.ping(scheme, match["repo"]) == nil {
match["repo"] = scheme + "://" + match["repo"]
goto Found
if vcs.ping(s, repo) == nil {
scheme = s
break
}
}
// No scheme found. Fall back to the first one.
match["repo"] = vcs.scheme[0] + "://" + match["repo"]
Found:
}
repoURL = scheme + "://" + repo
}
rr := &RepoRoot{
Repo: match["repo"],
Repo: repoURL,
Root: match["root"],
VCS: vcs.cmd,
vcs: vcs,
@ -1076,7 +1077,7 @@ var vcsPaths = []*vcsPath{
// Must be last.
{
regexp: lazyregexp.New(`(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\-]+)+?)\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?[A-Za-z0-9_.\-]+)*$`),
ping: true,
schemelessRepo: true,
},
}

View file

@ -19,6 +19,7 @@ import (
"cmd/go/internal/imports"
"cmd/go/internal/modload"
"cmd/go/internal/module"
"cmd/go/internal/semver"
)
var cmdVendor = &base.Command{
@ -59,10 +60,17 @@ func runVendor(cmd *base.Command, args []string) {
modpkgs[m] = append(modpkgs[m], pkg)
}
includeAllReplacements := false
isExplicit := map[module.Version]bool{}
if gv := modload.ModFile().Go; gv != nil && semver.Compare("v"+gv.Version, "v1.14") >= 0 {
// If the Go version is at least 1.14, annotate all explicit 'require' and
// 'replace' targets found in the go.mod file so that we can perform a
// stronger consistency check when -mod=vendor is set.
for _, r := range modload.ModFile().Require {
isExplicit[r.Mod] = true
}
includeAllReplacements = true
}
var buf bytes.Buffer
for _, m := range modload.BuildList()[1:] {
@ -89,6 +97,7 @@ func runVendor(cmd *base.Command, args []string) {
}
}
if includeAllReplacements {
// Record unused and wildcard replacements at the end of the modules.txt file:
// without access to the complete build list, the consumer of the vendor
// directory can't otherwise determine that those replacements had no effect.
@ -105,6 +114,7 @@ func runVendor(cmd *base.Command, args []string) {
os.Stderr.WriteString(line)
}
}
}
if buf.Len() == 0 {
fmt.Fprintf(os.Stderr, "go: no dependencies to vendor\n")

View file

@ -32,7 +32,7 @@ func cacheDir(path string) (string, error) {
if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
}
enc, err := module.EncodePath(path)
enc, err := module.EscapePath(path)
if err != nil {
return "", err
}
@ -50,7 +50,7 @@ func CachePath(m module.Version, suffix string) (string, error) {
if module.CanonicalVersion(m.Version) != m.Version {
return "", fmt.Errorf("non-canonical module version %q", m.Version)
}
encVer, err := module.EncodeVersion(m.Version)
encVer, err := module.EscapeVersion(m.Version)
if err != nil {
return "", err
}
@ -63,7 +63,7 @@ func DownloadDir(m module.Version) (string, error) {
if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
}
enc, err := module.EncodePath(m.Path)
enc, err := module.EscapePath(m.Path)
if err != nil {
return "", err
}
@ -73,7 +73,7 @@ func DownloadDir(m module.Version) (string, error) {
if module.CanonicalVersion(m.Version) != m.Version {
return "", fmt.Errorf("non-canonical module version %q", m.Version)
}
encVer, err := module.EncodeVersion(m.Version)
encVer, err := module.EscapeVersion(m.Version)
if err != nil {
return "", err
}

View file

@ -159,7 +159,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) {
continue
}
if err := module.MatchPathMajor(v, r.pathMajor); err != nil {
if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" {
incompatible = append(incompatible, v)
}
@ -293,7 +293,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
}
}
if err := module.MatchPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil {
if err := module.CheckPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil {
return nil, invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(info2.Version))
}
}
@ -317,7 +317,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
return checkGoMod()
}
if err := module.MatchPathMajor(info2.Version, r.pathMajor); err != nil {
if err := module.CheckPathMajor(info2.Version, r.pathMajor); err != nil {
if canUseIncompatible() {
info2.Version += "+incompatible"
return checkGoMod()
@ -365,7 +365,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
tagIsCanonical = true
}
if err := module.MatchPathMajor(v, r.pathMajor); err != nil {
if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
if canUseIncompatible() {
return v + "+incompatible", tagIsCanonical
}
@ -464,7 +464,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string)
}
}()
if err := module.MatchPathMajor(version, r.pathMajor); err != nil {
if err := module.CheckPathMajor(version, r.pathMajor); err != nil {
return err
}

View file

@ -212,7 +212,7 @@ func newProxyRepo(baseURL, path string) (Repo, error) {
return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", web.Redacted(base))
}
enc, err := module.EncodePath(path)
enc, err := module.EscapePath(path)
if err != nil {
return nil, err
}
@ -351,7 +351,7 @@ func (p *proxyRepo) latest() (*RevInfo, error) {
}
func (p *proxyRepo) Stat(rev string) (*RevInfo, error) {
encRev, err := module.EncodeVersion(rev)
encRev, err := module.EscapeVersion(rev)
if err != nil {
return nil, p.versionError(rev, err)
}
@ -392,7 +392,7 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) {
return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical"))
}
encVer, err := module.EncodeVersion(version)
encVer, err := module.EscapeVersion(version)
if err != nil {
return nil, p.versionError(version, err)
}
@ -408,7 +408,7 @@ func (p *proxyRepo) Zip(dst io.Writer, version string) error {
return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical"))
}
encVer, err := module.EncodeVersion(version)
encVer, err := module.EscapeVersion(version)
if err != nil {
return p.versionError(version, err)
}

View file

@ -27,7 +27,7 @@ import (
"cmd/go/internal/module"
"cmd/go/internal/note"
"cmd/go/internal/str"
"cmd/go/internal/sumweb"
"cmd/go/internal/sumdb"
"cmd/go/internal/web"
)
@ -52,11 +52,11 @@ func lookupSumDB(mod module.Version) (dbname string, lines []string, err error)
var (
dbOnce sync.Once
dbName string
db *sumweb.Conn
db *sumdb.Client
dbErr error
)
func dbDial() (dbName string, db *sumweb.Conn, err error) {
func dbDial() (dbName string, db *sumdb.Client, err error) {
// $GOSUMDB can be "key" or "key url",
// and the key can be a full verifier key
// or a host on our list of known keys.
@ -106,7 +106,7 @@ func dbDial() (dbName string, db *sumweb.Conn, err error) {
base = u
}
return name, sumweb.NewConn(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil
return name, sumdb.NewClient(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil
}
type dbClient struct {
@ -227,7 +227,7 @@ func (*dbClient) WriteConfig(file string, old, new []byte) error {
return err
}
if len(data) > 0 && !bytes.Equal(data, old) {
return sumweb.ErrWriteConflict
return sumdb.ErrWriteConflict
}
if _, err := f.Seek(0, 0); err != nil {
return err

View file

@ -223,7 +223,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
return
}
if err := module.MatchPathMajor(v, pathMajor); err != nil {
if err := module.CheckPathMajor(v, pathMajor); err != nil {
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
return
}
@ -265,7 +265,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
return
}
if err := module.MatchPathMajor(v, pathMajor); err != nil {
if err := module.CheckPathMajor(v, pathMajor); err != nil {
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
return
}

View file

@ -848,7 +848,7 @@ func fixVersion(path, vers string) (string, error) {
}
}
if vers != "" && module.CanonicalVersion(vers) == vers {
if err := module.MatchPathMajor(vers, pathMajor); err == nil {
if err := module.CheckPathMajor(vers, pathMajor); err == nil {
return vers, nil
}
}

View file

@ -2,8 +2,86 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type
// along with support code.
// Package module defines the module.Version type along with support code.
//
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
// Version string
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the escaped form be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe escaped form that
// leaves most paths unaltered.
//
// The safe escaped form is to replace every uppercase letter
// with an exclamation mark followed by the letter's lowercase equivalent.
//
// For example,
//
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
// Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
// Although paths are currently disallowed from using Unicode,
// we would like at some point to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention for escaping them in the file system.
// But there are at least two subtle considerations.
//
// First, note that not all case-fold equivalent distinct runes
// form an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are three distinct runes that case-fold to each other.
// When we do add Unicode letters, we must not assume that upper/lower
// are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
//
// Second, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
package module
// IMPORTANT NOTE
@ -28,8 +106,10 @@ import (
"cmd/go/internal/semver"
)
// A Version is defined by a module path and version pair.
// A Version (for clients, a module.Version) is defined by a module path and version pair.
// These are stored in their plain (unescaped) form.
type Version struct {
// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
Path string
// Version is usually a semantic version in canonical form.
@ -43,6 +123,11 @@ type Version struct {
Version string `json:",omitempty"`
}
// String returns the module version syntax Path@Version.
func (m Version) String() string {
return m.Path + "@" + m.Version
}
// A ModuleError indicates an error specific to a module.
type ModuleError struct {
Path string
@ -119,7 +204,7 @@ func Check(path, version string) error {
}
}
_, pathMajor, _ := SplitPathVersion(path)
if err := MatchPathMajor(version, pathMajor); err != nil {
if err := CheckPathMajor(version, pathMajor); err != nil {
return &ModuleError{Path: path, Err: err}
}
return nil
@ -138,7 +223,7 @@ func firstPathOK(r rune) bool {
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// This matches what "go get" has historically recognized in import paths.
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see note below).
// care in the safe encoding (see "escaped paths" above).
func pathOK(r rune) bool {
if r < utf8.RuneSelf {
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
@ -153,7 +238,7 @@ func pathOK(r rune) bool {
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "safe encoding" below.
// See note about "escaped paths" above.
func fileNameOK(r rune) bool {
if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters:
@ -179,6 +264,17 @@ func fileNameOK(r rune) bool {
}
// CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by CheckImportPath,
// with two additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
// ASCII digits, dots (U+002E), and dashes (U+002D);
// it must contain at least one dot and cannot start with a dash.
// Second, for a final path element of the form /vN, where N looks numeric
// (ASCII digits and dots) must not begin with a leading zero, must not be /v1,
// and must not contain any dots. For paths beginning with "gopkg.in/",
// this second requirement is replaced by a requirement that the path
// follow the gopkg.in server's conventions.
func CheckPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed module path %q: %v", path, err)
@ -208,6 +304,20 @@ func CheckPath(path string) error {
}
// CheckImportPath checks that an import path is valid.
//
// A valid import path consists of one or more valid path elements
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
//
// A valid path element is a non-empty string made up of
// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// It must not begin or end with a dot (U+002E), nor contain two dots in a row.
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on).
//
// CheckImportPath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckImportPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed import path %q: %v", path, err)
@ -291,7 +401,18 @@ func checkElem(elem string, fileName bool) error {
return nil
}
// CheckFilePath checks whether a slash-separated file path is valid.
// CheckFilePath checks that a slash-separated file path is valid.
// The definition of a valid file path is the same as the definition
// of a valid import path except that the set of allowed characters is larger:
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
// and the ASCII punctuation characters
// “!#$%&()+,-.=@[]^_{}~”.
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
// have special meanings in certain shells or operating systems.)
//
// CheckFilePath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckFilePath(path string) error {
if err := checkPath(path, true); err != nil {
return fmt.Errorf("malformed file path %q: %v", path, err)
@ -330,6 +451,9 @@ var badWindowsNames = []string{
// and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path)
@ -376,9 +500,20 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
return prefix, pathMajor, true
}
// MatchPathMajor returns a non-nil error if the semantic version v
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
	// Delegate to CheckPathMajor; a nil error means the version matches.
	return CheckPathMajor(v, pathMajor) == nil
}
// CheckPathMajor returns a non-nil error if the semantic version v
// does not match the path major version pathMajor.
func MatchPathMajor(v, pathMajor string) error {
func CheckPathMajor(v, pathMajor string) error {
// TODO(jayconrod): return errors or panic for invalid inputs. This function
// (and others) was covered by integration tests for cmd/go, and surrounding
// code protected against invalid inputs like non-canonical versions.
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
@ -438,7 +573,10 @@ func CanonicalVersion(v string) string {
return cv
}
// Sort sorts the list by Path, breaking ties by comparing Versions.
// Sort sorts the list by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool {
mi := list[i]
@ -465,96 +603,36 @@ func Sort(list []Version) {
})
}
// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// EncodePath returns the safe encoding of the given module path.
// EscapePath returns the escaped form of the given module path.
// It fails if the module path is invalid.
func EncodePath(path string) (encoding string, err error) {
func EscapePath(path string) (escaped string, err error) {
if err := CheckPath(path); err != nil {
return "", err
}
return encodeString(path)
return escapeString(path)
}
// EncodeVersion returns the safe encoding of the given module version.
// EscapeVersion returns the escaped form of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EncodeVersion(v string) (encoding string, err error) {
func EscapeVersion(v string) (escaped string, err error) {
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
return "", &InvalidVersionError{
Version: v,
Err: fmt.Errorf("disallowed version string"),
}
}
return encodeString(v)
return escapeString(v)
}
func encodeString(s string) (encoding string, err error) {
func escapeString(s string) (escaped string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the encoding loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
// The correctness of the escaping loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EscapePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
@ -576,39 +654,39 @@ func encodeString(s string) (encoding string, err error) {
return string(buf), nil
}
// DecodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func DecodePath(encoding string) (path string, err error) {
path, ok := decodeString(encoding)
// UnescapePath returns the module path for the given escaped path.
// It fails if the escaped path is invalid or describes an invalid path.
func UnescapePath(escaped string) (path string, err error) {
path, ok := unescapeString(escaped)
if !ok {
return "", fmt.Errorf("invalid module path encoding %q", encoding)
return "", fmt.Errorf("invalid escaped module path %q", escaped)
}
if err := CheckPath(path); err != nil {
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
}
return path, nil
}
// DecodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// UnescapeVersion returns the version string for the given escaped version.
// It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func DecodeVersion(encoding string) (v string, err error) {
v, ok := decodeString(encoding)
func UnescapeVersion(escaped string) (v string, err error) {
v, ok := unescapeString(escaped)
if !ok {
return "", fmt.Errorf("invalid version encoding %q", encoding)
return "", fmt.Errorf("invalid escaped version %q", escaped)
}
if err := checkElem(v, true); err != nil {
return "", fmt.Errorf("disallowed version string %q", v)
return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
}
return v, nil
}
func decodeString(encoding string) (string, bool) {
func unescapeString(escaped string) (string, bool) {
var buf []byte
bang := false
for _, r := range encoding {
for _, r := range escaped {
if r >= utf8.RuneSelf {
return "", false
}

View file

@ -238,43 +238,43 @@ func TestSplitPathVersion(t *testing.T) {
}
}
var encodeTests = []struct {
var escapeTests = []struct {
path string
enc string // empty means same as path
esc string // empty means same as path
}{
{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"},
{path: "github.com/GoogleCloudPlatform/omega", enc: "github.com/!google!cloud!platform/omega"},
{path: "github.com/GoogleCloudPlatform/omega", esc: "github.com/!google!cloud!platform/omega"},
}
func TestEncodePath(t *testing.T) {
func TestEscapePath(t *testing.T) {
// Check invalid paths.
for _, tt := range checkPathTests {
if !tt.ok {
_, err := EncodePath(tt.path)
_, err := EscapePath(tt.path)
if err == nil {
t.Errorf("EncodePath(%q): succeeded, want error (invalid path)", tt.path)
t.Errorf("EscapePath(%q): succeeded, want error (invalid path)", tt.path)
}
}
}
// Check encodings.
for _, tt := range encodeTests {
enc, err := EncodePath(tt.path)
for _, tt := range escapeTests {
esc, err := EscapePath(tt.path)
if err != nil {
t.Errorf("EncodePath(%q): unexpected error: %v", tt.path, err)
t.Errorf("EscapePath(%q): unexpected error: %v", tt.path, err)
continue
}
want := tt.enc
want := tt.esc
if want == "" {
want = tt.path
}
if enc != want {
t.Errorf("EncodePath(%q) = %q, want %q", tt.path, enc, want)
if esc != want {
t.Errorf("EscapePath(%q) = %q, want %q", tt.path, esc, want)
}
}
}
var badDecode = []string{
var badUnescape = []string{
"github.com/GoogleCloudPlatform/omega",
"github.com/!google!cloud!platform!/omega",
"github.com/!0google!cloud!platform/omega",
@ -283,38 +283,61 @@ var badDecode = []string{
"",
}
func TestDecodePath(t *testing.T) {
func TestUnescapePath(t *testing.T) {
// Check invalid decodings.
for _, bad := range badDecode {
_, err := DecodePath(bad)
for _, bad := range badUnescape {
_, err := UnescapePath(bad)
if err == nil {
t.Errorf("DecodePath(%q): succeeded, want error (invalid decoding)", bad)
t.Errorf("UnescapePath(%q): succeeded, want error (invalid decoding)", bad)
}
}
// Check invalid paths (or maybe decodings).
for _, tt := range checkPathTests {
if !tt.ok {
path, err := DecodePath(tt.path)
path, err := UnescapePath(tt.path)
if err == nil {
t.Errorf("DecodePath(%q) = %q, want error (invalid path)", tt.path, path)
t.Errorf("UnescapePath(%q) = %q, want error (invalid path)", tt.path, path)
}
}
}
// Check encodings.
for _, tt := range encodeTests {
enc := tt.enc
if enc == "" {
enc = tt.path
for _, tt := range escapeTests {
esc := tt.esc
if esc == "" {
esc = tt.path
}
path, err := DecodePath(enc)
path, err := UnescapePath(esc)
if err != nil {
t.Errorf("DecodePath(%q): unexpected error: %v", enc, err)
t.Errorf("UnescapePath(%q): unexpected error: %v", esc, err)
continue
}
if path != tt.path {
t.Errorf("DecodePath(%q) = %q, want %q", enc, path, tt.path)
t.Errorf("UnescapePath(%q) = %q, want %q", esc, path, tt.path)
}
}
}
// TestMatchPathMajor checks MatchPathMajor against a table of
// version / path-major pairs, covering the empty major, "/vN" majors,
// gopkg.in-style ".vN" majors, the "-unstable" suffix, pseudo-versions,
// and "+incompatible" versions.
func TestMatchPathMajor(t *testing.T) {
	tests := []struct {
		v, pathMajor string
		want         bool
	}{
		{"v0.0.0", "", true},
		{"v0.0.0", "/v2", false},
		{"v0.0.0", ".v0", true},
		{"v0.0.0-20190510104115-cbcb75029529", ".v1", true},
		{"v1.0.0", "/v2", false},
		{"v1.0.0", ".v1", true},
		{"v1.0.0", ".v1-unstable", true},
		{"v2.0.0+incompatible", "", true},
		{"v2.0.0", "", false},
		{"v2.0.0", "/v2", true},
		{"v2.0.0", ".v2", true},
	}
	for _, tt := range tests {
		got := MatchPathMajor(tt.v, tt.pathMajor)
		if got != tt.want {
			t.Errorf("MatchPathMajor(%q, %q) = %v, want %v", tt.v, tt.pathMajor, got, tt.want)
		}
	}
}

View file

@ -548,9 +548,6 @@ func Open(msg []byte, known Verifiers) (*Note, error) {
Text: string(text),
}
var buf bytes.Buffer
buf.Write(text)
// Parse and verify signatures.
// Ignore duplicate signatures.
seen := make(map[nameHash]bool)

View file

@ -5,7 +5,7 @@
// Parallel cache.
// This file is copied from cmd/go/internal/par.
package sumweb
package sumdb
import (
"sync"

View file

@ -2,28 +2,29 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sumweb
package sumdb
import (
"bytes"
"errors"
"fmt"
"path"
"strings"
"sync"
"sync/atomic"
"cmd/go/internal/module"
"cmd/go/internal/note"
"cmd/go/internal/str"
"cmd/go/internal/tlog"
)
// A Client provides the external operations
// (file caching, HTTP fetches, and so on)
// needed to implement the HTTP client Conn.
// A ClientOps provides the external operations
// (file caching, HTTP fetches, and so on) needed by the Client.
// The methods must be safe for concurrent use by multiple goroutines.
type Client interface {
type ClientOps interface {
// ReadRemote reads and returns the content served at the given path
// on the remote database server. The path begins with "/lookup" or "/tile/".
// on the remote database server. The path begins with "/lookup" or "/tile/",
// and there is no need to parse the path in any way.
// It is the implementation's responsibility to turn that path into a full URL
// and make the HTTP request. ReadRemote should return an error for
// any non-200 HTTP response status.
@ -35,7 +36,7 @@ type Client interface {
// "key" returns a file containing the verifier key for the server.
//
// serverName + "/latest" returns a file containing the latest known
// signed tree from the server. It is read and written (using WriteConfig).
// signed tree from the server.
// To signal that the client wishes to start with an "empty" signed tree,
// ReadConfig can return a successful empty result (0 bytes of data).
ReadConfig(file string) ([]byte, error)
@ -45,6 +46,7 @@ type Client interface {
// If the old []byte does not match the stored configuration,
// WriteConfig must return ErrWriteConflict.
// Otherwise, WriteConfig should atomically replace old with new.
// The "key" configuration file is never written using WriteConfig.
WriteConfig(file string, old, new []byte) error
// ReadCache reads and returns the content of the named cache file.
@ -61,7 +63,7 @@ type Client interface {
Log(msg string)
// SecurityError prints the given security error log message.
// The Conn returns ErrSecurity from any operation that invokes SecurityError,
// The Client returns ErrSecurity from any operation that invokes SecurityError,
// but the return value is mainly for testing. In a real program,
// SecurityError should typically print the message and call log.Fatal or os.Exit.
SecurityError(msg string)
@ -70,13 +72,13 @@ type Client interface {
// ErrWriteConflict signals a write conflict during Client.WriteConfig.
var ErrWriteConflict = errors.New("write conflict")
// ErrSecurity is returned by Conn operations that invoke Client.SecurityError.
// ErrSecurity is returned by Client operations that invoke Client.SecurityError.
var ErrSecurity = errors.New("security error: misbehaving server")
// A Conn is a client connection to a go.sum database.
// A Client is a client connection to a checksum database.
// All the methods are safe for simultaneous use by multiple goroutines.
type Conn struct {
client Client // client-provided external world
type Client struct {
ops ClientOps // access to operations in the external world
didLookup uint32
@ -97,28 +99,28 @@ type Conn struct {
latestMsg []byte // encoded signed note for latest
tileSavedMu sync.Mutex
tileSaved map[tlog.Tile]bool // which tiles have been saved using c.client.WriteCache already
tileSaved map[tlog.Tile]bool // which tiles have been saved using c.ops.WriteCache already
}
// NewConn returns a new Conn using the given Client.
func NewConn(client Client) *Conn {
return &Conn{
client: client,
// NewClient returns a new Client using the given ClientOps.
func NewClient(ops ClientOps) *Client {
return &Client{
ops: ops,
}
}
// init initializes the conn (if not already initialized)
// init initializes the client (if not already initialized)
// and returns any initialization error.
func (c *Conn) init() error {
func (c *Client) init() error {
c.initOnce.Do(c.initWork)
return c.initErr
}
// initWork does the actual initialization work.
func (c *Conn) initWork() {
func (c *Client) initWork() {
defer func() {
if c.initErr != nil {
c.initErr = fmt.Errorf("initializing sumweb.Conn: %v", c.initErr)
c.initErr = fmt.Errorf("initializing sumdb.Client: %v", c.initErr)
}
}()
@ -128,7 +130,7 @@ func (c *Conn) initWork() {
}
c.tileSaved = make(map[tlog.Tile]bool)
vkey, err := c.client.ReadConfig("key")
vkey, err := c.ops.ReadConfig("key")
if err != nil {
c.initErr = err
return
@ -141,7 +143,7 @@ func (c *Conn) initWork() {
c.verifiers = note.VerifierList(verifier)
c.name = verifier.Name()
data, err := c.client.ReadConfig(c.name + "/latest")
data, err := c.ops.ReadConfig(c.name + "/latest")
if err != nil {
c.initErr = err
return
@ -152,24 +154,30 @@ func (c *Conn) initWork() {
}
}
// SetTileHeight sets the tile height for the Conn.
// SetTileHeight sets the tile height for the Client.
// Any call to SetTileHeight must happen before the first call to Lookup.
// If SetTileHeight is not called, the Conn defaults to tile height 8.
func (c *Conn) SetTileHeight(height int) {
// If SetTileHeight is not called, the Client defaults to tile height 8.
// SetTileHeight can be called at most once,
// and if so it must be called before the first call to Lookup.
func (c *Client) SetTileHeight(height int) {
if atomic.LoadUint32(&c.didLookup) != 0 {
panic("SetTileHeight used after Lookup")
}
if height <= 0 {
panic("invalid call to SetTileHeight")
}
if c.tileHeight != 0 {
panic("multiple calls to SetTileHeight")
}
c.tileHeight = height
}
// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Conn.
// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client.
// For any module path matching one of the patterns,
// Lookup will return ErrGONOSUMDB.
// Any call to SetGONOSUMDB must happen before the first call to Lookup.
func (c *Conn) SetGONOSUMDB(list string) {
// SetGONOSUMDB can be called at most once,
// and if so it must be called before the first call to Lookup.
func (c *Client) SetGONOSUMDB(list string) {
if atomic.LoadUint32(&c.didLookup) != 0 {
panic("SetGONOSUMDB used after Lookup")
}
@ -184,14 +192,58 @@ func (c *Conn) SetGONOSUMDB(list string) {
// usually from the environment variable).
var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)")
func (c *Conn) skip(target string) bool {
return str.GlobsMatchPath(c.nosumdb, target)
func (c *Client) skip(target string) bool {
return globsMatchPath(c.nosumdb, target)
}
// globsMatchPath reports whether any path prefix of target
// matches one of the glob patterns (as defined by path.Match)
// in the comma-separated globs list.
// It ignores any empty or malformed patterns in the list.
func globsMatchPath(globs, target string) bool {
	for _, glob := range strings.Split(globs, ",") {
		// Skip empty entries in the comma-separated list.
		if glob == "" {
			continue
		}

		// A glob with N+1 path elements (N slashes) is matched
		// against the first N+1 path elements of target,
		// which end just before the N+1'th slash.
		n := strings.Count(glob, "/")

		// Truncate target just before its N+1'th slash, if any.
		prefix := target
		seen := 0
		for i := 0; i < len(target); i++ {
			if target[i] != '/' {
				continue
			}
			if seen == n {
				prefix = target[:i]
				break
			}
			seen++
		}

		// If target has fewer path elements than glob, it cannot match.
		if strings.Count(prefix, "/") < n {
			continue
		}

		if ok, _ := path.Match(glob, prefix); ok {
			return true
		}
	}
	return false
}
// Lookup returns the go.sum lines for the given module path and version.
// The version may end in a /go.mod suffix, in which case Lookup returns
// the go.sum lines for the module's go.mod-only hash.
func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
func (c *Client) Lookup(path, vers string) (lines []string, err error) {
atomic.StoreUint32(&c.didLookup, 1)
if c.skip(path) {
@ -209,16 +261,16 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
}
// Prepare encoded cache filename / URL.
epath, err := encodePath(path)
epath, err := module.EscapePath(path)
if err != nil {
return nil, err
}
evers, err := encodeVersion(strings.TrimSuffix(vers, "/go.mod"))
evers, err := module.EscapeVersion(strings.TrimSuffix(vers, "/go.mod"))
if err != nil {
return nil, err
}
file := c.name + "/lookup/" + epath + "@" + evers
remotePath := "/lookup/" + epath + "@" + evers
file := c.name + remotePath
// Fetch the data.
// The lookupCache avoids redundant ReadCache/GetURL operations
@ -232,9 +284,9 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
result := c.record.Do(file, func() interface{} {
// Try the on-disk cache, or else get from web.
writeCache := false
data, err := c.client.ReadCache(file)
data, err := c.ops.ReadCache(file)
if err != nil {
data, err = c.client.ReadRemote(remotePath)
data, err = c.ops.ReadRemote(remotePath)
if err != nil {
return cached{nil, err}
}
@ -256,7 +308,7 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
// Now that we've validated the record,
// save it to the on-disk cache (unless that's where it came from).
if writeCache {
c.client.WriteCache(file, data)
c.ops.WriteCache(file, data)
}
return cached{data, nil}
@ -278,15 +330,15 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
}
// mergeLatest merges the tree head in msg
// with the Conn's current latest tree head,
// with the Client's current latest tree head,
// ensuring the result is a consistent timeline.
// If the result is inconsistent, mergeLatest calls c.client.SecurityError
// If the result is inconsistent, mergeLatest calls c.ops.SecurityError
// with a detailed security error message and then
// (only if c.client.SecurityError does not exit the program) returns ErrSecurity.
// If the Conn's current latest tree head moves forward,
// (only if c.ops.SecurityError does not exit the program) returns ErrSecurity.
// If the Client's current latest tree head moves forward,
// mergeLatest updates the underlying configuration file as well,
// taking care to merge any independent updates to that configuration.
func (c *Conn) mergeLatest(msg []byte) error {
func (c *Client) mergeLatest(msg []byte) error {
// Merge msg into our in-memory copy of the latest tree head.
when, err := c.mergeLatestMem(msg)
if err != nil {
@ -303,7 +355,7 @@ func (c *Conn) mergeLatest(msg []byte) error {
// we need to merge any updates made there as well.
// Note that writeConfig is an atomic compare-and-swap.
for {
msg, err := c.client.ReadConfig(c.name + "/latest")
msg, err := c.ops.ReadConfig(c.name + "/latest")
if err != nil {
return err
}
@ -321,7 +373,7 @@ func (c *Conn) mergeLatest(msg []byte) error {
c.latestMu.Lock()
latestMsg := c.latestMsg
c.latestMu.Unlock()
if err := c.client.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict {
if err := c.ops.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict {
// Success or a non-write-conflict error.
return err
}
@ -342,7 +394,7 @@ const (
// msgPast means msg was from before c.latest,
// msgNow means msg was exactly c.latest, and
// msgFuture means msg was from after c.latest, which has now been updated.
func (c *Conn) mergeLatestMem(msg []byte) (when int, err error) {
func (c *Client) mergeLatestMem(msg []byte) (when int, err error) {
if len(msg) == 0 {
// Accept empty msg as the unsigned, empty timeline.
c.latestMu.Lock()
@ -412,7 +464,7 @@ func (c *Conn) mergeLatestMem(msg []byte) (when int, err error) {
// If an error occurs, such as malformed data or a network problem, checkTrees returns that error.
// If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed
// message and calls log.Fatal.
func (c *Conn) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error {
func (c *Client) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error {
thr := tlog.TileHashReader(newer, &c.tileReader)
h, err := tlog.TreeHash(older.N, thr)
if err != nil {
@ -456,12 +508,12 @@ func (c *Conn) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, ne
fmt.Fprintf(&buf, "\n\t%v", h)
}
}
c.client.SecurityError(buf.String())
c.ops.SecurityError(buf.String())
return ErrSecurity
}
// checkRecord checks that record #id's hash matches data.
func (c *Conn) checkRecord(id int64, data []byte) error {
func (c *Client) checkRecord(id int64, data []byte) error {
c.latestMu.Lock()
latest := c.latest
c.latestMu.Unlock()
@ -479,11 +531,11 @@ func (c *Conn) checkRecord(id int64, data []byte) error {
return fmt.Errorf("cannot authenticate record data in server response")
}
// tileReader is a *Conn wrapper that implements tlog.TileReader.
// tileReader is a *Client wrapper that implements tlog.TileReader.
// The separate type avoids exposing the ReadTiles and SaveTiles
// methods on Conn itself.
// methods on Client itself.
type tileReader struct {
c *Conn
c *Client
}
func (r *tileReader) Height() int {
@ -516,17 +568,17 @@ func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) {
}
// tileCacheKey returns the cache key for the tile.
func (c *Conn) tileCacheKey(tile tlog.Tile) string {
func (c *Client) tileCacheKey(tile tlog.Tile) string {
return c.name + "/" + tile.Path()
}
// tileRemotePath returns the remote path for the tile.
func (c *Conn) tileRemotePath(tile tlog.Tile) string {
func (c *Client) tileRemotePath(tile tlog.Tile) string {
return "/" + tile.Path()
}
// readTile reads a single tile, either from the on-disk cache or the server.
func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
func (c *Client) readTile(tile tlog.Tile) ([]byte, error) {
type cached struct {
data []byte
err error
@ -534,7 +586,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
result := c.tileCache.Do(tile, func() interface{} {
// Try the requested tile in on-disk cache.
data, err := c.client.ReadCache(c.tileCacheKey(tile))
data, err := c.ops.ReadCache(c.tileCacheKey(tile))
if err == nil {
c.markTileSaved(tile)
return cached{data, nil}
@ -544,9 +596,9 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
// We only save authenticated tiles to the on-disk cache,
// so the recreated prefix is equally authenticated.
full := tile
full.W = 1 << tile.H
full.W = 1 << uint(tile.H)
if tile != full {
data, err := c.client.ReadCache(c.tileCacheKey(full))
data, err := c.ops.ReadCache(c.tileCacheKey(full))
if err == nil {
c.markTileSaved(tile) // don't save tile later; we already have full
return cached{data[:len(data)/full.W*tile.W], nil}
@ -554,7 +606,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
}
// Try requested tile from server.
data, err = c.client.ReadRemote(c.tileRemotePath(tile))
data, err = c.ops.ReadRemote(c.tileRemotePath(tile))
if err == nil {
return cached{data, nil}
}
@ -564,7 +616,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
// the tile has been completed and only the complete one
// is available.
if tile != full {
data, err := c.client.ReadRemote(c.tileRemotePath(full))
data, err := c.ops.ReadRemote(c.tileRemotePath(full))
if err == nil {
// Note: We could save the full tile in the on-disk cache here,
// but we don't know if it is valid yet, and we will only find out
@ -585,7 +637,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
// markTileSaved records that tile is already present in the on-disk cache,
// so that a future SaveTiles for that tile can be ignored.
func (c *Conn) markTileSaved(tile tlog.Tile) {
func (c *Client) markTileSaved(tile tlog.Tile) {
c.tileSavedMu.Lock()
c.tileSaved[tile] = true
c.tileSavedMu.Unlock()
@ -613,7 +665,7 @@ func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) {
// c.tileSaved[tile] is still true and we will not try to write it again.
// Next time we run maybe we'll redownload it again and be
// more successful.
c.client.WriteCache(c.name+"/"+tile.Path(), data[i])
c.ops.WriteCache(c.name+"/"+tile.Path(), data[i])
}
}
}

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sumweb
package sumdb
import (
"bytes"
@ -21,7 +21,7 @@ const (
testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
)
func TestConnLookup(t *testing.T) {
func TestClientLookup(t *testing.T) {
tc := newTestClient(t)
tc.mustHaveLatest(1)
@ -49,7 +49,7 @@ func TestConnLookup(t *testing.T) {
tc.mustHaveLatest(4)
}
func TestConnBadTiles(t *testing.T) {
func TestClientBadTiles(t *testing.T) {
tc := newTestClient(t)
flipBits := func() {
@ -65,33 +65,33 @@ func TestConnBadTiles(t *testing.T) {
// Bad tiles in initial download.
tc.mustHaveLatest(1)
flipBits()
_, err := tc.conn.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumweb.Conn: checking tree#1: downloaded inconsistent tile")
_, err := tc.client.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
flipBits()
tc.newConn()
tc.newClient()
tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=")
// Bad tiles after initial download.
flipBits()
_, err = tc.conn.Lookup("rsc.io/Quote", "v1.5.2")
_, err = tc.client.Lookup("rsc.io/Quote", "v1.5.2")
tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile")
flipBits()
tc.newConn()
tc.newClient()
tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=")
// Bad starting tree hash looks like bad tiles.
tc.newConn()
tc.newClient()
text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}})
data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer)
if err != nil {
tc.t.Fatal(err)
}
tc.config[testName+"/latest"] = data
_, err = tc.conn.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumweb.Conn: checking tree#1: downloaded inconsistent tile")
_, err = tc.client.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
}
func TestConnFork(t *testing.T) {
func TestClientFork(t *testing.T) {
tc := newTestClient(t)
tc2 := tc.fork()
@ -109,7 +109,7 @@ func TestConnFork(t *testing.T) {
key := "/lookup/rsc.io/pkg1@v1.5.2"
tc2.remote[key] = tc.remote[key]
_, err := tc2.conn.Lookup("rsc.io/pkg1", "v1.5.2")
_, err := tc2.client.Lookup("rsc.io/pkg1", "v1.5.2")
tc2.mustError(err, ErrSecurity.Error())
/*
@ -154,10 +154,10 @@ func TestConnFork(t *testing.T) {
}
}
func TestConnGONOSUMDB(t *testing.T) {
func TestClientGONOSUMDB(t *testing.T) {
tc := newTestClient(t)
tc.conn.SetGONOSUMDB("p,*/q")
tc.conn.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network
tc.client.SetGONOSUMDB("p,*/q")
tc.client.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network
tc.getOK = false
ok := []string{
@ -175,13 +175,13 @@ func TestConnGONOSUMDB(t *testing.T) {
}
for _, path := range ok {
_, err := tc.conn.Lookup(path, "v1.0.0")
_, err := tc.client.Lookup(path, "v1.0.0")
if err == ErrGONOSUMDB {
t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path)
}
}
for _, path := range skip {
_, err := tc.conn.Lookup(path, "v1.0.0")
_, err := tc.client.Lookup(path, "v1.0.0")
if err != ErrGONOSUMDB {
t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err)
}
@ -191,7 +191,7 @@ func TestConnGONOSUMDB(t *testing.T) {
// A testClient is a self-contained client-side testing environment.
type testClient struct {
t *testing.T // active test
conn *Conn // conn being tested
client *Client // client being tested
tileHeight int // tile height to use (default 2)
getOK bool // should tc.GetURL succeed?
getTileOK bool // should tc.GetURL of tiles succeed?
@ -202,12 +202,12 @@ type testClient struct {
// mu protects config, cache, log, security
// during concurrent use of the exported methods
// by the conn itself (testClient is the Conn's Client,
// by the client itself (testClient is the Client's ClientOps,
// and the Client methods can both read and write these fields).
// Unexported methods invoked directly by the test
// (for example, addRecord) need not hold the mutex:
// for proper test execution those methods should only
// be called when the Conn is idle and not using its Client.
// be called when the Client is idle and not using its ClientOps.
// Not holding the mutex in those methods ensures
// that if a mistake is made, go test -race will report it.
// (Holding the mutex would eliminate the race report but
@ -240,7 +240,7 @@ func newTestClient(t *testing.T) *testClient {
t.Fatal(err)
}
tc.newConn()
tc.newClient()
tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
@ -260,18 +260,18 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
return tc
}
// newConn resets the Conn associated with tc.
// This clears any in-memory cache from the Conn
// newClient resets the Client associated with tc.
// This clears any in-memory cache from the Client
// but not tc's on-disk cache.
func (tc *testClient) newConn() {
tc.conn = NewConn(tc)
tc.conn.SetTileHeight(tc.tileHeight)
func (tc *testClient) newClient() {
tc.client = NewClient(tc)
tc.client.SetTileHeight(tc.tileHeight)
}
// mustLookup does a lookup for path@vers and checks that the lines that come back match want.
func (tc *testClient) mustLookup(path, vers, want string) {
tc.t.Helper()
lines, err := tc.conn.Lookup(path, vers)
lines, err := tc.client.Lookup(path, vers)
if err != nil {
tc.t.Fatal(err)
}
@ -315,7 +315,7 @@ func (tc *testClient) fork() *testClient {
cache: copyMap(tc.cache),
remote: copyMap(tc.remote),
}
tc2.newConn()
tc2.newClient()
return tc2
}

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sumweb implements the HTTP protocols for serving or accessing a go.sum database.
package sumweb
// Package sumdb implements the HTTP protocols for serving or accessing a module checksum database.
package sumdb
import (
"context"
@ -12,48 +12,50 @@ import (
"os"
"strings"
"cmd/go/internal/module"
"cmd/go/internal/tlog"
)
// A Server provides the external operations
// (underlying database access and so on)
// needed to implement the HTTP server Handler.
type Server interface {
// NewContext returns the context to use for the request r.
NewContext(r *http.Request) (context.Context, error)
// A ServerOps provides the external operations
// (underlying database access and so on) needed by the Server.
type ServerOps interface {
// Signed returns the signed hash of the latest tree.
Signed(ctx context.Context) ([]byte, error)
// ReadRecords returns the content for the n records id through id+n-1.
ReadRecords(ctx context.Context, id, n int64) ([][]byte, error)
// Lookup looks up a record by its associated key ("module@version"),
// Lookup looks up a record for the given module,
// returning the record ID.
Lookup(ctx context.Context, key string) (int64, error)
Lookup(ctx context.Context, m module.Version) (int64, error)
// ReadTileData reads the content of tile t.
// It is only invoked for hash tiles (t.L ≥ 0).
ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error)
}
// A Handler is the go.sum database server handler,
// which should be invoked to serve the paths listed in Paths.
// The calling code is responsible for initializing Server.
type Handler struct {
Server Server
// A Server is the checksum database HTTP server,
// which implements http.Handler and should be invoked
// to serve the paths listed in ServerPaths.
type Server struct {
ops ServerOps
}
// Paths are the URL paths for which Handler should be invoked.
// NewServer returns a new Server using the given operations.
func NewServer(ops ServerOps) *Server {
return &Server{ops: ops}
}
// ServerPaths are the URL paths the Server can (and should) serve.
//
// Typically a server will do:
//
// handler := &sumweb.Handler{Server: srv}
// for _, path := range sumweb.Paths {
// http.HandleFunc(path, handler)
// srv := sumdb.NewServer(ops)
// for _, path := range sumdb.ServerPaths {
// http.Handle(path, srv)
// }
//
var Paths = []string{
var ServerPaths = []string{
"/lookup/",
"/latest",
"/tile/",
@ -61,12 +63,8 @@ var Paths = []string{
var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`)
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx, err := h.Server.NewContext(r)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
switch {
default:
@ -79,23 +77,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
i := strings.Index(mod, "@")
encPath, encVers := mod[:i], mod[i+1:]
path, err := decodePath(encPath)
escPath, escVers := mod[:i], mod[i+1:]
path, err := module.UnescapePath(escPath)
if err != nil {
reportError(w, r, err)
return
}
vers, err := decodeVersion(encVers)
vers, err := module.UnescapeVersion(escVers)
if err != nil {
reportError(w, r, err)
return
}
id, err := h.Server.Lookup(ctx, path+"@"+vers)
id, err := s.ops.Lookup(ctx, module.Version{Path: path, Version: vers})
if err != nil {
reportError(w, r, err)
return
}
records, err := h.Server.ReadRecords(ctx, id, 1)
records, err := s.ops.ReadRecords(ctx, id, 1)
if err != nil {
// This should never happen - the lookup says the record exists.
http.Error(w, err.Error(), http.StatusInternalServerError)
@ -110,7 +108,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
signed, err := h.Server.Signed(ctx)
signed, err := s.ops.Signed(ctx)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@ -120,7 +118,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Write(signed)
case r.URL.Path == "/latest":
data, err := h.Server.Signed(ctx)
data, err := s.ops.Signed(ctx)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@ -137,7 +135,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if t.L == -1 {
// Record data.
start := t.N << uint(t.H)
records, err := h.Server.ReadRecords(ctx, start, int64(t.W))
records, err := s.ops.ReadRecords(ctx, start, int64(t.W))
if err != nil {
reportError(w, r, err)
return
@ -159,7 +157,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
data, err := h.Server.ReadTileData(ctx, t)
data, err := s.ops.ReadTileData(ctx, t)
if err != nil {
reportError(w, r, err)
return

View file

@ -2,22 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sumweb
package sumdb
import (
"context"
"fmt"
"net/http"
"strings"
"sync"
"cmd/go/internal/module"
"cmd/go/internal/note"
"cmd/go/internal/tlog"
)
// NewTestServer constructs a new TestServer
// that will sign its tree with the given signer key
// (see cmd/go/internal/note)
// (see golang.org/x/mod/sumdb/note)
// and fetch new records as needed by calling gosum.
func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer {
return &TestServer{signer: signer, gosum: gosum}
@ -45,10 +44,6 @@ func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) {
return list, nil
}
func (s *TestServer) NewContext(r *http.Request) (context.Context, error) {
return nil, nil
}
func (s *TestServer) Signed(ctx context.Context) ([]byte, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -80,7 +75,8 @@ func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, er
return list, nil
}
func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) {
func (s *TestServer) Lookup(ctx context.Context, m module.Version) (int64, error) {
key := m.String()
s.mu.Lock()
id, ok := s.lookup[key]
s.mu.Unlock()
@ -89,12 +85,7 @@ func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) {
}
// Look up module and compute go.sum lines.
i := strings.Index(key, "@")
if i < 0 {
return 0, fmt.Errorf("invalid lookup key %q", key)
}
path, vers := key[:i], key[i+1:]
data, err := s.gosum(path, vers)
data, err := s.gosum(m.Path, m.Version)
if err != nil {
return 0, err
}

View file

@ -1,167 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// FS-safe encoding of module paths and versions.
// Copied from cmd/go/internal/module and unexported.
package sumweb
import (
"fmt"
"unicode/utf8"
)
// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// encodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid.
// Paths and versions share the same !-escaping scheme, so this is a
// thin wrapper around encodeString.
func encodePath(path string) (encoding string, err error) {
	return encodeString(path)
}
// encodeVersion returns the safe encoding of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
// It uses the same !-escaping scheme as encodePath.
func encodeVersion(v string) (encoding string, err error) {
	return encodeString(v)
}
// encodeString returns the !-escaped form of s: every uppercase ASCII
// letter becomes '!' followed by its lowercase equivalent, and all other
// characters pass through unchanged. It reports an error if s contains
// '!' or any non-ASCII rune; CheckPath should already have rejected
// such inputs, so hitting that branch indicates an internal inconsistency.
func encodeString(s string) (encoding string, err error) {
	// First pass: validate and decide whether any escaping is needed.
	needEscape := false
	for _, r := range s {
		if r == '!' || r >= utf8.RuneSelf {
			// This should be disallowed by CheckPath, but diagnose anyway.
			// The correctness of the encoding loop below depends on it.
			return "", fmt.Errorf("internal error: inconsistency in EncodePath")
		}
		if 'A' <= r && r <= 'Z' {
			needEscape = true
		}
	}
	if !needEscape {
		// Common case: no uppercase letters, return s unmodified.
		return s, nil
	}
	// Second pass: rewrite. The string is known to be pure ASCII here,
	// so byte indexing is safe.
	buf := make([]byte, 0, len(s)+1)
	for i := 0; i < len(s); i++ {
		c := s[i]
		if 'A' <= c && c <= 'Z' {
			buf = append(buf, '!', c+'a'-'A')
		} else {
			buf = append(buf, c)
		}
	}
	return string(buf), nil
}
// decodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func decodePath(encoding string) (path string, err error) {
	// decodeString reports only success/failure; attach the offending
	// input here so callers get a useful message.
	if p, ok := decodeString(encoding); ok {
		return p, nil
	}
	return "", fmt.Errorf("invalid module path encoding %q", encoding)
}
// decodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func decodeVersion(encoding string) (v string, err error) {
	// Same escaping scheme as paths; only the error message differs.
	if s, ok := decodeString(encoding); ok {
		return s, nil
	}
	return "", fmt.Errorf("invalid version encoding %q", encoding)
}
// decodeString reverses the !-escaping applied by encodeString:
// "!x" (x in a-z) decodes to the uppercase letter 'X'. It reports
// failure for any non-ASCII byte, any bare uppercase letter, a '!'
// not followed by a lowercase letter, or a trailing '!'.
func decodeString(encoding string) (string, bool) {
	buf := make([]byte, 0, len(encoding))
	// The encoding is ASCII-only by construction, so iterate bytes;
	// any byte >= utf8.RuneSelf is immediately invalid.
	for i := 0; i < len(encoding); i++ {
		c := encoding[i]
		if c >= utf8.RuneSelf {
			return "", false
		}
		switch {
		case c == '!':
			// Consume the escaped letter, which must exist and be lowercase.
			i++
			if i >= len(encoding) {
				return "", false // trailing '!' with nothing to escape
			}
			c = encoding[i]
			if c < 'a' || 'z' < c {
				return "", false
			}
			buf = append(buf, c+'A'-'a')
		case 'A' <= c && c <= 'Z':
			// Raw uppercase is never valid in an encoding.
			return "", false
		default:
			buf = append(buf, c)
		}
	}
	return string(buf), true
}

View file

@ -1,67 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sumweb
import "testing"
// encodeTests lists module paths and their expected safe encodings.
// An empty enc means the encoding is expected to equal path unchanged.
var encodeTests = []struct {
	path string
	enc  string // empty means same as path
}{
	{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"},
	{path: "github.com/GoogleCloudPlatform/omega", enc: "github.com/!google!cloud!platform/omega"},
}
// TestEncodePath checks that each table entry encodes without error
// to its expected safe form.
func TestEncodePath(t *testing.T) {
	for _, tt := range encodeTests {
		// An empty enc means the path encodes to itself.
		want := tt.enc
		if want == "" {
			want = tt.path
		}
		enc, err := encodePath(tt.path)
		if err != nil {
			t.Errorf("encodePath(%q): unexpected error: %v", tt.path, err)
			continue
		}
		if enc != want {
			t.Errorf("encodePath(%q) = %q, want %q", tt.path, enc, want)
		}
	}
}
// badDecode lists encodings that decodePath must reject:
// raw uppercase letters, '!' before a non-lowercase character
// ('/', a digit, '_'), and a doubled '!'.
var badDecode = []string{
	"github.com/GoogleCloudPlatform/omega",
	"github.com/!google!cloud!platform!/omega",
	"github.com/!0google!cloud!platform/omega",
	"github.com/!_google!cloud!platform/omega",
	"github.com/!!google!cloud!platform/omega",
}
// TestDecodePath checks that invalid encodings are rejected and that
// each table entry's encoding decodes back to the original path.
func TestDecodePath(t *testing.T) {
	// Check invalid decodings.
	for _, bad := range badDecode {
		_, err := decodePath(bad)
		if err == nil {
			// Fixed: message previously said "DecodePath" (the exported
			// spelling from cmd/go/internal/module); the function under
			// test here is the unexported decodePath.
			t.Errorf("decodePath(%q): succeeded, want error (invalid decoding)", bad)
		}
	}
	// Check encodings.
	for _, tt := range encodeTests {
		enc := tt.enc
		if enc == "" {
			enc = tt.path
		}
		path, err := decodePath(enc)
		if err != nil {
			t.Errorf("decodePath(%q): unexpected error: %v", enc, err)
			continue
		}
		if path != tt.path {
			t.Errorf("decodePath(%q) = %q, want %q", enc, path, tt.path)
		}
	}
}

View file

@ -41,7 +41,7 @@ func FormatTree(tree Tree) []byte {
var errMalformedTree = errors.New("malformed tree note")
var treePrefix = []byte("go.sum database tree\n")
// ParseTree parses a tree root description.
// ParseTree parses a formatted tree root description.
func ParseTree(text []byte) (tree Tree, err error) {
// The message looks like:
//

View file

@ -33,6 +33,9 @@ import (
// The special level L=-1 holds raw record data instead of hashes.
// In this case, the level encodes into a tile path as the path element
// "data" instead of "-1".
//
// See also https://golang.org/design/25530-sumdb#checksum-database
// and https://research.swtch.com/tlog#tiling_a_log.
type Tile struct {
H int // height of tile (1 ≤ H ≤ 30)
L int // level in tiling (-1 ≤ L ≤ 63)
@ -40,11 +43,13 @@ type Tile struct {
W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile)
}
// TileForIndex returns the tile of height h ≥ 1
// TileForIndex returns the tile of fixed height h ≥ 1
// and least width storing the given hash storage index.
//
// If h ≤ 0, TileForIndex panics.
func TileForIndex(h int, index int64) Tile {
if h < 1 {
panic("TileForIndex: invalid height")
if h <= 0 {
panic(fmt.Sprintf("TileForIndex: invalid height %d", h))
}
t, _, _ := tileForIndex(h, index)
return t
@ -99,8 +104,10 @@ func tileHash(data []byte) Hash {
// that must be published when publishing from a tree of
// size newTreeSize to replace a tree of size oldTreeSize.
// (No tiles need to be published for a tree of size zero.)
//
// If h ≤ 0, TileForIndex panics.
func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile {
if h < 1 {
if h <= 0 {
panic(fmt.Sprintf("NewTiles: invalid height %d", h))
}
H := uint(h)
@ -244,6 +251,16 @@ type TileReader interface {
// a data record for each tile (len(data) == len(tiles))
// and each data record must be the correct length
// (len(data[i]) == tiles[i].W*HashSize).
//
// An implementation of ReadTiles typically reads
// them from an on-disk cache or else from a remote
// tile server. Tile data downloaded from a server should
// be considered suspect and not saved into a persistent
// on-disk cache before returning from ReadTiles.
// When the client confirms the validity of the tile data,
// it will call SaveTiles to signal that they can be safely
// written to persistent storage.
// See also https://research.swtch.com/tlog#authenticating_tiles.
ReadTiles(tiles []Tile) (data [][]byte, err error)
// SaveTiles informs the TileReader that the tile data

View file

@ -5,9 +5,6 @@
// Package tlog implements a tamper-evident log
// used in the Go module go.sum database server.
//
// This package is part of a DRAFT of what the go.sum database server will look like.
// Do not assume the details here are final!
//
// This package follows the design of Certificate Transparency (RFC 6962)
// and its proofs are compatible with that system.
// See TestCertificateTransparency.

View file

@ -1023,7 +1023,7 @@ func (b *Builder) vet(a *Action) error {
// dependency tree turn on *more* analysis, as here.
// (The unsafeptr check does not write any facts for use by
// later vet runs.)
if a.Package.Goroot && !VetExplicit {
if a.Package.Goroot && !VetExplicit && VetTool == "" {
// Note that $GOROOT/src/buildall.bash
// does the same for the misc-compile trybots
// and should be updated if these flags are

View file

@ -29,7 +29,7 @@ import (
"cmd/go/internal/module"
"cmd/go/internal/par"
"cmd/go/internal/semver"
"cmd/go/internal/sumweb"
"cmd/go/internal/sumdb"
"cmd/go/internal/txtar"
)
@ -65,7 +65,7 @@ func StartProxy() {
// Prepopulate main sumdb.
for _, mod := range modList {
sumdbHandler.Server.Lookup(nil, mod.Path+"@"+mod.Version)
sumdbOps.Lookup(nil, mod)
}
})
}
@ -88,7 +88,7 @@ func readModList() {
continue
}
encPath := strings.ReplaceAll(name[:i], "_", "/")
path, err := module.DecodePath(encPath)
path, err := module.UnescapePath(encPath)
if err != nil {
if encPath != "example.com/invalidpath/v1" {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
@ -96,7 +96,7 @@ func readModList() {
continue
}
encVers := name[i+1:]
vers, err := module.DecodeVersion(encVers)
vers, err := module.UnescapeVersion(encVers)
if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
continue
@ -113,8 +113,13 @@ const (
testSumDBSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
)
var sumdbHandler = &sumweb.Handler{Server: sumweb.NewTestServer(testSumDBSignerKey, proxyGoSum)}
var sumdbWrongHandler = &sumweb.Handler{Server: sumweb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong)}
var (
sumdbOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSum)
sumdbServer = sumdb.NewServer(sumdbOps)
sumdbWrongOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong)
sumdbWrongServer = sumdb.NewServer(sumdbWrongOps)
)
// proxyHandler serves the Go module proxy protocol.
// See the proxy section of https://research.swtch.com/vgo-module.
@ -155,7 +160,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// (Client thinks it is talking directly to a sumdb.)
if strings.HasPrefix(path, "sumdb-direct/") {
r.URL.Path = path[len("sumdb-direct"):]
sumdbHandler.ServeHTTP(w, r)
sumdbServer.ServeHTTP(w, r)
return
}
@ -164,7 +169,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// (Client thinks it is talking directly to a sumdb.)
if strings.HasPrefix(path, "sumdb-wrong/") {
r.URL.Path = path[len("sumdb-wrong"):]
sumdbWrongHandler.ServeHTTP(w, r)
sumdbWrongServer.ServeHTTP(w, r)
return
}
@ -178,7 +183,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// Request for $GOPROXY/sumdb/<name>/... goes to sumdb.
if sumdbPrefix := "sumdb/" + testSumDBName + "/"; strings.HasPrefix(path, sumdbPrefix) {
r.URL.Path = path[len(sumdbPrefix)-1:]
sumdbHandler.ServeHTTP(w, r)
sumdbServer.ServeHTTP(w, r)
return
}
@ -187,7 +192,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// latest version, including pseudo-versions.
if i := strings.LastIndex(path, "/@latest"); i >= 0 {
enc := path[:i]
modPath, err := module.DecodePath(enc)
modPath, err := module.UnescapePath(enc)
if err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
@ -225,7 +230,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return
}
encVers, err := module.EncodeVersion(latest)
encVers, err := module.EscapeVersion(latest)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@ -240,7 +245,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return
}
enc, file := path[:i], path[i+len("/@v/"):]
path, err := module.DecodePath(enc)
path, err := module.UnescapePath(enc)
if err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
@ -276,7 +281,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return
}
encVers, ext := file[:i], file[i+1:]
vers, err := module.DecodeVersion(encVers)
vers, err := module.UnescapeVersion(encVers)
if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
http.NotFound(w, r)
@ -397,11 +402,11 @@ var archiveCache par.Cache
var cmdGoDir, _ = os.Getwd()
func readArchive(path, vers string) (*txtar.Archive, error) {
enc, err := module.EncodePath(path)
enc, err := module.EscapePath(path)
if err != nil {
return nil, err
}
encVers, err := module.EncodeVersion(vers)
encVers, err := module.EscapeVersion(vers)
if err != nil {
return nil, err
}

View file

@ -85,3 +85,7 @@ stderr 'multiple values for key: GOOS'
# go env -w rejects missing variables
! go env -w GOOS
stderr 'arguments must be KEY=VALUE: invalid argument: GOOS'
# go env -w rejects invalid GO111MODULE values, as otherwise cmd/go would break
! go env -w GO111MODULE=badvalue
stderr 'invalid GO111MODULE value "badvalue"'

View file

@ -0,0 +1,15 @@
# Issue 33871.
cd m/a.0
go build
-- m/go.mod --
module m
-- m/a.0/a.go --
package a
type T int
func (t T) M() int {
return int(t)
}

View file

@ -6,6 +6,7 @@ env GO111MODULE=on
cd $WORK/auto
cp go.mod go.mod.orig
cp $WORK/modules-1.13.txt $WORK/auto/modules.txt
# An explicit -mod=vendor should force use of the vendor directory.
env GOFLAGS=-mod=vendor
@ -145,19 +146,23 @@ stderr '^go: inconsistent vendoring in '$WORK/auto':$'
stderr '^\texample.com/printversion@v1.0.0: is explicitly required in go.mod, but vendor/modules.txt indicates example.com/printversion@v1.1.0$'
stderr '\n\nrun .go mod vendor. to sync, or use -mod=mod or -mod=readonly to ignore the vendor directory$'
# 'go mod vendor' should write a 1.14 vendor/modules.txt even if
# the go version is still 1.13.
# If the go version is still 1.13, 'go mod vendor' should write a
# matching vendor/modules.txt containing the corrected 1.13 data.
go mod vendor
cmp $WORK/modules-1.14.txt vendor/modules.txt
cmp $WORK/modules-1.13.txt vendor/modules.txt
go list -mod=vendor -f {{.Dir}} -tags tools all
stdout '^'$WORK'[/\\]auto$'
stdout '^'$WORK'[/\\]auto[/\\]vendor[/\\]example.com[/\\]printversion$'
stdout '^'$WORK'[/\\]auto[/\\]vendor[/\\]example.com[/\\]version$'
# When the version is upgraded to 1.14, -mod=vendor should kick in
# automatically and succeed.
# When the version is upgraded to 1.14, 'go mod vendor' should write a
# vendor/modules.txt with the updated 1.14 annotations.
go mod edit -go=1.14
go mod vendor
cmp $WORK/modules-1.14.txt vendor/modules.txt
# Then, -mod=vendor should kick in automatically and succeed.
go list -f {{.Dir}} -tags tools all
stdout '^'$WORK'[/\\]auto$'
stdout '^'$WORK'[/\\]auto[/\\]vendor[/\\]example.com[/\\]printversion$'
@ -203,7 +208,7 @@ example.com/printversion
example.com/version
# example.com/unused => nonexistent.example.com/unused v1.0.0-whatever
# example.com/version v1.2.0 => nonexistent.example.com/version v1.2.0
-- $WORK/auto/vendor/modules.txt --
-- $WORK/modules-1.13.txt --
# example.com/printversion v1.0.0
example.com/printversion
# example.com/version v1.0.0 => ./replacement-version

View file

@ -362,9 +362,6 @@ const (
AMULAWB
AMULABB
ADATABUNDLE
ADATABUNDLEEND
AMRC // MRC/MCR
ALAST

View file

@ -139,8 +139,6 @@ var Anames = []string{
"MULAWT",
"MULAWB",
"MULABB",
"DATABUNDLE",
"DATABUNDLEEND",
"MRC",
"LAST",
}

View file

@ -329,8 +329,6 @@ var optab = []Optab{
{obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0},
{obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL
{obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL
{ADATABUNDLE, C_NONE, C_NONE, C_NONE, 100, 4, 0, 0, 0, 0},
{ADATABUNDLEEND, C_NONE, C_NONE, C_NONE, 100, 0, 0, 0, 0, 0},
{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0, 0},
}
@ -434,7 +432,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP) {
if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.ANOP) {
ctxt.Diag("zero-width instruction\n%v", p)
continue
}
@ -522,7 +520,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if m/4 > len(out) {
ctxt.Diag("instruction size too large: %d > %d", m/4, len(out))
}
if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP) {
if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.ANOP) {
if p.As == obj.ATEXT {
c.autosize = p.To.Offset + 4
continue
@ -615,7 +613,6 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
func (c *ctxt5) checkpool(p *obj.Prog, nextpc int32) bool {
poolLast := nextpc
poolLast += 4 // the AB instruction to jump around the pool
poolLast += 12 // the maximum nacl alignment padding for ADATABUNDLE
poolLast += int32(c.pool.size) - 4 // the offset of the last pool entry
refPC := int32(c.pool.start) // PC of the first pool reference
@ -643,7 +640,7 @@ func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool {
q.Link = c.blitrl
q.Pos = p.Pos
c.blitrl = q
} else if force == 0 && (p.Pc+int64(12+c.pool.size)-int64(c.pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
} else if force == 0 && (p.Pc+int64(c.pool.size)-int64(c.pool.start) < 2048) {
return false
}
@ -1424,9 +1421,7 @@ func buildop(ctxt *obj.Link) {
obj.AUNDEF,
obj.AFUNCDATA,
obj.APCDATA,
obj.ANOP,
ADATABUNDLE,
ADATABUNDLEEND:
obj.ANOP:
break
}
}
@ -2481,13 +2476,6 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(p.Reg) & 15) << 0
o1 |= uint32((p.To.Offset & 15) << 12)
// DATABUNDLE: BKPT $0x5be0, signify the start of NaCl data bundle;
// DATABUNDLEEND: zero width alignment marker
case 100:
if p.As == ADATABUNDLE {
o1 = 0xe125be70
}
case 105: /* divhw r,[r,]r */
o1 = c.oprrr(p, p.As, int(p.Scond))
rf := int(p.From.Reg)

View file

@ -701,6 +701,7 @@ const (
ANGCS
ANGCSW
ANGCW
ANOOP
AORN
AORNW
AORR

View file

@ -208,6 +208,7 @@ var Anames = []string{
"NGCS",
"NGCSW",
"NGCW",
"NOOP",
"ORN",
"ORNW",
"ORR",

View file

@ -2486,6 +2486,7 @@ func buildop(ctxt *obj.Link) {
oprangeset(AYIELD, t)
oprangeset(ASEV, t)
oprangeset(ASEVL, t)
oprangeset(ANOOP, t)
oprangeset(ADRPS, t)
case ACBZ:
@ -6036,8 +6037,8 @@ func (c *ctxt7) op0(p *obj.Prog, a obj.As) uint32 {
case AERET:
return 0x6B<<25 | 4<<21 | 0x1F<<16 | 0<<10 | 0x1F<<5
// case ANOP:
// return SYSHINT(0)
case ANOOP:
return SYSHINT(0)
case AYIELD:
return SYSHINT(1)

View file

@ -148,6 +148,248 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
}
}
// addrToReg extracts the register from an Addr, handling special Addr.Names.
func addrToReg(a obj.Addr) int16 {
	// Parameters and autos are addressed relative to the stack pointer.
	if a.Name == obj.NAME_PARAM || a.Name == obj.NAME_AUTO {
		return REG_SP
	}
	return a.Reg
}
// movToLoad converts a MOV mnemonic into the corresponding load instruction.
// Each MOV width/signedness variant maps to its matching load (e.g. AMOVB
// to ALB, AMOVBU to ALBU), and AMOVF/AMOVD map to the floating-point loads
// AFLW/AFLD. It panics if mnemonic is not a MOV pseudo-instruction.
func movToLoad(mnemonic obj.As) obj.As {
	switch mnemonic {
	case AMOV:
		return ALD
	case AMOVB:
		return ALB
	case AMOVH:
		return ALH
	case AMOVW:
		return ALW
	case AMOVBU:
		return ALBU
	case AMOVHU:
		return ALHU
	case AMOVWU:
		return ALWU
	case AMOVF:
		return AFLW
	case AMOVD:
		return AFLD
	default:
		panic(fmt.Sprintf("%+v is not a MOV", mnemonic))
	}
}
// movToStore converts a MOV mnemonic into the corresponding store instruction.
// It panics if the mnemonic is not a MOV (note that the unsigned MOVs have no
// store form and are not accepted here).
func movToStore(mnemonic obj.As) obj.As {
	var st obj.As
	switch mnemonic {
	case AMOV:
		st = ASD
	case AMOVB:
		st = ASB
	case AMOVH:
		st = ASH
	case AMOVW:
		st = ASW
	case AMOVF:
		st = AFSW
	case AMOVD:
		st = AFSD
	default:
		panic(fmt.Sprintf("%+v is not a MOV", mnemonic))
	}
	return st
}
// rewriteMOV rewrites MOV pseudo-instructions into the real RISC-V
// instruction sequences that implement them (loads, stores, register
// moves, and constant/address materialization). It may append additional
// instructions via newprog, so it must run before jump target resolution.
//
// It panics if p is not one of the MOV pseudo-instructions and reports a
// diagnostic via ctxt.Diag for unsupported operand combinations.
func rewriteMOV(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog) {
	switch p.As {
	case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
	default:
		panic(fmt.Sprintf("%+v is not a MOV pseudo-instruction", p.As))
	}

	switch p.From.Type {
	case obj.TYPE_MEM: // MOV c(Rs), Rd -> L $c, Rs, Rd
		switch p.From.Name {
		case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE:
			if p.To.Type != obj.TYPE_REG {
				ctxt.Diag("unsupported load at %v", p)
			}
			p.As = movToLoad(p.As)
			p.Reg = addrToReg(p.From)
			p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: p.From.Offset}

		case obj.NAME_EXTERN, obj.NAME_STATIC:
			// Rewrite as a PC-relative pair:
			//   AUIPC $off_hi, R
			//   L $off_lo, R
			as := p.As
			to := p.To

			// The offset is not really encoded with either instruction.
			// It will be extracted later for a relocation.
			p.As = AAUIPC
			p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: p.From.Offset, Sym: p.From.Sym}
			p.Reg = 0
			p.To = obj.Addr{Type: obj.TYPE_REG, Reg: to.Reg}
			p.Mark |= NEED_PCREL_ITYPE_RELOC
			p = obj.Appendp(p, newprog)

			p.As = movToLoad(as)
			p.From = obj.Addr{Type: obj.TYPE_CONST}
			p.Reg = to.Reg
			p.To = to

		default:
			ctxt.Diag("unsupported name %d for %v", p.From.Name, p)
		}

	case obj.TYPE_REG:
		switch p.To.Type {
		case obj.TYPE_REG:
			switch p.As {
			case AMOV: // MOV Ra, Rb -> ADDI $0, Ra, Rb
				p.As = AADDI
				p.Reg = p.From.Reg
				p.From = obj.Addr{Type: obj.TYPE_CONST}

			case AMOVF: // MOVF Ra, Rb -> FSGNJS Ra, Ra, Rb
				p.As = AFSGNJS
				p.Reg = p.From.Reg

			case AMOVD: // MOVD Ra, Rb -> FSGNJD Ra, Ra, Rb
				p.As = AFSGNJD
				p.Reg = p.From.Reg

			default:
				ctxt.Diag("unsupported register-register move at %v", p)
			}

		case obj.TYPE_MEM: // MOV Rs, c(Rd) -> S $c, Rs, Rd
			switch p.As {
			case AMOVBU, AMOVHU, AMOVWU:
				// RISC-V has no unsigned store instructions.
				ctxt.Diag("unsupported unsigned store at %v", p)
			}
			switch p.To.Name {
			case obj.NAME_AUTO, obj.NAME_PARAM, obj.NAME_NONE:
				// The offset goes in p.From (as a constant), the base
				// register in p.To and the source register in p.Reg.
				p.As = movToStore(p.As)
				p.Reg = p.From.Reg
				p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: p.To.Offset}
				p.To = obj.Addr{Type: obj.TYPE_REG, Reg: addrToReg(p.To)}

			case obj.NAME_EXTERN:
				// Rewrite as a PC-relative pair:
				//   AUIPC $off_hi, TMP
				//   S $off_lo, TMP, R
				as := p.As
				from := p.From

				// The offset is not really encoded with either instruction.
				// It will be extracted later for a relocation.
				p.As = AAUIPC
				p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: p.To.Offset, Sym: p.To.Sym}
				p.Reg = 0
				p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}
				p.Mark |= NEED_PCREL_STYPE_RELOC
				p = obj.Appendp(p, newprog)

				p.As = movToStore(as)
				p.From = obj.Addr{Type: obj.TYPE_CONST}
				p.Reg = from.Reg
				p.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_TMP}

			default:
				// This switch is on p.To.Name, so report that field
				// (not p.From.Name) in the diagnostic.
				// NOTE(review): obj.NAME_STATIC is handled for loads above
				// but not for stores here — confirm whether that is
				// intentional.
				ctxt.Diag("unsupported name %d for %v", p.To.Name, p)
			}

		default:
			ctxt.Diag("unsupported MOV at %v", p)
		}

	case obj.TYPE_CONST:
		// MOV $c, R
		// If c is small enough, convert to:
		//   ADD $c, ZERO, R
		// If not, convert to:
		//   LUI top20bits(c), R
		//   ADD bottom12bits(c), R, R
		if p.As != AMOV {
			ctxt.Diag("unsupported constant load at %v", p)
		}
		off := p.From.Offset
		to := p.To

		low, high, err := split32BitImmediate(off)
		if err != nil {
			ctxt.Diag("%v: constant %d too large: %v", p, off, err)
		}

		// LUI is only necessary if the offset doesn't fit in 12-bits.
		needLUI := high != 0
		if needLUI {
			p.As = ALUI
			p.To = to
			// Pass top 20 bits to LUI.
			p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high}
			p = obj.Appendp(p, newprog)
		}
		p.As = AADDIW
		p.To = to
		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: low}
		p.Reg = REG_ZERO
		if needLUI {
			// Add the low bits onto the LUI result instead of ZERO.
			p.Reg = to.Reg
		}

	case obj.TYPE_ADDR: // MOV $sym+off(SP/SB), R
		if p.To.Type != obj.TYPE_REG || p.As != AMOV {
			ctxt.Diag("unsupported addr MOV at %v", p)
		}
		switch p.From.Name {
		case obj.NAME_EXTERN, obj.NAME_STATIC:
			// Rewrite as a PC-relative pair:
			//   AUIPC $off_hi, R
			//   ADDI $off_lo, R
			to := p.To

			// The offset is not really encoded with either instruction.
			// It will be extracted later for a relocation.
			p.As = AAUIPC
			p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: p.From.Offset, Sym: p.From.Sym}
			p.Reg = 0
			p.To = to
			p.Mark |= NEED_PCREL_ITYPE_RELOC
			p = obj.Appendp(p, newprog)

			p.As = AADDI
			p.From = obj.Addr{Type: obj.TYPE_CONST}
			p.Reg = to.Reg
			p.To = to

		case obj.NAME_PARAM, obj.NAME_AUTO:
			// Stack-relative address: ADDI $off, SP, R.
			p.As = AADDI
			p.Reg = REG_SP
			p.From.Type = obj.TYPE_CONST

		case obj.NAME_NONE:
			// Register-relative address: ADDI $off, Rs, R.
			p.As = AADDI
			p.Reg = p.From.Reg
			p.From.Type = obj.TYPE_CONST
			p.From.Reg = 0

		default:
			ctxt.Diag("bad addr MOV from name %v at %v", p.From.Name, p)
		}

	default:
		ctxt.Diag("unsupported MOV at %v", p)
	}
}
// setPCs sets the Pc field in all instructions reachable from p.
// It uses pc as the initial value.
func setPCs(p *obj.Prog, pc int64) {
@ -157,6 +399,44 @@ func setPCs(p *obj.Prog, pc int64) {
}
}
// stackOffset updates Addr offsets based on the current stack size.
//
// The frame layout places AUTOs below the saved RA and PARAMs above it:
//
//	-------------------
//	|      PARAMs     |
//	-------------------
//	|    Parent RA    | <- SP on function entry
//	-------------------
//	|      AUTOs      |
//	-------------------
//	|        RA       | <- SP during function execution
//	-------------------
//
// FixedFrameSize makes other packages aware of the space allocated for RA.
//
// A nicer version of this diagram can be found on slide 21 of the
// presentation attached to https://golang.org/issue/16922#issuecomment-243748180.
func stackOffset(a *obj.Addr, stacksize int64) {
	if a.Name == obj.NAME_AUTO {
		// Adjust to the top of AUTOs.
		a.Offset += stacksize
	} else if a.Name == obj.NAME_PARAM {
		// Adjust to the bottom of PARAMs (skipping the saved RA slot).
		a.Offset += stacksize + 8
	}
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if cursym.Func.Text == nil || cursym.Func.Text.Link == nil {
return
@ -188,6 +468,24 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// TODO(jsing): Implement.
// Update stack-based offsets.
for p := cursym.Func.Text; p != nil; p = p.Link {
stackOffset(&p.From, stacksize)
stackOffset(&p.To, stacksize)
}
// Additional instruction rewriting. Any rewrites that change the number
// of instructions must occur here (before jump target resolution).
for p := cursym.Func.Text; p != nil; p = p.Link {
switch p.As {
case AMOV, AMOVB, AMOVH, AMOVW, AMOVBU, AMOVHU, AMOVWU, AMOVF, AMOVD:
// Rewrite MOV pseudo-instructions. This cannot be done in
// progedit, as SP offsets need to be applied before we split
// up some of the Addrs.
rewriteMOV(ctxt, newprog, p)
}
}
setPCs(cursym.Func.Text, 0)
// Resolve branch and jump targets.
@ -209,6 +507,46 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
// signExtend returns val sign-extended to 64 bits from the given bit width,
// i.e. bit number bit-1 becomes the sign bit.
func signExtend(val int64, bit uint) int64 {
	shift := 64 - bit
	return (val << shift) >> shift
}
// split32BitImmediate splits a signed 32-bit immediate into a signed 20-bit
// upper immediate and a signed 12-bit lower immediate to be added to the
// upper result. For example, high may be used in LUI and low in a following
// ADDI to generate a full 32-bit constant.
func split32BitImmediate(imm int64) (low, high int64, err error) {
	if !immIFits(imm, 32) {
		return 0, 0, fmt.Errorf("immediate does not fit in 32-bits: %d", imm)
	}

	if immIFits(imm, 12) {
		// Fits entirely in the low part; no upper immediate required.
		return imm, 0, nil
	}

	upper := imm >> 12

	// The bottom 12 bits are interpreted as signed. If they would come out
	// negative, increment the upper part to compensate for the borrow.
	//
	// This increment cannot overflow: for it to do so, the top 20 bits
	// would all be 1 and the low sign bit set, in which case the whole
	// 32-bit pattern already fits in 12 signed bits.
	if imm&(1<<11) != 0 {
		upper++
	}

	return signExtend(imm, 12), signExtend(upper, 20), nil
}
func regVal(r, min, max int16) uint32 {
if r < min || r > max {
panic(fmt.Sprintf("register out of range, want %d < %d < %d", min, r, max))

View file

@ -129,6 +129,7 @@ var (
morestackNoCtxt *obj.LSym
gcWriteBarrier *obj.LSym
sigpanic *obj.LSym
sigpanic0 *obj.LSym
deferreturn *obj.LSym
jmpdefer *obj.LSym
)
@ -143,6 +144,7 @@ func instinit(ctxt *obj.Link) {
morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt")
gcWriteBarrier = ctxt.Lookup("runtime.gcWriteBarrier")
sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal)
sigpanic0 = ctxt.LookupABI("runtime.sigpanic", 0) // sigpanic called from assembly, which has ABI0
deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)
// jmpdefer is defined in assembly as ABI0, but what we're
// looking for is the *call* to jmpdefer from the Go function
@ -491,7 +493,7 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
}
// return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack
if call.As == ACALLNORESUME && call.To.Sym != sigpanic { // sigpanic unwinds the stack, but it never resumes
if call.As == ACALLNORESUME && call.To.Sym != sigpanic && call.To.Sym != sigpanic0 { // sigpanic unwinds the stack, but it never resumes
// trying to unwind WebAssembly stack but call has no resume point, terminate with error
p = appendp(p, AIf)
p = appendp(p, obj.AUNDEF)
@ -1152,6 +1154,10 @@ func align(as obj.As) uint64 {
}
func writeUleb128(w io.ByteWriter, v uint64) {
if v < 128 {
w.WriteByte(uint8(v))
return
}
more := true
for more {
c := uint8(v & 0x7f)

View file

@ -90,10 +90,12 @@ const (
ACLAC
ACLC
ACLD
ACLDEMOTE
ACLFLUSH
ACLFLUSHOPT
ACLI
ACLTS
ACLWB
ACMC
ACMOVLCC
ACMOVLCS
@ -877,6 +879,7 @@ const (
ATESTL
ATESTQ
ATESTW
ATPAUSE
ATZCNTL
ATZCNTQ
ATZCNTW
@ -884,10 +887,12 @@ const (
AUCOMISS
AUD1
AUD2
AUMWAIT
AUNPCKHPD
AUNPCKHPS
AUNPCKLPD
AUNPCKLPS
AUMONITOR
AV4FMADDPS
AV4FMADDSS
AV4FNMADDPS

View file

@ -88,10 +88,12 @@ var Anames = []string{
"CLAC",
"CLC",
"CLD",
"CLDEMOTE",
"CLFLUSH",
"CLFLUSHOPT",
"CLI",
"CLTS",
"CLWB",
"CMC",
"CMOVLCC",
"CMOVLCS",
@ -875,6 +877,7 @@ var Anames = []string{
"TESTL",
"TESTQ",
"TESTW",
"TPAUSE",
"TZCNTL",
"TZCNTQ",
"TZCNTW",
@ -882,10 +885,12 @@ var Anames = []string{
"UCOMISS",
"UD1",
"UD2",
"UMWAIT",
"UNPCKHPD",
"UNPCKHPS",
"UNPCKLPD",
"UNPCKLPS",
"UMONITOR",
"V4FMADDPS",
"V4FMADDSS",
"V4FNMADDPS",

View file

@ -983,10 +983,12 @@ var optab =
{ACLAC, ynone, Pm, opBytes{01, 0xca}},
{ACLC, ynone, Px, opBytes{0xf8}},
{ACLD, ynone, Px, opBytes{0xfc}},
{ACLDEMOTE, yclflush, Pm, opBytes{0x1c, 00}},
{ACLFLUSH, yclflush, Pm, opBytes{0xae, 07}},
{ACLFLUSHOPT, yclflush, Pq, opBytes{0xae, 07}},
{ACLI, ynone, Px, opBytes{0xfa}},
{ACLTS, ynone, Pm, opBytes{0x06}},
{ACLWB, yclflush, Pq, opBytes{0xae, 06}},
{ACMC, ynone, Px, opBytes{0xf5}},
{ACMOVLCC, yml_rl, Pm, opBytes{0x43}},
{ACMOVLCS, yml_rl, Pm, opBytes{0x42}},
@ -1500,6 +1502,7 @@ var optab =
{ATESTL, ytestl, Px, opBytes{0xa9, 0xf7, 00, 0x85, 0x85}},
{ATESTQ, ytestl, Pw, opBytes{0xa9, 0xf7, 00, 0x85, 0x85}},
{ATESTW, ytestl, Pe, opBytes{0xa9, 0xf7, 00, 0x85, 0x85}},
{ATPAUSE, ywrfsbase, Pq, opBytes{0xae, 06}},
{obj.ATEXT, ytext, Px, opBytes{}},
{AUCOMISD, yxm, Pe, opBytes{0x2e}},
{AUCOMISS, yxm, Pm, opBytes{0x2e}},
@ -1507,6 +1510,7 @@ var optab =
{AUNPCKHPS, yxm, Pm, opBytes{0x15}},
{AUNPCKLPD, yxm, Pe, opBytes{0x14}},
{AUNPCKLPS, yxm, Pm, opBytes{0x14}},
{AUMONITOR, ywrfsbase, Pf3, opBytes{0xae, 06}},
{AVERR, ydivl, Pm, opBytes{0x00, 04}},
{AVERW, ydivl, Pm, opBytes{0x00, 05}},
{AWAIT, ynone, Px, opBytes{0x9b}},
@ -1691,11 +1695,11 @@ var optab =
{AMOVDDUP, yxm, Pf2, opBytes{0x12}},
{AMOVSHDUP, yxm, Pf3, opBytes{0x16}},
{AMOVSLDUP, yxm, Pf3, opBytes{0x12}},
{ARDTSCP, ynone, Pm, opBytes{0x01, 0xf9, 0}},
{ASTAC, ynone, Pm, opBytes{0x01, 0xcb, 0}},
{AUD1, ynone, Pm, opBytes{0xb9, 0}},
{AUD2, ynone, Pm, opBytes{0x0b, 0}},
{AUMWAIT, ywrfsbase, Pf2, opBytes{0xae, 06}},
{ASYSENTER, ynone, Px, opBytes{0x0f, 0x34, 0}},
{ASYSENTER64, ynone, Pw, opBytes{0x0f, 0x34, 0}},
{ASYSEXIT, ynone, Px, opBytes{0x0f, 0x35, 0}},

View file

@ -1136,8 +1136,10 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
var unaryDst = map[obj.As]bool{
ABSWAPL: true,
ABSWAPQ: true,
ACLDEMOTE: true,
ACLFLUSH: true,
ACLFLUSHOPT: true,
ACLWB: true,
ACMPXCHG16B: true,
ACMPXCHG8B: true,
ADECB: true,

View file

@ -37,7 +37,7 @@ const (
FuncID_debugCallV1
FuncID_gopanic
FuncID_panicwrap
FuncID_handleAsyncEvents
FuncID_handleAsyncEvent
FuncID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
)
@ -83,8 +83,8 @@ func GetFuncID(name, file string) FuncID {
return FuncID_gopanic
case "runtime.panicwrap":
return FuncID_panicwrap
case "runtime.handleAsyncEvents":
return FuncID_handleAsyncEvents
case "runtime.handleAsyncEvent":
return FuncID_handleAsyncEvent
}
if file == "<autogenerated>" {
return FuncID_wrapper

View file

@ -542,6 +542,10 @@ func writeName(w nameWriter, name string) {
}
func writeUleb128(w io.ByteWriter, v uint64) {
if v < 128 {
w.WriteByte(uint8(v))
return
}
more := true
for more {
c := uint8(v & 0x7f)

View file

@ -199,21 +199,14 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
// See [NSA] 3.4.1
c := priv.PublicKey.Curve
e := hashToInt(hash, c)
r, s, err = sign(priv, &csprng, c, e)
return
}
func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, e *big.Int) (r, s *big.Int, err error) {
N := c.Params().N
if N.Sign() == 0 {
return nil, nil, errZeroParam
}
var k, kInv *big.Int
for {
for {
k, err = randFieldElement(c, *csprng)
k, err = randFieldElement(c, csprng)
if err != nil {
r = nil
return
@ -231,6 +224,8 @@ func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve
break
}
}
e := hashToInt(hash, c)
s = new(big.Int).Mul(priv.D, r)
s.Add(s, e)
s.Mul(s, kInv)
@ -239,6 +234,7 @@ func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve
break
}
}
return
}
@ -256,12 +252,8 @@ func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
return false
}
e := hashToInt(hash, c)
return verify(pub, c, e, r, s)
}
func verifyGeneric(pub *PublicKey, c elliptic.Curve, e, r, s *big.Int) bool {
var w *big.Int
N := c.Params().N
if in, ok := c.(invertible); ok {
w = in.Inverse(s)
} else {

View file

@ -1,22 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !s390x
package ecdsa
import (
"crypto/cipher"
"crypto/elliptic"
"math/big"
)
// sign dispatches directly to the generic implementation; this build has no
// accelerated assembly path.
func sign(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, e *big.Int) (r, s *big.Int, err error) {
	return signGeneric(priv, csprng, c, e)
}
// verify dispatches directly to the generic implementation; this build has
// no accelerated assembly path.
func verify(pub *PublicKey, c elliptic.Curve, e, r, s *big.Int) bool {
	return verifyGeneric(pub, c, e, r, s)
}

View file

@ -1,153 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo
package ecdsa
import (
"crypto/cipher"
"crypto/elliptic"
"internal/cpu"
"math/big"
)
// s390x accelerated signatures
//go:noescape
func kdsaSig(fc uint64, block *[1720]byte) (errn uint64)
type signverify int
const (
signing signverify = iota
verifying
)
// bufferOffsets represents the offset of a particular parameter in
// the buffer passed to the KDSA instruction.
type bufferOffsets struct {
baseSize int
hashSize int
offsetHash int
offsetKey1 int
offsetRNorKey2 int
offsetR int
offsetS int
functionCode uint64
}
// canUseKDSA reports whether the s390x KDSA instruction can be used for the
// given curve and operation. On success it fills in bo with the byte offsets
// of each parameter within the KDSA parameter block and the function code to
// issue. Only the named NIST curves P-256, P-384 and P-521 are supported.
//
// NOTE(review): the per-curve sizes, offsets and function codes are assumed
// to follow the KDSA parameter-block layout from the z/Architecture
// Principles of Operation — confirm against that reference when modifying.
func canUseKDSA(sv signverify, c elliptic.Curve, bo *bufferOffsets) bool {
	// Bail out early if the CPU does not implement the ECDSA facility.
	if !cpu.S390X.HasECDSA {
		return false
	}

	switch c.Params().Name {
	case "P-256":
		bo.baseSize = 32
		bo.hashSize = 32
		bo.offsetHash = 64
		bo.offsetKey1 = 96
		bo.offsetRNorKey2 = 128
		bo.offsetR = 0
		bo.offsetS = 32
		// Signing and verifying use distinct KDSA function codes.
		if sv == signing {
			bo.functionCode = 137
		} else {
			bo.functionCode = 1
		}
		return true
	case "P-384":
		bo.baseSize = 48
		bo.hashSize = 48
		bo.offsetHash = 96
		bo.offsetKey1 = 144
		bo.offsetRNorKey2 = 192
		bo.offsetR = 0
		bo.offsetS = 48
		if sv == signing {
			bo.functionCode = 138
		} else {
			bo.functionCode = 2
		}
		return true
	case "P-521":
		// P-521 values are 66 bytes but fields are padded to 80-byte
		// slots, hence the non-zero R/S offsets.
		bo.baseSize = 66
		bo.hashSize = 80
		bo.offsetHash = 160
		bo.offsetKey1 = 254
		bo.offsetRNorKey2 = 334
		bo.offsetR = 14
		bo.offsetS = 94
		if sv == signing {
			bo.functionCode = 139
		} else {
			bo.functionCode = 3
		}
		return true
	}
	// Unsupported (or renamed) curve: fall back to the generic path.
	return false
}
// zeroExtendAndCopy pads src with leading zeros until it has the size given.
// It then copies the padded src into the dst. Bytes beyond size in dst are
// not modified. It panics if src is longer than size.
func zeroExtendAndCopy(dst, src []byte, size int) {
	pad := size - len(src)
	if pad < 0 {
		panic("src is too long")
	}
	// Zero the leading padding bytes.
	for i := 0; i < pad; i++ {
		dst[i] = 0
	}
	copy(dst[pad:size], src)
}
// sign produces an ECDSA signature of hash value e with priv, using the
// s390x KDSA instruction when the curve supports it and falling back to the
// generic implementation otherwise. The e.Sign() != 0 guard skips the
// hardware path for an all-zero hash value.
func sign(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, e *big.Int) (r, s *big.Int, err error) {
	var bo bufferOffsets
	if canUseKDSA(signing, c, &bo) && e.Sign() != 0 {
		var buffer [1720]byte
		for {
			// Fresh random nonce k for each signing attempt.
			var k *big.Int
			k, err = randFieldElement(c, csprng)
			if err != nil {
				return nil, nil, err
			}
			// Populate the KDSA parameter block: hash, private key and
			// nonce, each left-padded to the curve's field size.
			zeroExtendAndCopy(buffer[bo.offsetHash:], e.Bytes(), bo.hashSize)
			zeroExtendAndCopy(buffer[bo.offsetKey1:], priv.D.Bytes(), bo.baseSize)
			zeroExtendAndCopy(buffer[bo.offsetRNorKey2:], k.Bytes(), bo.baseSize)
			errn := kdsaSig(bo.functionCode, &buffer)
			if errn == 2 {
				// Fatal abort from the instruction wrapper.
				return nil, nil, errZeroParam
			}
			if errn == 0 { // success == 0 means successful signing
				r = new(big.Int)
				r.SetBytes(buffer[bo.offsetR : bo.offsetR+bo.baseSize])
				s = new(big.Int)
				s.SetBytes(buffer[bo.offsetS : bo.offsetS+bo.baseSize])
				return
			}
			// At this point it must be that errn == 1: retry with a new k.
		}
	}
	r, s, err = signGeneric(priv, csprng, c, e)
	return
}
// verify checks signature (r, s) over hash value e with pub, using the s390x
// KDSA instruction when the curve supports it and falling back to the
// generic implementation otherwise. The e.Sign() != 0 guard skips the
// hardware path for an all-zero hash value.
func verify(pub *PublicKey, c elliptic.Curve, e, r, s *big.Int) bool {
	var bo bufferOffsets
	if canUseKDSA(verifying, c, &bo) && e.Sign() != 0 {
		var buffer [1720]byte
		// Populate the KDSA parameter block: signature, hash and the two
		// public key coordinates, each left-padded to the field size.
		zeroExtendAndCopy(buffer[bo.offsetR:], r.Bytes(), bo.baseSize)
		zeroExtendAndCopy(buffer[bo.offsetS:], s.Bytes(), bo.baseSize)
		zeroExtendAndCopy(buffer[bo.offsetHash:], e.Bytes(), bo.hashSize)
		zeroExtendAndCopy(buffer[bo.offsetKey1:], pub.X.Bytes(), bo.baseSize)
		zeroExtendAndCopy(buffer[bo.offsetRNorKey2:], pub.Y.Bytes(), bo.baseSize)
		errn := kdsaSig(bo.functionCode, &buffer)
		// 0 means the signature verified; any other value is a failure.
		return errn == 0
	}
	return verifyGeneric(pub, c, e, r, s)
}

View file

@ -1,31 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// func kdsaSig(fc uint64, block *[1720]byte) (errn uint64)
//
// kdsaSig issues the s390x KDSA (compute digital signature authentication)
// instruction with function code fc on the given parameter block, retrying
// while the instruction reports partial completion. It returns 0 on success,
// 1 when the operation was unsuccessful and may be retried with a new random
// number, and 2 on a fatal error.
TEXT ·kdsaSig(SB), NOSPLIT|NOFRAME, $0-24
	MOVD fc+0(FP), R0    // function code
	MOVD block+8(FP), R1 // address parameter block

loop:
	WORD $0xB93A0008 // compute digital signature authentication
	BVS  loop        // branch back if interrupted
	BEQ  success     // signature creation successful
	BGT  retry       // signing unsuccessful, but retry with new CSPRN

error:
	MOVD $2, R2          // fallthrough indicates fatal error
	MOVD R2, errn+16(FP) // return 2 - sign/verify abort
	RET

retry:
	MOVD $1, R2
	MOVD R2, errn+16(FP) // return 1 - sign/verify was unsuccessful -- if sign, retry with new RN
	RET

success:
	MOVD $0, R2
	MOVD R2, errn+16(FP) // return 0 - sign/verify was successful
	RET
View file

@ -1,33 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo
package ecdsa
import (
"crypto/elliptic"
"testing"
)
// TestNoAsm exercises the generic (non-assembly) code paths by renaming each
// supported curve so the accelerated path is not selected.
func TestNoAsm(t *testing.T) {
	for _, curve := range []elliptic.Curve{elliptic.P256(), elliptic.P384(), elliptic.P521()} {
		// Override the curve name so canUseKDSA rejects it and the
		// generic implementation runs instead.
		params := *curve.Params()
		name := params.Name
		params.Name = name + "_GENERIC_OVERRIDE"

		testKeyGeneration(t, &params, name)
		testSignAndVerify(t, &params, name)
		testNonceSafety(t, &params, name)
		testINDCCA(t, &params, name)
		testNegativeInputs(t, &params, name)
	}
}

View file

@ -263,12 +263,20 @@ func (e *InvalidUTF8Error) Error() string {
type MarshalerError struct {
Type reflect.Type
Err error
sourceFunc string
}
func (e *MarshalerError) Error() string {
return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
srcFunc := e.sourceFunc
if srcFunc == "" {
srcFunc = "MarshalJSON"
}
return "json: error calling " + srcFunc +
" for type " + e.Type.String() +
": " + e.Err.Error()
}
// Unwrap returns the underlying error.
func (e *MarshalerError) Unwrap() error { return e.Err }
var hex = "0123456789abcdef"
@ -455,7 +463,7 @@ func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
err = compact(&e.Buffer, b, opts.escapeHTML)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
}
}
@ -472,7 +480,7 @@ func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
err = compact(&e.Buffer, b, opts.escapeHTML)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
}
}
@ -488,7 +496,7 @@ func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
}
b, err := m.MarshalText()
if err != nil {
e.error(&MarshalerError{v.Type(), err})
e.error(&MarshalerError{v.Type(), err, "MarshalText"})
}
e.stringBytes(b, opts.escapeHTML)
}
@ -502,7 +510,7 @@ func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
m := va.Interface().(encoding.TextMarshaler)
b, err := m.MarshalText()
if err != nil {
e.error(&MarshalerError{v.Type(), err})
e.error(&MarshalerError{v.Type(), err, "MarshalText"})
}
e.stringBytes(b, opts.escapeHTML)
}
@ -761,7 +769,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
for i, v := range keys {
sv[i].v = v
if err := sv[i].resolve(); err != nil {
e.error(&MarshalerError{v.Type(), err})
e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error()))
}
}
sort.Slice(sv, func(i, j int) bool { return sv[i].s < sv[j].s })

View file

@ -1064,3 +1064,30 @@ func TestMarshalUncommonFieldNames(t *testing.T) {
t.Fatalf("Marshal: got %s want %s", got, want)
}
}
// TestMarshalerError verifies the Error text of MarshalerError, both with
// the default source function name and with an explicit one.
func TestMarshalerError(t *testing.T) {
	s := "test variable"
	st := reflect.TypeOf(s)
	errText := "json: test error"

	for i, tt := range []struct {
		err  *MarshalerError
		want string
	}{
		{
			// Empty sourceFunc falls back to "MarshalJSON".
			&MarshalerError{st, fmt.Errorf(errText), ""},
			"json: error calling MarshalJSON for type " + st.String() + ": " + errText,
		},
		{
			&MarshalerError{st, fmt.Errorf(errText), "TestMarshalerError"},
			"json: error calling TestMarshalerError for type " + st.String() + ": " + errText,
		},
	} {
		if got := tt.err.Error(); got != tt.want {
			t.Errorf("MarshalerError test %d, got: %s, want: %s", i, got, tt.want)
		}
	}
}

View file

@ -489,27 +489,34 @@ func (check *Checker) underlying(typ Type) Type {
}
// Otherwise, follow the forward chain.
seen := map[*Named]int{n0: 0, n: 1}
path := []Object{n0.obj, n.obj}
seen := map[*Named]int{n0: 0}
path := []Object{n0.obj}
for {
typ = n.underlying
n, _ = typ.(*Named)
if n == nil {
n1, _ := typ.(*Named)
if n1 == nil {
break // end of chain
}
seen[n] = len(seen)
path = append(path, n.obj)
n = n1
if i, ok := seen[n]; ok {
// cycle
check.cycleError(path[i:])
typ = Typ[Invalid]
break
}
seen[n] = len(seen)
path = append(path, n.obj)
}
for n := range seen {
// We should never have to update the underlying type of an imported type;
// those underlying types should have been resolved during the import.
// Also, doing so would lead to a race condition (was issue #31749).
if n.obj.pkg != check.pkg {
panic("internal error: imported type with unresolved underlying type")
}
n.underlying = typ
}

View file

@ -493,3 +493,33 @@ func (h importHelper) Import(path string) (*Package, error) {
}
return h.pkg, nil
}
// TestIssue34921 verifies that we don't update an imported type's underlying
// type when resolving an underlying type. Specifically, when determining the
// underlying type of b.T (which is the underlying type of a.T, which is int)
// we must not set the underlying type of a.T again since that would lead to
// a race condition if package b is imported elsewhere, in a package that is
// concurrently type-checked.
func TestIssue34921(t *testing.T) {
	// The buggy behavior panics; surface that as a test failure.
	defer func() {
		if r := recover(); r != nil {
			t.Error(r)
		}
	}()

	sources := []string{
		`package a; type T int`,
		`package b; import "a"; type T a.T`,
	}

	// Type-check each package in order, feeding the previous result to the
	// next package's importer.
	var pkg *Package
	for _, src := range sources {
		f := mustParse(t, src)
		conf := Config{Importer: importHelper{pkg}}
		res, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil)
		if err != nil {
			t.Errorf("%q failed to typecheck: %v", src, err)
		}
		pkg = res // res is imported by the next package in this test
	}
}

View file

@ -4,9 +4,5 @@
package unix
const unlinkatTrap uintptr = 472
const openatTrap uintptr = 463
const fstatatTrap uintptr = 470
const AT_REMOVEDIR = 0x80
const AT_SYMLINK_NOFOLLOW = 0x0020

View file

@ -53,6 +53,29 @@ func ExampleTempDir() {
}
}
// ExampleTempDir_suffix demonstrates creating a temporary directory whose
// name ends in a fixed suffix, and later removing all such directories by
// globbing for that suffix.
func ExampleTempDir_suffix() {
	parentDir := os.TempDir()
	logsDir, err := ioutil.TempDir(parentDir, "*-logs")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(logsDir) // clean up

	// Logs can be cleaned out earlier if needed by searching
	// for all directories whose suffix ends in *-logs.
	pattern := filepath.Join(parentDir, "*-logs")
	matches, err := filepath.Glob(pattern)
	if err != nil {
		log.Fatalf("Failed to match %q: %v", pattern, err)
	}

	for _, dir := range matches {
		if err := os.RemoveAll(dir); err != nil {
			log.Printf("Failed to remove %q: %v", dir, err)
		}
	}
}
func ExampleTempFile() {
content := []byte("temporary file's content")
tmpfile, err := ioutil.TempFile("", "example")

Some files were not shown because too many files have changed in this diff Show more