mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)
[dev.simd] all: merge master (9b2d39b) into dev.simd
Conflicts:
- src/internal/buildcfg/exp.go

Merge List:
+ 2025-09-22 9b2d39b75b cmd/compile/internal/ssa: match style and formatting
+ 2025-09-22 e23edf5e55 runtime: don't re-read metrics before check in TestReadMetricsSched
+ 2025-09-22 177cd8d763 log/slog: use a pooled json encoder
+ 2025-09-22 2353c15785 cmd/cgo/internal/test: skip TestMultipleAssign when using UCRT on Windows
+ 2025-09-22 32dfd69282 cmd/dist: disable FIPS 140-3 mode when testing maphash with purego
+ 2025-09-19 7f6ff5ec3e cmd/compile: fix doc word
+ 2025-09-19 9693b94be0 runtime: include stderr when objdump fails
+ 2025-09-19 8616981ce6 log/slog: optimize slog Level.String() to avoid fmt.Sprintf
+ 2025-09-19 b8af744360 testing: fix example for unexported identifier
+ 2025-09-19 51dc5bfe6c Revert "cmd/go: disable cgo by default if CC unset and DefaultCC doesn't exist"
+ 2025-09-19 ee7bf06cb3 time: improve ParseDuration performance for invalid input
+ 2025-09-19 f9e61a9a32 cmd/compile: duplicate nil check to two branches of write barrier
+ 2025-09-18 3cf1aaf8b9 runtime: use futexes with 64-bit time on Linux
+ 2025-09-18 0ab038af62 cmd/compile/internal/abi: use clear built-in
+ 2025-09-18 00bf24fdca bytes: use clear in test
+ 2025-09-18 f9701d21d2 crypto: use clear built-in
+ 2025-09-18 a58afe44fa net: fix testHookCanceledDial race
+ 2025-09-18 3203a5da29 net/http: avoid connCount underflow race
+ 2025-09-18 8ca209ec39 context: don't return a non-nil from Err before Done is closed
+ 2025-09-18 3032894e04 runtime: make explicit nil check in heapSetTypeSmallHeader
+ 2025-09-17 ef05b66d61 cmd/internal/obj/riscv: add support for Zicond instructions
+ 2025-09-17 78ef487a6f cmd/compile: fix the issue of shift amount exceeding the valid range
+ 2025-09-17 77aac7bb75 runtime: don't enable heap randomization if MSAN or ASAN is enabled
+ 2025-09-17 465b85eb76 runtime: fix CheckScavengedBitsCleared with randomized heap base
+ 2025-09-17 909704b85e encoding/json/v2: fix typo in comment
+ 2025-09-17 3db5979e8c testing: use reflect.TypeAssert and reflect.TypeFor
+ 2025-09-17 6a8dbbecbf path/filepath: fix EvalSymlinks to return ENOTDIR on plan9
+ 2025-09-17 bffe7ad9f1 go/parser: Add TestBothLineAndLeadComment
+ 2025-09-17 02a888e820 go/ast: document that (*ast.File).Comments is sorted by position
+ 2025-09-16 594deca981 cmd/link: simplify PE relocations mapping
+ 2025-09-16 9df1a289ac go/parser: simplify expectSemi
+ 2025-09-16 72ba117bda internal/buildcfg: enable randomizedHeapBase64 by default
+ 2025-09-16 796ea3bc2e os/user: align test file name and build tags
+ 2025-09-16 a69395eab2 runtime/_mkmalloc: add a copy of cloneNode
+ 2025-09-16 cbdad4fc3c cmd/go: check pattern for utf8 validity before call regexp.MustCompile
+ 2025-09-16 c2d85eb999 cmd/go: disable cgo by default if CC unset and DefaultCC doesn't exist
+ 2025-09-16 ac82fe68aa bytes,strings: remove reference to non-existent SplitFunc
+ 2025-09-16 0b26678db2 cmd/compile: fix mips zerorange implementation
+ 2025-09-16 e2cfc1eb3a cmd/internal/obj/riscv: improve handling of float point moves
+ 2025-09-16 281c632e6e crypto/x509/internal/macos: standardize package name
+ 2025-09-16 61dc7fe30d iter: document that calling yield after terminated range loop causes runtime panic

Change-Id: Ic06019efc855913632003f41eb10c746b3410b0a
This commit is contained in: commit 2d8cb80d7c
86 changed files with 927 additions and 279 deletions
@@ -508,7 +508,7 @@ func Fields(s []byte) [][]byte {
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned. Every element of the returned slice is
// non-empty. Unlike [SplitFunc], leading and trailing runs of code points
// non-empty. Unlike [Split], leading and trailing runs of code points
// satisfying f(c) are discarded.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)

@@ -891,9 +891,7 @@ func BenchmarkCountSingle(b *testing.B) {
b.Fatal("bad count", j, expect)
}
}
for i := 0; i < len(buf); i++ {
buf[i] = 0
}
clear(buf)
})
}
19 src/cmd/asm/internal/asm/testdata/riscv64.s (vendored)

@@ -195,6 +195,12 @@ start:
RDTIME X5 // f32210c0
RDINSTRET X5 // f32220c0
// 12.3: Integer Conditional Operations (Zicond)
CZEROEQZ X5, X6, X7 // b353530e
CZEROEQZ X5, X7 // b3d3530e
CZERONEZ X5, X6, X7 // b373530e
CZERONEZ X5, X7 // b3f3530e
// 13.1: Multiplication Operations
MUL X5, X6, X7 // b3035302
MULH X5, X6, X7 // b3135302

@@ -1952,12 +1958,23 @@ start:
MOVF 4(X5), F0 // 07a04200
MOVF F0, 4(X5) // 27a20200
MOVF F0, F1 // d3000020
MOVF X1, F3 // d38100f0
MOVF F3, X1 // d38001e0
MOVF X0, F3 // d30100f0
MOVF $(0.0), F3 // d30100f0
// Converted to load of symbol (AUIPC + FLW)
MOVF $(709.78271289338397), F3 // 970f000087a10f00
MOVD 4(X5), F0 // 07b04200
MOVD F0, 4(X5) // 27b20200
MOVD F0, F1 // d3000022
MOVD F3, X1 // d38001e2
MOVD X1, F3 // d38100f2
MOVD X0, F3 // d30100f2
MOVD $(0.0), F3 // d30100f2
// Convert to load of symbol (AUIPC + FLD)
// Converted to load of symbol (AUIPC + FLD)
MOVD $(709.78271289338397), F3 // 970f000087b10f00
// TLS load with local-exec (LUI + ADDIW + ADD of TP + load)
@@ -1096,6 +1096,12 @@ func testErrno(t *testing.T) {
}

func testMultipleAssign(t *testing.T) {
if runtime.GOOS == "windows" && usesUCRT(t) {
// UCRT's strtol throws an unrecoverable crash when
// using an invalid base (that is, not 0 or 2..36).
// See go.dev/issue/62887.
t.Skip("skipping test on Windows when linking with UCRT")
}
p := C.CString("234")
n, m := C.strtol(p, nil, 345), C.strtol(p, nil, 10)
defer C.free(unsafe.Pointer(p))
@@ -6,6 +6,13 @@
package cgotest
import "syscall"
import (
"syscall"
"testing"
)
var syscall_dot_SIGCHLD = syscall.SIGCHLD
func usesUCRT(t *testing.T) bool {
return false
}
@@ -4,6 +4,20 @@
package cgotest
import "syscall"
import (
"internal/syscall/windows"
"syscall"
"testing"
)
var syscall_dot_SIGCHLD syscall.Signal
// usesUCRT reports whether the test is using the Windows UCRT (Universal C Runtime).
func usesUCRT(t *testing.T) bool {
name, err := syscall.UTF16PtrFromString("ucrtbase.dll")
if err != nil {
t.Fatal(err)
}
h, err := windows.GetModuleHandle(name)
return err == nil && h != 0
}
@@ -57,7 +57,7 @@ terms of these, so the next step after type checking is to convert the syntax
and types2 representations to ir and types. This process is referred to as
"noding."
Noding using a process called Unified IR, which builds a node representation
Noding uses a process called Unified IR, which builds a node representation
using a serialized version of the typechecked code from step 2.
Unified IR is also involved in import/export of packages and inlining.
@@ -664,9 +664,7 @@ func (state *assignState) tryAllocRegs(typ *types.Type) []RegIndex {
func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 {
nr := len(pa.Registers)
padding := storage[:nr]
for i := 0; i < nr; i++ {
padding[i] = 0
}
clear(padding)
if pa.Type.Kind() != types.TSTRUCT || nr == 0 {
return padding
}
@@ -5,6 +5,7 @@
package mips
import (
"cmd/compile/internal/base"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"

@@ -17,7 +18,7 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
for cnt != 0 {
p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off)
p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.Arch.FixedFrameSize+off)
cnt -= int64(types.PtrSize)
off += int64(types.PtrSize)
}
@@ -17,7 +17,7 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog
}
for cnt != 0 {
p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off)
p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off+8)
cnt -= int64(types.PtrSize)
off += int64(types.PtrSize)
}
@@ -48,8 +48,7 @@ However, certain types don't come from Go and are special; below we will cover
Some operators contain an auxiliary field. The aux fields are usually printed as
enclosed in `[]` or `{}`, and could be the constant op argument, argument type,
etc.
for example:
etc. For example:
v13 (?) = Const64 <int> [1]
@@ -717,7 +717,8 @@
(SRLVconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVVconst [0])
// (x + x) << c -> x << c+1
((SLLV|SLL)const [c] (ADDV x x)) => ((SLLV|SLL)const [c+1] x)
((SLLV|SLL)const <t> [c] (ADDV x x)) && c < t.Size() * 8 - 1 => ((SLLV|SLL)const [c+1] x)
((SLLV|SLL)const <t> [c] (ADDV x x)) && c >= t.Size() * 8 - 1 => (MOVVconst [0])
// mul by constant
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
@@ -247,7 +247,7 @@ func init() {
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 32
{name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
{name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int64"}, // arg0 << auxInt, auxInt should be in the range 0 to 31.
{name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt
{name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt, auxInt should be in the range 0 to 63.
{name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, shift amount is mod 32
{name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64
{name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int64"}, // arg0 >> auxInt, auxInt should be in the range 0 to 31.
@@ -6561,15 +6561,17 @@ func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool {
}
func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SLLVconst [c] (ADDV x x))
// match: (SLLVconst <t> [c] (ADDV x x))
// cond: c < t.Size() * 8 - 1
// result: (SLLVconst [c+1] x)
for {
t := v.Type
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpLOONG64ADDV {
break
}
x := v_0.Args[1]
if x != v_0.Args[0] {
if x != v_0.Args[0] || !(c < t.Size()*8-1) {
break
}
v.reset(OpLOONG64SLLVconst)

@@ -6577,6 +6579,23 @@ func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (SLLVconst <t> [c] (ADDV x x))
// cond: c >= t.Size() * 8 - 1
// result: (MOVVconst [0])
for {
t := v.Type
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpLOONG64ADDV {
break
}
x := v_0.Args[1]
if x != v_0.Args[0] || !(c >= t.Size()*8-1) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SLLVconst [c] (MOVVconst [d]))
// result: (MOVVconst [d<<uint64(c)])
for {

@@ -6593,15 +6612,17 @@ func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool {
}
func rewriteValueLOONG64_OpLOONG64SLLconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SLLconst [c] (ADDV x x))
// match: (SLLconst <t> [c] (ADDV x x))
// cond: c < t.Size() * 8 - 1
// result: (SLLconst [c+1] x)
for {
t := v.Type
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpLOONG64ADDV {
break
}
x := v_0.Args[1]
if x != v_0.Args[0] {
if x != v_0.Args[0] || !(c < t.Size()*8-1) {
break
}
v.reset(OpLOONG64SLLconst)

@@ -6609,6 +6630,23 @@ func rewriteValueLOONG64_OpLOONG64SLLconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (SLLconst <t> [c] (ADDV x x))
// cond: c >= t.Size() * 8 - 1
// result: (MOVVconst [0])
for {
t := v.Type
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpLOONG64ADDV {
break
}
x := v_0.Args[1]
if x != v_0.Args[0] || !(c >= t.Size()*8-1) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(0)
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64SRA(v *Value) bool {
@@ -303,6 +303,15 @@ func writebarrier(f *Func) {
mem := stores[0].MemoryArg()
pos := stores[0].Pos
// If there is a nil check before the WB store, duplicate it to
// the two branches, where the store and the WB load occur. So
// they are more likely be removed by late nilcheck removal (which
// is block-local).
var nilcheck, nilcheckThen, nilcheckEnd *Value
if a := stores[0].Args[0]; a.Op == OpNilCheck && a.Args[1] == mem {
nilcheck = a
}
// If the source of a MoveWB is volatile (will be clobbered by a
// function call), we need to copy it to a temporary location, as
// marshaling the args of wbMove might clobber the value we're

@@ -377,6 +386,10 @@ func writebarrier(f *Func) {
// For each write barrier store, append write barrier code to bThen.
memThen := mem
if nilcheck != nil {
nilcheckThen = bThen.NewValue2(nilcheck.Pos, OpNilCheck, nilcheck.Type, nilcheck.Args[0], memThen)
}
// Note: we can issue the write barrier code in any order. In particular,
// it doesn't matter if they are in a different order *even if* they end
// up referring to overlapping memory regions. For instance if an OpStore

@@ -447,6 +460,9 @@ func writebarrier(f *Func) {
// take care of the vast majority of these. We could
// patch this up in the signal handler, or use XCHG to
// combine the read and the write.
if ptr == nilcheck {
ptr = nilcheckThen
}
oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen)
// Save old value to write buffer.
addEntry(pos, oldVal)

@@ -459,9 +475,12 @@ func writebarrier(f *Func) {
// Now do the rare cases, Zeros and Moves.
for _, w := range stores {
pos := w.Pos
dst := w.Args[0]
if dst == nilcheck {
dst = nilcheckThen
}
switch w.Op {
case OpZeroWB:
dst := w.Args[0]
typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
// zeroWB(&typ, dst)
taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)

@@ -469,7 +488,6 @@ func writebarrier(f *Func) {
f.fe.Func().SetWBPos(pos)
nWBops--
case OpMoveWB:
dst := w.Args[0]
src := w.Args[1]
if isVolatile(src) {
for _, c := range volatiles {

@@ -491,24 +509,29 @@ func writebarrier(f *Func) {
// merge memory
mem = bEnd.NewValue2(pos, OpPhi, types.TypeMem, mem, memThen)
if nilcheck != nil {
nilcheckEnd = bEnd.NewValue2(nilcheck.Pos, OpNilCheck, nilcheck.Type, nilcheck.Args[0], mem)
}
// Do raw stores after merge point.
for _, w := range stores {
pos := w.Pos
dst := w.Args[0]
if dst == nilcheck {
dst = nilcheckEnd
}
switch w.Op {
case OpStoreWB:
ptr := w.Args[0]
val := w.Args[1]
if buildcfg.Experiment.CgoCheck2 {
// Issue cgo checking code.
mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, ptr, val)
mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, dst, val)
}
mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, dst, val, mem)
case OpZeroWB:
dst := w.Args[0]
mem = bEnd.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, dst, mem)
mem.Aux = w.Aux
case OpMoveWB:
dst := w.Args[0]
src := w.Args[1]
if isVolatile(src) {
for _, c := range volatiles {

@@ -529,9 +552,8 @@ func writebarrier(f *Func) {
case OpVarDef, OpVarLive:
mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem)
case OpStore:
ptr := w.Args[0]
val := w.Args[1]
mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, dst, val, mem)
}
}

@@ -557,6 +579,9 @@ func writebarrier(f *Func) {
f.freeValue(w)
}
}
if nilcheck != nil && nilcheck.Uses == 0 {
nilcheck.reset(OpInvalid)
}
// put values after the store sequence into the end block
bEnd.Values = append(bEnd.Values, after...)

1 src/cmd/dist/test.go (vendored)

@@ -705,6 +705,7 @@ func (t *tester) registerTests() {
timeout: 300 * time.Second,
tags: []string{"purego"},
pkg: "hash/maphash",
env: []string{"GODEBUG=fips140=off"}, // FIPS 140-3 mode is incompatible with purego
})
}
@@ -10,6 +10,7 @@ import (
"regexp"
"strings"
"sync"
"unicode/utf8"
"cmd/go/internal/base"
"cmd/go/internal/gover"

@@ -285,6 +286,11 @@ func reportError(q *query, err error) {
// TODO(bcmills): Use errors.As to unpack these errors instead of parsing
// strings with regular expressions.
if !utf8.ValidString(q.pattern) || !utf8.ValidString(q.version) {
base.Errorf("go: %s", errStr)
return
}
patternRE := regexp.MustCompile("(?m)(?:[ \t(\"`]|^)" + regexp.QuoteMeta(q.pattern) + "(?:[ @:;)\"`]|$)")
if patternRE.MatchString(errStr) {
if q.rawVersion == "" {

16 src/cmd/go/testdata/script/get_panic_issue75251.txt (vendored, new file)
@@ -0,0 +1,16 @@
# Issue #75251: Don't panic if the package path or the package version
# contains invalid UTF-8 characters.

go mod init m

! go get golang.org/x/net/http/httpgutsÿv0.43.0 # contains 0xff byte
! stderr panic
stderr 'malformed module path'

! go get golang.org/x/net/http/httpgutsÿ@v0.43.0 # contains 0xff byte
! stderr panic
stderr 'malformed module path'

! go get golang.org/x/net/http/httpguts@ÿv0.43.0 # contains 0xff byte
! stderr panic
stderr 'disallowed version string'
@@ -61,6 +61,8 @@ var Anames = []string{
"CSRRWI",
"CSRRSI",
"CSRRCI",
"CZEROEQZ",
"CZERONEZ",
"MUL",
"MULH",
"MULHU",
@@ -409,6 +409,10 @@ const (
ACSRRSI
ACSRRCI
// 12.3: Integer Conditional Operations (Zicond)
ACZEROEQZ
ACZERONEZ
// 13.1: Multiplication Operations
AMUL
AMULH
@@ -1,4 +1,4 @@
// Code generated by ./parse.py -go rv64_a rv64_c rv64_d rv64_f rv64_i rv64_m rv64_q rv64_zba rv64_zbb rv64_zbs rv_a rv_c rv_c_d rv_d rv_f rv_i rv_m rv_q rv_s rv_system rv_v rv_zba rv_zbb rv_zbs rv_zicsr; DO NOT EDIT.
// Code generated by ./parse.py -go rv64_a rv64_c rv64_d rv64_f rv64_i rv64_m rv64_q rv64_zba rv64_zbb rv64_zbs rv_a rv_c rv_c_d rv_d rv_f rv_i rv_m rv_q rv_s rv_system rv_v rv_zba rv_zbb rv_zbs rv_zicond rv_zicsr; DO NOT EDIT.
package riscv
import "cmd/internal/obj"

@@ -194,6 +194,10 @@ func encode(a obj.As) *inst {
return &inst{0x13, 0x1, 0x0, 0x1, 1537, 0x30}
case ACTZW:
return &inst{0x1b, 0x1, 0x0, 0x1, 1537, 0x30}
case ACZEROEQZ:
return &inst{0x33, 0x5, 0x0, 0x0, 224, 0x7}
case ACZERONEZ:
return &inst{0x33, 0x7, 0x0, 0x0, 224, 0x7}
case ADIV:
return &inst{0x33, 0x4, 0x0, 0x0, 32, 0x1}
case ADIVU:
@@ -29,6 +29,7 @@ import (
"internal/abi"
"internal/buildcfg"
"log"
"math"
"math/bits"
"strings"
)

@@ -145,9 +146,29 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
p.From.Offset = 0
}
case AMOVF:
if p.From.Type == obj.TYPE_FCONST && p.From.Name == obj.NAME_NONE && p.From.Reg == obj.REG_NONE {
f64 := p.From.Val.(float64)
f32 := float32(f64)
if math.Float32bits(f32) == 0 {
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_ZERO
break
}
p.From.Type = obj.TYPE_MEM
p.From.Sym = ctxt.Float32Sym(f32)
p.From.Name = obj.NAME_EXTERN
p.From.Offset = 0
}
case AMOVD:
if p.From.Type == obj.TYPE_FCONST && p.From.Name == obj.NAME_NONE && p.From.Reg == obj.REG_NONE {
f64 := p.From.Val.(float64)
if math.Float64bits(f64) == 0 {
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_ZERO
break
}
p.From.Type = obj.TYPE_MEM
p.From.Sym = ctxt.Float64Sym(f64)
p.From.Name = obj.NAME_EXTERN

@@ -1927,6 +1948,10 @@ var instructions = [ALAST & obj.AMask]instructionData{
ACSRRW & obj.AMask: {enc: iIIEncoding, immForm: ACSRRWI},
ACSRRWI & obj.AMask: {enc: iIIEncoding},
// 12.3: "Zicond" Extension for Integer Conditional Operations
ACZERONEZ & obj.AMask: {enc: rIIIEncoding, ternary: true},
ACZEROEQZ & obj.AMask: {enc: rIIIEncoding, ternary: true},
// 13.1: Multiplication Operations
AMUL & obj.AMask: {enc: rIIIEncoding, ternary: true},
AMULH & obj.AMask: {enc: rIIIEncoding, ternary: true},

@@ -3254,16 +3279,37 @@ func instructionsForMOV(p *obj.Prog) []*instruction {
case p.From.Type == obj.TYPE_REG && p.To.Type == obj.TYPE_REG:
// Handle register to register moves.
switch p.As {
case AMOV: // MOV Ra, Rb -> ADDI $0, Ra, Rb
case AMOV:
// MOV Ra, Rb -> ADDI $0, Ra, Rb
ins.as, ins.rs1, ins.rs2, ins.imm = AADDI, uint32(p.From.Reg), obj.REG_NONE, 0
case AMOVW: // MOVW Ra, Rb -> ADDIW $0, Ra, Rb
case AMOVW:
// MOVW Ra, Rb -> ADDIW $0, Ra, Rb
ins.as, ins.rs1, ins.rs2, ins.imm = AADDIW, uint32(p.From.Reg), obj.REG_NONE, 0
case AMOVBU: // MOVBU Ra, Rb -> ANDI $255, Ra, Rb
case AMOVBU:
// MOVBU Ra, Rb -> ANDI $255, Ra, Rb
ins.as, ins.rs1, ins.rs2, ins.imm = AANDI, uint32(p.From.Reg), obj.REG_NONE, 255
case AMOVF: // MOVF Ra, Rb -> FSGNJS Ra, Ra, Rb
ins.as, ins.rs1 = AFSGNJS, uint32(p.From.Reg)
case AMOVD: // MOVD Ra, Rb -> FSGNJD Ra, Ra, Rb
ins.as, ins.rs1 = AFSGNJD, uint32(p.From.Reg)
case AMOVF:
// MOVF Ra, Rb -> FSGNJS Ra, Ra, Rb
// or -> FMVWX Ra, Rb
// or -> FMVXW Ra, Rb
if ins.rs2 >= REG_X0 && ins.rs2 <= REG_X31 && ins.rd >= REG_F0 && ins.rd <= REG_F31 {
ins.as = AFMVWX
} else if ins.rs2 >= REG_F0 && ins.rs2 <= REG_F31 && ins.rd >= REG_X0 && ins.rd <= REG_X31 {
ins.as = AFMVXW
} else {
ins.as, ins.rs1 = AFSGNJS, uint32(p.From.Reg)
}
case AMOVD:
// MOVD Ra, Rb -> FSGNJD Ra, Ra, Rb
// or -> FMVDX Ra, Rb
// or -> FMVXD Ra, Rb
if ins.rs2 >= REG_X0 && ins.rs2 <= REG_X31 && ins.rd >= REG_F0 && ins.rd <= REG_F31 {
ins.as = AFMVDX
} else if ins.rs2 >= REG_F0 && ins.rs2 <= REG_F31 && ins.rd >= REG_X0 && ins.rd <= REG_X31 {
ins.as = AFMVXD
} else {
ins.as, ins.rs1 = AFSGNJD, uint32(p.From.Reg)
}
case AMOVB, AMOVH:
if buildcfg.GORISCV64 >= 22 {
// Use SEXTB or SEXTH to extend.
@@ -7,6 +7,7 @@ package pkgpattern
import (
"regexp"
"strings"
"unicode/utf8"
)
// Note: most of this code was originally part of the cmd/go/internal/search

@@ -71,7 +72,7 @@ func matchPatternInternal(pattern string, vendorExclude bool) func(name string)
const vendorChar = "\x00"
if vendorExclude && strings.Contains(pattern, vendorChar) {
if vendorExclude && strings.Contains(pattern, vendorChar) || !utf8.ValidString(pattern) {
return func(name string) bool { return false }
}
@@ -17,6 +17,7 @@ import (
"errors"
"fmt"
"io"
"strconv"
"strings"
)

@@ -348,11 +349,11 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read
return nil, fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols))
}
pesym := &f.COFFSymbols[r.SymbolTableIndex]
_, gosym, err := state.readpesym(pesym)
_, rSym, err := state.readpesym(pesym)
if err != nil {
return nil, err
}
if gosym == 0 {
if rSym == 0 {
name, err := pesym.FullName(f.StringTable)
if err != nil {
name = string(pesym.Name[:])

@@ -360,90 +361,53 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read
return nil, fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type)
}
rSym := gosym
rSize := uint8(4)
rOff := int32(r.VirtualAddress)
var rAdd int64
var rType objabi.RelocType
switch arch.Family {
default:
return nil, fmt.Errorf("%s: unsupported arch %v", pn, arch.Family)
case sys.I386, sys.AMD64:
case sys.I386:
switch r.Type {
default:
return nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, state.sectsyms[rsect], r.Type)
case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32,
IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32
IMAGE_REL_AMD64_ADDR32NB:
if r.Type == IMAGE_REL_AMD64_ADDR32NB {
rType = objabi.R_PEIMAGEOFF
} else {
rType = objabi.R_PCREL
}
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32:
if r.Type == IMAGE_REL_I386_DIR32NB {
rType = objabi.R_PEIMAGEOFF
} else {
rType = objabi.R_ADDR
}
// load addend from image
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64
rSize = 8
rType = objabi.R_ADDR
// load addend from image
rAdd = int64(binary.LittleEndian.Uint64(state.sectdata[rsect][rOff:]))
}
case sys.ARM:
switch r.Type {
default:
return nil, fmt.Errorf("%s: %v: unknown ARM relocation type %v", pn, state.sectsyms[rsect], r.Type)
case IMAGE_REL_ARM_SECREL:
case IMAGE_REL_I386_REL32:
rType = objabi.R_PCREL
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case IMAGE_REL_ARM_ADDR32, IMAGE_REL_ARM_ADDR32NB:
if r.Type == IMAGE_REL_ARM_ADDR32NB {
rType = objabi.R_PEIMAGEOFF
} else {
rType = objabi.R_ADDR
}
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case IMAGE_REL_ARM_BRANCH24:
rType = objabi.R_CALLARM
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case IMAGE_REL_I386_DIR32:
rType = objabi.R_ADDR
case IMAGE_REL_I386_DIR32NB:
rType = objabi.R_PEIMAGEOFF
}
case sys.AMD64:
switch r.Type {
case IMAGE_REL_AMD64_REL32:
rType = objabi.R_PCREL
case IMAGE_REL_AMD64_ADDR32:
rType = objabi.R_ADDR
case IMAGE_REL_AMD64_ADDR64:
rType = objabi.R_ADDR
rSize = 8
case IMAGE_REL_AMD64_ADDR32NB:
rType = objabi.R_PEIMAGEOFF
}
case sys.ARM64:
switch r.Type {
default:
return nil, fmt.Errorf("%s: %v: unknown ARM64 relocation type %v", pn, state.sectsyms[rsect], r.Type)
case IMAGE_REL_ARM64_ADDR32, IMAGE_REL_ARM64_ADDR32NB:
if r.Type == IMAGE_REL_ARM64_ADDR32NB {
rType = objabi.R_PEIMAGEOFF
} else {
rType = objabi.R_ADDR
}
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case IMAGE_REL_ARM64_ADDR32:
rType = objabi.R_ADDR
case IMAGE_REL_ARM64_ADDR32NB:
rType = objabi.R_PEIMAGEOFF
}
}
if rType == 0 {
return nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, state.sectsyms[rsect], r.Type)
}
var rAdd int64
switch rSize {
default:
panic("unexpected relocation size " + strconv.Itoa(int(rSize)))
case 4:
rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:])))
case 8:
rAdd = int64(binary.LittleEndian.Uint64(state.sectdata[rsect][rOff:]))
}
// ld -r could generate multiple section symbols for the
// same section but with different values, we have to take
// that into account, or in the case of split resources,
@@ -463,6 +463,8 @@ func (c *cancelCtx) Done() <-chan struct{} {
func (c *cancelCtx) Err() error {
// An atomic load is ~5x faster than a mutex, which can matter in tight loops.
if err := c.err.Load(); err != nil {
// Ensure the done channel has been closed before returning a non-nil error.
<-c.Done()
return err.(error)
}
return nil
@@ -1177,3 +1177,23 @@ func (c *customContext) Err() error {
func (c *customContext) Value(key any) any {
return c.parent.Value(key)
}

// Issue #75533.
func TestContextErrDoneRace(t *testing.T) {
// 4 iterations reliably reproduced #75533.
for range 10 {
ctx, cancel := WithCancel(Background())
donec := ctx.Done()
go cancel()
for ctx.Err() == nil {
if runtime.GOARCH == "wasm" {
runtime.Gosched() // need to explicitly yield
}
}
select {
case <-donec:
default:
t.Fatalf("ctx.Err is non-nil, but ctx.Done is not closed")
}
}
}
@@ -203,9 +203,7 @@ func sliceForAppend(in []byte, n int) (head, tail []byte) {
// followed by ByteEncode₁, according to FIPS 203, Algorithm 5.
func ringCompressAndEncode1(s []byte, f ringElement) []byte {
s, b := sliceForAppend(s, encodingSize1)
for i := range b {
b[i] = 0
}
clear(b)
for i := range f {
b[i/8] |= uint8(compress(f[i], 1) << (i % 8))
}
@@ -61,9 +61,7 @@ func (d *Digest) Size() int { return d.outputLen }
// Reset resets the Digest to its initial state.
func (d *Digest) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
clear(d.a[:])
d.state = spongeAbsorbing
d.n = 0
}
@@ -55,9 +55,7 @@ func NewCipher(key []byte) (*Cipher, error) {
// Deprecated: Reset can't guarantee that the key will be entirely removed from
// the process's memory.
func (c *Cipher) Reset() {
for i := range c.s {
c.s[i] = 0
}
clear(c.s[:])
c.i, c.j = 0, 0
}
@@ -220,9 +220,7 @@ func (hc *halfConn) changeCipherSpec() error {
hc.mac = hc.nextMac
hc.nextCipher = nil
hc.nextMac = nil
for i := range hc.seq {
hc.seq[i] = 0
}
clear(hc.seq[:])
return nil
}

@@ -231,9 +229,7 @@ func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, level QUICEncrypti
hc.level = level
key, iv := suite.trafficKey(secret)
hc.cipher = suite.aead(key, iv)
for i := range hc.seq {
hc.seq[i] = 0
}
clear(hc.seq[:])
}
// incSeq increments the sequence number.
@@ -1590,9 +1590,7 @@ var getConfigForClientTests = []struct {
},
func(clientHello *ClientHelloInfo) (*Config, error) {
config := testConfig.Clone()
for i := range config.SessionTicketKey {
config.SessionTicketKey[i] = 0
}
clear(config.SessionTicketKey[:])
config.sessionTicketKeys = nil
return config, nil
},
@@ -4,10 +4,10 @@
//go:build darwin
// Package macOS provides cgo-less wrappers for Core Foundation and
// Package macos provides cgo-less wrappers for Core Foundation and
// Security.framework, similarly to how package syscall provides access to
// libSystem.dylib.
package macOS
package macos
import (
"bytes"
@@ -4,7 +4,7 @@
//go:build darwin
package macOS
package macos
import (
"errors"
@@ -5,51 +5,51 @@
package x509
import (
macOS "crypto/x509/internal/macos"
"crypto/x509/internal/macos"
"errors"
"fmt"
)
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
certs := macOS.CFArrayCreateMutable()
defer macOS.ReleaseCFArray(certs)
leaf, err := macOS.SecCertificateCreateWithData(c.Raw)
certs := macos.CFArrayCreateMutable()
defer macos.ReleaseCFArray(certs)
leaf, err := macos.SecCertificateCreateWithData(c.Raw)
if err != nil {
return nil, errors.New("invalid leaf certificate")
}
macOS.CFArrayAppendValue(certs, leaf)
macos.CFArrayAppendValue(certs, leaf)
if opts.Intermediates != nil {
for _, lc := range opts.Intermediates.lazyCerts {
c, err := lc.getCert()
if err != nil {
return nil, err
}
sc, err := macOS.SecCertificateCreateWithData(c.Raw)
sc, err := macos.SecCertificateCreateWithData(c.Raw)
if err != nil {
return nil, err
}
macOS.CFArrayAppendValue(certs, sc)
macos.CFArrayAppendValue(certs, sc)
}
}
policies := macOS.CFArrayCreateMutable()
defer macOS.ReleaseCFArray(policies)
sslPolicy, err := macOS.SecPolicyCreateSSL(opts.DNSName)
policies := macos.CFArrayCreateMutable()
defer macos.ReleaseCFArray(policies)
sslPolicy, err := macos.SecPolicyCreateSSL(opts.DNSName)
if err != nil {
return nil, err
}
macOS.CFArrayAppendValue(policies, sslPolicy)
macos.CFArrayAppendValue(policies, sslPolicy)
trustObj, err := macOS.SecTrustCreateWithCertificates(certs, policies)
trustObj, err := macos.SecTrustCreateWithCertificates(certs, policies)
if err != nil {
return nil, err
}
defer macOS.CFRelease(trustObj)
defer macos.CFRelease(trustObj)
if !opts.CurrentTime.IsZero() {
dateRef := macOS.TimeToCFDateRef(opts.CurrentTime)
defer macOS.CFRelease(dateRef)
if err := macOS.SecTrustSetVerifyDate(trustObj, dateRef); err != nil {
dateRef := macos.TimeToCFDateRef(opts.CurrentTime)
defer macos.CFRelease(dateRef)
if err := macos.SecTrustSetVerifyDate(trustObj, dateRef); err != nil {
return nil, err
}
}

@@ -59,13 +59,13 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
// always enforce its SCT requirements, and there are still _some_ people
// using TLS or OCSP for that.
if ret, err := macOS.SecTrustEvaluateWithError(trustObj); err != nil {
if ret, err := macos.SecTrustEvaluateWithError(trustObj); err != nil {
switch ret {
case macOS.ErrSecCertificateExpired:
case macos.ErrSecCertificateExpired:
return nil, CertificateInvalidError{c, Expired, err.Error()}
case macOS.ErrSecHostNameMismatch:
case macos.ErrSecHostNameMismatch:
return nil, HostnameError{c, opts.DNSName}
case macOS.ErrSecNotTrusted:
case macos.ErrSecNotTrusted:
return nil, UnknownAuthorityError{Cert: c}
default:
return nil, fmt.Errorf("x509: %s", err)

@@ -73,13 +73,13 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
}
chain := [][]*Certificate{{}}
chainRef, err := macOS.SecTrustCopyCertificateChain(trustObj)
chainRef, err := macos.SecTrustCopyCertificateChain(trustObj)
if err != nil {
return nil, err
}
defer macOS.CFRelease(chainRef)
for i := 0; i < macOS.CFArrayGetCount(chainRef); i++ {
certRef := macOS.CFArrayGetValueAtIndex(chainRef, i)
defer macos.CFRelease(chainRef)
for i := 0; i < macos.CFArrayGetCount(chainRef); i++ {
certRef := macos.CFArrayGetValueAtIndex(chainRef, i)
cert, err := exportCertificate(certRef)
if err != nil {
return nil, err

@@ -88,7 +88,7 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
}
if len(chain[0]) == 0 {
// This should _never_ happen, but to be safe
return nil, errors.New("x509: macOS certificate verification internal error")
return nil, errors.New("x509: macos certificate verification internal error")
}
if opts.DNSName != "" {

@@ -118,8 +118,8 @@ func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate
}
// exportCertificate returns a *Certificate for a SecCertificateRef.
func exportCertificate(cert macOS.CFRef) (*Certificate, error) {
data, err := macOS.SecCertificateCopyData(cert)
func exportCertificate(cert macos.CFRef) (*Certificate, error) {
data, err := macos.SecCertificateCopyData(cert)
if err != nil {
return nil, err
}
@@ -465,7 +465,7 @@ func appendDurationISO8601(b []byte, d time.Duration) []byte {
}
// daysPerYear is the exact average number of days in a year according to
// the Gregorian calender, which has an extra day each year that is
// the Gregorian calendar, which has an extra day each year that is
// a multiple of 4, unless it is evenly divisible by 100 but not by 400.
// This does not take into account leap seconds, which are not deterministic.
const daysPerYear = 365.2425
@@ -1064,7 +1064,7 @@ type File struct {
Scope *Scope // package scope (this file only). Deprecated: see Object
Imports []*ImportSpec // imports in this file
Unresolved []*Ident // unresolved identifiers in this file. Deprecated: see Object
Comments []*CommentGroup // list of all comments in the source file
Comments []*CommentGroup // comments in the file, in lexical order
GoVersion string // minimum Go version required by //go:build or // +build directives
}
@@ -345,30 +345,29 @@ func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
// expectSemi consumes a semicolon and returns the applicable line comment.
func (p *parser) expectSemi() (comment *ast.CommentGroup) {
// semicolon is optional before a closing ')' or '}'
if p.tok != token.RPAREN && p.tok != token.RBRACE {
switch p.tok {
case token.COMMA:
// permit a ',' instead of a ';' but complain
p.errorExpected(p.pos, "';'")
fallthrough
case token.SEMICOLON:
if p.lit == ";" {
// explicit semicolon
p.next()
comment = p.lineComment // use following comments
} else {
// artificial semicolon
comment = p.lineComment // use preceding comments
p.next()
}
return comment
default:
p.errorExpected(p.pos, "';'")
p.advance(stmtStart)
switch p.tok {
case token.RPAREN, token.RBRACE:
return nil // semicolon is optional before a closing ')' or '}'
case token.COMMA:
// permit a ',' instead of a ';' but complain
p.errorExpected(p.pos, "';'")
fallthrough
case token.SEMICOLON:
if p.lit == ";" {
// explicit semicolon
p.next()
comment = p.lineComment // use following comments
} else {
// artificial semicolon
comment = p.lineComment // use preceding comments
p.next()
}
return comment
default:
p.errorExpected(p.pos, "';'")
p.advance(stmtStart)
return nil
}
return nil
}
func (p *parser) atComma(context string, follow token.Token) bool {
@@ -896,3 +896,53 @@ func test() {
t.Fatalf("unexpected f.Comments got:\n%v\nwant:\n%v", got.String(), want.String())
}
}

// TestBothLineAndLeadComment makes sure that we populate the
// p.lineComment field even though there is a comment after the
// line comment.
func TestBothLineAndLeadComment(t *testing.T) {
const src = `package test

var _ int; /* line comment */
// Doc comment
func _() {}

var _ int; /* line comment */
// Some comment

func _() {}
`
fset := token.NewFileSet()
f, _ := ParseFile(fset, "", src, ParseComments|SkipObjectResolution)
lineComment := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Comment
docComment := f.Decls[1].(*ast.FuncDecl).Doc
if lineComment == nil {
t.Fatal("missing line comment")
}
if docComment == nil {
t.Fatal("missing doc comment")
}
if lineComment.List[0].Text != "/* line comment */" {
t.Errorf(`unexpected line comment got = %q; want "/* line comment */"`, lineComment.List[0].Text)
}
if docComment.List[0].Text != "// Doc comment" {
t.Errorf(`unexpected line comment got = %q; want "// Doc comment"`, docComment.List[0].Text)
}
lineComment2 := f.Decls[2].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Comment
if lineComment2 == nil {
t.Fatal("missing line comment")
}
if lineComment.List[0].Text != "/* line comment */" {
t.Errorf(`unexpected line comment got = %q; want "/* line comment */"`, lineComment.List[0].Text)
}
docComment2 := f.Decls[3].(*ast.FuncDecl).Doc
if docComment2 != nil {
t.Errorf("unexpected doc comment %v", docComment2)
}
}
@@ -79,10 +79,11 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix")
baseline := goexperiment.Flags{
RegabiWrappers: regabiSupported,
RegabiArgs: regabiSupported,
SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged
Dwarf5: dwarf5Supported,
RegabiWrappers: regabiSupported,
RegabiArgs: regabiSupported,
SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged
Dwarf5: dwarf5Supported,
RandomizedHeapBase64: true,
}
// Start with the statically enabled set of experiments.
@@ -28,6 +28,8 @@ or index-value pairs.
Yield returns true if the iterator should continue with the next
element in the sequence, false if it should stop.

Yield panics if called after it returns false.

For instance, [maps.Keys] returns an iterator that produces the sequence
of keys of the map m, implemented as follows:
@@ -31,12 +31,19 @@ import (
const testMessage = "Test logging, but use a somewhat realistic message length."

type event struct {
ID string
Index int
Flag bool
}

var (
testTime = time.Date(2022, time.May, 1, 0, 0, 0, 0, time.UTC)
testString = "7e3b3b2aaeff56a7108fe11e154200dd/7819479873059528190"
testInt = 32768
testDuration = 23 * time.Second
testError = errors.New("fail")
testEvent = event{"abcdefgh", 65536, true}
)

var testAttrs = []slog.Attr{
@@ -80,12 +80,12 @@ func BenchmarkAttrs(b *testing.B) {
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
)
},
},

@@ -103,37 +103,37 @@ func BenchmarkAttrs(b *testing.B) {
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
slog.String("string", testString),
slog.Int("status", testInt),
slog.Duration("duration", testDuration),
slog.Time("time", testTime),
slog.Any("error", testError),
slog.Any("event", testEvent),
)
},
},
@@ -137,15 +137,40 @@ func appendJSONValue(s *handleState, v Value) error {
return nil
}

func appendJSONMarshal(buf *buffer.Buffer, v any) error {
type jsonEncoder struct {
buf *bytes.Buffer
// Use a json.Encoder to avoid escaping HTML.
var bb bytes.Buffer
enc := json.NewEncoder(&bb)
enc.SetEscapeHTML(false)
if err := enc.Encode(v); err != nil {
json *json.Encoder
}

var jsonEncoderPool = &sync.Pool{
New: func() any {
enc := &jsonEncoder{
buf: new(bytes.Buffer),
}
enc.json = json.NewEncoder(enc.buf)
enc.json.SetEscapeHTML(false)
return enc
},
}

func appendJSONMarshal(buf *buffer.Buffer, v any) error {
j := jsonEncoderPool.Get().(*jsonEncoder)
defer func() {
// To reduce peak allocation, return only smaller buffers to the pool.
const maxBufferSize = 16 << 10
if j.buf.Cap() > maxBufferSize {
return
}
j.buf.Reset()
jsonEncoderPool.Put(j)
}()
if err := j.json.Encode(v); err != nil {
return err
}
bs := bb.Bytes()
bs := j.buf.Bytes()
buf.Write(bs[:len(bs)-1]) // remove final newline
return nil
}
@@ -142,6 +142,39 @@ func jsonValueString(v Value) string {
return string(buf)
}

func TestJSONAllocs(t *testing.T) {
ctx := t.Context()
l := New(NewJSONHandler(io.Discard, &HandlerOptions{}))
testErr := errors.New("an error occurred")
testEvent := struct {
ID int
Scope string
Enabled bool
}{
123456, "abcdefgh", true,
}
t.Run("message", func(t *testing.T) {
wantAllocs(t, 0, func() {
l.LogAttrs(ctx, LevelInfo,
"hello world",
)
})
})
t.Run("attrs", func(t *testing.T) {
wantAllocs(t, 1, func() {
l.LogAttrs(ctx, LevelInfo,
"hello world",
String("component", "subtest"),
Int("id", 67890),
Bool("flag", true),
Any("error", testErr),
Any("event", testEvent),
)
})
})
}

func BenchmarkJSONHandler(b *testing.B) {
for _, bench := range []struct {
name string
@@ -61,7 +61,11 @@ func (l Level) String() string {
if val == 0 {
return base
}
return fmt.Sprintf("%s%+d", base, val)
sval := strconv.Itoa(int(val))
if val > 0 {
sval = "+" + sval
}
return base + sval
}
switch {
@@ -215,3 +215,25 @@ func TestLevelVarString(t *testing.T) {
t.Errorf("got %q, want %q", got, want)
}
}

func BenchmarkLevelString(b *testing.B) {
levels := []Level{
0,
LevelError,
LevelError + 2,
LevelError - 2,
LevelWarn,
LevelWarn - 1,
LevelInfo,
LevelInfo + 1,
LevelInfo - 3,
LevelDebug,
LevelDebug - 2,
}
b.ResetTimer()
for b.Loop() {
for _, level := range levels {
_ = level.String()
}
}
}
@@ -82,6 +82,9 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (rsa sysc
defer fd.pfd.SetWriteDeadline(noDeadline)
}
// Load the hook function synchronously to prevent a race
// with test code that restores the old value.
testHookCanceledDial := testHookCanceledDial
stop := context.AfterFunc(ctx, func() {
// Force the runtime's poller to immediately give up
// waiting for writability, unblocking waitWrite
@@ -1382,7 +1382,10 @@ func (w *wantConn) cancel(t *Transport) {
w.done = true
w.mu.Unlock()
if pc != nil {
// HTTP/2 connections (pc.alt != nil) aren't removed from the idle pool on use,
// and should not be added back here. If the pconn isn't in the idle pool,
// it's because we removed it due to an error.
if pc != nil && pc.alt == nil {
t.putOrCloseIdleConn(pc)
}
}
@@ -7625,3 +7625,35 @@ func TestTransportServerProtocols(t *testing.T) {
})
}
}

func TestIssue61474(t *testing.T) {
run(t, testIssue61474, []testMode{http2Mode})
}
func testIssue61474(t *testing.T, mode testMode) {
if testing.Short() {
return
}
// This test reliably exercises the condition causing #61474,
// but requires many iterations to do so.
// Keep the test around for now, but don't run it by default.
t.Skip("test is too large")
cst := newClientServerTest(t, mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
}), func(tr *Transport) {
tr.MaxConnsPerHost = 1
})
var wg sync.WaitGroup
defer wg.Wait()
for range 100000 {
wg.Go(func() {
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Millisecond)
defer cancel()
req, _ := NewRequestWithContext(ctx, "GET", cst.ts.URL, nil)
resp, err := cst.c.Do(req)
if err == nil {
resp.Body.Close()
}
})
}
}
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris) && cgo && !osusergo
//go:build (cgo || darwin) && !osusergo && unix && !android
package user
@@ -16,8 +16,8 @@ func evalSymlinks(path string) (string, error) {
// Check validity of path
_, err := os.Lstat(path)
if err != nil {
// Return the same error value as on other operating systems
if strings.HasSuffix(err.Error(), "not a directory") {
// Return the same error value as on other operating systems.
if strings.Contains(err.Error(), "not a directory") {
err = syscall.ENOTDIR
}
return "", err

73 src/runtime/_mkmalloc/astutil/clone.go (new file)
@@ -0,0 +1,73 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file is a copy of golang.org/x/tools/internal/astutil/clone.go

package astutil

import (
"go/ast"
"reflect"
)

// CloneNode returns a deep copy of a Node.
// It omits pointers to ast.{Scope,Object} variables.
func CloneNode[T ast.Node](n T) T {
return cloneNode(n).(T)
}

func cloneNode(n ast.Node) ast.Node {
var clone func(x reflect.Value) reflect.Value
set := func(dst, src reflect.Value) {
src = clone(src)
if src.IsValid() {
dst.Set(src)
}
}
clone = func(x reflect.Value) reflect.Value {
switch x.Kind() {
case reflect.Pointer:
if x.IsNil() {
return x
}
// Skip fields of types potentially involved in cycles.
switch x.Interface().(type) {
case *ast.Object, *ast.Scope:
return reflect.Zero(x.Type())
}
y := reflect.New(x.Type().Elem())
set(y.Elem(), x.Elem())
return y

case reflect.Struct:
y := reflect.New(x.Type()).Elem()
for i := 0; i < x.Type().NumField(); i++ {
set(y.Field(i), x.Field(i))
}
return y

case reflect.Slice:
if x.IsNil() {
return x
}
y := reflect.MakeSlice(x.Type(), x.Len(), x.Cap())
for i := 0; i < x.Len(); i++ {
set(y.Index(i), x.Index(i))
}
return y

case reflect.Interface:
y := reflect.New(x.Type()).Elem()
set(y, x.Elem())
return y

case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.UnsafePointer:
panic(x) // unreachable in AST

default:
return x // bool, string, number
}
}
return clone(reflect.ValueOf(n)).Interface().(ast.Node)
}
@@ -48,6 +48,7 @@ const (
	EINTR = C.EINTR
	EAGAIN = C.EAGAIN
	ENOMEM = C.ENOMEM
	ENOSYS = C.ENOSYS

	PROT_NONE = C.PROT_NONE
	PROT_READ = C.PROT_READ

@@ -37,6 +37,7 @@ const (
	EINTR = C.EINTR
	EAGAIN = C.EAGAIN
	ENOMEM = C.ENOMEM
	ENOSYS = C.ENOSYS

	PROT_NONE = C.PROT_NONE
	PROT_READ = C.PROT_READ
@@ -9,6 +9,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -136,16 +137,30 @@ type fpstate struct {
	anon0 [48]byte
}

type timespec struct {
// The timespec structs and types are defined in Linux in
// include/uapi/linux/time_types.h and include/uapi/asm-generic/posix_types.h.
type timespec32 struct {
	tv_sec int32
	tv_nsec int32
}

//go:nosplit
func (ts *timespec) setNsec(ns int64) {
func (ts *timespec32) setNsec(ns int64) {
	ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec)
}

type timespec struct {
	tv_sec int64
	tv_nsec int64
}

//go:nosplit
func (ts *timespec) setNsec(ns int64) {
	var newNS int32
	ts.tv_sec = int64(timediv(ns, 1e9, &newNS))
	ts.tv_nsec = int64(newNS)
}

type timeval struct {
	tv_sec int32
	tv_usec int32

@@ -223,8 +238,8 @@ type ucontext struct {
}

type itimerspec struct {
	it_interval timespec
	it_value timespec
	it_interval timespec32
	it_value timespec32
}

type itimerval struct {
@@ -9,6 +9,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -11,6 +11,7 @@ const (
	_EINTR = 0x4
	_ENOMEM = 0xc
	_EAGAIN = 0xb
	_ENOSYS = 0x26

	_PROT_NONE = 0
	_PROT_READ = 0x1

@@ -95,16 +96,30 @@ const (
	_SOCK_DGRAM = 0x2
)

type timespec struct {
// The timespec structs and types are defined in Linux in
// include/uapi/linux/time_types.h and include/uapi/asm-generic/posix_types.h.
type timespec32 struct {
	tv_sec int32
	tv_nsec int32
}

//go:nosplit
func (ts *timespec) setNsec(ns int64) {
func (ts *timespec32) setNsec(ns int64) {
	ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec)
}

type timespec struct {
	tv_sec int64
	tv_nsec int64
}

//go:nosplit
func (ts *timespec) setNsec(ns int64) {
	var newNS int32
	ts.tv_sec = int64(timediv(ns, 1e9, &newNS))
	ts.tv_nsec = int64(newNS)
}

type stackt struct {
	ss_sp *byte
	ss_flags int32

@@ -155,8 +170,8 @@ func (tv *timeval) set_usec(x int32) {
}

type itimerspec struct {
	it_interval timespec
	it_value timespec
	it_interval timespec32
	it_value timespec32
}

type itimerval struct {
@@ -9,6 +9,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -10,6 +10,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -12,6 +12,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -12,6 +12,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1
@@ -93,16 +94,30 @@ const (
	_SIGEV_THREAD_ID = 0x4
)

type timespec struct {
// The timespec structs and types are defined in Linux in
// include/uapi/linux/time_types.h and include/uapi/asm-generic/posix_types.h.
type timespec32 struct {
	tv_sec int32
	tv_nsec int32
}

//go:nosplit
func (ts *timespec) setNsec(ns int64) {
func (ts *timespec32) setNsec(ns int64) {
	ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec)
}

type timespec struct {
	tv_sec int64
	tv_nsec int64
}

//go:nosplit
func (ts *timespec) setNsec(ns int64) {
	var newNS int32
	ts.tv_sec = int64(timediv(ns, 1e9, &newNS))
	ts.tv_nsec = int64(newNS)
}

type timeval struct {
	tv_sec int32
	tv_usec int32

@@ -138,8 +153,8 @@ type siginfo struct {
}

type itimerspec struct {
	it_interval timespec
	it_value timespec
	it_interval timespec32
	it_value timespec32
}

type itimerval struct {
@@ -9,6 +9,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -9,6 +9,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -10,6 +10,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1

@@ -10,6 +10,7 @@ const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	_PROT_NONE = 0x0
	_PROT_READ = 0x1
@@ -9,7 +9,6 @@ package runtime
import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"

@@ -1122,8 +1121,6 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
		// Lock so that we can safely access the bitmap.
		lock(&mheap_.lock)

		heapBase := mheap_.pages.inUse.ranges[0].base.addr()
		secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)

@@ -1140,14 +1137,6 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					// When goexperiment.RandomizedHeapBase64 is set we use a
					// series of padding pages to generate randomized heap base
					// address which have both the alloc and scav bits set. If
					// we see this for a chunk between the address of the heap
					// base, and the address of the second arena continue.
					if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
						continue
					}
					ok = false
					if n >= len(mismatches) {
						break chunkLoop

@@ -1165,6 +1154,37 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {

		getg().m.mallocing--
	})

	if randomizeHeapBase && len(mismatches) > 0 {
		// When goexperiment.RandomizedHeapBase64 is set we use a series of
		// padding pages to generate randomized heap base address which have
		// both the alloc and scav bits set. Because of this we expect exactly
		// one arena will have mismatches, so check for that explicitly and
		// remove the mismatches if that property holds. If we see more than one
		// arena with this property, that is an indication something has
		// actually gone wrong, so return the mismatches.
		//
		// We do this, instead of ignoring the mismatches in the chunkLoop, because
		// it's not easy to determine which arena we added the padding pages to
		// programmatically, without explicitly recording the base address somewhere
		// in a global variable (which we'd rather not do as the address of that variable
		// is likely to be somewhat predictable, potentially defeating the purpose
		// of our randomization).
		affectedArenas := map[arenaIdx]bool{}
		for _, mismatch := range mismatches {
			if mismatch.Base > 0 {
				affectedArenas[arenaIndex(mismatch.Base)] = true
			}
		}
		if len(affectedArenas) == 1 {
			ok = true
			// zero the mismatches
			for i := range n {
				mismatches[i] = BitsMismatch{}
			}
		}
	}

	return
}
@@ -349,7 +349,7 @@ const (

	// randomizeHeapBase indicates if the heap base address should be randomized.
	// See comment in mallocinit for how the randomization is performed.
	randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform
	randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform && !raceenabled && !msanenabled && !asanenabled

	// randHeapBasePrefixMask is used to extract the top byte of the randomized
	// heap base address.

@@ -714,6 +714,26 @@ func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
}

func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
	if header == nil {
		// This nil check and throw is almost pointless. Normally we would
		// expect header to never be nil. However, this is called on potentially
		// freshly-allocated virtual memory. As of 2025, the compiler-inserted
		// nil check is not a branch but a memory read that we expect to fault
		// if the pointer really is nil.
		//
		// However, this causes a read of the page, and operating systems may
		// take it as a hint to back the accessed memory with a read-only zero
		// page. However, we immediately write to this memory, which can then
		// force operating systems to have to update the page table and flush
		// the TLB.
		//
		// This nil check is thus an explicit branch instead of what the compiler
		// would insert circa 2025, which is a memory read instruction.
		//
		// See go.dev/issue/74375 for details of a similar issue in
		// spanInlineMarkBits.
		throw("runtime: pointer to heap type header nil?")
	}
	*header = typ
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, header, span)
@@ -1760,8 +1760,6 @@ func TestReadMetricsSched(t *testing.T) {
		metrics.Read(s[:])
		return s[notInGo].Value.Uint64() >= count
	})

	metrics.Read(s[:])
	logMetrics(t, s[:])
	check(t, &s[notInGo], count, count+generalSlack)

@@ -1782,8 +1780,6 @@ func TestReadMetricsSched(t *testing.T) {
		metrics.Read(s[:])
		return s[waiting].Value.Uint64() >= waitingCount
	})

	metrics.Read(s[:])
	logMetrics(t, s[:])
	check(t, &s[waiting], waitingCount, waitingCount+waitingSlack)
@@ -40,9 +40,6 @@ type mOS struct {
	waitsema uint32 // semaphore for parking on locks
}

//go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32

// Linux futex.
//
// futexsleep(uint32 *addr, uint32 val)

@@ -79,7 +76,7 @@ func futexsleep(addr *uint32, val uint32, ns int64) {

	var ts timespec
	ts.setNsec(ns)
	futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
	futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, &ts, nil, 0)
}

// If any procs are sleeping on addr, wake up at most cnt.
40  src/runtime/os_linux_futex32.go  Normal file
@@ -0,0 +1,40 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux && (386 || arm || mips || mipsle || ppc)

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

//go:noescape
func futex_time32(addr unsafe.Pointer, op int32, val uint32, ts *timespec32, addr2 unsafe.Pointer, val3 uint32) int32

//go:noescape
func futex_time64(addr unsafe.Pointer, op int32, val uint32, ts *timespec, addr2 unsafe.Pointer, val3 uint32) int32

var is32bitOnly atomic.Bool

//go:nosplit
func futex(addr unsafe.Pointer, op int32, val uint32, ts *timespec, addr2 unsafe.Pointer, val3 uint32) int32 {
	if !is32bitOnly.Load() {
		ret := futex_time64(addr, op, val, ts, addr2, val3)
		// futex_time64 is only supported on Linux 5.0+
		if ret != -_ENOSYS {
			return ret
		}
		is32bitOnly.Store(true)
	}
	// Downgrade ts.
	var ts32 timespec32
	var pts32 *timespec32
	if ts != nil {
		ts32.setNsec(ts.tv_sec*1e9 + ts.tv_nsec)
		pts32 = &ts32
	}
	return futex_time32(addr, op, val, pts32, addr2, val3)
}
14  src/runtime/os_linux_futex64.go  Normal file
@@ -0,0 +1,14 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux && !(386 || arm || mips || mipsle || ppc || s390)

package runtime

import (
	"unsafe"
)

//go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts *timespec, addr2 unsafe.Pointer, val3 uint32) int32
@@ -48,6 +48,7 @@
#define SYS_madvise 219
#define SYS_gettid 224
#define SYS_futex 240
#define SYS_futex_time64 422
#define SYS_sched_getaffinity 242
#define SYS_set_thread_area 243
#define SYS_exit_group 252

@@ -532,10 +533,26 @@ TEXT runtime·madvise(SB),NOSPLIT,$0
	MOVL AX, ret+12(FP)
	RET

// Linux: kernel/futex/syscalls.c, requiring COMPAT_32BIT_TIME
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct old_timespec32 *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex_time32(SB),NOSPLIT,$0
	MOVL $SYS_futex, AX
	MOVL addr+0(FP), BX
	MOVL op+4(FP), CX
	MOVL val+8(FP), DX
	MOVL ts+12(FP), SI
	MOVL addr2+16(FP), DI
	MOVL val3+20(FP), BP
	INVOKE_SYSCALL
	MOVL AX, ret+24(FP)
	RET

// Linux: kernel/futex/syscalls.c
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex(SB),NOSPLIT,$0
	MOVL $SYS_futex, AX
TEXT runtime·futex_time64(SB),NOSPLIT,$0
	MOVL $SYS_futex_time64, AX
	MOVL addr+0(FP), BX
	MOVL op+4(FP), CX
	MOVL val+8(FP), DX
@@ -30,6 +30,7 @@
#define SYS_sigaltstack (SYS_BASE + 186)
#define SYS_mmap2 (SYS_BASE + 192)
#define SYS_futex (SYS_BASE + 240)
#define SYS_futex_time64 (SYS_BASE + 422)
#define SYS_exit_group (SYS_BASE + 248)
#define SYS_munmap (SYS_BASE + 91)
#define SYS_madvise (SYS_BASE + 220)

@@ -403,9 +404,10 @@ finish:

	RET

// Linux: kernel/futex/syscalls.c, requiring COMPAT_32BIT_TIME
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex(SB),NOSPLIT,$0
// struct old_timespec32 *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex_time32(SB),NOSPLIT,$0
	MOVW addr+0(FP), R0
	MOVW op+4(FP), R1
	MOVW val+8(FP), R2

@@ -417,6 +419,21 @@ TEXT runtime·futex(SB),NOSPLIT,$0
	MOVW R0, ret+24(FP)
	RET

// Linux: kernel/futex/syscalls.c
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex_time64(SB),NOSPLIT,$0
	MOVW addr+0(FP), R0
	MOVW op+4(FP), R1
	MOVW val+8(FP), R2
	MOVW ts+12(FP), R3
	MOVW addr2+16(FP), R4
	MOVW val3+20(FP), R5
	MOVW $SYS_futex_time64, R7
	SWI $0
	MOVW R0, ret+24(FP)
	RET

// int32 clone(int32 flags, void *stack, M *mp, G *gp, void (*fn)(void));
TEXT runtime·clone(SB),NOSPLIT,$0
	MOVW flags+0(FP), R0
@@ -34,6 +34,7 @@
#define SYS_mincore 4217
#define SYS_gettid 4222
#define SYS_futex 4238
#define SYS_futex_time64 4422
#define SYS_sched_getaffinity 4240
#define SYS_exit_group 4246
#define SYS_timer_create 4257

@@ -362,8 +363,10 @@ TEXT runtime·madvise(SB),NOSPLIT,$0-16
	MOVW R2, ret+12(FP)
	RET

// int32 futex(int32 *uaddr, int32 op, int32 val, struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex(SB),NOSPLIT,$20-28
// Linux: kernel/futex/syscalls.c, requiring COMPAT_32BIT_TIME
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct old_timespec32 *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex_time32(SB),NOSPLIT,$20-28
	MOVW addr+0(FP), R4
	MOVW op+4(FP), R5
	MOVW val+8(FP), R6

@@ -382,6 +385,27 @@ TEXT runtime·futex(SB),NOSPLIT,$20-28
	MOVW R2, ret+24(FP)
	RET

// Linux: kernel/futex/syscalls.c
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex_time64(SB),NOSPLIT,$20-28
	MOVW addr+0(FP), R4
	MOVW op+4(FP), R5
	MOVW val+8(FP), R6
	MOVW ts+12(FP), R7

	MOVW addr2+16(FP), R8
	MOVW val3+20(FP), R9

	MOVW R8, 16(R29)
	MOVW R9, 20(R29)

	MOVW $SYS_futex_time64, R2
	SYSCALL
	BEQ R7, 2(PC)
	SUBU R2, R0, R2 // caller expects negative errno
	MOVW R2, ret+24(FP)
	RET

// int32 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void));
TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0-24
@@ -43,7 +43,7 @@ func TestUnsafePoint(t *testing.T) {
	cmd := exec.Command(testenv.GoToolPath(t), "tool", "objdump", "-s", "setGlobalPointer", os.Args[0])
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("can't objdump %v", err)
		t.Fatalf("can't objdump %v:\n%s", err, out)
	}
	lines := strings.Split(string(out), "\n")[1:]
@@ -433,7 +433,7 @@ func Fields(s string) []string {
// FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c)
// and returns an array of slices of s. If all code points in s satisfy f(c) or the
// string is empty, an empty slice is returned. Every element of the returned slice is
// non-empty. Unlike [SplitFunc], leading and trailing runs of code points satisfying f(c)
// non-empty. Unlike [Split], leading and trailing runs of code points satisfying f(c)
// are discarded.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)
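(Illustrative aside, not part of the merged diff: the doc fix above only corrects a link target; the documented behavior of FieldsFunc is unchanged. A small self-contained sketch of the difference the comment calls out, using only the standard library:)

package main

import (
	"fmt"
	"strings"
	"unicode"
)

func main() {
	sep := func(c rune) bool { return !unicode.IsLetter(c) && !unicode.IsNumber(c) }
	// FieldsFunc drops leading and trailing runs of separators...
	fmt.Printf("%q\n", strings.FieldsFunc("  foo1;bar2,baz3...", sep)) // ["foo1" "bar2" "baz3"]
	// ...while Split keeps the resulting empty fields.
	fmt.Printf("%q\n", strings.Split(",a,b,", ",")) // ["" "a" "b" ""]
}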
@@ -163,23 +163,23 @@ func (f *F) Add(args ...any) {

// supportedTypes represents all of the supported types which can be fuzzed.
var supportedTypes = map[reflect.Type]bool{
	reflect.TypeOf(([]byte)("")): true,
	reflect.TypeOf((string)("")): true,
	reflect.TypeOf((bool)(false)): true,
	reflect.TypeOf((byte)(0)): true,
	reflect.TypeOf((rune)(0)): true,
	reflect.TypeOf((float32)(0)): true,
	reflect.TypeOf((float64)(0)): true,
	reflect.TypeOf((int)(0)): true,
	reflect.TypeOf((int8)(0)): true,
	reflect.TypeOf((int16)(0)): true,
	reflect.TypeOf((int32)(0)): true,
	reflect.TypeOf((int64)(0)): true,
	reflect.TypeOf((uint)(0)): true,
	reflect.TypeOf((uint8)(0)): true,
	reflect.TypeOf((uint16)(0)): true,
	reflect.TypeOf((uint32)(0)): true,
	reflect.TypeOf((uint64)(0)): true,
	reflect.TypeFor[[]byte](): true,
	reflect.TypeFor[string](): true,
	reflect.TypeFor[bool](): true,
	reflect.TypeFor[byte](): true,
	reflect.TypeFor[rune](): true,
	reflect.TypeFor[float32](): true,
	reflect.TypeFor[float64](): true,
	reflect.TypeFor[int](): true,
	reflect.TypeFor[int8](): true,
	reflect.TypeFor[int16](): true,
	reflect.TypeFor[int32](): true,
	reflect.TypeFor[int64](): true,
	reflect.TypeFor[uint](): true,
	reflect.TypeFor[uint8](): true,
	reflect.TypeFor[uint16](): true,
	reflect.TypeFor[uint32](): true,
	reflect.TypeFor[uint64](): true,
}

// Fuzz runs the fuzz function, ff, for fuzz testing. If ff fails for a set of
@@ -224,7 +224,7 @@ func (f *F) Fuzz(ff any) {
	if fnType.Kind() != reflect.Func {
		panic("testing: F.Fuzz must receive a function")
	}
	if fnType.NumIn() < 2 || fnType.In(0) != reflect.TypeOf((*T)(nil)) {
	if fnType.NumIn() < 2 || fnType.In(0) != reflect.TypeFor[*T]() {
		panic("testing: fuzz target must receive at least two arguments, where the first argument is a *T")
	}
	if fnType.NumOut() != 0 {

@@ -64,7 +64,7 @@ func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) {
// hint is used for shrinking as a function of indirection level so
// that recursive data structures will terminate.
func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value, ok bool) {
	if m, ok := reflect.Zero(t).Interface().(Generator); ok {
	if m, ok := reflect.TypeAssert[Generator](reflect.Zero(t)); ok {
		return m.Generate(rand, size), true
	}
|
|||
// import "testing"
|
||||
//
|
||||
// func TestAbs(t *testing.T) {
|
||||
// got := Abs(-1)
|
||||
// got := abs(-1)
|
||||
// if got != 1 {
|
||||
// t.Errorf("Abs(-1) = %d; want 1", got)
|
||||
// t.Errorf("abs(-1) = %d; want 1", got)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
|
|
|
|||
|
|
@@ -1602,6 +1602,16 @@ func leadingFraction(s string) (x uint64, scale float64, rem string) {
	return x, scale, s[i:]
}

// parseDurationError describes a problem parsing a duration string.
type parseDurationError struct {
	message string
	value string
}

func (e *parseDurationError) Error() string {
	return "time: " + e.message + " " + quote(e.value)
}

var unitMap = map[string]uint64{
	"ns": uint64(Nanosecond),
	"us": uint64(Microsecond),

@@ -1637,7 +1647,7 @@ func ParseDuration(s string) (Duration, error) {
		return 0, nil
	}
	if s == "" {
		return 0, errors.New("time: invalid duration " + quote(orig))
		return 0, &parseDurationError{"invalid duration", orig}
	}
	for s != "" {
		var (

@@ -1649,13 +1659,13 @@ func ParseDuration(s string) (Duration, error) {

		// The next character must be [0-9.]
		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
			return 0, errors.New("time: invalid duration " + quote(orig))
			return 0, &parseDurationError{"invalid duration", orig}
		}
		// Consume [0-9]*
		pl := len(s)
		v, s, err = leadingInt(s)
		if err != nil {
			return 0, errors.New("time: invalid duration " + quote(orig))
			return 0, &parseDurationError{"invalid duration", orig}
		}
		pre := pl != len(s) // whether we consumed anything before a period

@@ -1669,7 +1679,7 @@ func ParseDuration(s string) (Duration, error) {
		}
		if !pre && !post {
			// no digits (e.g. ".s" or "-.s")
			return 0, errors.New("time: invalid duration " + quote(orig))
			return 0, &parseDurationError{"invalid duration", orig}
		}

		// Consume unit.

@@ -1681,17 +1691,17 @@ func ParseDuration(s string) (Duration, error) {
			}
		}
		if i == 0 {
			return 0, errors.New("time: missing unit in duration " + quote(orig))
			return 0, &parseDurationError{"missing unit in duration", orig}
		}
		u := s[:i]
		s = s[i:]
		unit, ok := unitMap[u]
		if !ok {
			return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
			return 0, &parseDurationError{"unknown unit " + quote(u) + " in duration", orig}
		}
		if v > 1<<63/unit {
			// overflow
			return 0, errors.New("time: invalid duration " + quote(orig))
			return 0, &parseDurationError{"invalid duration", orig}
		}
		v *= unit
		if f > 0 {

@@ -1700,19 +1710,19 @@ func ParseDuration(s string) (Duration, error) {
			v += uint64(float64(f) * (float64(unit) / scale))
			if v > 1<<63 {
				// overflow
				return 0, errors.New("time: invalid duration " + quote(orig))
				return 0, &parseDurationError{"invalid duration", orig}
			}
		}
		d += v
		if d > 1<<63 {
			return 0, errors.New("time: invalid duration " + quote(orig))
			return 0, &parseDurationError{"invalid duration", orig}
		}
	}
	if neg {
		return -Duration(d), nil
	}
	if d > 1<<63-1 {
		return 0, errors.New("time: invalid duration " + quote(orig))
		return 0, &parseDurationError{"invalid duration", orig}
	}
	return Duration(d), nil
}
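(Illustrative aside, not part of the merged diff: the change above only swaps errors.New for a small error struct, so the error text callers observe is unchanged. A quick self-contained check of that behavior:)

package main

import (
	"fmt"
	"time"
)

func main() {
	if _, err := time.ParseDuration("100"); err != nil {
		fmt.Println(err) // time: missing unit in duration "100"
	}
	d, err := time.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m0s <nil>
}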
@@ -1620,6 +1620,13 @@ func BenchmarkParseDuration(b *testing.B) {
	}
}

func BenchmarkParseDurationError(b *testing.B) {
	for i := 0; i < b.N; i++ {
		ParseDuration("9223372036854775810ns") // overflow
		ParseDuration("9007199254.740993") // missing unit
	}
}

func BenchmarkHour(b *testing.B) {
	t := Now()
	for i := 0; i < b.N; i++ {
@@ -148,11 +148,13 @@ func lshConst64x2Add(x int64) int64 {
}

func lshConst32x31Add(x int32) int32 {
	// loong64:-"SLL\t","MOVV\tR0"
	// riscv64:-"SLLI","MOV\t[$]0"
	return (x + x) << 31
}

func lshConst64x63Add(x int64) int64 {
	// loong64:-"SLLV","MOVV\tR0"
	// riscv64:-"SLLI","MOV\t[$]0"
	return (x + x) << 63
}
@@ -30,3 +30,9 @@ func f6(p, q *T) {
func f8(t *struct{ b [8]int }) struct{ b [8]int } {
	return *t // ERROR "removed nil check"
}

// nil check is removed for pointer write (which involves a
// write barrier).
func f9(x **int, y *int) {
	*x = y // ERROR "removed nil check"
}

@@ -30,3 +30,9 @@ func f6(p, q *T) {
func f8(t *[8]int) [8]int {
	return *t // ERROR "generated nil check"
}

// On AIX, a write nil check is removed, but a read nil check
// remains (for the write barrier).
func f9(x **int, y *int) {
	*x = y // ERROR "generated nil check" "removed nil check"
}

@@ -30,3 +30,8 @@ func f6(p, q *T) {
func f8(t *[8]int) [8]int {
	return *t // ERROR "generated nil check"
}

// nil check is not removed on Wasm.
func f9(x **int, y *int) {
	*x = y // ERROR "generated nil check"
}