runtime/secret: implement new secret package

Implement secret.Do.

- When secret.Do returns:
  - Clear stack that is used by the argument function.
  - Clear all the registers that might contain secrets.
- On stack growth in secret mode, clear the old stack.
- When objects are allocated in secret mode, mark them and then zero
  the marked objects immediately when they are freed.
- If the argument function panics, raise that panic as if it originated
  from secret.Do. This removes anything about the secret function
  from tracebacks.

For now, this is only implemented on linux for arm64 and amd64.

This is a rebased version of Keith Randall's initial implementation at
CL 600635. I have added arm64 support, signal handling, preemption
handling and dealt with vDSOs spilling into system stacks.

Fixes #21865

Change-Id: I6fbd5a233beeaceb160785e0c0199a5c94d8e520
Co-authored-by: Keith Randall <khr@golang.org>
Reviewed-on: https://go-review.googlesource.com/c/go/+/704615
Reviewed-by: Roland Shoemaker <roland@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Filippo Valsorda <filippo@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
This commit is contained in:
Daniel Morsing 2025-09-25 17:26:03 +01:00 committed by Gopher Robot
parent 0c747b7aa7
commit a3fb92a710
42 changed files with 3170 additions and 21 deletions

View file

@ -0,0 +1,20 @@
### New secret package
<!-- https://go.dev/issue/21865 -->
The new [secret](/pkg/runtime/secret) package is available as an experiment.
It provides a facility for securely erasing temporaries used in
code that manipulates secret information, typically cryptographic in nature.
Users can access it by passing `GOEXPERIMENT=runtimesecret` at build time.
<!-- if we land any code that uses runtimesecret for forward secrecy
like crypto/tls, mention them here too -->
The secret.Do function runs its function argument and then erases all
temporary storage (registers, stack, new heap allocations) used by
that function argument. Heap storage is not erased until that storage
is deemed unreachable by the garbage collector, which might take some
time after secret.Do completes.
This package is intended to make it easier to ensure [forward
secrecy](https://en.wikipedia.org/wiki/Forward_secrecy).

View file

@ -753,6 +753,15 @@ func (t *tester) registerTests() {
})
}
// Test GOEXPERIMENT=runtimesecret.
if !strings.Contains(goexperiment, "runtimesecret") {
t.registerTest("GOEXPERIMENT=runtimesecret go test runtime/secret/...", &goTest{
variant: "runtimesecret",
env: []string{"GOEXPERIMENT=runtimesecret"},
pkg: "runtime/secret/...",
})
}
// Test ios/amd64 for the iOS simulator.
if goos == "darwin" && goarch == "amd64" && t.cgoEnabled {
t.registerTest("GOOS=ios on darwin/amd64",

View file

@ -108,6 +108,7 @@ var depsRules = `
< internal/runtime/cgroup
< internal/runtime/gc/scan
< runtime
< runtime/secret
< sync/atomic
< internal/sync
< weak

View file

@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build !goexperiment.runtimesecret
package goexperiment
const RuntimeSecret = false
const RuntimeSecretInt = 0

View file

@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build goexperiment.runtimesecret
package goexperiment
const RuntimeSecret = true
const RuntimeSecretInt = 1

View file

@ -125,4 +125,7 @@ type Flags struct {
// SIMD enables the simd package and the compiler's handling
// of SIMD intrinsics.
SIMD bool
// RuntimeSecret enables the runtime/secret package.
RuntimeSecret bool
}

View file

@ -171,6 +171,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
{subBasicLit, "elemsize_", str(elemsize)},
{subBasicLit, "sizeclass_", str(sc)},
{subBasicLit, "noscanint_", str(noscan)},
{subBasicLit, "isTiny_", str(0)},
},
})
}
@ -198,6 +199,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
{subBasicLit, "sizeclass_", str(tinySizeClass)},
{subBasicLit, "size_", str(s)},
{subBasicLit, "noscanint_", str(noscan)},
{subBasicLit, "isTiny_", str(1)},
},
})
}
@ -215,6 +217,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
{subBasicLit, "elemsize_", str(elemsize)},
{subBasicLit, "sizeclass_", str(sc)},
{subBasicLit, "noscanint_", str(noscan)},
{subBasicLit, "isTiny_", str(0)},
},
})
}

View file

@ -456,6 +456,13 @@ TEXT gogo<>(SB), NOSPLIT, $0
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT, $0-8
#ifdef GOEXPERIMENT_runtimesecret
CMPL g_secret(R14), $0
JEQ nosecret
CALL ·secretEraseRegistersMcall(SB)
nosecret:
#endif
MOVQ AX, DX // DX = fn
// Save state in g->sched. The caller's SP and PC are restored by gogo to
@ -511,6 +518,17 @@ TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
// func systemstack(fn func())
TEXT runtime·systemstack(SB), NOSPLIT, $0-8
#ifdef GOEXPERIMENT_runtimesecret
// If in secret mode, erase registers on transition
// from G stack to M stack,
get_tls(CX)
MOVQ g(CX), AX
CMPL g_secret(AX), $0
JEQ nosecret
CALL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVQ fn+0(FP), DI // DI = fn
get_tls(CX)
MOVQ g(CX), AX // AX = g
@ -643,6 +661,18 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
MOVQ AX, (m_morebuf+gobuf_sp)(BX)
MOVQ DI, (m_morebuf+gobuf_g)(BX)
// If in secret mode, erase registers on transition
// from G stack to M stack,
#ifdef GOEXPERIMENT_runtimesecret
CMPL g_secret(DI), $0
JEQ nosecret
CALL ·secretEraseRegisters(SB)
get_tls(CX)
MOVQ g(CX), DI // DI = g
MOVQ g_m(DI), BX // BX = m
nosecret:
#endif
// Call newstack on m->g0's stack.
MOVQ m_g0(BX), BX
MOVQ BX, g(CX)
@ -917,11 +947,6 @@ TEXT ·asmcgocall_landingpad(SB),NOSPLIT,$0-0
// aligned appropriately for the gcc ABI.
// See cgocall.go for more details.
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
MOVQ fn+0(FP), AX
MOVQ arg+8(FP), BX
MOVQ SP, DX
// Figure out if we need to switch to m->g0 stack.
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already. Or we might already
@ -938,6 +963,21 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
CMPQ DI, SI
JEQ nosave
// Running on a user G
// Figure out if we're running secret code and clear the registers
// so that the C code we're about to call doesn't spill confidential
// information into memory
#ifdef GOEXPERIMENT_runtimesecret
CMPL g_secret(DI), $0
JEQ nosecret
CALL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVQ fn+0(FP), AX
MOVQ arg+8(FP), BX
MOVQ SP, DX
// Switch to system stack.
// The original frame pointer is stored in BP,
// which is useful for stack unwinding.
@ -976,6 +1016,10 @@ nosave:
// but then the only path through this code would be a rare case on Solaris.
// Using this code for all "already on system stack" calls exercises it more,
// which should help keep it correct.
MOVQ fn+0(FP), AX
MOVQ arg+8(FP), BX
MOVQ SP, DX
SUBQ $16, SP
ANDQ $~15, SP
MOVQ $0, 8(SP) // where above code stores g, in case someone looks during debugging

View file

@ -300,6 +300,17 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
#ifdef GOEXPERIMENT_runtimesecret
MOVW g_secret(g), R26
CBZ R26, nosecret
// Use R26 as a secondary link register
// We purposefully don't erase it in secretEraseRegistersMcall
MOVD LR, R26
BL runtime·secretEraseRegistersMcall(SB)
MOVD R26, LR
nosecret:
#endif
MOVD R0, R26 // context
// Save caller state in g->sched
@ -340,6 +351,13 @@ TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
// func systemstack(fn func())
TEXT runtime·systemstack(SB), NOSPLIT, $0-8
#ifdef GOEXPERIMENT_runtimesecret
MOVW g_secret(g), R3
CBZ R3, nosecret
BL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVD fn+0(FP), R3 // R3 = fn
MOVD R3, R26 // context
MOVD g_m(g), R4 // R4 = m
@ -469,6 +487,16 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
MOVD R0, (m_morebuf+gobuf_sp)(R8) // f's caller's RSP
MOVD g, (m_morebuf+gobuf_g)(R8)
// If in secret mode, erase registers on transition
// from G stack to M stack,
#ifdef GOEXPERIMENT_runtimesecret
MOVW g_secret(g), R4
CBZ R4, nosecret
BL ·secretEraseRegisters(SB)
MOVD g_m(g), R8
nosecret:
#endif
// Call newstack on m->g0's stack.
MOVD m_g0(R8), g
BL runtime·save_g(SB)
@ -1143,12 +1171,7 @@ TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
// aligned appropriately for the gcc ABI.
// See cgocall.go for more details.
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
MOVD fn+0(FP), R1
MOVD arg+8(FP), R0
MOVD RSP, R2 // save original stack pointer
CBZ g, nosave
MOVD g, R4
// Figure out if we need to switch to m->g0 stack.
// We get called to create new OS threads too, and those
@ -1162,6 +1185,23 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
CMP R3, g
BEQ nosave
// running on a user stack. Figure out if we're running
// secret code and clear our registers if so.
#ifdef GOEXPERIMENT_runtimesecret
MOVW g_secret(g), R5
CBZ R5, nosecret
BL ·secretEraseRegisters(SB)
// restore g0 back into R3
MOVD g_m(g), R3
MOVD m_g0(R3), R3
nosecret:
#endif
MOVD fn+0(FP), R1
MOVD arg+8(FP), R0
MOVD RSP, R2
MOVD g, R4
// Switch to system stack.
MOVD R0, R9 // gosave_systemstack_switch<> and save_g might clobber R0
BL gosave_systemstack_switch<>(SB)
@ -1208,7 +1248,10 @@ nosave:
// but then the only path through this code would be a rare case on Solaris.
// Using this code for all "already on system stack" calls exercises it more,
// which should help keep it correct.
MOVD RSP, R13
MOVD fn+0(FP), R1
MOVD arg+8(FP), R0
MOVD RSP, R2
MOVD R2, R13
SUB $16, R13
MOVD R13, RSP
MOVD $0, R4

View file

@ -1185,7 +1185,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
} else {
if size <= maxSmallSize-gc.MallocHeaderSize {
if typ == nil || !typ.Pointers() {
if size < maxTinySize {
// tiny allocations might be kept alive by other co-located values.
// Make sure secret allocations get zeroed by avoiding the tiny allocator
// See go.dev/issue/76356
gp := getg()
if size < maxTinySize && gp.secret == 0 {
x, elemsize = mallocgcTiny(size, typ)
} else {
x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
@ -1205,6 +1209,13 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Mark any object allocated while in secret mode as secret.
// This ensures we zero it immediately when freeing it.
addSecret(x)
}
// Notify sanitizers, if enabled.
if raceenabled {
racemalloc(x, size-asanRZ)

File diff suppressed because it is too large Load diff

View file

@ -22,6 +22,7 @@ package runtime
import (
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
)
@ -36,6 +37,7 @@ const elemsize_ = 8
const sizeclass_ = 0
const noscanint_ = 0
const size_ = 0
const isTiny_ = 0
func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
@ -55,6 +57,17 @@ func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// WARNING: mallocStub does not do any work for sanitizers so callers need
// to steer out of this codepath early if sanitizers are enabled.
func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Secret code needs to avoid the tiny allocator, since it might keep
// co-located values alive longer and prevent timely zeroing.
//
// Call directly into the NoScan allocator.
// See go.dev/issue/76356
const isTiny = isTiny_ == 1
gp := getg()
if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
@ -82,6 +95,12 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Actually do the allocation.
x, elemsize := inlinedMalloc(size, typ, needzero)
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Mark any object allocated while in secret mode as secret.
// This ensures we zero it immediately when freeing it.
addSecret(x)
}
// Notify valgrind, if enabled.
// To allow the compiler to not know about valgrind, we do valgrind instrumentation
// unlike the other sanitizers.

View file

@ -838,6 +838,33 @@ func gcStart(trigger gcTrigger) {
// Accumulate fine-grained stopping time.
work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
if goexperiment.RuntimeSecret {
// The world is stopped. Every M is either parked
// or in a syscall, or running some non-go code which can't run in secret mode.
// To get to a parked or a syscall state
// they have to transition through a point where we erase any
// confidential information in the registers. Making them
// handle a signal now would clobber the signal stack
// with non-confidential information.
//
// TODO(dmo): this is linear with respect to the number of Ms.
// Investigate just how long this takes and whether we can somehow
// loop over just the Ms that have secret info on their signal stack,
// or cooperatively have the Ms send signals to themselves just
// after they erase their registers, but before they enter a syscall
for mp := allm; mp != nil; mp = mp.alllink {
// even though the world is stopped, the kernel can still
// invoke our signal handlers. No confidential information can be spilled
// (because it's been erased by this time), but we can avoid
// sending additional signals by atomically inspecting this variable
if atomic.Xchg(&mp.signalSecret, 0) != 0 {
noopSignal(mp)
}
// TODO: synchronize with the signal handler to ensure that the signal
// was actually delivered.
}
}
// Finish sweep before we start concurrent scan.
systemstack(func() {
finishsweep_m()

View file

@ -225,6 +225,7 @@ type mheap struct {
specialPinCounterAlloc fixalloc // allocator for specialPinCounter
specialWeakHandleAlloc fixalloc // allocator for specialWeakHandle
specialBubbleAlloc fixalloc // allocator for specialBubble
specialSecretAlloc fixalloc // allocator for specialSecret
speciallock mutex // lock for special record allocators.
arenaHintAlloc fixalloc // allocator for arenaHints
@ -803,6 +804,7 @@ func (h *mheap) init() {
h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
h.specialSecretAlloc.init(unsafe.Sizeof(specialSecret{}), nil, nil, &memstats.other_sys)
h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys)
h.specialBubbleAlloc.init(unsafe.Sizeof(specialBubble{}), nil, nil, &memstats.other_sys)
h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
@ -1970,6 +1972,9 @@ const (
_KindSpecialCheckFinalizer = 8
// _KindSpecialBubble is used to associate objects with synctest bubbles.
_KindSpecialBubble = 9
// _KindSpecialSecret is a special used to mark an object
// as needing zeroing immediately upon freeing.
_KindSpecialSecret = 10
)
type special struct {
@ -2822,6 +2827,11 @@ func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
lock(&mheap_.speciallock)
mheap_.specialBubbleAlloc.free(unsafe.Pointer(st))
unlock(&mheap_.speciallock)
case _KindSpecialSecret:
memclrNoHeapPointers(p, size)
lock(&mheap_.speciallock)
mheap_.specialSecretAlloc.free(unsafe.Pointer(s))
unlock(&mheap_.speciallock)
default:
throw("bad special kind")
panic("not reached")

View file

@ -55,6 +55,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/stringslite"
)
@ -406,6 +407,22 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
return false, 0
}
// If we're in the middle of a secret computation, we can't
// allow any conservative scanning of stacks, as that may lead
// to secrets leaking out from the stack into work buffers.
// Additionally, the preemption code will store the
// machine state (including registers which may contain confidential
// information) into the preemption buffers.
//
// TODO(dmo): there's technically nothing stopping us from doing the
// preemption, granted that we don't conservatively scan and we clean up after
// ourselves. This is made slightly harder by the xRegs cached allocations
// that can move between Gs and Ps. In any case, the intended users (cryptography code)
// are unlikely to get stuck in non-terminating loops.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return false, 0
}
// Check if PC is an unsafe-point.
f := findfunc(pc)
if !f.valid() {

View file

@ -8,6 +8,7 @@ import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/exithook"
@ -4454,6 +4455,13 @@ func goexit1() {
// goexit continuation on g0.
func goexit0(gp *g) {
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Erase the whole stack. This path only occurs when
// runtime.Goexit is called from within a runtime/secret.Do call.
memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
// Since this is running on g0, our registers are already zeroed from going through
// mcall in secret mode.
}
gdestroy(gp)
schedule()
}
@ -4482,6 +4490,7 @@ func gdestroy(gp *g) {
gp.timer = nil
gp.bubble = nil
gp.fipsOnlyBypass = false
gp.secret = 0
if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
// Flush assist credit to the global pool. This gives
@ -5216,6 +5225,10 @@ func malg(stacksize int32) *g {
// The compiler turns a go statement into a call to this.
func newproc(fn *funcval) {
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
panic("goroutine spawned while running in secret mode")
}
pc := sys.GetCallerPC()
systemstack(func() {
newg := newproc1(fn, gp, pc, false, waitReasonZero)

View file

@ -549,6 +549,7 @@ type g struct {
syncSafePoint bool // set if g is stopped at a synchronous safe point.
runningCleanups atomic.Bool
sig uint32
secret int32 // current nesting of runtime/secret.Do calls.
writebuf []byte
sigcode0 uintptr
sigcode1 uintptr
@ -620,14 +621,15 @@ type m struct {
// Fields whose offsets are not known to debuggers.
procid uint64 // for debuggers, but offset not hard-coded
gsignal *g // signal-handling g
goSigStack gsignalStack // Go-allocated signal handling stack
sigmask sigset // storage for saved signal mask
tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
mstartfn func()
curg *g // current running goroutine
caughtsig guintptr // goroutine running during fatal signal
procid uint64 // for debuggers, but offset not hard-coded
gsignal *g // signal-handling g
goSigStack gsignalStack // Go-allocated signal handling stack
sigmask sigset // storage for saved signal mask
tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
mstartfn func()
curg *g // current running goroutine
caughtsig guintptr // goroutine running during fatal signal
signalSecret uint32 // whether we have secret information in our signal stack
// p is the currently attached P for executing Go code, nil if not executing user Go code.
//

118
src/runtime/secret.go Normal file
View file

@ -0,0 +1,118 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (amd64 || arm64) && linux
package runtime
import (
"internal/goarch"
"unsafe"
)
// secret_count reports the current nesting depth of secret.Do calls
// on the calling goroutine (0 when not in secret mode).
// For use by the runtime/secret package only.
//
//go:linkname secret_count runtime/secret.count
func secret_count() int32 {
	return getg().secret
}
// secret_inc records one more level of secret.Do nesting on the
// calling goroutine.
//
//go:linkname secret_inc runtime/secret.inc
func secret_inc() {
	getg().secret++
}
// secret_dec drops one level of secret.Do nesting on the calling
// goroutine.
//
//go:linkname secret_dec runtime/secret.dec
func secret_dec() {
	getg().secret--
}
// secret_eraseSecrets zeroes the part of the current goroutine's stack
// that may have been dirtied while running inside secret.Do.
// Called by runtime/secret when Do returns.
//
//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets
func secret_eraseSecrets() {
	// zero all the stack memory that might be dirtied with
	// secrets. We do this from the systemstack so that we
	// don't have to figure out which holes we have to keep
	// to ensure that we can return from memclr. gp.sched will
	// act as a pigeonhole for our actual return.
	lo := getg().stack.lo
	systemstack(func() {
		// Note, this systemstack call happens within the secret mode,
		// so we don't have to call out to erase our registers, the systemstack
		// code will do that.
		mp := acquirem() // keep curg/sched stable while we read the saved SP
		sp := mp.curg.sched.sp
		// we need to keep systemstack return on top of the stack being cleared
		// for traceback
		sp -= goarch.PtrSize
		// TODO: keep some sort of low water mark so that we don't have
		// to zero a potentially large stack if we used just a little
		// bit of it. That will allow us to use a higher value for
		// lo than gp.stack.lo.
		memclrNoHeapPointers(unsafe.Pointer(lo), sp-lo)
		releasem(mp)
	})
	// Don't put any code here: the stack frame's contents are gone!
}
// specialSecret tracks whether we need to zero an object immediately
// upon freeing. The zeroing itself happens in freeSpecial
// (case _KindSpecialSecret).
type specialSecret struct {
	special special // generic special record; kind is _KindSpecialSecret
}
// addSecret records the fact that we need to zero p immediately
// when it is freed, by attaching a _KindSpecialSecret special record
// to the object. Called for allocations made while in secret mode.
func addSecret(p unsafe.Pointer) {
	// TODO(dmo): figure out the cost of these. These are mostly
	// intended to catch allocations that happen via the runtime
	// that the user has no control over and not big buffers that user
	// code is allocating. The cost should be relatively low,
	// but we have run into a wall with other special allocations before.
	lock(&mheap_.speciallock) // speciallock guards the special fixalloc pools
	s := (*specialSecret)(mheap_.specialSecretAlloc.alloc())
	s.special.kind = _KindSpecialSecret
	unlock(&mheap_.speciallock)
	addspecial(p, &s.special, false)
}
// noopSignal sends a no-op signal to an M for the purpose of
// clobbering (overwriting) its signal stack.
//
// Use sigPreempt. If we don't have a preemption queued, this just
// turns into a no-op.
func noopSignal(mp *m) {
	signalM(mp, sigPreempt)
}
// secret_getStack returns the [lo, hi) bounds of the
// current goroutine's stack.
// For testing only.
// Note that this is kind of tricky, as the goroutine can
// be copied and/or exit before the result is used, at which
// point it may no longer be valid.
//
//go:linkname secret_getStack runtime/secret.getStack
func secret_getStack() (uintptr, uintptr) {
	stk := getg().stack
	return stk.lo, stk.hi
}
// secret_appendSignalStacks appends the bounds of every M's signal
// stack to sigstacks and returns the extended slice.
// For testing only.
//
//go:linkname secret_appendSignalStacks runtime/secret.appendSignalStacks
func secret_appendSignalStacks(sigstacks []stack) []stack {
	// This is probably overkill, but it's what
	// doAllThreadsSyscall does
	stw := stopTheWorld(stwAllThreadsSyscall)
	allocmLock.lock() // freeze allm while we walk it
	acquirem()
	for mp := allm; mp != nil; mp = mp.alllink {
		sigstacks = append(sigstacks, mp.gsignal.stack)
	}
	releasem(getg().m)
	allocmLock.unlock()
	startTheWorld(stw)
	return sigstacks
}

View file

@ -0,0 +1,213 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: this assembly file is used for testing only.
// We need to access registers directly to properly test
// that secrets are erased and go test doesn't like to conditionally
// include assembly files.
// These functions are defined in the package proper, and we
// rely on the linker to prune them away in regular builds.
#include "go_asm.h"
#include "funcdata.h"
// loadRegisters fills a representative set of registers (GPRs, SSE,
// and — when the CPU supports them — AVX and AVX-512 state) from the
// 64 bytes at p. Test-only helper used to dirty registers with a
// known value before/after secret.Do runs.
TEXT ·loadRegisters(SB),0,$0-8
	MOVQ p+0(FP), AX
	MOVQ (AX), R10
	MOVQ (AX), R11
	MOVQ (AX), R12
	MOVQ (AX), R13
	MOVOU (AX), X1
	MOVOU (AX), X2
	MOVOU (AX), X3
	MOVOU (AX), X4
	// Only touch AVX registers when the CPU has them.
	CMPB internalcpu·X86+const_offsetX86HasAVX(SB), $1
	JNE return
	VMOVDQU (AX), Y5
	VMOVDQU (AX), Y6
	VMOVDQU (AX), Y7
	VMOVDQU (AX), Y8
	// Likewise for AVX-512 (Z and mask registers).
	CMPB internalcpu·X86+const_offsetX86HasAVX512(SB), $1
	JNE return
	VMOVUPD (AX), Z14
	VMOVUPD (AX), Z15
	VMOVUPD (AX), Z16
	VMOVUPD (AX), Z17
	KMOVQ (AX), K2
	KMOVQ (AX), K3
	KMOVQ (AX), K4
	KMOVQ (AX), K5
return:
	RET
// spillRegisters stores the registers that loadRegisters dirtied into
// the buffer at p and returns the number of bytes written, so the
// test can scan exactly that region for leaked secrets.
TEXT ·spillRegisters(SB),0,$0-16
	MOVQ p+0(FP), AX
	MOVQ AX, BX // remember the start so we can compute the spilled size
	// General purpose registers, 8 bytes each.
	MOVQ R10, (AX)
	MOVQ R11, 8(AX)
	MOVQ R12, 16(AX)
	MOVQ R13, 24(AX)
	ADDQ $32, AX
	// SSE registers, 16 bytes each.
	MOVOU X1, (AX)
	MOVOU X2, 16(AX)
	MOVOU X3, 32(AX)
	MOVOU X4, 48(AX)
	ADDQ $64, AX
	CMPB internalcpu·X86+const_offsetX86HasAVX(SB), $1
	JNE return
	// AVX registers, 32 bytes each.
	VMOVDQU Y5, (AX)
	VMOVDQU Y6, 32(AX)
	VMOVDQU Y7, 64(AX)
	VMOVDQU Y8, 96(AX)
	ADDQ $128, AX
	CMPB internalcpu·X86+const_offsetX86HasAVX512(SB), $1
	JNE return
	// AVX-512 registers (64 bytes each) and mask registers (8 bytes each).
	VMOVUPD Z14, (AX)
	ADDQ $64, AX
	VMOVUPD Z15, (AX)
	ADDQ $64, AX
	VMOVUPD Z16, (AX)
	ADDQ $64, AX
	VMOVUPD Z17, (AX)
	ADDQ $64, AX
	KMOVQ K2, (AX)
	ADDQ $8, AX
	KMOVQ K3, (AX)
	ADDQ $8, AX
	KMOVQ K4, (AX)
	ADDQ $8, AX
	KMOVQ K5, (AX)
	ADDQ $8, AX
return:
	SUBQ BX, AX // AX = number of bytes spilled
	MOVQ AX, ret+8(FP)
	RET
// useSecret scatters the 8-byte secret at secret_base across the GPRs,
// vector registers and this frame's stack words, then delays, so that
// signals/preemption are likely to observe the dirtied state. Each
// register gets the secret plus a distinct low-byte increment so a
// leak can be traced back to its register.
TEXT ·useSecret(SB),0,$64-24
	NO_LOCAL_POINTERS
	// Load secret into AX
	MOVQ secret_base+0(FP), AX
	MOVQ (AX), AX
	// Scatter secret all across registers.
	// Increment low byte so we can tell which register
	// a leaking secret came from.
	ADDQ $2, AX // add 2 so Rn has secret #n.
	MOVQ AX, BX
	INCQ AX
	MOVQ AX, CX
	INCQ AX
	MOVQ AX, DX
	INCQ AX
	MOVQ AX, SI
	INCQ AX
	MOVQ AX, DI
	INCQ AX
	MOVQ AX, BP
	INCQ AX
	MOVQ AX, R8
	INCQ AX
	MOVQ AX, R9
	INCQ AX
	MOVQ AX, R10
	INCQ AX
	MOVQ AX, R11
	INCQ AX
	MOVQ AX, R12
	INCQ AX
	MOVQ AX, R13
	INCQ AX
	MOVQ AX, R14
	INCQ AX
	MOVQ AX, R15
	// Dirty the vector state by loading the (already dirtied) stack
	// frame into the widest registers the CPU supports.
	CMPB internalcpu·X86+const_offsetX86HasAVX512(SB), $1
	JNE noavx512
	VMOVUPD (SP), Z0
	VMOVUPD (SP), Z1
	VMOVUPD (SP), Z2
	VMOVUPD (SP), Z3
	VMOVUPD (SP), Z4
	VMOVUPD (SP), Z5
	VMOVUPD (SP), Z6
	VMOVUPD (SP), Z7
	VMOVUPD (SP), Z8
	VMOVUPD (SP), Z9
	VMOVUPD (SP), Z10
	VMOVUPD (SP), Z11
	VMOVUPD (SP), Z12
	VMOVUPD (SP), Z13
	VMOVUPD (SP), Z14
	VMOVUPD (SP), Z15
	VMOVUPD (SP), Z16
	VMOVUPD (SP), Z17
	VMOVUPD (SP), Z18
	VMOVUPD (SP), Z19
	VMOVUPD (SP), Z20
	VMOVUPD (SP), Z21
	VMOVUPD (SP), Z22
	VMOVUPD (SP), Z23
	VMOVUPD (SP), Z24
	VMOVUPD (SP), Z25
	VMOVUPD (SP), Z26
	VMOVUPD (SP), Z27
	VMOVUPD (SP), Z28
	VMOVUPD (SP), Z29
	VMOVUPD (SP), Z30
	VMOVUPD (SP), Z31
noavx512:
	MOVOU (SP), X0
	MOVOU (SP), X1
	MOVOU (SP), X2
	MOVOU (SP), X3
	MOVOU (SP), X4
	MOVOU (SP), X5
	MOVOU (SP), X6
	MOVOU (SP), X7
	MOVOU (SP), X8
	MOVOU (SP), X9
	MOVOU (SP), X10
	MOVOU (SP), X11
	MOVOU (SP), X12
	MOVOU (SP), X13
	MOVOU (SP), X14
	MOVOU (SP), X15
	// Put secret on the stack.
	INCQ AX
	MOVQ AX, (SP)
	MOVQ AX, 8(SP)
	MOVQ AX, 16(SP)
	MOVQ AX, 24(SP)
	MOVQ AX, 32(SP)
	MOVQ AX, 40(SP)
	MOVQ AX, 48(SP)
	MOVQ AX, 56(SP)
	// Delay a bit. This makes it more likely that
	// we will be the target of a signal while
	// registers contain secrets.
	// It also tests the path from G stack to M stack
	// to scheduler and back.
	CALL ·delay(SB)
	RET

View file

@ -0,0 +1,167 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: this assembly file is used for testing only.
// We need to access registers directly to properly test
// that secrets are erased and go test doesn't like to conditionally
// include assembly files.
// These functions are defined in the package proper, and we
// rely on the linker to prune them away in regular builds.
#include "go_asm.h"
#include "funcdata.h"
// loadRegisters fills a representative set of registers (GPRs,
// floating point and SIMD) from the memory at p. Test-only helper
// used to dirty registers with a known value.
TEXT ·loadRegisters(SB),0,$0-8
	MOVD p+0(FP), R0
	// General purpose registers.
	MOVD (R0), R10
	MOVD (R0), R11
	MOVD (R0), R12
	MOVD (R0), R13
	// Floating point registers.
	FMOVD (R0), F15
	FMOVD (R0), F16
	FMOVD (R0), F17
	FMOVD (R0), F18
	// SIMD registers, with different element arrangements.
	VLD1 (R0), [V20.B16]
	VLD1 (R0), [V21.H8]
	VLD1 (R0), [V22.S4]
	VLD1 (R0), [V23.D2]
	RET
// spillRegisters stores the registers that loadRegisters dirtied into
// the buffer at p and returns the number of bytes written, so the
// test can scan exactly that region for leaked secrets.
//
// Fix: FMOVD stores 8 bytes, but the spills previously used offsets
// 0/16/32/64 with an ADD $64 — F18's slot at 64(R0) overlapped the
// vector area written next, so F18's value was clobbered and never
// checked. Pack the FP registers at 8-byte offsets instead.
TEXT ·spillRegisters(SB),0,$0-16
	MOVD p+0(FP), R0
	MOVD R0, R1 // remember the start so we can compute the spilled size
	// General purpose registers, 8 bytes each.
	MOVD R10, (R0)
	MOVD R11, 8(R0)
	MOVD R12, 16(R0)
	MOVD R13, 24(R0)
	ADD $32, R0
	// Floating point registers, 8 bytes each.
	FMOVD F15, (R0)
	FMOVD F16, 8(R0)
	FMOVD F17, 16(R0)
	FMOVD F18, 24(R0)
	ADD $32, R0
	// SIMD registers, 16 bytes each, post-incrementing R0.
	VST1.P [V20.B16], (R0)
	VST1.P [V21.H8], (R0)
	VST1.P [V22.S4], (R0)
	VST1.P [V23.D2], (R0)
	SUB R1, R0, R0 // R0 = number of bytes spilled
	MOVD R0, ret+8(FP)
	RET
// useSecret scatters the 8-byte secret at secret_base across the
// general purpose and floating point registers. Each register gets the
// secret plus a distinct low-byte increment so a leak can be traced
// back to its register.
TEXT ·useSecret(SB),0,$0-24
	NO_LOCAL_POINTERS
	// Load secret into R0
	MOVD secret_base+0(FP), R0
	MOVD (R0), R0
	// Scatter secret across registers.
	// Increment low byte so we can tell which register
	// a leaking secret came from.
	// TODO(dmo): more substantial dirtying here
	ADD $1, R0
	MOVD R0, R1
	ADD $1, R0
	MOVD R0, R2
	ADD $1, R0
	MOVD R0, R3
	ADD $1, R0
	MOVD R0, R4
	ADD $1, R0
	MOVD R0, R5
	ADD $1, R0
	MOVD R0, R6
	ADD $1, R0
	MOVD R0, R7
	ADD $1, R0
	MOVD R0, R8
	ADD $1, R0
	MOVD R0, R9
	ADD $1, R0
	MOVD R0, R10
	ADD $1, R0
	MOVD R0, R11
	ADD $1, R0
	MOVD R0, R12
	ADD $1, R0
	MOVD R0, R13
	ADD $1, R0
	MOVD R0, R14
	ADD $1, R0
	MOVD R0, R15
	// Dirty the floating point registers
	ADD $1, R0
	FMOVD R0, F0
	ADD $1, R0
	FMOVD R0, F1
	ADD $1, R0
	FMOVD R0, F2
	ADD $1, R0
	FMOVD R0, F3
	ADD $1, R0
	FMOVD R0, F4
	ADD $1, R0
	FMOVD R0, F5
	ADD $1, R0
	FMOVD R0, F6
	ADD $1, R0
	FMOVD R0, F7
	ADD $1, R0
	FMOVD R0, F8
	ADD $1, R0
	FMOVD R0, F9
	ADD $1, R0
	FMOVD R0, F10
	ADD $1, R0
	FMOVD R0, F11
	ADD $1, R0
	FMOVD R0, F12
	ADD $1, R0
	FMOVD R0, F13
	ADD $1, R0
	FMOVD R0, F14
	ADD $1, R0
	FMOVD R0, F15
	ADD $1, R0
	FMOVD R0, F16
	ADD $1, R0
	FMOVD R0, F17
	ADD $1, R0
	FMOVD R0, F18
	ADD $1, R0
	FMOVD R0, F19
	ADD $1, R0
	FMOVD R0, F20
	ADD $1, R0
	FMOVD R0, F21
	ADD $1, R0
	FMOVD R0, F22
	ADD $1, R0
	FMOVD R0, F23
	ADD $1, R0
	FMOVD R0, F24
	ADD $1, R0
	FMOVD R0, F25
	ADD $1, R0
	FMOVD R0, F26
	ADD $1, R0
	FMOVD R0, F27
	ADD $1, R0
	FMOVD R0, F28
	ADD $1, R0
	FMOVD R0, F29
	ADD $1, R0
	FMOVD R0, F30
	ADD $1, R0
	FMOVD R0, F31
	RET

View file

@ -0,0 +1,427 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.runtimesecret && linux
package secret
import (
"bytes"
"debug/elf"
"fmt"
"internal/testenv"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
)
// Copied from runtime/runtime-gdb_unix_test.go
func canGenerateCore(t *testing.T) bool {
// Ensure there is enough RLIMIT_CORE available to generate a full core.
var lim syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim)
if err != nil {
t.Fatalf("error getting rlimit: %v", err)
}
// Minimum RLIMIT_CORE max to allow. This is a conservative estimate.
// Most systems allow infinity.
const minRlimitCore = 100 << 20 // 100 MB
if lim.Max < minRlimitCore {
t.Skipf("RLIMIT_CORE max too low: %#+v", lim)
}
// Make sure core pattern will send core to the current directory.
b, err := os.ReadFile("/proc/sys/kernel/core_pattern")
if err != nil {
t.Fatalf("error reading core_pattern: %v", err)
}
if string(b) != "core\n" {
t.Skipf("Unexpected core pattern %q", string(b))
}
coreUsesPID := false
b, err = os.ReadFile("/proc/sys/kernel/core_uses_pid")
if err == nil {
switch string(bytes.TrimSpace(b)) {
case "0":
case "1":
coreUsesPID = true
default:
t.Skipf("unexpected core_uses_pid value %q", string(b))
}
}
return coreUsesPID
}
// TestCore builds a crashing program that runs inside secret.Do,
// lets it dump core, and then rummages through the core file looking
// for the secret value, which must not appear.
func TestCore(t *testing.T) {
	// use secret, grab a coredump, rummage through
	// it, trying to find our secret.
	switch runtime.GOARCH {
	case "amd64", "arm64":
	default:
		t.Skip("unsupported arch")
	}
	coreUsesPid := canGenerateCore(t)
	// Build our crashing program
	// Because we need assembly files to properly dirty our state
	// we need to construct a package in our temporary directory.
	tmpDir := t.TempDir()
	// copy our base source
	err := copyToDir("./testdata/crash.go", tmpDir, nil)
	if err != nil {
		t.Fatalf("error copying directory %v", err)
	}
	// Copy our testing assembly files. Use the ones from the package
	// to assure that they are always in sync
	err = copyToDir("./asm_amd64.s", tmpDir, nil)
	if err != nil {
		t.Fatalf("error copying file %v", err)
	}
	err = copyToDir("./asm_arm64.s", tmpDir, nil)
	if err != nil {
		t.Fatalf("error copying file %v", err)
	}
	// stubs.go is reused verbatim except for its package clause.
	err = copyToDir("./stubs.go", tmpDir, func(s string) string {
		return strings.Replace(s, "package secret", "package main", 1)
	})
	if err != nil {
		t.Fatalf("error copying file %v", err)
	}
	// the crashing package will live out of tree, so its source files
	// cannot refer to our internal packages. However, the assembly files
	// can refer to internal names and we can pass the missing offsets as
	// a small generated file
	offsets := `
package main
const (
offsetX86HasAVX = %v
offsetX86HasAVX512 = %v
)
`
	err = os.WriteFile(filepath.Join(tmpDir, "offsets.go"), []byte(fmt.Sprintf(offsets, offsetX86HasAVX, offsetX86HasAVX512)), 0666)
	if err != nil {
		t.Fatalf("error writing offset file %v", err)
	}
	// generate go.mod file
	cmd := exec.Command(testenv.GoToolPath(t), "mod", "init", "crashtest")
	cmd.Dir = tmpDir
	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("error initing module %v\n%s", err, out)
	}
	cmd = exec.Command(testenv.GoToolPath(t), "build", "-o", filepath.Join(tmpDir, "a.exe"))
	cmd.Dir = tmpDir
	out, err = testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("error building source %v\n%s", err, out)
	}
	// Start the test binary.
	cmd = testenv.CommandContext(t, t.Context(), "./a.exe")
	cmd.Dir = tmpDir
	var stdout strings.Builder
	cmd.Stdout = &stdout
	cmd.Stderr = &stdout
	err = cmd.Run()
	// For debugging.
	t.Logf("\n\n\n--- START SUBPROCESS ---\n\n\n%s\n\n--- END SUBPROCESS ---\n\n\n", stdout.String())
	// The binary is expected to be killed by a signal and dump core.
	if err == nil {
		t.Fatalf("test binary did not crash")
	}
	eErr, ok := err.(*exec.ExitError)
	if !ok {
		t.Fatalf("error is not exit error: %v", err)
	}
	if eErr.Exited() {
		t.Fatalf("process exited instead of being terminated: %v", eErr)
	}
	rummage(t, tmpDir, eErr.Pid(), coreUsesPid)
}
func copyToDir(name string, dir string, replace func(string) string) error {
f, err := os.ReadFile(name)
if err != nil {
return err
}
if replace != nil {
f = []byte(replace(string(f)))
}
return os.WriteFile(filepath.Join(dir, filepath.Base(name)), f, 0666)
}
// A violation records one occurrence of the secret pattern found in
// the core dump.
type violation struct {
	id  byte   // secret ID
	off uint64 // offset in core dump
}
// secretStore is a secret value that should never appear in a core
// dump, except for this global variable itself.
// The first byte of the secret is variable, to track
// different instances of it; the remaining 7 bytes are the fixed
// pattern that rummage searches for.
//
// If this value is changed, update ./internal/crashsecret/main.go
// TODO: this is little-endian specific.
var secretStore = [8]byte{
	0x00,
	0x81,
	0xa0,
	0xc6,
	0xb3,
	0x01,
	0x66,
	0x53,
}
// rummage scans the core dump produced by the crashed test binary for
// copies of the secret pattern and reports where each one was found.
// pid and coreUsesPid determine the core file's name.
func rummage(t *testing.T, tmpDir string, pid int, coreUsesPid bool) {
	coreFileName := "core"
	if coreUsesPid {
		coreFileName += fmt.Sprintf(".%d", pid)
	}
	core, err := os.Open(filepath.Join(tmpDir, coreFileName))
	if err != nil {
		t.Fatalf("core file not found: %v", err)
	}
	defer core.Close()
	b, err := io.ReadAll(core)
	if err != nil {
		t.Fatalf("can't read core file: %v", err)
	}
	// Open elf view onto core file.
	coreElf, err := elf.NewFile(core)
	if err != nil {
		t.Fatalf("can't parse core file: %v", err)
	}
	// Look for any places that have the secret. We search for the
	// fixed 7-byte tail; the byte immediately before each match is the
	// variable secret ID.
	var violations []violation // core file offsets where we found a secret
	i := 0
	for {
		j := bytes.Index(b[i:], secretStore[1:])
		if j < 0 {
			break
		}
		if i+j == 0 {
			// A match at the very start of the file has no preceding
			// ID byte; skip past it rather than indexing b[-1].
			i = len(secretStore) - 1
			continue
		}
		i += j - 1 // step back to the ID byte
		t.Errorf("secret %d found at offset %x in core file", b[i], i)
		violations = append(violations, violation{
			id:  b[i],
			off: uint64(i),
		})
		i += len(secretStore)
	}
	// Get more specific data about where in the core we found the secrets.
	regions := elfRegions(t, core, coreElf)
	for _, r := range regions {
		for _, v := range violations {
			if v.off >= r.min && v.off < r.max {
				var addr string
				if r.addrMin != 0 {
					addr = fmt.Sprintf(" addr=%x", r.addrMin+(v.off-r.min))
				}
				t.Logf("additional info: secret %d at offset %x in %s%s", v.id, v.off-r.min, r.name, addr)
			}
		}
	}
}
// An elfRegion is a labeled byte range of the core file, optionally
// with the inferior virtual address range it was captured from.
type elfRegion struct {
	name             string
	min, max         uint64 // core file offset range
	addrMin, addrMax uint64 // inferior address range (or 0,0 if no address, like registers)
}
// elfRegions returns labeled regions covering the core file: one per
// program header, plus (on amd64) per-thread register regions from the
// note segments. It fails the test if any two regions overlap, since
// that would make the violation report ambiguous.
func elfRegions(t *testing.T, core *os.File, coreElf *elf.File) []elfRegion {
	var regions []elfRegion
	for _, p := range coreElf.Progs {
		// Only min(Filesz, Memsz) bytes of the segment are actually
		// present in the file.
		sz := min(p.Filesz, p.Memsz)
		regions = append(regions, elfRegion{
			name:    fmt.Sprintf("%s[%s]", p.Type, p.Flags),
			min:     p.Off,
			max:     p.Off + sz,
			addrMin: p.Vaddr,
			addrMax: p.Vaddr + sz,
		})
	}
	// TODO(dmo): parse thread regions for arm64.
	// This doesn't invalidate the test, it just makes it harder to figure
	// out where we're leaking stuff.
	if runtime.GOARCH == "amd64" {
		regions = append(regions, threadRegions(t, core, coreElf)...)
	}
	// Overlap is symmetric, so each unordered pair needs checking
	// only once.
	for i, r1 := range regions {
		for _, r2 := range regions[i+1:] {
			if r1.max <= r2.min || r2.max <= r1.min {
				continue
			}
			t.Fatalf("overlapping regions %v %v", r1, r2)
		}
	}
	return regions
}
// threadRegions walks the PT_NOTE segments of the core file and
// returns labeled regions for the per-thread register state
// (NT_PRSTATUS, NT_FPREGSET, and a catch-all for other CORE/LINUX
// notes). The structure offsets used here are amd64-specific.
func threadRegions(t *testing.T, core *os.File, coreElf *elf.File) []elfRegion {
	var regions []elfRegion
	for _, prog := range coreElf.Progs {
		if prog.Type != elf.PT_NOTE {
			continue
		}
		b := make([]byte, prog.Filesz)
		_, err := core.ReadAt(b, int64(prog.Off))
		if err != nil {
			t.Fatalf("can't read core file %v", err)
		}
		prefix := "unk"
		b0 := b
		for len(b) > 0 {
			// Each ELF note is: namesz, descsz, type (4 bytes each),
			// then name and desc, each padded to 4-byte alignment.
			namesz := coreElf.ByteOrder.Uint32(b)
			b = b[4:]
			descsz := coreElf.ByteOrder.Uint32(b)
			b = b[4:]
			typ := elf.NType(coreElf.ByteOrder.Uint32(b))
			b = b[4:]
			// NOTE(review): assumes namesz > 0 (it counts the NUL
			// terminator); kernel-written core notes always carry a name.
			name := string(b[:namesz-1])
			b = b[(namesz+3)/4*4:]
			off := prog.Off + uint64(len(b0)-len(b))
			desc := b[:descsz]
			b = b[(descsz+3)/4*4:]
			if name != "CORE" && name != "LINUX" {
				continue
			}
			end := off + uint64(len(desc))
			// Note: amd64 specific
			// See /usr/include/x86_64-linux-gnu/bits/sigcontext.h
			//
			// struct _fpstate
			switch typ {
			case elf.NT_PRSTATUS:
				// The thread's pid lives at offset 32 of prstatus; use
				// it to label this thread's subsequent notes.
				pid := coreElf.ByteOrder.Uint32(desc[32:36])
				prefix = fmt.Sprintf("thread%d: ", pid)
				regions = append(regions, elfRegion{
					name: prefix + "prstatus header",
					min:  off,
					max:  off + 112,
				})
				off += 112
				// General-purpose register order within prstatus.
				greg := []string{
					"r15",
					"r14",
					"r13",
					"r12",
					"rbp",
					"rbx",
					"r11",
					"r10",
					"r9",
					"r8",
					"rax",
					"rcx",
					"rdx",
					"rsi",
					"rdi",
					"orig_rax",
					"rip",
					"cs",
					"eflags",
					"rsp",
					"ss",
					"fs_base",
					"gs_base",
					"ds",
					"es",
					"fs",
					"gs",
				}
				for _, r := range greg {
					regions = append(regions, elfRegion{
						name: prefix + r,
						min:  off,
						max:  off + 8,
					})
					off += 8
				}
				regions = append(regions, elfRegion{
					name: prefix + "prstatus footer",
					min:  off,
					max:  off + 8,
				})
				off += 8
			case elf.NT_FPREGSET:
				regions = append(regions, elfRegion{
					name: prefix + "fpregset header",
					min:  off,
					max:  off + 32,
				})
				off += 32
				for i := 0; i < 8; i++ {
					regions = append(regions, elfRegion{
						name: prefix + fmt.Sprintf("mmx%d", i),
						min:  off,
						max:  off + 16,
					})
					off += 16
					// They are long double (10 bytes), but
					// stored in 16-byte slots.
				}
				for i := 0; i < 16; i++ {
					regions = append(regions, elfRegion{
						name: prefix + fmt.Sprintf("xmm%d", i),
						min:  off,
						max:  off + 16,
					})
					off += 16
				}
				regions = append(regions, elfRegion{
					name: prefix + "fpregset footer",
					min:  off,
					max:  off + 96,
				})
				off += 96
			/*
				case NT_X86_XSTATE: // aka NT_PRPSINFO+511
					// legacy: 512 bytes
					// xsave header: 64 bytes
					fmt.Printf("hdr %v\n", desc[512:][:64])
					// ymm high128: 256 bytes
					println(len(desc))
					fallthrough
			*/
			default:
				// Unparsed note: record it as a single opaque region.
				regions = append(regions, elfRegion{
					name: fmt.Sprintf("%s/%s", name, typ),
					min:  off,
					max:  off + uint64(len(desc)),
				})
				off += uint64(len(desc))
			}
			if off != end {
				t.Fatalf("note section incomplete")
			}
		}
	}
	return regions
}

View file

@ -0,0 +1,16 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package secret
import (
"internal/cpu"
"unsafe"
)
// exports for assembly testing functions
//
// These offsets let the test's assembly files locate the CPU feature
// flags within internal/cpu.X86.
const (
	offsetX86HasAVX    = unsafe.Offsetof(cpu.X86.HasAVX)
	offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512)
)

View file

@ -0,0 +1,128 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.runtimesecret
package secret
import (
"runtime"
_ "unsafe"
)
// Do invokes f.
//
// Do ensures that any temporary storage used by f is erased in a
// timely manner. (In this context, "f" is shorthand for the
// entire call tree initiated by f.)
// - Any registers used by f are erased before Do returns.
// - Any stack used by f is erased before Do returns.
// - Any heap allocation done by f is erased as soon as the garbage
// collector realizes that it is no longer reachable.
// - Do works even if f panics or calls runtime.Goexit. As part of
// that, any panic raised by f will appear as if it originates from
// Do itself.
//
// Limitations:
// - Currently only supported on linux/amd64 and linux/arm64. On unsupported
// platforms, Do will invoke f directly.
// - Protection does not extend to any global variables written by f.
// - Any attempt to launch a goroutine by f will result in a panic.
// - If f calls runtime.Goexit, erasure can be delayed by defers
// higher up on the call stack.
// - Heap allocations will only be erased if the program drops all
// references to those allocations, and then the garbage collector
// notices that those references are gone. The former is under
// control of the program, but the latter is at the whim of the
// runtime.
// - Any value panicked by f may point to allocations from within
// f. Those allocations will not be erased until (at least) the
// panicked value is dead.
// - Pointer addresses may leak into data buffers used by the runtime
// to perform garbage collection. Users should not encode confidential
// information into pointers. For example, if an offset into an array or
// struct is confidential, then users should not create a pointer into
// the object. Since this function is intended to be used with constant-time
// cryptographic code, this requirement is usually fulfilled implicitly.
func Do(f func()) {
	// osArch is a compile-time constant, so on unsupported platforms
	// the compiler can prune everything below the early return.
	const osArch = runtime.GOOS + "/" + runtime.GOARCH
	switch osArch {
	default:
		// unsupported, just invoke f directly.
		f()
		return
	case "linux/amd64", "linux/arm64":
	}
	// Place to store any panic value.
	var p any
	// Step 1: increment the nesting count.
	inc()
	// Step 2: call helper. The helper just calls f
	// and captures (recovers) any panic result.
	p = doHelper(f)
	// Step 3: erase everything used by f (stack, registers).
	eraseSecrets()
	// Step 4: decrement the nesting count.
	dec()
	// Step 5: re-raise any caught panic.
	// This will make the panic appear to come
	// from a stack whose bottom frame is
	// runtime/secret.Do.
	// Anything below that to do with f will be gone.
	//
	// Note that the panic value is not erased. It behaves
	// like any other value that escapes from f. If it is
	// heap allocated, it will be erased when the garbage
	// collector notices it is no longer referenced.
	if p != nil {
		panic(p)
	}
	// Note: if f calls runtime.Goexit, step 3 and above will not
	// happen, as Goexit is unrecoverable. We handle that case in
	// runtime/proc.go:goexit0.
}
// doHelper runs the secret function f and converts any panic it raises
// into an ordinary return value, which Do re-panics after erasure.
func doHelper(f func()) (p any) {
	// Step 2b: Pop the stack up to the secret.doHelper frame
	// if we are in the process of panicking.
	// (It is a no-op if we are not panicking.)
	// We return any panicked value to secret.Do, who will
	// re-panic it.
	defer func() {
		// Note: we rely on the go1.21+ behavior that
		// if we are panicking, recover returns non-nil.
		p = recover()
	}()
	// Step 2a: call the secret function.
	f()
	return
}
// Enabled reports whether [Do] appears anywhere on the call stack.
func Enabled() bool {
	return count() > 0
}

// implemented in runtime

// count reports the calling goroutine's current secret.Do nesting
// depth.
//
//go:linkname count
func count() int32

// inc increments the nesting count (Do step 1).
//
//go:linkname inc
func inc()

// dec decrements the nesting count (Do step 4).
//
//go:linkname dec
func dec()

// eraseSecrets erases the stack and registers used under Do (step 3).
//
//go:linkname eraseSecrets
func eraseSecrets()

View file

@ -0,0 +1,293 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// the race detector does not like our pointer shenanigans
// while checking the stack.
//go:build goexperiment.runtimesecret && (arm64 || amd64) && linux && !race
package secret
import (
"runtime"
"strings"
"testing"
"time"
"unsafe"
)
// secretType is the element type used to plant recognizable secret
// values in memory.
type secretType int64

// secretValue is the sentinel bit pattern the tests search memory for.
const secretValue = 0x53c237_53c237

// S is a type that might have some secrets in it.
type S [100]secretType
// makeS returns an S with every element set to the secret sentinel
// value.
//
// Note: noinline ensures this doesn't get inlined and
// completely optimized away.
//
//go:noinline
func makeS() S {
	var out S
	for i := 0; i < len(out); i++ {
		out[i] = secretValue
	}
	return out
}
// heapS allocates an S on the heap with secrets in it.
//
//go:noinline
func heapS() *S {
	// Note: noinline forces heap allocation (the returned pointer
	// escapes).
	s := makeS()
	return &s
}
// heapSTiny allocates a single secretType on the heap, small enough
// to be served by the runtime's tiny allocator.
//
//go:noinline
func heapSTiny() *secretType {
	// new with an expression argument allocates and initializes in
	// one step (requires a toolchain supporting new(expr)).
	s := new(secretType(secretValue))
	return s
}
// Test that when we allocate inside secret.Do, the resulting
// allocations are zeroed by the garbage collector when they
// are freed.
// See runtime/mheap.go:freeSpecial.
func TestHeap(t *testing.T) {
	var u uintptr
	Do(func() {
		// Keep only the address (not a pointer), so the object is
		// unreachable once Do returns.
		u = uintptr(unsafe.Pointer(heapS()))
	})
	runtime.GC()
	// Check that object got zeroed.
	checkRangeForSecret(t, u, u+unsafe.Sizeof(S{}))
	// Also check our stack, just because we can.
	checkStackForSecret(t)
}
// TestHeapTiny is like TestHeap, but with an allocation small enough
// to go through the tiny allocator.
func TestHeapTiny(t *testing.T) {
	var u uintptr
	Do(func() {
		u = uintptr(unsafe.Pointer(heapSTiny()))
	})
	runtime.GC()
	// Check that object got zeroed.
	checkRangeForSecret(t, u, u+unsafe.Sizeof(secretType(0)))
	// Also check our stack, just because we can.
	checkStackForSecret(t)
}
// Test that when we return from secret.Do, we zero the stack used
// by the argument to secret.Do.
// See runtime/secret.go:secret_dec.
func TestStack(t *testing.T) {
	checkStackForSecret(t) // if this fails, something is wrong with the test
	Do(func() {
		s := makeS()
		use(&s)
	})
	checkStackForSecret(t)
}
//go:noinline
func use(s *S) {
	// Note: noinline prevents dead variable elimination.
	// An empty, non-inlinable call keeps s (and the stack slot it
	// points at) live at the call site.
}
// Test that when we copy a stack, we zero the old one.
// See runtime/stack.go:copystack.
func TestStackCopy(t *testing.T) {
	checkStackForSecret(t) // if this fails, something is wrong with the test
	var lo, hi uintptr
	Do(func() {
		// Put some secrets on the current stack frame.
		s := makeS()
		use(&s)
		// Remember the current stack.
		lo, hi = getStack()
		// Use a lot more stack to force a stack copy.
		growStack()
	})
	checkRangeForSecret(t, lo, hi) // pre-grow stack
	checkStackForSecret(t)         // post-grow stack (just because we can)
}
// growStack consumes enough stack to force the runtime to grow
// (copy) the goroutine's stack.
func growStack() {
	growStack1(1000)
}

// growStack1 recurses n times; the recursion (rather than a loop) is
// what consumes the stack.
func growStack1(n int) {
	if n == 0 {
		return
	}
	growStack1(n - 1)
}
// TestPanic checks that a panic raised inside Do is re-raised as if
// from Do itself, and that the secret function does not appear in the
// resulting traceback.
func TestPanic(t *testing.T) {
	checkStackForSecret(t) // if this fails, something is wrong with the test
	defer func() {
		checkStackForSecret(t)
		p := recover()
		if p == nil {
			t.Errorf("panic squashed")
			return
		}
		e, ok := p.(error)
		if !ok {
			// Must bail out here: calling e.Error() below on a nil
			// interface would panic inside this deferred function.
			t.Errorf("panic not an error")
			return
		}
		if !strings.Contains(e.Error(), "divide by zero") {
			t.Errorf("panic not a divide by zero error: %s", e.Error())
		}
		// Walk the traceback and make sure the secret function was
		// scrubbed from it.
		var pcs [10]uintptr
		n := runtime.Callers(0, pcs[:])
		frames := runtime.CallersFrames(pcs[:n])
		for {
			frame, more := frames.Next()
			if strings.Contains(frame.Function, "dividePanic") {
				t.Errorf("secret function in traceback")
			}
			if !more {
				break
			}
		}
	}()
	Do(dividePanic)
}
// dividePanic dirties its stack frame with secrets and then raises a
// runtime divide-by-zero panic.
func dividePanic() {
	s := makeS()
	use(&s)
	_ = 8 / zero
}

// zero is a package-level variable so the compiler cannot reject the
// division above as a constant divide-by-zero.
var zero int
// TestGoExit checks that calling runtime.Goexit inside Do still
// results in the goroutine's stack (and, best effort, registers)
// being erased. See runtime/proc.go:goexit0.
func TestGoExit(t *testing.T) {
	checkStackForSecret(t) // if this fails, something is wrong with the test
	c := make(chan uintptr, 2)
	go func() {
		// Run the test in a separate goroutine
		defer func() {
			// Tell original goroutine what our stack is
			// so it can check it for secrets.
			lo, hi := getStack()
			c <- lo
			c <- hi
		}()
		Do(func() {
			s := makeS()
			use(&s)
			// there's an entire round-trip through the scheduler between here
			// and when we are able to check if the registers are still dirtied, and we're
			// not guaranteed to run on the same M. Make a best effort attempt anyway
			loadRegisters(unsafe.Pointer(&s))
			runtime.Goexit()
		})
		t.Errorf("goexit didn't happen")
	}()
	lo := <-c
	hi := <-c
	// We want to wait until the other goroutine has finished Goexiting and
	// cleared its stack. There's no signal for that, so just wait a bit.
	time.Sleep(1 * time.Millisecond)
	checkRangeForSecret(t, lo, hi)
	// Best-effort register check (see the scheduler caveat above).
	var spillArea [64]secretType
	n := spillRegisters(unsafe.Pointer(&spillArea))
	if n > unsafe.Sizeof(spillArea) {
		t.Fatalf("spill area overrun %d\n", n)
	}
	for i, v := range spillArea {
		if v == secretValue {
			t.Errorf("secret found in spill slot %d", i)
		}
	}
}
// checkStackForSecret fails the test if the current goroutine's stack
// contains the secret sentinel value.
func checkStackForSecret(t *testing.T) {
	t.Helper()
	lo, hi := getStack()
	checkRangeForSecret(t, lo, hi)
}

// checkRangeForSecret fails the test if any word-aligned slot in
// [lo, hi) holds the secret sentinel value.
func checkRangeForSecret(t *testing.T, lo, hi uintptr) {
	t.Helper()
	for p := lo; p < hi; p += unsafe.Sizeof(secretType(0)) {
		v := *(*secretType)(unsafe.Pointer(p))
		if v == secretValue {
			t.Errorf("secret found in [%x,%x] at %x", lo, hi, p)
		}
	}
}
// TestRegisters checks that registers loaded with secrets inside Do
// are cleared by the time Do returns.
func TestRegisters(t *testing.T) {
	Do(func() {
		s := makeS()
		loadRegisters(unsafe.Pointer(&s))
	})
	// Spill the test registers and make sure none still holds the
	// sentinel value.
	var spillArea [64]secretType
	n := spillRegisters(unsafe.Pointer(&spillArea))
	if n > unsafe.Sizeof(spillArea) {
		t.Fatalf("spill area overrun %d\n", n)
	}
	for i, v := range spillArea {
		if v == secretValue {
			t.Errorf("secret found in spill slot %d", i)
		}
	}
}
// TestSignalStacks checks that signal stacks dirtied while in secret
// mode are erased after Do returns and a GC has run.
func TestSignalStacks(t *testing.T) {
	Do(func() {
		s := makeS()
		loadRegisters(unsafe.Pointer(&s))
		// cause a signal with our secret state to dirty
		// at least one of the signal stacks
		func() {
			defer func() {
				x := recover()
				if x == nil {
					panic("did not get panic")
				}
			}()
			var p *int
			*p = 20
		}()
	})
	// signal stacks aren't cleared until after
	// the next GC after secret.Do returns
	runtime.GC()
	stk := make([]stack, 0, 100)
	stk = appendSignalStacks(stk)
	for _, s := range stk {
		checkRangeForSecret(t, s.lo, s.hi)
	}
}
// hooks into the runtime

// getStack returns the current goroutine's stack bounds (lo, hi);
// implemented in the runtime.
func getStack() (uintptr, uintptr)

// Stack is a copy of runtime.stack for testing export.
// Fields must match.
type stack struct {
	lo uintptr
	hi uintptr
}

// appendSignalStacks appends the bounds of the runtime's signal
// stacks to the given slice; implemented in the runtime.
func appendSignalStacks([]stack) []stack

View file

@ -0,0 +1,32 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build arm64 || amd64
// testing stubs, these are implemented in assembly in
// asm_$GOARCH.s
//
// Note that this file is also used as a template to build a
// crashing binary that tries to leave secrets in places where
// they are supposed to be erased. see crash_test.go for more info
package secret
import "unsafe"
// loadRegisters loads data from p into the test registers.
//
//go:noescape
func loadRegisters(p unsafe.Pointer)

// spillRegisters spills data from the test registers into p.
// It returns the amount of space filled in.
//
//go:noescape
func spillRegisters(p unsafe.Pointer) uintptr

// useSecret loads secret into all registers.
//
//go:noescape
func useSecret(secret []byte)

View file

@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !arm64 && !amd64
package secret
import "unsafe"
// No-op fallbacks for architectures without assembly implementations.
func loadRegisters(p unsafe.Pointer) {}

func spillRegisters(p unsafe.Pointer) uintptr { return 0 }

func useSecret(secret []byte) {}

142
src/runtime/secret/testdata/crash.go vendored Normal file
View file

@ -0,0 +1,142 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"os"
"runtime"
"runtime/debug"
"runtime/secret"
"sync"
"syscall"
"time"
_ "unsafe"
"weak"
)
// callback from assembly
//
// delay sleeps briefly; it is linked into the test's assembly helpers
// by name (presumably to widen race/scheduling windows — the assembly
// callers are not visible here).
//
//go:linkname delay main.delay
func delay() {
	time.Sleep(1 * time.Millisecond)
}
// Same secret as in ../../crash_test.go
// The first byte is the variable secret ID; the remaining 7 bytes are
// the fixed pattern the test scans the core dump for.
var secretStore = [8]byte{
	0x00,
	0x81,
	0xa0,
	0xc6,
	0xb3,
	0x01,
	0x66,
	0x53,
}
// main exercises the secret in many ways, then crashes. With
// traceback mode "crash" (set in enableCore), the final panic makes
// the kernel write a core dump for the test to inspect.
func main() {
	enableCore()
	useSecretProc()
	// clear out secret. That way we don't have
	// to figure out which secret is the allowed
	// source
	clear(secretStore[:])
	panic("terminate")
}
// Copied from runtime/runtime-gdb_unix_test.go
//
// enableCore switches tracebacks to "crash" mode and raises
// RLIMIT_CORE to its maximum so the kernel will produce a core dump.
func enableCore() {
	debug.SetTraceback("crash")
	var lim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim); err != nil {
		panic(fmt.Sprintf("error getting rlimit: %v", err))
	}
	lim.Cur = lim.Max
	fmt.Fprintf(os.Stderr, "Setting RLIMIT_CORE = %+#v\n", lim)
	if err := syscall.Setrlimit(syscall.RLIMIT_CORE, &lim); err != nil {
		panic(fmt.Sprintf("error setting rlimit: %v", err))
	}
}
// useSecretProc does 5 seconds of work, using the secret value
// inside secret.Do in a bunch of ways.
func useSecretProc() {
	stop := make(chan bool)
	var wg sync.WaitGroup
	// Four workers repeatedly run secret.Do with the secret in heap
	// objects and registers.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(1 * time.Second)
			for {
				select {
				case <-stop:
					wg.Done()
					return
				default:
					secret.Do(func() {
						// Copy key into a variable-sized heap allocation.
						// This both puts secrets in heap objects,
						// and more generally just causes allocation,
						// which forces garbage collection, which
						// requires interrupts and the like.
						s := bytes.Repeat(secretStore[:], 1+i*2)
						// Also spam the secret across all registers.
						useSecret(s)
					})
				}
			}
		}()
	}
	// Send some allocations over a channel. This does 2 things:
	// 1) forces some GCs to happen
	// 2) causes more scheduling noise (Gs moving between Ms, etc.)
	c := make(chan []byte)
	wg.Add(2)
	go func() {
		for {
			select {
			case <-stop:
				wg.Done()
				return
			case c <- make([]byte, 256):
			}
		}
	}()
	go func() {
		for {
			select {
			case <-stop:
				wg.Done()
				return
			case <-c:
			}
		}
	}()
	time.Sleep(5 * time.Second)
	close(stop)
	wg.Wait()
	// use a weak reference for ensuring that the GC has cleared everything
	// Use a large value to avoid the tiny allocator.
	w := weak.Make(new([2048]byte))
	// 20 seems like a decent amount?
	for i := 0; i < 20; i++ {
		runtime.GC() // GC should clear any secret heap objects and clear out scheduling buffers.
		if w.Value() == nil {
			fmt.Fprintf(os.Stderr, "number of GCs %v\n", i+1)
			return
		}
	}
	fmt.Fprintf(os.Stderr, "GC didn't clear out in time\n")
	// This will cause the core dump to happen with the sentinel value still in memory
	// so we will detect the fault.
	panic("fault")
}

107
src/runtime/secret_amd64.s Normal file
View file

@ -0,0 +1,107 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "textflag.h"
#include "funcdata.h"
// TODO(dmo): generate these with mkpreempt.go, the register sets
// are tightly coupled and this will ensure that we keep them
// all synchronized

// secretEraseRegisters erases any register that may
// have been used with user code within a secret.Do function.
// This is roughly the general purpose and floating point
// registers, barring any reserved registers and registers generally
// considered architectural (amd64 segment registers, arm64 exception registers)
TEXT ·secretEraseRegisters(SB),NOFRAME|NOSPLIT,$0-0
	// Clear AX here, then share the rest of the work with the mcall
	// variant, which must leave AX intact.
	XORL	AX, AX
	JMP	·secretEraseRegistersMcall(SB)
// Mcall requires an argument in AX. This function
// excludes that register from being cleared
TEXT ·secretEraseRegistersMcall(SB),NOSPLIT|NOFRAME,$0-0
	// integer registers
	XORL	BX, BX
	XORL	CX, CX
	XORL	DX, DX
	XORL	DI, DI
	XORL	SI, SI
	// BP = frame pointer
	// SP = stack pointer
	XORL	R8, R8
	XORL	R9, R9
	XORL	R10, R10
	XORL	R11, R11
	XORL	R12, R12
	XORL	R13, R13
	// R14 = G register
	XORL	R15, R15
	// floating-point registers
	// Without AVX, clear the SSE registers one by one.
	CMPB	internal∕cpu·X86+const_offsetX86HasAVX(SB), $1
	JEQ	avx
	PXOR	X0, X0
	PXOR	X1, X1
	PXOR	X2, X2
	PXOR	X3, X3
	PXOR	X4, X4
	PXOR	X5, X5
	PXOR	X6, X6
	PXOR	X7, X7
	PXOR	X8, X8
	PXOR	X9, X9
	PXOR	X10, X10
	PXOR	X11, X11
	PXOR	X12, X12
	PXOR	X13, X13
	PXOR	X14, X14
	PXOR	X15, X15
	JMP	noavx512
avx:
	// VZEROALL zeroes all of the X0-X15 registers, no matter how wide.
	// That includes Y0-Y15 (256-bit avx) and Z0-Z15 (512-bit avx512).
	VZEROALL
	// Clear all the avx512 state.
	CMPB	internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
	JNE	noavx512
	// Zero X16-X31
	// Note that VZEROALL above already cleared Z0-Z15.
	VMOVAPD	Z0, Z16
	VMOVAPD	Z0, Z17
	VMOVAPD	Z0, Z18
	VMOVAPD	Z0, Z19
	VMOVAPD	Z0, Z20
	VMOVAPD	Z0, Z21
	VMOVAPD	Z0, Z22
	VMOVAPD	Z0, Z23
	VMOVAPD	Z0, Z24
	VMOVAPD	Z0, Z25
	VMOVAPD	Z0, Z26
	VMOVAPD	Z0, Z27
	VMOVAPD	Z0, Z28
	VMOVAPD	Z0, Z29
	VMOVAPD	Z0, Z30
	VMOVAPD	Z0, Z31
	// Zero k0-k7
	KXORQ	K0, K0, K0
	KXORQ	K0, K0, K1
	KXORQ	K0, K0, K2
	KXORQ	K0, K0, K3
	KXORQ	K0, K0, K4
	KXORQ	K0, K0, K5
	KXORQ	K0, K0, K6
	KXORQ	K0, K0, K7
noavx512:
	// misc registers
	CMPL	BX, BX //eflags
	// segment registers? Direction flag? Both seem overkill.
	RET

View file

@ -0,0 +1,90 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "textflag.h"
#include "funcdata.h"
TEXT ·secretEraseRegisters(SB),NOFRAME|NOSPLIT,$0-0
	// Clear R0 and R26 here; the mcall variant must preserve them
	// (argument and extra link register) and so does not.
	MOVD	ZR, R0
	MOVD	ZR, R26
	JMP	·secretEraseRegistersMcall(SB)
// Mcall requires an argument in R0 and does not have a
// stack frame to spill into. Additionally, there is no stack
// to spill the link register into. This function deliberately
// doesn't clear R0 and R26, and Mcall uses R26 as a link register.
TEXT ·secretEraseRegistersMcall(SB),NOFRAME|NOSPLIT,$0-0
	// integer registers
	MOVD	ZR, R1
	MOVD	ZR, R2
	MOVD	ZR, R3
	MOVD	ZR, R4
	MOVD	ZR, R5
	MOVD	ZR, R6
	MOVD	ZR, R7
	MOVD	ZR, R8
	MOVD	ZR, R9
	MOVD	ZR, R10
	MOVD	ZR, R11
	MOVD	ZR, R12
	MOVD	ZR, R13
	MOVD	ZR, R14
	MOVD	ZR, R15
	MOVD	ZR, R16
	MOVD	ZR, R17
	// R18 = platform register
	MOVD	ZR, R19
	MOVD	ZR, R20
	MOVD	ZR, R21
	MOVD	ZR, R22
	MOVD	ZR, R23
	MOVD	ZR, R24
	MOVD	ZR, R25
	// R26 used for extra link register in mcall where we can't spill
	MOVD	ZR, R27
	// R28 = g
	// R29 = frame pointer
	// R30 = link pointer (return address)
	// R31 = stack pointer
	// floating point registers
	// (also clears simd registers)
	FMOVD	ZR, F0
	FMOVD	ZR, F1
	FMOVD	ZR, F2
	FMOVD	ZR, F3
	FMOVD	ZR, F4
	FMOVD	ZR, F5
	FMOVD	ZR, F6
	FMOVD	ZR, F7
	FMOVD	ZR, F8
	FMOVD	ZR, F9
	FMOVD	ZR, F10
	FMOVD	ZR, F11
	FMOVD	ZR, F12
	FMOVD	ZR, F13
	FMOVD	ZR, F14
	FMOVD	ZR, F15
	FMOVD	ZR, F16
	FMOVD	ZR, F17
	FMOVD	ZR, F18
	FMOVD	ZR, F19
	FMOVD	ZR, F20
	FMOVD	ZR, F21
	FMOVD	ZR, F22
	FMOVD	ZR, F23
	FMOVD	ZR, F24
	FMOVD	ZR, F25
	FMOVD	ZR, F26
	FMOVD	ZR, F27
	FMOVD	ZR, F28
	FMOVD	ZR, F29
	FMOVD	ZR, F30
	FMOVD	ZR, F31
	// misc registers
	CMP	ZR, ZR // N,Z,C,V flags
	RET

View file

@ -0,0 +1,9 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build arm64 || amd64
package runtime
// secretEraseRegisters zeroes the registers that user code inside
// secret.Do may have used; implemented in assembly (secret_GOARCH.s).
func secretEraseRegisters()

View file

@ -0,0 +1,11 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !arm64 && !amd64
package runtime
// secretEraseRegisters must never be reached on architectures without
// an assembly implementation.
func secretEraseRegisters() {
	throw("runtime/secret.Do not supported yet")
}

View file

@ -0,0 +1,32 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !(amd64 || arm64) || !linux
package runtime
import "unsafe"
// Stubs for platforms that do not implement runtime/secret

// secret_count reports the secret.Do nesting depth; always 0 here.
//
//go:linkname secret_count runtime/secret.count
func secret_count() int32 { return 0 }

//go:linkname secret_inc runtime/secret.inc
func secret_inc() {}

//go:linkname secret_dec runtime/secret.dec
func secret_dec() {}

//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets
func secret_eraseSecrets() {}

// addSecret marks a heap object for erasure on free; no-op here.
func addSecret(p unsafe.Pointer) {}

// specialSecret is the heap special used to track secret allocations;
// empty on unsupported platforms.
type specialSecret struct{}

//go:linkname secret_getStack runtime/secret.getStack
func secret_getStack() (uintptr, uintptr) { return 0, 0 }

// noopSignal is the per-M signal hook; no-op here.
func noopSignal(mp *m) {}

View file

@ -54,3 +54,31 @@ func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
// dumpSigStack prints a signal stack with the context, fpstate pointer field within that context and
// the beginning of the fpstate annotated by C/F/S respectively
func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
	println(s)
	println("SP:\t", hex(sp))
	println("ctx:\t", hex(ctx))
	// Address of the fpregs pointer field inside the ucontext.
	fpfield := ctx + unsafe.Offsetof(ucontext{}.uc_mcontext) + unsafe.Offsetof(mcontext{}.fpregs)
	println("fpfield:\t", hex(fpfield))
	// Address the fpregs field points at (start of the fpstate).
	fpbegin := uintptr(unsafe.Pointer((&sigctxt{nil, unsafe.Pointer(ctx)}).regs().fpstate))
	println("fpstate:\t", hex(fpbegin))
	hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
		switch p {
		case ctx:
			hm.start()
			print("C")
			println()
		case fpfield:
			hm.start()
			print("F")
			println()
		case fpbegin:
			hm.start()
			print("S")
			println()
		}
	})
}

View file

@ -69,3 +69,22 @@ func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
func (c *sigctxt) set_sigaddr(x uint64) {
*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
// dumpSigStack prints the signal stack between sp and stackhi,
// annotating the signal context (C) and the start of the mcontext's
// __reserved register-save area (E).
func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
	println(s)
	println("SP:\t", hex(sp))
	println("ctx:\t", hex(ctx))
	entriesStart := uintptr(unsafe.Pointer(&(*ucontext)(unsafe.Pointer(ctx)).uc_mcontext.__reserved))
	hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
		switch p {
		case ctx:
			hm.start()
			print("C")
			println()
		case entriesStart:
			hm.start()
			print("E")
			println()
		}
	})
}

View file

@ -8,6 +8,7 @@ package runtime
import (
"internal/abi"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
@ -488,6 +489,11 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
c.fixsigcode(sig)
sighandler(sig, info, ctx, gp)
if goexperiment.RuntimeSecret && gp.secret > 0 {
atomic.Store(&gp.m.signalSecret, 1)
}
setg(gp)
if setStack {
restoreGsignalStack(&gsignalStack)

View file

@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
{runtime.G{}, 284 + xreg, 448 + xreg}, // g, but exported for testing
{runtime.Sudog{}, 64, 104}, // sudog, but exported for testing
}

View file

@ -8,6 +8,7 @@ import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
@ -985,6 +986,16 @@ func copystack(gp *g, newsize uintptr) {
}
// free old stack
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Some portion of the old stack has secret stuff on it.
// We don't really know where we entered secret mode,
// so just clear the whole thing.
// TODO(dmo): traceback until we hit secret.Do? clearing
// is fast and optimized, might not be worth it.
memclrNoHeapPointers(unsafe.Pointer(old.lo), old.hi-old.lo)
// The memmove call above might put secrets from the stack into registers.
secretEraseRegisters()
}
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
}
@ -1026,6 +1037,14 @@ func newstack() {
}
gp := thisg.m.curg
if goexperiment.RuntimeSecret && gp.secret > 0 {
// If we're entering here from a secret context, clear
// all the registers. This is important because we
// might context switch to a different goroutine which
// is not in secret mode, and it will not be careful
// about clearing its registers.
secretEraseRegisters()
}
if thisg.m.curg.throwsplit {
// Update syscallsp, syscallpc in case traceback uses them.

View file

@ -228,6 +228,18 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
// due to stack probes inserted to avoid stack/heap collisions.
// See issue #20427.
#ifdef GOEXPERIMENT_runtimesecret
// The kernel might spill our secrets onto g0
// erase our registers here.
// TODO(dmo): what is the ABI guarantee here? we use
// R14 later, but the function is ABI0
CMPL g_secret(R14), $0
JEQ nosecret
CALL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
MOVQ g_m(R14), BX // BX unchanged by C code.

View file

@ -225,6 +225,13 @@ TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
// func walltime() (sec int64, nsec int32)
TEXT runtime·walltime(SB),NOSPLIT,$24-12
#ifdef GOEXPERIMENT_runtimesecret
MOVW g_secret(g), R20
CBZ R20, nosecret
BL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVD RSP, R20 // R20 is unchanged by C code
MOVD RSP, R1
@ -309,6 +316,13 @@ finish:
RET
TEXT runtime·nanotime1(SB),NOSPLIT,$24-8
#ifdef GOEXPERIMENT_runtimesecret
MOVW g_secret(g), R20
CBZ R20, nosecret
BL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVD RSP, R20 // R20 is unchanged by C code
MOVD RSP, R1

View file

@ -12,6 +12,16 @@
// func now() (sec int64, nsec int32, mono int64)
TEXT time·now<ABIInternal>(SB),NOSPLIT,$16-24
#ifdef GOEXPERIMENT_runtimesecret
// The kernel might spill our secrets onto g0
// erase our registers here.
CMPL g_secret(R14), $0
JEQ nosecret
CALL ·secretEraseRegisters(SB)
nosecret:
#endif
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
MOVQ g_m(R14), BX // BX unchanged by C code.

View file

@ -8,6 +8,7 @@ package runtime
import (
"internal/cpu"
"internal/goexperiment"
"unsafe"
)
@ -95,6 +96,13 @@ func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
return -1, false
}
// vDSO code may spill registers to the stack
// Make sure they're zeroed if we're running in secret mode
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
secretEraseRegisters()
}
// We use getg().m instead of acquirem() here, because always taking
// the lock is slightly more expensive than not always taking the lock.
// However, we *do* require that m doesn't migrate elsewhere during the

View file

@ -47,6 +47,10 @@ TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
// func gettimeofday(tv *Timeval) (err uintptr)
TEXT ·gettimeofday(SB),NOSPLIT,$0-16
// Usually, we'd check if we're running
// secret code here, but because we execute
// gettimeofday on the G stack, it's fine to leave
// the registers uncleared
MOVQ tv+0(FP), DI
MOVQ $0, SI
MOVQ runtime·vdsoGettimeofdaySym(SB), AX