go/src/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr
var FuncPC = funcPC
var Fastlog2 = fastlog2
var Atoi = atoi
var Atoi32 = atoi32
var Nanotime = nanotime
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}
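
// A hypothetical usage sketch (not part of the original file) of how a test
// in package runtime_test could inspect the pointer/scalar layout of a value.
// The byte-per-word encoding assumed here (1 = pointer word, 0 = scalar word)
// is an interpretation, not taken from the original file.
//
//	type T struct {
//		p *int
//		x uintptr
//	}
//	mask := runtime.GCMask(new(T))
//	if len(mask) != 0 && mask[0] != 1 {
//		panic("first word of T should be marked as a pointer")
//	}
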
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}
func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps can
	// coexist for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}
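
// The scheduler queue tests above are driven from package runtime_test.
// A minimal driver sketch (the test name and iteration count are
// illustrative assumptions, not taken from the original tests):
//
//	func TestSchedLocalQueueEmpty(t *testing.T) {
//		runtime.RunSchedLocalQueueEmptyTest(1000)
//	}
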
var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}
var HashLoad = &hashLoad
// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}
type Uintreg sys.Uintreg
var Open = open
var Close = closefd
var Read = read
var Write = write
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
var BigEndian = sys.BigEndian
// For benchmarking.
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
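
// A hypothetical benchmark sketch (not from the original file) showing the
// intended call pattern: n is the iteration count and x is a pointer or
// slice whose element type's heap bitmap is written repeatedly. The Node
// type and benchmark name are illustrative assumptions.
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		type Node struct {
//			left, right *Node
//			payload     [2]uintptr
//		}
//		runtime.BenchSetType(b.N, new(Node))
//	}
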
const PtrSize = sys.PtrSize
var ForceGCPeriod = &forcegcperiod
// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
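
// A usage sketch (an assumption, not from the original file): a test harness
// can raise the traceback level once, early in TestMain, so that later calls
// to debug.SetTraceback cannot lower it below this "environment" level.
//
//	func TestMain(m *testing.M) {
//		runtime.SetTracebackEnv("system")
//		os.Exit(m.Run())
//	}
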
var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")
	pagesInUse = uintptr(mheap_.pagesInUse)
	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}
	startTheWorld()
	return
}
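
// A hypothetical sketch of how a test might use CountPagesInUse: the
// runtime-maintained counter and the value recomputed from the span list
// should agree.
//
//	inUse, counted := runtime.CountPagesInUse()
//	if inUse != counted {
//		t.Fatalf("mheap_.pagesInUse = %d, but counted %d pages in spans", inUse, counted)
//	}
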
func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
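
// A hypothetical round-trip sketch for ProfBuf (buffer sizes, the record
// contents, and the timestamp are illustrative assumptions): write one
// record with a 2-word header and a small stack, then read it back without
// blocking.
//
//	var tag unsafe.Pointer
//	b := runtime.NewProfBuf(2, 1024, 1) // header words, data words, tag slots (sizes assumed)
//	b.Write(&tag, time.Now().UnixNano(), []uint64{42, 7}, []uintptr{0x101, 0x202})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	b.Close()
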
// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
			slow.HeapReleased += uint64(i.span().released())
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
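
// A hypothetical consistency check (a sketch, not the original test): the
// runtime-computed stats and the stats recomputed by scanning the heap
// should agree on the fields ReadMemStatsSlow recomputes.
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.Alloc != slow.Alloc || base.Mallocs != slow.Mallocs || base.Frees != slow.Frees {
//		t.Fatalf("MemStats disagree: base=%+v slow=%+v", base, slow)
//	}
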
// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}
type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}
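
// A minimal usage sketch (assumed, not from the original file) for the
// exported RWMutex wrapper: readers may hold the lock concurrently, a
// writer excludes everyone.
//
//	var rw runtime.RWMutex
//	rw.RLock()
//	rw.RUnlock()
//	rw.Lock()
//	rw.Unlock()
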
const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
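
// A hypothetical sketch of how a test could use these map helpers: a map
// created without a size hint may defer allocating its bucket array, and
// the bucket count is always 1<<B, growing as elements are added. (The
// exact growth thresholds are not assumed here.)
//
//	m := map[int]int{}
//	lazy := runtime.MapBucketsPointerIsNil(m) // may be true before the first write
//	for i := 0; i < 1000; i++ {
//		m[i] = i
//	}
//	if runtime.MapBucketsCount(m) < 8 {
//		t.Errorf("expected the bucket array to have grown; lazily allocated at start: %v", lazy)
//	}
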
func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}
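
// A hypothetical sketch: after runtime.LockOSThread the external lock count
// reported by LockOSCounts should be nonzero (the exact value checked here
// is an assumption of the sketch).
//
//	runtime.LockOSThread()
//	external, _ := runtime.LockOSCounts()
//	if external == 0 {
//		t.Fatal("expected external lock count > 0 after LockOSThread")
//	}
//	runtime.UnlockOSThread()
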
//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}
func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}
// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}
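
// A hypothetical sketch (loosely modeled on an arena-collision style test;
// the allocation size is an assumption): reserve the next growth hint, then
// allocate enough to plausibly force heap growth and check that the new
// memory avoided the now-unusable range.
//
//	start, end := runtime.MapNextArenaHint()
//	obj := make([]byte, 64<<20)
//	if p := uintptr(unsafe.Pointer(&obj[0])); p >= start && p < end {
//		t.Fatalf("allocation %#x landed in unusable range [%#x, %#x)", p, start, end)
//	}
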
func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

func Getg() *G {
	return getg()
}
//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}
func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
	h := *(**hmap)(unsafe.Pointer(&m))
	i := interface{}(m)
	t := *(**maptype)(unsafe.Pointer(&i))
	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}
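
// A hypothetical sketch: populate a map, delete a range of keys to create
// tombstones, and then verify the emptyOne/emptyRest invariant described
// above. The element counts are arbitrary assumptions.
//
//	m := map[int]int{}
//	for i := 0; i < 1000; i++ {
//		m[i] = i
//	}
//	for i := 0; i < 500; i++ {
//		delete(m, i)
//	}
//	runtime.MapTombstoneCheck(m)
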
// Span is a safe wrapper around an mspan, whose memory
// is managed manually.
type Span struct {
	*mspan
}

func AllocSpan(base, npages uintptr, scavenged bool) Span {
	lock(&mheap_.lock)
	s := (*mspan)(mheap_.spanalloc.alloc())
	unlock(&mheap_.lock)
	s.init(base, npages)
	s.scavenged = scavenged
	return Span{s}
}

func (s *Span) Free() {
	lock(&mheap_.lock)
	mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
	unlock(&mheap_.lock)
	s.mspan = nil
}

func (s Span) Base() uintptr {
	return s.mspan.base()
}

func (s Span) Pages() uintptr {
	return s.mspan.npages
}
type TreapIterType treapIterType

const (
	TreapIterScav TreapIterType = TreapIterType(treapIterScav)
	TreapIterBits               = treapIterBits
)

type TreapIterFilter treapIterFilter

func TreapFilter(mask, match TreapIterType) TreapIterFilter {
	return TreapIterFilter(treapFilter(treapIterType(mask), treapIterType(match)))
}

func (s Span) MatchesIter(mask, match TreapIterType) bool {
	return treapFilter(treapIterType(mask), treapIterType(match)).matches(s.treapFilter())
}

type TreapIter struct {
	treapIter
}

func (t TreapIter) Span() Span {
	return Span{t.span()}
}

func (t TreapIter) Valid() bool {
	return t.valid()
}

func (t TreapIter) Next() TreapIter {
	return TreapIter{t.next()}
}

func (t TreapIter) Prev() TreapIter {
	return TreapIter{t.prev()}
}
// Treap is a safe wrapper around mTreap for testing.
//
// It must never be heap-allocated because mTreap is
// notinheap.
//
//go:notinheap
type Treap struct {
	mTreap
}

func (t *Treap) Start(mask, match TreapIterType) TreapIter {
	return TreapIter{t.start(treapIterType(mask), treapIterType(match))}
}

func (t *Treap) End(mask, match TreapIterType) TreapIter {
	return TreapIter{t.end(treapIterType(mask), treapIterType(match))}
}

func (t *Treap) Insert(s Span) {
	// mTreap uses a fixalloc in mheap_ for treapNode
	// allocation which requires the mheap_ lock to manipulate.
	// Locking here is safe because the treap itself never allocs
	// or otherwise ends up grabbing this lock.
	lock(&mheap_.lock)
	t.insert(s.mspan)
	unlock(&mheap_.lock)
	t.CheckInvariants()
}

func (t *Treap) Find(npages uintptr) TreapIter {
	return TreapIter{t.find(npages)}
}

func (t *Treap) Erase(i TreapIter) {
	// mTreap uses a fixalloc in mheap_ for treapNode
	// freeing which requires the mheap_ lock to manipulate.
	// Locking here is safe because the treap itself never allocs
	// or otherwise ends up grabbing this lock.
	lock(&mheap_.lock)
	t.erase(i.treapIter)
	unlock(&mheap_.lock)
	t.CheckInvariants()
}

func (t *Treap) RemoveSpan(s Span) {
	// See Erase about locking.
	lock(&mheap_.lock)
	t.removeSpan(s.mspan)
	unlock(&mheap_.lock)
	t.CheckInvariants()
}

func (t *Treap) Size() int {
	i := 0
	t.mTreap.treap.walkTreap(func(t *treapNode) {
		i++
	})
	return i
}

func (t *Treap) CheckInvariants() {
	t.mTreap.treap.walkTreap(checkTreapNode)
	t.mTreap.treap.validateInvariants()
}
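
// A hypothetical end-to-end sketch for the treap wrapper (the span base
// address, page count, and the interpretation of Find as a best-fit lookup
// are illustrative assumptions). Per the comment on Treap, the treap value
// itself must not be heap-allocated, so the sketch uses a package-level var.
//
//	var tr runtime.Treap
//
//	s := runtime.AllocSpan(0x1000, 16, false)
//	tr.Insert(s)
//	if tr.Size() != 1 {
//		t.Fatal("expected one span in treap")
//	}
//	it := tr.Find(8) // look up a span with at least 8 pages (interpretation assumed)
//	if !it.Valid() || it.Span().Base() != s.Base() {
//		t.Fatal("expected to find the inserted span")
//	}
//	tr.RemoveSpan(s)
//	s.Free()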