// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint
var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash
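
// A minimal usage sketch, assuming a caller in an external runtime_test
// package where the exports above are visible as runtime.StringHash and
// friends; it only checks that hashing is deterministic for a fixed seed:
//
//	h1 := runtime.StringHash("hello", 0)
//	h2 := runtime.StringHash("hello", 0)
//	if h1 != h2 {
//		t.Fatal("StringHash not deterministic for a fixed seed")
//	}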

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}
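
// A minimal usage sketch, assuming an external runtime_test package;
// MemclrBytes zeroes the slice's backing array in place:
//
//	b := []byte{1, 2, 3}
//	runtime.MemclrBytes(b)
//	// b is now []byte{0, 0, 0}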

var HashLoad = &hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
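
// A minimal benchmark sketch, assuming an external runtime_test package and
// a hypothetical element type T; BenchSetType rewrites the heap bitmap for
// the value's type n times on the system stack:
//
//	func BenchmarkSetTypeT(b *testing.B) {
//		runtime.BenchSetType(b.N, new(T))
//	}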

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
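
// A minimal usage sketch, assuming a runtime test that wants traceback
// output at least as verbose as "system" for the rest of the process:
//
//	runtime.SetTracebackEnv("system")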

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
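
// A minimal usage sketch, assuming an external runtime_test package; the
// running pagesInUse counter should match the total recomputed from the
// in-use spans:
//
//	pagesInUse, counted := runtime.CountPagesInUse()
//	if pagesInUse != counted {
//		t.Fatalf("mheap_.pagesInUse = %d, want %d", pagesInUse, counted)
//	}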

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }
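
// A minimal usage sketch, assuming an external runtime_test package;
// Fastrandn(n) is expected to return a value in [0, n):
//
//	if r := runtime.Fastrandn(10); r >= 10 {
//		t.Fatalf("Fastrandn(10) = %d, out of range", r)
//	}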

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
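
// A minimal usage sketch, assuming an external runtime_test package; now,
// hdr1, hdr2, pc1, and pc2 stand in for caller-supplied values, and the
// header size passed to NewProfBuf must match len(hdr) in each Write:
//
//	b := runtime.NewProfBuf(2, 1000, 1000)
//	var tag unsafe.Pointer
//	b.Write(&tag, now, []uint64{hdr1, hdr2}, []uintptr{pc1, pc2})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	b.Close()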

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
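
// A minimal usage sketch, assuming an external runtime_test package; the
// runtime-maintained statistics should agree with the ones recomputed by
// scanning the heap:
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.Alloc != slow.Alloc {
//		t.Errorf("Alloc: %d from stats, %d from heap scan", base.Alloc, slow.Alloc)
//	}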

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}
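
// A minimal usage sketch, assuming an external runtime_test package; the
// wrapper exposes the runtime's internal rwmutex with the familiar
// Lock/Unlock/RLock/RUnlock shape:
//
//	var mu runtime.RWMutex
//	mu.RLock()
//	mu.RUnlock()
//	mu.Lock()
//	mu.Unlock()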

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
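
// A minimal usage sketch, assuming an external runtime_test package; with
// eight entries per bucket and a bounded load factor, a map holding 100
// entries should report well over a dozen buckets:
//
//	m := map[int]int{}
//	for i := 0; i < 100; i++ {
//		m[i] = i
//	}
//	if n := runtime.MapBucketsCount(m); n < 100/8 {
//		t.Errorf("got %d buckets for 100 entries", n)
//	}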

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}
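
// A minimal usage sketch, assuming an external runtime_test package and a
// goroutine that has locked itself to its thread exactly once from user code:
//
//	runtime.LockOSThread()
//	ext, _ := runtime.LockOSCounts()
//	if ext != 1 {
//		t.Fatalf("lockedExt = %d, want 1", ext)
//	}
//	runtime.UnlockOSThread()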

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp(unsafe.Pointer(&stk))
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	var reserved bool
	sysReserve(unsafe.Pointer(addr), physPageSize, &reserved)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}