runtime: remove mcache field from m
Having an mcache field in both m and p is confusing, so remove it
from m. Always use the mcache field from p. Use the new variable
mcache0 during bootstrap.

Change-Id: If2cba9f8bb131d911d512b61fd883a86cf62cc98
Reviewed-on: https://go-review.googlesource.com/c/go/+/205239
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
This commit is contained in:
parent 7802b55176
commit 3093959ee1
7 changed files with 54 additions and 43 deletions
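The change below is easiest to read as a single lookup rule: an M no longer carries its own mcache pointer; allocation code reaches the cache through the current P, falling back to the new global mcache0 only while bootstrapping, before any P is wired up. Here is a minimal standalone Go sketch of that rule; the toy m, p, and mcache types and the cacheFor helper are illustrative stand-ins, not runtime API (the real runtime inlines this lookup in mallocgc and mheap.allocSpan, and stores p as a puintptr compared against 0 rather than a plain pointer).

// A minimal standalone sketch (not runtime code) of the per-P mcache
// lookup this commit moves to.
package main

import "fmt"

type mcache struct{ id int }

type p struct{ mcache *mcache }

// The runtime stores p as a puintptr compared against 0; a plain
// pointer compared against nil plays that role in this sketch.
type m struct{ p *p }

// mcache0 mimics the bootstrap cache: set in mallocinit, cleared by
// procresize once g.m.p is set.
var mcache0 = &mcache{id: 0}

// cacheFor is a hypothetical helper mirroring the lookup done inline
// in mallocgc and mheap.allocSpan: prefer the P's mcache, fall back
// to mcache0 during bootstrap.
func cacheFor(mp *m) *mcache {
	if mp.p != nil {
		return mp.p.mcache
	}
	// No P: only legal while bootstrapping, before procresize runs.
	if mcache0 == nil {
		panic("malloc called with no P")
	}
	return mcache0
}

func main() {
	boot := &m{} // before any P is wired: falls back to mcache0
	fmt.Println(cacheFor(boot).id) // 0

	running := &m{p: &p{mcache: &mcache{id: 1}}}
	fmt.Println(cacheFor(running).id) // 1
}

In the actual patch, only one P ever receives mcache0: p.init hands it to the P with ID 0, and procresize sets mcache0 = nil once g.m.p is set.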
src/runtime/malloc.go
@@ -468,8 +468,7 @@ func mallocinit() {
 
 	// Initialize the heap.
 	mheap_.init()
-	_g_ := getg()
-	_g_.m.mcache = allocmcache()
+	mcache0 = allocmcache()
 
 	// Create initial arena growth hints.
 	if sys.PtrSize == 8 {
@@ -953,7 +952,19 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
 	shouldhelpgc := false
 	dataSize := size
-	c := gomcache()
+	var c *mcache
+	if mp.p != 0 {
+		c = mp.p.ptr().mcache
+	} else {
+		// We will be called without a P while bootstrapping,
+		// in which case we use mcache0, which is set in mallocinit.
+		// mcache0 is cleared when bootstrapping is complete,
+		// by procresize.
+		c = mcache0
+		if c == nil {
+			throw("malloc called with no P")
+		}
+	}
 	var x unsafe.Pointer
 	noscan := typ == nil || typ.ptrdata == 0
 	if size <= maxSmallSize {
@@ -1193,7 +1204,7 @@ func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
 }
 
 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
-	mp.mcache.next_sample = nextSample()
+	mp.p.ptr().mcache.next_sample = nextSample()
 	mProf_Malloc(x, size)
 }
 

src/runtime/mgcsweep.go
@@ -226,7 +226,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	size := s.elemsize
 	res := false
 
-	c := _g_.m.mcache
+	c := _g_.m.p.ptr().mcache
 	freeToHeap := false
 
 	// The allocBits indicate which unmarked objects don't need to be

src/runtime/mheap.go
@@ -1141,10 +1141,21 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
 	// which may only be done with the heap locked.
 
 	// Transfer stats from mcache to global.
-	memstats.heap_scan += uint64(gp.m.mcache.local_scan)
-	gp.m.mcache.local_scan = 0
-	memstats.tinyallocs += uint64(gp.m.mcache.local_tinyallocs)
-	gp.m.mcache.local_tinyallocs = 0
+	var c *mcache
+	if gp.m.p != 0 {
+		c = gp.m.p.ptr().mcache
+	} else {
+		// This case occurs while bootstrapping.
+		// See the similar code in mallocgc.
+		c = mcache0
+		if c == nil {
+			throw("mheap.allocSpan called with no P")
+		}
+	}
+	memstats.heap_scan += uint64(c.local_scan)
+	c.local_scan = 0
+	memstats.tinyallocs += uint64(c.local_tinyallocs)
+	c.local_tinyallocs = 0
 
 	// Do some additional accounting if it's a large allocation.
 	if spanclass.sizeclass() == 0 {
@@ -1342,12 +1353,12 @@ func (h *mheap) grow(npage uintptr) bool {
 // Free the span back into the heap.
 func (h *mheap) freeSpan(s *mspan) {
 	systemstack(func() {
-		mp := getg().m
+		c := getg().m.p.ptr().mcache
 		lock(&h.lock)
-		memstats.heap_scan += uint64(mp.mcache.local_scan)
-		mp.mcache.local_scan = 0
-		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
-		mp.mcache.local_tinyallocs = 0
+		memstats.heap_scan += uint64(c.local_scan)
+		c.local_scan = 0
+		memstats.tinyallocs += uint64(c.local_tinyallocs)
+		c.local_tinyallocs = 0
 		if msanenabled {
 			// Tell msan that this entire span is no longer in use.
 			base := unsafe.Pointer(s.base())

src/runtime/proc.go
@@ -82,6 +82,7 @@ var modinfo string
 var (
 	m0           m
 	g0           g
+	mcache0      *mcache
 	raceprocctx0 uintptr
 )
 
@@ -2957,7 +2958,6 @@ func reentersyscall(pc, sp uintptr) {
 
 	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
 	_g_.sysblocktraced = true
-	_g_.m.mcache = nil
 	pp := _g_.m.p.ptr()
 	pp.m = 0
 	_g_.m.oldp.set(pp)
@@ -3083,9 +3083,6 @@ func exitsyscall() {
 	oldp := _g_.m.oldp.ptr()
 	_g_.m.oldp = 0
 	if exitsyscallfast(oldp) {
-		if _g_.m.mcache == nil {
-			throw("lost mcache")
-		}
 		if trace.enabled {
 			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
 				systemstack(traceGoStart)
@@ -3136,10 +3133,6 @@ func exitsyscall() {
 	// Call the scheduler.
 	mcall(exitsyscall0)
 
-	if _g_.m.mcache == nil {
-		throw("lost mcache")
-	}
-
 	// Scheduler returned, so we're allowed to run now.
 	// Delete the syscallsp information that we left for
 	// the garbage collector during the system call.
@@ -4033,10 +4026,12 @@ func (pp *p) init(id int32) {
 	pp.wbBuf.reset()
 	if pp.mcache == nil {
 		if id == 0 {
-			if getg().m.mcache == nil {
+			if mcache0 == nil {
 				throw("missing mcache?")
 			}
-			pp.mcache = getg().m.mcache // bootstrap
+			// Use the bootstrap mcache0. Only one P will get
+			// mcache0: the one with ID 0.
+			pp.mcache = mcache0
 		} else {
 			pp.mcache = allocmcache()
 		}
@@ -4216,7 +4211,6 @@ func procresize(nprocs int32) *p {
 		_g_.m.p.ptr().m = 0
 	}
 	_g_.m.p = 0
-	_g_.m.mcache = nil
 	p := allp[0]
 	p.m = 0
 	p.status = _Pidle
@@ -4226,6 +4220,9 @@ func procresize(nprocs int32) *p {
 		}
 	}
 
+	// g.m.p is now set, so we no longer need mcache0 for bootstrapping.
+	mcache0 = nil
+
 	// release resources from unused P's
 	for i := nprocs; i < old; i++ {
 		p := allp[i]
@@ -4291,7 +4288,7 @@ func acquirep(_p_ *p) {
 func wirep(_p_ *p) {
 	_g_ := getg()
 
-	if _g_.m.p != 0 || _g_.m.mcache != nil {
+	if _g_.m.p != 0 {
 		throw("wirep: already in go")
 	}
 	if _p_.m != 0 || _p_.status != _Pidle {
@@ -4302,7 +4299,6 @@ func wirep(_p_ *p) {
 		print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
 		throw("wirep: invalid p state")
 	}
-	_g_.m.mcache = _p_.mcache
 	_g_.m.p.set(_p_)
 	_p_.m.set(_g_.m)
 	_p_.status = _Prunning
@@ -4312,19 +4308,18 @@ func wirep(_p_ *p) {
 func releasep() *p {
 	_g_ := getg()
 
-	if _g_.m.p == 0 || _g_.m.mcache == nil {
+	if _g_.m.p == 0 {
 		throw("releasep: invalid arg")
 	}
 	_p_ := _g_.m.p.ptr()
-	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
-		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
+	if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
+		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
 		throw("releasep: invalid p state")
 	}
 	if trace.enabled {
 		traceProcStop(_g_.m.p.ptr())
 	}
 	_g_.m.p = 0
-	_g_.m.mcache = nil
 	_p_.m = 0
 	_p_.status = _Pidle
 	return _p_

src/runtime/runtime1.go
@@ -459,11 +459,6 @@ func releasem(mp *m) {
 	}
 }
 
-//go:nosplit
-func gomcache() *mcache {
-	return getg().m.mcache
-}
-
 //go:linkname reflect_typelinks reflect.typelinks
 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
 	modules := activeModules()

src/runtime/runtime2.go
@@ -510,7 +510,6 @@ type m struct {
 	park          note
 	alllink       *m // on allm
 	schedlink     muintptr
-	mcache        *mcache
 	lockedg       guintptr
 	createstack   [32]uintptr // stack that created this thread.
 	lockedExt     uint32      // tracking for external LockOSThread

src/runtime/stack.go
@@ -357,16 +357,16 @@ func stackalloc(n uint32) stack {
 		n2 >>= 1
 	}
 	var x gclinkptr
-	c := thisg.m.mcache
-	if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
-		// c == nil can happen in the guts of exitsyscall or
-		// procresize. Just get a stack from the global pool.
+	if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
+		// thisg.m.p == 0 can happen in the guts of exitsyscall
+		// or procresize. Just get a stack from the global pool.
 		// Also don't touch stackcache during gc
 		// as it's flushed concurrently.
 		lock(&stackpool[order].item.mu)
 		x = stackpoolalloc(order)
 		unlock(&stackpool[order].item.mu)
 	} else {
+		c := thisg.m.p.ptr().mcache
 		x = c.stackcache[order].list
 		if x.ptr() == nil {
 			stackcacherefill(c, order)
@@ -452,12 +452,12 @@ func stackfree(stk stack) {
 		n2 >>= 1
 	}
 	x := gclinkptr(v)
-	c := gp.m.mcache
-	if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
+	if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
 		lock(&stackpool[order].item.mu)
 		stackpoolfree(x, order)
 		unlock(&stackpool[order].item.mu)
 	} else {
+		c := gp.m.p.ptr().mcache
 		if c.stackcache[order].size >= _StackCacheSize {
 			stackcacherelease(c, order)
 		}
||||||
Loading…
Add table
Add a link
Reference in a new issue