commit 23aeb34df1
Author: Rick Hudson
Date:   2016-04-27 18:19:16 -04:00

    [dev.garbage] Merge remote-tracking branch 'origin/master' into HEAD

    Change-Id: I282fd9ce9db435dfd35e882a9502ab1abc185297

764 changed files with 48184 additions and 19520 deletions

src/runtime/malloc.go

@@ -87,9 +87,6 @@ import (
 const (
 	debugMalloc = false
 
-	flagNoScan = _FlagNoScan
-	flagNoZero = _FlagNoZero
-
 	maxTinySize   = _TinySize
 	tinySizeClass = _TinySizeClass
 	maxSmallSize  = _MaxSmallSize
@@ -490,12 +487,6 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 // base address for all 0-byte allocations
 var zerobase uintptr
 
-const (
-	// flags to malloc
-	_FlagNoScan = 1 << 0 // GC doesn't have to scan object
-	_FlagNoZero = 1 << 1 // don't zero memory
-)
-
 // nextFreeFast returns the next free object if one is quickly available.
 // Otherwise it returns 0.
 func (c *mcache) nextFreeFast(sizeclass int8) gclinkptr {
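
Note: nextFreeFast, visible as context here, belongs to the free-index allocator the dev.garbage branch introduces. Rather than popping a linked free list, it scans a cached word of the span's allocation bitmap with a count-trailing-zeros instruction. A minimal userspace sketch of that scan, assuming a 1-bit-per-slot cache where 1 means free (span, allocCache, and findFree are illustrative names, not the runtime's types):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // span models just enough of a size-class span to show the idea:
    // allocCache caches one word of the allocation bitmap, with a 1 bit
    // meaning "slot is free".
    type span struct {
        allocCache uint64
        base       uintptr
        elemsize   uintptr
    }

    // findFree returns the address of the next free slot, or 0 when the
    // cached word is exhausted (the runtime would then refill the cache
    // or sweep; this sketch just gives up).
    func (s *span) findFree() uintptr {
        theBit := bits.TrailingZeros64(s.allocCache)
        if theBit == 64 {
            return 0
        }
        s.allocCache &^= 1 << uint(theBit) // mark the slot allocated
        return s.base + uintptr(theBit)*s.elemsize
    }

    func main() {
        s := &span{allocCache: 0b1101, base: 0x1000, elemsize: 16}
        for i := 0; i < 4; i++ {
            fmt.Printf("%#x\n", s.findFree()) // 0x1000, 0x1020, 0x1030, then 0
        }
    }
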
@@ -564,7 +555,7 @@ func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
 // Allocate an object of size bytes.
 // Small objects are allocated from the per-P cache's free lists.
 // Large objects (> 32 kB) are allocated straight from the heap.
-func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
+func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	if gcphase == _GCmarktermination {
 		throw("mallocgc called with gcphase == _GCmarktermination")
 	}
@@ -573,10 +564,6 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 		return unsafe.Pointer(&zerobase)
 	}
 
-	if flags&flagNoScan == 0 && typ == nil {
-		throw("malloc missing type")
-	}
-
 	if debug.sbrk != 0 {
 		align := uintptr(16)
 		if typ != nil {
@@ -620,14 +607,15 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	dataSize := size
 	c := gomcache()
 	var x unsafe.Pointer
+	noscan := typ == nil || typ.kind&kindNoPointers != 0
 	if size <= maxSmallSize {
-		if flags&flagNoScan != 0 && size < maxTinySize {
+		if noscan && size < maxTinySize {
 			// Tiny allocator.
 			//
 			// Tiny allocator combines several tiny allocation requests
 			// into a single memory block. The resulting memory block
 			// is freed when all subobjects are unreachable. The subobjects
-			// must be FlagNoScan (don't have pointers), this ensures that
+			// must be noscan (don't have pointers), this ensures that
 			// the amount of potentially wasted memory is bounded.
 			//
 			// Size of the memory block used for combining (maxTinySize) is tunable.
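
The combining scheme described in this comment is easy to prototype outside the runtime. A toy sketch, assuming a 16-byte block and the same 2-, 4-, and 8-byte alignment rounding mallocgc applies to tiny sizes (tinyAlloc and its fields are invented for illustration):

    package main

    import "fmt"

    const maxTinySize = 16

    // tinyAlloc packs pointer-free allocations under 16 bytes into one
    // shared block, the way the tiny allocator packs them into a
    // 16-byte region of a span.
    type tinyAlloc struct {
        block  [maxTinySize]byte
        offset uintptr
    }

    // alloc returns an offset inside the shared block, or false when the
    // request does not fit and a fresh block would be needed.
    func (t *tinyAlloc) alloc(size uintptr) (uintptr, bool) {
        off := t.offset
        // Align the offset the way mallocgc does for tiny objects.
        switch {
        case size&7 == 0:
            off = (off + 7) &^ 7 // 8-byte align
        case size&3 == 0:
            off = (off + 3) &^ 3 // 4-byte align
        case size&1 == 0:
            off = (off + 1) &^ 1 // 2-byte align
        }
        if off+size > maxTinySize {
            return 0, false
        }
        t.offset = off + size
        return off, true
    }

    func main() {
        var t tinyAlloc
        for _, size := range []uintptr{1, 2, 8, 4} {
            off, ok := t.alloc(size)
            fmt.Println(size, off, ok) // 1@0, 2@2, 8@8; the final 4 does not fit
        }
    }
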
@@ -699,7 +687,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 			v, shouldhelpgc = c.nextFree(sizeclass)
 		}
 		x = unsafe.Pointer(v)
-		if flags&flagNoZero == 0 {
+		if needzero {
			memclr(unsafe.Pointer(v), size)
 			// TODO:(rlh) Only clear if object is not known to be zeroed.
 		}
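
The clear above now runs whenever needzero is set; the TODO asks to also skip it when the memory is already known to be zeroed. A toy sketch of that refinement, with a per-span needzero bit standing in for the state the heap tracks (memory fresh from the OS arrives zeroed; a reused span may hold stale bytes):

    package main

    import "fmt"

    type span struct {
        mem      []byte
        needzero bool // true if mem may contain non-zero garbage
    }

    // alloc hands out the span's memory, clearing it only when the
    // caller requires zeroed memory AND the span may be dirty.
    func (s *span) alloc(needzero bool) []byte {
        if needzero && s.needzero {
            for i := range s.mem {
                s.mem[i] = 0
            }
        }
        s.needzero = true // assume it gets dirtied once handed out
        return s.mem
    }

    func main() {
        fresh := &span{mem: make([]byte, 4)} // already zeroed
        fmt.Println(fresh.alloc(true)) // [0 0 0 0], no clearing work done

        reused := &span{mem: []byte{1, 2, 3, 4}, needzero: true}
        fmt.Println(reused.alloc(true)) // [0 0 0 0] after an explicit clear
    }
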
@@ -708,14 +696,15 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 		var s *mspan
 		shouldhelpgc = true
 		systemstack(func() {
-			s = largeAlloc(size, flags)
+			s = largeAlloc(size, needzero)
 		})
 		s.freeindex = 1
 		x = unsafe.Pointer(s.base())
 		size = s.elemsize
 	}
 
-	if flags&flagNoScan != 0 {
+	var scanSize uintptr
+	if noscan {
 		heapBitsSetTypeNoScan(uintptr(x), size)
 	} else {
// If allocating a defer+arg block, now that we've picked a malloc size
@ -733,11 +722,12 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// pointers, GC has to scan to the last
// element.
if typ.ptrdata != 0 {
c.local_scan += dataSize - typ.size + typ.ptrdata
scanSize = dataSize - typ.size + typ.ptrdata
}
} else {
c.local_scan += typ.ptrdata
scanSize = typ.ptrdata
}
c.local_scan += scanSize
// Ensure that the stores above that initialize x to
// type-safe memory and set the heap bits occur before
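
scanSize lets the collector stop scanning at the object's last pointer instead of its end; for array allocations, dataSize - typ.size + typ.ptrdata extends the scan only to the final element's last pointer. A rough way to see what typ.ptrdata means, using reflect (lastPtr below is a simplified stand-in that handles only a few kinds; the compiler computes the real value):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    // lastPtr reports how many leading bytes of a value of type t can
    // contain pointers, i.e. a crude stand-in for typ.ptrdata.
    func lastPtr(t reflect.Type) uintptr {
        ptrSize := unsafe.Sizeof(uintptr(0))
        switch t.Kind() {
        case reflect.Ptr, reflect.UnsafePointer, reflect.Map,
            reflect.Chan, reflect.Func:
            return ptrSize
        case reflect.String, reflect.Slice:
            return ptrSize // the data pointer is the first word
        case reflect.Interface:
            return 2 * ptrSize
        case reflect.Struct:
            var n uintptr
            for i := 0; i < t.NumField(); i++ {
                f := t.Field(i)
                if p := lastPtr(f.Type); p != 0 {
                    n = f.Offset + p // last pointer-bearing field wins
                }
            }
            return n
        }
        return 0 // no pointers: the object is noscan
    }

    type T struct {
        p   *int      // a pointer in the first word
        pad [6]uint64 // 48 pointer-free bytes the GC never scans
    }

    func main() {
        t := reflect.TypeOf(T{})
        // Scan work per object is bounded by ptrdata, not size:
        fmt.Println(t.Size(), lastPtr(t)) // 56 8 on 64-bit
    }
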
@@ -748,14 +738,12 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 		publicationBarrier()
 	}
 
-	// GCmarkterminate allocates black
+	// Allocate black during GC.
 	// All slots hold nil so no scanning is needed.
 	// This may be racing with GC so do it atomically if there can be
 	// a race marking the bit.
-	if gcphase == _GCmarktermination || gcBlackenPromptly {
-		systemstack(func() {
-			gcmarknewobject_m(uintptr(x), size)
-		})
+	if gcphase != _GCoff {
+		gcmarknewobject(uintptr(x), size, scanSize)
 	}
 
 	// The object x is about to be reused but tracefree and msanfree
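
"Allocate black" means an object created while the collector runs is marked before anyone can see it, so the concurrent marker never has to visit it; since all of its slots are still nil, skipping the scan is safe. A toy sketch of the invariant (collector and its fields are invented names):

    package main

    import "fmt"

    // collector illustrates "allocate black": objects created while
    // marking is in progress are marked immediately.
    type collector struct {
        marking bool // mirrors gcphase != _GCoff
        black   map[string]bool
    }

    func (c *collector) alloc(name string) {
        if c.marking {
            c.black[name] = true // born black: never rescanned or freed this cycle
        }
    }

    func main() {
        c := &collector{marking: true, black: map[string]bool{}}
        c.alloc("x")
        fmt.Println(c.black["x"]) // true: x survives the cycle it was born in
    }
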
@@ -813,7 +801,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	return x
 }
 
-func largeAlloc(size uintptr, flag uint32) *mspan {
+func largeAlloc(size uintptr, needzero bool) *mspan {
 	// print("largeAlloc size=", size, "\n")
 
 	if size+_PageSize < size {
@@ -829,7 +817,7 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
 	// pays the debt down to npage pages.
 	deductSweepCredit(npages*_PageSize, npages)
 
-	s := mheap_.alloc(npages, 0, true, flag&_FlagNoZero == 0)
+	s := mheap_.alloc(npages, 0, true, needzero)
 	if s == nil {
 		throw("out of memory")
 	}
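
The size+_PageSize < size guard in the previous hunk is the usual unsigned-overflow check before rounding a request up to whole pages. A sketch assuming an 8 KiB page, as on most platforms (_PageSize is 1 << _PageShift in the runtime):

    package main

    import "fmt"

    const pageSize = 8192 // assumed; not the runtime's actual constant

    // roundToPages converts a byte size to a page count, reporting
    // failure when rounding up would wrap around (largeAlloc throws).
    func roundToPages(size uintptr) (npages uintptr, ok bool) {
        if size+pageSize < size { // unsigned wraparound check
            return 0, false
        }
        npages = size / pageSize
        if size%pageSize != 0 {
            npages++
        }
        return npages, true
    }

    func main() {
        fmt.Println(roundToPages(40000))       // 5 true
        fmt.Println(roundToPages(^uintptr(0))) // 0 false: would overflow
    }
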
@@ -840,11 +828,7 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
 
 // implementation of new builtin
 func newobject(typ *_type) unsafe.Pointer {
-	flags := uint32(0)
-	if typ.kind&kindNoPointers != 0 {
-		flags |= flagNoScan
-	}
-	return mallocgc(typ.size, typ, flags)
+	return mallocgc(typ.size, typ, true)
 }
 
 //go:linkname reflect_unsafe_New reflect.unsafe_New
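
With the flag word gone, newobject computes nothing: the noscan decision moved inside mallocgc (the typ.kind&kindNoPointers test earlier in this diff), and needzero is always true because the language guarantees new(T) returns a zeroed value. That guarantee is observable from any Go program:

    package main

    import "fmt"

    type T struct {
        N int
        S []byte
        P *T
    }

    func main() {
        // new(T) compiles down to a newobject(typ) call; because
        // mallocgc is handed needzero=true, every field is zero.
        t := new(T)
        fmt.Println(t.N, t.S == nil, t.P == nil) // 0 true true
    }
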
@@ -852,29 +836,19 @@ func reflect_unsafe_New(typ *_type) unsafe.Pointer {
 	return newobject(typ)
 }
 
-// implementation of make builtin for slices
-func newarray(typ *_type, n uintptr) unsafe.Pointer {
-	flags := uint32(0)
-	if typ.kind&kindNoPointers != 0 {
-		flags |= flagNoScan
-	}
-	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/typ.size) {
-		panic("runtime: allocation size out of range")
-	}
-	return mallocgc(typ.size*n, typ, flags)
+// newarray allocates an array of n elements of type typ.
+func newarray(typ *_type, n int) unsafe.Pointer {
+	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
+		panic(plainError("runtime: allocation size out of range"))
+	}
+	return mallocgc(typ.size*uintptr(n), typ, true)
 }
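
The old two-step bounds check becomes a single comparison against maxSliceCap(typ.size), the largest element count whose total byte size stays representable. What the check prevents is plain unsigned wraparound in typ.size*uintptr(n); a sketch (maxCap approximates maxSliceCap and ignores the runtime's additional _MaxMem cap):

    package main

    import "fmt"

    // maxCap is the largest n such that n*elemSize cannot wrap a uintptr.
    func maxCap(elemSize uintptr) uintptr {
        if elemSize == 0 {
            return ^uintptr(0)
        }
        return ^uintptr(0) / elemSize
    }

    func newarraySize(elemSize uintptr, n int) (uintptr, bool) {
        if n < 0 || uintptr(n) > maxCap(elemSize) {
            return 0, false // newarray panics here
        }
        return elemSize * uintptr(n), true
    }

    func main() {
        fmt.Println(newarraySize(8, 1<<20)) // 8388608 true
        fmt.Println(newarraySize(8, -1))    // 0 false
        // Without the check, a huge n would silently wrap: with
        // 64-bit uintptr, 8 * (1<<61) == 0.
        huge := int(^uint(0)>>3 + 1)
        fmt.Println(newarraySize(8, huge)) // 0 false
    }
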
 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
-func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
+func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
 	return newarray(typ, n)
 }
 
-// rawmem returns a chunk of pointerless memory. It is
-// not zeroed.
-func rawmem(size uintptr) unsafe.Pointer {
-	return mallocgc(size, nil, flagNoScan|flagNoZero)
-}
-
 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 	mp.mcache.next_sample = nextSample()
 	mProf_Malloc(x, size)