// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "type.h"
#include "typekind.h"
#include "race.h"
#include "stack.h"
#include "../../cmd/ld/textflag.h"

// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
#pragma dataflag NOPTR
MHeap runtime·mheap;
MStats mstats;

int32	runtime·checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime·MemProfileRate;

static MSpan* largealloc(uint32, uintptr*);
static void profilealloc(void *v, uintptr size, uintptr typ);
static void settype(MSpan *s, void *v, uintptr typ);

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
// If the block will be freed with runtime·free(), typ must be 0.
void*
runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
{
	int32 sizeclass;
	uintptr tinysize, size1;
	intgo rate;
	MCache *c;
	MSpan *s;
	MLink *v, *next;
	byte *tiny;

	if(size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		return &runtime·zerobase;
	}
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	// Disable preemption during settype.
	// We cannot use m->mallocing for this, because settype calls mallocgc.
	m->locks++;
	m->mallocing = 1;

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);

	c = m->mcache;
	if(!runtime·debug.efence && size <= MaxSmallSize) {
		if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {
			// Tiny allocator.
			//
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (don't have pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (TinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobject are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= TinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces number of allocations by ~12% and
			// reduces heap size by ~20%.
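			//
			// A worked example of these bounds (illustrative arithmetic,
			// assuming the 8-byte minimum size class for standalone
			// allocations): a single live 8-byte subobject can pin a whole
			// 16-byte block, wasting 16/8 = 2x; with 32-byte blocks the same
			// subobject could pin 32 bytes, i.e. 4x. Best case, sixteen
			// 1-byte objects share one 16-byte block instead of sixteen
			// separate 8-byte allocations: 128/16 = 8x saved, and likewise
			// 256/32 = 8x for 32-byte blocks.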
			tinysize = c->tinysize;
			if(size <= tinysize) {
				tiny = c->tiny;
				// Align tiny pointer for required (conservative) alignment.
				if((size&7) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 8);
				else if((size&3) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 4);
				else if((size&1) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 2);
				size1 = size + (tiny - c->tiny);
				if(size1 <= tinysize) {
					// The object fits into existing tiny block.
					v = (MLink*)tiny;
					c->tiny += size1;
					c->tinysize -= size1;
					m->mallocing = 0;
					m->locks--;
					if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
						g->stackguard0 = StackPreempt;
					return v;
				}
			}
			// Allocate a new TinySize block.
			s = c->alloc[TinySizeClass];
			if(s->freelist == nil)
				s = runtime·MCache_Refill(c, TinySizeClass);
			v = s->freelist;
			next = v->next;
			s->freelist = next;
			s->ref++;
			if(next != nil)  // prefetching nil leads to a DTLB miss
				PREFETCH(next);
			((uint64*)v)[0] = 0;
			((uint64*)v)[1] = 0;
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if(TinySize-size > tinysize) {
				c->tiny = (byte*)v + size;
				c->tinysize = TinySize - size;
			}
			size = TinySize;
			goto done;
		}
		// Allocate from mcache free lists.
		// Inlined version of SizeToClass().
		if(size <= 1024-8)
			sizeclass = runtime·size_to_class8[(size+7)>>3];
		else
			sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
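		// For example (a sketch of the lookup; the exact class sizes are
		// assumed from the generated tables, not guaranteed here):
		// size = 100 uses the first table, index (100+7)>>3 = 13, and
		// class_to_size below then rounds the request up to that class's
		// size, e.g. 112 bytes.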
		size = runtime·class_to_size[sizeclass];
		s = c->alloc[sizeclass];
		if(s->freelist == nil)
			s = runtime·MCache_Refill(c, sizeclass);
		v = s->freelist;
		next = v->next;
		s->freelist = next;
		s->ref++;
		if(next != nil)  // prefetching nil leads to a DTLB miss
			PREFETCH(next);
		if(!(flag & FlagNoZero)) {
			v->next = nil;
			// block is zeroed iff second word is zero ...
			if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
				runtime·memclr((byte*)v, size);
		}
	done:
		c->local_cachealloc += size;
	} else {
		// Allocate directly from heap.
		s = largealloc(flag, &size);
		v = (void*)(s->start << PageShift);
	}

	if(flag & FlagNoGC)
		runtime·marknogc(v);
	else if(!(flag & FlagNoScan))
		runtime·markscan(v);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;

	m->mallocing = 0;
	// TODO: save type even if FlagNoScan? Potentially expensive but might help
	// heap profiling/tracing.
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
		settype(s, v, typ);

	if(raceenabled)
		runtime·racemalloc(v, size);

	if(runtime·debug.allocfreetrace)
		goto profile;

	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size < rate && size < c->next_sample)
			c->next_sample -= size;
		else {
		profile:
			profilealloc(v, size, typ);
		}
	}

	m->locks--;
	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
		g->stackguard0 = StackPreempt;

	if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);

	return v;
}
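
// Typical call patterns (a sketch based on the flags used elsewhere in
// this file, not an exhaustive list): pointer-free data is allocated with
// FlagNoScan so the GC skips scanning it; allocator-internal metadata adds
// FlagNoProfiling|FlagNoInvokeGC to stay out of profiles and avoid
// re-entering GC; and callers that fully initialize the block themselves
// can pass FlagNoZero to skip clearing.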

static MSpan*
largealloc(uint32 flag, uintptr *sizep)
{
	uintptr npages, size;
	MSpan *s;
	void *v;

	// Allocate directly from heap.
	size = *sizep;
	if(size + PageSize < size)
		runtime·throw("out of memory");
	npages = size >> PageShift;
	if((size & PageMask) != 0)
		npages++;
	s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
	if(s == nil)
		runtime·throw("out of memory");
	s->limit = (byte*)(s->start<<PageShift) + size;
	*sizep = npages<<PageShift;
	v = (void*)(s->start << PageShift);
	// setup for mark sweep
	runtime·markspan(v, 0, 0, true);
	return s;
}

static void
profilealloc(void *v, uintptr size, uintptr typ)
{
	uintptr rate;
	int32 next;
	MCache *c;

	c = m->mcache;
	rate = runtime·MemProfileRate;
	if(size < rate) {
		// pick next profile time
		// If you change this, also change allocmcache.
		if(rate > 0x3fffffff)	// make 2*rate not overflow
			rate = 0x3fffffff;
		next = runtime·fastrand1() % (2*rate);
		// Subtract the "remainder" of the current allocation.
		// Otherwise objects that are close in size to sampling rate
		// will be under-sampled, because we consistently discard this remainder.
		next -= (size - c->next_sample);
		if(next < 0)
			next = 0;
		c->next_sample = next;
	}
	runtime·MProf_Malloc(v, size, typ);
}
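
// A worked example of the sampling schedule (illustrative numbers): with
// MemProfileRate = 512*1024, each sampled allocation draws the next trigger
// uniformly from [0, 2*rate), so triggers average one per ~512 KB allocated.
// If an allocation of 100 bytes fires with next_sample = 40, the 60 bytes it
// overshoots are subtracted from the freshly drawn trigger, so objects close
// to the sampling rate are not systematically under-sampled.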

void*
runtime·malloc(uintptr size)
{
	return runtime·mallocgc(size, 0, FlagNoInvokeGC);
}

// Free the object whose base pointer is v.
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	size = s->elemsize;
	sizeclass = s->sizeclass;
	// Objects that are smaller than TinySize can be allocated using tiny alloc;
	// if such an object is then combined with an object that has a finalizer, we will crash.
	if(size < TinySize)
		runtime·throw("freeing too small block");

	// Ensure that the span is swept.
	// If we free into an unswept span, we will corrupt GC bitmaps.
	runtime·MSpan_EnsureSwept(s);

	if(s->specials != nil)
		runtime·freeallspecials(s, v, size);

	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		s->needzero = 1;
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime·markfreed(v);
		runtime·unmarkspan(v, 1<<PageShift);
		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if(runtime·debug.efence)
			runtime·SysFault((void*)(s->start<<PageShift), size);
		else
			runtime·MHeap_Free(&runtime·mheap, s, 1);
		c->local_nlargefree++;
		c->local_largefree += size;
	} else {
		// Small object.
		if(size > 2*sizeof(uintptr))
			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		else if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 0;
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		c->local_nsmallfree[sizeclass]++;
		c->local_cachealloc -= size;
		if(c->alloc[sizeclass] == s) {
			// We own the span, so we can just add v to the freelist.
			runtime·markfreed(v);
			((MLink*)v)->next = s->freelist;
			s->freelist = v;
			s->ref--;
		} else {
			// Someone else owns this span. Add to free queue.
			runtime·MCache_Free(c, v, sizeclass, size);
		}
	}
	m->mallocing = 0;
}

int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	m->mcache->local_nlookup++;
	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime·lock(&runtime·mheap);
		runtime·purgecachedstats(m->mcache);
		runtime·unlock(&runtime·mheap);
	}

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime·checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	n = s->elemsize;
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}
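
// For example (a sketch with made-up addresses): if a span starts at
// p = 0x400000 with elemsize n = 48 and v = 0x400070, then
// i = 0x70/48 = 2, so mlookup reports *base = 0x400060 and *size = 48,
// i.e. v is an interior pointer into the third 48-byte element.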

void
runtime·purgecachedstats(MCache *c)
{
	MHeap *h;
	int32 i;

	// Protected by either heap or GC lock.
	h = &runtime·mheap;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	h->largefree += c->local_largefree;
	c->local_largefree = 0;
	h->nlargefree += c->local_nlargefree;
	c->local_nlargefree = 0;
	for(i=0; i<nelem(c->local_nsmallfree); i++) {
		h->nsmallfree[i] += c->local_nsmallfree[i];
		c->local_nsmallfree[i] = 0;
	}
}

// Size of the trailing by_size array differs between Go and C:
// NumSizeClasses was changed, but we cannot change the Go struct
// because of backward compatibility.
// sizeof_C_MStats is what C thinks the size of the Go struct is.
uintptr runtime·sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);

#define MaxArena32 (2U<<30)

void
runtime·mallocinit(void)
{
	byte *p, *p1;
	uintptr arena_size, bitmap_size, spans_size, p_size;
	extern byte end[];
	uintptr limit;
	uint64 i;

	p = nil;
	p_size = 0;
	arena_size = 0;
	bitmap_size = 0;
	spans_size = 0;

	// for 64-bit build
	USED(p);
	USED(p_size);
	USED(arena_size);
	USED(bitmap_size);
	USED(spans_size);

	runtime·InitSizes();

	if(runtime·class_to_size[TinySizeClass] != TinySize)
		runtime·throw("bad TinySizeClass");

	// limit = runtime·memlimit();
	// See https://code.google.com/p/go/issues/detail?id=5049
	// TODO(rsc): Fix after 1.1.
	limit = 0;

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
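		//
		// Concretely (a worked example of the loop below): for i = 0 the
		// hint is (0<<40 | 0x00c0ULL<<32) = 0x0000_00c0_0000_0000, and for
		// i = 0x7f it is 0x7fc0_0000_0000, so the candidate start addresses
		// sweep 0x00c0, 0x01c0, ..., 0x7fc0 in the top bytes.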
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
		spans_size = ROUND(spans_size, PageSize);
		for(i = 0; i <= 0x7f; i++) {
			p = (void*)(i<<40 | 0x00c0ULL<<32);
			p_size = bitmap_size + spans_size + arena_size + PageSize;
			p = runtime·SysReserve(p, p_size);
			if(p != nil)
				break;
		}
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		spans_size = MaxArena32 / PageSize * sizeof(runtime·mheap.spans[0]);
		if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
			spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
		}
		spans_size = ROUND(spans_size, PageSize);

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		p = (byte*)ROUND((uintptr)end + (1<<18), 1<<20);
		p_size = bitmap_size + spans_size + arena_size + PageSize;
		p = runtime·SysReserve(p, p_size);
		if(p == nil)
			runtime·throw("runtime: cannot reserve arena virtual address space");
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
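	// (ROUND rounds up to a power-of-two boundary; for example, with
	// PageSize = 0x1000, ROUND(0x123456, 0x1000) = 0x124000.)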
	p1 = (byte*)ROUND((uintptr)p, PageSize);

	runtime·mheap.spans = (MSpan**)p1;
	runtime·mheap.bitmap = p1 + spans_size;
	runtime·mheap.arena_start = p1 + spans_size + bitmap_size;
	runtime·mheap.arena_used = runtime·mheap.arena_start;
	runtime·mheap.arena_end = p + p_size;

	if(((uintptr)runtime·mheap.arena_start & (PageSize-1)) != 0)
		runtime·throw("misrounded allocation in mallocinit");

	// Initialize the rest of the allocator.
	runtime·MHeap_Init(&runtime·mheap);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(TinySize));
}

void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p, *p_end;
	uintptr p_size;

	if(n > h->arena_end - h->arena_used) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;

		p_size = ROUND(n + PageSize, 256<<20);
		new_end = h->arena_end + p_size;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime·SysReserve(h->arena_end, p_size);
			if(p == h->arena_end)
				h->arena_end = new_end;
			else if(p+p_size <= h->arena_start + MaxArena32) {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h->arena_end = p+p_size;
				h->arena_used = p + (-(uintptr)p&(PageSize-1));
			} else {
				uint64 stat;
				stat = 0;
				runtime·SysFree(p, p_size, &stat);
			}
		}
	}
	if(n <= h->arena_end - h->arena_used) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime·SysMap(p, n, &mstats.heap_sys);
		h->arena_used += n;
		runtime·MHeap_MapBits(h);
		runtime·MHeap_MapSpans(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);

		if(((uintptr)p & (PageSize-1)) != 0)
			runtime·throw("misrounded allocation in MHeap_SysAlloc");
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size = ROUND(n, PageSize) + PageSize;
	p = runtime·SysAlloc(p_size, &mstats.heap_sys);
	if(p == nil)
		return nil;

	if(p < h->arena_start || p+p_size - h->arena_start >= MaxArena32) {
		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime·SysFree(p, p_size, &mstats.heap_sys);
		return nil;
	}

	p_end = p + p_size;
	p += -(uintptr)p & (PageSize-1);
	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(p_end > h->arena_end)
			h->arena_end = p_end;
		runtime·MHeap_MapBits(h);
		runtime·MHeap_MapSpans(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
	}

	if(((uintptr)p & (PageSize-1)) != 0)
		runtime·throw("misrounded allocation in MHeap_SysAlloc");
	return p;
}
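
// A note on the alignment idiom above (a sketch with assumed values):
// "p += -(uintptr)p & (PageSize-1)" advances p to the next PageSize
// boundary without branching. With PageSize = 0x1000 and p = 0x7f1234,
// -(uintptr)p & 0xfff = 0xdcc, so p becomes 0x7f2000; if p is already
// aligned the masked value is 0 and p is unchanged.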

static struct
{
	Lock;
	byte*	pos;
	byte*	end;
} persistent;

enum
{
	PersistentAllocChunk	= 256<<10,
	PersistentAllocMaxBlock	= 64<<10,	// VM reservation granularity is 64K on windows
};

// Wrapper around SysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
void*
runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
{
	byte *p;

	if(align != 0) {
		if(align&(align-1))
			runtime·throw("persistentalloc: align is not a power of 2");
		if(align > PageSize)
			runtime·throw("persistentalloc: align is too large");
	} else
		align = 8;
	if(size >= PersistentAllocMaxBlock)
		return runtime·SysAlloc(size, stat);
	runtime·lock(&persistent);
	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
	if(persistent.pos + size > persistent.end) {
		persistent.pos = runtime·SysAlloc(PersistentAllocChunk, &mstats.other_sys);
		if(persistent.pos == nil) {
			runtime·unlock(&persistent);
			runtime·throw("runtime: cannot allocate memory");
		}
		persistent.end = persistent.pos + PersistentAllocChunk;
	}
	p = persistent.pos;
	persistent.pos += size;
	runtime·unlock(&persistent);
	if(stat != &mstats.other_sys) {
		// reaccount the allocation against provided stat
		runtime·xadd64(stat, size);
		runtime·xadd64(&mstats.other_sys, -(uint64)size);
	}
	return p;
}
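
// Typical use (a sketch; stat is conventionally a field of mstats so the
// bytes stay accounted, e.g. &mstats.gc_sys for GC metadata):
//
//	void *p;
//	p = runtime·persistentalloc(n, 0, &mstats.gc_sys);
//
// The returned memory is never freed, so this is only for data that lives
// for the rest of the process.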

static void
settype(MSpan *s, void *v, uintptr typ)
{
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	uintptr *data2;
	byte *data3;

	if(s->sizeclass == 0) {
		s->types.compression = MTypes_Single;
		s->types.data = typ;
		return;
	}
	size = s->elemsize;
	ofs = ((uintptr)v - (s->start<<PageShift)) / size;

	switch(s->types.compression) {
	case MTypes_Empty:
		ntypes = (s->npages << PageShift) / size;
		nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
		data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
		s->types.compression = MTypes_Bytes;
		s->types.data = (uintptr)data3;
		((uintptr*)data3)[1] = typ;
		data3[8*sizeof(uintptr) + ofs] = 1;
		break;

	case MTypes_Words:
		((uintptr*)s->types.data)[ofs] = typ;
		break;

	case MTypes_Bytes:
		data3 = (byte*)s->types.data;
		for(j=1; j<8; j++) {
			if(((uintptr*)data3)[j] == typ) {
				break;
			}
			if(((uintptr*)data3)[j] == 0) {
				((uintptr*)data3)[j] = typ;
				break;
			}
		}
		if(j < 8) {
			data3[8*sizeof(uintptr) + ofs] = j;
		} else {
			ntypes = (s->npages << PageShift) / size;
			nbytes2 = ntypes * sizeof(uintptr);
			data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
			s->types.compression = MTypes_Words;
			s->types.data = (uintptr)data2;

			// Move the contents of data3 to data2. Then deallocate data3.
			for(j=0; j<ntypes; j++) {
				t = data3[8*sizeof(uintptr) + j];
				t = ((uintptr*)data3)[t];
				data2[j] = t;
			}
			data2[ofs] = typ;
		}
		break;
	}
}
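
// The MTypes_Bytes layout used above, spelled out (derived from the code,
// not from separate documentation): s->types.data points at an 8-entry
// uintptr type table followed by one byte per object in the span. Entry 0
// is implicitly "no type"; settype installs typ in the first free table
// slot j (1..7) and records j in the per-object byte. Once an 8th distinct
// type shows up, the span is upgraded to MTypes_Words, one full uintptr
// per object.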
|
|
|
|
|
|
|
|
|
|
uintptr
|
|
|
|
|
runtime·gettype(void *v)
|
|
|
|
|
{
|
|
|
|
|
MSpan *s;
|
|
|
|
|
uintptr t, ofs;
|
|
|
|
|
byte *data;
|
|
|
|
|
|
2013-05-28 22:14:47 +04:00
|
|
|
s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
|
2012-09-24 20:08:05 -04:00
|
|
|
if(s != nil) {
|
|
|
|
|
t = 0;
|
|
|
|
|
switch(s->types.compression) {
|
|
|
|
|
case MTypes_Empty:
|
|
|
|
|
break;
|
|
|
|
|
case MTypes_Single:
|
|
|
|
|
t = s->types.data;
|
|
|
|
|
break;
|
|
|
|
|
case MTypes_Words:
|
|
|
|
|
ofs = (uintptr)v - (s->start<<PageShift);
|
|
|
|
|
t = ((uintptr*)s->types.data)[ofs/s->elemsize];
|
|
|
|
|
break;
|
|
|
|
|
case MTypes_Bytes:
|
|
|
|
|
ofs = (uintptr)v - (s->start<<PageShift);
|
|
|
|
|
data = (byte*)s->types.data;
|
|
|
|
|
t = data[8*sizeof(uintptr) + ofs/s->elemsize];
|
|
|
|
|
t = ((uintptr*)data)[t];
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
runtime·throw("runtime·gettype: invalid compression kind");
|
|
|
|
|
}
|
|
|
|
|
if(0) {
|
|
|
|
|
runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
|
|
|
|
|
}
|
|
|
|
|
return t;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
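The word gettype returns is exactly what settype stored: a Type pointer tagged with a TypeInfo_* value in its low bits (cnew below checks objtyp against PtrSize-1 for precisely this reason). A hedged caller sketch; the variable names are local to the example:

	uintptr t;
	Type *typ;

	t = runtime·gettype(v);
	if(t != 0) {
		typ = (Type*)(t & ~(uintptr)(PtrSize-1));	// strip the TypeInfo_* tag
		// t & (PtrSize-1) distinguishes TypeInfo_SingleObject, TypeInfo_Array, etc.
	}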
|
|
|
|
|
|
2008-12-19 03:13:39 -08:00
|
|
|
// Runtime stubs.
|
|
|
|
|
|
|
|
|
|
void*
|
runtime: ,s/[a-zA-Z0-9_]+/runtime·&/g, almost
Prefix all external symbols in runtime by runtime·,
to avoid conflicts with possible symbols of the same
name in linked-in C libraries. The obvious conflicts
are printf, malloc, and free, but hide everything to
avoid future pain.
The symbols left alone are:
** known to cgo **
_cgo_free
_cgo_malloc
libcgo_thread_start
initcgo
ncgocall
** known to linker **
_rt0_$GOARCH
_rt0_$GOARCH_$GOOS
text
etext
data
end
pclntab
epclntab
symtab
esymtab
** known to C compiler **
_divv
_modv
_div64by32
etc (arch specific)
Tested on darwin/386, darwin/amd64, linux/386, linux/amd64.
Built (but not tested) for freebsd/386, freebsd/amd64, linux/arm, windows/386.
R=r, PeterGo
CC=golang-dev
https://golang.org/cl/2899041
2010-11-04 14:00:19 -04:00
|
|
|
runtime·mal(uintptr n)
|
2010-03-23 20:48:23 -07:00
|
|
|
{
|
2013-07-26 21:17:24 +04:00
|
|
|
return runtime·mallocgc(n, 0, 0);
|
2008-12-19 03:13:39 -08:00
|
|
|
}
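runtime·mal is the untyped entry point: typ==0 marks the block as freeable with runtime·free, and flag==0 leaves it profiled and conservatively scanned. An illustrative C-side call; the struct name is made up:

	Foo *f;

	f = runtime·mal(sizeof(*f));	// zeroed, GC-scanned, may be runtime·free'd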
|
|
|
|
|
|
2013-08-12 13:47:18 -07:00
|
|
|
#pragma textflag NOSPLIT
|
runtime: use goc2c as much as possible
Package runtime's C functions written to be called from Go
started out written in C using carefully constructed argument
lists and the FLUSH macro to write a result back to memory.
For some functions, the appropriate parameter list ended up
being architecture-dependent due to differences in alignment,
so we added 'goc2c', which takes a .goc file containing Go func
declarations but C bodies, rewrites the Go func declaration to
equivalent C declarations for the target architecture, adds the
needed FLUSH statements, and writes out an equivalent C file.
That C file is compiled as part of package runtime.
Native Client's x86-64 support introduces the most complex
alignment rules yet, breaking many functions that could until
now be portably written in C. Using goc2c for those avoids the
breakage.
Separately, Keith's work on emitting stack information from
the C compiler would require the hand-written functions
to add #pragmas specifying how many arguments are result
parameters. Using goc2c for those avoids maintaining #pragmas.
For both reasons, use goc2c for as many Go-called C functions
as possible.
This CL is a replay of the bulk of CL 15400047 and CL 15790043,
both of which were reviewed as part of the NaCl port and are
checked in to the NaCl branch. This CL is part of bringing the
NaCl code into the main tree.
No new code here, just reformatting and occasional movement
into .h files.
LGTM=r
R=dave, alex.brainman, r
CC=golang-codereviews
https://golang.org/cl/65220044
2014-02-20 15:58:47 -05:00
|
|
|
// Implementation of Go's built-in new: one zeroed object of type typ.
func new(typ *Type) (ret *uint8) {
|
2013-08-23 17:28:47 -07:00
|
|
|
ret = runtime·mallocgc(typ->size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
|
2010-06-21 20:53:49 -07:00
|
|
|
}
|
|
|
|
|
|
2013-05-27 11:29:11 +04:00
|
|
|
static void*
|
|
|
|
|
cnew(Type *typ, intgo n, int32 objtyp)
|
2012-10-21 17:41:32 -04:00
|
|
|
{
|
2013-05-27 11:29:11 +04:00
|
|
|
if((objtyp&(PtrSize-1)) != objtyp)
|
|
|
|
|
runtime·throw("runtime: invalid objtyp");
|
|
|
|
|
if(n < 0 || (typ->size > 0 && n > MaxMem/typ->size))
|
|
|
|
|
runtime·panicstring("runtime: allocation size out of range");
|
2013-08-23 17:28:47 -07:00
|
|
|
return runtime·mallocgc(typ->size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
|
2012-10-21 17:41:32 -04:00
|
|
|
}
|
|
|
|
|
|
2013-05-27 11:29:11 +04:00
|
|
|
// same as runtime·new, but callable from C
|
|
|
|
|
void*
|
|
|
|
|
runtime·cnew(Type *typ)
|
|
|
|
|
{
|
|
|
|
|
return cnew(typ, 1, TypeInfo_SingleObject);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void*
|
|
|
|
|
runtime·cnewarray(Type *typ, intgo n)
|
|
|
|
|
{
|
|
|
|
|
return cnew(typ, n, TypeInfo_Array);
|
|
|
|
|
}
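The n > MaxMem/typ->size guard in cnew is the standard overflow-safe form of typ->size*n > MaxMem: dividing first means the comparison itself cannot wrap. A hedged sketch; Type *elem is assumed here to describe a 1<<20-byte element, and 1<<37 is assumed as the 64-bit MaxMem of this era:

	void *p;

	// Any n above 1<<17 panics in cnew instead of letting typ->size*n
	// wrap around to a small value and silently under-allocate.
	p = runtime·cnewarray(elem, n);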
|
|
|
|
|
|
2009-06-30 20:01:50 -07:00
|
|
|
func GC() {
|
runtime: fix finalizer flakiness
The flakiness appears to be just in tests, not in the actual code.
Specifically, the many tests call runtime.GC once and expect that
the finalizers will be running in the background when GC returns.
Now that the sweep phase is concurrent with execution, however,
the finalizers will not be run until sweep finishes, which might
be quite a bit later. To force sweep to finish, implement runtime.GC
by calling the actual collection twice. The second will complete the
sweep from the first.
This was reliably broken after a few runs before the CL and now
passes tens of runs:
while GOMAXPROCS=2 ./runtime.test -test.run=Finalizer -test.short \
-test.timeout=300s -test.cpu=$(perl -e 'print ("1,2,4," x 100) . "1"')
do true; done
Fixes #7328.
LGTM=dvyukov
R=dvyukov, dave
CC=golang-codereviews
https://golang.org/cl/71080043
2014-03-04 09:46:40 -05:00
|
|
|
// We assume that the user expects unused memory to have
|
|
|
|
|
// been freed when GC returns. To ensure this, run gc(1) twice.
|
|
|
|
|
// The first will do a collection, and the second will force the
|
|
|
|
|
// first's sweeping to finish before doing a second collection.
|
|
|
|
|
// The second collection is overkill, but we assume the user
|
|
|
|
|
// has a good reason for calling runtime.GC and can stand the
|
|
|
|
|
// expense. At the least, this fixes all the calls to runtime.GC in
|
|
|
|
|
// tests that expect finalizers to start running when GC returns.
|
|
|
|
|
runtime·gc(1);
|
2010-11-04 14:00:19 -04:00
|
|
|
runtime·gc(1);
|
2009-06-30 20:01:50 -07:00
|
|
|
}
|
2010-02-03 16:31:34 -08:00
|
|
|
|
|
|
|
|
func SetFinalizer(obj Eface, finalizer Eface) {
|
|
|
|
|
byte *base;
|
|
|
|
|
uintptr size;
|
|
|
|
|
FuncType *ft;
|
2012-09-24 14:58:34 -04:00
|
|
|
int32 i;
|
|
|
|
|
uintptr nret;
|
2010-02-08 21:41:54 -08:00
|
|
|
Type *t;
|
2013-07-29 19:43:08 +04:00
|
|
|
Type *fint;
|
|
|
|
|
PtrType *ot;
|
2013-08-14 14:54:31 -04:00
|
|
|
Iface iface;
|
2010-02-10 00:00:12 -08:00
|
|
|
|
2010-02-03 16:31:34 -08:00
|
|
|
if(obj.type == nil) {
|
2010-11-04 14:00:19 -04:00
|
|
|
runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
|
2011-10-06 18:42:51 +03:00
|
|
|
goto throw;
|
2010-02-03 16:31:34 -08:00
|
|
|
}
|
|
|
|
|
if(obj.type->kind != KindPtr) {
|
runtime: ,s/[a-zA-Z0-9_]+/runtime·&/g, almost
Prefix all external symbols in runtime by runtime·,
to avoid conflicts with possible symbols of the same
name in linked-in C libraries. The obvious conflicts
are printf, malloc, and free, but hide everything to
avoid future pain.
The symbols left alone are:
** known to cgo **
_cgo_free
_cgo_malloc
libcgo_thread_start
initcgo
ncgocall
** known to linker **
_rt0_$GOARCH
_rt0_$GOARCH_$GOOS
text
etext
data
end
pclntab
epclntab
symtab
esymtab
** known to C compiler **
_divv
_modv
_div64by32
etc (arch specific)
Tested on darwin/386, darwin/amd64, linux/386, linux/amd64.
Built (but not tested) for freebsd/386, freebsd/amd64, linux/arm, windows/386.
R=r, PeterGo
CC=golang-dev
https://golang.org/cl/2899041
2010-11-04 14:00:19 -04:00
|
|
|
runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
|
2010-02-03 16:31:34 -08:00
|
|
|
goto throw;
|
|
|
|
|
}
|
2013-12-17 14:18:58 -08:00
|
|
|
ot = (PtrType*)obj.type;
|
2014-01-24 22:35:11 +04:00
|
|
|
// As an implementation detail we do not run finalizers for zero-sized objects,
|
|
|
|
|
// because we use &runtime·zerobase for all such allocations.
|
|
|
|
|
if(ot->elem != nil && ot->elem->size == 0)
|
2013-12-17 14:18:58 -08:00
|
|
|
return;
|
2011-02-02 23:03:47 -05:00
|
|
|
if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
|
2014-01-24 22:35:11 +04:00
|
|
|
// As an implementation detail we allow setting a finalizer for an inner byte
|
|
|
|
|
// of an object if it could have come from a tiny allocation (see mallocgc for details).
|
|
|
|
|
if(ot->elem == nil || (ot->elem->kind&KindNoPointers) == 0 || ot->elem->size >= TinySize) {
|
|
|
|
|
runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
|
|
|
|
|
goto throw;
|
|
|
|
|
}
|
2010-02-03 16:31:34 -08:00
|
|
|
}
|
|
|
|
|
if(finalizer.type != nil) {
|
2011-10-06 18:42:51 +03:00
|
|
|
if(finalizer.type->kind != KindFunc)
|
|
|
|
|
goto badfunc;
|
2010-02-03 16:31:34 -08:00
|
|
|
ft = (FuncType*)finalizer.type;
|
2013-07-29 19:43:08 +04:00
|
|
|
if(ft->dotdotdot || ft->in.len != 1)
|
|
|
|
|
goto badfunc;
|
|
|
|
|
fint = *(Type**)ft->in.array;
|
2013-08-14 14:54:31 -04:00
|
|
|
if(fint == obj.type) {
|
|
|
|
|
// ok - same type
|
|
|
|
|
} else if(fint->kind == KindPtr && (fint->x == nil || fint->x->name == nil || obj.type->x == nil || obj.type->x->name == nil) && ((PtrType*)fint)->elem == ((PtrType*)obj.type)->elem) {
|
|
|
|
|
// ok - not same type, but both pointers,
|
|
|
|
|
// one or the other is unnamed, and same element type, so assignable.
|
|
|
|
|
} else if(fint->kind == KindInterface && ((InterfaceType*)fint)->mhdr.len == 0) {
|
|
|
|
|
// ok - satisfies empty interface
|
|
|
|
|
} else if(fint->kind == KindInterface && runtime·ifaceE2I2((InterfaceType*)fint, obj, &iface)) {
|
|
|
|
|
// ok - satisfies non-empty interface
|
|
|
|
|
} else
|
2010-02-03 16:31:34 -08:00
|
|
|
goto badfunc;
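// Illustrative outcomes for obj of type *os.File (package names assumed):
//   func(*os.File)     ok - same type
//   func(interface{})  ok - empty interface
//   func(io.Closer)    ok - non-empty interface, via runtime·ifaceE2I2
//   func(*os.Process)  falls through to badfunc below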
|
2010-02-10 00:00:12 -08:00
|
|
|
|
2010-02-08 21:41:54 -08:00
|
|
|
// compute size needed for return parameters
|
2014-01-07 13:45:50 -08:00
|
|
|
nret = 0;
|
2010-02-08 21:41:54 -08:00
|
|
|
for(i=0; i<ft->out.len; i++) {
|
|
|
|
|
t = ((Type**)ft->out.array)[i];
|
2013-06-13 16:02:50 +04:00
|
|
|
nret = ROUND(nret, t->align) + t->size;
|
2010-02-08 21:41:54 -08:00
|
|
|
}
|
2013-06-13 16:02:50 +04:00
|
|
|
nret = ROUND(nret, sizeof(void*));
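// Worked example: a finalizer func(*T) (int32, int64) on a 64-bit system
// gives nret = ROUND(0,4)+4 = 4, then ROUND(4,8)+8 = 16, and the final
// ROUND(16, sizeof(void*)) leaves nret = 16 bytes of return space.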
|
2014-01-07 13:45:50 -08:00
|
|
|
ot = (PtrType*)obj.type;
|
|
|
|
|
if(!runtime·addfinalizer(obj.data, finalizer.data, nret, fint, ot)) {
|
|
|
|
|
runtime·printf("runtime.SetFinalizer: finalizer already set\n");
|
|
|
|
|
goto throw;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// NOTE: asking to remove a finalizer when there currently isn't one set is OK.
|
|
|
|
|
runtime·removefinalizer(obj.data);
|
2011-10-06 18:42:51 +03:00
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
badfunc:
|
2013-08-14 14:54:31 -04:00
|
|
|
runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string);
|
2011-10-06 18:42:51 +03:00
|
|
|
throw:
|
|
|
|
|
runtime·throw("runtime.SetFinalizer");
|
2010-02-03 16:31:34 -08:00
|
|
|
}
|