// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

const (
	debugMalloc = false

	flagNoScan = 1 << 0 // GC doesn't have to scan object
	flagNoZero = 1 << 1 // don't zero memory

	maxTinySize   = 16
	tinySizeClass = 2
	maxSmallSize  = 32 << 10

	pageShift = 13
	pageSize  = 1 << pageShift
	pageMask  = pageSize - 1

	bitsPerPointer  = 2
	bitsMask        = 1<<bitsPerPointer - 1
	pointersPerByte = 8 / bitsPerPointer
	bitPtrMask      = bitsMask << 2
	maxGCMask       = 64
	bitsDead        = 0
	bitsPointer     = 2

	bitBoundary = 1
	bitMarked   = 2
	bitMask     = bitBoundary | bitMarked

	mSpanInUse = 0
)
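
// The helper below is an illustrative sketch and is not called by the runtime:
// it shows how the per-word layout implied by the constants above is unpacked,
// mirroring the expression (*xbits>>shift)&(bitMask|bitPtrMask) used in
// gomallocgc. Within a word's slot of the heap bitmap, the low two bits hold
// the boundary/marked flags and the next two bits hold the pointer-type bits
// (bitsDead, bitsPointer, ...).
func exampleBitmapNibble(xbits uint8, shift uintptr) (flags, typeBits uint8) {
	nibble := xbits >> shift
	flags = nibble & bitMask              // bitBoundary | bitMarked
	typeBits = (nibble & bitPtrMask) >> 2 // 2-bit type info for the word
	return flags, typeBits
}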
					
						
// Page number (address>>pageShift)
type pageID uintptr

// All zero-sized allocations return a pointer to this byte.
var zeroObject byte

// Maximum possible heap size.
var maxMem uintptr

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
	if size == 0 {
		return unsafe.Pointer(&zeroObject)
	}
	size0 := size

	// This function must be atomic wrt GC, but for performance reasons
	// we don't acquirem/releasem on the fast path. The code below does not have
	// split stack checks, so it can't be preempted by GC.
	// Functions like roundup/add are inlined, and onM/racemalloc are nosplit.
	// If debugMalloc = true, these assumptions are checked below.
	if debugMalloc {
		mp := acquirem()
		if mp.mallocing != 0 {
			gothrow("malloc deadlock")
		}
		mp.mallocing = 1
		if mp.curg != nil {
			mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
		}
	}

	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which corresponds to 2x worst case memory
			// wastage (when all but one subobject are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case win is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.

			tinysize := uintptr(c.tinysize)
			if size <= tinysize {
				tiny := unsafe.Pointer(c.tiny)
				// Align tiny pointer for required (conservative) alignment
				// (a stand-alone sketch of this rule follows the function).
				if size&7 == 0 {
					tiny = roundup(tiny, 8)
				} else if size&3 == 0 {
					tiny = roundup(tiny, 4)
				} else if size&1 == 0 {
					tiny = roundup(tiny, 2)
				}
				size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny)))
				if size1 <= tinysize {
					// The object fits into existing tiny block.
					x = tiny
					c.tiny = (*byte)(add(x, size))
					c.tinysize -= uintptr(size1)
					if debugMalloc {
						mp := acquirem()
						if mp.mallocing == 0 {
							gothrow("bad malloc")
						}
						mp.mallocing = 0
						if mp.curg != nil {
							mp.curg.stackguard0 = mp.curg.stackguard
						}
						releasem(mp)
						releasem(mp)
					}
					return x
				}
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v == nil {
				mp := acquirem()
				mp.scalararg[0] = tinySizeClass
				onM(mcacheRefill_m)
				releasem(mp)
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.next
			s.ref++
			//TODO: prefetch v.next
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if maxTinySize-size > tinysize {
				c.tiny = (*byte)(add(x, size))
				c.tinysize = uintptr(maxTinySize - size)
			}
			size = maxTinySize
		} else {
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v == nil {
				mp := acquirem()
				mp.scalararg[0] = uintptr(sizeclass)
				onM(mcacheRefill_m)
				releasem(mp)
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.next
			s.ref++
			//TODO: prefetch
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.next = nil
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += intptr(size)
	} else {
		mp := acquirem()
		mp.scalararg[0] = uintptr(size)
		mp.scalararg[1] = uintptr(flags)
		onM(largeAlloc_m)
		s = (*mspan)(mp.ptrarg[0])
		mp.ptrarg[0] = nil
		releasem(mp)
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan.
		goto marked
	}

	// From here until the marked label we mark the object as allocated
	// and store type info in the GC bitmap.
	{
		arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
		off := (uintptr(x) - arena_start) / ptrSize
		xbits := (*uint8)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
		shift := (off % wordsPerBitmapByte) * gcBits
		if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary {
			println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask))
			gothrow("bad bits in markallocated")
		}

		var ti, te uintptr
		var ptrmask *uint8
		if size == ptrSize {
			// It's one word and it has pointers, it must be a pointer.
			*xbits |= (bitsPointer << 2) << shift
			goto marked
		}
		if typ.kind&kindGCProg != 0 {
			nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize
			masksize := nptr
			if masksize%2 != 0 {
				masksize *= 2 // repeated
			}
			masksize = masksize * pointersPerByte / 8 // 4 bits per word
			masksize++                                // unroll flag in the beginning
			if masksize > maxGCMask && typ.gc[1] != 0 {
				// If the mask is too large, unroll the program directly
				// into the GC bitmap. It's 7 times slower than copying
				// from the pre-unrolled mask, but saves 1/16 of type size
				// memory for the mask.
				mp := acquirem()
				mp.ptrarg[0] = x
				mp.ptrarg[1] = unsafe.Pointer(typ)
				mp.scalararg[0] = uintptr(size)
				mp.scalararg[1] = uintptr(size0)
				onM(unrollgcproginplace_m)
				releasem(mp)
				goto marked
			}
			ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
			// Check whether the program is already unrolled.
			if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
				mp := acquirem()
				mp.ptrarg[0] = unsafe.Pointer(typ)
				onM(unrollgcprog_m)
				releasem(mp)
			}
			ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
		} else {
			ptrmask = (*uint8)(unsafe.Pointer(&typ.gc[0])) // embed mask
		}
		if size == 2*ptrSize {
			*xbits = *ptrmask | bitBoundary
			goto marked
		}
		te = uintptr(typ.size) / ptrSize
		// If the type occupies an odd number of words, its mask is repeated.
		if te%2 == 0 {
			te /= 2
		}
		// Copy pointer bitmask into the bitmap.
		for i := uintptr(0); i < size0; i += 2 * ptrSize {
			v := *(*uint8)(add(unsafe.Pointer(ptrmask), ti))
			ti++
			if ti == te {
				ti = 0
			}
			if i == 0 {
				v |= bitBoundary
			}
			if i+ptrSize == size0 {
				v &^= uint8(bitPtrMask << 4)
			}

			*xbits = v
			xbits = (*byte)(add(unsafe.Pointer(xbits), ^uintptr(0)))
		}
		if size0%(2*ptrSize) == 0 && size0 < size {
			// Mark the word after last object's word as bitsDead.
			*xbits = bitsDead << 2
		}
	}
marked:
	if raceenabled {
		racemalloc(x, size)
	}

	if debugMalloc {
		mp := acquirem()
		if mp.mallocing == 0 {
			gothrow("bad malloc")
		}
		mp.mallocing = 0
		if mp.curg != nil {
			mp.curg.stackguard0 = mp.curg.stackguard
		}
		releasem(mp)
		releasem(mp)
	}

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if memstats.heap_alloc >= memstats.next_gc {
		gogc(0)
	}

	return x
}
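
// The sketch below illustrates the tiny-allocator alignment rule applied
// inside gomallocgc: a request whose size is a multiple of 8, 4, or 2 bytes
// is placed at a correspondingly aligned offset within the shared 16-byte
// block. It is an explanatory stand-in that operates on plain offsets rather
// than on c.tiny, and it is not called by the runtime.
func tinyAlignSketch(offset, size uintptr) uintptr {
	switch {
	case size&7 == 0:
		return round(offset, 8) // 8-byte aligned slot
	case size&3 == 0:
		return round(offset, 4) // 4-byte aligned slot
	case size&1 == 0:
		return round(offset, 2) // 2-byte aligned slot
	}
	return offset // odd sizes need no alignment
}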
					
						
// cmallocgc is a trampoline used to call the Go malloc from C.
func cmallocgc(size uintptr, typ *_type, flags int, ret *unsafe.Pointer) {
	*ret = gomallocgc(size, typ, flags)
}

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := 0
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return gomallocgc(uintptr(typ.size), typ, flags)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := 0
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	if int(n) < 0 || (typ.size > 0 && n > maxMem/uintptr(typ.size)) {
		panic("runtime: allocation size out of range")
	}
	return gomallocgc(uintptr(typ.size)*n, typ, flags)
}
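
// sizeOKSketch is an illustrative restatement (not used by the runtime) of the
// overflow guard in newarray above: instead of computing typ.size*n and risking
// uintptr wraparound, the element count is compared against maxMem divided by
// the element size, so any product that would exceed the address space is
// rejected before the multiplication happens.
func sizeOKSketch(elemSize, n uintptr) bool {
	return int(n) >= 0 && (elemSize == 0 || n <= maxMem/elemSize)
}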
					
						
// rawmem returns a chunk of pointerless memory.  It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return gomallocgc(size, nil, flagNoScan|flagNoZero)
}

// round size up to next size class
func goroundupsize(size uintptr) uintptr {
	if size < maxSmallSize {
		if size <= 1024-8 {
			return uintptr(class_to_size[size_to_class8[(size+7)>>3]])
		}
		return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]])
	}
	if size+pageSize < size {
		return size
	}
	return (size + pageSize - 1) &^ pageMask
}
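
// roundupLargeSketch illustrates (for explanation only; not called by the
// runtime) the large-object branch of goroundupsize: a request above
// maxSmallSize is rounded up to a whole number of 8 KB pages, so a 33 KB
// request is charged 40 KB, i.e. five pages.
func roundupLargeSketch() uintptr {
	const req = 33 << 10                    // 33 KB, larger than maxSmallSize
	return (req + pageSize - 1) &^ pageMask // 40 << 10
}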
					
						
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := mp.mcache
	rate := MemProfileRate
	if size < uintptr(rate) {
		// pick next profile time
		// If you change this, also change allocmcache.
		if rate > 0x3fffffff { // make 2*rate not overflow
			rate = 0x3fffffff
		}
		next := int32(fastrand1()) % (2 * int32(rate))
		// Subtract the "remainder" of the current allocation.
		// Otherwise objects that are close in size to sampling rate
		// will be under-sampled, because we consistently discard this remainder.
		next -= (int32(size) - c.next_sample)
		if next < 0 {
			next = 0
		}
		c.next_sample = next
	}

	mProf_Malloc(x, size)
}
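
// nextSampleSketch is an explanatory restatement (not used by the runtime) of
// how the sampling countdown above interacts with the fast-path check in
// gomallocgc: each allocation shrinks next_sample by its size, and once the
// countdown is exhausted the allocation is sampled; for small sizes a fresh
// pseudo-random countdown in [0, 2*rate) is then drawn, minus the overshoot of
// the triggering allocation so that objects close in size to the sampling rate
// are not under-sampled.
func nextSampleSketch(size uintptr, rate int, next int32) (sampled bool, newNext int32) {
	if size < uintptr(rate) && int32(size) < next {
		// fast path: not sampled, just shrink the countdown
		return false, next - int32(size)
	}
	// sampled; a new countdown is drawn only for sizes below the rate
	newNext = next
	if size < uintptr(rate) {
		if rate > 0x3fffffff { // keep 2*rate within int32 range
			rate = 0x3fffffff
		}
		n := int32(fastrand1()) % (2 * int32(rate))
		n -= int32(size) - next // subtract the overshoot
		if n < 0 {
			n = 0
		}
		newNext = n
	}
	return true, newNext
}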
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // force = 1 - do GC regardless of current heap usage | 
					
						
							|  |  |  | // force = 2 - go GC and eager sweep | 
					
						
							|  |  |  | func gogc(force int32) { | 
					
						
							| 
									
										
										
										
											2014-08-29 18:44:38 +04:00
										 |  |  | 	// The gc is turned off (via enablegc) until the bootstrap has completed. | 
					
						
							|  |  |  | 	// Also, malloc gets called in the guts of a number of libraries that might be | 
					
						
							|  |  |  | 	// holding locks. To avoid deadlocks during stoptheworld, don't bother | 
					
						
							|  |  |  | 	// trying to run gc while holding a lock. The next mallocgc without a lock | 
					
						
							|  |  |  | 	// will do the gc instead. | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 	mp := acquirem() | 
					
						
							| 
									
										
										
										
											2014-08-29 18:44:38 +04:00
										 |  |  | 	if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 { | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		releasem(mp) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	releasem(mp) | 
					
						
							| 
									
										
										
										
											2014-08-21 11:46:53 +04:00
										 |  |  | 	mp = nil | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	semacquire(&worldsema, false) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if force == 0 && memstats.heap_alloc < memstats.next_gc { | 
					
						
							|  |  |  | 		// typically threads which lost the race to grab | 
					
						
							|  |  |  | 		// worldsema exit here when gc is done. | 
					
						
							|  |  |  | 		semrelease(&worldsema) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Ok, we're doing it!  Stop everybody else | 
					
						
							| 
									
										
										
											
												cmd/cc, runtime: preserve C runtime type names in generated Go
uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.
Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).
LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://golang.org/cl/138740043
											
										 
											2014-08-27 21:59:49 -04:00
										 |  |  | 	startTime := nanotime() | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 	mp = acquirem() | 
					
						
							|  |  |  | 	mp.gcing = 1 | 
					
						
							| 
									
										
										
										
											2014-08-21 11:46:53 +04:00
										 |  |  | 	releasem(mp) | 
					
						
							| 
									
										
										
										
											2014-09-04 00:54:06 -04:00
										 |  |  | 	onM(stoptheworld) | 
					
						
							| 
									
										
										
										
											2014-08-21 11:46:53 +04:00
										 |  |  | 	if mp != acquirem() { | 
					
						
							|  |  |  | 		gothrow("gogc: rescheduled") | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	clearpools() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Run gc on the g0 stack.  We do this so that the g stack | 
					
						
							|  |  |  | 	// we're currently running on will no longer change.  Cuts | 
					
						
							|  |  |  | 	// the root set down a bit (g0 stacks are not scanned, and | 
					
						
							|  |  |  | 	// we don't need to scan gc's internal state).  We also | 
					
						
							|  |  |  | 	// need to switch to g0 so we can shrink the stack. | 
					
						
							|  |  |  | 	n := 1 | 
					
						
							|  |  |  | 	if debug.gctrace > 1 { | 
					
						
							|  |  |  | 		n = 2 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	for i := 0; i < n; i++ { | 
					
						
							|  |  |  | 		if i > 0 { | 
					
						
							| 
									
										
										
											
												cmd/cc, runtime: preserve C runtime type names in generated Go
uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.
Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).
LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://golang.org/cl/138740043
											
										 
											2014-08-27 21:59:49 -04:00
										 |  |  | 			startTime = nanotime() | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		// switch to g0, call gc, then switch back | 
					
						
							| 
									
										
										
											
												cmd/cc, runtime: preserve C runtime type names in generated Go
uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.
Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).
LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://golang.org/cl/138740043
											
										 
											2014-08-27 21:59:49 -04:00
										 |  |  | 		mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits | 
					
						
							|  |  |  | 		mp.scalararg[1] = uintptr(startTime >> 32)   // high 32 bits | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		if force >= 2 { | 
					
						
							| 
									
										
										
										
											2014-08-19 11:53:20 +04:00
										 |  |  | 			mp.scalararg[2] = 1 // eagersweep | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		} else { | 
					
						
							| 
									
										
										
										
											2014-08-19 11:53:20 +04:00
										 |  |  | 			mp.scalararg[2] = 0 | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2014-09-03 11:35:22 -04:00
										 |  |  | 		onM(gc_m) | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// all done | 
					
						
							|  |  |  | 	mp.gcing = 0 | 
					
						
							|  |  |  | 	semrelease(&worldsema) | 
					
						
							| 
									
										
										
										
											2014-09-04 00:54:06 -04:00
										 |  |  | 	onM(starttheworld) | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 	releasem(mp) | 
					
						
							| 
									
										
										
										
											2014-08-21 11:46:53 +04:00
										 |  |  | 	mp = nil | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// now that gc is done, kick off finalizer thread if needed | 
					
						
							|  |  |  | 	if !concurrentSweep { | 
					
						
							|  |  |  | 		// give the queued finalizers, if any, a chance to run | 
					
						
							|  |  |  | 		gosched() | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // GC runs a garbage collection. | 
					
						
							|  |  |  | func GC() { | 
					
						
							|  |  |  | 	gogc(2) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // SetFinalizer sets the finalizer associated with x to f. | 
					
						
							|  |  |  | // When the garbage collector finds an unreachable block | 
					
						
							|  |  |  | // with an associated finalizer, it clears the association and runs | 
					
						
							|  |  |  | // f(x) in a separate goroutine.  This makes x reachable again, but | 
					
						
							|  |  |  | // now without an associated finalizer.  Assuming that SetFinalizer | 
					
						
							|  |  |  | // is not called again, the next time the garbage collector sees | 
					
						
							|  |  |  | // that x is unreachable, it will free x. | 
					
						
							|  |  |  | // | 
					
						
							|  |  |  | // SetFinalizer(x, nil) clears any finalizer associated with x. | 
					
						
							|  |  |  | // | 
					
						
							|  |  |  | // The argument x must be a pointer to an object allocated by | 
					
						
							|  |  |  | // calling new or by taking the address of a composite literal. | 
					
						
							|  |  |  | // The argument f must be a function that takes a single argument | 
					
						
							|  |  |  | // to which x's type can be assigned, and can have arbitrary ignored return | 
					
						
							|  |  |  | // values. If either of these is not true, SetFinalizer aborts the | 
					
						
							|  |  |  | // program. | 
					
						
							|  |  |  | // | 
					
						
							|  |  |  | // Finalizers are run in dependency order: if A points at B, both have | 
					
						
							|  |  |  | // finalizers, and they are otherwise unreachable, only the finalizer | 
					
						
							|  |  |  | // for A runs; once A is freed, the finalizer for B can run. | 
					
						
							|  |  |  | // If a cyclic structure includes a block with a finalizer, that | 
					
						
							|  |  |  | // cycle is not guaranteed to be garbage collected and the finalizer | 
					
						
							|  |  |  | // is not guaranteed to run, because there is no ordering that | 
					
						
							|  |  |  | // respects the dependencies. | 
					
						
							|  |  |  | // | 
					
						
							|  |  |  | // The finalizer for x is scheduled to run at some arbitrary time after | 
					
						
							|  |  |  | // x becomes unreachable. | 
					
						
							|  |  |  | // There is no guarantee that finalizers will run before a program exits, | 
					
						
							|  |  |  | // so typically they are useful only for releasing non-memory resources | 
					
						
							|  |  |  | // associated with an object during a long-running program. | 
					
						
							|  |  |  | // For example, an os.File object could use a finalizer to close the | 
					
						
							|  |  |  | // associated operating system file descriptor when a program discards | 
					
						
							|  |  |  | // an os.File without calling Close, but it would be a mistake | 
					
						
							|  |  |  | // to depend on a finalizer to flush an in-memory I/O buffer such as a | 
					
						
							|  |  |  | // bufio.Writer, because the buffer would not be flushed at program exit. | 
					
						
							|  |  |  | // | 
					
						
							|  |  |  | // It is not guaranteed that a finalizer will run if the size of *x is | 
					
						
							|  |  |  | // zero bytes. | 
					
						
							|  |  |  | // | 
					
						
							|  |  |  | // A single goroutine runs all finalizers for a program, sequentially. | 
					
						
							|  |  |  | // If a finalizer must run for a long time, it should do so by starting | 
					
						
							|  |  |  | // a new goroutine. | 
					
						
							|  |  |  | func SetFinalizer(obj interface{}, finalizer interface{}) { | 
					
						
							|  |  |  | 	e := (*eface)(unsafe.Pointer(&obj)) | 
					
						
							| 
									
										
										
										
											2014-08-28 13:23:10 -07:00
										 |  |  | 	etyp := e._type | 
					
						
							|  |  |  | 	if etyp == nil { | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		gothrow("runtime.SetFinalizer: first argument is nil") | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2014-08-28 13:23:10 -07:00
										 |  |  | 	if etyp.kind&kindMask != kindPtr { | 
					
						
							|  |  |  | 		gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	ot := (*ptrtype)(unsafe.Pointer(etyp)) | 
					
						
							|  |  |  | 	if ot.elem == nil { | 
					
						
							|  |  |  | 		gothrow("nil elem type!") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// As an implementation detail we do not run finalizers for zero-sized objects, | 
					
						
							|  |  |  | 	// because we use &runtime·zerobase for all such allocations. | 
					
						
							|  |  |  | 	if ot.elem.size == 0 { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// find the containing object | 
					
						
							|  |  |  | 	_, base, _ := findObject(e.data) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// The following check is required for cases when a user passes a pointer to composite | 
					
						
							|  |  |  | 	// literal, but compiler makes it a pointer to global. For example: | 
					
						
							|  |  |  | 	//	var Foo = &Object{} | 
					
						
							|  |  |  | 	//	func main() { | 
					
						
							|  |  |  | 	//		runtime.SetFinalizer(Foo, nil) | 
					
						
							|  |  |  | 	//	} | 
					
						
							|  |  |  | 	// See issue 7656. | 
					
						
							|  |  |  | 	if base == nil { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if e.data != base { | 
					
						
							|  |  |  | 		// As an implementation detail we allow to set finalizers for an inner byte | 
					
						
							|  |  |  | 		// of an object if it could come from tiny alloc (see mallocgc for details). | 
					
						
							|  |  |  | 		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize { | 
					
						
							|  |  |  | 			gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block") | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	f := (*eface)(unsafe.Pointer(&finalizer)) | 
					
						
							|  |  |  | 	ftyp := f._type | 
					
						
							| 
									
										
										
										
											2014-08-28 13:23:10 -07:00
										 |  |  | 	if ftyp == nil { | 
					
						
							|  |  |  | 		// switch to M stack and remove finalizer | 
					
						
							|  |  |  | 		mp := acquirem() | 
					
						
							|  |  |  | 		mp.ptrarg[0] = e.data | 
					
						
							| 
									
										
										
										
											2014-09-03 11:35:22 -04:00
										 |  |  | 		onM(removeFinalizer_m) | 
					
						
							| 
									
										
										
										
											2014-08-28 13:23:10 -07:00
										 |  |  | 		releasem(mp) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if ftyp.kind&kindMask != kindFunc { | 
					
						
							| 
									
										
										
										
											2014-07-30 09:01:52 -07:00
										 |  |  | 		gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function") | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2014-08-28 13:23:10 -07:00
										 |  |  | 	ft := (*functype)(unsafe.Pointer(ftyp)) | 
					
						
							|  |  |  | 	ins := *(*[]*_type)(unsafe.Pointer(&ft.in)) | 
					
						
							|  |  |  | 	if ft.dotdotdot || len(ins) != 1 { | 
					
						
							|  |  |  | 		gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	fint := ins[0] | 
					
						
							|  |  |  | 	switch { | 
					
						
							|  |  |  | 	case fint == etyp: | 
					
						
							|  |  |  | 		// ok - same type | 
					
						
							|  |  |  | 		goto okarg | 
					
						
							|  |  |  | 	case fint.kind&kindMask == kindPtr: | 
					
						
							|  |  |  | 		if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem { | 
					
						
							|  |  |  | 			// ok - not same type, but both pointers, | 
					
						
							|  |  |  | 			// one or the other is unnamed, and same element type, so assignable. | 
					
						
							|  |  |  | 			goto okarg | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	case fint.kind&kindMask == kindInterface: | 
					
						
							|  |  |  | 		ityp := (*interfacetype)(unsafe.Pointer(fint)) | 
					
						
							|  |  |  | 		if len(ityp.mhdr) == 0 { | 
					
						
							|  |  |  | 			// ok - satisfies empty interface | 
					
						
							|  |  |  | 			goto okarg | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		if _, ok := assertE2I2(ityp, obj); ok { | 
					
						
							|  |  |  | 			goto okarg | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string) | 
					
						
							|  |  |  | okarg: | 
					
						
							|  |  |  | 	// compute size needed for return parameters | 
					
						
							|  |  |  | 	nret := uintptr(0) | 
					
						
							|  |  |  | 	for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) { | 
					
						
							|  |  |  | 		nret = round(nret, uintptr(t.align)) + uintptr(t.size) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	nret = round(nret, ptrSize) | 
					
						
	// make sure we have a finalizer goroutine
	createfing()

	// switch to M stack to add finalizer record
	mp := acquirem()
	mp.ptrarg[0] = f.data
	mp.ptrarg[1] = e.data
	mp.scalararg[0] = nret
	mp.ptrarg[2] = unsafe.Pointer(fint)
	mp.ptrarg[3] = unsafe.Pointer(ot)
	onM(setFinalizer_m)
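	// setFinalizer_m reports its result back through scalararg[0]: 1 means the
	// finalizer record was installed; anything else means one already existed.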
					
						
	if mp.scalararg[0] != 1 {
		gothrow("runtime.SetFinalizer: finalizer already set")
	}
	releasem(mp)
}
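// Illustrative use from user code (a sketch, not code that appears in the
// runtime itself):
//
//	b := new(bytes.Buffer)
//	runtime.SetFinalizer(b, func(b *bytes.Buffer) { println("buffer unreachable") })
//	// ...
//	runtime.SetFinalizer(b, nil) // remove the finalizer again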
					
						
// round n up to a multiple of a.  a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}
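// For example, round(10, 8) == 16, round(16, 8) == 16, and round(0, 8) == 0.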
					
						
// Look up pointer v in heap.  Return the span containing the object,
// the start of the object, and the size of the object.  If the object
// does not exist, return nil, nil, 0.
func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
	c := gomcache()
	c.local_nlookup++
	if ptrSize == 4 && c.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(c)
		unlock(&mheap_.lock)
	}

	// find span
	arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
	arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
		return
	}
	p := uintptr(v) >> pageShift
	q := p - arena_start>>pageShift
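	// >> binds tighter than -, so q is p - (arena_start>>pageShift):
	// the page index of v relative to the start of the arena.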
					
						
	s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
	if s == nil {
		return
	}
	x = unsafe.Pointer(uintptr(s.start) << pageShift)

	if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
		s = nil
		x = nil
		return
	}

	n = uintptr(s.elemsize)
	if s.sizeclass != 0 {
		x = add(x, (uintptr(v)-uintptr(x))/n*n)
	}
	return
}
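// findObject tolerates interior pointers: in a span with elemsize 48, for
// instance, a pointer 10 bytes past an object's base is rounded down so that
// x is the object's base and n == 48 (an illustrative example, not taken from
// a real call site).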
					
						
var fingCreate uint32

func createfing() {
	// start the finalizer goroutine exactly once
	if fingCreate == 0 && cas(&fingCreate, 0, 1) {
		go runfinq()
	}
}
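// The plain read of fingCreate is only a fast path; the cas is what guarantees
// that exactly one caller starts runfinq.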
					
						
// This is the goroutine that runs all of the finalizers
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
	)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gp := getg()
			fing = gp
			fingwait = true
			gp.issystem = true
			goparkunlock(&finlock, "finalizer wait")
			gp.issystem = false
			continue
		}
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := int32(0); i < fb.cnt; i++ {
				f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{})))
					
						
				framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret)
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC;
					// all not-yet-finalized objects are stored in finq.
					// If we do not mark it as flagNoScan,
					// the last finalized object is not collected.
					frame = gomallocgc(framesz, nil, flagNoScan)
					framecap = framesz
				}
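				// The frame handed to reflectcall below holds the finalizer's single
				// argument first (a raw pointer or an interface header), followed by
				// f.nret bytes reserved for return values, which are discarded.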
					
						
				if f.fint == nil {
					gothrow("missing type in runfinq")
				}
				switch f.fint.kind & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(frame) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(frame)._type = &f.ot.typ
					(*eface)(frame).data = f.arg
					if len(ityp.mhdr) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						*(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
					}
				default:
					gothrow("bad kind in runfinq")
				}
				reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), nil)

				// drop finalizer queue references to finalized object
				f.fn = nil
				f.arg = nil
				f.ot = nil
			}
			fb.cnt = 0
			next := fb.next
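			// Return the drained block to the free list (finc) so it can be
			// reused the next time finalizers are queued.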
					
						
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}
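
// The persistent struct below is the bump-allocation state for persistentalloc:
// pos and end delimit the unused tail of the current chunk obtained from
// sysAlloc, and lock guards both fields.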
					
						
var persistent struct {
	lock mutex
	pos  unsafe.Pointer
	end  unsafe.Pointer
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)
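	// Requests of maxBlock bytes or more bypass the chunked allocator below and
	// go straight to sysAlloc; smaller requests are carved out of 256 KB chunks.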
					
						
	if align != 0 {
		if align&(align-1) != 0 {
			gothrow("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			gothrow("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, stat)
	}

	lock(&persistent.lock)
	persistent.pos = roundup(persistent.pos, align)
	if uintptr(persistent.pos)+size > uintptr(persistent.end) {
		persistent.pos = sysAlloc(chunk, &memstats.other_sys)
		if persistent.pos == nil {
			unlock(&persistent.lock)
			gothrow("runtime: cannot allocate memory")
		}
		persistent.end = add(persistent.pos, chunk)
	}
	p := persistent.pos
	persistent.pos = add(persistent.pos, size)
	unlock(&persistent.lock)

	if stat != &memstats.other_sys {
		xadd64(stat, int64(size))
		xadd64(&memstats.other_sys, -int64(size))
	}
	return p
}
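
// Illustrative call (a sketch, not a call site in this file): a caller that
// needs n bytes of never-freed metadata with the default alignment might write
//
//	p := persistentalloc(n, 0, &memstats.other_sys)
//
// and the memory at p stays valid for the life of the process.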