runtime: break atomics out into package runtime/internal/atomic

This change breaks out most of the atomics functions in the runtime
into package runtime/internal/atomic. It adds some basic support
in the toolchain for runtime packages, and also modifies linux/arm
atomics to remove the dependency on the runtime's mutex. The mutexes
have been replaced with spinlocks.

All trybots are happy!
In addition to the trybots, I've tested on the darwin/arm64 builder,
on the darwin/arm builder, and on a ppc64le machine.
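
On linux/arm the atomic operations that can't be done in a single hardware instruction (roughly, the 64-bit ones) are serialized through a lock; the change described above swaps the runtime mutex that used to guard them for a spinlock. The following is a minimal sketch of that idea only, written against sync/atomic with hypothetical names (spinLock, Xadd64) rather than the runtime's internal primitives:

// A minimal sketch, not the runtime's actual code: emulating a 64-bit
// atomic add by serializing through a spinlock, as is done on hardware
// without native 64-bit atomic instructions.
package spinatomic

import (
	"runtime"
	"sync/atomic"
)

// spinLock is a busy-wait lock built from a 32-bit compare-and-swap,
// which the hardware (or a kernel helper) can provide even on old ARM.
type spinLock uint32

func (l *spinLock) lock() {
	for !atomic.CompareAndSwapUint32((*uint32)(l), 0, 1) {
		runtime.Gosched() // stand-in for tight spinning inside the runtime
	}
}

func (l *spinLock) unlock() {
	atomic.StoreUint32((*uint32)(l), 0)
}

var lk spinLock

// Xadd64 adds delta to *addr and returns the new value. All 64-bit
// accesses are funneled through lk, so the update appears atomic to
// every other caller of this package.
func Xadd64(addr *uint64, delta int64) uint64 {
	lk.lock()
	*addr += uint64(delta)
	v := *addr
	lk.unlock()
	return v
}

(The runtime spreads contention over a small table of such locks keyed by address rather than using a single global lock.)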

Change-Id: I6698c8e3cf3834f55ce5824059f44d00dc8e3c2f
Reviewed-on: https://go-review.googlesource.com/14204
Run-TryBot: Michael Matloob <matloob@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
Michael Matloob 2015-11-02 14:09:24 -05:00
parent d33360571f
commit 67faca7d9c
94 changed files with 2302 additions and 1940 deletions

@@ -54,6 +54,7 @@ package runtime
 // before the table grows. Typical tables will be somewhat less loaded.

 import (
+	"runtime/internal/atomic"
 	"unsafe"
 )

@@ -280,7 +281,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
-		return atomicloadp(unsafe.Pointer(&zeroptr))
+		return atomic.Loadp(unsafe.Pointer(&zeroptr))
 	}
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
@@ -315,7 +316,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return atomicloadp(unsafe.Pointer(&zeroptr))
+			return atomic.Loadp(unsafe.Pointer(&zeroptr))
 		}
 	}
 }
@@ -331,7 +332,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
-		return atomicloadp(unsafe.Pointer(&zeroptr)), false
+		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
 	}
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
@@ -366,7 +367,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return atomicloadp(unsafe.Pointer(&zeroptr)), false
+			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
 		}
 	}
 }
@@ -627,7 +628,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	// Remember we have an iterator.
 	// Can run concurrently with another hash_iter_init().
 	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
-		atomicor8(&h.flags, iterator|oldIterator)
+		atomic.Or8(&h.flags, iterator|oldIterator)
 	}

 	mapiternext(it)
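
For reference, the hunk above sets the two iterator bits with an atomic OR because another goroutine may be setting the same bits concurrently; the plain read of h.flags only skips the atomic write when both bits are already set. Outside the runtime, the same bit-setting idiom can be sketched with a compare-and-swap loop (all names below are illustrative, not part of this change):

package flagbits

import "sync/atomic"

// Illustrative flag bits, mirroring the iterator/oldIterator idea above.
const (
	iterator    uint32 = 1 << 0
	oldIterator uint32 = 1 << 1
)

// setBits ORs mask into *flags atomically. The CAS loop retries if some
// other goroutine changed *flags between the load and the swap, so no
// concurrently-set bits are ever lost.
func setBits(flags *uint32, mask uint32) {
	for {
		old := atomic.LoadUint32(flags)
		if old&mask == mask {
			return // already set; skip the write, like the check above
		}
		if atomic.CompareAndSwapUint32(flags, old, old|mask) {
			return
		}
	}
}
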
@@ -1024,14 +1025,14 @@ var zerosize uintptr = initialZeroSize
 // serve as the zero value for t.
 func mapzero(t *_type) {
 	// Is the type small enough for existing buffer?
-	cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
 	if t.size <= cursize {
 		return
 	}

 	// Allocate a new buffer.
 	lock(&zerolock)
-	cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
 	if cursize < t.size {
 		for cursize < t.size {
 			cursize *= 2
@@ -1040,8 +1041,8 @@ func mapzero(t *_type) {
 				throw("map element too large")
 			}
 		}
-		atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
-		atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
+		atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
+		atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
 	}
 	unlock(&zerolock)
 }
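
Taken together, the two mapzero hunks form a double-checked publication pattern: a lock-free size check on the fast path, a re-check under the lock, growth by doubling, and then the new buffer is published before the new size so that a reader who observes the larger size always finds a buffer at least that large. The following is a standalone sketch of the same shape under those assumptions, using sync/atomic and hypothetical names (zeroBuf, zeroSize, ensureZero) instead of the runtime's internal atomics and persistentalloc:

package zerobuf

import (
	"sync"
	"sync/atomic"
)

var (
	zeroMu   sync.Mutex
	zeroBuf  atomic.Value // holds the current []byte zero buffer
	zeroSize int64        // read and written only with atomic ops
)

func init() {
	zeroBuf.Store(make([]byte, 1024))
	atomic.StoreInt64(&zeroSize, 1024)
}

// ensureZero guarantees the shared zero buffer is at least need bytes.
func ensureZero(need int64) {
	// Fast path: no lock if the existing buffer is already big enough.
	if need <= atomic.LoadInt64(&zeroSize) {
		return
	}

	zeroMu.Lock()
	defer zeroMu.Unlock()

	// Re-check under the lock: another goroutine may have grown it first.
	cur := atomic.LoadInt64(&zeroSize)
	if cur >= need {
		return
	}
	for cur < need {
		cur *= 2
	}

	// Publish the buffer before the size, so anyone who reads the new
	// size also sees a buffer of at least that length.
	zeroBuf.Store(make([]byte, cur))
	atomic.StoreInt64(&zeroSize, cur)
}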