mirror of https://github.com/golang/go.git
reflect: use sync.Map instead of RWMutex for type caches
This provides a significant speedup when using reflection-heavy code on many CPU cores, such as when marshaling or unmarshaling protocol buffers.

updates #17973
updates #18177

name                        old time/op    new time/op    delta
Call                        239ns ±10%     245ns ± 7%     ~         (p=0.562 n=10+9)
Call-6                      201ns ±38%     48ns ±29%      -76.39%   (p=0.000 n=10+9)
Call-48                     133ns ± 8%     12ns ± 2%      -90.92%   (p=0.000 n=10+8)
CallArgCopy/size=128        169ns ±12%     197ns ± 2%     +16.35%   (p=0.000 n=10+7)
CallArgCopy/size=128-6      142ns ± 9%     34ns ± 7%      -76.10%   (p=0.000 n=10+9)
CallArgCopy/size=128-48     125ns ± 3%     9ns ± 7%       -93.01%   (p=0.000 n=8+8)
CallArgCopy/size=256        177ns ± 8%     197ns ± 5%     +11.24%   (p=0.000 n=10+9)
CallArgCopy/size=256-6      148ns ±11%     35ns ± 6%      -76.23%   (p=0.000 n=10+9)
CallArgCopy/size=256-48     127ns ± 4%     9ns ± 9%       -92.66%   (p=0.000 n=10+9)
CallArgCopy/size=1024       196ns ± 6%     228ns ± 7%     +16.09%   (p=0.000 n=10+9)
CallArgCopy/size=1024-6     143ns ± 6%     42ns ± 5%      -70.39%   (p=0.000 n=8+8)
CallArgCopy/size=1024-48    130ns ± 7%     10ns ± 1%      -91.99%   (p=0.000 n=10+8)
CallArgCopy/size=4096       330ns ± 9%     351ns ± 5%     +6.20%    (p=0.004 n=10+9)
CallArgCopy/size=4096-6     173ns ±14%     62ns ± 6%      -63.83%   (p=0.000 n=10+8)
CallArgCopy/size=4096-48    141ns ± 6%     15ns ± 6%      -89.59%   (p=0.000 n=10+8)
CallArgCopy/size=65536      7.71µs ±10%    7.74µs ±10%    ~         (p=0.859 n=10+9)
CallArgCopy/size=65536-6    1.33µs ± 4%    1.34µs ± 6%    ~         (p=0.720 n=10+9)
CallArgCopy/size=65536-48   347ns ± 2%     344ns ± 2%     ~         (p=0.202 n=10+9)
PtrTo                       30.2ns ±10%    41.3ns ±11%    +36.97%   (p=0.000 n=10+9)
PtrTo-6                     126ns ± 6%     7ns ±10%       -94.47%   (p=0.000 n=9+9)
PtrTo-48                    86.9ns ± 9%    1.7ns ± 9%     -98.08%   (p=0.000 n=10+9)
FieldByName1                86.6ns ± 5%    87.3ns ± 7%    ~         (p=0.737 n=10+9)
FieldByName1-6              19.8ns ±10%    18.7ns ±10%    ~         (p=0.073 n=9+9)
FieldByName1-48             7.54ns ± 4%    7.74ns ± 5%    +2.55%    (p=0.023 n=9+9)
FieldByName2                1.63µs ± 8%    1.70µs ± 4%    +4.13%    (p=0.020 n=9+9)
FieldByName2-6              481ns ± 6%     490ns ±10%     ~         (p=0.474 n=9+9)
FieldByName2-48             723ns ± 3%     736ns ± 2%     +1.76%    (p=0.045 n=8+8)
FieldByName3                10.5µs ± 7%    10.8µs ± 7%    ~         (p=0.234 n=8+8)
FieldByName3-6              2.78µs ± 3%    2.94µs ±10%    +5.87%    (p=0.031 n=9+9)
FieldByName3-48             3.72µs ± 2%    3.91µs ± 5%    +4.91%    (p=0.003 n=9+9)
InterfaceBig                10.8ns ± 5%    10.7ns ± 5%    ~         (p=0.849 n=9+9)
InterfaceBig-6              9.62ns ±81%    1.79ns ± 4%    -81.38%   (p=0.003 n=9+9)
InterfaceBig-48             0.48ns ±34%    0.50ns ± 7%    ~         (p=0.071 n=8+9)
InterfaceSmall              10.7ns ± 5%    10.9ns ± 4%    ~         (p=0.243 n=9+9)
InterfaceSmall-6            1.85ns ± 5%    1.79ns ± 1%    -2.97%    (p=0.006 n=7+8)
InterfaceSmall-48           0.49ns ±20%    0.48ns ± 5%    ~         (p=0.740 n=7+9)
New                         28.2ns ±20%    26.6ns ± 3%    ~         (p=0.617 n=9+9)
New-6                       4.69ns ± 4%    4.44ns ± 3%    -5.33%    (p=0.001 n=9+9)
New-48                      1.10ns ± 9%    1.08ns ± 6%    ~         (p=0.285 n=9+8)

name                        old alloc/op   new alloc/op   delta
Call                        0.00B          0.00B          ~         (all equal)
Call-6                      0.00B          0.00B          ~         (all equal)
Call-48                     0.00B          0.00B          ~         (all equal)

name                        old allocs/op  new allocs/op  delta
Call                        0.00           0.00           ~         (all equal)
Call-6                      0.00           0.00           ~         (all equal)
Call-48                     0.00           0.00           ~         (all equal)

name                        old speed       new speed       delta
CallArgCopy/size=128        757MB/s ±11%    649MB/s ± 1%    -14.33%     (p=0.000 n=10+7)
CallArgCopy/size=128-6      901MB/s ± 9%    3781MB/s ± 7%   +319.69%    (p=0.000 n=10+9)
CallArgCopy/size=128-48     1.02GB/s ± 2%   14.63GB/s ± 6%  +1337.98%   (p=0.000 n=8+8)
CallArgCopy/size=256        1.45GB/s ± 9%   1.30GB/s ± 5%   -10.17%     (p=0.000 n=10+9)
CallArgCopy/size=256-6      1.73GB/s ±11%   7.28GB/s ± 7%   +320.76%    (p=0.000 n=10+9)
CallArgCopy/size=256-48     2.00GB/s ± 4%   27.46GB/s ± 9%  +1270.85%   (p=0.000 n=10+9)
CallArgCopy/size=1024       5.21GB/s ± 6%   4.49GB/s ± 8%   -13.74%     (p=0.000 n=10+9)
CallArgCopy/size=1024-6     7.18GB/s ± 7%   24.17GB/s ± 5%  +236.64%    (p=0.000 n=9+8)
CallArgCopy/size=1024-48    7.87GB/s ± 7%   98.43GB/s ± 1%  +1150.99%   (p=0.000 n=10+8)
CallArgCopy/size=4096       12.3GB/s ± 6%   11.7GB/s ± 5%   -5.00%      (p=0.008 n=9+9)
CallArgCopy/size=4096-6     23.8GB/s ±16%   65.6GB/s ± 5%   +175.02%    (p=0.000 n=10+8)
CallArgCopy/size=4096-48    29.0GB/s ± 7%   279.6GB/s ± 6%  +862.87%    (p=0.000 n=10+8)
CallArgCopy/size=65536      8.52GB/s ±11%   8.49GB/s ± 9%   ~           (p=0.842 n=10+9)
CallArgCopy/size=65536-6    49.3GB/s ± 4%   49.0GB/s ± 6%   ~           (p=0.720 n=10+9)
CallArgCopy/size=65536-48   189GB/s ± 2%    190GB/s ± 2%    ~           (p=0.211 n=10+9)

https://perf.golang.org/search?q=upload:20170426.3

Change-Id: Iff68f18ef69defb7f30962e21736ac7685a48a27
Reviewed-on: https://go-review.googlesource.com/41871
Run-TryBot: Bryan Mills <bcmills@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
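The mechanical change behind these numbers is the same in every cache touched below: a map guarded by a sync.RWMutex becomes a sync.Map, whose loads of existing keys are lock-free. The single-goroutine rows regress slightly (sync.Map costs more uncontended) while the -6 and -48 rows improve dramatically, which is the intended trade. A minimal before/after sketch of the shape, using a toy string-keyed cache rather than the reflect internals:

package main

import (
	"fmt"
	"sync"
)

// Before: every reader takes RLock/RUnlock, so all cores contend on
// the RWMutex's cache line even when the map never changes.
var before = struct {
	sync.RWMutex
	m map[string]int
}{m: make(map[string]int)}

func getBefore(k string) (int, bool) {
	before.RLock()
	v, ok := before.m[k]
	before.RUnlock()
	return v, ok
}

// After: sync.Map. Loads of existing keys take no locks, so read-mostly
// workloads scale with core count. The zero value is ready to use,
// which is why the lazy make(...) initializations disappear in this CL.
var after sync.Map // map[string]int

func getAfter(k string) (int, bool) {
	v, ok := after.Load(k)
	if !ok {
		return 0, false
	}
	return v.(int), true
}

func main() {
	before.Lock()
	before.m["x"] = 1
	before.Unlock()
	after.Store("x", 1)
	fmt.Println(getBefore("x"))
	fmt.Println(getAfter("x"))
}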
parent 6e54fe47ce
commit 33b92cd6ce
1 changed file with 107 additions and 184 deletions
@@ -782,18 +782,12 @@ func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
 
 func (t *rtype) common() *rtype { return t }
 
-var methodCache struct {
-	sync.RWMutex
-	m map[*rtype][]method
-}
+var methodCache sync.Map // map[*rtype][]method
 
 func (t *rtype) exportedMethods() []method {
-	methodCache.RLock()
-	methods, found := methodCache.m[t]
-	methodCache.RUnlock()
-
+	methodsi, found := methodCache.Load(t)
 	if found {
-		return methods
+		return methodsi.([]method)
 	}
 
 	ut := t.uncommon()
@@ -809,6 +803,7 @@ func (t *rtype) exportedMethods() []method {
 			break
 		}
 	}
+	var methods []method
 	if allExported {
 		methods = allm
 	} else {
@@ -822,14 +817,8 @@ func (t *rtype) exportedMethods() []method {
 		methods = methods[:len(methods):len(methods)]
 	}
 
-	methodCache.Lock()
-	if methodCache.m == nil {
-		methodCache.m = make(map[*rtype][]method)
-	}
-	methodCache.m[t] = methods
-	methodCache.Unlock()
-
-	return methods
+	methodsi, _ = methodCache.LoadOrStore(t, methods)
+	return methodsi.([]method)
 }
 
 func (t *rtype) NumMethod() int {
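The store side of exportedMethods takes no lock either: racing goroutines may each compute a methods slice, but LoadOrStore keeps exactly one canonical value and hands it to every caller. A toy demonstration of that property (illustrative names, not the reflect code):

package main

import (
	"fmt"
	"sync"
)

var cache sync.Map

// intern returns the canonical value for key: whichever store lands
// first wins, and every caller gets that same value back.
func intern(key string, fresh []int) []int {
	v, _ := cache.LoadOrStore(key, fresh)
	return v.([]int)
}

func main() {
	var wg sync.WaitGroup
	results := make([][]int, 8)
	for i := range results {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each goroutine builds its own candidate slice; the wasted
			// work is acceptable because only one result becomes canonical.
			results[i] = intern("k", []int{i})
		}(i)
	}
	wg.Wait()
	for _, r := range results[1:] {
		// All callers share one backing array: a single winner was kept.
		fmt.Println(&r[0] == &results[0][0]) // always true
	}
}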
@@ -838,7 +827,7 @@ func (t *rtype) NumMethod() int {
 		return tt.NumMethod()
 	}
 	if t.tflag&tflagUncommon == 0 {
-		return 0 // avoid methodCache lock in zero case
+		return 0 // avoid methodCache synchronization
 	}
 	return len(t.exportedMethods())
 }
@@ -1410,10 +1399,7 @@ func TypeOf(i interface{}) Type {
 }
 
 // ptrMap is the cache for PtrTo.
-var ptrMap struct {
-	sync.RWMutex
-	m map[*rtype]*ptrType
-}
+var ptrMap sync.Map // map[*rtype]*ptrType
 
 // PtrTo returns the pointer type with element t.
 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
@@ -1427,35 +1413,19 @@ func (t *rtype) ptrTo() *rtype {
 	}
 
 	// Check the cache.
-	ptrMap.RLock()
-	if m := ptrMap.m; m != nil {
-		if p := m[t]; p != nil {
-			ptrMap.RUnlock()
-			return &p.rtype
-		}
-	}
-	ptrMap.RUnlock()
-
-	ptrMap.Lock()
-	if ptrMap.m == nil {
-		ptrMap.m = make(map[*rtype]*ptrType)
-	}
-	p := ptrMap.m[t]
-	if p != nil {
-		// some other goroutine won the race and created it
-		ptrMap.Unlock()
-		return &p.rtype
+	if pi, ok := ptrMap.Load(t); ok {
+		return &pi.(*ptrType).rtype
 	}
 
 	// Look in known types.
 	s := "*" + t.String()
 	for _, tt := range typesByString(s) {
-		p = (*ptrType)(unsafe.Pointer(tt))
-		if p.elem == t {
-			ptrMap.m[t] = p
-			ptrMap.Unlock()
-			return &p.rtype
+		p := (*ptrType)(unsafe.Pointer(tt))
+		if p.elem != t {
+			continue
 		}
+		pi, _ := ptrMap.LoadOrStore(t, p)
+		return &pi.(*ptrType).rtype
 	}
 
 	// Create a new ptrType starting with the description
@@ -1476,9 +1446,8 @@ func (t *rtype) ptrTo() *rtype {
 
 	pp.elem = t
 
-	ptrMap.m[t] = &pp
-	ptrMap.Unlock()
-	return &pp.rtype
+	pi, _ := ptrMap.LoadOrStore(t, &pp)
+	return &pi.(*ptrType).rtype
 }
 
 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
@@ -1779,10 +1748,7 @@ func typesByString(s string) []*rtype {
 }
 
 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
-var lookupCache struct {
-	sync.RWMutex
-	m map[cacheKey]*rtype
-}
+var lookupCache sync.Map // map[cacheKey]*rtype
 
 // A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
@@ -1794,47 +1760,15 @@ type cacheKey struct {
 	extra uintptr
 }
 
-// cacheGet looks for a type under the key k in the lookupCache.
-// If it finds one, it returns that type.
-// If not, it returns nil with the cache locked.
-// The caller is expected to use cachePut to unlock the cache.
-func cacheGet(k cacheKey) Type {
-	lookupCache.RLock()
-	t := lookupCache.m[k]
-	lookupCache.RUnlock()
-	if t != nil {
-		return t
-	}
-
-	lookupCache.Lock()
-	t = lookupCache.m[k]
-	if t != nil {
-		lookupCache.Unlock()
-		return t
-	}
-
-	if lookupCache.m == nil {
-		lookupCache.m = make(map[cacheKey]*rtype)
-	}
-
-	return nil
-}
-
-// cachePut stores the given type in the cache, unlocks the cache,
-// and returns the type. It is expected that the cache is locked
-// because cacheGet returned nil.
-func cachePut(k cacheKey, t *rtype) Type {
-	lookupCache.m[k] = t
-	lookupCache.Unlock()
-	return t
-}
-
 // The funcLookupCache caches FuncOf lookups.
 // FuncOf does not share the common lookupCache since cacheKey is not
 // sufficient to represent functions unambiguously.
 var funcLookupCache struct {
-	sync.RWMutex
-	m map[uint32][]*rtype // keyed by hash calculated in FuncOf
+	sync.Mutex // Guards stores (but not loads) on m.
+
+	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+	// Elements of m are append-only and thus safe for concurrent reading.
+	m sync.Map
 }
 
 // ChanOf returns the channel type with the given direction and element type.
@@ -1847,13 +1781,12 @@ func ChanOf(dir ChanDir, t Type) Type {
 
 	// Look in cache.
 	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
-	if ch := cacheGet(ckey); ch != nil {
-		return ch
+	if ch, ok := lookupCache.Load(ckey); ok {
+		return ch.(*rtype)
 	}
 
 	// This restriction is imposed by the gc compiler and the runtime.
 	if typ.size >= 1<<16 {
-		lookupCache.Unlock()
 		panic("reflect.ChanOf: element size too large")
 	}
 
@@ -1862,7 +1795,6 @@ func ChanOf(dir ChanDir, t Type) Type {
 	var s string
 	switch dir {
 	default:
-		lookupCache.Unlock()
 		panic("reflect.ChanOf: invalid dir")
 	case SendDir:
 		s = "chan<- " + typ.String()
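The two deleted lookupCache.Unlock() calls in these panic paths point at an awkward property of the old cacheGet API: it could return with the lock still held, so every early exit had to release it by hand or the next lookup would hang forever. A small sketch of that hazard (toy code, not from this CL; TryLock requires Go 1.18+):

package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

// lookupOld mimics the old contract: the function ends up holding mu.
// If it panics before the explicit Unlock, the lock leaks.
func lookupOld(sizeTooLarge bool) {
	mu.Lock()
	if sizeTooLarge {
		panic("element size too large") // mu is never released
	}
	mu.Unlock()
}

func main() {
	func() {
		defer func() { recover() }()
		lookupOld(true)
	}()
	// TryLock shows the leak: the mutex is still held after the panic.
	fmt.Println(mu.TryLock()) // false
}

With sync.Map there is no caller-visible lock, so the panic paths need no cleanup at all.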
@@ -1874,7 +1806,8 @@ func ChanOf(dir ChanDir, t Type) Type {
 	for _, tt := range typesByString(s) {
 		ch := (*chanType)(unsafe.Pointer(tt))
 		if ch.elem == typ && ch.dir == uintptr(dir) {
-			return cachePut(ckey, tt)
+			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+			return ti.(Type)
 		}
 	}
 
@@ -1888,7 +1821,8 @@ func ChanOf(dir ChanDir, t Type) Type {
 	ch.hash = fnv1(typ.hash, 'c', byte(dir))
 	ch.elem = typ
 
-	return cachePut(ckey, &ch.rtype)
+	ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+	return ti.(Type)
 }
 
 func ismapkey(*rtype) bool // implemented in runtime
@@ -1909,8 +1843,8 @@ func MapOf(key, elem Type) Type {
 
 	// Look in cache.
 	ckey := cacheKey{Map, ktyp, etyp, 0}
-	if mt := cacheGet(ckey); mt != nil {
-		return mt
+	if mt, ok := lookupCache.Load(ckey); ok {
+		return mt.(Type)
 	}
 
 	// Look in known types.
|
@ -1918,7 +1852,8 @@ func MapOf(key, elem Type) Type {
|
|||
for _, tt := range typesByString(s) {
|
||||
mt := (*mapType)(unsafe.Pointer(tt))
|
||||
if mt.key == ktyp && mt.elem == etyp {
|
||||
return cachePut(ckey, tt)
|
||||
ti, _ := lookupCache.LoadOrStore(ckey, tt)
|
||||
return ti.(Type)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -1950,7 +1885,8 @@ func MapOf(key, elem Type) Type {
 	mt.needkeyupdate = needKeyUpdate(ktyp)
 	mt.ptrToThis = 0
 
-	return cachePut(ckey, &mt.rtype)
+	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+	return ti.(Type)
 }
 
 type funcTypeFixed4 struct {
@@ -2055,42 +1991,46 @@ func FuncOf(in, out []Type, variadic bool) Type {
 	}
 
 	// Look in cache.
-	funcLookupCache.RLock()
-	for _, t := range funcLookupCache.m[hash] {
-		if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
-			funcLookupCache.RUnlock()
-			return t
+	if ts, ok := funcLookupCache.m.Load(hash); ok {
+		for _, t := range ts.([]*rtype) {
+			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+				return t
+			}
 		}
 	}
-	funcLookupCache.RUnlock()
 
 	// Not in cache, lock and retry.
 	funcLookupCache.Lock()
 	defer funcLookupCache.Unlock()
-	if funcLookupCache.m == nil {
-		funcLookupCache.m = make(map[uint32][]*rtype)
-	}
-	for _, t := range funcLookupCache.m[hash] {
-		if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
-			return t
+	if ts, ok := funcLookupCache.m.Load(hash); ok {
+		for _, t := range ts.([]*rtype) {
+			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+				return t
+			}
 		}
 	}
 
+	addToCache := func(tt *rtype) Type {
+		var rts []*rtype
+		if rti, ok := funcLookupCache.m.Load(hash); ok {
+			rts = rti.([]*rtype)
+		}
+		funcLookupCache.m.Store(hash, append(rts, tt))
+		return tt
+	}
+
 	// Look in known types for the same string representation.
 	str := funcStr(ft)
 	for _, tt := range typesByString(str) {
 		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
-			funcLookupCache.m[hash] = append(funcLookupCache.m[hash], tt)
-			return tt
+			return addToCache(tt)
 		}
 	}
 
 	// Populate the remaining fields of ft and store in cache.
 	ft.str = resolveReflectName(newName(str, "", "", false))
 	ft.ptrToThis = 0
-	funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
-
-	return &ft.rtype
+	return addToCache(&ft.rtype)
 }
 
 // funcStr builds a string representation of a funcType.
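funcLookupCache shows the pattern this CL uses for caches whose values are slices: loads scan the sync.Map without any lock, while the plain Mutex serializes the load-append-store on a bucket, and the second Load after Lock re-checks for entries added in the window. A self-contained sketch of the same shape, with a toy string cache standing in for the []*rtype buckets:

package main

import (
	"fmt"
	"sync"
)

// bucketCache mirrors the shape of funcLookupCache: lock-free reads of
// a sync.Map whose values are append-only slices, plus a plain Mutex
// serializing the read-modify-write on the store side.
var bucketCache struct {
	sync.Mutex          // Guards stores (but not loads) on m.
	m          sync.Map // map[uint32][]string
}

// lookupOrAdd returns the cached entry equal to s in the given hash
// bucket, adding s first if no such entry exists.
func lookupOrAdd(hash uint32, s string) string {
	// Fast path: a lock-free read of the current bucket.
	if b, ok := bucketCache.m.Load(hash); ok {
		for _, t := range b.([]string) {
			if t == s {
				return t
			}
		}
	}

	bucketCache.Lock()
	defer bucketCache.Unlock()
	// Re-check under the Mutex: another writer may have appended s
	// between the Load above and the Lock.
	var bucket []string
	if b, ok := bucketCache.m.Load(hash); ok {
		bucket = b.([]string)
		for _, t := range bucket {
			if t == s {
				return t
			}
		}
	}
	// Appending under the Mutex never changes elements visible through
	// previously published slice headers, so readers need no lock.
	bucketCache.m.Store(hash, append(bucket, s))
	return s
}

func main() {
	fmt.Println(lookupOrAdd(7, "chan int"))
	fmt.Println(lookupOrAdd(7, "chan int")) // second call is a cache hit
}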
@@ -2294,8 +2234,8 @@ func SliceOf(t Type) Type {
 
 	// Look in cache.
 	ckey := cacheKey{Slice, typ, nil, 0}
-	if slice := cacheGet(ckey); slice != nil {
-		return slice
+	if slice, ok := lookupCache.Load(ckey); ok {
+		return slice.(Type)
 	}
 
 	// Look in known types.
@@ -2303,7 +2243,8 @@ func SliceOf(t Type) Type {
 	for _, tt := range typesByString(s) {
 		slice := (*sliceType)(unsafe.Pointer(tt))
 		if slice.elem == typ {
-			return cachePut(ckey, tt)
+			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+			return ti.(Type)
 		}
 	}
 
@@ -2317,17 +2258,19 @@ func SliceOf(t Type) Type {
 	slice.elem = typ
 	slice.ptrToThis = 0
 
-	return cachePut(ckey, &slice.rtype)
+	ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+	return ti.(Type)
 }
 
 // The structLookupCache caches StructOf lookups.
 // StructOf does not share the common lookupCache since we need to pin
 // the memory associated with *structTypeFixedN.
 var structLookupCache struct {
-	sync.RWMutex
-	m map[uint32][]interface {
-		common() *rtype
-	} // keyed by hash calculated in StructOf
+	sync.Mutex // Guards stores (but not loads) on m.
+
+	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+	// Elements in m are append-only and thus safe for concurrent reading.
+	m sync.Map
 }
 
 type structTypeUncommon struct {
@@ -2581,40 +2524,32 @@ func StructOf(fields []StructField) Type {
 
 	var typ *structType
 	var ut *uncommonType
-	var typPin interface {
-		common() *rtype
-	} // structTypeFixedN
 
 	switch {
 	case len(methods) == 0:
 		t := new(structTypeUncommon)
 		typ = &t.structType
 		ut = &t.u
-		typPin = t
 	case len(methods) <= 4:
 		t := new(structTypeFixed4)
 		typ = &t.structType
 		ut = &t.u
 		copy(t.m[:], methods)
-		typPin = t
 	case len(methods) <= 8:
 		t := new(structTypeFixed8)
 		typ = &t.structType
 		ut = &t.u
 		copy(t.m[:], methods)
-		typPin = t
 	case len(methods) <= 16:
 		t := new(structTypeFixed16)
 		typ = &t.structType
 		ut = &t.u
 		copy(t.m[:], methods)
-		typPin = t
 	case len(methods) <= 32:
 		t := new(structTypeFixed32)
 		typ = &t.structType
 		ut = &t.u
 		copy(t.m[:], methods)
-		typPin = t
 	default:
 		panic("reflect.StructOf: too many methods")
 	}
@@ -2637,40 +2572,44 @@ func StructOf(fields []StructField) Type {
 	*typ = *prototype
 	typ.fields = fs
 
-	// Look in cache
-	structLookupCache.RLock()
-	for _, st := range structLookupCache.m[hash] {
-		t := st.common()
-		if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
-			structLookupCache.RUnlock()
-			return t
+	// Look in cache.
+	if ts, ok := structLookupCache.m.Load(hash); ok {
+		for _, st := range ts.([]Type) {
+			t := st.common()
+			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+				return t
+			}
 		}
 	}
-	structLookupCache.RUnlock()
 
-	// not in cache, lock and retry
+	// Not in cache, lock and retry.
 	structLookupCache.Lock()
 	defer structLookupCache.Unlock()
-	if structLookupCache.m == nil {
-		structLookupCache.m = make(map[uint32][]interface {
-			common() *rtype
-		})
-	}
-	for _, st := range structLookupCache.m[hash] {
-		t := st.common()
-		if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
-			return t
+	if ts, ok := structLookupCache.m.Load(hash); ok {
+		for _, st := range ts.([]Type) {
+			t := st.common()
+			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+				return t
+			}
 		}
 	}
 
+	addToCache := func(t Type) Type {
+		var ts []Type
+		if ti, ok := structLookupCache.m.Load(hash); ok {
+			ts = ti.([]Type)
+		}
+		structLookupCache.m.Store(hash, append(ts, t))
+		return t
+	}
+
 	// Look in known types.
 	for _, t := range typesByString(str) {
 		if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
 			// even if 't' wasn't a structType with methods, we should be ok
 			// as the 'u uncommonType' field won't be accessed except when
 			// tflag&tflagUncommon is set.
-			structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
-			return t
+			return addToCache(t)
 		}
 	}
 
@@ -2781,8 +2720,7 @@ func StructOf(fields []StructField) Type {
 		typ.kind &^= kindDirectIface
 	}
 
-	structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin)
-	return &typ.rtype
+	return addToCache(&typ.rtype)
 }
 
 func runtimeStructField(field StructField) structField {
@@ -2846,15 +2784,11 @@ const maxPtrmaskBytes = 2048
 // ArrayOf panics.
 func ArrayOf(count int, elem Type) Type {
 	typ := elem.(*rtype)
-	// call SliceOf here as it calls cacheGet/cachePut.
-	// ArrayOf also calls cacheGet/cachePut and thus may modify the state of
-	// the lookupCache mutex.
-	slice := SliceOf(elem)
 
 	// Look in cache.
 	ckey := cacheKey{Array, typ, nil, uintptr(count)}
-	if array := cacheGet(ckey); array != nil {
-		return array
+	if array, ok := lookupCache.Load(ckey); ok {
+		return array.(Type)
 	}
 
 	// Look in known types.
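The deleted comment explains why the old ArrayOf had to call SliceOf up front: cacheGet could return holding the lookupCache lock, and a nested SliceOf call would then try to take that same lock. With sync.Map every operation is self-contained, so the CL moves the SliceOf call down to where its result is used (the array.slice hunk below). The hazard being avoided, in miniature (toy code, not from this CL):

package main

import (
	"fmt"
	"sync"
)

var cacheMu sync.Mutex

// sliceOf stands in for a helper that takes the cache lock itself.
func sliceOf() {
	cacheMu.Lock() // blocks forever if the caller already holds cacheMu
	defer cacheMu.Unlock()
}

// arrayOfOld mimics the old shape: holding the lock while calling a
// helper that locks the same mutex self-deadlocks, because sync.Mutex
// is not reentrant. Hence the old "call SliceOf here" workaround.
func arrayOfOld() {
	cacheMu.Lock()
	defer cacheMu.Unlock()
	sliceOf() // fatal error: all goroutines are asleep - deadlock!
}

func main() {
	// arrayOfOld() // would deadlock; left unexecuted on purpose
	fmt.Println("sync.Map operations are self-contained; no lock ordering to respect")
}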
@@ -2862,7 +2796,8 @@ func ArrayOf(count int, elem Type) Type {
 	for _, tt := range typesByString(s) {
 		array := (*arrayType)(unsafe.Pointer(tt))
 		if array.elem == typ {
-			return cachePut(ckey, tt)
+			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+			return ti.(Type)
 		}
 	}
 
@@ -2889,7 +2824,7 @@ func ArrayOf(count int, elem Type) Type {
 	array.align = typ.align
 	array.fieldAlign = typ.fieldAlign
 	array.len = uintptr(count)
-	array.slice = slice.(*rtype)
+	array.slice = SliceOf(elem).(*rtype)
 
 	array.kind &^= kindNoPointers
 	switch {
@@ -3008,7 +2943,8 @@ func ArrayOf(count int, elem Type) Type {
 		array.kind &^= kindDirectIface
 	}
 
-	return cachePut(ckey, &array.rtype)
+	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+	return ti.(Type)
 }
 
 func appendVarint(x []byte, v uintptr) []byte {
@@ -3044,10 +2980,7 @@ type layoutType struct {
 	framePool *sync.Pool
 }
 
-var layoutCache struct {
-	sync.RWMutex
-	m map[layoutKey]layoutType
-}
+var layoutCache sync.Map // map[layoutKey]layoutType
 
 // funcLayout computes a struct type representing the layout of the
 // function arguments and return values for the function type t.
@@ -3063,16 +2996,9 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin
 		panic("reflect: funcLayout with interface receiver " + rcvr.String())
 	}
 	k := layoutKey{t, rcvr}
-	layoutCache.RLock()
-	if x := layoutCache.m[k]; x.t != nil {
-		layoutCache.RUnlock()
-		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
-	}
-	layoutCache.RUnlock()
-	layoutCache.Lock()
-	if x := layoutCache.m[k]; x.t != nil {
-		layoutCache.Unlock()
-		return x.t, x.argSize, x.retOffset, x.stack, x.framePool
+	if lti, ok := layoutCache.Load(k); ok {
+		lt := lti.(layoutType)
+		return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
 	}
 
 	tt := (*funcType)(unsafe.Pointer(t))
@@ -3133,21 +3059,18 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin
 	x.str = resolveReflectName(newName(s, "", "", false))
 
 	// cache result for future callers
-	if layoutCache.m == nil {
-		layoutCache.m = make(map[layoutKey]layoutType)
-	}
 	framePool = &sync.Pool{New: func() interface{} {
 		return unsafe_New(x)
 	}}
-	layoutCache.m[k] = layoutType{
+	lti, _ := layoutCache.LoadOrStore(k, layoutType{
 		t:         x,
 		argSize:   argSize,
 		retOffset: retOffset,
 		stack:     ptrmap,
 		framePool: framePool,
-	}
-	layoutCache.Unlock()
-	return x, argSize, retOffset, ptrmap, framePool
+	})
+	lt := lti.(layoutType)
+	return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
 }
 
 // ifaceIndir reports whether t is stored indirectly in an interface value.
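layoutCache is the one cache in this CL whose values are plain structs rather than pointers: layoutType values are boxed into an interface{} by Store and type-asserted back out on Load. A sketch of that flow with a toy layout type (hypothetical names, mirroring funcLayout's use of LoadOrStore):

package main

import (
	"fmt"
	"sync"
)

// layout stands in for reflect's layoutType: a small struct stored by
// value in the sync.Map.
type layout struct {
	size  int
	align int
}

var layouts sync.Map // map[string]layout

func layoutFor(key string) layout {
	if li, ok := layouts.Load(key); ok {
		return li.(layout) // unbox the cached struct value
	}
	// Compute without holding any lock; a racing caller may duplicate
	// this work, but LoadOrStore keeps exactly one canonical result.
	computed := layout{size: 8 * len(key), align: 8}
	li, _ := layouts.LoadOrStore(key, computed)
	return li.(layout)
}

func main() {
	fmt.Println(layoutFor("func(int) error"))
	fmt.Println(layoutFor("func(int) error")) // cache hit, same value back
}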