// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/sys"
	"unsafe"
)

const (
	c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
	c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
)
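
// The c0/c1 expressions pick a constant by pointer size without a
// conditional: with sys.PtrSize == 4, (8-PtrSize)/4 == 1 and
// (PtrSize-4)/4 == 0, so only the 32-bit term survives; with
// sys.PtrSize == 8 the factors flip and only the 64-bit term survives.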

// type algorithms - known to compiler
const (
	alg_NOEQ = iota
	alg_MEM0
	alg_MEM8
	alg_MEM16
	alg_MEM32
	alg_MEM64
	alg_MEM128
	alg_STRING
	alg_INTER
	alg_NILINTER
	alg_FLOAT32
	alg_FLOAT64
	alg_CPLX64
	alg_CPLX128
	alg_max
)

func memhash0(p unsafe.Pointer, h uintptr) uintptr {
	return h
}

func memhash8(p unsafe.Pointer, h uintptr) uintptr {
	return memhash(p, h, 1)
}

func memhash16(p unsafe.Pointer, h uintptr) uintptr {
	return memhash(p, h, 2)
}

func memhash128(p unsafe.Pointer, h uintptr) uintptr {
	return memhash(p, h, 16)
}

//go:nosplit
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
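	// The size to hash is assumed to sit in the closure one
	// pointer-sized word after the function pointer; getclosureptr
	// returns the base of that closure. This relies on the closure
	// layout the compiler emits when it wraps memhash_varlen.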
	ptr := getclosureptr()
	size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
	return memhash(p, h, size)
}

// runtime variable to check if the processor we're running on
// actually supports the instructions used by the AES-based
// hash implementation.
var useAeshash bool

// in asm_*.s
func memhash(p unsafe.Pointer, h, s uintptr) uintptr
func memhash32(p unsafe.Pointer, h uintptr) uintptr
func memhash64(p unsafe.Pointer, h uintptr) uintptr
func strhash(p unsafe.Pointer, h uintptr) uintptr

func strhashFallback(a unsafe.Pointer, h uintptr) uintptr {
	x := (*stringStruct)(a)
	return memhashFallback(x.str, h, uintptr(x.len))
}

// NOTE: Because NaN != NaN, a map can contain any
// number of (mostly useless) entries keyed with NaNs.
// To avoid long hash chains, we assign a random number
// as the hash value for a NaN.
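//
// For example (illustrative only):
//
//	m := map[float64]int{}
//	nan := math.NaN()
//	m[nan] = 1
//	m[nan] = 2
//	// len(m) == 2: each insert hashed the NaN to a fresh random
//	// value, and since nan != nan neither entry can be looked up.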

func f32hash(p unsafe.Pointer, h uintptr) uintptr {
	f := *(*float32)(p)
	switch {
	case f == 0:
		return c1 * (c0 ^ h) // +0, -0
	case f != f:
		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
	default:
		return memhash(p, h, 4)
	}
}

func f64hash(p unsafe.Pointer, h uintptr) uintptr {
	f := *(*float64)(p)
	switch {
	case f == 0:
		return c1 * (c0 ^ h) // +0, -0
	case f != f:
		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
	default:
		return memhash(p, h, 8)
	}
}

func c64hash(p unsafe.Pointer, h uintptr) uintptr {
	x := (*[2]float32)(p)
	return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
}

func c128hash(p unsafe.Pointer, h uintptr) uintptr {
	x := (*[2]float64)(p)
	return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
}

func interhash(p unsafe.Pointer, h uintptr) uintptr {
	a := (*iface)(p)
	tab := a.tab
	if tab == nil {
		return h
	}
	t := tab._type
	if t.equal == nil {
		// Check hashability here. We could do this check inside
		// typehash, but we want to report the topmost type in
		// the error text (e.g. in a struct with a field of slice type
		// we want to report the struct, not the slice).
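		// E.g. for a key of type struct{ s []int }, the panic below
		// names the struct ("hash of unhashable type struct { s []int }")
		// rather than the slice.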
		panic(errorString("hash of unhashable type " + t.string()))
	}
	if isDirectIface(t) {
		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
	} else {
		return c1 * typehash(t, a.data, h^c0)
	}
}

func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
	a := (*eface)(p)
	t := a._type
	if t == nil {
		return h
	}
	if t.equal == nil {
		// See comment in interhash above.
		panic(errorString("hash of unhashable type " + t.string()))
	}
	if isDirectIface(t) {
		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
	} else {
		return c1 * typehash(t, a.data, h^c0)
	}
}

// typehash computes the hash of the object of type t at address p.
// h is the seed.
// This function is seldom used. Most maps use either fixed functions
// (e.g. f32hash) or compiler-generated functions (e.g. for a type like
// struct { x, y string }) for hashing. This implementation is slower
// but more general, and is used for hashing interface types (called
// from interhash or nilinterhash, above) and for hashing in maps
// created by reflect.MapOf (reflect_typehash, below).
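//
// For example (illustrative): in a map[interface{}]int keyed with a
// struct value, hashing reaches typehash via nilinterhash, since the
// map's hasher only sees the interface header and must dispatch on the
// concrete type stored in it at runtime.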
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
	if t.tflag&tflagRegularMemory != 0 {
		// Handle ptr sizes specially, see issue 37086.
		switch t.size {
		case 4:
			return memhash32(p, h)
		case 8:
			return memhash64(p, h)
		default:
			return memhash(p, h, t.size)
		}
	}
	switch t.kind & kindMask {
	case kindFloat32:
		return f32hash(p, h)
	case kindFloat64:
		return f64hash(p, h)
	case kindComplex64:
		return c64hash(p, h)
	case kindComplex128:
		return c128hash(p, h)
	case kindString:
		return strhash(p, h)
	case kindInterface:
		i := (*interfacetype)(unsafe.Pointer(t))
		if len(i.mhdr) == 0 {
			return nilinterhash(p, h)
		}
		return interhash(p, h)
	case kindArray:
		a := (*arraytype)(unsafe.Pointer(t))
		for i := uintptr(0); i < a.len; i++ {
			h = typehash(a.elem, add(p, i*a.elem.size), h)
		}
		return h
	case kindStruct:
		s := (*structtype)(unsafe.Pointer(t))
		for _, f := range s.fields {
			// TODO: maybe we could hash several contiguous fields all at once.
			if f.name.isBlank() {
				continue
			}
			h = typehash(f.typ, add(p, f.offset()), h)
		}
		return h
	default:
		// Should never happen, as typehash should only be called
		// with comparable types.
		panic(errorString("hash of unhashable type " + t.string()))
	}
}

//go:linkname reflect_typehash reflect.typehash
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
	return typehash(t, p, h)
}

func memequal0(p, q unsafe.Pointer) bool {
	return true
}
func memequal8(p, q unsafe.Pointer) bool {
	return *(*int8)(p) == *(*int8)(q)
}
func memequal16(p, q unsafe.Pointer) bool {
	return *(*int16)(p) == *(*int16)(q)
}
func memequal32(p, q unsafe.Pointer) bool {
	return *(*int32)(p) == *(*int32)(q)
}
func memequal64(p, q unsafe.Pointer) bool {
	return *(*int64)(p) == *(*int64)(q)
}
func memequal128(p, q unsafe.Pointer) bool {
	return *(*[2]int64)(p) == *(*[2]int64)(q)
}
func f32equal(p, q unsafe.Pointer) bool {
	return *(*float32)(p) == *(*float32)(q)
}
func f64equal(p, q unsafe.Pointer) bool {
	return *(*float64)(p) == *(*float64)(q)
}
func c64equal(p, q unsafe.Pointer) bool {
	return *(*complex64)(p) == *(*complex64)(q)
}
func c128equal(p, q unsafe.Pointer) bool {
	return *(*complex128)(p) == *(*complex128)(q)
}
func strequal(p, q unsafe.Pointer) bool {
	return *(*string)(p) == *(*string)(q)
}
func interequal(p, q unsafe.Pointer) bool {
	x := *(*iface)(p)
	y := *(*iface)(q)
	return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
}
func nilinterequal(p, q unsafe.Pointer) bool {
	x := *(*eface)(p)
	y := *(*eface)(q)
	return x._type == y._type && efaceeq(x._type, x.data, y.data)
}
func efaceeq(t *_type, x, y unsafe.Pointer) bool {
	if t == nil {
		return true
	}
	eq := t.equal
	if eq == nil {
		panic(errorString("comparing uncomparable type " + t.string()))
	}
	if isDirectIface(t) {
		// Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
		// Maps and funcs are not comparable, so they can't reach here.
		// Ptrs, chans, and single-element items can be compared directly using ==.
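		// E.g. two interface values holding the same *T compare equal
		// here with a single pointer comparison; eq is never called.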
		return x == y
	}
	return eq(x, y)
}
func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
	if tab == nil {
		return true
	}
	t := tab._type
	eq := t.equal
	if eq == nil {
		panic(errorString("comparing uncomparable type " + t.string()))
	}
	if isDirectIface(t) {
		// See comment in efaceeq.
		return x == y
	}
	return eq(x, y)
}

// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
	return strhash(noescape(unsafe.Pointer(&s)), seed)
}

func bytesHash(b []byte, seed uintptr) uintptr {
	s := (*slice)(unsafe.Pointer(&b))
	return memhash(s.array, seed, uintptr(s.len))
}

func int32Hash(i uint32, seed uintptr) uintptr {
	return memhash32(noescape(unsafe.Pointer(&i)), seed)
}

func int64Hash(i uint64, seed uintptr) uintptr {
	return memhash64(noescape(unsafe.Pointer(&i)), seed)
}

func efaceHash(i interface{}, seed uintptr) uintptr {
	return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
}

func ifaceHash(i interface {
	F()
}, seed uintptr) uintptr {
	return interhash(noescape(unsafe.Pointer(&i)), seed)
}
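
// Illustrative use of the adapters above, in the spirit of the hash
// quality tests: hash near-identical keys with one seed and check that
// the outputs spread across the whole value space, e.g.
//
//	h1 := stringHash("hello", 0)
//	h2 := stringHash("hellp", 0)
//	// A quality hash makes h1 and h2 differ in about half their bits.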

const hashRandomBytes = sys.PtrSize / 4 * 64

// used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte

// used in hash{32,64}.go to seed the hash function
var hashkey [4]uintptr

func alginit() {
	// Install AES hash algorithms if the instructions needed are present.
	if (GOARCH == "386" || GOARCH == "amd64") &&
		cpu.X86.HasAES && // AESENC
		cpu.X86.HasSSSE3 && // PSHUFB
		cpu.X86.HasSSE41 { // PINSR{D,Q}
		initAlgAES()
		return
	}
	if GOARCH == "arm64" && cpu.ARM64.HasAES {
		initAlgAES()
		return
	}
	getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
	hashkey[0] |= 1 // make sure these numbers are odd
	hashkey[1] |= 1
	hashkey[2] |= 1
	hashkey[3] |= 1
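	// Odd multipliers are invertible mod 2^(word size), so multiplying
	// by a hashkey word permutes values rather than collapsing them.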
}

func initAlgAES() {
	useAeshash = true
	// Initialize with random data so hash collisions will be hard to engineer.
	getRandomData(aeskeysched[:])
}

// Note: These routines perform the read with native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
	q := (*[4]byte)(p)
	if sys.BigEndian {
		return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
	}
	return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
}

func readUnaligned64(p unsafe.Pointer) uint64 {
	q := (*[8]byte)(p)
	if sys.BigEndian {
		return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
			uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
	}
	return uint64(q[0]) | uint64(q[1])<<8 | uint64(q[2])<<16 | uint64(q[3])<<24 | uint64(q[4])<<32 | uint64(q[5])<<40 | uint64(q[6])<<48 | uint64(q[7])<<56
}
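
// For example (illustrative): given b := [4]byte{0x01, 0x02, 0x03, 0x04},
// readUnaligned32(unsafe.Pointer(&b[0])) yields 0x04030201 on a
// little-endian machine and 0x01020304 on a big-endian one.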