runtime: remove size argument from hash and equal algorithms

The equal algorithm used to take the size:
   equal(p, q *T, size uintptr) bool
With this change, it does not:
   equal(p, q *T) bool
Similarly for the hash algorithm.
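
In Go terms, the per-type algorithm table entry then has roughly this
shape (a sketch based on the description above; the package name and
field comments are illustrative, not the runtime's literal source):

   package runtimealg // hypothetical package name for the sketch

   import "unsafe"

   // typeAlg describes the hash and equal implementations for one type.
   type typeAlg struct {
       // (pointer to object, seed) -> hash
       hash func(unsafe.Pointer, uintptr) uintptr
       // (pointer to object A, pointer to object B) -> equal?
       equal func(unsafe.Pointer, unsafe.Pointer) bool
   }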

The size is rarely used, as most equal functions know the size
of the thing they are comparing.  For instance, f32equal already
knows its inputs are 4 bytes in size.
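
Continuing the sketch above (so unsafe is already imported), a
fixed-size comparison can hard-code the 4-byte load:

   func f32equal(p, q unsafe.Pointer) bool {
       return *(*float32)(p) == *(*float32)(q)
   }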

For cases where the size is not known statically, we allocate a
closure (one for each size needed). The closure points to an assembly
stub that reads the size out of the closure and calls generic code
that takes a size argument.
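
The same idea can be sketched in ordinary Go with a real closure.
All names here are hypothetical; the runtime instead points the
closure at an assembly stub so the size is loaded directly from the
closure body, with no extra Go-level call:

   package main

   import (
       "bytes"
       "fmt"
       "unsafe"
   )

   // genericEqual stands in for the generic, size-taking routine
   // (memequal in the runtime).
   func genericEqual(p, q unsafe.Pointer, size uintptr) bool {
       return bytes.Equal(unsafe.Slice((*byte)(p), size),
           unsafe.Slice((*byte)(q), size))
   }

   // makeEqualClosure builds a sizeless equal function for one fixed
   // size; one closure is allocated per distinct size needed.
   func makeEqualClosure(size uintptr) func(p, q unsafe.Pointer) bool {
       return func(p, q unsafe.Pointer) bool {
           return genericEqual(p, q, size)
       }
   }

   func main() {
       a, b := [8]byte{1, 2, 3}, [8]byte{1, 2, 3}
       eq8 := makeEqualClosure(8)
       fmt.Println(eq8(unsafe.Pointer(&a), unsafe.Pointer(&b))) // true
   }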

Reduces the size of the go binary by 0.07%.  Performance impact
is not measurable.

Change-Id: I6e00adf3dde7ad2974adbcff0ee91e86d2194fec
Reviewed-on: https://go-review.googlesource.com/2392
Reviewed-by: Russ Cox <rsc@golang.org>
Keith Randall 2015-01-06 16:42:48 -08:00
parent 60801c4853
commit d5e4c4061b
17 changed files with 410 additions and 183 deletions

@@ -252,7 +252,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
return unsafe.Pointer(t.elem.zero)
}
alg := t.key.alg
-hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -274,7 +274,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
-if alg.equal(key, k, uintptr(t.key.size)) {
+if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
@@ -300,7 +300,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
return unsafe.Pointer(t.elem.zero), false
}
alg := t.key.alg
-hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -322,7 +322,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
-if alg.equal(key, k, uintptr(t.key.size)) {
+if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
@@ -343,7 +343,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
return nil, nil
}
alg := t.key.alg
-hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -365,7 +365,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
-if alg.equal(key, k, uintptr(t.key.size)) {
+if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
@@ -393,7 +393,7 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
}
alg := t.key.alg
-hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(key, uintptr(h.hash0))
if h.buckets == nil {
if checkgc {
@@ -431,7 +431,7 @@ again:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
-if !alg.equal(key, k2, uintptr(t.key.size)) {
+if !alg.equal(key, k2) {
continue
}
// already have a mapping for key. Update it.
@@ -503,7 +503,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
return
}
alg := t.key.alg
-hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(key, uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
growWork(t, h, bucket)
@@ -523,7 +523,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
-if !alg.equal(key, k2, uintptr(t.key.size)) {
+if !alg.equal(key, k2) {
continue
}
memclr(k, uintptr(t.keysize))
@@ -660,10 +660,10 @@ next:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
-if t.reflexivekey || alg.equal(k2, k2, uintptr(t.key.size)) {
+if t.reflexivekey || alg.equal(k2, k2) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
-hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(k2, uintptr(h.hash0))
if hash&(uintptr(1)<<it.B-1) != checkBucket {
continue
}
@@ -697,7 +697,7 @@ next:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
-if t.reflexivekey || alg.equal(k2, k2, uintptr(t.key.size)) {
+if t.reflexivekey || alg.equal(k2, k2) {
// Check the current hash table for the data.
// This code handles the case where the key
// has been deleted, updated, or deleted and reinserted.
@@ -804,9 +804,9 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
}
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
-hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
+hash := alg.hash(k2, uintptr(h.hash0))
if h.flags&iterator != 0 {
-if !t.reflexivekey && !alg.equal(k2, k2, uintptr(t.key.size)) {
+if !t.reflexivekey && !alg.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
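
For context on the reflexivekey / equal(k2, k2) checks in the hunks
above: a key that is not equal to itself (a floating-point NaN) gets
a fresh, non-reproducible hash on each insertion and can never be
matched by a lookup, so iteration and evacuation must not assume its
hash can be recomputed. A small demonstration of the user-visible
behavior this code supports:

   package main

   import (
       "fmt"
       "math"
   )

   func main() {
       m := map[float64]int{}
       nan := math.NaN()
       m[nan] = 1
       m[nan] = 2 // NaN != NaN: each assignment creates a new entry

       fmt.Println(len(m)) // 2
       v, ok := m[nan]
       fmt.Println(v, ok) // 0 false: a NaN key is never found again
   }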