// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package reflect_test

import (
	"bytes"
	"encoding/base64"
	"flag"
	"fmt"
	"go/token"
	"io"
	"math"
	"math/rand"
	"os"
	. "reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)
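
// sink receives results from tests and benchmarks in this file so the
// compiler cannot optimize the measured work away (assumed purpose; this is
// a common Go benchmarking idiom, and the variable is otherwise undocumented).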
var sink interface{}

func TestBool(t *testing.T) {
	v := ValueOf(true)
	if v.Bool() != true {
		t.Fatal("ValueOf(true).Bool() = false")
	}
}
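
// integer, T, and pair are small helper types shared by the tests below;
// pair couples an input value with the string its type or value is expected
// to render as.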
type integer int

type T struct {
	a int
	b float64
	c string
	d *int
}

type pair struct {
	i interface{}
	s string
}

func assert(t *testing.T, s, want string) {
	if s != want {
		t.Errorf("have %#q want %#q", s, want)
	}
}
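
// typeTests pairs a struct whose single field x has the type under test with
// the string that Type.String is expected to return for that field's type.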
var typeTests = []pair{
	{struct{ x int }{}, "int"},
	{struct{ x int8 }{}, "int8"},
	{struct{ x int16 }{}, "int16"},
	{struct{ x int32 }{}, "int32"},
	{struct{ x int64 }{}, "int64"},
	{struct{ x uint }{}, "uint"},
	{struct{ x uint8 }{}, "uint8"},
	{struct{ x uint16 }{}, "uint16"},
	{struct{ x uint32 }{}, "uint32"},
	{struct{ x uint64 }{}, "uint64"},
	{struct{ x float32 }{}, "float32"},
	{struct{ x float64 }{}, "float64"},
	{struct{ x int8 }{}, "int8"},
	{struct{ x (**int8) }{}, "**int8"},
	{struct{ x (**integer) }{}, "**reflect_test.integer"},
	{struct{ x ([32]int32) }{}, "[32]int32"},
	{struct{ x ([]int8) }{}, "[]int8"},
	{struct{ x (map[string]int32) }{}, "map[string]int32"},
	{struct{ x (chan<- string) }{}, "chan<- string"},
	{struct {
		x struct {
			c chan *int32
			d float32
		}
	}{},
		"struct { c chan *int32; d float32 }",
	},
	{struct{ x (func(a int8, b int32)) }{}, "func(int8, int32)"},
	{struct {
		x struct {
			c func(chan *integer, *int8)
		}
	}{},
		"struct { c func(chan *reflect_test.integer, *int8) }",
	},
	{struct {
		x struct {
			a int8
			b int32
		}
	}{},
		"struct { a int8; b int32 }",
	},
	{struct {
		x struct {
			a int8
			b int8
			c int32
		}
	}{},
		"struct { a int8; b int8; c int32 }",
	},
	{struct {
		x struct {
			a int8
			b int8
			c int8
			d int32
		}
	}{},
		"struct { a int8; b int8; c int8; d int32 }",
	},
	{struct {
		x struct {
			a int8
			b int8
			c int8
			d int8
			e int32
		}
	}{},
		"struct { a int8; b int8; c int8; d int8; e int32 }",
	},
	{struct {
		x struct {
			a int8
			b int8
			c int8
			d int8
			e int8
			f int32
		}
	}{},
		"struct { a int8; b int8; c int8; d int8; e int8; f int32 }",
	},
	{struct {
		x struct {
			a int8 `reflect:"hi there"`
		}
	}{},
		`struct { a int8 "reflect:\"hi there\"" }`,
	},
	{struct {
		x struct {
			a int8 `reflect:"hi \x00there\t\n\"\\"`
		}
	}{},
		`struct { a int8 "reflect:\"hi \\x00there\\t\\n\\\"\\\\\"" }`,
	},
	{struct {
		x struct {
			f func(args ...int)
		}
	}{},
		"struct { f func(...int) }",
	},
	{struct {
		x (interface {
			a(func(func(int) int) func(func(int)) int)
			b()
		})
	}{},
		"interface { reflect_test.a(func(func(int) int) func(func(int)) int); reflect_test.b() }",
	},
	{struct {
		x struct {
			int32
			int64
		}
	}{},
		"struct { int32; int64 }",
	},
}
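
// valueTests pairs a pointer to a fresh zero value with the string expected
// from valueToString after the Set tests below store a known value through it.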
var valueTests = []pair{
	{new(int), "132"},
	{new(int8), "8"},
	{new(int16), "16"},
	{new(int32), "32"},
	{new(int64), "64"},
	{new(uint), "132"},
	{new(uint8), "8"},
	{new(uint16), "16"},
	{new(uint32), "32"},
	{new(uint64), "64"},
	{new(float32), "256.25"},
	{new(float64), "512.125"},
	{new(complex64), "532.125+10i"},
	{new(complex128), "564.25+1i"},
	{new(string), "stringy cheese"},
	{new(bool), "true"},
	{new(*int8), "*int8(0)"},
	{new(**int8), "**int8(0)"},
	{new([5]int32), "[5]int32{0, 0, 0, 0, 0}"},
	{new(**integer), "**reflect_test.integer(0)"},
	{new(map[string]int32), "map[string]int32{<can't iterate on maps>}"},
	{new(chan<- string), "chan<- string"},
	{new(func(a int8, b int32)), "func(int8, int32)(0)"},
	{new(struct {
		c chan *int32
		d float32
	}),
		"struct { c chan *int32; d float32 }{chan *int32, 0}",
	},
	{new(struct{ c func(chan *integer, *int8) }),
		"struct { c func(chan *reflect_test.integer, *int8) }{func(chan *reflect_test.integer, *int8)(0)}",
	},
	{new(struct {
		a int8
		b int32
	}),
		"struct { a int8; b int32 }{0, 0}",
	},
	{new(struct {
		a int8
		b int8
		c int32
	}),
		"struct { a int8; b int8; c int32 }{0, 0, 0}",
	},
}
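
// testType reports an error, tagged with the test index i, when typ.String()
// does not match want.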
func testType(t *testing.T, i int, typ Type, want string) {
	s := typ.String()
	if s != want {
		t.Errorf("#%d: have %#q, want %#q", i, s, want)
	}
}
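
// TestTypes checks Type.String for each entry in typeTests, reading the type
// of the wrapper struct's field x.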
func TestTypes(t *testing.T) {
	for i, tt := range typeTests {
		testType(t, i, ValueOf(tt.i).Field(0).Type(), tt.s)
	}
}
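
// TestSet stores a known value through the typed SetInt/SetUint/etc. setters
// on the addressable element and compares the formatted result against
// valueTests; valueToString is a formatting helper defined elsewhere in this
// package's tests.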
func TestSet(t *testing.T) {
	for i, tt := range valueTests {
		v := ValueOf(tt.i)
		v = v.Elem()
		switch v.Kind() {
		case Int:
			v.SetInt(132)
		case Int8:
			v.SetInt(8)
		case Int16:
			v.SetInt(16)
		case Int32:
			v.SetInt(32)
		case Int64:
			v.SetInt(64)
		case Uint:
			v.SetUint(132)
		case Uint8:
			v.SetUint(8)
		case Uint16:
			v.SetUint(16)
		case Uint32:
			v.SetUint(32)
		case Uint64:
			v.SetUint(64)
		case Float32:
			v.SetFloat(256.25)
		case Float64:
			v.SetFloat(512.125)
		case Complex64:
			v.SetComplex(532.125 + 10i)
		case Complex128:
			v.SetComplex(564.25 + 1i)
		case String:
			v.SetString("stringy cheese")
		case Bool:
			v.SetBool(true)
		}
		s := valueToString(v)
		if s != tt.s {
			t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
		}
	}
}
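
// TestSetValue mirrors TestSet but assigns through Value.Set with a freshly
// constructed Value of the identical type instead of the typed setters.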
|
|
|
|
|
|
2009-10-21 19:51:27 -07:00
|
|
|
func TestSetValue(t *testing.T) {
|
|
|
|
|
for i, tt := range valueTests {
|
2011-04-25 13:39:16 -04:00
|
|
|
v := ValueOf(tt.i).Elem()
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
switch v.Kind() {
|
|
|
|
|
case Int:
|
2011-04-25 13:39:16 -04:00
|
|
|
v.Set(ValueOf(int(132)))
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
		case Int8:
			v.Set(ValueOf(int8(8)))
		case Int16:
			v.Set(ValueOf(int16(16)))
		case Int32:
			v.Set(ValueOf(int32(32)))
		case Int64:
			v.Set(ValueOf(int64(64)))
		case Uint:
			v.Set(ValueOf(uint(132)))
		case Uint8:
			v.Set(ValueOf(uint8(8)))
		case Uint16:
			v.Set(ValueOf(uint16(16)))
		case Uint32:
			v.Set(ValueOf(uint32(32)))
		case Uint64:
			v.Set(ValueOf(uint64(64)))
		case Float32:
			v.Set(ValueOf(float32(256.25)))
		case Float64:
			v.Set(ValueOf(512.125))
		case Complex64:
			v.Set(ValueOf(complex64(532.125 + 10i)))
		case Complex128:
			v.Set(ValueOf(complex128(564.25 + 1i)))
		case String:
			v.Set(ValueOf("stringy cheese"))
		case Bool:
			v.Set(ValueOf(true))
		}
		s := valueToString(v)
		if s != tt.s {
			t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
		}
	}
}
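
// describeKind is a minimal illustrative sketch, not one of the original
// tests: it shows the Kind-based dispatch the test above relies on, where
// code switches on v.Kind() and calls the matching typed getter instead of
// type-asserting the Value itself.
func describeKind(v Value) string {
	switch v.Kind() {
	case Int, Int8, Int16, Int32, Int64:
		return "int " + strconv.FormatInt(v.Int(), 10)
	case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
		return "uint " + strconv.FormatUint(v.Uint(), 10)
	case String:
		return "string " + v.String()
	default:
		return v.Kind().String()
	}
}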

func TestCanSetField(t *testing.T) {
	type embed struct{ x, X int }
	type Embed struct{ x, X int }
	type S1 struct {
		embed
		x, X int
	}
	type S2 struct {
		*embed
		x, X int
	}
	type S3 struct {
		Embed
		x, X int
	}
	type S4 struct {
		*Embed
		x, X int
	}

	type testCase struct {
		index  []int
		canSet bool
	}
	tests := []struct {
		val   Value
		cases []testCase
	}{{
		val: ValueOf(&S1{}),
		cases: []testCase{
			{[]int{0}, false},
			{[]int{0, 0}, false},
			{[]int{0, 1}, true},
			{[]int{1}, false},
			{[]int{2}, true},
		},
	}, {
		val: ValueOf(&S2{embed: &embed{}}),
		cases: []testCase{
			{[]int{0}, false},
			{[]int{0, 0}, false},
			{[]int{0, 1}, true},
			{[]int{1}, false},
			{[]int{2}, true},
		},
	}, {
		val: ValueOf(&S3{}),
		cases: []testCase{
			{[]int{0}, true},
			{[]int{0, 0}, false},
			{[]int{0, 1}, true},
			{[]int{1}, false},
			{[]int{2}, true},
		},
	}, {
		val: ValueOf(&S4{Embed: &Embed{}}),
		cases: []testCase{
			{[]int{0}, true},
			{[]int{0, 0}, false},
			{[]int{0, 1}, true},
			{[]int{1}, false},
			{[]int{2}, true},
		},
	}}

	for _, tt := range tests {
		t.Run(tt.val.Type().Name(), func(t *testing.T) {
			for _, tc := range tt.cases {
				f := tt.val
				for _, i := range tc.index {
					if f.Kind() == Ptr {
						f = f.Elem()
					}
					f = f.Field(i)
				}
				if got := f.CanSet(); got != tc.canSet {
					t.Errorf("CanSet() = %v, want %v", got, tc.canSet)
				}
			}
		})
	}
}
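
// The cases above pin down the settability rule: an exported field (X, or
// an exported embedded field such as Embed) is settable even when it is
// promoted through an unexported embedded struct, while unexported fields
// (x, and the embed field itself) never are. For example, using the S1
// from the test above:
//
//	v := ValueOf(&S1{}).Elem()
//	v.FieldByName("X").CanSet() // true: exported, addressable
//	v.FieldByName("x").CanSet() // false: unexported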

var _i = 7

var valueToStringTests = []pair{
	{123, "123"},
	{123.5, "123.5"},
	{byte(123), "123"},
	{"abc", "abc"},
	{T{123, 456.75, "hello", &_i}, "reflect_test.T{123, 456.75, hello, *int(&7)}"},
	{new(chan *T), "*chan *reflect_test.T(&chan *reflect_test.T)"},
	{[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
	{&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[10]int(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
	{[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
	{&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[]int(&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
}

// TestValueToString renders each test input with valueToString (a helper
// defined elsewhere in these tests) and compares the result against the
// expected string from the table above.
func TestValueToString(t *testing.T) {
	for i, test := range valueToStringTests {
		s := valueToString(ValueOf(test.i))
		if s != test.s {
			t.Errorf("#%d: have %#q, want %#q", i, s, test.s)
		}
	}
}

func TestArrayElemSet(t *testing.T) {
	v := ValueOf(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}).Elem()
	v.Index(4).SetInt(123)
	s := valueToString(v)
	const want = "[10]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}"
	if s != want {
		t.Errorf("[10]int: have %#q want %#q", s, want)
	}

	v = ValueOf([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
	v.Index(4).SetInt(123)
	s = valueToString(v)
	const want1 = "[]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}"
	if s != want1 {
		t.Errorf("[]int: have %#q want %#q", s, want1)
	}
}
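
// Note the asymmetry above: the [10]int is reached through a pointer plus
// Elem() because ValueOf copies its argument, and elements of that copied
// array would not be settable; the slice can be passed by value, since
// Index follows the slice's data pointer to the shared backing array.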

func TestPtrPointTo(t *testing.T) {
	var ip *int32
	var i int32 = 1234
	vip := ValueOf(&ip)
	vi := ValueOf(&i).Elem()
	vip.Elem().Set(vi.Addr())
	if *ip != 1234 {
		t.Errorf("got %d, want 1234", *ip)
	}

	ip = nil
	vp := ValueOf(&ip).Elem()
	vp.Set(Zero(vp.Type()))
	if ip != nil {
		t.Errorf("got non-nil (%p), want nil", ip)
	}
}

func TestPtrSetNil(t *testing.T) {
	var i int32 = 1234
	ip := &i
	vip := ValueOf(&ip)
	vip.Elem().Set(Zero(vip.Elem().Type()))
	if ip != nil {
		t.Errorf("got non-nil (%d), want nil", *ip)
	}
}

func TestMapSetNil(t *testing.T) {
	m := make(map[string]int)
	vm := ValueOf(&m)
	vm.Elem().Set(Zero(vm.Elem().Type()))
	if m != nil {
		t.Errorf("got non-nil (%p), want nil", m)
	}
}
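
// In the tests above, Set(Zero(...)) is the reflect idiom for assigning
// nil: Zero returns the zero Value for a type, which for pointers and maps
// is nil. A minimal sketch of the same idiom, assuming v is an addressable
// Value of a nilable kind:
//
//	func setNil(v Value) { v.Set(Zero(v.Type())) }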
|
|
|
|
|
|
2009-11-08 21:57:59 -08:00
|
|
|
func TestAll(t *testing.T) {
|
2011-04-25 13:39:16 -04:00
|
|
|
testType(t, 1, TypeOf((int8)(0)), "int8")
|
|
|
|
|
testType(t, 2, TypeOf((*int8)(nil)).Elem(), "int8")
|
2009-07-07 11:03:12 -07:00
|
|
|
|
2011-04-25 13:39:16 -04:00
|
|
|
typ := TypeOf((*struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
c chan *int32
|
|
|
|
|
d float32
|
|
|
|
|
})(nil))
|
|
|
|
|
testType(t, 3, typ, "*struct { c chan *int32; d float32 }")
	etyp := typ.Elem()
	testType(t, 4, etyp, "struct { c chan *int32; d float32 }")
	styp := etyp
	f := styp.Field(0)
	testType(t, 5, f.Type, "chan *int32")

	f, present := styp.FieldByName("d")
	if !present {
		t.Errorf("FieldByName says present field is absent")
	}
	testType(t, 6, f.Type, "float32")

	f, present = styp.FieldByName("absent")
	if present {
		t.Errorf("FieldByName says absent field is present")
	}

	typ = TypeOf([32]int32{})
	testType(t, 7, typ, "[32]int32")
	testType(t, 8, typ.Elem(), "int32")

	typ = TypeOf((map[string]*int32)(nil))
	testType(t, 9, typ, "map[string]*int32")
	mtyp := typ
	testType(t, 10, mtyp.Key(), "string")
	testType(t, 11, mtyp.Elem(), "*int32")

	typ = TypeOf((chan<- string)(nil))
	testType(t, 12, typ, "chan<- string")
	testType(t, 13, typ.Elem(), "string")

	// make sure tag strings are not part of element type
	typ = TypeOf(struct {
		d []uint32 `reflect:"TAG"`
	}{}).Field(0).Type
	testType(t, 14, typ, "[]uint32")
}
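
An illustrative aside, not part of the original file: the tag check in
testType(t, 14, ...) above works because a struct tag hangs off the
StructField, not off the field's Type. A minimal sketch, assuming x is a
struct:

// tagOfFirstField is an illustrative sketch, not part of the original file.
// The tag is carried by the StructField; f.Type ("[]uint32" above) carries
// no tag information, which is exactly what the test asserts.
func tagOfFirstField(x interface{}) string {
	f := TypeOf(x).Field(0) // panics if x is not a struct
	return string(f.Tag)    // e.g. `reflect:"TAG"`
}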

func TestInterfaceGet(t *testing.T) {
	var inter struct {
		E interface{}
	}
	inter.E = 123.456

	v1 := ValueOf(&inter)
	v2 := v1.Elem().Field(0)
	assert(t, v2.Type().String(), "interface {}")
	i2 := v2.Interface()
	v3 := ValueOf(i2)
	assert(t, v3.Type().String(), "float64")
}

func TestInterfaceValue(t *testing.T) {
	var inter struct {
		E interface{}
	}
	inter.E = 123.456

	v1 := ValueOf(&inter)
	v2 := v1.Elem().Field(0)
	assert(t, v2.Type().String(), "interface {}")
	v3 := v2.Elem()
	assert(t, v3.Type().String(), "float64")

	i3 := v2.Interface()
	if _, ok := i3.(float64); !ok {
		t.Error("v2.Interface() did not return float64, got ", TypeOf(i3))
	}
}
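
The two tests above recover the dynamic float64 by different routes:
ValueOf(v2.Interface()) in TestInterfaceGet and v2.Elem() in
TestInterfaceValue. A minimal illustrative sketch of the Elem route, not
part of the original file:

// dynamicKind is an illustrative sketch, not part of the original file.
// For an interface-kind Value, Elem returns a Value for the dynamic value
// stored inside it, so both routes above land on Float64.
func dynamicKind(v Value) Kind {
	if v.Kind() != Interface {
		return v.Kind()
	}
	return v.Elem().Kind()
}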

func TestFunctionValue(t *testing.T) {
	var x interface{} = func() {}
	v := ValueOf(x)
	if fmt.Sprint(v.Interface()) != fmt.Sprint(x) {
		t.Fatalf("TestFunction returned wrong pointer")
	}
	assert(t, v.Type().String(), "func()")
}

var appendTests = []struct {
	orig, extra []int
}{
	{make([]int, 2, 4), []int{22}},
	{make([]int, 2, 4), []int{22, 33, 44}},
}

func sameInts(x, y []int) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xx := range x {
		if xx != y[i] {
			return false
		}
	}
	return true
}

func TestAppend(t *testing.T) {
	for i, test := range appendTests {
		origLen, extraLen := len(test.orig), len(test.extra)
		want := append(test.orig, test.extra...)

		// Convert extra from []int to []Value.
		e0 := make([]Value, len(test.extra))
		for j, e := range test.extra {
			e0[j] = ValueOf(e)
		}
		// Convert extra from []int to a slice Value (the old *SliceValue is gone).
		e1 := ValueOf(test.extra)
		// Test Append.
		a0 := ValueOf(test.orig)
		have0 := Append(a0, e0...).Interface().([]int)
		if !sameInts(have0, want) {
			t.Errorf("Append #%d: have %v, want %v (%p %p)", i, have0, want, test.orig, have0)
		}
		// Check that the orig and extra slices were not modified.
		if len(test.orig) != origLen {
			t.Errorf("Append #%d origLen: have %v, want %v", i, len(test.orig), origLen)
		}
		if len(test.extra) != extraLen {
			t.Errorf("Append #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
		}

		// Test AppendSlice.
		a1 := ValueOf(test.orig)
		have1 := AppendSlice(a1, e1).Interface().([]int)
		if !sameInts(have1, want) {
			t.Errorf("AppendSlice #%d: have %v, want %v", i, have1, want)
		}
		// Check that the orig and extra slices were not modified.
		if len(test.orig) != origLen {
			t.Errorf("AppendSlice #%d origLen: have %v, want %v", i, len(test.orig), origLen)
		}
		if len(test.extra) != extraLen {
			t.Errorf("AppendSlice #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
		}
	}
}
|
|
|
|
|
|
|
|
|
|
func TestCopy(t *testing.T) {
|
2009-12-15 15:40:16 -08:00
|
|
|
a := []int{1, 2, 3, 4, 10, 9, 8, 7}
|
|
|
|
|
b := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
|
|
|
|
|
c := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
|
2008-12-10 15:55:59 -08:00
|
|
|
for i := 0; i < len(b); i++ {
|
|
|
|
|
if b[i] != c[i] {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Fatalf("b != c before test")
|
2008-12-10 15:55:59 -08:00
|
|
|
}
|
|
|
|
|
}
|
	a1 := a
	b1 := b
	aa := ValueOf(&a1).Elem()
	ab := ValueOf(&b1).Elem()
	for tocopy := 1; tocopy <= 7; tocopy++ {
		aa.SetLen(tocopy)
		Copy(ab, aa)
		aa.SetLen(8)
		for i := 0; i < tocopy; i++ {
			if a[i] != b[i] {
				t.Errorf("(i) tocopy=%d a[%d]=%d, b[%d]=%d",
					tocopy, i, a[i], i, b[i])
			}
		}
		for i := tocopy; i < len(b); i++ {
			if b[i] != c[i] {
				if i < len(a) {
					t.Errorf("(ii) tocopy=%d a[%d]=%d, b[%d]=%d, c[%d]=%d",
						tocopy, i, a[i], i, b[i], i, c[i])
				} else {
					t.Errorf("(iii) tocopy=%d b[%d]=%d, c[%d]=%d",
						tocopy, i, b[i], i, c[i])
				}
			} else {
				t.Logf("tocopy=%d elem %d is okay\n", tocopy, i)
			}
		}
	}
}

func TestCopyString(t *testing.T) {
	t.Run("Slice", func(t *testing.T) {
		s := bytes.Repeat([]byte{'_'}, 8)
		val := ValueOf(s)

		n := Copy(val, ValueOf(""))
		if expecting := []byte("________"); n != 0 || !bytes.Equal(s, expecting) {
			t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s, expecting)
		}

		n = Copy(val, ValueOf("hello"))
		if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s, expecting) {
			t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s, expecting)
		}

		n = Copy(val, ValueOf("helloworld"))
		if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s, expecting) {
			t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s, expecting)
		}
	})
	t.Run("Array", func(t *testing.T) {
		s := [...]byte{'_', '_', '_', '_', '_', '_', '_', '_'}
		val := ValueOf(&s).Elem()

		n := Copy(val, ValueOf(""))
		if expecting := []byte("________"); n != 0 || !bytes.Equal(s[:], expecting) {
			t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s[:], expecting)
		}

		n = Copy(val, ValueOf("hello"))
		if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s[:], expecting) {
			t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s[:], expecting)
		}

		n = Copy(val, ValueOf("helloworld"))
		if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s[:], expecting) {
			t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s[:], expecting)
		}
	})
}

func TestCopyArray(t *testing.T) {
	a := [8]int{1, 2, 3, 4, 10, 9, 8, 7}
	b := [11]int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
	c := b
	aa := ValueOf(&a).Elem()
	ab := ValueOf(&b).Elem()
	Copy(ab, aa)
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] {
			t.Errorf("(i) a[%d]=%d, b[%d]=%d", i, a[i], i, b[i])
		}
	}
	for i := len(a); i < len(b); i++ {
		if b[i] != c[i] {
			t.Errorf("(ii) b[%d]=%d, c[%d]=%d", i, b[i], i, c[i])
		} else {
			t.Logf("elem %d is okay\n", i)
		}
	}
}

func TestBigUnnamedStruct(t *testing.T) {
	b := struct{ a, b, c, d int64 }{1, 2, 3, 4}
	v := ValueOf(b)
	b1 := v.Interface().(struct {
		a, b, c, d int64
	})
	if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d {
		t.Errorf("ValueOf(%v).Interface().(*Big) = %v", b, b1)
	}
}

type big struct {
	a, b, c, d, e int64
}

func TestBigStruct(t *testing.T) {
	b := big{1, 2, 3, 4, 5}
	v := ValueOf(b)
	b1 := v.Interface().(big)
	if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d || b1.e != b.e {
		t.Errorf("ValueOf(%v).Interface().(big) = %v", b, b1)
	}
}

type Basic struct {
	x int
	y float32
}

type NotBasic Basic

type DeepEqualTest struct {
	a, b interface{}
	eq   bool
}

// Simple functions for DeepEqual tests.
var (
	fn1 func()             // nil.
	fn2 func()             // nil.
	fn3 = func() { fn1() } // Not nil.
)

type self struct{}

type Loop *Loop
type Loopy interface{}

var loop1, loop2 Loop
var loopy1, loopy2 Loopy

func init() {
	loop1 = &loop2
	loop2 = &loop1

	loopy1 = &loopy2
	loopy2 = &loopy1
}

var deepEqualTests = []DeepEqualTest{
	// Equalities
	{nil, nil, true},
	{1, 1, true},
	{int32(1), int32(1), true},
	{0.5, 0.5, true},
	{float32(0.5), float32(0.5), true},
	{"hello", "hello", true},
	{make([]int, 10), make([]int, 10), true},
	{&[3]int{1, 2, 3}, &[3]int{1, 2, 3}, true},
	{Basic{1, 0.5}, Basic{1, 0.5}, true},
	{error(nil), error(nil), true},
	{map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true},
	{fn1, fn2, true},

	// Inequalities
	{1, 2, false},
	{int32(1), int32(2), false},
	{0.5, 0.6, false},
	{float32(0.5), float32(0.6), false},
	{"hello", "hey", false},
	{make([]int, 10), make([]int, 11), false},
	{&[3]int{1, 2, 3}, &[3]int{1, 2, 4}, false},
	{Basic{1, 0.5}, Basic{1, 0.6}, false},
	{Basic{1, 0}, Basic{2, 0}, false},
	{map[int]string{1: "one", 3: "two"}, map[int]string{2: "two", 1: "one"}, false},
	{map[int]string{1: "one", 2: "txo"}, map[int]string{2: "two", 1: "one"}, false},
	{map[int]string{1: "one"}, map[int]string{2: "two", 1: "one"}, false},
	{map[int]string{2: "two", 1: "one"}, map[int]string{1: "one"}, false},
	{nil, 1, false},
	{1, nil, false},
	{fn1, fn3, false},
	{fn3, fn3, false},
	{[][]int{{1}}, [][]int{{2}}, false},
	{math.NaN(), math.NaN(), false},
	{&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
	{&[1]float64{math.NaN()}, self{}, true},
	{[]float64{math.NaN()}, []float64{math.NaN()}, false},
	{[]float64{math.NaN()}, self{}, true},
	{map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
	{map[float64]float64{math.NaN(): 1}, self{}, true},

	// Nil vs empty: not the same.
	{[]int{}, []int(nil), false},
	{[]int{}, []int{}, true},
	{[]int(nil), []int(nil), true},
	{map[int]int{}, map[int]int(nil), false},
	{map[int]int{}, map[int]int{}, true},
	{map[int]int(nil), map[int]int(nil), true},

	// Mismatched types
	{1, 1.0, false},
	{int32(1), int64(1), false},
	{0.5, "hello", false},
	{[]int{1, 2, 3}, [3]int{1, 2, 3}, false},
	{&[3]interface{}{1, 2, 4}, &[3]interface{}{1, 2, "s"}, false},
	{Basic{1, 0.5}, NotBasic{1, 0.5}, false},
	{map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false},

	// Possible loops.
	{&loop1, &loop1, true},
	{&loop1, &loop2, true},
	{&loopy1, &loopy1, true},
	{&loopy1, &loopy2, true},
}

func TestDeepEqual(t *testing.T) {
	for _, test := range deepEqualTests {
		if test.b == (self{}) {
			test.b = test.a
		}
		if r := DeepEqual(test.a, test.b); r != test.eq {
			t.Errorf("DeepEqual(%v, %v) = %v, want %v", test.a, test.b, r, test.eq)
		}
	}
}

reflect: new Type and Value definitions

Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().

Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling v.Recv() when v.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.

Many methods have been renamed, most due to signature conflicts:

	OLD                            NEW
	v.(*ArrayValue).Elem           v.Index
	v.(*BoolValue).Get             v.Bool
	v.(*BoolValue).Set             v.SetBool
	v.(*ChanType).Dir              v.ChanDir
	v.(*ChanValue).Get             v.Pointer
	v.(*ComplexValue).Get          v.Complex
	v.(*ComplexValue).Overflow     v.OverflowComplex
	v.(*ComplexValue).Set          v.SetComplex
	v.(*FloatValue).Get            v.Float
	v.(*FloatValue).Overflow       v.OverflowFloat
	v.(*FloatValue).Set            v.SetFloat
	v.(*FuncValue).Get             v.Pointer
	v.(*InterfaceValue).Get        v.InterfaceData
	v.(*IntValue).Get              v.Int
	v.(*IntValue).Overflow         v.OverflowInt
	v.(*IntValue).Set              v.SetInt
	v.(*MapValue).Elem             v.MapIndex
	v.(*MapValue).Get              v.Pointer
	v.(*MapValue).Keys             v.MapKeys
	v.(*MapValue).SetElem          v.SetMapIndex
	v.(*PtrValue).Get              v.Pointer
	v.(*SliceValue).Elem           v.Index
	v.(*SliceValue).Get            v.Pointer
	v.(*StringValue).Get           v.String
	v.(*StringValue).Set           v.SetString
	v.(*UintValue).Get             v.Uint
	v.(*UintValue).Overflow        v.OverflowUint
	v.(*UintValue).Set             v.SetUint
	v.(*UnsafePointerValue).Get    v.Pointer
	v.(*UnsafePointerValue).Set    v.SetPointer

Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.

Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).

R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
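
The mapping above collapses the old per-type wrapper values into one Value inspected via Kind. A minimal, illustrative sketch of the resulting pattern — not part of this test file, using the current name ValueOf for what was NewValue:

	package main

	import (
		"fmt"
		"reflect"
	)

	// describe switches on v.Kind() instead of type-switching on v,
	// and uses IsValid() where the old API compared v to nil.
	func describe(x interface{}) {
		v := reflect.ValueOf(x)
		if !v.IsValid() {
			fmt.Println("zero Value")
			return
		}
		switch v.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			fmt.Println("int:", v.Int())
		case reflect.String:
			fmt.Println("string:", v.String())
		case reflect.Slice:
			fmt.Println("slice, len", v.Len())
		default:
			fmt.Println("kind:", v.Kind())
		}
	}

	func main() {
		describe(42)          // int: 42
		describe("hello")     // string: hello
		describe([]int{1, 2}) // slice, len 2
		describe(nil)         // zero Value: ValueOf(nil) is Value{}
	}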
func TestTypeOf(t *testing.T) {
	// Special case for nil
	if typ := TypeOf(nil); typ != nil {
		t.Errorf("expected nil type for nil value; got %v", typ)
	}
	for _, test := range deepEqualTests {
		v := ValueOf(test.a)
		if !v.IsValid() {
			continue
		}
		typ := TypeOf(test.a)
		if typ != v.Type() {
			t.Errorf("TypeOf(%v) = %v, but ValueOf(%v).Type() = %v", test.a, typ, test.a, v.Type())
		}
	}
}

type Recursive struct {
	x int
	r *Recursive
}

func TestDeepEqualRecursiveStruct(t *testing.T) {
	a, b := new(Recursive), new(Recursive)
	*a = Recursive{12, a}
	*b = Recursive{12, b}
	if !DeepEqual(a, b) {
		t.Error("DeepEqual(recursive same) = false, want true")
	}
}

type _Complex struct {
	a int
	b [3]*_Complex
	c *string
	d map[float64]float64
}

func TestDeepEqualComplexStruct(t *testing.T) {
	m := make(map[float64]float64)
	stra, strb := "hello", "hello"
	a, b := new(_Complex), new(_Complex)
	*a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m}
	*b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m}
	if !DeepEqual(a, b) {
		t.Error("DeepEqual(complex same) = false, want true")
	}
}

func TestDeepEqualComplexStructInequality(t *testing.T) {
	m := make(map[float64]float64)
	stra, strb := "hello", "helloo" // Difference is here
	a, b := new(_Complex), new(_Complex)
	*a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m}
	*b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m}
	if DeepEqual(a, b) {
		t.Error("DeepEqual(complex different) = true, want false")
	}
}

type UnexpT struct {
	m map[int]int
}

func TestDeepEqualUnexportedMap(t *testing.T) {
	// Check that DeepEqual can look at unexported fields.
	x1 := UnexpT{map[int]int{1: 2}}
	x2 := UnexpT{map[int]int{1: 2}}
	if !DeepEqual(&x1, &x2) {
		t.Error("DeepEqual(x1, x2) = false, want true")
	}

	y1 := UnexpT{map[int]int{2: 3}}
	if DeepEqual(&x1, &y1) {
		t.Error("DeepEqual(x1, y1) = true, want false")
	}
}

func check2ndField(x interface{}, offs uintptr, t *testing.T) {
	s := ValueOf(x)
	f := s.Type().Field(1)
	if f.Offset != offs {
		t.Error("mismatched offsets in structure alignment:", f.Offset, offs)
	}
}

// Check that structure alignment & offsets viewed through reflect agree with those
// from the compiler itself.
func TestAlignment(t *testing.T) {
	type T1inner struct {
		a int
	}
	type T1 struct {
		T1inner
		f int
	}
	type T2inner struct {
		a, b int
	}
	type T2 struct {
		T2inner
		f int
	}

	x := T1{T1inner{2}, 17}
	check2ndField(x, uintptr(unsafe.Pointer(&x.f))-uintptr(unsafe.Pointer(&x)), t)

	x1 := T2{T2inner{2, 3}, 17}
	check2ndField(x1, uintptr(unsafe.Pointer(&x1.f))-uintptr(unsafe.Pointer(&x1)), t)
}

func Nil(a interface{}, t *testing.T) {
	n := ValueOf(a).Field(0)
	if !n.IsNil() {
		t.Errorf("%v should be nil", a)
	}
}

func NotNil(a interface{}, t *testing.T) {
	n := ValueOf(a).Field(0)
	if n.IsNil() {
		t.Errorf("value of type %v should not be nil", ValueOf(a).Type().String())
	}
}

func TestIsNil(t *testing.T) {
	// These implement IsNil.
	// Wrap in extra struct to hide interface type.
	doNil := []interface{}{
		struct{ x *int }{},
		struct{ x interface{} }{},
		struct{ x map[string]int }{},
		struct{ x func() bool }{},
		struct{ x chan int }{},
		struct{ x []string }{},
		struct{ x unsafe.Pointer }{},
	}
	for _, ts := range doNil {
		ty := TypeOf(ts).Field(0).Type
		v := Zero(ty)
		v.IsNil() // panics if not okay to call
	}

	// Check the implementations
	var pi struct {
		x *int
	}
	Nil(pi, t)
	pi.x = new(int)
	NotNil(pi, t)

	var si struct {
		x []int
	}
	Nil(si, t)
	si.x = make([]int, 10)
	NotNil(si, t)

	var ci struct {
		x chan int
	}
	Nil(ci, t)
	ci.x = make(chan int)
	NotNil(ci, t)

	var mi struct {
		x map[int]int
	}
	Nil(mi, t)
	mi.x = make(map[int]int)
	NotNil(mi, t)

	var ii struct {
		x interface{}
	}
	Nil(ii, t)
	ii.x = 2
	NotNil(ii, t)

	var fi struct {
		x func(t *testing.T)
	}
	Nil(fi, t)
	fi.x = TestIsNil
	NotNil(fi, t)
}

func TestIsZero(t *testing.T) {
	for i, tt := range []struct {
		x    interface{}
		want bool
	}{
		// Booleans
		{true, false},
		{false, true},
		// Numeric types
		{int(0), true},
		{int(1), false},
		{int8(0), true},
		{int8(1), false},
		{int16(0), true},
		{int16(1), false},
		{int32(0), true},
		{int32(1), false},
		{int64(0), true},
		{int64(1), false},
		{uint(0), true},
		{uint(1), false},
		{uint8(0), true},
		{uint8(1), false},
		{uint16(0), true},
		{uint16(1), false},
		{uint32(0), true},
		{uint32(1), false},
		{uint64(0), true},
		{uint64(1), false},
		{float32(0), true},
		{float32(1.2), false},
		{float64(0), true},
		{float64(1.2), false},
		{math.Copysign(0, -1), false},
		{complex64(0), true},
		{complex64(1.2), false},
		{complex128(0), true},
		{complex128(1.2), false},
		{complex(math.Copysign(0, -1), 0), false},
		{complex(0, math.Copysign(0, -1)), false},
		{complex(math.Copysign(0, -1), math.Copysign(0, -1)), false},
		{uintptr(0), true},
		{uintptr(128), false},
		// Array
		{Zero(TypeOf([5]string{})).Interface(), true},
		{[5]string{"", "", "", "", ""}, true},
		{[5]string{}, true},
		{[5]string{"", "", "", "a", ""}, false},
		// Chan
		{(chan string)(nil), true},
		{make(chan string), false},
		{time.After(1), false},
		// Func
		{(func())(nil), true},
		{New, false},
		// Interface
		{New(TypeOf(new(error)).Elem()).Elem(), true},
		{(io.Reader)(strings.NewReader("")), false},
		// Map
		{(map[string]string)(nil), true},
		{map[string]string{}, false},
		{make(map[string]string), false},
		// Ptr
		{(*func())(nil), true},
		{(*int)(nil), true},
		{new(int), false},
		// Slice
		{[]string{}, false},
		{([]string)(nil), true},
		{make([]string, 0), false},
		// Strings
		{"", true},
		{"not-zero", false},
		// Structs
		{T{}, true},
		{T{123, 456.75, "hello", &_i}, false},
		// UnsafePointer
		{(unsafe.Pointer)(nil), true},
		{(unsafe.Pointer)(new(int)), false},
	} {
		var x Value
		if v, ok := tt.x.(Value); ok {
			x = v
		} else {
			x = ValueOf(tt.x)
		}

		b := x.IsZero()
		if b != tt.want {
			t.Errorf("%d: IsZero((%s)(%+v)) = %t, want %t", i, x.Kind(), tt.x, b, tt.want)
		}

		if !Zero(TypeOf(tt.x)).IsZero() {
			t.Errorf("%d: IsZero(Zero(TypeOf((%s)(%+v)))) is false", i, x.Kind(), tt.x)
		}
	}

	func() {
		defer func() {
			if r := recover(); r == nil {
				t.Error("should panic for invalid value")
			}
		}()
		(Value{}).IsZero()
	}()
}

func TestInterfaceExtraction(t *testing.T) {
	var s struct {
		W io.Writer
	}

	s.W = os.Stdout
	v := Indirect(ValueOf(&s)).Field(0).Interface()
	if v != s.W.(interface{}) {
		t.Error("Interface() on interface: ", v, s.W)
	}
}

func TestNilPtrValueSub(t *testing.T) {
	var pi *int
	if pv := ValueOf(pi); pv.Elem().IsValid() {
		t.Error("ValueOf((*int)(nil)).Elem().IsValid()")
	}
}

func TestMap(t *testing.T) {
	m := map[string]int{"a": 1, "b": 2}
	mv := ValueOf(m)
	if n := mv.Len(); n != len(m) {
		t.Errorf("Len = %d, want %d", n, len(m))
	}
	keys := mv.MapKeys()
	newmap := MakeMap(mv.Type())
	for k, v := range m {
		// Check that returned Keys match keys in range.
		// These aren't required to be in the same order.
		seen := false
		for _, kv := range keys {
			if kv.String() == k {
				seen = true
				break
			}
		}
		if !seen {
			t.Errorf("Missing key %q", k)
		}

		// Check that value lookup is correct.
		vv := mv.MapIndex(ValueOf(k))
		if vi := vv.Int(); vi != int64(v) {
			t.Errorf("Key %q: have value %d, want %d", k, vi, v)
		}

		// Copy into new map.
		newmap.SetMapIndex(ValueOf(k), ValueOf(v))
	}
	vv := mv.MapIndex(ValueOf("not-present"))
	if vv.IsValid() {
		t.Errorf("Invalid key: got non-nil value %s", valueToString(vv))
	}

	newm := newmap.Interface().(map[string]int)
	if len(newm) != len(m) {
		t.Errorf("length after copy: newm=%d, m=%d", len(newm), len(m))
	}

	for k, v := range newm {
		mv, ok := m[k]
		if mv != v {
			t.Errorf("newm[%q] = %d, but m[%q] = %d, %v", k, v, k, mv, ok)
		}
	}

	newmap.SetMapIndex(ValueOf("a"), Value{})
	v, ok := newm["a"]
	if ok {
		t.Errorf("newm[\"a\"] = %d after delete", v)
	}

	mv = ValueOf(&m).Elem()
	mv.Set(Zero(mv.Type()))
	if m != nil {
		t.Errorf("mv.Set(nil) failed")
	}
}

func TestNilMap(t *testing.T) {
	var m map[string]int
	mv := ValueOf(m)
	keys := mv.MapKeys()
	if len(keys) != 0 {
		t.Errorf(">0 keys for nil map: %v", keys)
	}

	// Check that value for missing key is zero.
	x := mv.MapIndex(ValueOf("hello"))
	if x.Kind() != Invalid {
		t.Errorf("m.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x)
	}

	// Check big value too.
	var mbig map[string][10 << 20]byte
	x = ValueOf(mbig).MapIndex(ValueOf("hello"))
	if x.Kind() != Invalid {
		t.Errorf("mbig.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x)
	}

	// Test that deletes from a nil map succeed.
	mv.SetMapIndex(ValueOf("hi"), Value{})
}

func TestChan(t *testing.T) {
|
|
|
|
|
for loop := 0; loop < 2; loop++ {
|
2009-12-15 15:40:16 -08:00
|
|
|
var c chan int
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
var cv Value
|
2009-07-08 15:00:54 -07:00
|
|
|
|
|
|
|
|
// check both ways to allocate channels
|
|
|
|
|
switch loop {
|
|
|
|
|
case 1:
|
2009-12-15 15:40:16 -08:00
|
|
|
c = make(chan int, 1)
|
2011-04-25 13:39:16 -04:00
|
|
|
cv = ValueOf(c)
|
2009-07-08 15:00:54 -07:00
|
|
|
case 0:
|
2011-04-25 13:39:16 -04:00
|
|
|
cv = MakeChan(TypeOf(c), 1)
|
2009-12-15 15:40:16 -08:00
|
|
|
c = cv.Interface().(chan int)
|
2009-07-08 15:00:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Send
|
2011-04-25 13:39:16 -04:00
|
|
|
cv.Send(ValueOf(2))
|
2009-07-08 15:00:54 -07:00
|
|
|
if i := <-c; i != 2 {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("reflect Send 2, native recv %d", i)
|
2009-07-08 15:00:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Recv
|
2009-12-15 15:40:16 -08:00
|
|
|
c <- 3
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
		if i, ok := cv.Recv(); i.Int() != 3 || !ok {
			t.Errorf("native send 3, reflect Recv %d, %t", i.Int(), ok)
		}

		// TryRecv fail
		val, ok := cv.TryRecv()
		if val.IsValid() || ok {
			t.Errorf("TryRecv on empty chan: %s, %t", valueToString(val), ok)
		}

		// TryRecv success
		c <- 4
		val, ok = cv.TryRecv()
		if !val.IsValid() {
			t.Errorf("TryRecv on ready chan got nil")
		} else if i := val.Int(); i != 4 || !ok {
			t.Errorf("native send 4, TryRecv %d, %t", i, ok)
		}

		// TrySend fail
		c <- 100
		ok = cv.TrySend(ValueOf(5))
		i := <-c
		if ok {
			t.Errorf("TrySend on full chan succeeded: value %d", i)
		}

		// TrySend success
		ok = cv.TrySend(ValueOf(6))
		if !ok {
			t.Errorf("TrySend on empty chan failed")
			select {
			case x := <-c:
				t.Errorf("TrySend failed but it did send %d", x)
			default:
			}
		} else {
			if i = <-c; i != 6 {
				t.Errorf("TrySend 6, recv %d", i)
			}
		}

		// Close
		c <- 123
		cv.Close()
		if i, ok := cv.Recv(); i.Int() != 123 || !ok {
			t.Errorf("send 123 then close; Recv %d, %t", i.Int(), ok)
		}
		if i, ok := cv.Recv(); i.Int() != 0 || ok {
			t.Errorf("after close Recv %d, %t", i.Int(), ok)
		}
	}

	// check creation of unbuffered channel
	var c chan int
	cv := MakeChan(TypeOf(c), 0)
	c = cv.Interface().(chan int)
	if cv.TrySend(ValueOf(7)) {
		t.Errorf("TrySend on sync chan succeeded")
	}
	if v, ok := cv.TryRecv(); v.IsValid() || ok {
		t.Errorf("TryRecv on sync chan succeeded: isvalid=%v ok=%v", v.IsValid(), ok)
	}

	// len/cap
	cv = MakeChan(TypeOf(c), 10)
	c = cv.Interface().(chan int)
	for i := 0; i < 3; i++ {
		c <- i
	}
	if l, m := cv.Len(), cv.Cap(); l != len(c) || m != cap(c) {
		t.Errorf("Len/Cap = %d/%d want %d/%d", l, m, len(c), cap(c))
	}
}
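
// A minimal sketch of the reflect channel API exercised above, mapped to the
// native operations (for orientation only; not used by the tests):
//
//	cv := reflect.MakeChan(reflect.TypeOf((chan int)(nil)), 1) // c := make(chan int, 1)
//	cv.Send(reflect.ValueOf(1))                                // c <- 1
//	v, ok := cv.Recv()                                         // x, ok := <-c
//	cv.Close()                                                 // close(c)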

// caseInfo describes a single case in a select test.
type caseInfo struct {
	desc      string
	canSelect bool
	recv      Value
	closed    bool
	helper    func()
	panic     bool
}

var allselect = flag.Bool("allselect", false, "exhaustive select test")

func TestSelect(t *testing.T) {
	selectWatch.once.Do(func() { go selectWatcher() })

	var x exhaustive
	nch := 0
	newop := func(n int, cap int) (ch, val Value) {
		nch++
		// Alternate between chan int and chan string so Select is exercised
		// with more than one element type; nch%101%2 shifts the phase of the
		// alternation every 101 channels.
		if nch%101%2 == 1 {
			c := make(chan int, cap)
			ch = ValueOf(c)
			val = ValueOf(n)
		} else {
			c := make(chan string, cap)
			ch = ValueOf(c)
			val = ValueOf(fmt.Sprint(n))
		}
		return
	}

	for n := 0; x.Next(); n++ {
		if testing.Short() && n >= 1000 {
			break
		}
		if n >= 100000 && !*allselect {
			break
		}
		if n%100000 == 0 && testing.Verbose() {
			println("TestSelect", n)
		}
		var cases []SelectCase
		var info []caseInfo

		// Ready send.
		if x.Maybe() {
			ch, val := newop(len(cases), 1)
			cases = append(cases, SelectCase{
				Dir:  SelectSend,
				Chan: ch,
				Send: val,
			})
			info = append(info, caseInfo{desc: "ready send", canSelect: true})
		}

		// Ready recv.
		if x.Maybe() {
			ch, val := newop(len(cases), 1)
			ch.Send(val)
			cases = append(cases, SelectCase{
				Dir:  SelectRecv,
				Chan: ch,
			})
			info = append(info, caseInfo{desc: "ready recv", canSelect: true, recv: val})
		}

		// Blocking send.
		if x.Maybe() {
			ch, val := newop(len(cases), 0)
			cases = append(cases, SelectCase{
				Dir:  SelectSend,
				Chan: ch,
				Send: val,
			})
			// Let it execute?
			if x.Maybe() {
				f := func() { ch.Recv() }
				info = append(info, caseInfo{desc: "blocking send", helper: f})
			} else {
				info = append(info, caseInfo{desc: "blocking send"})
			}
		}

		// Blocking recv.
		if x.Maybe() {
			ch, val := newop(len(cases), 0)
			cases = append(cases, SelectCase{
				Dir:  SelectRecv,
				Chan: ch,
			})
			// Let it execute?
			if x.Maybe() {
				f := func() { ch.Send(val) }
				info = append(info, caseInfo{desc: "blocking recv", recv: val, helper: f})
			} else {
				info = append(info, caseInfo{desc: "blocking recv"})
			}
		}

		// Zero Chan send.
		if x.Maybe() {
			// Maybe include value to send.
			var val Value
			if x.Maybe() {
				val = ValueOf(100)
			}
			cases = append(cases, SelectCase{
				Dir:  SelectSend,
				Send: val,
			})
			info = append(info, caseInfo{desc: "zero Chan send"})
		}

		// Zero Chan receive.
		if x.Maybe() {
			cases = append(cases, SelectCase{
				Dir: SelectRecv,
			})
			info = append(info, caseInfo{desc: "zero Chan recv"})
		}

		// nil Chan send.
		if x.Maybe() {
			cases = append(cases, SelectCase{
				Dir:  SelectSend,
				Chan: ValueOf((chan int)(nil)),
				Send: ValueOf(101),
			})
			info = append(info, caseInfo{desc: "nil Chan send"})
		}

		// nil Chan recv.
		if x.Maybe() {
			cases = append(cases, SelectCase{
				Dir:  SelectRecv,
				Chan: ValueOf((chan int)(nil)),
			})
			info = append(info, caseInfo{desc: "nil Chan recv"})
		}

		// closed Chan send.
		if x.Maybe() {
			ch := make(chan int)
			close(ch)
			cases = append(cases, SelectCase{
				Dir:  SelectSend,
				Chan: ValueOf(ch),
				Send: ValueOf(101),
			})
			info = append(info, caseInfo{desc: "closed Chan send", canSelect: true, panic: true})
		}

		// closed Chan recv.
		if x.Maybe() {
			ch, val := newop(len(cases), 0)
			ch.Close()
			val = Zero(val.Type())
			cases = append(cases, SelectCase{
				Dir:  SelectRecv,
				Chan: ch,
			})
			info = append(info, caseInfo{desc: "closed Chan recv", canSelect: true, closed: true, recv: val})
		}

		var helper func() // goroutine to help the select complete

		// Add default? Must be last case here, but will permute.
		// Add the default if the select would otherwise
		// block forever, and maybe add it anyway.
		numCanSelect := 0
		canProceed := false
		canBlock := true
		canPanic := false
		helpers := []int{}
		for i, c := range info {
			if c.canSelect {
				canProceed = true
				canBlock = false
				numCanSelect++
				if c.panic {
					canPanic = true
				}
			} else if c.helper != nil {
				canProceed = true
				helpers = append(helpers, i)
			}
		}
		if !canProceed || x.Maybe() {
			cases = append(cases, SelectCase{
				Dir: SelectDefault,
			})
			info = append(info, caseInfo{desc: "default", canSelect: canBlock})
			numCanSelect++
		} else if canBlock {
			// Select needs to communicate with another goroutine.
			cas := &info[helpers[x.Choose(len(helpers))]]
			helper = cas.helper
			cas.canSelect = true
			numCanSelect++
		}

		// Permute cases and case info.
		// Doing too much here makes the exhaustive loop
		// too exhausting, so just do two swaps.
		for loop := 0; loop < 2; loop++ {
			i := x.Choose(len(cases))
			j := x.Choose(len(cases))
			cases[i], cases[j] = cases[j], cases[i]
			info[i], info[j] = info[j], info[i]
		}

		if helper != nil {
			// We wait before kicking off a goroutine to satisfy a blocked select.
			// The pause needs to be big enough to let the select block before
			// we run the helper, but if we lose that race once in a while it's okay: the
			// select will just proceed immediately. Not a big deal.
			// For short tests we can grow the timeout a bit without fear of taking too long.
			pause := 10 * time.Microsecond
			if testing.Short() {
				pause = 100 * time.Microsecond
			}
			time.AfterFunc(pause, helper)
		}

		// Run select.
		i, recv, recvOK, panicErr := runSelect(cases, info)
		if panicErr != nil && !canPanic {
			t.Fatalf("%s\npanicked unexpectedly: %v", fmtSelect(info), panicErr)
		}
		if panicErr == nil && canPanic && numCanSelect == 1 {
			t.Fatalf("%s\nselected #%d incorrectly (should panic)", fmtSelect(info), i)
		}
		if panicErr != nil {
			continue
		}

		cas := info[i]
		if !cas.canSelect {
			recvStr := ""
			if recv.IsValid() {
				recvStr = fmt.Sprintf(", received %v, %v", recv.Interface(), recvOK)
			}
			t.Fatalf("%s\nselected #%d incorrectly%s", fmtSelect(info), i, recvStr)
			continue
		}
		if cas.panic {
			t.Fatalf("%s\nselected #%d incorrectly (case should panic)", fmtSelect(info), i)
			continue
		}

		if cases[i].Dir == SelectRecv {
			if !recv.IsValid() {
				t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, cas.recv.Interface(), !cas.closed)
			}
			if !cas.recv.IsValid() {
				t.Fatalf("%s\nselected #%d but internal error: missing recv value", fmtSelect(info), i)
			}
			if recv.Interface() != cas.recv.Interface() || recvOK != !cas.closed {
				if recv.Interface() == cas.recv.Interface() && recvOK == !cas.closed {
					t.Fatalf("%s\nselected #%d, got %#v, %v, and DeepEqual is broken on %T", fmtSelect(info), i, recv.Interface(), recvOK, recv.Interface())
				}
				t.Fatalf("%s\nselected #%d but got %#v, %v, want %#v, %v", fmtSelect(info), i, recv.Interface(), recvOK, cas.recv.Interface(), !cas.closed)
			}
		} else {
			if recv.IsValid() || recvOK {
				t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, Value{}, false)
			}
		}
	}
}
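
// A minimal Select call, for orientation (a sketch; the test above builds
// its cases dynamically and exhaustively):
//
//	c := make(chan int, 1)
//	c <- 42
//	cases := []reflect.SelectCase{
//		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c)},
//		{Dir: reflect.SelectDefault},
//	}
//	chosen, v, ok := reflect.Select(cases) // chosen == 0, v.Int() == 42, ok == true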

// selectWatch and the selectWatcher are a watchdog mechanism for running Select.
// If the selectWatcher notices that the select has been blocked for >10 seconds,
// it prints an error describing the select and panics the entire test binary.
var selectWatch struct {
	sync.Mutex
	once sync.Once
	now  time.Time
	info []caseInfo
}

func selectWatcher() {
	for {
		time.Sleep(1 * time.Second)
		selectWatch.Lock()
		if selectWatch.info != nil && time.Since(selectWatch.now) > 10*time.Second {
			fmt.Fprintf(os.Stderr, "TestSelect:\n%s blocked indefinitely\n", fmtSelect(selectWatch.info))
			panic("select stuck")
		}
		selectWatch.Unlock()
	}
}

// runSelect runs a single select test.
// It returns the values returned by Select but also returns
// a panic value if the Select panics.
func runSelect(cases []SelectCase, info []caseInfo) (chosen int, recv Value, recvOK bool, panicErr interface{}) {
	defer func() {
		panicErr = recover()

		selectWatch.Lock()
		selectWatch.info = nil
		selectWatch.Unlock()
	}()

	selectWatch.Lock()
	selectWatch.now = time.Now()
	selectWatch.info = info
	selectWatch.Unlock()

	chosen, recv, recvOK = Select(cases)
	return
}

// fmtSelect formats the information about a single select test.
func fmtSelect(info []caseInfo) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "\nselect {\n")
	for i, cas := range info {
		fmt.Fprintf(&buf, "%d: %s", i, cas.desc)
		if cas.recv.IsValid() {
			fmt.Fprintf(&buf, " val=%#v", cas.recv.Interface())
		}
		if cas.canSelect {
			fmt.Fprintf(&buf, " canselect")
		}
		if cas.panic {
			fmt.Fprintf(&buf, " panic")
		}
		fmt.Fprintf(&buf, "\n")
	}
	fmt.Fprintf(&buf, "}")
	return buf.String()
}

type two [2]uintptr

// Difficult test for function call because of
// implicit padding between arguments.
func dummy(b byte, c int, d byte, e two, f byte, g float32, h byte) (i byte, j int, k byte, l two, m byte, n float32, o byte) {
	return b, c, d, e, f, g, h
}

func TestFunc(t *testing.T) {
	ret := ValueOf(dummy).Call([]Value{
		ValueOf(byte(10)),
		ValueOf(20),
		ValueOf(byte(30)),
		ValueOf(two{40, 50}),
		ValueOf(byte(60)),
		ValueOf(float32(70)),
		ValueOf(byte(80)),
	})
	if len(ret) != 7 {
		t.Fatalf("Call returned %d values, want 7", len(ret))
	}

	i := byte(ret[0].Uint())
	j := int(ret[1].Int())
	k := byte(ret[2].Uint())
	l := ret[3].Interface().(two)
	m := byte(ret[4].Uint())
	n := float32(ret[5].Float())
	o := byte(ret[6].Uint())

	if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
		t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
	}

	for i, v := range ret {
		if v.CanAddr() {
			t.Errorf("result %d is addressable", i)
		}
	}
}

func TestCallConvert(t *testing.T) {
	v := ValueOf(new(io.ReadWriter)).Elem()
	f := ValueOf(func(r io.Reader) io.Reader { return r })
	out := f.Call([]Value{v})
	if len(out) != 1 || out[0].Type() != TypeOf(new(io.Reader)).Elem() || !out[0].IsNil() {
		t.Errorf("expected [nil], got %v", out)
	}
}
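
// Note on the conversion above: Call converts each argument to the
// corresponding parameter type under the usual assignability rules, which is
// why a Value holding a nil io.ReadWriter can be passed where an io.Reader
// parameter is declared.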

type emptyStruct struct{}

type nonEmptyStruct struct {
	member int
}

func returnEmpty() emptyStruct {
	return emptyStruct{}
}

func takesEmpty(e emptyStruct) {
}

func returnNonEmpty(i int) nonEmptyStruct {
	return nonEmptyStruct{member: i}
}

func takesNonEmpty(n nonEmptyStruct) int {
	return n.member
}

func TestCallWithStruct(t *testing.T) {
	r := ValueOf(returnEmpty).Call(nil)
	if len(r) != 1 || r[0].Type() != TypeOf(emptyStruct{}) {
		t.Errorf("returning empty struct returned %#v instead", r)
	}
	r = ValueOf(takesEmpty).Call([]Value{ValueOf(emptyStruct{})})
	if len(r) != 0 {
		t.Errorf("takesEmpty returned values: %#v", r)
	}
	r = ValueOf(returnNonEmpty).Call([]Value{ValueOf(42)})
	if len(r) != 1 || r[0].Type() != TypeOf(nonEmptyStruct{}) || r[0].Field(0).Int() != 42 {
		t.Errorf("returnNonEmpty returned %#v", r)
	}
	r = ValueOf(takesNonEmpty).Call([]Value{ValueOf(nonEmptyStruct{member: 42})})
	if len(r) != 1 || r[0].Type() != TypeOf(1) || r[0].Int() != 42 {
		t.Errorf("takesNonEmpty returned %#v", r)
	}
}
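
// emptyStruct has size zero, so the cases above exercise Call's handling of
// zero-sized arguments and results alongside ordinary ones.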

func TestCallReturnsEmpty(t *testing.T) {
	// Issue 21717: past-the-end pointer write in Call with
	// nonzero-sized frame and zero-sized return value.
	runtime.GC()
	var finalized uint32
	f := func() (emptyStruct, *[2]int64) {
		i := new([2]int64) // big enough to not be tinyalloc'd, so finalizer always runs when i dies
		runtime.SetFinalizer(i, func(*[2]int64) { atomic.StoreUint32(&finalized, 1) })
		return emptyStruct{}, i
	}
	v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run.
	timeout := time.After(5 * time.Second)
	for atomic.LoadUint32(&finalized) == 0 {
		select {
		case <-timeout:
			t.Fatal("finalizer did not run")
		default:
		}
		runtime.Gosched()
		runtime.GC()
	}
	runtime.KeepAlive(v)
}

func BenchmarkCall(b *testing.B) {
	fv := ValueOf(func(a, b string) {})
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		args := []Value{ValueOf("a"), ValueOf("b")}
		for pb.Next() {
			fv.Call(args)
		}
	})
}

func BenchmarkCallArgCopy(b *testing.B) {
	byteArray := func(n int) Value {
		return Zero(ArrayOf(n, TypeOf(byte(0))))
	}
	sizes := [...]struct {
		fv  Value
		arg Value
	}{
		{ValueOf(func(a [128]byte) {}), byteArray(128)},
		{ValueOf(func(a [256]byte) {}), byteArray(256)},
		{ValueOf(func(a [1024]byte) {}), byteArray(1024)},
		{ValueOf(func(a [4096]byte) {}), byteArray(4096)},
		{ValueOf(func(a [65536]byte) {}), byteArray(65536)},
	}
	for _, size := range sizes {
		bench := func(b *testing.B) {
			args := []Value{size.arg}
			b.SetBytes(int64(size.arg.Len()))
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					size.fv.Call(args)
				}
			})
		}
		name := fmt.Sprintf("size=%v", size.arg.Len())
		b.Run(name, bench)
	}
}

func TestMakeFunc(t *testing.T) {
	f := dummy
	fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in })
	ValueOf(&f).Elem().Set(fv)

	// Call g with small arguments so that there is
	// something predictable (and different from the
	// correct results) in those positions on the stack.
	g := dummy
	g(1, 2, 3, two{4, 5}, 6, 7, 8)

	// Call constructed function f.
	i, j, k, l, m, n, o := f(10, 20, 30, two{40, 50}, 60, 70, 80)
	if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
		t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
	}
}
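
// The swap idiom above and below: MakeFunc returns a Value of the requested
// func type, and ValueOf(&f).Elem().Set(fv) stores it back into the Go
// function variable f, so ordinary calls to f run the constructed
// implementation.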

func TestMakeFuncInterface(t *testing.T) {
	fn := func(i int) int { return i }
	incr := func(in []Value) []Value {
		return []Value{ValueOf(int(in[0].Int() + 1))}
	}
	fv := MakeFunc(TypeOf(fn), incr)
	ValueOf(&fn).Elem().Set(fv)
	if r := fn(2); r != 3 {
		t.Errorf("Call returned %d, want 3", r)
	}
	if r := fv.Call([]Value{ValueOf(14)})[0].Int(); r != 15 {
		t.Errorf("Call returned %d, want 15", r)
	}
	if r := fv.Interface().(func(int) int)(26); r != 27 {
		t.Errorf("Call returned %d, want 27", r)
	}
}

func TestMakeFuncVariadic(t *testing.T) {
	// Test that variadic arguments are packed into a slice and passed as last arg
	fn := func(_ int, is ...int) []int { return nil }
	fv := MakeFunc(TypeOf(fn), func(in []Value) []Value { return in[1:2] })
	ValueOf(&fn).Elem().Set(fv)

	r := fn(1, 2, 3)
	if r[0] != 2 || r[1] != 3 {
		t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
	}

	r = fn(1, []int{2, 3}...)
	if r[0] != 2 || r[1] != 3 {
		t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
	}

	r = fv.Call([]Value{ValueOf(1), ValueOf(2), ValueOf(3)})[0].Interface().([]int)
	if r[0] != 2 || r[1] != 3 {
		t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
	}

	r = fv.CallSlice([]Value{ValueOf(1), ValueOf([]int{2, 3})})[0].Interface().([]int)
	if r[0] != 2 || r[1] != 3 {
		t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
	}

	f := fv.Interface().(func(int, ...int) []int)

	r = f(1, 2, 3)
	if r[0] != 2 || r[1] != 3 {
		t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
	}
	r = f(1, []int{2, 3}...)
	if r[0] != 2 || r[1] != 3 {
		t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
	}
}
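
// The distinction exercised above: Call packs trailing arguments into a new
// slice for a variadic function (like f(1, 2, 3)), while CallSlice passes
// the final slice argument through as-is (like f(1, []int{2, 3}...)).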

type Point struct {
	x, y int
}

// This will be index 0.
func (p Point) AnotherMethod(scale int) int {
	return -1
}

// This will be index 1.
func (p Point) Dist(scale int) int {
	//println("Point.Dist", p.x, p.y, scale)
	return p.x*p.x*scale + p.y*p.y*scale
}

// This will be index 2.
func (p Point) GCMethod(k int) int {
	runtime.GC()
	return k + p.x
}

// This will be index 3.
func (p Point) NoArgs() {
	// Exercise no-argument/no-result paths.
}

// This will be index 4.
func (p Point) TotalDist(points ...Point) int {
	tot := 0
	for _, q := range points {
		dx := q.x - p.x
		dy := q.y - p.y
		tot += dx*dx + dy*dy // Should call Sqrt, but it's just a test.
	}
	return tot
}

func TestMethod(t *testing.T) {
	// Non-curried method of type.
	p := Point{3, 4}
	i := TypeOf(p).Method(1).Func.Call([]Value{ValueOf(p), ValueOf(10)})[0].Int()
	if i != 250 {
		t.Errorf("Type Method returned %d; want 250", i)
	}

	m, ok := TypeOf(p).MethodByName("Dist")
	if !ok {
		t.Fatalf("method by name failed")
	}
	i = m.Func.Call([]Value{ValueOf(p), ValueOf(11)})[0].Int()
	if i != 275 {
		t.Errorf("Type MethodByName returned %d; want 275", i)
	}

	m, ok = TypeOf(p).MethodByName("NoArgs")
	if !ok {
		t.Fatalf("method by name failed")
	}
	n := len(m.Func.Call([]Value{ValueOf(p)}))
	if n != 0 {
		t.Errorf("NoArgs returned %d values; want 0", n)
	}

	i = TypeOf(&p).Method(1).Func.Call([]Value{ValueOf(&p), ValueOf(12)})[0].Int()
	if i != 300 {
		t.Errorf("Pointer Type Method returned %d; want 300", i)
	}

	m, ok = TypeOf(&p).MethodByName("Dist")
	if !ok {
		t.Fatalf("ptr method by name failed")
	}
	i = m.Func.Call([]Value{ValueOf(&p), ValueOf(13)})[0].Int()
	if i != 325 {
		t.Errorf("Pointer Type MethodByName returned %d; want 325", i)
	}

	m, ok = TypeOf(&p).MethodByName("NoArgs")
	if !ok {
		t.Fatalf("method by name failed")
	}
	n = len(m.Func.Call([]Value{ValueOf(&p)}))
	if n != 0 {
		t.Errorf("NoArgs returned %d values; want 0", n)
	}

	// Curried method of value.
	tfunc := TypeOf((func(int) int)(nil))
	v := ValueOf(p).Method(1)
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
	}
	i = v.Call([]Value{ValueOf(14)})[0].Int()
	if i != 350 {
		t.Errorf("Value Method returned %d; want 350", i)
	}
	v = ValueOf(p).MethodByName("Dist")
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc)
	}
	i = v.Call([]Value{ValueOf(15)})[0].Int()
	if i != 375 {
		t.Errorf("Value MethodByName returned %d; want 375", i)
	}
	v = ValueOf(p).MethodByName("NoArgs")
	v.Call(nil)

	// Curried method of pointer.
	v = ValueOf(&p).Method(1)
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc)
	}
	i = v.Call([]Value{ValueOf(16)})[0].Int()
	if i != 400 {
		t.Errorf("Pointer Value Method returned %d; want 400", i)
	}
	v = ValueOf(&p).MethodByName("Dist")
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
	}
	i = v.Call([]Value{ValueOf(17)})[0].Int()
	if i != 425 {
		t.Errorf("Pointer Value MethodByName returned %d; want 425", i)
	}
	v = ValueOf(&p).MethodByName("NoArgs")
	v.Call(nil)

	// Curried method of interface value.
	// Have to wrap interface value in a struct to get at it.
	// Passing it to ValueOf directly would
	// access the underlying Point, not the interface.
	var x interface {
		Dist(int) int
	} = p
	pv := ValueOf(&x).Elem()
	v = pv.Method(0)
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Interface Method Type is %s; want %s", tt, tfunc)
	}
	i = v.Call([]Value{ValueOf(18)})[0].Int()
	if i != 450 {
		t.Errorf("Interface Method returned %d; want 450", i)
	}
	v = pv.MethodByName("Dist")
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc)
	}
	i = v.Call([]Value{ValueOf(19)})[0].Int()
	if i != 475 {
		t.Errorf("Interface MethodByName returned %d; want 475", i)
	}
}
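
// Method(i) and MethodByName(name) on a Value return the method with the
// receiver already bound, so the resulting func value is called with only
// the remaining arguments; Method on a Type returns an unbound Func that
// takes the receiver as its first argument, as exercised above.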

func TestMethodValue(t *testing.T) {
	p := Point{3, 4}
	var i int64

	// Curried method of value.
	tfunc := TypeOf((func(int) int)(nil))
	v := ValueOf(p).Method(1)
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
	}
	i = ValueOf(v.Interface()).Call([]Value{ValueOf(10)})[0].Int()
	if i != 250 {
		t.Errorf("Value Method returned %d; want 250", i)
	}
	v = ValueOf(p).MethodByName("Dist")
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc)
	}
	i = ValueOf(v.Interface()).Call([]Value{ValueOf(11)})[0].Int()
	if i != 275 {
		t.Errorf("Value MethodByName returned %d; want 275", i)
	}
	v = ValueOf(p).MethodByName("NoArgs")
	ValueOf(v.Interface()).Call(nil)
	v.Interface().(func())()

	// Curried method of pointer.
	v = ValueOf(&p).Method(1)
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc)
	}
	i = ValueOf(v.Interface()).Call([]Value{ValueOf(12)})[0].Int()
	if i != 300 {
		t.Errorf("Pointer Value Method returned %d; want 300", i)
	}
	v = ValueOf(&p).MethodByName("Dist")
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
	}
	i = ValueOf(v.Interface()).Call([]Value{ValueOf(13)})[0].Int()
	if i != 325 {
		t.Errorf("Pointer Value MethodByName returned %d; want 325", i)
	}
	v = ValueOf(&p).MethodByName("NoArgs")
	ValueOf(v.Interface()).Call(nil)
	v.Interface().(func())()

	// Curried method of pointer to pointer.
	pp := &p
	v = ValueOf(&pp).Elem().Method(1)
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Pointer Pointer Value Method Type is %s; want %s", tt, tfunc)
	}
	i = ValueOf(v.Interface()).Call([]Value{ValueOf(14)})[0].Int()
	if i != 350 {
		t.Errorf("Pointer Pointer Value Method returned %d; want 350", i)
	}
	v = ValueOf(&pp).Elem().MethodByName("Dist")
	if tt := v.Type(); tt != tfunc {
		t.Errorf("Pointer Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
	}
	i = ValueOf(v.Interface()).Call([]Value{ValueOf(15)})[0].Int()
	if i != 375 {
		t.Errorf("Pointer Pointer Value MethodByName returned %d; want 375", i)
	}

	// Curried method of interface value.
	// Have to wrap interface value in a struct to get at it.
	// Passing it to ValueOf directly would
	// access the underlying Point, not the interface.
	var s = struct {
|
reflect: more efficient; cannot Set result of NewValue anymore
* Reduces malloc counts during gob encoder/decoder test from 6/6 to 3/5.
The current reflect uses Set to mean two subtly different things.
(1) If you have a reflect.Value v, it might just represent
itself (as in v = reflect.NewValue(42)), in which case calling
v.Set only changed v, not any other data in the program.
(2) If you have a reflect Value v derived from a pointer
or a slice (as in x := []int{42}; v = reflect.NewValue(x).Index(0)),
v represents the value held there. Changing x[0] affects the
value returned by v.Int(), and calling v.Set affects x[0].
This was not really by design; it just happened that way.
The motivation for the new reflect implementation was
to remove mallocs. The use case (1) has an implicit malloc
inside it. If you can do:
v := reflect.NewValue(0)
v.Set(42)
i := v.Int() // i = 42
then that implies that v is referring to some underlying
chunk of memory in order to remember the 42; that is,
NewValue must have allocated some memory.
Almost all the time you are using reflect the goal is to
inspect or to change other data, not to manipulate data
stored solely inside a reflect.Value.
This CL removes use case (1), so that an assignable
reflect.Value must always refer to some other piece of data
in the program. Put another way, removing this case would
make
v := reflect.NewValue(0)
v.Set(42)
as illegal as
0 = 42.
It would also make this illegal:
x := 0
v := reflect.NewValue(x)
v.Set(42)
for the same reason. (Note that right now, v.Set(42) "succeeds"
but does not change the value of x.)
If you really wanted to make v refer to x, you'd start with &x
and dereference it:
x := 0
v := reflect.NewValue(&x).Elem() // v = *&x
v.Set(42)
It's pretty rare, except in tests, to want to use NewValue and then
call Set to change the Value itself instead of some other piece of
data in the program. I haven't seen it happen once yet while
making the tree build with this change.
For the same reasons, reflect.Zero (formerly reflect.MakeZero)
would also return an unassignable, unaddressable value.
This invalidates the (awkward) idiom:
pv := ... some Ptr Value we have ...
v := reflect.Zero(pv.Type().Elem())
pv.PointTo(v)
which, when the API changed, turned into:
pv := ... some Ptr Value we have ...
v := reflect.Zero(pv.Type().Elem())
pv.Set(v.Addr())
In both, it is far from clear what the code is trying to do. Now that
it is possible, this CL adds reflect.New(Type) Value that does the
obvious thing (same as Go's new), so this code would be replaced by:
pv := ... some Ptr Value we have ...
pv.Set(reflect.New(pv.Type().Elem()))
The changes just described can be confusing to think about,
but I believe it is because the old API was confusing - it was
conflating two different kinds of Values - and that the new API
by itself is pretty simple: you can only Set (or call Addr on)
a Value if it actually addresses some real piece of data; that is,
only if it is the result of dereferencing a Ptr or indexing a Slice.
If you really want the old behavior, you'd get it by translating:
v := reflect.NewValue(x)
into
v := reflect.New(reflect.Typeof(x)).Elem()
v.Set(reflect.NewValue(x))
Gofix will not be able to help with this, because whether
and how to change the code depends on whether the original
code meant use (1) or use (2), so the developer has to read
and think about the code.
You can see the effect on packages in the tree in
https://golang.org/cl/4423043/.
R=r
CC=golang-dev
https://golang.org/cl/4435042
2011-04-18 14:35:33 -04:00
|
|
|
X interface {
|
2009-12-15 15:40:16 -08:00
|
|
|
Dist(int) int
|
|
|
|
|
}
|
|
|
|
|
}{p}
|
2011-04-25 13:39:16 -04:00
|
|
|
pv := ValueOf(s).Field(0)
|
2011-11-16 19:18:25 -05:00
|
|
|
v = pv.Method(0)
|
|
|
|
|
if tt := v.Type(); tt != tfunc {
|
|
|
|
|
t.Errorf("Interface Method Type is %s; want %s", tt, tfunc)
|
|
|
|
|
}
|
2013-09-17 15:22:42 -07:00
|
|
|
i = ValueOf(v.Interface()).Call([]Value{ValueOf(16)})[0].Int()
|
|
|
|
|
if i != 400 {
|
|
|
|
|
t.Errorf("Interface Method returned %d; want 400", i)
|
2009-07-09 17:27:49 -07:00
|
|
|
}
|
2011-11-16 19:18:25 -05:00
|
|
|
v = pv.MethodByName("Dist")
|
|
|
|
|
if tt := v.Type(); tt != tfunc {
|
|
|
|
|
t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc)
|
|
|
|
|
}
|
2013-09-17 15:22:42 -07:00
|
|
|
i = ValueOf(v.Interface()).Call([]Value{ValueOf(17)})[0].Int()
|
|
|
|
|
if i != 425 {
|
|
|
|
|
t.Errorf("Interface MethodByName returned %d; want 425", i)
|
2011-06-29 13:11:49 +10:00
|
|
|
}
|
2009-07-09 17:27:49 -07:00
|
|
|
}
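// Sketch (added for illustration, not part of the original tests): the
// addressability rule from the "cannot Set result of NewValue" CL note
// above. A Value is settable only when it refers to data reachable
// through a pointer (or a slice element); a Value made from a plain
// copy is not.
func settabilitySketch() {
	x := 0
	v := ValueOf(&x).Elem() // v = *&x, refers to x itself
	v.SetInt(42)            // legal: x is now 42
	// ValueOf(x).SetInt(42) would panic: that Value is an unaddressable copy.
}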
|
2009-07-10 16:32:26 -07:00
|
|
|
|
2014-10-08 15:48:46 -07:00
|
|
|
func TestVariadicMethodValue(t *testing.T) {
|
|
|
|
|
p := Point{3, 4}
|
|
|
|
|
points := []Point{{20, 21}, {22, 23}, {24, 25}}
|
|
|
|
|
want := int64(p.TotalDist(points[0], points[1], points[2]))
|
|
|
|
|
|
|
|
|
|
// Curried method of value.
|
|
|
|
|
tfunc := TypeOf((func(...Point) int)(nil))
|
2017-03-28 16:03:24 -04:00
|
|
|
v := ValueOf(p).Method(4)
|
2014-10-08 15:48:46 -07:00
|
|
|
if tt := v.Type(); tt != tfunc {
|
|
|
|
|
t.Errorf("Variadic Method Type is %s; want %s", tt, tfunc)
|
|
|
|
|
}
|
|
|
|
|
i := ValueOf(v.Interface()).Call([]Value{ValueOf(points[0]), ValueOf(points[1]), ValueOf(points[2])})[0].Int()
|
|
|
|
|
if i != want {
|
|
|
|
|
t.Errorf("Variadic Method returned %d; want %d", i, want)
|
|
|
|
|
}
|
|
|
|
|
i = ValueOf(v.Interface()).CallSlice([]Value{ValueOf(points)})[0].Int()
|
|
|
|
|
if i != want {
|
|
|
|
|
t.Errorf("Variadic Method CallSlice returned %d; want %d", i, want)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
f := v.Interface().(func(...Point) int)
|
|
|
|
|
i = int64(f(points[0], points[1], points[2]))
|
|
|
|
|
if i != want {
|
|
|
|
|
t.Errorf("Variadic Method Interface returned %d; want %d", i, want)
|
|
|
|
|
}
|
|
|
|
|
i = int64(f(points...))
|
|
|
|
|
if i != want {
|
|
|
|
|
t.Errorf("Variadic Method Interface Slice returned %d; want %d", i, want)
|
|
|
|
|
}
|
|
|
|
|
}
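// Sketch (illustrative, not from the original file): the Call/CallSlice
// distinction exercised above. Call spreads individual variadic
// arguments; CallSlice passes a single []T Value as the final variadic
// slice, mirroring f(xs...) in ordinary Go.
func variadicCallSketch() {
	sum := func(xs ...int) int {
		total := 0
		for _, x := range xs {
			total += x
		}
		return total
	}
	v := ValueOf(sum)
	_ = v.Call([]Value{ValueOf(1), ValueOf(2)})[0].Int()    // sum(1, 2)
	_ = v.CallSlice([]Value{ValueOf([]int{1, 2})})[0].Int() // sum([]int{1, 2}...)
}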
|
|
|
|
|
|
2013-03-21 16:59:16 -04:00
|
|
|
// Reflect version of $GOROOT/test/method5.go
|
|
|
|
|
|
|
|
|
|
// Concrete types implementing M method.
|
|
|
|
|
// Smaller than a word, word-sized, larger than a word.
|
|
|
|
|
// Value and pointer receivers.
|
|
|
|
|
|
|
|
|
|
type Tinter interface {
|
|
|
|
|
M(int, byte) (byte, int)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type Tsmallv byte
|
|
|
|
|
|
|
|
|
|
func (v Tsmallv) M(x int, b byte) (byte, int) { return b, x + int(v) }
|
|
|
|
|
|
|
|
|
|
type Tsmallp byte
|
|
|
|
|
|
|
|
|
|
func (p *Tsmallp) M(x int, b byte) (byte, int) { return b, x + int(*p) }
|
|
|
|
|
|
|
|
|
|
type Twordv uintptr
|
|
|
|
|
|
|
|
|
|
func (v Twordv) M(x int, b byte) (byte, int) { return b, x + int(v) }
|
|
|
|
|
|
|
|
|
|
type Twordp uintptr
|
|
|
|
|
|
|
|
|
|
func (p *Twordp) M(x int, b byte) (byte, int) { return b, x + int(*p) }
|
|
|
|
|
|
|
|
|
|
type Tbigv [2]uintptr
|
|
|
|
|
|
|
|
|
|
func (v Tbigv) M(x int, b byte) (byte, int) { return b, x + int(v[0]) + int(v[1]) }
|
|
|
|
|
|
|
|
|
|
type Tbigp [2]uintptr
|
|
|
|
|
|
|
|
|
|
func (p *Tbigp) M(x int, b byte) (byte, int) { return b, x + int(p[0]) + int(p[1]) }
|
|
|
|
|
|
|
|
|
|
type tinter interface {
|
|
|
|
|
m(int, byte) (byte, int)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Embedding via pointer.
|
|
|
|
|
|
|
|
|
|
type Tm1 struct {
|
|
|
|
|
Tm2
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type Tm2 struct {
|
|
|
|
|
*Tm3
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type Tm3 struct {
|
|
|
|
|
*Tm4
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type Tm4 struct {
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (t4 Tm4) M(x int, b byte) (byte, int) { return b, x + 40 }
|
|
|
|
|
|
|
|
|
|
func TestMethod5(t *testing.T) {
|
|
|
|
|
CheckF := func(name string, f func(int, byte) (byte, int), inc int) {
|
|
|
|
|
b, x := f(1000, 99)
|
|
|
|
|
if b != 99 || x != 1000+inc {
|
|
|
|
|
t.Errorf("%s(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
CheckV := func(name string, i Value, inc int) {
|
|
|
|
|
bx := i.Method(0).Call([]Value{ValueOf(1000), ValueOf(byte(99))})
|
|
|
|
|
b := bx[0].Interface()
|
|
|
|
|
x := bx[1].Interface()
|
|
|
|
|
if b != byte(99) || x != 1000+inc {
|
|
|
|
|
t.Errorf("direct %s.M(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
CheckF(name+".M", i.Method(0).Interface().(func(int, byte) (byte, int)), inc)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var TinterType = TypeOf(new(Tinter)).Elem()
|
|
|
|
|
|
|
|
|
|
CheckI := func(name string, i interface{}, inc int) {
|
|
|
|
|
v := ValueOf(i)
|
|
|
|
|
CheckV(name, v, inc)
|
|
|
|
|
CheckV("(i="+name+")", v.Convert(TinterType), inc)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
sv := Tsmallv(1)
|
|
|
|
|
CheckI("sv", sv, 1)
|
|
|
|
|
CheckI("&sv", &sv, 1)
|
|
|
|
|
|
|
|
|
|
sp := Tsmallp(2)
|
|
|
|
|
CheckI("&sp", &sp, 2)
|
|
|
|
|
|
|
|
|
|
wv := Twordv(3)
|
|
|
|
|
CheckI("wv", wv, 3)
|
|
|
|
|
CheckI("&wv", &wv, 3)
|
|
|
|
|
|
|
|
|
|
wp := Twordp(4)
|
|
|
|
|
CheckI("&wp", &wp, 4)
|
|
|
|
|
|
|
|
|
|
bv := Tbigv([2]uintptr{5, 6})
|
|
|
|
|
CheckI("bv", bv, 11)
|
|
|
|
|
CheckI("&bv", &bv, 11)
|
|
|
|
|
|
|
|
|
|
bp := Tbigp([2]uintptr{7, 8})
|
|
|
|
|
CheckI("&bp", &bp, 15)
|
|
|
|
|
|
|
|
|
|
t4 := Tm4{}
|
|
|
|
|
t3 := Tm3{&t4}
|
|
|
|
|
t2 := Tm2{&t3}
|
|
|
|
|
t1 := Tm1{t2}
|
|
|
|
|
CheckI("t4", t4, 40)
|
|
|
|
|
CheckI("&t4", &t4, 40)
|
|
|
|
|
CheckI("t3", t3, 40)
|
|
|
|
|
CheckI("&t3", &t3, 40)
|
|
|
|
|
CheckI("t2", t2, 40)
|
|
|
|
|
CheckI("&t2", &t2, 40)
|
|
|
|
|
CheckI("t1", t1, 40)
|
|
|
|
|
CheckI("&t1", &t1, 40)
|
|
|
|
|
|
|
|
|
|
var tnil Tinter
|
|
|
|
|
vnil := ValueOf(&tnil).Elem()
|
|
|
|
|
shouldPanic(func() { vnil.Method(0) })
|
|
|
|
|
}
|
|
|
|
|
|
2009-07-10 16:32:26 -07:00
|
|
|
func TestInterfaceSet(t *testing.T) {
|
2009-12-15 15:40:16 -08:00
|
|
|
p := &Point{3, 4}
|
2009-07-10 16:32:26 -07:00
|
|
|
|
|
|
|
|
var s struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
I interface{}
|
|
|
|
|
P interface {
|
|
|
|
|
Dist(int) int
|
|
|
|
|
}
|
2009-07-10 16:32:26 -07:00
|
|
|
}
|
2011-04-25 13:39:16 -04:00
|
|
|
sv := ValueOf(&s).Elem()
|
|
|
|
|
sv.Field(0).Set(ValueOf(p))
|
2009-07-10 16:32:26 -07:00
|
|
|
if q := s.I.(*Point); q != p {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("i: have %p want %p", q, p)
|
2009-07-10 16:32:26 -07:00
|
|
|
}
|
|
|
|
|
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
pv := sv.Field(1)
|
2011-04-25 13:39:16 -04:00
|
|
|
pv.Set(ValueOf(p))
|
2009-07-10 16:32:26 -07:00
|
|
|
if q := s.P.(*Point); q != p {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("i: have %p want %p", q, p)
|
2009-07-10 16:32:26 -07:00
|
|
|
}
|
2009-08-12 13:18:37 -07:00
|
|
|
|
2011-04-25 13:39:16 -04:00
|
|
|
i := pv.Method(0).Call([]Value{ValueOf(10)})[0].Int()
|
2009-07-10 16:32:26 -07:00
|
|
|
if i != 250 {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("Interface Method returned %d; want 250", i)
|
2009-07-10 16:32:26 -07:00
|
|
|
}
|
|
|
|
|
}
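// Sketch (illustrative): the Kind-based API described in the
// "new Type and Value definitions" CL note above. Instead of a type
// switch on concrete Value types, code switches on v.Kind().
func describeKindSketch(v Value) string {
	switch v.Kind() {
	case Int, Int8, Int16, Int32, Int64:
		return "int " + strconv.FormatInt(v.Int(), 10)
	case String:
		return "string " + v.String()
	default:
		return v.Kind().String()
	}
}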
|
2009-08-05 15:56:44 -07:00
|
|
|
|
2009-11-05 14:23:20 -08:00
|
|
|
type T1 struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
a string
|
|
|
|
|
int
|
2009-11-05 14:23:20 -08:00
|
|
|
}
|
2009-08-05 15:56:44 -07:00
|
|
|
|
|
|
|
|
func TestAnonymousFields(t *testing.T) {
|
2009-12-15 15:40:16 -08:00
|
|
|
var field StructField
|
|
|
|
|
var ok bool
|
|
|
|
|
var t1 T1
|
2011-04-25 13:39:16 -04:00
|
|
|
type1 := TypeOf(t1)
|
2009-08-05 15:56:44 -07:00
|
|
|
if field, ok = type1.FieldByName("int"); !ok {
|
2012-09-05 09:35:53 -04:00
|
|
|
t.Fatal("no field 'int'")
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
if field.Index[0] != 1 {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Error("field index should be 1; is", field.Index)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type FTest struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
s interface{}
|
|
|
|
|
name string
|
|
|
|
|
index []int
|
|
|
|
|
value int
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
2009-08-24 17:04:12 -07:00
|
|
|
type D1 struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
d int
|
2009-08-24 17:04:12 -07:00
|
|
|
}
|
|
|
|
|
type D2 struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
d int
|
2009-08-24 17:04:12 -07:00
|
|
|
}
|
|
|
|
|
|
2009-08-05 15:56:44 -07:00
|
|
|
type S0 struct {
|
2011-10-17 18:48:45 -04:00
|
|
|
A, B, C int
|
2009-12-15 15:40:16 -08:00
|
|
|
D1
|
|
|
|
|
D2
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S1 struct {
|
2011-10-17 18:48:45 -04:00
|
|
|
B int
|
2009-12-15 15:40:16 -08:00
|
|
|
S0
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S2 struct {
|
2011-10-17 18:48:45 -04:00
|
|
|
A int
|
2009-12-15 15:40:16 -08:00
|
|
|
*S1
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
2009-08-24 17:04:12 -07:00
|
|
|
type S1x struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
S1
|
2009-08-24 17:04:12 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S1y struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
S1
|
2009-08-24 17:04:12 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S3 struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
S1x
|
|
|
|
|
S2
|
2011-10-17 18:48:45 -04:00
|
|
|
D, E int
|
2009-12-15 15:40:16 -08:00
|
|
|
*S1y
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S4 struct {
|
2009-12-15 15:40:16 -08:00
|
|
|
*S4
|
2011-10-17 18:48:45 -04:00
|
|
|
A int
|
2009-11-05 14:23:20 -08:00
|
|
|
}
|
|
|
|
|
|
2012-09-05 09:35:53 -04:00
|
|
|
// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
|
|
|
|
|
type S5 struct {
|
|
|
|
|
S6
|
|
|
|
|
S7
|
|
|
|
|
S8
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S6 struct {
|
|
|
|
|
X int
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S7 S6
|
|
|
|
|
|
|
|
|
|
type S8 struct {
|
|
|
|
|
S9
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S9 struct {
|
|
|
|
|
X int
|
|
|
|
|
Y int
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
|
|
|
|
|
type S10 struct {
|
|
|
|
|
S11
|
|
|
|
|
S12
|
|
|
|
|
S13
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S11 struct {
|
|
|
|
|
S6
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S12 struct {
|
|
|
|
|
S6
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S13 struct {
|
|
|
|
|
S8
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-13 10:45:30 -08:00
|
|
|
// The X in S15.S11.S6 and S16.S11.S6 annihilate (see the sketch after S16).
|
|
|
|
|
type S14 struct {
|
|
|
|
|
S15
|
|
|
|
|
S16
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S15 struct {
|
|
|
|
|
S11
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type S16 struct {
|
|
|
|
|
S11
|
|
|
|
|
}
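// Sketch (illustrative): what "annihilate" means in the comments above.
// When two fields with the same name are embedded at the same depth,
// neither is promoted, so lookup reports the name as absent.
func ambiguousFieldSketch() bool {
	_, ok := TypeOf(S5{}).FieldByName("X") // S6.X and S7.X collide
	return ok                              // false
}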
|
|
|
|
|
|
2009-11-05 14:23:20 -08:00
|
|
|
var fieldTests = []FTest{
|
2010-10-22 10:06:33 -07:00
|
|
|
{struct{}{}, "", nil, 0},
|
2011-10-17 18:48:45 -04:00
|
|
|
{struct{}{}, "Foo", nil, 0},
|
|
|
|
|
{S0{A: 'a'}, "A", []int{0}, 'a'},
|
|
|
|
|
{S0{}, "D", nil, 0},
|
|
|
|
|
{S1{S0: S0{A: 'a'}}, "A", []int{1, 0}, 'a'},
|
|
|
|
|
{S1{B: 'b'}, "B", []int{0}, 'b'},
|
2010-10-22 10:06:33 -07:00
|
|
|
{S1{}, "S0", []int{1}, 0},
|
2011-10-17 18:48:45 -04:00
|
|
|
{S1{S0: S0{C: 'c'}}, "C", []int{1, 2}, 'c'},
|
|
|
|
|
{S2{A: 'a'}, "A", []int{0}, 'a'},
|
2010-10-22 10:06:33 -07:00
|
|
|
{S2{}, "S1", []int{1}, 0},
|
2011-10-17 18:48:45 -04:00
|
|
|
{S2{S1: &S1{B: 'b'}}, "B", []int{1, 0}, 'b'},
|
|
|
|
|
{S2{S1: &S1{S0: S0{C: 'c'}}}, "C", []int{1, 1, 2}, 'c'},
|
|
|
|
|
{S2{}, "D", nil, 0},
|
2010-10-22 10:06:33 -07:00
|
|
|
{S3{}, "S1", nil, 0},
|
2011-10-17 18:48:45 -04:00
|
|
|
{S3{S2: S2{A: 'a'}}, "A", []int{1, 0}, 'a'},
|
|
|
|
|
{S3{}, "B", nil, 0},
|
|
|
|
|
{S3{D: 'd'}, "D", []int{2}, 0},
|
|
|
|
|
{S3{E: 'e'}, "E", []int{3}, 'e'},
|
|
|
|
|
{S4{A: 'a'}, "A", []int{1}, 'a'},
|
|
|
|
|
{S4{}, "B", nil, 0},
|
2012-09-05 09:35:53 -04:00
|
|
|
{S5{}, "X", nil, 0},
|
|
|
|
|
{S5{}, "Y", []int{2, 0, 1}, 0},
|
|
|
|
|
{S10{}, "X", nil, 0},
|
|
|
|
|
{S10{}, "Y", []int{2, 0, 0, 1}, 0},
|
2012-11-13 10:45:30 -08:00
|
|
|
{S14{}, "X", nil, 0},
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestFieldByIndex(t *testing.T) {
|
|
|
|
|
for _, test := range fieldTests {
|
2011-04-25 13:39:16 -04:00
|
|
|
s := TypeOf(test.s)
|
2009-12-15 15:40:16 -08:00
|
|
|
f := s.FieldByIndex(test.index)
|
2009-08-05 15:56:44 -07:00
|
|
|
if f.Name != "" {
|
|
|
|
|
if test.index != nil {
|
|
|
|
|
if f.Name != test.name {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s found; want %s", s.Name(), f.Name, test.name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s found", s.Name(), f.Name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else if len(test.index) > 0 {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s not found", s.Name(), test.name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if test.value != 0 {
|
2011-04-25 13:39:16 -04:00
|
|
|
v := ValueOf(test.s).FieldByIndex(test.index)
|
2011-04-08 12:26:51 -04:00
|
|
|
if v.IsValid() {
|
2009-08-05 15:56:44 -07:00
|
|
|
if x, ok := v.Interface().(int); ok {
|
|
|
|
|
if x != test.value {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s%v is %d; want %d", s.Name(), test.index, x, test.value)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s%v value not an int", s.Name(), test.index)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s%v value not found", s.Name(), test.index)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestFieldByName(t *testing.T) {
|
|
|
|
|
for _, test := range fieldTests {
|
2011-04-25 13:39:16 -04:00
|
|
|
s := TypeOf(test.s)
|
2009-12-15 15:40:16 -08:00
|
|
|
f, found := s.FieldByName(test.name)
|
2009-08-05 15:56:44 -07:00
|
|
|
if found {
|
|
|
|
|
if test.index != nil {
|
|
|
|
|
// Verify field depth and index.
|
|
|
|
|
if len(f.Index) != len(test.index) {
|
2012-09-05 09:35:53 -04:00
|
|
|
t.Errorf("%s.%s depth %d; want %d: %v vs %v", s.Name(), test.name, len(f.Index), len(test.index), f.Index, test.index)
|
2009-08-05 15:56:44 -07:00
|
|
|
} else {
|
|
|
|
|
for i, x := range f.Index {
|
|
|
|
|
if x != test.index[i] {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s.Index[%d] is %d; want %d", s.Name(), test.name, i, x, test.index[i])
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s found", s.Name(), f.Name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else if len(test.index) > 0 {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s not found", s.Name(), test.name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
2009-08-12 13:18:37 -07:00
|
|
|
|
2009-08-05 15:56:44 -07:00
|
|
|
if test.value != 0 {
|
2011-04-25 13:39:16 -04:00
|
|
|
v := ValueOf(test.s).FieldByName(test.name)
|
2011-04-08 12:26:51 -04:00
|
|
|
if v.IsValid() {
|
2009-08-05 15:56:44 -07:00
|
|
|
if x, ok := v.Interface().(int); ok {
|
|
|
|
|
if x != test.value {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s is %d; want %d", s.Name(), test.name, x, test.value)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s value not an int", s.Name(), test.name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
2009-11-09 12:07:39 -08:00
|
|
|
t.Errorf("%s.%s value not found", s.Name(), test.name)
|
2009-08-05 15:56:44 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2010-01-24 23:33:59 -08:00
|
|
|
|
|
|
|
|
func TestImportPath(t *testing.T) {
|
2012-01-20 09:26:17 +11:00
|
|
|
tests := []struct {
|
|
|
|
|
t Type
|
|
|
|
|
path string
|
|
|
|
|
}{
|
|
|
|
|
{TypeOf(&base64.Encoding{}).Elem(), "encoding/base64"},
|
2012-09-01 19:55:55 -04:00
|
|
|
{TypeOf(int(0)), ""},
|
|
|
|
|
{TypeOf(int8(0)), ""},
|
|
|
|
|
{TypeOf(int16(0)), ""},
|
|
|
|
|
{TypeOf(int32(0)), ""},
|
|
|
|
|
{TypeOf(int64(0)), ""},
|
2012-01-20 09:26:17 +11:00
|
|
|
{TypeOf(uint(0)), ""},
|
2012-09-01 19:55:55 -04:00
|
|
|
{TypeOf(uint8(0)), ""},
|
|
|
|
|
{TypeOf(uint16(0)), ""},
|
|
|
|
|
{TypeOf(uint32(0)), ""},
|
|
|
|
|
{TypeOf(uint64(0)), ""},
|
|
|
|
|
{TypeOf(uintptr(0)), ""},
|
|
|
|
|
{TypeOf(float32(0)), ""},
|
|
|
|
|
{TypeOf(float64(0)), ""},
|
|
|
|
|
{TypeOf(complex64(0)), ""},
|
|
|
|
|
{TypeOf(complex128(0)), ""},
|
|
|
|
|
{TypeOf(byte(0)), ""},
|
|
|
|
|
{TypeOf(rune(0)), ""},
|
|
|
|
|
{TypeOf([]byte(nil)), ""},
|
|
|
|
|
{TypeOf([]rune(nil)), ""},
|
|
|
|
|
{TypeOf(string("")), ""},
|
|
|
|
|
{TypeOf((*interface{})(nil)).Elem(), ""},
|
|
|
|
|
{TypeOf((*byte)(nil)), ""},
|
|
|
|
|
{TypeOf((*rune)(nil)), ""},
|
|
|
|
|
{TypeOf((*int64)(nil)), ""},
|
2012-01-20 09:26:17 +11:00
|
|
|
{TypeOf(map[string]int{}), ""},
|
|
|
|
|
{TypeOf((*error)(nil)).Elem(), ""},
|
2016-07-11 22:34:30 -07:00
|
|
|
{TypeOf((*Point)(nil)), ""},
|
|
|
|
|
{TypeOf((*Point)(nil)).Elem(), "reflect_test"},
|
2012-01-20 09:26:17 +11:00
|
|
|
}
|
|
|
|
|
for _, test := range tests {
|
|
|
|
|
if path := test.t.PkgPath(); path != test.path {
|
|
|
|
|
t.Errorf("%v.PkgPath() = %q, want %q", test.t, path, test.path)
|
|
|
|
|
}
|
2010-01-24 23:33:59 -08:00
|
|
|
}
|
|
|
|
|
}
|
2010-06-14 11:23:11 -07:00
|
|
|
|
2016-03-30 11:15:01 -04:00
|
|
|
func TestFieldPkgPath(t *testing.T) {
|
2017-08-30 14:17:24 -07:00
|
|
|
type x int
|
2016-03-30 11:15:01 -04:00
|
|
|
typ := TypeOf(struct {
|
|
|
|
|
Exported string
|
|
|
|
|
unexported string
|
|
|
|
|
OtherPkgFields
|
2017-08-30 14:17:24 -07:00
|
|
|
int // issue 21702
|
|
|
|
|
*x // issue 21122
|
2016-03-30 11:15:01 -04:00
|
|
|
}{})
|
2016-11-16 16:13:22 -08:00
|
|
|
|
|
|
|
|
type pkgpathTest struct {
|
2018-05-10 15:17:58 -07:00
|
|
|
index []int
|
|
|
|
|
pkgPath string
|
|
|
|
|
embedded bool
|
2016-11-16 16:13:22 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
checkPkgPath := func(name string, s []pkgpathTest) {
|
|
|
|
|
for _, test := range s {
|
|
|
|
|
f := typ.FieldByIndex(test.index)
|
|
|
|
|
if got, want := f.PkgPath, test.pkgPath; got != want {
|
|
|
|
|
t.Errorf("%s: Field(%d).PkgPath = %q, want %q", name, test.index, got, want)
|
|
|
|
|
}
|
2018-05-10 15:17:58 -07:00
|
|
|
if got, want := f.Anonymous, test.embedded; got != want {
|
2016-11-16 16:13:22 -08:00
|
|
|
t.Errorf("%s: Field(%d).Anonymous = %v, want %v", name, test.index, got, want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
checkPkgPath("testStruct", []pkgpathTest{
|
2016-03-30 11:15:01 -04:00
|
|
|
{[]int{0}, "", false}, // Exported
|
|
|
|
|
{[]int{1}, "reflect_test", false}, // unexported
|
|
|
|
|
{[]int{2}, "", true}, // OtherPkgFields
|
|
|
|
|
{[]int{2, 0}, "", false}, // OtherExported
|
|
|
|
|
{[]int{2, 1}, "reflect", false}, // otherUnexported
|
2017-08-30 14:17:24 -07:00
|
|
|
{[]int{3}, "reflect_test", true}, // int
|
|
|
|
|
{[]int{4}, "reflect_test", true}, // *x
|
2016-11-16 16:13:22 -08:00
|
|
|
})
|
|
|
|
|
|
|
|
|
|
type localOtherPkgFields OtherPkgFields
|
|
|
|
|
typ = TypeOf(localOtherPkgFields{})
|
|
|
|
|
checkPkgPath("localOtherPkgFields", []pkgpathTest{
|
|
|
|
|
{[]int{0}, "", false}, // OtherExported
|
|
|
|
|
{[]int{1}, "reflect", false}, // otherUnexported
|
|
|
|
|
})
|
2016-03-30 11:15:01 -04:00
|
|
|
}
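// Sketch (illustrative): PkgPath doubles as the exported/unexported
// test used throughout the cases above. It is empty exactly for
// exported names.
func isExportedFieldSketch(f StructField) bool {
	return f.PkgPath == "" // unexported fields carry their package path
}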
|
|
|
|
|
|
2011-07-27 13:44:57 +10:00
|
|
|
func TestVariadicType(t *testing.T) {
|
2011-07-27 13:29:44 +10:00
|
|
|
// Test example from Type documentation.
|
2011-01-19 23:09:00 -05:00
|
|
|
var f func(x int, y ...float64)
|
2011-04-25 13:39:16 -04:00
|
|
|
typ := TypeOf(f)
|
|
|
|
|
if typ.NumIn() == 2 && typ.In(0) == TypeOf(int(0)) {
|
2011-04-08 12:26:51 -04:00
|
|
|
sl := typ.In(1)
|
|
|
|
|
if sl.Kind() == Slice {
|
2011-04-25 13:39:16 -04:00
|
|
|
if sl.Elem() == TypeOf(0.0) {
|
2010-06-14 11:23:11 -07:00
|
|
|
// ok
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Failed
|
2011-01-19 23:09:00 -05:00
|
|
|
t.Errorf("want NumIn() = 2, In(0) = int, In(1) = []float64")
|
2010-06-14 11:23:11 -07:00
|
|
|
s := fmt.Sprintf("have NumIn() = %d", typ.NumIn())
|
|
|
|
|
for i := 0; i < typ.NumIn(); i++ {
|
|
|
|
|
s += fmt.Sprintf(", In(%d) = %s", i, typ.In(i))
|
|
|
|
|
}
|
|
|
|
|
t.Error(s)
|
|
|
|
|
}
|
2010-09-27 14:09:10 -04:00
|
|
|
|
2010-09-28 13:43:50 -04:00
|
|
|
type inner struct {
|
|
|
|
|
x int
|
|
|
|
|
}
|
2010-09-27 14:09:10 -04:00
|
|
|
|
|
|
|
|
type outer struct {
|
2010-09-28 13:43:50 -04:00
|
|
|
y int
|
2010-09-27 14:09:10 -04:00
|
|
|
inner
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-19 13:31:58 -04:00
|
|
|
func (*inner) M() {}
|
|
|
|
|
func (*outer) M() {}
|
2010-09-27 14:09:10 -04:00
|
|
|
|
|
|
|
|
func TestNestedMethods(t *testing.T) {
|
2011-04-25 13:39:16 -04:00
|
|
|
typ := TypeOf((*outer)(nil))
|
2016-05-19 13:31:58 -04:00
|
|
|
if typ.NumMethod() != 1 || typ.Method(0).Func.Pointer() != ValueOf((*outer).M).Pointer() {
|
|
|
|
|
t.Errorf("Wrong method table for outer: (M=%p)", (*outer).M)
|
2010-09-27 14:09:10 -04:00
|
|
|
for i := 0; i < typ.NumMethod(); i++ {
|
|
|
|
|
m := typ.Method(i)
|
2011-04-08 12:26:51 -04:00
|
|
|
t.Errorf("\t%d: %s %#x\n", i, m.Name, m.Func.Pointer())
|
2010-09-27 14:09:10 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2010-09-28 13:43:50 -04:00
|
|
|
|
2016-03-22 21:25:40 -04:00
|
|
|
type unexp struct{}
|
|
|
|
|
|
|
|
|
|
func (*unexp) f() (int32, int8) { return 7, 7 }
|
|
|
|
|
func (*unexp) g() (int64, int8) { return 8, 8 }
|
|
|
|
|
|
2016-03-25 11:03:47 -04:00
|
|
|
type unexpI interface {
|
|
|
|
|
f() (int32, int8)
|
|
|
|
|
}
|
2016-03-22 21:25:40 -04:00
|
|
|
|
2016-03-25 11:03:47 -04:00
|
|
|
var unexpi unexpI = new(unexp)
|
|
|
|
|
|
|
|
|
|
func TestUnexportedMethods(t *testing.T) {
|
|
|
|
|
typ := TypeOf(unexpi)
|
2016-03-22 21:25:40 -04:00
|
|
|
|
2016-05-24 19:04:51 -04:00
|
|
|
if got := typ.NumMethod(); got != 0 {
|
|
|
|
|
t.Errorf("NumMethod=%d, want 0 satisfied methods", got)
|
2016-03-22 21:25:40 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-04-21 08:14:50 -04:00
|
|
|
type InnerInt struct {
|
|
|
|
|
X int
|
2010-09-28 13:43:50 -04:00
|
|
|
}
|
|
|
|
|
|
2011-04-21 08:14:50 -04:00
|
|
|
type OuterInt struct {
|
|
|
|
|
Y int
|
|
|
|
|
InnerInt
|
2010-09-28 13:43:50 -04:00
|
|
|
}
|
|
|
|
|
|
2011-04-21 08:14:50 -04:00
|
|
|
func (i *InnerInt) M() int {
|
|
|
|
|
return i.X
|
2010-09-28 13:43:50 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestEmbeddedMethods(t *testing.T) {
|
2011-04-25 13:39:16 -04:00
|
|
|
typ := TypeOf((*OuterInt)(nil))
|
|
|
|
|
if typ.NumMethod() != 1 || typ.Method(0).Func.Pointer() != ValueOf((*OuterInt).M).Pointer() {
|
2011-04-21 08:14:50 -04:00
|
|
|
t.Errorf("Wrong method table for OuterInt: (m=%p)", (*OuterInt).M)
|
2010-09-28 13:43:50 -04:00
|
|
|
for i := 0; i < typ.NumMethod(); i++ {
|
|
|
|
|
m := typ.Method(i)
|
2011-04-08 12:26:51 -04:00
|
|
|
t.Errorf("\t%d: %s %#x\n", i, m.Name, m.Func.Pointer())
|
2010-09-28 13:43:50 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-04-21 08:14:50 -04:00
|
|
|
i := &InnerInt{3}
|
2011-04-25 13:39:16 -04:00
|
|
|
if v := ValueOf(i).Method(0).Call(nil)[0].Int(); v != 3 {
|
2011-04-21 08:14:50 -04:00
|
|
|
t.Errorf("i.M() = %d, want 3", v)
|
2010-09-28 13:43:50 -04:00
|
|
|
}
|
|
|
|
|
|
2011-04-21 08:14:50 -04:00
|
|
|
o := &OuterInt{1, InnerInt{2}}
|
2011-04-25 13:39:16 -04:00
|
|
|
if v := ValueOf(o).Method(0).Call(nil)[0].Int(); v != 2 {
|
2011-04-21 08:14:50 -04:00
|
|
|
t.Errorf("i.M() = %d, want 2", v)
|
2010-09-28 13:43:50 -04:00
|
|
|
}
|
|
|
|
|
|
2011-04-21 08:14:50 -04:00
|
|
|
f := (*OuterInt).M
|
2010-09-28 13:43:50 -04:00
|
|
|
if v := f(o); v != 2 {
|
|
|
|
|
t.Errorf("f(o) = %d, want 2", v)
|
|
|
|
|
}
|
|
|
|
|
}
|
2011-03-03 13:20:17 -05:00
|
|
|
|
2016-03-11 17:49:07 -05:00
|
|
|
type FuncDDD func(...interface{}) error
|
|
|
|
|
|
|
|
|
|
func (f FuncDDD) M() {}
|
|
|
|
|
|
|
|
|
|
func TestNumMethodOnDDD(t *testing.T) {
|
|
|
|
|
rv := ValueOf((FuncDDD)(nil))
|
|
|
|
|
if n := rv.NumMethod(); n != 1 {
|
|
|
|
|
t.Fatalf("NumMethod()=%d, want 1", n)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-03 13:20:17 -05:00
|
|
|
func TestPtrTo(t *testing.T) {
|
2017-02-10 13:08:13 +13:00
|
|
|
// This block of code means that the ptrToThis field of the
|
|
|
|
|
// reflect data for *unsafe.Pointer is nonzero; see
|
|
|
|
|
// https://golang.org/issue/19003
|
|
|
|
|
var x unsafe.Pointer
|
|
|
|
|
var y = &x
|
|
|
|
|
var z = &y
|
|
|
|
|
|
2011-03-03 13:20:17 -05:00
|
|
|
var i int
|
|
|
|
|
|
2017-02-10 13:08:13 +13:00
|
|
|
typ := TypeOf(z)
|
2011-03-03 13:20:17 -05:00
|
|
|
for i = 0; i < 100; i++ {
|
|
|
|
|
typ = PtrTo(typ)
|
|
|
|
|
}
|
|
|
|
|
for i = 0; i < 100; i++ {
|
2011-04-08 12:26:51 -04:00
|
|
|
typ = typ.Elem()
|
2011-03-03 13:20:17 -05:00
|
|
|
}
|
2017-02-10 13:08:13 +13:00
|
|
|
if typ != TypeOf(z) {
|
|
|
|
|
t.Errorf("after 100 PtrTo and Elem, have %s, want %s", typ, TypeOf(z))
|
2011-03-03 13:20:17 -05:00
|
|
|
}
|
|
|
|
|
}
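// Sketch (illustrative): PtrTo and Elem are inverses, which is what the
// 100-iteration loop above relies on. PtrTo(T) yields the type *T, and
// Elem recovers T.
func ptrToElemSketch() bool {
	ti := TypeOf(0)        // int
	pt := PtrTo(ti)        // *int
	return pt.Elem() == ti // true
}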
|
|
|
|
|
|
2013-03-26 11:50:29 -07:00
|
|
|
func TestPtrToGC(t *testing.T) {
|
|
|
|
|
type T *uintptr
|
|
|
|
|
tt := TypeOf(T(nil))
|
|
|
|
|
pt := PtrTo(tt)
|
|
|
|
|
const n = 100
|
|
|
|
|
var x []interface{}
|
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
|
v := New(pt)
|
|
|
|
|
p := new(*uintptr)
|
|
|
|
|
*p = new(uintptr)
|
|
|
|
|
**p = uintptr(i)
|
|
|
|
|
v.Elem().Set(ValueOf(p).Convert(pt))
|
|
|
|
|
x = append(x, v.Interface())
|
|
|
|
|
}
|
|
|
|
|
runtime.GC()
|
|
|
|
|
|
|
|
|
|
for i, xi := range x {
|
|
|
|
|
k := ValueOf(xi).Elem().Elem().Elem().Interface().(uintptr)
|
|
|
|
|
if k != uintptr(i) {
|
|
|
|
|
t.Errorf("lost x[%d] = %d, want %d", i, k, i)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-10 16:33:21 -05:00
|
|
|
func BenchmarkPtrTo(b *testing.B) {
|
|
|
|
|
// Construct a type with a zero ptrToThis.
|
|
|
|
|
type T struct{ int }
|
|
|
|
|
t := SliceOf(TypeOf(T{}))
|
|
|
|
|
ptrToThis := ValueOf(t).Elem().FieldByName("ptrToThis")
|
|
|
|
|
if !ptrToThis.IsValid() {
|
|
|
|
|
b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t)
|
|
|
|
|
}
|
|
|
|
|
if ptrToThis.Int() != 0 {
|
|
|
|
|
b.Fatalf("%v.ptrToThis unexpectedly nonzero", t)
|
|
|
|
|
}
|
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
|
|
// Now benchmark calling PtrTo on it: we'll have to hit the ptrMap cache on
|
|
|
|
|
// every call.
|
|
|
|
|
b.RunParallel(func(pb *testing.PB) {
|
|
|
|
|
for pb.Next() {
|
|
|
|
|
PtrTo(t)
|
|
|
|
|
}
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-03 13:20:17 -05:00
|
|
|
func TestAddr(t *testing.T) {
|
|
|
|
|
var p struct {
|
|
|
|
|
X, Y int
|
|
|
|
|
}
|
|
|
|
|
|
2011-04-25 13:39:16 -04:00
|
|
|
v := ValueOf(&p)
|
2011-04-08 12:26:51 -04:00
|
|
|
v = v.Elem()
|
2011-03-03 13:20:17 -05:00
|
|
|
v = v.Addr()
|
2011-04-08 12:26:51 -04:00
|
|
|
v = v.Elem()
|
|
|
|
|
v = v.Field(0)
|
|
|
|
|
v.SetInt(2)
|
2011-03-03 13:20:17 -05:00
|
|
|
if p.X != 2 {
|
|
|
|
|
t.Errorf("Addr.Elem.Set failed to set value")
|
|
|
|
|
}
|
|
|
|
|
|
2011-04-25 13:39:16 -04:00
|
|
|
// Again but take address of the ValueOf value.
|
2011-03-03 13:20:17 -05:00
|
|
|
// Exercises generation of PtrTypes not present in the binary.
|
2011-04-18 14:35:33 -04:00
|
|
|
q := &p
|
2011-04-25 13:39:16 -04:00
|
|
|
v = ValueOf(&q).Elem()
|
2011-03-03 13:20:17 -05:00
|
|
|
v = v.Addr()
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
v = v.Elem()
|
|
|
|
|
v = v.Elem()
|
2011-03-03 13:20:17 -05:00
|
|
|
v = v.Addr()
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
v = v.Elem()
|
|
|
|
|
v = v.Field(0)
|
|
|
|
|
v.SetInt(3)
|
2011-03-03 13:20:17 -05:00
|
|
|
if p.X != 3 {
|
|
|
|
|
t.Errorf("Addr.Elem.Set failed to set value")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Starting without pointer we should get changed value
|
|
|
|
|
// in interface.
|
reflect: more efficient; cannot Set result of NewValue anymore
* Reduces malloc counts during gob encoder/decoder test from 6/6 to 3/5.
The current reflect uses Set to mean two subtly different things.
(1) If you have a reflect.Value v, it might just represent
itself (as in v = reflect.NewValue(42)), in which case calling
v.Set only changed v, not any other data in the program.
(2) If you have a reflect Value v derived from a pointer
or a slice (as in x := []int{42}; v = reflect.NewValue(x).Index(0)),
v represents the value held there. Changing x[0] affects the
value returned by v.Int(), and calling v.Set affects x[0].
This was not really by design; it just happened that way.
The motivation for the new reflect implementation was
to remove mallocs. The use case (1) has an implicit malloc
inside it. If you can do:
v := reflect.NewValue(0)
v.Set(42)
i := v.Int() // i = 42
then that implies that v is referring to some underlying
chunk of memory in order to remember the 42; that is,
NewValue must have allocated some memory.
Almost all the time you are using reflect the goal is to
inspect or to change other data, not to manipulate data
stored solely inside a reflect.Value.
This CL removes use case (1), so that an assignable
reflect.Value must always refer to some other piece of data
in the program. Put another way, removing this case would
make
v := reflect.NewValue(0)
v.Set(42)
as illegal as
0 = 42.
It would also make this illegal:
x := 0
v := reflect.NewValue(x)
v.Set(42)
for the same reason. (Note that right now, v.Set(42) "succeeds"
but does not change the value of x.)
If you really wanted to make v refer to x, you'd start with &x
and dereference it:
x := 0
v := reflect.NewValue(&x).Elem() // v = *&x
v.Set(42)
It's pretty rare, except in tests, to want to use NewValue and then
call Set to change the Value itself instead of some other piece of
data in the program. I haven't seen it happen once yet while
making the tree build with this change.
For the same reasons, reflect.Zero (formerly reflect.MakeZero)
would also return an unassignable, unaddressable value.
This invalidates the (awkward) idiom:
pv := ... some Ptr Value we have ...
v := reflect.Zero(pv.Type().Elem())
pv.PointTo(v)
which, when the API changed, turned into:
pv := ... some Ptr Value we have ...
v := reflect.Zero(pv.Type().Elem())
pv.Set(v.Addr())
In both, it is far from clear what the code is trying to do. Now that
it is possible, this CL adds reflect.New(Type) Value that does the
obvious thing (same as Go's new), so this code would be replaced by:
pv := ... some Ptr Value we have ...
pv.Set(reflect.New(pv.Type().Elem()))
The changes just described can be confusing to think about,
but I believe it is because the old API was confusing - it was
conflating two different kinds of Values - and that the new API
by itself is pretty simple: you can only Set (or call Addr on)
a Value if it actually addresses some real piece of data; that is,
only if it is the result of dereferencing a Ptr or indexing a Slice.
If you really want the old behavior, you'd get it by translating:
v := reflect.NewValue(x)
into
v := reflect.New(reflect.Typeof(x)).Elem()
v.Set(reflect.NewValue(x))
Gofix will not be able to help with this, because whether
and how to change the code depends on whether the original
code meant use (1) or use (2), so the developer has to read
and think about the code.
You can see the effect on packages in the tree in
https://golang.org/cl/4423043/.
R=r
CC=golang-dev
https://golang.org/cl/4435042
2011-04-18 14:35:33 -04:00
|
|
|
qq := p
|
2011-04-25 13:39:16 -04:00
|
|
|
v = ValueOf(&qq).Elem()
|
2011-03-03 13:20:17 -05:00
|
|
|
v0 := v
|
|
|
|
|
v = v.Addr()
|
reflect: new Type and Value definitions
Type is now an interface that implements all the possible type methods.
Instead of a type switch on a reflect.Type t, switch on t.Kind().
If a method is invoked on the wrong kind of type (for example,
calling t.Field(0) when t.Kind() != Struct), the call panics.
There is one method renaming: t.(*ChanType).Dir() is now t.ChanDir().
Value is now a struct value that implements all the possible value methods.
Instead of a type switch on a reflect.Value v, switch on v.Kind().
If a method is invoked on the wrong kind of value (for example,
calling t.Recv() when t.Kind() != Chan), the call panics.
Since Value is now a struct, not an interface, its zero value
cannot be compared to nil. Instead of v != nil, use v.IsValid().
Instead of other uses of nil as a Value, use Value{}, the zero value.
Many methods have been renamed, most due to signature conflicts:
OLD NEW
v.(*ArrayValue).Elem v.Index
v.(*BoolValue).Get v.Bool
v.(*BoolValue).Set v.SetBool
v.(*ChanType).Dir v.ChanDir
v.(*ChanValue).Get v.Pointer
v.(*ComplexValue).Get v.Complex
v.(*ComplexValue).Overflow v.OverflowComplex
v.(*ComplexValue).Set v.SetComplex
v.(*FloatValue).Get v.Float
v.(*FloatValue).Overflow v.OverflowFloat
v.(*FloatValue).Set v.SetFloat
v.(*FuncValue).Get v.Pointer
v.(*InterfaceValue).Get v.InterfaceData
v.(*IntValue).Get v.Int
v.(*IntValue).Overflow v.OverflowInt
v.(*IntValue).Set v.SetInt
v.(*MapValue).Elem v.MapIndex
v.(*MapValue).Get v.Pointer
v.(*MapValue).Keys v.MapKeys
v.(*MapValue).SetElem v.SetMapIndex
v.(*PtrValue).Get v.Pointer
v.(*SliceValue).Elem v.Index
v.(*SliceValue).Get v.Pointer
v.(*StringValue).Get v.String
v.(*StringValue).Set v.SetString
v.(*UintValue).Get v.Uint
v.(*UintValue).Overflow v.OverflowUint
v.(*UintValue).Set v.SetUint
v.(*UnsafePointerValue).Get v.Pointer
v.(*UnsafePointerValue).Set v.SetPointer
Part of the motivation for this change is to enable a more
efficient implementation of Value, one that does not allocate
memory during most operations. To reduce the size of the CL,
this CL's implementation is a wrapper around the old API.
Later CLs will make the implementation more efficient without
changing the API.
Other CLs to be submitted at the same time as this one
add support for this change to gofix (4343047) and update
the Go source tree (4353043).
R=gri, iant, niemeyer, r, rog, gustavo, r2
CC=golang-dev
https://golang.org/cl/4281055
2011-04-08 12:26:51 -04:00
|
|
|
v = v.Elem()
|
|
|
|
|
v = v.Field(0)
|
|
|
|
|
v.SetInt(4)
|
2011-03-03 13:20:17 -05:00
|
|
|
if p.X != 3 { // should be unchanged from last time
|
|
|
|
|
t.Errorf("somehow value Set changed original p")
|
|
|
|
|
}
|
|
|
|
|
p = v0.Interface().(struct {
|
|
|
|
|
X, Y int
|
|
|
|
|
})
|
|
|
|
|
if p.X != 4 {
|
|
|
|
|
t.Errorf("Addr.Elem.Set valued to set value in top value")
|
|
|
|
|
}
|
2012-02-03 17:36:25 -08:00
|
|
|
|
|
|
|
|
// Verify that taking the address of a type gives us a pointer
|
|
|
|
|
// which we can convert back using the usual interface
|
|
|
|
|
// notation.
|
|
|
|
|
var s struct {
|
|
|
|
|
B *bool
|
|
|
|
|
}
|
|
|
|
|
ps := ValueOf(&s).Elem().Field(0).Addr().Interface()
|
|
|
|
|
*(ps.(**bool)) = new(bool)
|
|
|
|
|
if s.B == nil {
|
|
|
|
|
t.Errorf("Addr.Interface direct assignment failed")
|
|
|
|
|
}
|
2011-03-03 13:20:17 -05:00
|
|
|
}
|
reflect: more efficient; cannot Set result of NewValue anymore
* Reduces malloc counts during gob encoder/decoder test from 6/6 to 3/5.
The current reflect uses Set to mean two subtly different things.
(1) If you have a reflect.Value v, it might just represent
itself (as in v = reflect.NewValue(42)), in which case calling
v.Set only changed v, not any other data in the program.
(2) If you have a reflect Value v derived from a pointer
or a slice (as in x := []int{42}; v = reflect.NewValue(x).Index(0)),
v represents the value held there. Changing x[0] affects the
value returned by v.Int(), and calling v.Set affects x[0].
This was not really by design; it just happened that way.
The motivation for the new reflect implementation was
to remove mallocs. The use case (1) has an implicit malloc
inside it. If you can do:
v := reflect.NewValue(0)
v.Set(42)
i := v.Int() // i = 42
then that implies that v is referring to some underlying
chunk of memory in order to remember the 42; that is,
NewValue must have allocated some memory.
Almost all the time you are using reflect the goal is to
inspect or to change other data, not to manipulate data
stored solely inside a reflect.Value.
This CL removes use case (1), so that an assignable
reflect.Value must always refer to some other piece of data
in the program. Put another way, removing this case would
make
v := reflect.NewValue(0)
v.Set(42)
as illegal as
0 = 42.
It would also make this illegal:
x := 0
v := reflect.NewValue(x)
v.Set(42)
for the same reason. (Note that right now, v.Set(42) "succeeds"
but does not change the value of x.)
If you really wanted to make v refer to x, you'd start with &x
and dereference it:
x := 0
v := reflect.NewValue(&x).Elem() // v = *&x
v.Set(42)
It's pretty rare, except in tests, to want to use NewValue and then
call Set to change the Value itself instead of some other piece of
data in the program. I haven't seen it happen once yet while
making the tree build with this change.
For the same reasons, reflect.Zero (formerly reflect.MakeZero)
would also return an unassignable, unaddressable value.
This invalidates the (awkward) idiom:
pv := ... some Ptr Value we have ...
v := reflect.Zero(pv.Type().Elem())
pv.PointTo(v)
which, when the API changed, turned into:
pv := ... some Ptr Value we have ...
v := reflect.Zero(pv.Type().Elem())
pv.Set(v.Addr())
In both, it is far from clear what the code is trying to do. Now that
it is possible, this CL adds reflect.New(Type) Value that does the
obvious thing (same as Go's new), so this code would be replaced by:
pv := ... some Ptr Value we have ...
pv.Set(reflect.New(pv.Type().Elem()))
The changes just described can be confusing to think about,
but I believe it is because the old API was confusing - it was
conflating two different kinds of Values - and that the new API
by itself is pretty simple: you can only Set (or call Addr on)
a Value if it actually addresses some real piece of data; that is,
only if it is the result of dereferencing a Ptr or indexing a Slice.
If you really want the old behavior, you'd get it by translating:
v := reflect.NewValue(x)
into
v := reflect.New(reflect.Typeof(x)).Elem()
v.Set(reflect.NewValue(x))
Gofix will not be able to help with this, because whether
and how to change the code depends on whether the original
code meant use (1) or use (2), so the developer has to read
and think about the code.
You can see the effect on packages in the tree in
https://golang.org/cl/4423043/.
R=r
CC=golang-dev
https://golang.org/cl/4435042
2011-04-18 14:35:33 -04:00
|
|
|
|
|
|
|
|
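
// noAlloc runs f repeatedly under testing.AllocsPerRun and reports an
// error if any run allocates. It is skipped in short mode and when
// GOMAXPROCS > 1, where allocation counts are unreliable.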
func noAlloc(t *testing.T, n int, f func(int)) {
	if testing.Short() {
		t.Skip("skipping malloc count in short mode")
	}
	if runtime.GOMAXPROCS(0) > 1 {
		t.Skip("skipping; GOMAXPROCS>1")
	}
	i := -1
	allocs := testing.AllocsPerRun(n, func() {
		f(i)
		i++
	})
	if allocs > 0 {
		t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs)
	}
}
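
// TestAllocations checks that storing a function in an interface,
// wrapping it with ValueOf, and calling it back through Interface
// does not allocate.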
func TestAllocations(t *testing.T) {
	noAlloc(t, 100, func(j int) {
		var i interface{}
		var v Value

		// We can uncomment this when compiler escape analysis
		// is good enough to see that the integer assigned to i
		// does not escape and therefore need not be allocated.
		//
		// i = 42 + j
		// v = ValueOf(i)
		// if int(v.Int()) != 42+j {
		//	panic("wrong int")
		// }

		i = func(j int) int { return j }
		v = ValueOf(i)
		if v.Interface().(func(int) int)(j) != j {
			panic("wrong result")
		}
	})
}
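
// TestSmallNegativeInt checks that a negative value of a small signed
// kind is sign-extended correctly by Value.Int.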
func TestSmallNegativeInt(t *testing.T) {
	i := int16(-1)
	v := ValueOf(i)
	if v.Int() != -1 {
		t.Errorf("int16(-1).Int() returned %v", v.Int())
	}
}
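
// TestIndex exercises Value.Index on a slice, an array, and a string.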
func TestIndex(t *testing.T) {
	xs := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	v := ValueOf(xs).Index(3).Interface().(byte)
	if v != xs[3] {
		t.Errorf("xs.Index(3) = %v; expected %v", v, xs[3])
	}
	xa := [8]byte{10, 20, 30, 40, 50, 60, 70, 80}
	v = ValueOf(xa).Index(2).Interface().(byte)
	if v != xa[2] {
		t.Errorf("xa.Index(2) = %v; expected %v", v, xa[2])
	}
	s := "0123456789"
	v = ValueOf(s).Index(3).Interface().(byte)
	if v != s[3] {
		t.Errorf("s.Index(3) = %v; expected %v", v, s[3])
	}
}
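
// TestSlice exercises Value.Slice on slices, addressable arrays, and
// strings, checking the length, capacity, and contents of each result.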
func TestSlice(t *testing.T) {
	xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
	v := ValueOf(xs).Slice(3, 5).Interface().([]int)
	if len(v) != 2 {
		t.Errorf("len(xs.Slice(3, 5)) = %d", len(v))
	}
	if cap(v) != 5 {
		t.Errorf("cap(xs.Slice(3, 5)) = %d", cap(v))
	}
	if !DeepEqual(v[0:5], xs[3:]) {
		t.Errorf("xs.Slice(3, 5)[0:5] = %v", v[0:5])
	}
	xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
	v = ValueOf(&xa).Elem().Slice(2, 5).Interface().([]int)
	if len(v) != 3 {
		t.Errorf("len(xa.Slice(2, 5)) = %d", len(v))
	}
	if cap(v) != 6 {
		t.Errorf("cap(xa.Slice(2, 5)) = %d", cap(v))
	}
	if !DeepEqual(v[0:6], xa[2:]) {
		t.Errorf("xa.Slice(2, 5)[0:6] = %v", v[0:6])
	}
	s := "0123456789"
	vs := ValueOf(s).Slice(3, 5).Interface().(string)
	if vs != s[3:5] {
		t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5])
	}

	rv := ValueOf(&xs).Elem()
	rv = rv.Slice(3, 4)
	ptr2 := rv.Pointer()
	rv = rv.Slice(5, 5)
	ptr3 := rv.Pointer()
	if ptr3 != ptr2 {
		t.Errorf("xs.Slice(3,4).Slice(5,5).Pointer() = %#x, want %#x", ptr3, ptr2)
	}
}
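
// TestSlice3 exercises the three-index form Value.Slice3, which
// corresponds to the v[i:j:k] expression, including the panics
// required for invalid index combinations and for strings.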
func TestSlice3(t *testing.T) {
	xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
	v := ValueOf(xs).Slice3(3, 5, 7).Interface().([]int)
	if len(v) != 2 {
		t.Errorf("len(xs.Slice3(3, 5, 7)) = %d", len(v))
	}
	if cap(v) != 4 {
		t.Errorf("cap(xs.Slice3(3, 5, 7)) = %d", cap(v))
	}
	if !DeepEqual(v[0:4], xs[3:7:7]) {
		t.Errorf("xs.Slice3(3, 5, 7)[0:4] = %v", v[0:4])
	}
	rv := ValueOf(&xs).Elem()
	shouldPanic(func() { rv.Slice3(1, 2, 1) })
	shouldPanic(func() { rv.Slice3(1, 1, 11) })
	shouldPanic(func() { rv.Slice3(2, 2, 1) })

	xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
	v = ValueOf(&xa).Elem().Slice3(2, 5, 6).Interface().([]int)
	if len(v) != 3 {
		t.Errorf("len(xa.Slice3(2, 5, 6)) = %d", len(v))
	}
	if cap(v) != 4 {
		t.Errorf("cap(xa.Slice3(2, 5, 6)) = %d", cap(v))
	}
	if !DeepEqual(v[0:4], xa[2:6:6]) {
		t.Errorf("xa.Slice3(2, 5, 6)[0:4] = %v", v[0:4])
	}
	rv = ValueOf(&xa).Elem()
	shouldPanic(func() { rv.Slice3(1, 2, 1) })
	shouldPanic(func() { rv.Slice3(1, 1, 11) })
	shouldPanic(func() { rv.Slice3(2, 2, 1) })

	s := "hello world"
	rv = ValueOf(&s).Elem()
	shouldPanic(func() { rv.Slice3(1, 2, 3) })

	rv = ValueOf(&xs).Elem()
	rv = rv.Slice3(3, 5, 7)
	ptr2 := rv.Pointer()
	rv = rv.Slice3(4, 4, 4)
	ptr3 := rv.Pointer()
	if ptr3 != ptr2 {
		t.Errorf("xs.Slice3(3,5,7).Slice3(4,4,4).Pointer() = %#x, want %#x", ptr3, ptr2)
	}
}
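
// TestSetLenCap checks that SetLen and SetCap can only shrink a slice
// within its existing capacity and panic otherwise, for example:
//
//	vs := ValueOf(&xs).Elem() // xs has len 8, cap 8
//	vs.SetLen(5)              // ok: len 5, cap 8
//	vs.SetLen(10)             // panics: beyond cap
//
// Both must panic on arrays.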
func TestSetLenCap(t *testing.T) {
	xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
	xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}

	vs := ValueOf(&xs).Elem()
	shouldPanic(func() { vs.SetLen(10) })
	shouldPanic(func() { vs.SetCap(10) })
	shouldPanic(func() { vs.SetLen(-1) })
	shouldPanic(func() { vs.SetCap(-1) })
	shouldPanic(func() { vs.SetCap(6) }) // smaller than len
	vs.SetLen(5)
	if len(xs) != 5 || cap(xs) != 8 {
		t.Errorf("after SetLen(5), len, cap = %d, %d, want 5, 8", len(xs), cap(xs))
	}
	vs.SetCap(6)
	if len(xs) != 5 || cap(xs) != 6 {
		t.Errorf("after SetCap(6), len, cap = %d, %d, want 5, 6", len(xs), cap(xs))
	}
	vs.SetCap(5)
	if len(xs) != 5 || cap(xs) != 5 {
		t.Errorf("after SetCap(5), len, cap = %d, %d, want 5, 5", len(xs), cap(xs))
	}
	shouldPanic(func() { vs.SetCap(4) }) // smaller than len
	shouldPanic(func() { vs.SetLen(6) }) // bigger than cap

	va := ValueOf(&xa).Elem()
	shouldPanic(func() { va.SetLen(8) })
	shouldPanic(func() { va.SetCap(8) })
}
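
// TestVariadic calls a variadic function through both Call, which
// spreads the arguments, and CallSlice, which passes the variadic
// tail as a single slice.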
func TestVariadic(t *testing.T) {
	var b bytes.Buffer
	V := ValueOf

	b.Reset()
	V(fmt.Fprintf).Call([]Value{V(&b), V("%s, %d world"), V("hello"), V(42)})
	if b.String() != "hello, 42 world" {
		t.Errorf("after Fprintf Call: %q != %q", b.String(), "hello, 42 world")
	}

	b.Reset()
	V(fmt.Fprintf).CallSlice([]Value{V(&b), V("%s, %d world"), V([]interface{}{"hello", 42})})
	if b.String() != "hello, 42 world" {
		t.Errorf("after Fprintf CallSlice: %q != %q", b.String(), "hello, 42 world")
	}
}
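
// TestFuncArg passes a function value as an argument to a reflected
// call and checks that it is invoked correctly.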
func TestFuncArg(t *testing.T) {
	f1 := func(i int, f func(int) int) int { return f(i) }
	f2 := func(i int) int { return i + 1 }
	r := ValueOf(f1).Call([]Value{ValueOf(100), ValueOf(f2)})
	if r[0].Int() != 101 {
		t.Errorf("function returned %d, want 101", r[0].Int())
	}
}
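
// TestStructArg checks that a struct argument with trailing padding
// is passed by Call without corrupting the argument that follows it.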
func TestStructArg(t *testing.T) {
	type padded struct {
		B string
		C int32
	}
	var (
		gotA  padded
		gotB  uint32
		wantA = padded{"3", 4}
		wantB = uint32(5)
	)
	f := func(a padded, b uint32) {
		gotA, gotB = a, b
	}
	ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)})
	if gotA != wantA || gotB != wantB {
		t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB)
	}
}
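
// tagGetTests and TestTagGet cover StructTag.Get lookups: present and
// missing keys, near-miss key names, and values containing spaces and
// tabs.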
var tagGetTests = []struct {
	Tag   StructTag
	Key   string
	Value string
}{
	{`protobuf:"PB(1,2)"`, `protobuf`, `PB(1,2)`},
	{`protobuf:"PB(1,2)"`, `foo`, ``},
	{`protobuf:"PB(1,2)"`, `rotobuf`, ``},
	{`protobuf:"PB(1,2)" json:"name"`, `json`, `name`},
	{`protobuf:"PB(1,2)" json:"name"`, `protobuf`, `PB(1,2)`},
	{`k0:"values contain spaces" k1:"and\ttabs"`, "k0", "values contain spaces"},
	{`k0:"values contain spaces" k1:"and\ttabs"`, "k1", "and\ttabs"},
}

func TestTagGet(t *testing.T) {
	for _, tt := range tagGetTests {
		if v := tt.Tag.Get(tt.Key); v != tt.Value {
			t.Errorf("StructTag(%#q).Get(%#q) = %#q, want %#q", tt.Tag, tt.Key, v, tt.Value)
		}
	}
}
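
// TestBytes and TestSetBytes check that Bytes and SetBytes share the
// underlying array with the original slice rather than copying it.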
func TestBytes(t *testing.T) {
	type B []byte
	x := B{1, 2, 3, 4}
	y := ValueOf(x).Bytes()
	if !bytes.Equal(x, y) {
		t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
	}
	if &x[0] != &y[0] {
		t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
	}
}

func TestSetBytes(t *testing.T) {
	type B []byte
	var x B
	y := []byte{1, 2, 3, 4}
	ValueOf(&x).Elem().SetBytes(y)
	if !bytes.Equal(x, y) {
		t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
	}
	if &x[0] != &y[0] {
		t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
	}
}
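
// Private, private, and Public mix exported and unexported fields,
// methods, and embedded types. TestUnexported checks that unexported
// data is visible to reflection but panics when extracted through
// Interface.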
type Private struct {
	x int
	y **int
	Z int
}

func (p *Private) m() {
}

type private struct {
	Z int
	z int
	S string
	A [1]Private
	T []Private
}

func (p *private) P() {
}

type Public struct {
	X int
	Y **int
	private
}

func (p *Public) M() {
}

func TestUnexported(t *testing.T) {
	var pub Public
	pub.S = "S"
	pub.T = pub.A[:]
	v := ValueOf(&pub)
	isValid(v.Elem().Field(0))
	isValid(v.Elem().Field(1))
	isValid(v.Elem().Field(2))
	isValid(v.Elem().FieldByName("X"))
	isValid(v.Elem().FieldByName("Y"))
	isValid(v.Elem().FieldByName("Z"))
	isValid(v.Type().Method(0).Func)
	m, _ := v.Type().MethodByName("M")
	isValid(m.Func)
	m, _ = v.Type().MethodByName("P")
	isValid(m.Func)
	isNonNil(v.Elem().Field(0).Interface())
	isNonNil(v.Elem().Field(1).Interface())
	isNonNil(v.Elem().Field(2).Field(2).Index(0))
	isNonNil(v.Elem().FieldByName("X").Interface())
	isNonNil(v.Elem().FieldByName("Y").Interface())
	isNonNil(v.Elem().FieldByName("Z").Interface())
	isNonNil(v.Elem().FieldByName("S").Index(0).Interface())
	isNonNil(v.Type().Method(0).Func.Interface())
	m, _ = v.Type().MethodByName("P")
	isNonNil(m.Func.Interface())

	var priv Private
	v = ValueOf(&priv)
	isValid(v.Elem().Field(0))
	isValid(v.Elem().Field(1))
	isValid(v.Elem().FieldByName("x"))
	isValid(v.Elem().FieldByName("y"))
	shouldPanic(func() { v.Elem().Field(0).Interface() })
	shouldPanic(func() { v.Elem().Field(1).Interface() })
	shouldPanic(func() { v.Elem().FieldByName("x").Interface() })
	shouldPanic(func() { v.Elem().FieldByName("y").Interface() })
	shouldPanic(func() { v.Type().Method(0) })
}
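
// TestSetPanic enumerates which struct fields may be set: Set must
// panic unless the value is addressable and was not reached through
// an unexported field.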
func TestSetPanic(t *testing.T) {
	ok := func(f func()) { f() }
	bad := shouldPanic
	clear := func(v Value) { v.Set(Zero(v.Type())) }

	type t0 struct {
		W int
	}

	type t1 struct {
		Y int
		t0
	}

	type T2 struct {
		Z       int
		namedT0 t0
	}

	type T struct {
		X int
		t1
		T2
		NamedT1 t1
		NamedT2 T2
		namedT1 t1
		namedT2 T2
	}

	// not addressable
	v := ValueOf(T{})
	bad(func() { clear(v.Field(0)) })                   // .X
	bad(func() { clear(v.Field(1)) })                   // .t1
	bad(func() { clear(v.Field(1).Field(0)) })          // .t1.Y
	bad(func() { clear(v.Field(1).Field(1)) })          // .t1.t0
	bad(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
	bad(func() { clear(v.Field(2)) })                   // .T2
	bad(func() { clear(v.Field(2).Field(0)) })          // .T2.Z
	bad(func() { clear(v.Field(2).Field(1)) })          // .T2.namedT0
	bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
	bad(func() { clear(v.Field(3)) })                   // .NamedT1
	bad(func() { clear(v.Field(3).Field(0)) })          // .NamedT1.Y
	bad(func() { clear(v.Field(3).Field(1)) })          // .NamedT1.t0
	bad(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
	bad(func() { clear(v.Field(4)) })                   // .NamedT2
	bad(func() { clear(v.Field(4).Field(0)) })          // .NamedT2.Z
	bad(func() { clear(v.Field(4).Field(1)) })          // .NamedT2.namedT0
	bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
	bad(func() { clear(v.Field(5)) })                   // .namedT1
	bad(func() { clear(v.Field(5).Field(0)) })          // .namedT1.Y
	bad(func() { clear(v.Field(5).Field(1)) })          // .namedT1.t0
	bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
	bad(func() { clear(v.Field(6)) })                   // .namedT2
	bad(func() { clear(v.Field(6).Field(0)) })          // .namedT2.Z
	bad(func() { clear(v.Field(6).Field(1)) })          // .namedT2.namedT0
	bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W

	// addressable
	v = ValueOf(&T{}).Elem()
	ok(func() { clear(v.Field(0)) })                    // .X
	bad(func() { clear(v.Field(1)) })                   // .t1
	ok(func() { clear(v.Field(1).Field(0)) })           // .t1.Y
	bad(func() { clear(v.Field(1).Field(1)) })          // .t1.t0
	ok(func() { clear(v.Field(1).Field(1).Field(0)) })  // .t1.t0.W
	ok(func() { clear(v.Field(2)) })                    // .T2
	ok(func() { clear(v.Field(2).Field(0)) })           // .T2.Z
	bad(func() { clear(v.Field(2).Field(1)) })          // .T2.namedT0
	bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
	ok(func() { clear(v.Field(3)) })                    // .NamedT1
	ok(func() { clear(v.Field(3).Field(0)) })           // .NamedT1.Y
	bad(func() { clear(v.Field(3).Field(1)) })          // .NamedT1.t0
	ok(func() { clear(v.Field(3).Field(1).Field(0)) })  // .NamedT1.t0.W
	ok(func() { clear(v.Field(4)) })                    // .NamedT2
	ok(func() { clear(v.Field(4).Field(0)) })           // .NamedT2.Z
	bad(func() { clear(v.Field(4).Field(1)) })          // .NamedT2.namedT0
	bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
	bad(func() { clear(v.Field(5)) })                   // .namedT1
	bad(func() { clear(v.Field(5).Field(0)) })          // .namedT1.Y
	bad(func() { clear(v.Field(5).Field(1)) })          // .namedT1.t0
	bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
	bad(func() { clear(v.Field(6)) })                   // .namedT2
	bad(func() { clear(v.Field(6).Field(0)) })          // .namedT2.Z
	bad(func() { clear(v.Field(6).Field(1)) })          // .namedT2.namedT0
	bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
}
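
// timp implements the exported and unexported interface methods used
// by TestCallPanic, which enumerates when a reflected method call
// must panic: unexported methods, and any method reached through an
// unexported embedded field.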
type timp int

func (t timp) W() {}
func (t timp) Y() {}
func (t timp) w() {}
func (t timp) y() {}

func TestCallPanic(t *testing.T) {
	type t0 interface {
		W()
		w()
	}
	type T1 interface {
		Y()
		y()
	}
	type T2 struct {
		T1
		t0
	}
	type T struct {
		t0 // 0
		T1 // 1

		NamedT0 t0 // 2
		NamedT1 T1 // 3
		NamedT2 T2 // 4

		namedT0 t0 // 5
		namedT1 T1 // 6
		namedT2 T2 // 7
	}
	ok := func(f func()) { f() }
	bad := shouldPanic
	call := func(v Value) { v.Call(nil) }

	i := timp(0)
	v := ValueOf(T{i, i, i, i, T2{i, i}, i, i, T2{i, i}})
	ok(func() { call(v.Field(0).Method(0)) })         // .t0.W
	bad(func() { call(v.Field(0).Elem().Method(0)) }) // .t0.W
	bad(func() { call(v.Field(0).Method(1)) })        // .t0.w
	bad(func() { call(v.Field(0).Elem().Method(2)) }) // .t0.w
	ok(func() { call(v.Field(1).Method(0)) })         // .T1.Y
	ok(func() { call(v.Field(1).Elem().Method(0)) })  // .T1.Y
	bad(func() { call(v.Field(1).Method(1)) })        // .T1.y
	bad(func() { call(v.Field(1).Elem().Method(2)) }) // .T1.y

	ok(func() { call(v.Field(2).Method(0)) })         // .NamedT0.W
	ok(func() { call(v.Field(2).Elem().Method(0)) })  // .NamedT0.W
	bad(func() { call(v.Field(2).Method(1)) })        // .NamedT0.w
	bad(func() { call(v.Field(2).Elem().Method(2)) }) // .NamedT0.w

	ok(func() { call(v.Field(3).Method(0)) })         // .NamedT1.Y
	ok(func() { call(v.Field(3).Elem().Method(0)) })  // .NamedT1.Y
	bad(func() { call(v.Field(3).Method(1)) })        // .NamedT1.y
	bad(func() { call(v.Field(3).Elem().Method(3)) }) // .NamedT1.y

	ok(func() { call(v.Field(4).Field(0).Method(0)) })         // .NamedT2.T1.Y
	ok(func() { call(v.Field(4).Field(0).Elem().Method(0)) })  // .NamedT2.T1.W
	ok(func() { call(v.Field(4).Field(1).Method(0)) })         // .NamedT2.t0.W
	bad(func() { call(v.Field(4).Field(1).Elem().Method(0)) }) // .NamedT2.t0.W

	bad(func() { call(v.Field(5).Method(0)) })        // .namedT0.W
	bad(func() { call(v.Field(5).Elem().Method(0)) }) // .namedT0.W
	bad(func() { call(v.Field(5).Method(1)) })        // .namedT0.w
	bad(func() { call(v.Field(5).Elem().Method(2)) }) // .namedT0.w

	bad(func() { call(v.Field(6).Method(0)) })        // .namedT1.Y
	bad(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.Y
	bad(func() { call(v.Field(6).Method(0)) })        // .namedT1.y
	bad(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.y

	bad(func() { call(v.Field(7).Field(0).Method(0)) })        // .namedT2.T1.Y
	bad(func() { call(v.Field(7).Field(0).Elem().Method(0)) }) // .namedT2.T1.W
	bad(func() { call(v.Field(7).Field(1).Method(0)) })        // .namedT2.t0.W
	bad(func() { call(v.Field(7).Field(1).Elem().Method(0)) }) // .namedT2.t0.W
}
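
// shouldPanic, isNonNil, and isValid are small assertion helpers used
// throughout these tests.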
func shouldPanic(f func()) {
	defer func() {
		if recover() == nil {
			panic("did not panic")
		}
	}()
	f()
}

func isNonNil(x interface{}) {
	if x == nil {
		panic("nil interface")
	}
}

func isValid(v Value) {
	if !v.IsValid() {
		panic("zero Value")
	}
}
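
// TestAlias checks that Interface snapshots a string value, so a
// later SetString does not change a previously extracted result.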
func TestAlias(t *testing.T) {
	x := string("hello")
	v := ValueOf(&x).Elem()
	oldvalue := v.Interface()
	v.SetString("world")
	newvalue := v.Interface()

	if oldvalue != "hello" || newvalue != "world" {
		t.Errorf("aliasing: old=%q new=%q, want hello, world", oldvalue, newvalue)
	}
}
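
// V abbreviates ValueOf in the conversion tables below. The *V
// helpers return addressable Values whose static types are the given
// interface types.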
var V = ValueOf

func EmptyInterfaceV(x interface{}) Value {
	return ValueOf(&x).Elem()
}

func ReaderV(x io.Reader) Value {
	return ValueOf(&x).Elem()
}

func ReadWriterV(x io.ReadWriter) Value {
	return ValueOf(&x).Elem()
}

type Empty struct{}
type MyStruct struct {
	x int `some:"tag"`
}
type MyString string
type MyBytes []byte
type MyRunes []int32
type MyFunc func()
type MyByte byte
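
// convertTests lists pairs of values used to exercise Value.Convert;
// the commented-out program below regenerates the numeric pairs.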
var convertTests = []struct {
	in  Value
	out Value
}{
	// numbers
	/*
		Edit .+1,/\*\//-1>cat >/tmp/x.go && go run /tmp/x.go

		package main

		import "fmt"

		var numbers = []string{
			"int8", "uint8", "int16", "uint16",
			"int32", "uint32", "int64", "uint64",
			"int", "uint", "uintptr",
			"float32", "float64",
		}

		func main() {
			// all pairs but in an unusual order,
			// to emit all the int8, uint8 cases
			// before n grows too big.
			n := 1
			for i, f := range numbers {
				for _, g := range numbers[i:] {
					fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", f, n, g, n)
					n++
					if f != g {
						fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", g, n, f, n)
						n++
					}
				}
			}
		}
	*/
	{V(int8(1)), V(int8(1))},
	{V(int8(2)), V(uint8(2))},
	{V(uint8(3)), V(int8(3))},
	{V(int8(4)), V(int16(4))},
	{V(int16(5)), V(int8(5))},
	{V(int8(6)), V(uint16(6))},
	{V(uint16(7)), V(int8(7))},
	{V(int8(8)), V(int32(8))},
	{V(int32(9)), V(int8(9))},
	{V(int8(10)), V(uint32(10))},
	{V(uint32(11)), V(int8(11))},
	{V(int8(12)), V(int64(12))},
	{V(int64(13)), V(int8(13))},
	{V(int8(14)), V(uint64(14))},
	{V(uint64(15)), V(int8(15))},
	{V(int8(16)), V(int(16))},
	{V(int(17)), V(int8(17))},
	{V(int8(18)), V(uint(18))},
	{V(uint(19)), V(int8(19))},
	{V(int8(20)), V(uintptr(20))},
	{V(uintptr(21)), V(int8(21))},
	{V(int8(22)), V(float32(22))},
	{V(float32(23)), V(int8(23))},
	{V(int8(24)), V(float64(24))},
	{V(float64(25)), V(int8(25))},
	{V(uint8(26)), V(uint8(26))},
	{V(uint8(27)), V(int16(27))},
	{V(int16(28)), V(uint8(28))},
	{V(uint8(29)), V(uint16(29))},
	{V(uint16(30)), V(uint8(30))},
	{V(uint8(31)), V(int32(31))},
	{V(int32(32)), V(uint8(32))},
	{V(uint8(33)), V(uint32(33))},
	{V(uint32(34)), V(uint8(34))},
	{V(uint8(35)), V(int64(35))},
	{V(int64(36)), V(uint8(36))},
	{V(uint8(37)), V(uint64(37))},
	{V(uint64(38)), V(uint8(38))},
	{V(uint8(39)), V(int(39))},
	{V(int(40)), V(uint8(40))},
	{V(uint8(41)), V(uint(41))},
	{V(uint(42)), V(uint8(42))},
	{V(uint8(43)), V(uintptr(43))},
	{V(uintptr(44)), V(uint8(44))},
	{V(uint8(45)), V(float32(45))},
	{V(float32(46)), V(uint8(46))},
	{V(uint8(47)), V(float64(47))},
	{V(float64(48)), V(uint8(48))},
	{V(int16(49)), V(int16(49))},
	{V(int16(50)), V(uint16(50))},
	{V(uint16(51)), V(int16(51))},
	{V(int16(52)), V(int32(52))},
	{V(int32(53)), V(int16(53))},
	{V(int16(54)), V(uint32(54))},
	{V(uint32(55)), V(int16(55))},
	{V(int16(56)), V(int64(56))},
	{V(int64(57)), V(int16(57))},
	{V(int16(58)), V(uint64(58))},
	{V(uint64(59)), V(int16(59))},
	{V(int16(60)), V(int(60))},
	{V(int(61)), V(int16(61))},
	{V(int16(62)), V(uint(62))},
	{V(uint(63)), V(int16(63))},
	{V(int16(64)), V(uintptr(64))},
	{V(uintptr(65)), V(int16(65))},
	{V(int16(66)), V(float32(66))},
	{V(float32(67)), V(int16(67))},
	{V(int16(68)), V(float64(68))},
	{V(float64(69)), V(int16(69))},
	{V(uint16(70)), V(uint16(70))},
	{V(uint16(71)), V(int32(71))},
	{V(int32(72)), V(uint16(72))},
	{V(uint16(73)), V(uint32(73))},
	{V(uint32(74)), V(uint16(74))},
	{V(uint16(75)), V(int64(75))},
	{V(int64(76)), V(uint16(76))},
	{V(uint16(77)), V(uint64(77))},
	{V(uint64(78)), V(uint16(78))},
	{V(uint16(79)), V(int(79))},
	{V(int(80)), V(uint16(80))},
	{V(uint16(81)), V(uint(81))},
	{V(uint(82)), V(uint16(82))},
	{V(uint16(83)), V(uintptr(83))},
	{V(uintptr(84)), V(uint16(84))},
	{V(uint16(85)), V(float32(85))},
	{V(float32(86)), V(uint16(86))},
	{V(uint16(87)), V(float64(87))},
	{V(float64(88)), V(uint16(88))},
	{V(int32(89)), V(int32(89))},
	{V(int32(90)), V(uint32(90))},
	{V(uint32(91)), V(int32(91))},
	{V(int32(92)), V(int64(92))},
	{V(int64(93)), V(int32(93))},
	{V(int32(94)), V(uint64(94))},
	{V(uint64(95)), V(int32(95))},
	{V(int32(96)), V(int(96))},
	{V(int(97)), V(int32(97))},
	{V(int32(98)), V(uint(98))},
	{V(uint(99)), V(int32(99))},
	{V(int32(100)), V(uintptr(100))},
	{V(uintptr(101)), V(int32(101))},
	{V(int32(102)), V(float32(102))},
	{V(float32(103)), V(int32(103))},
	{V(int32(104)), V(float64(104))},
	{V(float64(105)), V(int32(105))},
	{V(uint32(106)), V(uint32(106))},
	{V(uint32(107)), V(int64(107))},
	{V(int64(108)), V(uint32(108))},
	{V(uint32(109)), V(uint64(109))},
	{V(uint64(110)), V(uint32(110))},
	{V(uint32(111)), V(int(111))},
	{V(int(112)), V(uint32(112))},
	{V(uint32(113)), V(uint(113))},
	{V(uint(114)), V(uint32(114))},
	{V(uint32(115)), V(uintptr(115))},
	{V(uintptr(116)), V(uint32(116))},
	{V(uint32(117)), V(float32(117))},
	{V(float32(118)), V(uint32(118))},
	{V(uint32(119)), V(float64(119))},
	{V(float64(120)), V(uint32(120))},
	{V(int64(121)), V(int64(121))},
	{V(int64(122)), V(uint64(122))},
	{V(uint64(123)), V(int64(123))},
	{V(int64(124)), V(int(124))},
	{V(int(125)), V(int64(125))},
	{V(int64(126)), V(uint(126))},
	{V(uint(127)), V(int64(127))},
	{V(int64(128)), V(uintptr(128))},
	{V(uintptr(129)), V(int64(129))},
	{V(int64(130)), V(float32(130))},
	{V(float32(131)), V(int64(131))},
	{V(int64(132)), V(float64(132))},
	{V(float64(133)), V(int64(133))},
	{V(uint64(134)), V(uint64(134))},
	{V(uint64(135)), V(int(135))},
	{V(int(136)), V(uint64(136))},
	{V(uint64(137)), V(uint(137))},
	{V(uint(138)), V(uint64(138))},
	{V(uint64(139)), V(uintptr(139))},
	{V(uintptr(140)), V(uint64(140))},
	{V(uint64(141)), V(float32(141))},
	{V(float32(142)), V(uint64(142))},
	{V(uint64(143)), V(float64(143))},
	{V(float64(144)), V(uint64(144))},
	{V(int(145)), V(int(145))},
	{V(int(146)), V(uint(146))},
	{V(uint(147)), V(int(147))},
	{V(int(148)), V(uintptr(148))},
	{V(uintptr(149)), V(int(149))},
	{V(int(150)), V(float32(150))},
	{V(float32(151)), V(int(151))},
	{V(int(152)), V(float64(152))},
	{V(float64(153)), V(int(153))},
	{V(uint(154)), V(uint(154))},
	{V(uint(155)), V(uintptr(155))},
	{V(uintptr(156)), V(uint(156))},
	{V(uint(157)), V(float32(157))},
	{V(float32(158)), V(uint(158))},
	{V(uint(159)), V(float64(159))},
	{V(float64(160)), V(uint(160))},
	{V(uintptr(161)), V(uintptr(161))},
	{V(uintptr(162)), V(float32(162))},
	{V(float32(163)), V(uintptr(163))},
	{V(uintptr(164)), V(float64(164))},
	{V(float64(165)), V(uintptr(165))},
	{V(float32(166)), V(float32(166))},
	{V(float32(167)), V(float64(167))},
	{V(float64(168)), V(float32(168))},
	{V(float64(169)), V(float64(169))},

	// truncation
	{V(float64(1.5)), V(int(1))},

	// complex
	{V(complex64(1i)), V(complex64(1i))},
	{V(complex64(2i)), V(complex128(2i))},
	{V(complex128(3i)), V(complex64(3i))},
	{V(complex128(4i)), V(complex128(4i))},

	// string
	{V(string("hello")), V(string("hello"))},
	{V(string("bytes1")), V([]byte("bytes1"))},
	{V([]byte("bytes2")), V(string("bytes2"))},
	{V([]byte("bytes3")), V([]byte("bytes3"))},
	{V(string("runes♝")), V([]rune("runes♝"))},
	{V([]rune("runes♕")), V(string("runes♕"))},
	{V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
	{V(int('a')), V(string("a"))},
	{V(int8('a')), V(string("a"))},
	{V(int16('a')), V(string("a"))},
	{V(int32('a')), V(string("a"))},
	{V(int64('a')), V(string("a"))},
	{V(uint('a')), V(string("a"))},
	{V(uint8('a')), V(string("a"))},
	{V(uint16('a')), V(string("a"))},
	{V(uint32('a')), V(string("a"))},
	{V(uint64('a')), V(string("a"))},
	{V(uintptr('a')), V(string("a"))},
	{V(int(-1)), V(string("\uFFFD"))},
	{V(int8(-2)), V(string("\uFFFD"))},
	{V(int16(-3)), V(string("\uFFFD"))},
	{V(int32(-4)), V(string("\uFFFD"))},
	{V(int64(-5)), V(string("\uFFFD"))},
	{V(uint(0x110001)), V(string("\uFFFD"))},
|
|
|
|
|
{V(uint32(0x110002)), V(string("\uFFFD"))},
|
|
|
|
|
{V(uint64(0x110003)), V(string("\uFFFD"))},
|
|
|
|
|
{V(uintptr(0x110004)), V(string("\uFFFD"))},
|
|
|
|
|
|
|
|
|
|
// named string
|
|
|
|
|
{V(MyString("hello")), V(string("hello"))},
|
|
|
|
|
{V(string("hello")), V(MyString("hello"))},
|
|
|
|
|
{V(string("hello")), V(string("hello"))},
|
|
|
|
|
{V(MyString("hello")), V(MyString("hello"))},
|
|
|
|
|
{V(MyString("bytes1")), V([]byte("bytes1"))},
|
|
|
|
|
{V([]byte("bytes2")), V(MyString("bytes2"))},
|
|
|
|
|
{V([]byte("bytes3")), V([]byte("bytes3"))},
|
|
|
|
|
{V(MyString("runes♝")), V([]rune("runes♝"))},
|
|
|
|
|
{V([]rune("runes♕")), V(MyString("runes♕"))},
|
|
|
|
|
{V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
|
|
|
|
|
{V([]rune("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
|
|
|
|
|
{V(MyRunes("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
|
|
|
|
|
{V(int('a')), V(MyString("a"))},
|
|
|
|
|
{V(int8('a')), V(MyString("a"))},
|
|
|
|
|
{V(int16('a')), V(MyString("a"))},
|
|
|
|
|
{V(int32('a')), V(MyString("a"))},
|
|
|
|
|
{V(int64('a')), V(MyString("a"))},
|
|
|
|
|
{V(uint('a')), V(MyString("a"))},
|
|
|
|
|
{V(uint8('a')), V(MyString("a"))},
|
|
|
|
|
{V(uint16('a')), V(MyString("a"))},
|
|
|
|
|
{V(uint32('a')), V(MyString("a"))},
|
|
|
|
|
{V(uint64('a')), V(MyString("a"))},
|
|
|
|
|
{V(uintptr('a')), V(MyString("a"))},
|
|
|
|
|
{V(int(-1)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(int8(-2)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(int16(-3)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(int32(-4)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(int64(-5)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(uint(0x110001)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(uint32(0x110002)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(uint64(0x110003)), V(MyString("\uFFFD"))},
|
|
|
|
|
{V(uintptr(0x110004)), V(MyString("\uFFFD"))},
|
|
|
|
|
|
|
|
|
|
// named []byte
|
|
|
|
|
{V(string("bytes1")), V(MyBytes("bytes1"))},
|
|
|
|
|
{V(MyBytes("bytes2")), V(string("bytes2"))},
|
|
|
|
|
{V(MyBytes("bytes3")), V(MyBytes("bytes3"))},
|
|
|
|
|
{V(MyString("bytes1")), V(MyBytes("bytes1"))},
|
|
|
|
|
{V(MyBytes("bytes2")), V(MyString("bytes2"))},
|
|
|
|
|
|
|
|
|
|
// named []rune
|
|
|
|
|
{V(string("runes♝")), V(MyRunes("runes♝"))},
|
|
|
|
|
{V(MyRunes("runes♕")), V(string("runes♕"))},
|
|
|
|
|
{V(MyRunes("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
|
|
|
|
|
{V(MyString("runes♝")), V(MyRunes("runes♝"))},
|
|
|
|
|
{V(MyRunes("runes♕")), V(MyString("runes♕"))},
|
|
|
|
|
|
|
|
|
|
// named types and equal underlying types
|
|
|
|
|
{V(new(int)), V(new(integer))},
|
|
|
|
|
{V(new(integer)), V(new(int))},
|
|
|
|
|
{V(Empty{}), V(struct{}{})},
|
|
|
|
|
{V(new(Empty)), V(new(struct{}))},
|
|
|
|
|
{V(struct{}{}), V(Empty{})},
|
|
|
|
|
{V(new(struct{})), V(new(Empty))},
|
|
|
|
|
{V(Empty{}), V(Empty{})},
|
|
|
|
|
{V(MyBytes{}), V([]byte{})},
|
|
|
|
|
{V([]byte{}), V(MyBytes{})},
|
|
|
|
|
{V((func())(nil)), V(MyFunc(nil))},
|
|
|
|
|
{V((MyFunc)(nil)), V((func())(nil))},
|
|
|
|
|
|
2016-10-03 12:13:22 -07:00
|
|
|
// structs with different tags
|
|
|
|
|
{V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"foo"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{}), V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"bar"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{})},
|
|
|
|
|
|
|
|
|
|
{V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"bar"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{}), V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"foo"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{})},
|
|
|
|
|
|
|
|
|
|
{V(MyStruct{}), V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"foo"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{})},
|
|
|
|
|
|
|
|
|
|
{V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"foo"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{}), V(MyStruct{})},
|
|
|
|
|
|
|
|
|
|
{V(MyStruct{}), V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"bar"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{})},
|
|
|
|
|
|
|
|
|
|
{V(struct {
|
2016-10-23 14:10:11 -07:00
|
|
|
x int `some:"bar"`
|
2016-10-03 12:13:22 -07:00
|
|
|
}{}), V(MyStruct{})},
|
|
|
|
|
|
2012-09-22 08:52:27 -04:00
|
|
|
// can convert *byte and *MyByte
|
|
|
|
|
{V((*byte)(nil)), V((*MyByte)(nil))},
|
|
|
|
|
{V((*MyByte)(nil)), V((*byte)(nil))},
|
|
|
|
|
|
|
|
|
|
// cannot convert mismatched array sizes
|
|
|
|
|
{V([2]byte{}), V([2]byte{})},
|
|
|
|
|
{V([3]byte{}), V([3]byte{})},
|
|
|
|
|
|
|
|
|
|
// cannot convert other instances
|
|
|
|
|
{V((**byte)(nil)), V((**byte)(nil))},
|
|
|
|
|
{V((**MyByte)(nil)), V((**MyByte)(nil))},
|
|
|
|
|
{V((chan byte)(nil)), V((chan byte)(nil))},
|
|
|
|
|
{V((chan MyByte)(nil)), V((chan MyByte)(nil))},
|
|
|
|
|
{V(([]byte)(nil)), V(([]byte)(nil))},
|
|
|
|
|
{V(([]MyByte)(nil)), V(([]MyByte)(nil))},
|
|
|
|
|
{V((map[int]byte)(nil)), V((map[int]byte)(nil))},
|
|
|
|
|
{V((map[int]MyByte)(nil)), V((map[int]MyByte)(nil))},
|
|
|
|
|
{V((map[byte]int)(nil)), V((map[byte]int)(nil))},
|
|
|
|
|
{V((map[MyByte]int)(nil)), V((map[MyByte]int)(nil))},
|
|
|
|
|
{V([2]byte{}), V([2]byte{})},
|
|
|
|
|
{V([2]MyByte{}), V([2]MyByte{})},
|
|
|
|
|
|
|
|
|
|
// other
|
|
|
|
|
{V((***int)(nil)), V((***int)(nil))},
|
|
|
|
|
{V((***byte)(nil)), V((***byte)(nil))},
|
|
|
|
|
{V((***int32)(nil)), V((***int32)(nil))},
|
|
|
|
|
{V((***int64)(nil)), V((***int64)(nil))},
|
|
|
|
|
{V((chan int)(nil)), V((<-chan int)(nil))},
|
|
|
|
|
{V((chan int)(nil)), V((chan<- int)(nil))},
|
|
|
|
|
{V((chan string)(nil)), V((<-chan string)(nil))},
|
|
|
|
|
{V((chan string)(nil)), V((chan<- string)(nil))},
|
|
|
|
|
{V((chan byte)(nil)), V((chan byte)(nil))},
|
|
|
|
|
{V((chan MyByte)(nil)), V((chan MyByte)(nil))},
|
|
|
|
|
{V((map[int]bool)(nil)), V((map[int]bool)(nil))},
|
|
|
|
|
{V((map[int]byte)(nil)), V((map[int]byte)(nil))},
|
|
|
|
|
{V((map[uint]bool)(nil)), V((map[uint]bool)(nil))},
|
|
|
|
|
{V([]uint(nil)), V([]uint(nil))},
|
|
|
|
|
{V([]int(nil)), V([]int(nil))},
|
|
|
|
|
{V(new(interface{})), V(new(interface{}))},
|
|
|
|
|
{V(new(io.Reader)), V(new(io.Reader))},
|
|
|
|
|
{V(new(io.Writer)), V(new(io.Writer))},
|
|
|
|
|
|
|
|
|
|
// interfaces
|
|
|
|
|
{V(int(1)), EmptyInterfaceV(int(1))},
|
|
|
|
|
{V(string("hello")), EmptyInterfaceV(string("hello"))},
|
|
|
|
|
{V(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
|
|
|
|
|
{ReadWriterV(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
|
|
|
|
|
{V(new(bytes.Buffer)), ReadWriterV(new(bytes.Buffer))},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestConvert(t *testing.T) {
	canConvert := map[[2]Type]bool{}
	all := map[Type]bool{}

	for _, tt := range convertTests {
		t1 := tt.in.Type()
		if !t1.ConvertibleTo(t1) {
			t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t1)
			continue
		}

		t2 := tt.out.Type()
		if !t1.ConvertibleTo(t2) {
			t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t2)
			continue
		}

		all[t1] = true
		all[t2] = true
		canConvert[[2]Type{t1, t2}] = true

		// vout1 represents the in value converted to the in type.
		v1 := tt.in
		vout1 := v1.Convert(t1)
		out1 := vout1.Interface()
		if vout1.Type() != tt.in.Type() || !DeepEqual(out1, tt.in.Interface()) {
			t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t1, out1, tt.in.Interface())
		}

		// vout2 represents the in value converted to the out type.
		vout2 := v1.Convert(t2)
		out2 := vout2.Interface()
		if vout2.Type() != tt.out.Type() || !DeepEqual(out2, tt.out.Interface()) {
			t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out2, tt.out.Interface())
		}

		// vout3 represents a new value of the out type, set to vout2. This makes
		// sure the converted value vout2 is really usable as a regular value.
		vout3 := New(t2).Elem()
		vout3.Set(vout2)
		out3 := vout3.Interface()
		if vout3.Type() != tt.out.Type() || !DeepEqual(out3, tt.out.Interface()) {
			t.Errorf("Set(ValueOf(%T(%[1]v)).Convert(%s)) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out3, tt.out.Interface())
		}

		if IsRO(v1) {
			t.Errorf("table entry %v is RO, should not be", v1)
		}
		if IsRO(vout1) {
			t.Errorf("self-conversion output %v is RO, should not be", vout1)
		}
		if IsRO(vout2) {
			t.Errorf("conversion output %v is RO, should not be", vout2)
		}
		if IsRO(vout3) {
			t.Errorf("set(conversion output) %v is RO, should not be", vout3)
		}
		if !IsRO(MakeRO(v1).Convert(t1)) {
			t.Errorf("RO self-conversion output %v is not RO, should be", v1)
		}
		if !IsRO(MakeRO(v1).Convert(t2)) {
			t.Errorf("RO conversion output %v is not RO, should be", v1)
		}
	}

	// Assume that of all the types we saw during the tests,
	// if there wasn't an explicit entry for a conversion between
	// a pair of types, then it's not to be allowed. This checks for
	// things like 'int64' converting to '*int'.
	for t1 := range all {
		for t2 := range all {
			expectOK := t1 == t2 || canConvert[[2]Type{t1, t2}] || t2.Kind() == Interface && t2.NumMethod() == 0
			if ok := t1.ConvertibleTo(t2); ok != expectOK {
				t.Errorf("(%s).ConvertibleTo(%s) = %v, want %v", t1, t2, ok, expectOK)
			}
		}
	}
}
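
// Illustrative sketch (not from the original suite; convertSketch is a
// hypothetical helper): Value.Convert follows the same rules as a Go
// source-level conversion, so the "truncation" table entry above
// corresponds to converting float64(1.5) to int(1).
func convertSketch() int {
	v := ValueOf(float64(1.5))
	// Convert to int truncates toward zero, exactly like int(1.5) would
	// if it were a run-time conversion.
	return v.Convert(TypeOf(int(0))).Interface().(int) // 1
}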

type ComparableStruct struct {
	X int
}

type NonComparableStruct struct {
	X int
	Y map[string]int
}

var comparableTests = []struct {
	typ Type
	ok  bool
}{
	{TypeOf(1), true},
	{TypeOf("hello"), true},
	{TypeOf(new(byte)), true},
	{TypeOf((func())(nil)), false},
	{TypeOf([]byte{}), false},
	{TypeOf(map[string]int{}), false},
	{TypeOf(make(chan int)), true},
	{TypeOf(1.5), true},
	{TypeOf(false), true},
	{TypeOf(1i), true},
	{TypeOf(ComparableStruct{}), true},
	{TypeOf(NonComparableStruct{}), false},
	{TypeOf([10]map[string]int{}), false},
	{TypeOf([10]string{}), true},
	{TypeOf(new(interface{})).Elem(), true},
}

func TestComparable(t *testing.T) {
	for _, tt := range comparableTests {
		if ok := tt.typ.Comparable(); ok != tt.ok {
			t.Errorf("TypeOf(%v).Comparable() = %v, want %v", tt.typ, ok, tt.ok)
		}
	}
}
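
// Illustrative sketch (not from the original suite; comparableSketch is
// a hypothetical helper): Comparable reports whether values of the type
// can be compared with ==, which is what the table above probes case by
// case -- slices and maps are not comparable, arrays of comparable
// elements are.
func comparableSketch() (sliceOK, arrayOK bool) {
	return TypeOf([]int{}).Comparable(), TypeOf([3]int{}).Comparable() // false, true
}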

func TestOverflow(t *testing.T) {
	if ovf := V(float64(0)).OverflowFloat(1e300); ovf {
		t.Errorf("%v wrongly overflows float64", 1e300)
	}

	maxFloat32 := float64((1<<24 - 1) << (127 - 23))
	if ovf := V(float32(0)).OverflowFloat(maxFloat32); ovf {
		t.Errorf("%v wrongly overflows float32", maxFloat32)
	}
	ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52))
	if ovf := V(float32(0)).OverflowFloat(ovfFloat32); !ovf {
		t.Errorf("%v should overflow float32", ovfFloat32)
	}
	if ovf := V(float32(0)).OverflowFloat(-ovfFloat32); !ovf {
		t.Errorf("%v should overflow float32", -ovfFloat32)
	}

	maxInt32 := int64(0x7fffffff)
	if ovf := V(int32(0)).OverflowInt(maxInt32); ovf {
		t.Errorf("%v wrongly overflows int32", maxInt32)
	}
	if ovf := V(int32(0)).OverflowInt(-1 << 31); ovf {
		t.Errorf("%v wrongly overflows int32", -int64(1)<<31)
	}
	ovfInt32 := int64(1 << 31)
	if ovf := V(int32(0)).OverflowInt(ovfInt32); !ovf {
		t.Errorf("%v should overflow int32", ovfInt32)
	}

	maxUint32 := uint64(0xffffffff)
	if ovf := V(uint32(0)).OverflowUint(maxUint32); ovf {
		t.Errorf("%v wrongly overflows uint32", maxUint32)
	}
	ovfUint32 := uint64(1 << 32)
	if ovf := V(uint32(0)).OverflowUint(ovfUint32); !ovf {
		t.Errorf("%v should overflow uint32", ovfUint32)
	}
}
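
// Illustrative sketch (not from the original suite; overflowSketch is a
// hypothetical helper): the Overflow methods report whether a value
// would not fit in the Value's type, so bounds can be probed without
// triggering a panic from SetInt and friends.
func overflowSketch() (ovf127, ovf128 bool) {
	v := ValueOf(int8(0))
	// 127 is the largest int8; 128 is one past it.
	return v.OverflowInt(127), v.OverflowInt(128) // false, true
}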

func checkSameType(t *testing.T, x Type, y interface{}) {
	if x != TypeOf(y) || TypeOf(Zero(x).Interface()) != TypeOf(y) {
		t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y))
	}
}

func TestArrayOf(t *testing.T) {
	// check construction and use of type not in binary
	tests := []struct {
		n          int
		value      func(i int) interface{}
		comparable bool
		want       string
	}{
		{
			n:          0,
			value:      func(i int) interface{} { type Tint int; return Tint(i) },
			comparable: true,
			want:       "[]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tint int; return Tint(i) },
			comparable: true,
			want:       "[0 1 2 3 4 5 6 7 8 9]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tfloat float64; return Tfloat(i) },
			comparable: true,
			want:       "[0 1 2 3 4 5 6 7 8 9]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tstring string; return Tstring(strconv.Itoa(i)) },
			comparable: true,
			want:       "[0 1 2 3 4 5 6 7 8 9]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tstruct struct{ V int }; return Tstruct{i} },
			comparable: true,
			want:       "[{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tint int; return []Tint{Tint(i)} },
			comparable: false,
			want:       "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tint int; return [1]Tint{Tint(i)} },
			comparable: true,
			want:       "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tstruct struct{ V [1]int }; return Tstruct{[1]int{i}} },
			comparable: true,
			want:       "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type Tstruct struct{ V []int }; return Tstruct{[]int{i}} },
			comparable: false,
			want:       "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]",
		},
		{
			n:          10,
			value:      func(i int) interface{} { type TstructUV struct{ U, V int }; return TstructUV{i, i} },
			comparable: true,
			want:       "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
		},
		{
			n: 10,
			value: func(i int) interface{} {
				type TstructUV struct {
					U int
					V float64
				}
				return TstructUV{i, float64(i)}
			},
			comparable: true,
			want:       "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
		},
	}

	for _, table := range tests {
		at := ArrayOf(table.n, TypeOf(table.value(0)))
		v := New(at).Elem()
		vok := New(at).Elem()
		vnot := New(at).Elem()
		for i := 0; i < v.Len(); i++ {
			v.Index(i).Set(ValueOf(table.value(i)))
			vok.Index(i).Set(ValueOf(table.value(i)))
			j := i
			if i+1 == v.Len() {
				j = i + 1
			}
			vnot.Index(i).Set(ValueOf(table.value(j))) // make it differ only by last element
		}
		s := fmt.Sprint(v.Interface())
		if s != table.want {
			t.Errorf("constructed array = %s, want %s", s, table.want)
		}

		if table.comparable != at.Comparable() {
			t.Errorf("constructed array (%#v) is comparable=%v, want=%v", v.Interface(), at.Comparable(), table.comparable)
		}
		if table.comparable {
			if table.n > 0 {
				if DeepEqual(vnot.Interface(), v.Interface()) {
					t.Errorf(
						"arrays (%#v) compare ok (but should not)",
						v.Interface(),
					)
				}
			}
			if !DeepEqual(vok.Interface(), v.Interface()) {
				t.Errorf(
					"arrays (%#v) compare NOT-ok (but should)",
					v.Interface(),
				)
			}
		}
	}

	// check that type already in binary is found
	type T int
	checkSameType(t, ArrayOf(5, TypeOf(T(1))), [5]T{})
}
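
// Illustrative sketch (not from the original suite; arrayOfSketch is a
// hypothetical helper): ArrayOf builds an array type at run time; New
// allocates a value of it and Index makes each element addressable,
// mirroring the fill loop in TestArrayOf above.
func arrayOfSketch() string {
	at := ArrayOf(3, TypeOf(int(0))) // the type [3]int
	v := New(at).Elem()
	for i := 0; i < v.Len(); i++ {
		v.Index(i).SetInt(int64(i * i))
	}
	return fmt.Sprint(v.Interface()) // "[0 1 4]"
}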

func TestArrayOfGC(t *testing.T) {
	type T *uintptr
	tt := TypeOf(T(nil))
	const n = 100
	var x []interface{}
	for i := 0; i < n; i++ {
		v := New(ArrayOf(n, tt)).Elem()
		for j := 0; j < v.Len(); j++ {
			p := new(uintptr)
			*p = uintptr(i*n + j)
			v.Index(j).Set(ValueOf(p).Convert(tt))
		}
		x = append(x, v.Interface())
	}
	runtime.GC()

	for i, xi := range x {
		v := ValueOf(xi)
		for j := 0; j < v.Len(); j++ {
			k := v.Index(j).Elem().Interface()
			if k != uintptr(i*n+j) {
				t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
			}
		}
	}
}

func TestArrayOfAlg(t *testing.T) {
	at := ArrayOf(6, TypeOf(byte(0)))
	v1 := New(at).Elem()
	v2 := New(at).Elem()
	if v1.Interface() != v1.Interface() {
		t.Errorf("constructed array %v not equal to itself", v1.Interface())
	}
	v1.Index(5).Set(ValueOf(byte(1)))
	if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 {
		t.Errorf("constructed arrays %v and %v should not be equal", i1, i2)
	}

	at = ArrayOf(6, TypeOf([]int(nil)))
	v1 = New(at).Elem()
	shouldPanic(func() { _ = v1.Interface() == v1.Interface() })
}

func TestArrayOfGenericAlg(t *testing.T) {
	at1 := ArrayOf(5, TypeOf(string("")))
	at := ArrayOf(6, at1)
	v1 := New(at).Elem()
	v2 := New(at).Elem()
	if v1.Interface() != v1.Interface() {
		t.Errorf("constructed array %v not equal to itself", v1.Interface())
	}

	v1.Index(0).Index(0).Set(ValueOf("abc"))
	v2.Index(0).Index(0).Set(ValueOf("efg"))
	if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 {
		t.Errorf("constructed arrays %v and %v should not be equal", i1, i2)
	}

	v1.Index(0).Index(0).Set(ValueOf("abc"))
	v2.Index(0).Index(0).Set(ValueOf((v1.Index(0).Index(0).String() + " ")[:3]))
	if i1, i2 := v1.Interface(), v2.Interface(); i1 != i2 {
		t.Errorf("constructed arrays %v and %v should be equal", i1, i2)
	}

	// Test hash
	m := MakeMap(MapOf(at, TypeOf(int(0))))
	m.SetMapIndex(v1, ValueOf(1))
	if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
		t.Errorf("constructed arrays %v and %v have different hashes", i1, i2)
	}
}
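
// Illustrative sketch (not from the original suite; arrayKeySketch is a
// hypothetical helper): because arrays of comparable elements are
// themselves comparable, a runtime-constructed array type can key a map
// built with MapOf, which is the property the hash check above relies on.
func arrayKeySketch() int64 {
	at := ArrayOf(2, TypeOf(int(0)))
	m := MakeMap(MapOf(at, TypeOf(int(0))))
	k := New(at).Elem() // zero [2]int key
	m.SetMapIndex(k, ValueOf(int(42)))
	return m.MapIndex(k).Int() // 42
}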

func TestArrayOfDirectIface(t *testing.T) {
	{
		type T [1]*byte
		i1 := Zero(TypeOf(T{})).Interface()
		v1 := ValueOf(&i1).Elem()
		p1 := v1.InterfaceData()[1]

		i2 := Zero(ArrayOf(1, PtrTo(TypeOf(int8(0))))).Interface()
		v2 := ValueOf(&i2).Elem()
		p2 := v2.InterfaceData()[1]

		if p1 != 0 {
			t.Errorf("got p1=%v. want=%v", p1, nil)
		}

		if p2 != 0 {
			t.Errorf("got p2=%v. want=%v", p2, nil)
		}
	}
	{
		type T [0]*byte
		i1 := Zero(TypeOf(T{})).Interface()
		v1 := ValueOf(&i1).Elem()
		p1 := v1.InterfaceData()[1]

		i2 := Zero(ArrayOf(0, PtrTo(TypeOf(int8(0))))).Interface()
		v2 := ValueOf(&i2).Elem()
		p2 := v2.InterfaceData()[1]

		if p1 == 0 {
			t.Errorf("got p1=%v. want=not-%v", p1, nil)
		}

		if p2 == 0 {
			t.Errorf("got p2=%v. want=not-%v", p2, nil)
		}
	}
}

func TestSliceOf(t *testing.T) {
	// check construction and use of type not in binary
	type T int
	st := SliceOf(TypeOf(T(1)))
	if got, want := st.String(), "[]reflect_test.T"; got != want {
		t.Errorf("SliceOf(T(1)).String()=%q, want %q", got, want)
	}
	v := MakeSlice(st, 10, 10)
	runtime.GC()
	for i := 0; i < v.Len(); i++ {
		v.Index(i).Set(ValueOf(T(i)))
		runtime.GC()
	}
	s := fmt.Sprint(v.Interface())
	want := "[0 1 2 3 4 5 6 7 8 9]"
	if s != want {
		t.Errorf("constructed slice = %s, want %s", s, want)
	}

	// check that type already in binary is found
	type T1 int
	checkSameType(t, SliceOf(TypeOf(T1(1))), []T1{})
}
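
// Illustrative sketch (not from the original suite; sliceOfSketch is a
// hypothetical helper): SliceOf pairs with MakeSlice the way ArrayOf
// pairs with New, yielding a settable slice of a type constructed at
// run time, as TestSliceOf does above.
func sliceOfSketch() string {
	st := SliceOf(TypeOf(string(""))) // the type []string
	v := MakeSlice(st, 2, 2)
	v.Index(0).SetString("a")
	v.Index(1).SetString("b")
	return fmt.Sprint(v.Interface()) // "[a b]"
}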

func TestSliceOverflow(t *testing.T) {
	// check that MakeSlice panics when size of slice overflows uint
	const S = 1e6
	s := uint(S)
	l := (1<<(unsafe.Sizeof((*byte)(nil))*8)-1)/s + 1
	if l*s >= s {
		t.Fatal("slice size does not overflow")
	}
	var x [S]byte
	st := SliceOf(TypeOf(x))
	defer func() {
		err := recover()
		if err == nil {
			t.Fatal("slice overflow does not panic")
		}
	}()
	MakeSlice(st, int(l), int(l))
}

func TestSliceOfGC(t *testing.T) {
	type T *uintptr
	tt := TypeOf(T(nil))
	st := SliceOf(tt)
	const n = 100
	var x []interface{}
	for i := 0; i < n; i++ {
		v := MakeSlice(st, n, n)
		for j := 0; j < v.Len(); j++ {
			p := new(uintptr)
			*p = uintptr(i*n + j)
			v.Index(j).Set(ValueOf(p).Convert(tt))
		}
		x = append(x, v.Interface())
	}
	runtime.GC()

	for i, xi := range x {
		v := ValueOf(xi)
		for j := 0; j < v.Len(); j++ {
			k := v.Index(j).Elem().Interface()
			if k != uintptr(i*n+j) {
				t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
			}
		}
	}
}

func TestStructOfFieldName(t *testing.T) {
	// invalid field name "1nvalid"
	shouldPanic(func() {
		StructOf([]StructField{
			StructField{Name: "valid", Type: TypeOf("")},
			StructField{Name: "1nvalid", Type: TypeOf("")},
		})
	})

	// invalid field name "+"
	shouldPanic(func() {
		StructOf([]StructField{
			StructField{Name: "val1d", Type: TypeOf("")},
			StructField{Name: "+", Type: TypeOf("")},
		})
	})

	// no field name
	shouldPanic(func() {
		StructOf([]StructField{
			StructField{Name: "", Type: TypeOf("")},
		})
	})

	// verify creation of a struct with valid struct fields
	validFields := []StructField{
		StructField{
			Name: "φ",
			Type: TypeOf(""),
		},
		StructField{
			Name: "ValidName",
			Type: TypeOf(""),
		},
		StructField{
			Name: "Val1dNam5",
			Type: TypeOf(""),
		},
	}

	validStruct := StructOf(validFields)

	const structStr = `struct { φ string; ValidName string; Val1dNam5 string }`
	if got, want := validStruct.String(), structStr; got != want {
		t.Errorf("StructOf(validFields).String()=%q, want %q", got, want)
	}
}

func TestStructOf(t *testing.T) {
	// check construction and use of type not in binary
	fields := []StructField{
		StructField{
			Name: "S",
			Tag:  "s",
			Type: TypeOf(""),
		},
		StructField{
			Name: "X",
			Tag:  "x",
			Type: TypeOf(byte(0)),
		},
		StructField{
			Name: "Y",
			Type: TypeOf(uint64(0)),
		},
		StructField{
			Name: "Z",
			Type: TypeOf([3]uint16{}),
		},
	}

	st := StructOf(fields)
	v := New(st).Elem()
	runtime.GC()
	v.FieldByName("X").Set(ValueOf(byte(2)))
	v.FieldByIndex([]int{1}).Set(ValueOf(byte(1)))
	runtime.GC()

	s := fmt.Sprint(v.Interface())
	want := `{ 1 0 [0 0 0]}`
	if s != want {
		t.Errorf("constructed struct = %s, want %s", s, want)
	}
	const stStr = `struct { S string "s"; X uint8 "x"; Y uint64; Z [3]uint16 }`
	if got, want := st.String(), stStr; got != want {
		t.Errorf("StructOf(fields).String()=%q, want %q", got, want)
	}

	// check the size, alignment and field offsets
	stt := TypeOf(struct {
		String string
		X      byte
		Y      uint64
		Z      [3]uint16
	}{})
	if st.Size() != stt.Size() {
		t.Errorf("constructed struct size = %v, want %v", st.Size(), stt.Size())
	}
	if st.Align() != stt.Align() {
		t.Errorf("constructed struct align = %v, want %v", st.Align(), stt.Align())
	}
	if st.FieldAlign() != stt.FieldAlign() {
		t.Errorf("constructed struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
	}
	for i := 0; i < st.NumField(); i++ {
		o1 := st.Field(i).Offset
		o2 := stt.Field(i).Offset
		if o1 != o2 {
			t.Errorf("constructed struct field %v offset = %v, want %v", i, o1, o2)
		}
	}

	// Check size and alignment with a trailing zero-sized field.
	st = StructOf([]StructField{
		{
			Name: "F1",
			Type: TypeOf(byte(0)),
		},
		{
			Name: "F2",
			Type: TypeOf([0]*byte{}),
		},
	})
	stt = TypeOf(struct {
		G1 byte
		G2 [0]*byte
	}{})
	if st.Size() != stt.Size() {
		t.Errorf("constructed zero-padded struct size = %v, want %v", st.Size(), stt.Size())
	}
	if st.Align() != stt.Align() {
		t.Errorf("constructed zero-padded struct align = %v, want %v", st.Align(), stt.Align())
	}
	if st.FieldAlign() != stt.FieldAlign() {
		t.Errorf("constructed zero-padded struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
	}
	for i := 0; i < st.NumField(); i++ {
		o1 := st.Field(i).Offset
		o2 := stt.Field(i).Offset
		if o1 != o2 {
			t.Errorf("constructed zero-padded struct field %v offset = %v, want %v", i, o1, o2)
		}
	}

	// check duplicate names
	shouldPanic(func() {
		StructOf([]StructField{
			StructField{Name: "string", Type: TypeOf("")},
			StructField{Name: "string", Type: TypeOf("")},
		})
	})
	shouldPanic(func() {
		StructOf([]StructField{
			StructField{Type: TypeOf("")},
			StructField{Name: "string", Type: TypeOf("")},
		})
	})
	shouldPanic(func() {
		StructOf([]StructField{
			StructField{Type: TypeOf("")},
			StructField{Type: TypeOf("")},
		})
	})
	// check that type already in binary is found
	checkSameType(t, StructOf(fields[2:3]), struct{ Y uint64 }{})

	// gccgo used to fail this test.
	type structFieldType interface{}
	checkSameType(t,
		StructOf([]StructField{
			StructField{
				Name: "F",
				Type: TypeOf((*structFieldType)(nil)).Elem(),
			},
		}),
		struct{ F structFieldType }{})
}
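
// Illustrative sketch (not from the original suite; structOfSketch is a
// hypothetical helper): StructOf assembles a struct type from exported
// field descriptions; FieldByName then sets fields of a freshly
// allocated value, as TestStructOf does above.
func structOfSketch() string {
	st := StructOf([]StructField{
		{Name: "A", Type: TypeOf(int(0))},
		{Name: "B", Type: TypeOf(string(""))},
	})
	v := New(st).Elem()
	v.FieldByName("A").SetInt(7)
	v.FieldByName("B").SetString("x")
	return fmt.Sprint(v.Interface()) // "{7 x}"
}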

func TestStructOfExportRules(t *testing.T) {
	type S1 struct{}
	type s2 struct{}
	type ΦType struct{}
	type φType struct{}

	testPanic := func(i int, mustPanic bool, f func()) {
		defer func() {
			err := recover()
			if err == nil && mustPanic {
				t.Errorf("test-%d did not panic", i)
			}
			if err != nil && !mustPanic {
				t.Errorf("test-%d panicked: %v\n", i, err)
			}
		}()
		f()
	}

	tests := []struct {
		field     StructField
		mustPanic bool
		exported  bool
	}{
		{
			field:    StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{})},
			exported: true,
		},
		{
			field:    StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil))},
			exported: true,
		},
		{
			field:     StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{})},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil))},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "Name", Type: nil, PkgPath: ""},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "", Type: TypeOf(S1{}), PkgPath: ""},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{}), PkgPath: "other/pkg"},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{}), PkgPath: "other/pkg"},
			mustPanic: true,
		},
		{
			field:     StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"},
			mustPanic: true,
		},
		{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"},
|
2016-03-05 13:37:38 +01:00
|
|
|
mustPanic: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"},
|
2016-03-05 13:37:38 +01:00
|
|
|
mustPanic: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "S", Type: TypeOf(S1{})},
|
|
|
|
|
mustPanic: false,
|
|
|
|
|
exported: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "S", Type: TypeOf((*S1)(nil))},
|
|
|
|
|
exported: true,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
|
|
|
|
{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "S", Type: TypeOf(s2{})},
|
|
|
|
|
exported: true,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
|
|
|
|
{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "S", Type: TypeOf((*s2)(nil))},
|
|
|
|
|
exported: true,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf(S1{})},
|
|
|
|
|
mustPanic: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf((*S1)(nil))},
|
|
|
|
|
mustPanic: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf(s2{})},
|
|
|
|
|
mustPanic: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf((*s2)(nil))},
|
|
|
|
|
mustPanic: true,
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf(S1{}), PkgPath: "other/pkg"},
|
|
|
|
|
mustPanic: true, // TODO(sbinet): creating a name with a package path
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"},
|
|
|
|
|
mustPanic: true, // TODO(sbinet): creating a name with a package path
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf(s2{}), PkgPath: "other/pkg"},
|
|
|
|
|
mustPanic: true, // TODO(sbinet): creating a name with a package path
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "s", Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"},
|
|
|
|
|
mustPanic: true, // TODO(sbinet): creating a name with a package path
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "", Type: TypeOf(ΦType{})},
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
mustPanic: true,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
field: StructField{Name: "", Type: TypeOf(φType{})},
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
mustPanic: true,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
|
|
|
|
{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "Φ", Type: TypeOf(0)},
|
|
|
|
|
exported: true,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
|
|
|
|
{
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
field: StructField{Name: "φ", Type: TypeOf(0)},
|
|
|
|
|
exported: false,
|
2016-03-05 13:37:38 +01:00
|
|
|
},
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for i, test := range tests {
|
2016-03-05 13:37:38 +01:00
|
|
|
testPanic(i, test.mustPanic, func() {
|
|
|
|
|
typ := StructOf([]StructField{test.field})
|
|
|
|
|
if typ == nil {
|
|
|
|
|
t.Errorf("test-%d: error creating struct type", i)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
field := typ.Field(0)
|
|
|
|
|
n := field.Name
|
|
|
|
|
if n == "" {
|
2017-01-25 10:19:33 -05:00
|
|
|
panic("field.Name must not be empty")
|
2016-03-05 13:37:38 +01:00
|
|
|
}
|
all: clean up code with token.IsExported
A handful of packages were reimplementing IsExported, so use
token.IsExported instead. This caused the deps test to fail for net/rpc.
However, net/rpc deals with Go types, and go/token is light and fairly
low-level in terms of Go tooling packages, so that's okay.
While at it, replace all uses of ast.IsExported with token.IsExported.
This is more consistent, and also means that the import graphs are
leaner. A couple of files no longer need to import go/ast, for example.
We can't get rid of cmd/compile/internal/types.IsExported, as the
compiler can only depend on go/token as of Go 1.4. However, gc used
different implementations in a couple of places, so consolidate the use
of types.IsExported there.
Finally, we can't get rid of the copied IsExported implementation in
encoding/gob, as go/token depends on it as part of a test. That test
can't be an external test either, so there's no easy way to break the
import cycle.
Overall, this removes about forty lines of unnecessary code.
Change-Id: I86a475b7614261e6a7b0b153d5ca02b9f64a7b2d
Reviewed-on: https://go-review.googlesource.com/c/go/+/172037
Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2019-04-15 23:10:50 +09:00
|
|
|
exported := token.IsExported(n)
|
2016-03-05 13:37:38 +01:00
|
|
|
if exported != test.exported {
|
2016-05-12 15:00:10 +03:00
|
|
|
t.Errorf("test-%d: got exported=%v want exported=%v", i, exported, test.exported)
|
2016-03-05 13:37:38 +01:00
|
|
|
}
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
}
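
// An illustrative sketch (not part of the original test table) of the rule
// exercised above: StructOf accepts only fields whose Name is an exported
// Go identifier, and panics on unexported or empty names.
//
//	StructOf([]StructField{{Name: "X", Type: TypeOf(0)}}) // ok: "X" is exported
//	StructOf([]StructField{{Name: "x", Type: TypeOf(0)}}) // panics: unexported field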
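
// TestStructOfGC checks that values of a struct type constructed at runtime
// keep their pointer-typed fields alive across a garbage collection, i.e.
// that StructOf records usable GC metadata for the new type.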
func TestStructOfGC(t *testing.T) {
	type T *uintptr
	tt := TypeOf(T(nil))
	fields := []StructField{
		{Name: "X", Type: tt},
		{Name: "Y", Type: tt},
	}
	st := StructOf(fields)

	const n = 10000
	var x []interface{}
	for i := 0; i < n; i++ {
		v := New(st).Elem()
		for j := 0; j < v.NumField(); j++ {
			p := new(uintptr)
			*p = uintptr(i*n + j)
			v.Field(j).Set(ValueOf(p).Convert(tt))
		}
		x = append(x, v.Interface())
	}
	runtime.GC()

	for i, xi := range x {
		v := ValueOf(xi)
		for j := 0; j < v.NumField(); j++ {
			k := v.Field(j).Elem().Interface()
			if k != uintptr(i*n+j) {
				t.Errorf("lost x[%d].%c = %d, want %d", i, "XY"[j], k, i*n+j)
			}
		}
	}
}
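
// TestStructOfAlg checks that a constructed struct type supports equality:
// values with comparable fields compare as expected, while comparing a
// struct that contains a non-comparable field (a slice) panics.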
func TestStructOfAlg(t *testing.T) {
	st := StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf(int(0))}})
	v1 := New(st).Elem()
	v2 := New(st).Elem()
	if !DeepEqual(v1.Interface(), v1.Interface()) {
		t.Errorf("constructed struct %v not equal to itself", v1.Interface())
	}
	v1.FieldByName("X").Set(ValueOf(int(1)))
	if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) {
		t.Errorf("constructed structs %v and %v should not be equal", i1, i2)
	}

	st = StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf([]int(nil))}})
	v1 = New(st).Elem()
	shouldPanic(func() { _ = v1.Interface() == v1.Interface() })
}
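
// TestStructOfGenericAlg repeats the equality and map-hash checks on
// layouts chosen to stress less trivial hash/equality code paths:
// zero-length arrays, interior padding, and string fields at varying
// offsets.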
func TestStructOfGenericAlg(t *testing.T) {
	st1 := StructOf([]StructField{
		{Name: "X", Tag: "x", Type: TypeOf(int64(0))},
		{Name: "Y", Type: TypeOf(string(""))},
	})
	st := StructOf([]StructField{
		{Name: "S0", Type: st1},
		{Name: "S1", Type: st1},
	})

	tests := []struct {
		rt  Type
		idx []int
	}{
		{
			rt:  st,
			idx: []int{0, 1},
		},
		{
			rt:  st1,
			idx: []int{1},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf([0]int{})},
					{Name: "YY", Type: TypeOf("")},
				},
			),
			idx: []int{1},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf([0]int{})},
					{Name: "YY", Type: TypeOf("")},
					{Name: "ZZ", Type: TypeOf([2]int{})},
				},
			),
			idx: []int{1},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf([1]int{})},
					{Name: "YY", Type: TypeOf("")},
				},
			),
			idx: []int{1},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf([1]int{})},
					{Name: "YY", Type: TypeOf("")},
					{Name: "ZZ", Type: TypeOf([1]int{})},
				},
			),
			idx: []int{1},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf([2]int{})},
					{Name: "YY", Type: TypeOf("")},
					{Name: "ZZ", Type: TypeOf([2]int{})},
				},
			),
			idx: []int{1},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf(int64(0))},
					{Name: "YY", Type: TypeOf(byte(0))},
					{Name: "ZZ", Type: TypeOf("")},
				},
			),
			idx: []int{2},
		},
		{
			rt: StructOf(
				[]StructField{
					{Name: "XX", Type: TypeOf(int64(0))},
					{Name: "YY", Type: TypeOf(int64(0))},
					{Name: "ZZ", Type: TypeOf("")},
					{Name: "AA", Type: TypeOf([1]int64{})},
				},
			),
			idx: []int{2},
		},
	}

	for _, table := range tests {
		v1 := New(table.rt).Elem()
		v2 := New(table.rt).Elem()

		if !DeepEqual(v1.Interface(), v1.Interface()) {
			t.Errorf("constructed struct %v not equal to itself", v1.Interface())
		}

		v1.FieldByIndex(table.idx).Set(ValueOf("abc"))
		v2.FieldByIndex(table.idx).Set(ValueOf("def"))
		if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) {
			t.Errorf("constructed structs %v and %v should not be equal", i1, i2)
		}

		abc := "abc"
		v1.FieldByIndex(table.idx).Set(ValueOf(abc))
		val := "+" + abc + "-"
		v2.FieldByIndex(table.idx).Set(ValueOf(val[1:4]))
		if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) {
			t.Errorf("constructed structs %v and %v should be equal", i1, i2)
		}

		// Test hash
		m := MakeMap(MapOf(table.rt, TypeOf(int(0))))
		m.SetMapIndex(v1, ValueOf(1))
		if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
			t.Errorf("constructed structs %#v and %#v have different hashes", i1, i2)
		}

		v2.FieldByIndex(table.idx).Set(ValueOf("abc"))
		if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) {
			t.Errorf("constructed structs %v and %v should be equal", i1, i2)
		}

		if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
			t.Errorf("constructed structs %v and %v have different hashes", i1, i2)
		}
	}
}
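
// TestStructOfDirectIface checks that a constructed struct type agrees
// with its compiled counterpart on direct interface storage: a one-pointer
// struct is stored directly in the interface data word, while a
// zero-pointer struct is allocated and referenced through it.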
func TestStructOfDirectIface(t *testing.T) {
	{
		type T struct{ X [1]*byte }
		i1 := Zero(TypeOf(T{})).Interface()
		v1 := ValueOf(&i1).Elem()
		p1 := v1.InterfaceData()[1]

		i2 := Zero(StructOf([]StructField{
			{
				Name: "X",
				Type: ArrayOf(1, TypeOf((*int8)(nil))),
			},
		})).Interface()
		v2 := ValueOf(&i2).Elem()
		p2 := v2.InterfaceData()[1]

		if p1 != 0 {
			t.Errorf("got p1=%v. want=%v", p1, nil)
		}

		if p2 != 0 {
			t.Errorf("got p2=%v. want=%v", p2, nil)
		}
	}
	{
		type T struct{ X [0]*byte }
		i1 := Zero(TypeOf(T{})).Interface()
		v1 := ValueOf(&i1).Elem()
		p1 := v1.InterfaceData()[1]

		i2 := Zero(StructOf([]StructField{
			{
				Name: "X",
				Type: ArrayOf(0, TypeOf((*int8)(nil))),
			},
		})).Interface()
		v2 := ValueOf(&i2).Elem()
		p2 := v2.InterfaceData()[1]

		if p1 == 0 {
			t.Errorf("got p1=%v. want=not-%v", p1, nil)
		}

		if p2 == 0 {
			t.Errorf("got p2=%v. want=not-%v", p2, nil)
		}
	}
}
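
// Helper types for TestStructOfWithInterface: StructI implements Get with
// a value receiver, while StructIPtr and the Settable types implement
// their methods only with pointer receivers, so method promotion through
// an embedded field differs between the value and pointer forms.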
type StructI int

func (i StructI) Get() int { return int(i) }

type StructIPtr int

func (i *StructIPtr) Get() int  { return int(*i) }
func (i *StructIPtr) Set(v int) { *(*int)(i) = v }

type SettableStruct struct {
	SettableField int
}

func (p *SettableStruct) Set(v int) { p.SettableField = v }

type SettablePointer struct {
	SettableField *int
}

func (p *SettablePointer) Set(v int) { *p.SettableField = v }
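
// TestStructOfWithInterface checks which constructed struct types pick up
// the Get method of an embedded field. Per the comments below, methods are
// promoted only for the first embedded field; a later embedded field whose
// type would contribute methods makes StructOf panic for now.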
func TestStructOfWithInterface(t *testing.T) {
	const want = 42
	type Iface interface {
		Get() int
	}
	type IfaceSet interface {
		Set(int)
	}
	tests := []struct {
		name string
		typ  Type
		val  Value
		impl bool
	}{
		{
			name: "StructI",
			typ:  TypeOf(StructI(want)),
			val:  ValueOf(StructI(want)),
			impl: true,
		},
		{
			name: "StructI",
			typ:  PtrTo(TypeOf(StructI(want))),
			val: ValueOf(func() interface{} {
				v := StructI(want)
				return &v
			}()),
			impl: true,
		},
		{
			name: "StructIPtr",
			typ:  PtrTo(TypeOf(StructIPtr(want))),
			val: ValueOf(func() interface{} {
				v := StructIPtr(want)
				return &v
			}()),
			impl: true,
		},
		{
			name: "StructIPtr",
			typ:  TypeOf(StructIPtr(want)),
			val:  ValueOf(StructIPtr(want)),
			impl: false,
		},
		// {
		//	typ:  TypeOf((*Iface)(nil)).Elem(), // FIXME(sbinet): fix method.ifn/tfn
		//	val:  ValueOf(StructI(want)),
		//	impl: true,
		// },
	}

	for i, table := range tests {
		for j := 0; j < 2; j++ {
			var fields []StructField
			if j == 1 {
				fields = append(fields, StructField{
					Name:    "Dummy",
					PkgPath: "",
					Type:    TypeOf(int(0)),
				})
			}
			fields = append(fields, StructField{
				Name:      table.name,
				Anonymous: true,
				PkgPath:   "",
				Type:      table.typ,
			})

			// We currently do not correctly implement methods
			// for embedded fields other than the first.
			// Therefore, for now, we expect those methods
			// to not exist. See issues 15924 and 20824.
			// When those issues are fixed, this test of panic
			// should be removed.
			if j == 1 && table.impl {
				func() {
					defer func() {
						if err := recover(); err == nil {
							t.Errorf("test-%d-%d did not panic", i, j)
						}
					}()
					_ = StructOf(fields)
				}()
				continue
			}

			rt := StructOf(fields)
			rv := New(rt).Elem()
			rv.Field(j).Set(table.val)

			if _, ok := rv.Interface().(Iface); ok != table.impl {
				if table.impl {
					t.Errorf("test-%d-%d: type=%v fails to implement Iface.\n", i, j, table.typ)
				} else {
					t.Errorf("test-%d-%d: type=%v should NOT implement Iface\n", i, j, table.typ)
				}
				continue
			}

			if !table.impl {
				continue
			}

			v := rv.Interface().(Iface).Get()
			if v != want {
				t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, v, want)
			}

			fct := rv.MethodByName("Get")
			out := fct.Call(nil)
			if !DeepEqual(out[0].Interface(), want) {
				t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, out[0].Interface(), want)
			}
		}
	}

	// Test an embedded nil pointer with pointer methods.
	fields := []StructField{{
		Name:      "StructIPtr",
		Anonymous: true,
		Type:      PtrTo(TypeOf(StructIPtr(want))),
	}}
	rt := StructOf(fields)
	rv := New(rt).Elem()
	// This should panic since the pointer is nil.
	shouldPanic(func() {
		rv.Interface().(IfaceSet).Set(want)
	})

	// Test an embedded nil pointer to a struct with pointer methods.
	fields = []StructField{{
		Name:      "SettableStruct",
		Anonymous: true,
		Type:      PtrTo(TypeOf(SettableStruct{})),
	}}
	rt = StructOf(fields)
	rv = New(rt).Elem()
	// This should panic since the pointer is nil.
	shouldPanic(func() {
		rv.Interface().(IfaceSet).Set(want)
	})

	// The behavior is different if there is a second field,
	// since now an interface value holds a pointer to the struct
	// rather than just holding a copy of the struct.
	fields = []StructField{
		{
			Name:      "SettableStruct",
			Anonymous: true,
			Type:      PtrTo(TypeOf(SettableStruct{})),
		},
		{
			Name:      "EmptyStruct",
			Anonymous: true,
			Type:      StructOf(nil),
		},
	}
	// With the current implementation this is expected to panic.
	// Ideally it should work and we should be able to see a panic
	// if we call the Set method.
	shouldPanic(func() {
		StructOf(fields)
	})

	// Embed a field that can be stored directly in an interface,
	// with a second field.
	fields = []StructField{
		{
			Name:      "SettablePointer",
			Anonymous: true,
			Type:      TypeOf(SettablePointer{}),
		},
		{
			Name:      "EmptyStruct",
			Anonymous: true,
			Type:      StructOf(nil),
		},
	}
	// With the current implementation this is expected to panic.
	// Ideally it should work and we should be able to call the
	// Set and Get methods.
	shouldPanic(func() {
		StructOf(fields)
	})
}
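
// TestStructOfTooManyFields guards against a regression (#25402):
// embedding a type with a large method set, such as time.Time, must not
// panic, and the promoted methods must remain visible.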
func TestStructOfTooManyFields(t *testing.T) {
	// Bug Fix: #25402 - this should not panic
	tt := StructOf([]StructField{
		{Name: "Time", Type: TypeOf(time.Time{}), Anonymous: true},
	})

	if _, present := tt.MethodByName("After"); !present {
		t.Errorf("Expected method `After` to be found")
	}
}
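
// TestChanOf checks both halves of the ChanOf contract: constructing a
// channel type that does not otherwise exist in the binary, and finding
// the canonical descriptor when the channel type already exists. A
// minimal, illustrative use (not part of the test):
//
//	ct := ChanOf(BothDir, TypeOf("")) // chan string
//	ch := MakeChan(ct, 1)
//	ch.Send(ValueOf("hi"))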
func TestChanOf(t *testing.T) {
	// check construction and use of type not in binary
	type T string
	ct := ChanOf(BothDir, TypeOf(T("")))
	v := MakeChan(ct, 2)
	runtime.GC()
	v.Send(ValueOf(T("hello")))
	runtime.GC()
	v.Send(ValueOf(T("world")))
	runtime.GC()

	sv1, _ := v.Recv()
	sv2, _ := v.Recv()
	s1 := sv1.String()
	s2 := sv2.String()
	if s1 != "hello" || s2 != "world" {
		t.Errorf("constructed chan: have %q, %q, want %q, %q", s1, s2, "hello", "world")
	}

	// check that type already in binary is found
	type T1 int
	checkSameType(t, ChanOf(BothDir, TypeOf(T1(1))), (chan T1)(nil))
}
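
// TestChanOfDir exercises the directional variants: RecvDir and SendDir
// yield <-chan and chan<- types respectively, with matching ChanDir
// string forms.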
func TestChanOfDir(t *testing.T) {
	// check construction and use of type not in binary
	type T string
	crt := ChanOf(RecvDir, TypeOf(T("")))
	cst := ChanOf(SendDir, TypeOf(T("")))

	// check that type already in binary is found
	type T1 int
	checkSameType(t, ChanOf(RecvDir, TypeOf(T1(1))), (<-chan T1)(nil))
	checkSameType(t, ChanOf(SendDir, TypeOf(T1(1))), (chan<- T1)(nil))

	// check String form of ChanDir
	if crt.ChanDir().String() != "<-chan" {
		t.Errorf("chan dir: have %q, want %q", crt.ChanDir().String(), "<-chan")
	}
	if cst.ChanDir().String() != "chan<-" {
		t.Errorf("chan dir: have %q, want %q", cst.ChanDir().String(), "chan<-")
	}
}
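
// TestChanOfGC checks that a channel type constructed at runtime carries
// GC metadata for its element type: pointers sent on such a channel must
// survive a collection. The watchdog goroutine turns a wedged test into a
// panic instead of a silent hang.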
func TestChanOfGC(t *testing.T) {
	done := make(chan bool, 1)
	go func() {
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			panic("deadlock in TestChanOfGC")
		}
	}()

	defer func() {
		done <- true
	}()

	type T *uintptr
	tt := TypeOf(T(nil))
	ct := ChanOf(BothDir, tt)

	// NOTE: The garbage collector handles allocated channels specially,
	// so we have to save pointers to channels in x; the pointer code will
	// use the gc info in the newly constructed chan type.
	const n = 100
	var x []interface{}
	for i := 0; i < n; i++ {
		v := MakeChan(ct, n)
		for j := 0; j < n; j++ {
			p := new(uintptr)
			*p = uintptr(i*n + j)
			v.Send(ValueOf(p).Convert(tt))
		}
		pv := New(ct)
		pv.Elem().Set(v)
		x = append(x, pv.Interface())
	}
	runtime.GC()

	for i, xi := range x {
		v := ValueOf(xi).Elem()
		for j := 0; j < n; j++ {
			pv, _ := v.Recv()
			k := pv.Elem().Interface()
			if k != uintptr(i*n+j) {
				t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
			}
		}
	}
}
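
// TestMapOf covers the same two cases for map types: building map[K]V for
// key and value types not already in the binary, and recovering the
// canonical descriptor for a map type that is.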
|
|
|
|
|
|
reflect: add ArrayOf, ChanOf, MapOf, SliceOf
In order to add these, we need to be able to find references
to such types that already exist in the binary. To do that, introduce
a new linker section holding a list of the types corresponding to
arrays, chans, maps, and slices.
To offset the storage cost of this list, and to simplify the code,
remove the interface{} header from the representation of a
runtime type. It was used in early versions of the code but was
made obsolete by the kind field: a switch on kind is more efficient
than a type switch.
In the godoc binary, removing the interface{} header cuts two
words from each of about 10,000 types. Adding back the list of pointers
to array, chan, map, and slice types reintroduces one word for
each of about 500 types. On a 64-bit machine, then, this CL *removes*
a net 156 kB of read-only data from the binary.
This CL does not include the needed support for precise garbage
collection. I have created issue 4375 to track that.
This CL also does not set the 'algorithm' - specifically the equality
and copy functions - for a new array correctly, so I have unexported
ArrayOf for now. That is also part of issue 4375.
Fixes #2339.
R=r, remyoudompheng, mirtchovski, iant
CC=golang-dev
https://golang.org/cl/6572043
2012-11-13 13:06:29 -05:00

func TestMapOf(t *testing.T) {
	// check construction and use of type not in binary
	type K string
	type V float64

	v := MakeMap(MapOf(TypeOf(K("")), TypeOf(V(0))))
	runtime.GC()
	v.SetMapIndex(ValueOf(K("a")), ValueOf(V(1)))
	runtime.GC()

	s := fmt.Sprint(v.Interface())
	want := "map[a:1]"
	if s != want {
		t.Errorf("constructed map = %s, want %s", s, want)
	}

	// check that type already in binary is found
	checkSameType(t, MapOf(TypeOf(V(0)), TypeOf(K(""))), map[V]K(nil))

	// check that invalid key type panics
	shouldPanic(func() { MapOf(TypeOf((func())(nil)), TypeOf(false)) })
}
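
// Aside (not part of the original tests): MapOf requires a key type whose
// values are comparable with ==, mirroring the language rule for map keys.
// A minimal sketch, assuming only the exported reflect API:
func exampleMapKeyRuleSketch() Type {
	ok := MapOf(TypeOf(""), TypeOf(0)) // map[string]int: strings are comparable
	// Slice keys are not comparable, so this would panic, just like the
	// shouldPanic case above:
	//	MapOf(TypeOf([]byte(nil)), TypeOf(0))
	return ok
}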

func TestMapOfGCKeys(t *testing.T) {
	type T *uintptr
	tt := TypeOf(T(nil))
	mt := MapOf(tt, TypeOf(false))

	// NOTE: The garbage collector handles allocated maps specially,
	// so we have to save pointers to maps in x; the pointer code will
	// use the gc info in the newly constructed map type.
	const n = 100
	var x []interface{}
	for i := 0; i < n; i++ {
		v := MakeMap(mt)
		for j := 0; j < n; j++ {
			p := new(uintptr)
			*p = uintptr(i*n + j)
			v.SetMapIndex(ValueOf(p).Convert(tt), ValueOf(true))
		}
		pv := New(mt)
		pv.Elem().Set(v)
		x = append(x, pv.Interface())
	}
	runtime.GC()

	for i, xi := range x {
		v := ValueOf(xi).Elem()
		var out []int
		for _, kv := range v.MapKeys() {
			out = append(out, int(kv.Elem().Interface().(uintptr)))
		}
		sort.Ints(out)
		for j, k := range out {
			if k != i*n+j {
				t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
			}
		}
	}
}

func TestMapOfGCValues(t *testing.T) {
	type T *uintptr
	tt := TypeOf(T(nil))
	mt := MapOf(TypeOf(1), tt)

	// NOTE: The garbage collector handles allocated maps specially,
	// so we have to save pointers to maps in x; the pointer code will
	// use the gc info in the newly constructed map type.
	const n = 100
	var x []interface{}
	for i := 0; i < n; i++ {
		v := MakeMap(mt)
		for j := 0; j < n; j++ {
			p := new(uintptr)
			*p = uintptr(i*n + j)
			v.SetMapIndex(ValueOf(j), ValueOf(p).Convert(tt))
		}
		pv := New(mt)
		pv.Elem().Set(v)
		x = append(x, pv.Interface())
	}
	runtime.GC()

	for i, xi := range x {
		v := ValueOf(xi).Elem()
		for j := 0; j < n; j++ {
			k := v.MapIndex(ValueOf(j)).Elem().Interface().(uintptr)
			if k != uintptr(i*n+j) {
				t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
			}
		}
	}
}

func TestTypelinksSorted(t *testing.T) {
	var last string
	for i, n := range TypeLinks() {
		if n < last {
			t.Errorf("typelinks not sorted: %q [%d] > %q [%d]", last, i-1, n, i)
		}
		last = n
	}
}

func TestFuncOf(t *testing.T) {
	// check construction and use of type not in binary
	type K string
	type V float64

	fn := func(args []Value) []Value {
		if len(args) != 1 {
			t.Errorf("args == %v, want exactly one arg", args)
		} else if args[0].Type() != TypeOf(K("")) {
			t.Errorf("args[0] is type %v, want %v", args[0].Type(), TypeOf(K("")))
		} else if args[0].String() != "gopher" {
			t.Errorf("args[0] = %q, want %q", args[0].String(), "gopher")
		}
		return []Value{ValueOf(V(3.14))}
	}
	v := MakeFunc(FuncOf([]Type{TypeOf(K(""))}, []Type{TypeOf(V(0))}, false), fn)

	outs := v.Call([]Value{ValueOf(K("gopher"))})
	if len(outs) != 1 {
		t.Fatalf("v.Call returned %v, want exactly one result", outs)
	} else if outs[0].Type() != TypeOf(V(0)) {
		t.Fatalf("v.Call[0] is type %v, want %v", outs[0].Type(), TypeOf(V(0)))
	}
	f := outs[0].Float()
	if f != 3.14 {
		t.Errorf("constructed func returned %f, want %f", f, 3.14)
	}

	// check that types already in binary are found
	type T1 int
	testCases := []struct {
		in, out  []Type
		variadic bool
		want     interface{}
	}{
		{in: []Type{TypeOf(T1(0))}, want: (func(T1))(nil)},
		{in: []Type{TypeOf(int(0))}, want: (func(int))(nil)},
		{in: []Type{SliceOf(TypeOf(int(0)))}, variadic: true, want: (func(...int))(nil)},
		{in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false)}, want: (func(int) bool)(nil)},
		{in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false), TypeOf("")}, want: (func(int) (bool, string))(nil)},
	}
	for _, tt := range testCases {
		checkSameType(t, FuncOf(tt.in, tt.out, tt.variadic), tt.want)
	}

	// check that variadic requires the last element to be a slice.
	FuncOf([]Type{TypeOf(1), TypeOf(""), SliceOf(TypeOf(false))}, nil, true)
	shouldPanic(func() { FuncOf([]Type{TypeOf(0), TypeOf(""), TypeOf(false)}, nil, true) })
	shouldPanic(func() { FuncOf(nil, nil, true) })
}
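
// Aside (not part of the original tests): the same FuncOf/MakeFunc pairing
// can build an ordinary typed function at runtime. A minimal sketch:
func exampleFuncOfSketch() int64 {
	ft := FuncOf([]Type{TypeOf(0), TypeOf(0)}, []Type{TypeOf(0)}, false)
	add := MakeFunc(ft, func(args []Value) []Value {
		return []Value{ValueOf(int(args[0].Int() + args[1].Int()))}
	})
	return add.Call([]Value{ValueOf(2), ValueOf(3)})[0].Int() // 5
}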

type B1 struct {
	X int
	Y int
	Z int
}

func BenchmarkFieldByName1(b *testing.B) {
	t := TypeOf(B1{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			t.FieldByName("Z")
		}
	})
}

func BenchmarkFieldByName2(b *testing.B) {
	t := TypeOf(S3{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			t.FieldByName("B")
		}
	})
}
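
// Aside (not part of the original tests): FieldByName searches by name on
// every call, which is what these benchmarks measure. A caller on a hot
// path can do the lookup once and reuse the field's index. A minimal sketch:
func exampleCachedFieldIndex() int64 {
	f, _ := TypeOf(B1{}).FieldByName("Z") // one-time lookup by name
	v := ValueOf(B1{X: 1, Y: 2, Z: 3})
	return v.FieldByIndex(f.Index).Int() // cheap repeated access by index
}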

type R0 struct {
	*R1
	*R2
	*R3
	*R4
}

type R1 struct {
	*R5
	*R6
	*R7
	*R8
}

type R2 R1
type R3 R1
type R4 R1

type R5 struct {
	*R9
	*R10
	*R11
	*R12
}

type R6 R5
type R7 R5
type R8 R5

type R9 struct {
	*R13
	*R14
	*R15
	*R16
}

type R10 R9
type R11 R9
type R12 R9

type R13 struct {
	*R17
	*R18
	*R19
	*R20
}

type R14 R13
type R15 R13
type R16 R13

type R17 struct {
	*R21
	*R22
	*R23
	*R24
}

type R18 R17
type R19 R17
type R20 R17

type R21 struct {
	X int
}

type R22 R21
type R23 R21
type R24 R21

func TestEmbed(t *testing.T) {
	// X is reachable along many equally shallow embedding paths in R0,
	// so the lookup is ambiguous and FieldByName must report failure.
	typ := TypeOf(R0{})
	f, ok := typ.FieldByName("X")
	if ok {
		t.Fatalf(`FieldByName("X") should fail, returned %v`, f.Index)
	}
}

func BenchmarkFieldByName3(b *testing.B) {
	t := TypeOf(R0{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			t.FieldByName("X")
		}
	})
}

type S struct {
	i1 int64
	i2 int64
}

func BenchmarkInterfaceBig(b *testing.B) {
	v := ValueOf(S{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			v.Interface()
		}
	})
	b.StopTimer()
}

func TestAllocsInterfaceBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping malloc count in short mode")
	}
	v := ValueOf(S{})
	if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 {
		t.Error("allocs:", allocs)
	}
}

func BenchmarkInterfaceSmall(b *testing.B) {
	v := ValueOf(int64(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			v.Interface()
		}
	})
}

func TestAllocsInterfaceSmall(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping malloc count in short mode")
	}
	v := ValueOf(int64(0))
	if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 {
		t.Error("allocs:", allocs)
	}
}

// An exhaustive is a mechanism for writing exhaustive or stochastic tests.
// The basic usage is:
//
//	for x.Next() {
//		... code using x.Maybe() or x.Choose(n) to create test cases ...
//	}
//
// Each iteration of the loop returns a different set of results, until all
// possible result sets have been explored. It is okay for different code paths
// to make different method call sequences on x, but there must be no
// other source of non-determinism in the call sequences.
//
// When faced with a new decision, x chooses randomly. Future explorations
// of that path will choose successive values for the result. Thus, stopping
// the loop after a fixed number of iterations gives somewhat stochastic
// testing.
//
// Example:
//
//	for x.Next() {
//		v := make([]bool, x.Choose(4))
//		for i := range v {
//			v[i] = x.Maybe()
//		}
//		fmt.Println(v)
//	}
//
// prints (in some order):
//
//	[]
//	[false]
//	[true]
//	[false false]
//	[false true]
//	...
//	[true true]
//	[false false false]
//	...
//	[true true true]
//	[false false false false]
//	...
//	[true true true true]
type exhaustive struct {
	r    *rand.Rand
	pos  int
	last []choice
}

type choice struct {
	off int
	n   int
	max int
}

func (x *exhaustive) Next() bool {
	if x.r == nil {
		x.r = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	x.pos = 0
	if x.last == nil {
		x.last = []choice{}
		return true
	}
	for i := len(x.last) - 1; i >= 0; i-- {
		c := &x.last[i]
		if c.n+1 < c.max {
			c.n++
			x.last = x.last[:i+1]
			return true
		}
	}
	return false
}

func (x *exhaustive) Choose(max int) int {
	if x.pos >= len(x.last) {
		x.last = append(x.last, choice{x.r.Intn(max), 0, max})
	}
	c := &x.last[x.pos]
	x.pos++
	if c.max != max {
		panic("inconsistent use of exhaustive tester")
	}
	return (c.n + c.off) % max
}

func (x *exhaustive) Maybe() bool {
	return x.Choose(2) == 1
}
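
// Aside (not part of the original tests): a minimal use of the exhaustive
// tester above, enumerating all pairs of binary choices. With two Maybe
// calls per iteration, the loop runs exactly four times:
func exampleExhaustiveSketch() int {
	x := new(exhaustive)
	seen := 0
	for x.Next() {
		_, _ = x.Maybe(), x.Maybe() // two independent binary choices
		seen++
	}
	return seen // 4
}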

func GCFunc(args []Value) []Value {
	runtime.GC()
	return []Value{}
}

func TestReflectFuncTraceback(t *testing.T) {
	f := MakeFunc(TypeOf(func() {}), GCFunc)
	f.Call([]Value{})
}

func TestReflectMethodTraceback(t *testing.T) {
	p := Point{3, 4}
	m := ValueOf(p).MethodByName("GCMethod")
	i := ValueOf(m.Interface()).Call([]Value{ValueOf(5)})[0].Int()
	if i != 8 {
		t.Errorf("Call returned %d; want 8", i)
	}
}

func TestBigZero(t *testing.T) {
	const size = 1 << 10
	var v [size]byte
	z := Zero(ValueOf(v).Type()).Interface().([size]byte)
	for i := 0; i < size; i++ {
		if z[i] != 0 {
			t.Fatalf("Zero object not all zero, index %d", i)
		}
	}
}
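
// Aside (not part of the original tests): Zero is also the idiomatic way to
// clear an addressable value in place. A minimal sketch:
func exampleZeroResetSketch() int {
	x := 42
	v := ValueOf(&x).Elem()
	v.Set(Zero(v.Type())) // x becomes 0
	return x
}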

func TestFieldByIndexNil(t *testing.T) {
	type P struct {
		F int
	}
	type T struct {
		*P
	}
	v := ValueOf(T{})

	v.FieldByName("P") // should be fine

	defer func() {
		if err := recover(); err == nil {
			t.Fatalf("no error")
		} else if !strings.Contains(fmt.Sprint(err), "nil pointer to embedded struct") {
			t.Fatalf(`err=%q, wanted error containing "nil pointer to embedded struct"`, err)
		}
	}()
	v.FieldByName("F") // should panic

	t.Fatalf("did not panic")
}
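
// Aside (not part of the original tests): a defensive walk that stops at nil
// embedded pointers instead of panicking like FieldByName does above.
// A minimal sketch, assuming an index path as produced by Type.FieldByName:
func fieldByIndexSafeSketch(v Value, index []int) (Value, bool) {
	for _, i := range index {
		if v.Kind() == Ptr {
			if v.IsNil() {
				return Value{}, false // nil pointer to embedded struct
			}
			v = v.Elem()
		}
		v = v.Field(i)
	}
	return v, true
}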
reflect, runtime: fix crash in GC due to reflect.call + precise GC
Given
type Outer struct {
*Inner
...
}
the compiler generates the implementation of (*Outer).M dispatching to
the embedded Inner. The implementation is logically:
func (p *Outer) M() {
(p.Inner).M()
}
but since the only change here is the replacement of one pointer
receiver with another, the actual generated code overwrites the
original receiver with the p.Inner pointer and then jumps to the M
method expecting the *Inner receiver.
During reflect.Value.Call, we create an argument frame and the
associated data structures to describe it to the garbage collector,
populate the frame, call reflect.call to run a function call using
that frame, and then copy the results back out of the frame. The
reflect.call function does a memmove of the frame structure onto the
stack (to set up the inputs), runs the call, and the memmoves the
stack back to the frame structure (to preserve the outputs).
Originally reflect.call did not distinguish inputs from outputs: both
memmoves were for the full stack frame. However, in the case where the
called function was one of these wrappers, the rewritten receiver is
almost certainly a different type than the original receiver. This is
not a problem on the stack, where we use the program counter to
determine the type information and understand that during (*Outer).M
the receiver is an *Outer while during (*Inner).M the receiver in the
same memory word is now an *Inner. But in the statically typed
argument frame created by reflect, the receiver is always an *Outer.
Copying the modified receiver pointer off the stack into the frame
will store an *Inner there, and then if a garbage collection happens
to scan that argument frame before it is discarded, it will scan the
*Inner memory as if it were an *Outer. If the two have different
memory layouts, the collection will interpret the memory incorrectly.
Fix by only copying back the results.
Fixes #7725.
LGTM=khr
R=khr
CC=dave, golang-codereviews
https://golang.org/cl/85180043
2014-04-08 11:11:35 -04:00

// Given
// type Outer struct {
//	*Inner
//	...
// }
// the compiler generates the implementation of (*Outer).M dispatching to the embedded Inner.
// The implementation is logically:
// func (p *Outer) M() {
//	(p.Inner).M()
// }
// but since the only change here is the replacement of one pointer receiver with another,
// the actual generated code overwrites the original receiver with the p.Inner pointer and
// then jumps to the M method expecting the *Inner receiver.
//
// During reflect.Value.Call, we create an argument frame and the associated data structures
// to describe it to the garbage collector, populate the frame, call reflect.call to
// run a function call using that frame, and then copy the results back out of the frame.
// The reflect.call function does a memmove of the frame structure onto the
// stack (to set up the inputs), runs the call, and then memmoves the stack back to
// the frame structure (to preserve the outputs).
//
// Originally reflect.call did not distinguish inputs from outputs: both memmoves
// were for the full stack frame. However, in the case where the called function was
// one of these wrappers, the rewritten receiver is almost certainly a different type
// than the original receiver. This is not a problem on the stack, where we use the
// program counter to determine the type information and understand that
// during (*Outer).M the receiver is an *Outer while during (*Inner).M the receiver in the same
// memory word is now an *Inner. But in the statically typed argument frame created
// by reflect, the receiver is always an *Outer. Copying the modified receiver pointer
// off the stack into the frame will store an *Inner there, and then if a garbage collection
// happens to scan that argument frame before it is discarded, it will scan the *Inner
// memory as if it were an *Outer. If the two have different memory layouts, the
// collection will interpret the memory incorrectly.
//
// One such possible incorrect interpretation is to treat two arbitrary memory words
// (Inner.P1 and Inner.P2 below) as an interface (Outer.R below). Because interpreting
// an interface requires dereferencing the itab word, the misinterpretation will try to
// dereference Inner.P1, causing a crash during garbage collection.
//
// This came up in a real program in issue 7725.

type Outer struct {
	*Inner
	R io.Reader
}

type Inner struct {
	X  *Outer
	P1 uintptr
	P2 uintptr
}

func (pi *Inner) M() {
	// Clear references to pi so that the only way the
	// garbage collection will find the pointer is in the
	// argument frame, typed as a *Outer.
	pi.X.Inner = nil

	// Set up an interface value that will cause a crash.
	// P1 = 1 is non-zero, so the interface looks non-nil.
	// P2 = pi ensures that the data word points into the
	// allocated heap; if not the collection skips the interface
	// value as irrelevant, without dereferencing P1.
	pi.P1 = 1
	pi.P2 = uintptr(unsafe.Pointer(pi))
}

func TestCallMethodJump(t *testing.T) {
	// In reflect.Value.Call, trigger a garbage collection after reflect.call
	// returns but before the args frame has been discarded.
	// This is a little clumsy but makes the failure repeatable.
	*CallGC = true

	p := &Outer{Inner: new(Inner)}
	p.Inner.X = p
	ValueOf(p).Method(0).Call(nil)

	// Stop garbage collecting during reflect.call.
	*CallGC = false
}

func TestMakeFuncStackCopy(t *testing.T) {
	target := func(in []Value) []Value {
		runtime.GC()
		useStack(16)
		return []Value{ValueOf(9)}
	}

	var concrete func(*int, int) int
	fn := MakeFunc(ValueOf(concrete).Type(), target)
	ValueOf(&concrete).Elem().Set(fn)
	x := concrete(nil, 7)
	if x != 9 {
		t.Errorf("have %d want 9", x)
	}
}

// use about n KB of stack
func useStack(n int) {
	if n == 0 {
		return
	}
	var b [1024]byte // makes frame about 1KB
	useStack(n - 1 + int(b[99]))
}

type Impl struct{}

func (Impl) F() {}

func TestValueString(t *testing.T) {
	rv := ValueOf(Impl{})
	if rv.String() != "<reflect_test.Impl Value>" {
		t.Errorf("ValueOf(Impl{}).String() = %q, want %q", rv.String(), "<reflect_test.Impl Value>")
	}

	method := rv.Method(0)
	if method.String() != "<func() Value>" {
		t.Errorf("ValueOf(Impl{}).Method(0).String() = %q, want %q", method.String(), "<func() Value>")
	}
}

func TestInvalid(t *testing.T) {
	// Used to have inconsistency between IsValid() and Kind() != Invalid.
	type T struct{ v interface{} }

	v := ValueOf(T{}).Field(0)
	if v.IsValid() != true || v.Kind() != Interface {
		t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind())
	}
	v = v.Elem()
	if v.IsValid() != false || v.Kind() != Invalid {
		t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind())
	}
}
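
// Aside (not part of the original tests): the IsValid check above is the
// usual guard before touching the result of Elem. A minimal sketch, assuming
// v is an interface-kinded Value as in TestInvalid:
func exampleElemGuardSketch(v Value, fallback string) string {
	e := v.Elem() // zero Value when the interface is nil
	if !e.IsValid() {
		return fallback
	}
	return e.Kind().String()
}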

// Issue 8917.
func TestLargeGCProg(t *testing.T) {
	fv := ValueOf(func([256]*byte) {})
	fv.Call([]Value{ValueOf([256]*byte{})})
}

func fieldIndexRecover(t Type, i int) (recovered interface{}) {
	defer func() {
		recovered = recover()
	}()

	t.Field(i)
	return
}

// Issue 15046.
func TestTypeFieldOutOfRangePanic(t *testing.T) {
	typ := TypeOf(struct{ X int }{10})
	testIndices := [...]struct {
		i         int
		mustPanic bool
	}{
		0: {-2, true},
		1: {0, false},
		2: {1, true},
		3: {1 << 10, true},
	}
	for i, tt := range testIndices {
		recoveredErr := fieldIndexRecover(typ, tt.i)
		if tt.mustPanic {
			if recoveredErr == nil {
				t.Errorf("#%d: fieldIndex %d expected to panic", i, tt.i)
			}
		} else {
			if recoveredErr != nil {
				t.Errorf("#%d: got err=%v, expected no panic", i, recoveredErr)
			}
		}
	}
}

// Issue 9179.
func TestCallGC(t *testing.T) {
	f := func(a, b, c, d, e string) {
	}
	g := func(in []Value) []Value {
		runtime.GC()
		return nil
	}
	typ := ValueOf(f).Type()
	f2 := MakeFunc(typ, g).Interface().(func(string, string, string, string, string))
	f2("four", "five5", "six666", "seven77", "eight888")
}

// Issue 18635 (function version).
func TestKeepFuncLive(t *testing.T) {
	// Test that we keep makeFuncImpl live as long as it is
	// referenced on the stack.
	typ := TypeOf(func(i int) {})
	var f, g func(in []Value) []Value
	f = func(in []Value) []Value {
		clobber()
		i := int(in[0].Int())
		if i > 0 {
			// We can't use Value.Call here because
			// runtime.call* will keep the makeFuncImpl
			// alive. However, by converting it to an
			// interface value and calling that,
			// reflect.callReflect is the only thing that
			// can keep the makeFuncImpl live.
			//
			// Alternate between f and g so that if we do
			// reuse the memory prematurely it's more
			// likely to get obviously corrupted.
			MakeFunc(typ, g).Interface().(func(i int))(i - 1)
		}
		return nil
	}
	g = func(in []Value) []Value {
		clobber()
		i := int(in[0].Int())
		MakeFunc(typ, f).Interface().(func(i int))(i)
		return nil
	}
	MakeFunc(typ, f).Call([]Value{ValueOf(10)})
}

type UnExportedFirst int

func (i UnExportedFirst) ΦExported()  {}
func (i UnExportedFirst) unexported() {}

// Issue 21177
func TestMethodByNameUnExportedFirst(t *testing.T) {
	defer func() {
		if recover() != nil {
			t.Errorf("should not panic")
		}
	}()
	typ := TypeOf(UnExportedFirst(0))
	m, _ := typ.MethodByName("ΦExported")
	if m.Name != "ΦExported" {
		t.Errorf("got %s, expected ΦExported", m.Name)
	}
}
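
// Aside (not part of the original tests): exportedness is decided by Unicode
// upper case, not ASCII, so ΦExported (U+03A6) is exported even though it
// sorts after the ASCII name "unexported". go/token, already imported by
// this file, spells out the rule:
func exampleExportednessSketch() (bool, bool) {
	return token.IsExported("ΦExported"), token.IsExported("unexported") // true, false
}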

// Issue 18635 (method version).
type KeepMethodLive struct{}

func (k KeepMethodLive) Method1(i int) {
	clobber()
	if i > 0 {
		ValueOf(k).MethodByName("Method2").Interface().(func(i int))(i - 1)
	}
}

func (k KeepMethodLive) Method2(i int) {
	clobber()
	ValueOf(k).MethodByName("Method1").Interface().(func(i int))(i)
}

func TestKeepMethodLive(t *testing.T) {
	// Test that we keep methodValue live as long as it is
	// referenced on the stack.
	KeepMethodLive{}.Method1(10)
}

// clobber tries to clobber unreachable memory.
func clobber() {
	runtime.GC()
	for i := 1; i < 32; i++ {
		for j := 0; j < 10; j++ {
			obj := make([]*byte, i)
			sink = obj
		}
	}
	runtime.GC()
}
reflect: ensure correct scanning of return values
During a call to a reflect-generated function or method (via
makeFuncStub or methodValueCall), when should we scan the return
values?
When we're starting a reflect call, the space on the stack for the
return values is not initialized yet, as it contains whatever junk was
on the stack of the caller at the time. The return space must not be
scanned during a GC.
When we're finishing a reflect call, the return values are
initialized, and must be scanned during a GC to make sure that any
pointers in the return values are found and their referents retained.
When the GC stack walk comes across a reflect call in progress on the
stack, it needs to know whether to scan the results or not. It doesn't
know the progress of the reflect call, so it can't decide by
itself. The reflect package needs to tell it.
This CL adds another slot in the frame of makeFuncStub and
methodValueCall so we can put a boolean in there which tells the
runtime whether to scan the results or not.
This CL also adds the args length to reflectMethodValue so the
runtime can restrict its scanning to only the args section (not the
results) if the reflect package says the results aren't ready yet.
Do a delicate dance in the reflect package to set the "results are
valid" bit. We need to make sure we set the bit only after we've
copied the results back to the stack. But we must set the bit before
we drop reflect's copy of the results. Otherwise, we might have a
state where (temporarily) no one has a live copy of the results.
That's the state we were observing in issue #27695 before this CL.
The bitmap used by the runtime currently contains only the args.
(Actually, it contains all the bits, but the size is set so we use
only the args portion.) This is safe for early in a reflect call, but
unsafe late in a reflect call. The test issue27695.go demonstrates
this unsafety. We change the bitmap to always include both args
and results, and decide at runtime which portion to use.
issue27695.go only has a test for method calls. Function calls were ok
because there wasn't a safepoint between when reflect dropped its copy
of the return values and when the caller is resumed. This may change
when we introduce safepoints everywhere.
This truncate-to-only-the-args was part of CL 9888 (in 2015). That
part of the CL fixed the problem demonstrated in issue27695b.go but
introduced the problem demonstrated in issue27695.go.
TODO, in another CL: simplify FuncLayout and its test. stack return
value is now identical to frametype.ptrdata + frametype.gcdata.
Fixes #27695
Change-Id: I2d49b34e34a82c6328b34f02610587a291b25c5f
Reviewed-on: https://go-review.googlesource.com/137440
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
2018-09-25 15:54:11 -07:00

type funcLayoutTest struct {
	rcvr, t                  Type
	size, argsize, retOffset uintptr
	stack                    []byte // pointer bitmap: 1 is pointer, 0 is scalar
	gc                       []byte
}

var funcLayoutTests []funcLayoutTest
runtime: replace GC programs with simpler encoding, faster decoder
Small types record the location of pointers in their memory layout
by using a simple bitmap. In Go 1.4 the bitmap held 4-bit entries,
and in Go 1.5 the bitmap holds 1-bit entries, but in both cases using
a bitmap for a large type containing arrays does not make sense:
if someone refers to the type [1<<28]*byte in a program in such
a way that the type information makes it into the binary, it would be
a waste of space to write a 128 MB (for 4-bit entries) or even 32 MB
(for 1-bit entries) bitmap full of 1s into the binary or even to keep
one in memory during the execution of the program.
For large types containing arrays, it is much more compact to describe
the locations of pointers using a notation that can express repetition
than to lay out a bitmap of pointers. Go 1.4 included such a notation,
called ``GC programs'' but it was complex, required recursion during
decoding, and was generally slow. Dmitriy measured the execution of
these programs writing directly to the heap bitmap as being 7x slower
than copying from a preunrolled 4-bit mask (and frankly that code was
not terribly fast either). For some tests, unrollgcprog1 was seen costing
as much as 3x more than the rest of malloc combined.
This CL introduces a different form for the GC programs. They use a
simple Lempel-Ziv-style encoding of the 1-bit pointer information,
in which the only operations are (1) emit the following n bits
and (2) repeat the last n bits c more times. This encoding can be
generated directly from the Go type information (using repetition
only for arrays or large runs of non-pointer data) and it can be decoded
very efficiently. In particular the decoding requires little state and
no recursion, so that the entire decoding can run without any memory
accesses other than the reads of the encoding and the writes of the
decoded form to the heap bitmap. For recursive types like arrays of
arrays of arrays, the inner instructions are only executed once, not
n times, so that large repetitions run at full speed. (In contrast, large
repetitions in the old programs repeated the individual bit-level layout
of the inner data over and over.) The result is as much as 25x faster
decoding compared to the old form.
Because the old decoder was so slow, Go 1.4 had three (or so) cases
for how to set the heap bitmap bits for an allocation of a given type:
(1) If the type had an even number of words up to 32 words, then
the 4-bit pointer mask for the type fit in no more than 16 bytes;
store the 4-bit pointer mask directly in the binary and copy from it.
(1b) If the type had an odd number of words up to 15 words, then
the 4-bit pointer mask for the type, doubled to end on a byte boundary,
fit in no more than 16 bytes; store that doubled mask directly in the
binary and copy from it.
(2) If the type had an even number of words up to 128 words,
or an odd number of words up to 63 words (again due to doubling),
then the 4-bit pointer mask would fit in a 64-byte unrolled mask.
Store a GC program in the binary, but leave space in the BSS for
the unrolled mask. Execute the GC program to construct the mask the
first time it is needed, and thereafter copy from the mask.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
(This is the case that was 7x slower than the other two.)
Because the new pointer masks store 1-bit entries instead of 4-bit
entries and because using the decoder no longer carries a significant
overhead, after this CL (that is, for Go 1.5) there are only two cases:
(1) If the type is 128 words or less (no condition about odd or even),
store the 1-bit pointer mask directly in the binary and use it to
initialize the heap bitmap during malloc. (Implemented in CL 9702.)
(2) There is no case 2 anymore.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
Executing the GC program directly into the heap bitmap (case (3) above)
was disabled for the Go 1.5 dev cycle, both to avoid needing to use
GC programs for typedmemmove and to avoid updating that code as
the heap bitmap format changed. Typedmemmove no longer uses this
type information; as of CL 9886 it uses the heap bitmap directly.
Now that the heap bitmap format is stable, we reintroduce GC programs
and their space savings.
Benchmarks for heapBitsSetType, before this CL vs this CL:
name old mean new mean delta
SetTypePtr 7.59ns × (0.99,1.02) 5.16ns × (1.00,1.00) -32.05% (p=0.000)
SetTypePtr8 21.0ns × (0.98,1.05) 21.4ns × (1.00,1.00) ~ (p=0.179)
SetTypePtr16 24.1ns × (0.99,1.01) 24.6ns × (1.00,1.00) +2.41% (p=0.001)
SetTypePtr32 31.2ns × (0.99,1.01) 32.4ns × (0.99,1.02) +3.72% (p=0.001)
SetTypePtr64 45.2ns × (1.00,1.00) 47.2ns × (1.00,1.00) +4.42% (p=0.000)
SetTypePtr126 75.8ns × (0.99,1.01) 79.1ns × (1.00,1.00) +4.25% (p=0.000)
SetTypePtr128 74.3ns × (0.99,1.01) 77.6ns × (1.00,1.01) +4.55% (p=0.000)
SetTypePtrSlice 726ns × (1.00,1.01) 712ns × (1.00,1.00) -1.95% (p=0.001)
SetTypeNode1 20.0ns × (0.99,1.01) 20.7ns × (1.00,1.00) +3.71% (p=0.000)
SetTypeNode1Slice 112ns × (1.00,1.00) 113ns × (0.99,1.00) ~ (p=0.070)
SetTypeNode8 23.9ns × (1.00,1.00) 24.7ns × (1.00,1.01) +3.18% (p=0.000)
SetTypeNode8Slice 294ns × (0.99,1.02) 287ns × (0.99,1.01) -2.38% (p=0.015)
SetTypeNode64 52.8ns × (0.99,1.03) 51.8ns × (0.99,1.01) ~ (p=0.069)
SetTypeNode64Slice 1.13µs × (0.99,1.05) 1.14µs × (0.99,1.00) ~ (p=0.767)
SetTypeNode64Dead 36.0ns × (1.00,1.01) 32.5ns × (0.99,1.00) -9.67% (p=0.000)
SetTypeNode64DeadSlice 1.43µs × (0.99,1.01) 1.40µs × (1.00,1.00) -2.39% (p=0.001)
SetTypeNode124 75.7ns × (1.00,1.01) 79.0ns × (1.00,1.00) +4.44% (p=0.000)
SetTypeNode124Slice 1.94µs × (1.00,1.01) 2.04µs × (0.99,1.01) +4.98% (p=0.000)
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
2015-05-08 01:43:18 -04:00
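
// Aside (not part of the original file): the commit message above describes
// the GC program encoding only informally. A toy decoder over a hypothetical
// instruction stream with the two operations it names — emit the next n bits,
// and repeat the last n emitted bits c more times — might look like the
// sketch below; the real runtime format packs opcodes and bits differently.
type toyGCInstr struct {
	op   int    // toyEmit or toyRepeat
	bits []byte // literal bits (one byte per bit) for toyEmit
	n, c int    // for toyRepeat: repeat the last n bits c more times
}

const (
	toyEmit = iota
	toyRepeat
)

func decodeToyGCProg(prog []toyGCInstr) []byte {
	var out []byte
	for _, in := range prog {
		switch in.op {
		case toyEmit:
			out = append(out, in.bits...)
		case toyRepeat:
			tail := append([]byte(nil), out[len(out)-in.n:]...) // copy the last n bits
			for i := 0; i < in.c; i++ {
				out = append(out, tail...)
			}
		}
	}
	return out
}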

func init() {
	var argAlign uintptr = PtrSize
	if runtime.GOARCH == "amd64p32" {
		argAlign = 2 * PtrSize
	}
	roundup := func(x uintptr, a uintptr) uintptr {
		return (x + a - 1) / a * a
	}

	funcLayoutTests = append(funcLayoutTests,
		funcLayoutTest{
			nil,
			ValueOf(func(a, b string) string { return "" }).Type(),
			6 * PtrSize,
			4 * PtrSize,
			4 * PtrSize,
			[]byte{1, 0, 1, 0, 1},
SetTypeNode124Slice 1.94µs × (1.00,1.01) 2.04µs × (0.99,1.01) +4.98% (p=0.000)
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
2015-05-08 01:43:18 -04:00
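The two-operation scheme is simple enough to sketch directly. The decoder
below is a toy: the runtime's actual encoding is byte-oriented and its opcode
format differs, so the (op, n, c) instruction shape here is invented purely
for illustration. What it does show is the property the message highlights:
expansion needs no recursion and almost no state beyond the output itself.

package main

import "fmt"

type op int

const (
	emit   op = iota // append the next n literal bits
	repeat           // append the last n bits, c more times
)

type insn struct {
	op   op
	n, c int
	bits []byte // literal 0/1 bits, used by emit
}

// decode expands a program into the 1-bit pointer mask it denotes.
func decode(prog []insn) []byte {
	var mask []byte
	for _, in := range prog {
		switch in.op {
		case emit:
			mask = append(mask, in.bits[:in.n]...)
		case repeat:
			tail := append([]byte(nil), mask[len(mask)-in.n:]...)
			for i := 0; i < in.c; i++ {
				mask = append(mask, tail...)
			}
		}
	}
	return mask
}

func main() {
	// A [16]*byte-like layout: one pointer word, then repeat it 15 times.
	prog := []insn{
		{op: emit, n: 1, bits: []byte{1}},
		{op: repeat, n: 1, c: 15},
	}
	fmt.Println(decode(prog)) // sixteen 1s
}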
|
|
|
[]byte{1, 0, 1, 0, 1},
|
2014-12-01 07:52:09 -08:00
|
|
|
})
|
|
|
|
|
|
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
var r []byte
|
2014-12-01 07:52:09 -08:00
|
|
|
if PtrSize == 4 {
|
2015-04-28 00:28:47 -04:00
|
|
|
r = []byte{0, 0, 0, 1}
|
2014-12-01 07:52:09 -08:00
|
|
|
} else {
|
2015-04-28 00:28:47 -04:00
|
|
|
r = []byte{0, 0, 1}
|
2014-12-01 07:52:09 -08:00
|
|
|
}
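A quick check of why r differs by word size for func(a, b, c uint32, p *byte,
d uint16): the three uint32s fill ceil(12/PtrSize) argument words, and p lands
in the next word. This throwaway computation (assuming only that the args
pack as the test's roundup arithmetic implies) reproduces both masks:

package main

import "fmt"

func main() {
	for _, ptrSize := range []uintptr{4, 8} {
		words := (3*4 + ptrSize - 1) / ptrSize // words covering a, b, c
		mask := make([]byte, words+1)
		mask[words] = 1 // the word holding p
		fmt.Println(ptrSize, mask) // 4 [0 0 0 1], 8 [0 0 1]
	}
}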
|
|
|
|
|
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
nil,
|
|
|
|
|
ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(),
|
2014-12-23 10:57:37 -08:00
|
|
|
roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign),
|
2014-12-01 07:52:09 -08:00
|
|
|
roundup(3*4, PtrSize) + PtrSize + 2,
|
|
|
|
|
roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign),
|
|
|
|
|
r,
|
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
r,
|
2014-12-01 07:52:09 -08:00
|
|
|
})
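Working through the expected sizes in the test above on a 64-bit target
(PtrSize == 8, argAlign == 8), with roundup re-declared so the arithmetic
stands alone as a sketch:

package main

import "fmt"

func roundup(x, a uintptr) uintptr { return (x + a - 1) &^ (a - 1) }

func main() {
	const ptrSize, argAlign = 8, 8
	// func(a, b, c uint32, p *byte, d uint16): 12 bytes of uint32s,
	// rounded up so p is pointer-aligned, then p and the trailing uint16.
	args := roundup(3*4, ptrSize) + ptrSize + 2 // 16 + 8 + 2 = 26
	frame := roundup(args, argAlign)            // 26 rounded up to 32
	fmt.Println(args, frame)                    // 26 32
}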
|
|
|
|
|
|
|
|
|
|
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
nil,
|
|
|
|
|
ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(),
|
|
|
|
|
4 * PtrSize,
|
|
|
|
|
4 * PtrSize,
|
2014-12-23 10:57:37 -08:00
|
|
|
4 * PtrSize,
|
2015-04-28 00:28:47 -04:00
|
|
|
[]byte{1, 0, 1, 1},
|
|
|
|
|
[]byte{1, 0, 1, 1},
|
2014-12-01 07:52:09 -08:00
|
|
|
})
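The 4*PtrSize size and the {1, 0, 1, 1} mask above follow from the word
layout of the argument types: a map value is one pointer word, uintptr is one
scalar word, and an interface{} occupies two words (type pointer, data
pointer), both of which the collector must scan. A quick sanity check of the
sizes (though not of the mask itself):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var m map[int]int
	var u uintptr
	var i interface{}
	// One pointer word + one scalar word + two interface words = 4 words.
	fmt.Println(unsafe.Sizeof(m) + unsafe.Sizeof(u) + unsafe.Sizeof(i))
}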
|
|
|
|
|
|
|
|
|
|
type S struct {
|
|
|
|
|
a, b uintptr
|
|
|
|
|
c, d *byte
|
|
|
|
|
}
|
|
|
|
|
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
nil,
|
|
|
|
|
ValueOf(func(a S) {}).Type(),
|
|
|
|
|
4 * PtrSize,
|
|
|
|
|
4 * PtrSize,
|
2014-12-23 10:57:37 -08:00
|
|
|
4 * PtrSize,
|
2015-04-28 00:28:47 -04:00
|
|
|
[]byte{0, 0, 1, 1},
|
|
|
|
|
[]byte{0, 0, 1, 1},
|
2014-12-01 07:52:09 -08:00
|
|
|
})
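For the struct case the same reading applies: S packs a and b into two scalar
words followed by c and d in two pointer words, which is exactly the
{0, 0, 1, 1} mask. The field offsets confirm the word positions:

package main

import (
	"fmt"
	"unsafe"
)

type S struct {
	a, b uintptr
	c, d *byte
}

func main() {
	var s S
	w := unsafe.Sizeof(uintptr(0))
	// a and b fill words 0 and 1; c and d sit in words 2 and 3.
	fmt.Println(unsafe.Offsetof(s.c)/w, unsafe.Offsetof(s.d)/w) // 2 3
}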
|
|
|
|
|
|
|
|
|
|
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
ValueOf((*byte)(nil)).Type(),
|
|
|
|
|
ValueOf(func(a uintptr, b *int) {}).Type(),
|
2014-12-23 10:57:37 -08:00
|
|
|
roundup(3*PtrSize, argAlign),
|
2014-12-01 07:52:09 -08:00
|
|
|
3 * PtrSize,
|
|
|
|
|
roundup(3*PtrSize, argAlign),
|
2015-04-28 00:28:47 -04:00
|
|
|
[]byte{1, 0, 1},
|
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
[]byte{1, 0, 1},
|
2014-12-23 10:57:37 -08:00
|
|
|
})
|
|
|
|
|
|
|
|
|
|
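// func(a uintptr) {}: one word of arguments and no pointers, so the
// expected stack and gc maps below are empty, and the size and
// retOffset fields round the single argument word up to argAlign.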
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
nil,
|
2014-12-23 13:45:58 -08:00
|
|
|
ValueOf(func(a uintptr) {}).Type(),
|
|
|
|
|
roundup(PtrSize, argAlign),
|
2014-12-23 10:57:37 -08:00
|
|
|
PtrSize,
|
2014-12-23 13:45:58 -08:00
|
|
|
roundup(PtrSize, argAlign),
|
2014-12-23 10:57:37 -08:00
|
|
|
[]byte{},
|
runtime: replace GC programs with simpler encoding, faster decoder
Small types record the location of pointers in their memory layout
by using a simple bitmap. In Go 1.4 the bitmap held 4-bit entries,
and in Go 1.5 the bitmap holds 1-bit entries, but in both cases using
a bitmap for a large type containing arrays does not make sense:
if someone refers to the type [1<<28]*byte in a program in such
a way that the type information makes it into the binary, it would be
a waste of space to write a 128 MB (for 4-bit entries) or even 32 MB
(for 1-bit entries) bitmap full of 1s into the binary or even to keep
one in memory during the execution of the program.
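(To make those figures concrete: [1<<28]*byte is 2^28 pointer-sized
words, so a 4-bit-per-word mask costs 2^28 × 4 bits = 2^27 bytes = 128 MB,
while a 1-bit-per-word mask costs 2^28 bits = 2^25 bytes = 32 MB.)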
For large types containing arrays, it is much more compact to describe
the locations of pointers using a notation that can express repetition
than to lay out a bitmap of pointers. Go 1.4 included such a notation,
called ``GC programs,'' but it was complex, required recursion during
decoding, and was generally slow. Dmitriy measured the execution of
these programs writing directly to the heap bitmap as being 7x slower
than copying from a preunrolled 4-bit mask (and frankly that code was
not terribly fast either). For some tests, unrollgcprog1 was seen costing
as much as 3x more than the rest of malloc combined.
This CL introduces a different form for the GC programs. They use a
simple Lempel-Ziv-style encoding of the 1-bit pointer information,
in which the only operations are (1) emit the following n bits
and (2) repeat the last n bits c more times. This encoding can be
generated directly from the Go type information (using repetition
only for arrays or large runs of non-pointer data) and it can be decoded
very efficiently. In particular the decoding requires little state and
no recursion, so that the entire decoding can run without any memory
accesses other than the reads of the encoding and the writes of the
decoded form to the heap bitmap. For recursive types like arrays of
arrays of arrays, the inner instructions are only executed once, not
n times, so that large repetitions run at full speed. (In contrast, large
repetitions in the old programs repeated the individual bit-level layout
of the inner data over and over.) The result is as much as 25x faster
decoding compared to the old form.
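As a rough sketch of the two-operation scheme, in Go: the opcode framing
below is invented for illustration and is not the runtime's actual byte
format, but the two operations — emit the following n bits, and repeat
the last n bits c more times — are exactly the ones described above.

	// Hypothetical opcodes for the sketch (not the real runtime encoding).
	const (
		opEnd    = 0 // end of program
		opEmit   = 1 // opEmit, n, packed bits: emit the following n bits
		opRepeat = 2 // opRepeat, n, c: repeat the last n emitted bits c more times
	)

	// run decodes prog into one byte per pointer-sized word (0 or 1).
	// The decoder keeps no state beyond the output written so far and
	// never recurses, matching the properties claimed above.
	func run(prog []byte) []byte {
		var out []byte
		for pc := 0; ; {
			switch prog[pc] {
			case opEnd:
				return out
			case opEmit:
				n := int(prog[pc+1])
				pc += 2
				for i := 0; i < n; i++ {
					out = append(out, (prog[pc+i/8]>>(i%8))&1)
				}
				pc += (n + 7) / 8
			case opRepeat:
				n, c := int(prog[pc+1]), int(prog[pc+2])
				pc += 3
				// Each copy duplicates the last n bits, so the repeated
				// pattern is materialized once and then copied; large
				// repetitions run at copy speed instead of re-executing
				// the instructions that produced them.
				for ; c > 0; c-- {
					out = append(out, out[len(out)-n:]...)
				}
			}
		}
	}

	// Example: pointer bits for a layout like [4]*byte can be encoded
	// as "emit one 1 bit; repeat it 3 more times":
	//	run([]byte{opEmit, 1, 0x01, opRepeat, 1, 3, opEnd}) // [1 1 1 1]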
Because the old decoder was so slow, Go 1.4 had three (or so) cases
for how to set the heap bitmap bits for an allocation of a given type:
(1) If the type had an even number of words up to 32 words, then
the 4-bit pointer mask for the type fit in no more than 16 bytes;
store the 4-bit pointer mask directly in the binary and copy from it.
(1b) If the type had an odd number of words up to 15 words, then
the 4-bit pointer mask for the type, doubled to end on a byte boundary,
fit in no more than 16 bytes; store that doubled mask directly in the
binary and copy from it.
(2) If the type had an even number of words up to 128 words,
or an odd number of words up to 63 words (again due to doubling),
then the 4-bit pointer mask would fit in a 64-byte unrolled mask.
Store a GC program in the binary, but leave space in the BSS for
the unrolled mask. Execute the GC program to construct the mask the
first time it is needed, and thereafter copy from the mask.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
(This is the case that was 7x slower than the other two.)
Because the new pointer masks store 1-bit entries instead of 4-bit
entries and because using the decoder no longer carries a significant
overhead, after this CL (that is, for Go 1.5) there are only two cases:
(1) If the type is 128 words or less (no condition about odd or even),
store the 1-bit pointer mask directly in the binary and use it to
initialize the heap bitmap during malloc. (Implemented in CL 9702.)
(2) There is no case 2 anymore.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
Executing the GC program directly into the heap bitmap (case (3) above)
was disabled for the Go 1.5 dev cycle, both to avoid needing to use
GC programs for typedmemmove and to avoid updating that code as
the heap bitmap format changed. Typedmemmove no longer uses this
type information; as of CL 9886 it uses the heap bitmap directly.
Now that the heap bitmap format is stable, we reintroduce GC programs
and their space savings.
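In code form, the post-CL policy reduces to a single size test. This is
a minimal sketch — the typeInfo shape and pointerBits name are invented
here for illustration (the runtime stores this state on its own type
structures), and it reuses the run decoder sketched above:

	const maxPtrmaskWords = 128 // the cutoff named above

	type typeInfo struct {
		words   int    // type size in pointer-sized words
		ptrmask []byte // 1-bit-per-word mask, present when words <= maxPtrmaskWords
		gcprog  []byte // GC program, otherwise
	}

	// pointerBits returns the 1-bit pointer layout used to initialize
	// the heap bitmap for an allocation of type t.
	func pointerBits(t *typeInfo) []byte {
		if t.words <= maxPtrmaskWords {
			return t.ptrmask // case (1): copy the stored mask
		}
		return run(t.gcprog) // case (3): execute the GC program
	}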
Benchmarks for heapBitsSetType, before this CL vs this CL:
name old mean new mean delta
SetTypePtr 7.59ns × (0.99,1.02) 5.16ns × (1.00,1.00) -32.05% (p=0.000)
SetTypePtr8 21.0ns × (0.98,1.05) 21.4ns × (1.00,1.00) ~ (p=0.179)
SetTypePtr16 24.1ns × (0.99,1.01) 24.6ns × (1.00,1.00) +2.41% (p=0.001)
SetTypePtr32 31.2ns × (0.99,1.01) 32.4ns × (0.99,1.02) +3.72% (p=0.001)
SetTypePtr64 45.2ns × (1.00,1.00) 47.2ns × (1.00,1.00) +4.42% (p=0.000)
SetTypePtr126 75.8ns × (0.99,1.01) 79.1ns × (1.00,1.00) +4.25% (p=0.000)
SetTypePtr128 74.3ns × (0.99,1.01) 77.6ns × (1.00,1.01) +4.55% (p=0.000)
SetTypePtrSlice 726ns × (1.00,1.01) 712ns × (1.00,1.00) -1.95% (p=0.001)
SetTypeNode1 20.0ns × (0.99,1.01) 20.7ns × (1.00,1.00) +3.71% (p=0.000)
SetTypeNode1Slice 112ns × (1.00,1.00) 113ns × (0.99,1.00) ~ (p=0.070)
SetTypeNode8 23.9ns × (1.00,1.00) 24.7ns × (1.00,1.01) +3.18% (p=0.000)
SetTypeNode8Slice 294ns × (0.99,1.02) 287ns × (0.99,1.01) -2.38% (p=0.015)
SetTypeNode64 52.8ns × (0.99,1.03) 51.8ns × (0.99,1.01) ~ (p=0.069)
SetTypeNode64Slice 1.13µs × (0.99,1.05) 1.14µs × (0.99,1.00) ~ (p=0.767)
SetTypeNode64Dead 36.0ns × (1.00,1.01) 32.5ns × (0.99,1.00) -9.67% (p=0.000)
SetTypeNode64DeadSlice 1.43µs × (0.99,1.01) 1.40µs × (1.00,1.00) -2.39% (p=0.001)
SetTypeNode124 75.7ns × (1.00,1.01) 79.0ns × (1.00,1.00) +4.44% (p=0.000)
SetTypeNode124Slice 1.94µs × (1.00,1.01) 2.04µs × (0.99,1.01) +4.98% (p=0.000)
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
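For reference, a node shape consistent with the "124 data + 2 ptr"
description — the field names and order here are guesses; the real
benchmark types live in the runtime tests:

	type Node124 struct {
		Left, Right *Node124     // 2 pointer words
		Data        [124]uintptr // 124 non-pointer words
	}
	// 126 words total: under the 128-word cutoff, so both Go 1.4 and
	// this CL use a pointer bitmap. The analogous Node128 is 130 words,
	// over the cutoff, so it runs a GC program instead.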
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
2015-05-08 01:43:18 -04:00
|
|
|
[]byte{},
|
2014-12-23 10:57:37 -08:00
|
|
|
})
|
|
|
|
|
|
|
|
|
|
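// func() uintptr: no arguments, so argsize and retOffset are both 0;
// the single uintptr result carries no pointers, so the maps are empty.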
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
nil,
|
2014-12-23 13:45:58 -08:00
|
|
|
ValueOf(func() uintptr { return 0 }).Type(),
|
2014-12-23 10:57:37 -08:00
|
|
|
PtrSize,
|
|
|
|
|
0,
|
|
|
|
|
0,
|
|
|
|
|
[]byte{},
|
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
[]byte{},
|
2014-12-23 10:57:37 -08:00
|
|
|
})
|
|
|
|
|
|
|
|
|
|
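// Method on a uintptr receiver with one uintptr argument: two words in
// the argument frame. The receiver is passed by reference (see the note
// further below), so the first word is marked as a pointer ([]byte{1}).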
funcLayoutTests = append(funcLayoutTests,
|
|
|
|
|
funcLayoutTest{
|
|
|
|
|
ValueOf(uintptr(0)).Type(),
|
2014-12-23 13:45:58 -08:00
|
|
|
ValueOf(func(a uintptr) {}).Type(),
|
|
|
|
|
2 * PtrSize,
|
|
|
|
|
2 * PtrSize,
|
|
|
|
|
2 * PtrSize,
|
2015-04-28 00:28:47 -04:00
|
|
|
[]byte{1},
|
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
[]byte{1},
|
2016-03-01 23:21:55 +00:00
|
|
|
// Note: this one is tricky, as the receiver is not a pointer. But we
|
2014-12-23 10:57:37 -08:00
|
|
|
// pass the receiver by reference to the autogenerated pointer-receiver
|
|
|
|
|
// version of the function.
|
2014-12-01 07:52:09 -08:00
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestFuncLayout(t *testing.T) {
|
|
|
|
|
for _, lt := range funcLayoutTests {
|
2014-12-23 10:57:37 -08:00
|
|
|
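// FuncLayout is the reflect package's test-only export of its frame
// layout computation; it reports the layout computed for lt.t with
// optional receiver lt.rcvr.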
typ, argsize, retOffset, stack, gc, ptrs := FuncLayout(lt.t, lt.rcvr)
|
|
|
|
|
if typ.Size() != lt.size {
|
|
|
|
|
t.Errorf("funcLayout(%v, %v).size=%d, want %d", lt.t, lt.rcvr, typ.Size(), lt.size)
|
|
|
|
|
}
|
2014-12-01 07:52:09 -08:00
|
|
|
if argsize != lt.argsize {
|
|
|
|
|
t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.t, lt.rcvr, argsize, lt.argsize)
|
|
|
|
|
}
|
|
|
|
|
if retOffset != lt.retOffset {
|
|
|
|
|
t.Errorf("funcLayout(%v, %v).retOffset=%d, want %d", lt.t, lt.rcvr, retOffset, lt.retOffset)
|
|
|
|
|
}
|
|
|
|
|
if !bytes.Equal(stack, lt.stack) {
|
|
|
|
|
t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.t, lt.rcvr, stack, lt.stack)
|
|
|
|
|
}
|
2014-12-23 10:57:37 -08:00
|
|
|
if !bytes.Equal(gc, lt.gc) {
|
|
|
|
|
t.Errorf("funcLayout(%v, %v).gc=%v, want %v", lt.t, lt.rcvr, gc, lt.gc)
|
|
|
|
|
}
|
|
|
|
|
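// The ptrs flag must agree with the stack map: a frame reports
// pointers exactly when its stack map is non-empty.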
if ptrs && len(stack) == 0 || !ptrs && len(stack) > 0 {
|
|
|
|
|
t.Errorf("funcLayout(%v, %v) pointers flag=%v, want %v", lt.t, lt.rcvr, ptrs, !ptrs)
|
|
|
|
|
}
|
2014-12-01 07:52:09 -08:00
|
|
|
}
|
|
|
|
|
}
|
runtime: replace GC programs with simpler encoding, faster decoder
Small types record the location of pointers in their memory layout
by using a simple bitmap. In Go 1.4 the bitmap held 4-bit entries,
and in Go 1.5 the bitmap holds 1-bit entries, but in both cases using
a bitmap for a large type containing arrays does not make sense:
if someone refers to the type [1<<28]*byte in a program in such
a way that the type information makes it into the binary, it would be
a waste of space to write a 128 MB (for 4-bit entries) or even 32 MB
(for 1-bit entries) bitmap full of 1s into the binary or even to keep
one in memory during the execution of the program.
For large types containing arrays, it is much more compact to describe
the locations of pointers using a notation that can express repetition
than to lay out a bitmap of pointers. Go 1.4 included such a notation,
called ``GC programs'' but it was complex, required recursion during
decoding, and was generally slow. Dmitriy measured the execution of
these programs writing directly to the heap bitmap as being 7x slower
than copying from a preunrolled 4-bit mask (and frankly that code was
not terribly fast either). For some tests, unrollgcprog1 was seen costing
as much as 3x more than the rest of malloc combined.
This CL introduces a different form for the GC programs. They use a
simple Lempel-Ziv-style encoding of the 1-bit pointer information,
in which the only operations are (1) emit the following n bits
and (2) repeat the last n bits c more times. This encoding can be
generated directly from the Go type information (using repetition
only for arrays or large runs of non-pointer data) and it can be decoded
very efficiently. In particular the decoding requires little state and
no recursion, so that the entire decoding can run without any memory
accesses other than the reads of the encoding and the writes of the
decoded form to the heap bitmap. For recursive types like arrays of
arrays of arrays, the inner instructions are only executed once, not
n times, so that large repetitions run at full speed. (In contrast, large
repetitions in the old programs repeated the individual bit-level layout
of the inner data over and over.) The result is as much as 25x faster
decoding compared to the old form.
Because the old decoder was so slow, Go 1.4 had three (or so) cases
for how to set the heap bitmap bits for an allocation of a given type:
(1) If the type had an even number of words up to 32 words, then
the 4-bit pointer mask for the type fit in no more than 16 bytes;
store the 4-bit pointer mask directly in the binary and copy from it.
(1b) If the type had an odd number of words up to 15 words, then
the 4-bit pointer mask for the type, doubled to end on a byte boundary,
fit in no more than 16 bytes; store that doubled mask directly in the
binary and copy from it.
(2) If the type had an even number of words up to 128 words,
or an odd number of words up to 63 words (again due to doubling),
then the 4-bit pointer mask would fit in a 64-byte unrolled mask.
Store a GC program in the binary, but leave space in the BSS for
the unrolled mask. Execute the GC program to construct the mask the
first time it is needed, and thereafter copy from the mask.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
(This is the case that was 7x slower than the other two.)
Because the new pointer masks store 1-bit entries instead of 4-bit
entries and because using the decoder no longer carries a significant
overhead, after this CL (that is, for Go 1.5) there are only two cases:
(1) If the type is 128 words or less (no condition about odd or even),
store the 1-bit pointer mask directly in the binary and use it to
initialize the heap bitmap during malloc. (Implemented in CL 9702.)
(2) There is no case 2 anymore.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
Executing the GC program directly into the heap bitmap (case (3) above)
was disabled for the Go 1.5 dev cycle, both to avoid needing to use
GC programs for typedmemmove and to avoid updating that code as
the heap bitmap format changed. Typedmemmove no longer uses this
type information; as of CL 9886 it uses the heap bitmap directly.
Now that the heap bitmap format is stable, we reintroduce GC programs
and their space savings.
Benchmarks for heapBitsSetType, before this CL vs this CL:
name old mean new mean delta
SetTypePtr 7.59ns × (0.99,1.02) 5.16ns × (1.00,1.00) -32.05% (p=0.000)
SetTypePtr8 21.0ns × (0.98,1.05) 21.4ns × (1.00,1.00) ~ (p=0.179)
SetTypePtr16 24.1ns × (0.99,1.01) 24.6ns × (1.00,1.00) +2.41% (p=0.001)
SetTypePtr32 31.2ns × (0.99,1.01) 32.4ns × (0.99,1.02) +3.72% (p=0.001)
SetTypePtr64 45.2ns × (1.00,1.00) 47.2ns × (1.00,1.00) +4.42% (p=0.000)
SetTypePtr126 75.8ns × (0.99,1.01) 79.1ns × (1.00,1.00) +4.25% (p=0.000)
SetTypePtr128 74.3ns × (0.99,1.01) 77.6ns × (1.00,1.01) +4.55% (p=0.000)
SetTypePtrSlice 726ns × (1.00,1.01) 712ns × (1.00,1.00) -1.95% (p=0.001)
SetTypeNode1 20.0ns × (0.99,1.01) 20.7ns × (1.00,1.00) +3.71% (p=0.000)
SetTypeNode1Slice 112ns × (1.00,1.00) 113ns × (0.99,1.00) ~ (p=0.070)
SetTypeNode8 23.9ns × (1.00,1.00) 24.7ns × (1.00,1.01) +3.18% (p=0.000)
SetTypeNode8Slice 294ns × (0.99,1.02) 287ns × (0.99,1.01) -2.38% (p=0.015)
SetTypeNode64 52.8ns × (0.99,1.03) 51.8ns × (0.99,1.01) ~ (p=0.069)
SetTypeNode64Slice 1.13µs × (0.99,1.05) 1.14µs × (0.99,1.00) ~ (p=0.767)
SetTypeNode64Dead 36.0ns × (1.00,1.01) 32.5ns × (0.99,1.00) -9.67% (p=0.000)
SetTypeNode64DeadSlice 1.43µs × (0.99,1.01) 1.40µs × (1.00,1.00) -2.39% (p=0.001)
SetTypeNode124 75.7ns × (1.00,1.01) 79.0ns × (1.00,1.00) +4.44% (p=0.000)
SetTypeNode124Slice 1.94µs × (1.00,1.01) 2.04µs × (0.99,1.01) +4.98% (p=0.000)
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
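Hypothetical struct shapes matching that arithmetic (the benchmarks'
real Node types live in the runtime's tests; these merely reproduce
the word counts):

// Node124: 2 pointer words + 124 scalar words = 126 words, under the
// 128-word cutoff, so its layout is a stored pointer bitmap.
type Node124 struct {
	left, right *Node124
	data        [124]uintptr
}

// Node128: 2 pointer words + 128 scalar words = 130 words, over the
// cutoff, so its layout is described by a GC program instead.
type Node128 struct {
	left, right *Node128
	data        [128]uintptr
}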
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
2015-05-08 01:43:18 -04:00

// verifyGCBits checks that the heap bitmap recorded for a freshly
// allocated value of type typ matches the expected 1-bit-per-word
// pointer bitmap.
func verifyGCBits(t *testing.T, typ Type, bits []byte) {
	heapBits := GCBits(New(typ).Interface())
	if !bytes.Equal(heapBits, bits) {
		// Report the caller's line number so failures in long tables
		// of checks are easy to locate.
		_, _, line, _ := runtime.Caller(1)
		t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
	}
}

func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
	// Creating a slice causes the runtime to repeat a bitmap,
	// which exercises a different path from making the compiler
	// repeat a bitmap for a small array or executing a repeat in
	// a GC program.
	val := MakeSlice(typ, 0, cap)
	data := NewAt(ArrayOf(cap, typ), unsafe.Pointer(val.Pointer()))
	heapBits := GCBits(data.Interface())
	// Repeat the bitmap for the slice size, trimming scalars in
	// the last element.
	bits = rep(cap, bits)
	for len(bits) > 2 && bits[len(bits)-1] == 0 {
		bits = bits[:len(bits)-1]
	}
runtime: reclaim scan/dead bit in first word
With the switch to separate mark bitmaps, the scan/dead bit for the
first word of each object is now unused. Reclaim this bit and use it
as a scan/dead bit, just like words three and on. The second word is
still used for checkmark.
This dramatically simplifies heapBitsSetTypeNoScan and hasPointers,
since they no longer need different cases for 1, 2, and 3+ word
objects. They can instead just manipulate the heap bitmap for the
first word and be done with it.
In order to enable this, we change heapBitsSetType and runGCProg to
always set the scan/dead bit to scan for the first word on every code
path. Since these functions only apply to types that have pointers,
there's no need to do this conditionally: it's *always* necessary to
set the scan bit in the first word.
We also change every place that scans an object and checks if there
are more pointers. Rather than only checking morePointers if the word
is >= 2, we now check morePointers if word != 1 (since that's the
checkmark word).
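A toy model of that check, with the word-1 exception called out
(illustrative only; the runtime's heapBits iterator looks nothing
like this):

// scanObject visits an object's words in order, stopping early when
// the scan/dead bit reports no more pointers. Word 1 cannot be
// consulted because it holds the checkmark bit; every other word can.
func scanObject(nWords int, morePointers func(word int) bool, visit func(word int)) {
	for w := 0; w < nWords; w++ {
		if w != 1 && !morePointers(w) {
			return // the rest of the object is pointer-free
		}
		visit(w)
	}
}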
Looking forward, we should probably reclaim the checkmark bit, too,
but that's going to be quite a bit more work.
Tested by setting doubleCheck in heapBitsSetType and running all.bash
on both linux/amd64 and linux/386, and by running GOGC=10 all.bash.
This particularly improves the FmtFprintf* go1 benchmarks, since they
do a large amount of noscan allocation.
name old time/op new time/op delta
BinaryTree17-12 2.34s ± 1% 2.38s ± 1% +1.70% (p=0.000 n=17+19)
Fannkuch11-12 2.09s ± 0% 2.09s ± 1% ~ (p=0.276 n=17+16)
FmtFprintfEmpty-12 44.9ns ± 2% 44.8ns ± 2% ~ (p=0.340 n=19+18)
FmtFprintfString-12 127ns ± 0% 125ns ± 0% -1.57% (p=0.000 n=16+15)
FmtFprintfInt-12 128ns ± 0% 122ns ± 1% -4.45% (p=0.000 n=15+20)
FmtFprintfIntInt-12 207ns ± 1% 193ns ± 0% -6.55% (p=0.000 n=19+14)
FmtFprintfPrefixedInt-12 197ns ± 1% 191ns ± 0% -2.93% (p=0.000 n=17+18)
FmtFprintfFloat-12 263ns ± 0% 248ns ± 1% -5.88% (p=0.000 n=15+19)
FmtManyArgs-12 794ns ± 0% 779ns ± 1% -1.90% (p=0.000 n=18+18)
GobDecode-12 7.14ms ± 2% 7.11ms ± 1% ~ (p=0.072 n=20+20)
GobEncode-12 5.85ms ± 1% 5.82ms ± 1% -0.49% (p=0.000 n=20+20)
Gzip-12 218ms ± 1% 215ms ± 1% -1.22% (p=0.000 n=19+19)
Gunzip-12 36.8ms ± 0% 36.7ms ± 0% -0.18% (p=0.006 n=18+20)
HTTPClientServer-12 77.1µs ± 4% 77.1µs ± 3% ~ (p=0.945 n=19+20)
JSONEncode-12 15.6ms ± 1% 15.9ms ± 1% +1.68% (p=0.000 n=18+20)
JSONDecode-12 55.2ms ± 1% 53.6ms ± 1% -2.93% (p=0.000 n=17+19)
Mandelbrot200-12 4.05ms ± 1% 4.05ms ± 0% ~ (p=0.306 n=17+17)
GoParse-12 3.14ms ± 1% 3.10ms ± 1% -1.31% (p=0.000 n=19+18)
RegexpMatchEasy0_32-12 69.3ns ± 1% 70.0ns ± 0% +0.89% (p=0.000 n=19+17)
RegexpMatchEasy0_1K-12 237ns ± 1% 236ns ± 0% -0.62% (p=0.000 n=19+16)
RegexpMatchEasy1_32-12 69.5ns ± 1% 70.3ns ± 1% +1.14% (p=0.000 n=18+17)
RegexpMatchEasy1_1K-12 377ns ± 1% 366ns ± 1% -3.03% (p=0.000 n=15+19)
RegexpMatchMedium_32-12 107ns ± 1% 107ns ± 2% ~ (p=0.318 n=20+19)
RegexpMatchMedium_1K-12 33.8µs ± 3% 33.5µs ± 1% -1.04% (p=0.001 n=20+19)
RegexpMatchHard_32-12 1.68µs ± 1% 1.73µs ± 0% +2.50% (p=0.000 n=20+18)
RegexpMatchHard_1K-12 50.8µs ± 1% 52.0µs ± 1% +2.50% (p=0.000 n=19+18)
Revcomp-12 381ms ± 1% 385ms ± 1% +1.00% (p=0.000 n=17+18)
Template-12 64.9ms ± 3% 62.6ms ± 1% -3.55% (p=0.000 n=19+18)
TimeParse-12 324ns ± 0% 328ns ± 1% +1.25% (p=0.000 n=18+18)
TimeFormat-12 345ns ± 0% 334ns ± 0% -3.31% (p=0.000 n=15+17)
[Geo mean] 52.1µs 51.5µs -1.00%
Change-Id: I13e74da3193a7f80794c654f944d1f0d60817049
Reviewed-on: https://go-review.googlesource.com/22632
Reviewed-by: Rick Hudson <rlh@golang.org>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2016-04-29 14:51:48 -04:00
	if len(bits) == 2 && bits[0] == 0 && bits[1] == 0 {
		bits = bits[:0]
	}
	if !bytes.Equal(heapBits, bits) {
		t.Errorf("heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", typ, cap, heapBits, bits)
	}
}
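
rep is a helper defined elsewhere in this file; the following is a
minimal version consistent with its use above (a reconstruction for
reference, not necessarily the file's exact code):

// rep returns n concatenated copies of b: the per-element pointer
// bitmap repeated across all of the slice's elements.
func rep(n int, b []byte) []byte {
	var r []byte
	for i := 0; i < n; i++ {
		r = append(r, b...)
	}
	return r
}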

func TestGCBits(t *testing.T) {
	verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})

	// Building blocks for types seen by the compiler (like [2]Xscalar).
	// The compiler will create the type structures for the derived types,
	// including their GC metadata.
	type Xscalar struct{ x uintptr }
	type Xptr struct{ x *byte }
	type Xptrscalar struct {
		*byte
		uintptr
	}
	type Xscalarptr struct {
		uintptr
		*byte
	}
	type Xbigptrscalar struct {
		_ [100]*byte
		_ [100]uintptr
	}
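
	// Two illustrative checks of these building blocks (hypothetical
	// examples; the test's real table is far longer and uses helper
	// constructors defined elsewhere in this file):
	verifyGCBits(t, TypeOf(Xptr{}), []byte{1})          // one pointer word
	verifyGCBits(t, TypeOf(Xscalarptr{}), []byte{0, 1}) // scalar word, then pointer word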
cmd/compile, runtime: fix placement of map bucket overflow pointer on nacl
On most systems, a pointer is the worst case alignment, so adding
a pointer field at the end of a struct guarantees there will be no
padding added after that field (to satisfy overall struct alignment
due to some more-aligned field also present).
In the runtime, the map implementation needs a quick way to
get to the overflow pointer, which is last in the bucket struct,
so it uses size - sizeof(pointer) as the offset.
NaCl/amd64p32 is the exception, as always.
The worst case alignment is 64 bits but pointers are 32 bits.
There's a long history that is not worth going into, but when
we moved the overflow pointer to the end of the struct,
we didn't get the padding computation right.
The compiler computed the regular struct size and then
on amd64p32 added another 32-bit field.
And the runtime assumed it could step back two 32-bit fields
(one 64-bit register size) to get to the overflow pointer.
But in fact if the struct needed 64-bit alignment, the computation
of the regular struct size would have added a 32-bit pad already,
and then the code unconditionally added a second 32-bit pad.
This placed the overflow pointer three words from the end, not two.
The last two were padding, and since the runtime was consistent
about using the second-to-last word as the overflow pointer,
no harm done in the sense of overwriting useful memory.
But writing the overflow pointer to a non-pointer word of memory
means that the GC can't see the overflow blocks, so it will
collect them prematurely. Then bad things happen.
Correct all this in a few steps:
1. Add an explicit check at the end of the bucket layout in the
compiler that the overflow field is last in the struct, never
followed by padding.
2. When padding is needed on nacl (not always, just when needed),
insert it before the overflow pointer, to preserve the "last in the struct"
property.
3. Let the compiler have the final word on the width of the struct,
by inserting an explicit padding field instead of overwriting the
results of the width computation it does.
4. For the same reason (tell the truth to the compiler), set the type
of the overflow field when we're trying to pretend it's not a pointer
(in this case the runtime maintains a list of the overflow blocks
elsewhere).
5. Make the runtime use "last in the struct" as its location algorithm.
This fixes TestTraceStress on nacl/amd64p32.
The 'bad map state' and 'invalid free list' failures no longer occur.
Fixes #11838.
Change-Id: If918887f8f252d988db0a35159944d2b36512f92
Reviewed-on: https://go-review.googlesource.com/12971
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
2015-07-30 22:05:51 -04:00
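The invariant this change establishes (overflow pointer literally last, never followed by padding) is easy to probe with package unsafe. Here is a hedged sketch using an invented bucket layout, not the runtime's real one, showing how the old size-based arithmetic and the true field offset can disagree once struct alignment exceeds pointer size:

package main

import (
	"fmt"
	"unsafe"
)

// bucket stands in for the runtime's map bucket; the uint64 field
// forces 64-bit alignment, mimicking the case that broke on amd64p32.
type bucket struct {
	tophash  [8]uint8
	keys     [8]uint64
	overflow *bucket // must be the last field, with no padding after it
}

func main() {
	// On amd64 both results are 72. On amd64p32 the struct is padded to
	// a 64-bit boundary while pointers are 4 bytes, so the size-based
	// guess lands on trailing padding instead of the overflow field.
	fmt.Println(unsafe.Offsetof(bucket{}.overflow))
	fmt.Println(unsafe.Sizeof(bucket{}) - unsafe.Sizeof(uintptr(0)))
}

After the fix, the compiler inserts any needed padding in front of the overflow field, so locating it as the last word of the bucket is always correct and the GC always sees the pointer slot.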
|
|
|
var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
|
|
|
|
{
	// Building blocks for types constructed by reflect.
	// This code is in a separate block so that code below
	// cannot accidentally refer to these.
	// The compiler must NOT see types derived from these
	// (for example, [2]Scalar must NOT appear in the program),
	// or else reflect will use it instead of having to construct one.
	// The goal is to test the construction.
	type Scalar struct{ x uintptr }
	type Ptr struct{ x *byte }
	type Ptrscalar struct {
		*byte
		uintptr
	}
	type Scalarptr struct {
		uintptr
		*byte
	}
2015-06-24 11:43:40 -04:00
	type Bigptrscalar struct {
		_ [100]*byte
		_ [100]uintptr
	}
	type Int64 int64
	Tscalar = TypeOf(Scalar{})
	Tint64 = TypeOf(Int64(0))
	Tptr = TypeOf(Ptr{})
	Tscalarptr = TypeOf(Scalarptr{})
	Tptrscalar = TypeOf(Ptrscalar{})
2015-06-24 11:43:40 -04:00
	Tbigptrscalar = TypeOf(Bigptrscalar{})
|
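The point of these building blocks is that reflect, not the compiler, must construct derived types such as [2]Scalar, GC metadata included. A standalone hedged sketch of the same idea (outside this test file, so it names the reflect package explicitly):

package main

import (
	"fmt"
	"reflect"
)

// Scalar mirrors the building block above; this program never mentions
// [2]Scalar, so reflect.ArrayOf must construct that type at run time.
type Scalar struct{ x uintptr }

func main() {
	t := reflect.ArrayOf(2, reflect.TypeOf(Scalar{}))
	v := reflect.New(t).Elem() // allocate a value of the constructed type
	fmt.Println(t, v.Len())    // prints: [2]main.Scalar 2
}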
runtime: replace GC programs with simpler encoding, faster decoder
Small types record the location of pointers in their memory layout
by using a simple bitmap. In Go 1.4 the bitmap held 4-bit entries,
and in Go 1.5 the bitmap holds 1-bit entries, but in both cases using
a bitmap for a large type containing arrays does not make sense:
if someone refers to the type [1<<28]*byte in a program in such
a way that the type information makes it into the binary, it would be
a waste of space to write a 128 MB (for 4-bit entries) or even 32 MB
(for 1-bit entries) bitmap full of 1s into the binary or even to keep
one in memory during the execution of the program.
For large types containing arrays, it is much more compact to describe
the locations of pointers using a notation that can express repetition
than to lay out a bitmap of pointers. Go 1.4 included such a notation,
called ``GC programs'' but it was complex, required recursion during
decoding, and was generally slow. Dmitriy measured the execution of
these programs writing directly to the heap bitmap as being 7x slower
than copying from a preunrolled 4-bit mask (and frankly that code was
not terribly fast either). For some tests, unrollgcprog1 was seen costing
as much as 3x more than the rest of malloc combined.
This CL introduces a different form for the GC programs. They use a
simple Lempel-Ziv-style encoding of the 1-bit pointer information,
in which the only operations are (1) emit the following n bits
and (2) repeat the last n bits c more times. This encoding can be
generated directly from the Go type information (using repetition
only for arrays or large runs of non-pointer data) and it can be decoded
very efficiently. In particular the decoding requires little state and
no recursion, so that the entire decoding can run without any memory
accesses other than the reads of the encoding and the writes of the
decoded form to the heap bitmap. For recursive types like arrays of
arrays of arrays, the inner instructions are only executed once, not
n times, so that large repetitions run at full speed. (In contrast, large
repetitions in the old programs repeated the individual bit-level layout
of the inner data over and over.) The result is as much as 25x faster
decoding compared to the old form.
Because the old decoder was so slow, Go 1.4 had three (or so) cases
for how to set the heap bitmap bits for an allocation of a given type:
(1) If the type had an even number of words up to 32 words, then
the 4-bit pointer mask for the type fit in no more than 16 bytes;
store the 4-bit pointer mask directly in the binary and copy from it.
(1b) If the type had an odd number of words up to 15 words, then
the 4-bit pointer mask for the type, doubled to end on a byte boundary,
fit in no more than 16 bytes; store that doubled mask directly in the
binary and copy from it.
(2) If the type had an even number of words up to 128 words,
or an odd number of words up to 63 words (again due to doubling),
then the 4-bit pointer mask would fit in a 64-byte unrolled mask.
Store a GC program in the binary, but leave space in the BSS for
the unrolled mask. Execute the GC program to construct the mask the
first time it is needed, and thereafter copy from the mask.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
(This is the case that was 7x slower than the other two.)
Because the new pointer masks store 1-bit entries instead of 4-bit
entries and because using the decoder no longer carries a significant
overhead, after this CL (that is, for Go 1.5) there are only two cases:
(1) If the type is 128 words or less (no condition about odd or even),
store the 1-bit pointer mask directly in the binary and use it to
initialize the heap bitmap during malloc. (Implemented in CL 9702.)
(2) There is no case 2 anymore.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
Executing the GC program directly into the heap bitmap (case (3) above)
was disabled for the Go 1.5 dev cycle, both to avoid needing to use
GC programs for typedmemmove and to avoid updating that code as
the heap bitmap format changed. Typedmemmove no longer uses this
type information; as of CL 9886 it uses the heap bitmap directly.
Now that the heap bitmap format is stable, we reintroduce GC programs
and their space savings.
Benchmarks for heapBitsSetType, before this CL vs this CL:
name old mean new mean delta
SetTypePtr 7.59ns × (0.99,1.02) 5.16ns × (1.00,1.00) -32.05% (p=0.000)
SetTypePtr8 21.0ns × (0.98,1.05) 21.4ns × (1.00,1.00) ~ (p=0.179)
SetTypePtr16 24.1ns × (0.99,1.01) 24.6ns × (1.00,1.00) +2.41% (p=0.001)
SetTypePtr32 31.2ns × (0.99,1.01) 32.4ns × (0.99,1.02) +3.72% (p=0.001)
SetTypePtr64 45.2ns × (1.00,1.00) 47.2ns × (1.00,1.00) +4.42% (p=0.000)
SetTypePtr126 75.8ns × (0.99,1.01) 79.1ns × (1.00,1.00) +4.25% (p=0.000)
SetTypePtr128 74.3ns × (0.99,1.01) 77.6ns × (1.00,1.01) +4.55% (p=0.000)
SetTypePtrSlice 726ns × (1.00,1.01) 712ns × (1.00,1.00) -1.95% (p=0.001)
SetTypeNode1 20.0ns × (0.99,1.01) 20.7ns × (1.00,1.00) +3.71% (p=0.000)
SetTypeNode1Slice 112ns × (1.00,1.00) 113ns × (0.99,1.00) ~ (p=0.070)
SetTypeNode8 23.9ns × (1.00,1.00) 24.7ns × (1.00,1.01) +3.18% (p=0.000)
SetTypeNode8Slice 294ns × (0.99,1.02) 287ns × (0.99,1.01) -2.38% (p=0.015)
SetTypeNode64 52.8ns × (0.99,1.03) 51.8ns × (0.99,1.01) ~ (p=0.069)
SetTypeNode64Slice 1.13µs × (0.99,1.05) 1.14µs × (0.99,1.00) ~ (p=0.767)
SetTypeNode64Dead 36.0ns × (1.00,1.01) 32.5ns × (0.99,1.00) -9.67% (p=0.000)
SetTypeNode64DeadSlice 1.43µs × (0.99,1.01) 1.40µs × (1.00,1.00) -2.39% (p=0.001)
SetTypeNode124 75.7ns × (1.00,1.01) 79.0ns × (1.00,1.00) +4.44% (p=0.000)
SetTypeNode124Slice 1.94µs × (1.00,1.01) 2.04µs × (0.99,1.01) +4.98% (p=0.000)
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, and SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
2015-05-08 01:43:18 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
empty := []byte{}
|
|
|
|
|
|
|
|
|
|
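The assertions below compare a type's 1-bit pointer mask against an expected
slice of bits, one byte per bit. As a rough sketch of what verifyGCBits checks
(assuming a test-only GCBits accessor that returns the recorded pointer mask;
the real definition lives elsewhere in this file):

// Sketch of verifyGCBits under the assumptions above.
func verifyGCBits(t *testing.T, typ Type, bits []byte) {
	heapBits := GCBits(New(typ).Interface())
	if !bytes.Equal(heapBits, bits) {
		t.Errorf("heapBits incorrect for %v\nhave %v\nwant %v", typ, heapBits, bits)
	}
}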
verifyGCBits(t, TypeOf(Xscalar{}), empty)
|
|
|
|
|
verifyGCBits(t, Tscalar, empty)
|
|
|
|
|
verifyGCBits(t, TypeOf(Xptr{}), lit(1))
|
|
|
|
|
verifyGCBits(t, Tptr, lit(1))
|
|
|
|
|
verifyGCBits(t, TypeOf(Xscalarptr{}), lit(0, 1))
|
|
|
|
|
verifyGCBits(t, Tscalarptr, lit(0, 1))
|
|
|
|
|
verifyGCBits(t, TypeOf(Xptrscalar{}), lit(1))
|
|
|
|
|
verifyGCBits(t, Tptrscalar, lit(1))
|
|
|
|
|
|
|
|
|
|
verifyGCBits(t, TypeOf([0]Xptr{}), empty)
|
|
|
|
|
verifyGCBits(t, ArrayOf(0, Tptr), empty)
|
|
|
|
|
verifyGCBits(t, TypeOf([1]Xptrscalar{}), lit(1))
|
|
|
|
|
verifyGCBits(t, ArrayOf(1, Tptrscalar), lit(1))
|
|
|
|
|
verifyGCBits(t, TypeOf([2]Xscalar{}), empty)
|
|
|
|
|
verifyGCBits(t, ArrayOf(2, Tscalar), empty)
|
2015-06-07 22:47:59 -04:00
|
|
|
verifyGCBits(t, TypeOf([10000]Xscalar{}), empty)
|
|
|
|
|
verifyGCBits(t, ArrayOf(10000, Tscalar), empty)
|
2015-05-08 01:43:18 -04:00
|
|
|
verifyGCBits(t, TypeOf([2]Xptr{}), lit(1, 1))
|
|
|
|
|
verifyGCBits(t, ArrayOf(2, Tptr), lit(1, 1))
|
2015-06-07 22:47:59 -04:00
|
|
|
verifyGCBits(t, TypeOf([10000]Xptr{}), rep(10000, lit(1)))
|
|
|
|
|
verifyGCBits(t, ArrayOf(10000, Tptr), rep(10000, lit(1)))
|
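The expected masks are built with three small helpers whose definitions appear
elsewhere in this test file; minimal versions consistent with every call in this
section would be thin wrappers over package bytes:

// Plausible minimal definitions of the mask-building helpers
// (an assumption; the real ones live elsewhere in this file).
func lit(x ...byte) []byte       { return x }                  // literal bit sequence
func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) } // n copies of b
func join(b ...[]byte) []byte    { return bytes.Join(b, nil) } // concatenation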
2015-05-08 01:43:18 -04:00
|
|
|
verifyGCBits(t, TypeOf([2]Xscalarptr{}), lit(0, 1, 0, 1))
|
|
|
|
|
verifyGCBits(t, ArrayOf(2, Tscalarptr), lit(0, 1, 0, 1))
|
2015-06-07 22:47:59 -04:00
|
|
|
verifyGCBits(t, TypeOf([10000]Xscalarptr{}), rep(10000, lit(0, 1)))
|
|
|
|
|
verifyGCBits(t, ArrayOf(10000, Tscalarptr), rep(10000, lit(0, 1)))
|
2015-05-08 01:43:18 -04:00
|
|
|
verifyGCBits(t, TypeOf([2]Xptrscalar{}), lit(1, 0, 1))
|
|
|
|
|
verifyGCBits(t, ArrayOf(2, Tptrscalar), lit(1, 0, 1))
|
2015-06-07 22:47:59 -04:00
|
|
|
verifyGCBits(t, TypeOf([10000]Xptrscalar{}), rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBits(t, ArrayOf(10000, Tptrscalar), rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBits(t, TypeOf([1][10000]Xptrscalar{}), rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBits(t, ArrayOf(1, ArrayOf(10000, Tptrscalar)), rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBits(t, TypeOf([2][10000]Xptrscalar{}), rep(2*10000, lit(1, 0)))
|
|
|
|
|
verifyGCBits(t, ArrayOf(2, ArrayOf(10000, Tptrscalar)), rep(2*10000, lit(1, 0)))
|
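Nested arrays like these are exactly where the repeat instruction pays off.
Using the hypothetical inst/decode sketch above, the 40,000-bit mask for
[2][10000]Xptrscalar collapses to a two-instruction program:

// Two instructions describe 2*10000 two-word elements, versus a
// 40,000-entry literal bitmap (names are from the sketch above).
prog := []inst{
	{bits: []byte{1, 0}},                 // first element: pointer, then scalar
	{repeat: true, n: 2, c: 2*10000 - 1}, // repeat for the remaining elements
}
mask := decode(prog) // equal to rep(2*10000, lit(1, 0))
_ = mask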
2015-06-24 11:43:40 -04:00
|
|
|
verifyGCBits(t, TypeOf([4]Xbigptrscalar{}), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1))))
|
|
|
|
|
verifyGCBits(t, ArrayOf(4, Tbigptrscalar), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1))))
|
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
2015-05-08 01:43:18 -04:00
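The two-operation encoding described in the commit message above is easy to demonstrate. The toy interpreter below works over an abstract instruction list rather than the runtime's real byte-level format (which this sketch does not attempt to reproduce); it only illustrates why repetition makes large array types cheap to describe and fast to decode.

package main

import "fmt"

// op is one instruction of the toy GC program: either emit literal bits,
// or repeat the last n emitted bits c more times. The struct layout is
// invented for illustration; the runtime uses a compact byte encoding.
type op struct {
	emit []byte // literal bits to append; nil for a repeat instruction
	n, c int    // repeat: replay the last n bits c more times
}

// run decodes a toy GC program into a 1-bit-per-word pointer bitmap.
// It needs no recursion and no state beyond the output itself, matching
// the properties claimed for the real decoder.
func run(prog []op) []byte {
	var bits []byte
	for _, o := range prog {
		if o.emit != nil {
			bits = append(bits, o.emit...)
			continue
		}
		tail := append([]byte(nil), bits[len(bits)-o.n:]...) // last n bits
		for i := 0; i < o.c; i++ {
			bits = append(bits, tail...)
		}
	}
	return bits
}

func main() {
	// A [10000]Xptrscalar-style layout: emit "10" once, then repeat those
	// 2 bits 9999 more times -- a handful of instructions instead of a
	// 20000-bit literal mask.
	bits := run([]op{{emit: []byte{1, 0}}, {n: 2, c: 9999}})
	fmt.Println(len(bits), bits[:6]) // 20000 [1 0 1 0 1 0]
}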
|
|
|
|
2015-06-24 11:25:28 -04:00
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xptr{}), 0, empty)
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tptr), 0, empty)
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 1, lit(1))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tptrscalar), 1, lit(1))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 2, lit(0))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tscalar), 2, lit(0))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 10000, lit(0))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tscalar), 10000, lit(0))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xptr{}), 2, lit(1))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tptr), 2, lit(1))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xptr{}), 10000, lit(1))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tptr), 10000, lit(1))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 2, lit(0, 1))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tscalarptr), 2, lit(0, 1))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 10000, lit(0, 1))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tscalarptr), 10000, lit(0, 1))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 2, lit(1, 0))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tptrscalar), 2, lit(1, 0))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 10000, lit(1, 0))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tptrscalar), 10000, lit(1, 0))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 1, rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 1, rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 2, rep(10000, lit(1, 0)))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 2, rep(10000, lit(1, 0)))
|
2015-06-24 11:43:40 -04:00
|
|
|
verifyGCBitsSlice(t, TypeOf([]Xbigptrscalar{}), 4, join(rep(100, lit(1)), rep(100, lit(0))))
|
|
|
|
|
verifyGCBitsSlice(t, SliceOf(Tbigptrscalar), 4, join(rep(100, lit(1)), rep(100, lit(0))))
|
2015-06-24 11:25:28 -04:00
|
|
|
|
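The slice variant presumably allocates a backing array with the given capacity and compares its heap bitmap against the per-element pattern repeated cap times; that is why the expected argument in these assertions is a single element's bits (for example lit(1, 0) even at cap 10000). A sketch of that expected-bits computation follows; it is an assumed model of what verifyGCBitsSlice checks, not its actual implementation, and the trim rule for trailing scalar bits is a guess.

package main

import (
	"bytes"
	"fmt"
)

// expectedSliceBits models the expectation: the element bitmap repeated
// once per element of the backing array, with trailing scalar bits
// trimmed (cutoff below is assumed).
func expectedSliceBits(cap int, elem []byte) []byte {
	bits := bytes.Repeat(elem, cap)
	for len(bits) > 2 && bits[len(bits)-1] == 0 {
		bits = bits[:len(bits)-1]
	}
	return bits
}

func main() {
	fmt.Println(expectedSliceBits(2, []byte{1, 0})) // [1 0 1]
	fmt.Println(expectedSliceBits(2, []byte{0}))    // [0 0]
}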
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
verifyGCBits(t, TypeOf((chan [100]Xscalar)(nil)), lit(1))
|
|
|
|
|
verifyGCBits(t, ChanOf(BothDir, ArrayOf(100, Tscalar)), lit(1))
|
|
|
|
|
|
2015-06-07 22:47:59 -04:00
|
|
|
verifyGCBits(t, TypeOf((func([10000]Xscalarptr))(nil)), lit(1))
|
|
|
|
|
verifyGCBits(t, FuncOf([]Type{ArrayOf(10000, Tscalarptr)}, nil, false), lit(1))
|
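The remaining cases (chan and func here, and the map and pointer cases further below) all reduce to the same shape: however large the referenced type, the value itself is a single pointer word, so the expected bitmap is just lit(1). A quick check of the word sizes:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var c chan [100]int
	var f func([10000]int)
	var m map[[10000]int]int
	var p *[10000]int
	// Each value is one pointer word regardless of what it refers to,
	// so each type's GC bitmap is a single 1 bit.
	fmt.Println(unsafe.Sizeof(c), unsafe.Sizeof(f), unsafe.Sizeof(m), unsafe.Sizeof(p))
	// On 64-bit: 8 8 8 8
}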
runtime: replace GC programs with simpler encoding, faster decoder
2015-05-08 01:43:18 -04:00
|
|
|
|
2015-06-07 22:47:59 -04:00
|
|
|
verifyGCBits(t, TypeOf((map[[10000]Xscalarptr]Xscalar)(nil)), lit(1))
|
|
|
|
|
verifyGCBits(t, MapOf(ArrayOf(10000, Tscalarptr), Tscalar), lit(1))
|
runtime: replace GC programs with simpler encoding, faster decoder
Small types record the location of pointers in their memory layout
by using a simple bitmap. In Go 1.4 the bitmap held 4-bit entries,
and in Go 1.5 the bitmap holds 1-bit entries, but in both cases using
a bitmap for a large type containing arrays does not make sense:
if someone refers to the type [1<<28]*byte in a program in such
a way that the type information makes it into the binary, it would be
a waste of space to write a 128 MB (for 4-bit entries) or even 32 MB
(for 1-bit entries) bitmap full of 1s into the binary or even to keep
one in memory during the execution of the program.
For large types containing arrays, it is much more compact to describe
the locations of pointers using a notation that can express repetition
than to lay out a bitmap of pointers. Go 1.4 included such a notation,
called ``GC programs'' but it was complex, required recursion during
decoding, and was generally slow. Dmitriy measured the execution of
these programs writing directly to the heap bitmap as being 7x slower
than copying from a preunrolled 4-bit mask (and frankly that code was
not terribly fast either). For some tests, unrollgcprog1 was seen costing
as much as 3x more than the rest of malloc combined.
This CL introduces a different form for the GC programs. They use a
simple Lempel-Ziv-style encoding of the 1-bit pointer information,
in which the only operations are (1) emit the following n bits
and (2) repeat the last n bits c more times. This encoding can be
generated directly from the Go type information (using repetition
only for arrays or large runs of non-pointer data) and it can be decoded
very efficiently. In particular the decoding requires little state and
no recursion, so that the entire decoding can run without any memory
accesses other than the reads of the encoding and the writes of the
decoded form to the heap bitmap. For recursive types like arrays of
arrays of arrays, the inner instructions are only executed once, not
n times, so that large repetitions run at full speed. (In contrast, large
repetitions in the old programs repeated the individual bit-level layout
of the inner data over and over.) The result is as much as 25x faster
decoding compared to the old form.
Because the old decoder was so slow, Go 1.4 had three (or so) cases
for how to set the heap bitmap bits for an allocation of a given type:
(1) If the type had an even number of words up to 32 words, then
the 4-bit pointer mask for the type fit in no more than 16 bytes;
store the 4-bit pointer mask directly in the binary and copy from it.
(1b) If the type had an odd number of words up to 15 words, then
the 4-bit pointer mask for the type, doubled to end on a byte boundary,
fit in no more than 16 bytes; store that doubled mask directly in the
binary and copy from it.
(2) If the type had an even number of words up to 128 words,
or an odd number of words up to 63 words (again due to doubling),
then the 4-bit pointer mask would fit in a 64-byte unrolled mask.
Store a GC program in the binary, but leave space in the BSS for
the unrolled mask. Execute the GC program to construct the mask the
first time it is needed, and thereafter copy from the mask.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
(This is the case that was 7x slower than the other two.)
Because the new pointer masks store 1-bit entries instead of 4-bit
entries and because using the decoder no longer carries a significant
overhead, after this CL (that is, for Go 1.5) there are only two cases:
(1) If the type is 128 words or less (no condition about odd or even),
store the 1-bit pointer mask directly in the binary and use it to
initialize the heap bitmap during malloc. (Implemented in CL 9702.)
(2) There is no case 2 anymore.
(3) Otherwise, store a GC program and execute it to write directly to
the heap bitmap each time an object of that type is allocated.
Executing the GC program directly into the heap bitmap (case (3) above)
was disabled for the Go 1.5 dev cycle, both to avoid needing to use
GC programs for typedmemmove and to avoid updating that code as
the heap bitmap format changed. Typedmemmove no longer uses this
type information; as of CL 9886 it uses the heap bitmap directly.
Now that the heap bitmap format is stable, we reintroduce GC programs
and their space savings.
Benchmarks for heapBitsSetType, before this CL vs this CL:
name old mean new mean delta
SetTypePtr 7.59ns × (0.99,1.02) 5.16ns × (1.00,1.00) -32.05% (p=0.000)
SetTypePtr8 21.0ns × (0.98,1.05) 21.4ns × (1.00,1.00) ~ (p=0.179)
SetTypePtr16 24.1ns × (0.99,1.01) 24.6ns × (1.00,1.00) +2.41% (p=0.001)
SetTypePtr32 31.2ns × (0.99,1.01) 32.4ns × (0.99,1.02) +3.72% (p=0.001)
SetTypePtr64 45.2ns × (1.00,1.00) 47.2ns × (1.00,1.00) +4.42% (p=0.000)
SetTypePtr126 75.8ns × (0.99,1.01) 79.1ns × (1.00,1.00) +4.25% (p=0.000)
SetTypePtr128 74.3ns × (0.99,1.01) 77.6ns × (1.00,1.01) +4.55% (p=0.000)
SetTypePtrSlice 726ns × (1.00,1.01) 712ns × (1.00,1.00) -1.95% (p=0.001)
SetTypeNode1 20.0ns × (0.99,1.01) 20.7ns × (1.00,1.00) +3.71% (p=0.000)
SetTypeNode1Slice 112ns × (1.00,1.00) 113ns × (0.99,1.00) ~ (p=0.070)
SetTypeNode8 23.9ns × (1.00,1.00) 24.7ns × (1.00,1.01) +3.18% (p=0.000)
SetTypeNode8Slice 294ns × (0.99,1.02) 287ns × (0.99,1.01) -2.38% (p=0.015)
SetTypeNode64 52.8ns × (0.99,1.03) 51.8ns × (0.99,1.01) ~ (p=0.069)
SetTypeNode64Slice 1.13µs × (0.99,1.05) 1.14µs × (0.99,1.00) ~ (p=0.767)
SetTypeNode64Dead 36.0ns × (1.00,1.01) 32.5ns × (0.99,1.00) -9.67% (p=0.000)
SetTypeNode64DeadSlice 1.43µs × (0.99,1.01) 1.40µs × (1.00,1.00) -2.39% (p=0.001)
SetTypeNode124 75.7ns × (1.00,1.01) 79.0ns × (1.00,1.00) +4.44% (p=0.000)
SetTypeNode124Slice 1.94µs × (1.00,1.01) 2.04µs × (0.99,1.01) +4.98% (p=0.000)
SetTypeNode126 75.4ns × (1.00,1.01) 77.7ns × (0.99,1.01) +3.11% (p=0.000)
SetTypeNode126Slice 1.95µs × (0.99,1.01) 2.03µs × (1.00,1.00) +3.74% (p=0.000)
SetTypeNode128 85.4ns × (0.99,1.01) 122.0ns × (1.00,1.00) +42.89% (p=0.000)
SetTypeNode128Slice 2.20µs × (1.00,1.01) 2.36µs × (0.98,1.02) +7.48% (p=0.001)
SetTypeNode130 83.3ns × (1.00,1.00) 123.0ns × (1.00,1.00) +47.61% (p=0.000)
SetTypeNode130Slice 2.30µs × (0.99,1.01) 2.40µs × (0.98,1.01) +4.37% (p=0.000)
SetTypeNode1024 498ns × (1.00,1.00) 537ns × (1.00,1.00) +7.96% (p=0.000)
SetTypeNode1024Slice 15.5µs × (0.99,1.01) 17.8µs × (1.00,1.00) +15.27% (p=0.000)
The above compares always using a cached pointer mask (and the
corresponding waste of memory) against using the programs directly.
Some slowdown is expected, in exchange for having a better general algorithm.
The GC programs kick in for SetTypeNode128, SetTypeNode130, SetTypeNode1024,
along with the slice variants of those.
It is possible that the cutoff of 128 words (bits) should be raised
in a followup CL, but even with this low cutoff the GC programs are
faster than Go 1.4's "fast path" non-GC program case.
Benchmarks for heapBitsSetType, Go 1.4 vs this CL:
name old mean new mean delta
SetTypePtr 6.89ns × (1.00,1.00) 5.17ns × (1.00,1.00) -25.02% (p=0.000)
SetTypePtr8 25.8ns × (0.97,1.05) 21.5ns × (1.00,1.00) -16.70% (p=0.000)
SetTypePtr16 39.8ns × (0.97,1.02) 24.7ns × (0.99,1.01) -37.81% (p=0.000)
SetTypePtr32 68.8ns × (0.98,1.01) 32.2ns × (1.00,1.01) -53.18% (p=0.000)
SetTypePtr64 130ns × (1.00,1.00) 47ns × (1.00,1.00) -63.67% (p=0.000)
SetTypePtr126 241ns × (0.99,1.01) 79ns × (1.00,1.01) -67.25% (p=0.000)
SetTypePtr128 2.07µs × (1.00,1.00) 0.08µs × (1.00,1.00) -96.27% (p=0.000)
SetTypePtrSlice 1.05µs × (0.99,1.01) 0.72µs × (0.99,1.02) -31.70% (p=0.000)
SetTypeNode1 16.0ns × (0.99,1.01) 20.8ns × (0.99,1.03) +29.91% (p=0.000)
SetTypeNode1Slice 184ns × (0.99,1.01) 112ns × (0.99,1.01) -39.26% (p=0.000)
SetTypeNode8 29.5ns × (0.97,1.02) 24.6ns × (1.00,1.00) -16.50% (p=0.000)
SetTypeNode8Slice 624ns × (0.98,1.02) 285ns × (1.00,1.00) -54.31% (p=0.000)
SetTypeNode64 135ns × (0.96,1.08) 52ns × (0.99,1.02) -61.32% (p=0.000)
SetTypeNode64Slice 3.83µs × (1.00,1.00) 1.14µs × (0.99,1.01) -70.16% (p=0.000)
SetTypeNode64Dead 134ns × (0.99,1.01) 32ns × (1.00,1.01) -75.74% (p=0.000)
SetTypeNode64DeadSlice 3.83µs × (0.99,1.00) 1.40µs × (1.00,1.01) -63.42% (p=0.000)
SetTypeNode124 240ns × (0.99,1.01) 79ns × (1.00,1.01) -67.05% (p=0.000)
SetTypeNode124Slice 7.27µs × (1.00,1.00) 2.04µs × (1.00,1.00) -71.95% (p=0.000)
SetTypeNode126 2.06µs × (0.99,1.01) 0.08µs × (0.99,1.01) -96.23% (p=0.000)
SetTypeNode126Slice 64.4µs × (1.00,1.00) 2.0µs × (1.00,1.00) -96.85% (p=0.000)
SetTypeNode128 2.09µs × (1.00,1.01) 0.12µs × (1.00,1.00) -94.15% (p=0.000)
SetTypeNode128Slice 65.4µs × (1.00,1.00) 2.4µs × (0.99,1.03) -96.39% (p=0.000)
SetTypeNode130 2.11µs × (1.00,1.00) 0.12µs × (1.00,1.00) -94.18% (p=0.000)
SetTypeNode130Slice 66.3µs × (1.00,1.00) 2.4µs × (0.97,1.08) -96.34% (p=0.000)
SetTypeNode1024 16.0µs × (1.00,1.01) 0.5µs × (1.00,1.00) -96.65% (p=0.000)
SetTypeNode1024Slice 512µs × (1.00,1.00) 18µs × (0.98,1.04) -96.45% (p=0.000)
SetTypeNode124 uses a 124 data + 2 ptr = 126-word allocation.
Both Go 1.4 and this CL are using pointer bitmaps for this case,
so that's an overall 3x speedup for using pointer bitmaps.
SetTypeNode128 uses a 128 data + 2 ptr = 130-word allocation.
Both Go 1.4 and this CL are running the GC program for this case,
so that's an overall 17x speedup when using GC programs (and
I've seen >20x on other systems).
Comparing Go 1.4's SetTypeNode124 (pointer bitmap) against
this CL's SetTypeNode128 (GC program), the slow path in the
code in this CL is 2x faster than the fast path in Go 1.4.
The Go 1 benchmarks are basically unaffected compared to just before this CL.
Go 1 benchmarks, before this CL vs this CL:
name old mean new mean delta
BinaryTree17 5.87s × (0.97,1.04) 5.91s × (0.96,1.04) ~ (p=0.306)
Fannkuch11 4.38s × (1.00,1.00) 4.37s × (1.00,1.01) -0.22% (p=0.006)
FmtFprintfEmpty 90.7ns × (0.97,1.10) 89.3ns × (0.96,1.09) ~ (p=0.280)
FmtFprintfString 282ns × (0.98,1.04) 287ns × (0.98,1.07) +1.72% (p=0.039)
FmtFprintfInt 269ns × (0.99,1.03) 282ns × (0.97,1.04) +4.87% (p=0.000)
FmtFprintfIntInt 478ns × (0.99,1.02) 481ns × (0.99,1.02) +0.61% (p=0.048)
FmtFprintfPrefixedInt 399ns × (0.98,1.03) 400ns × (0.98,1.05) ~ (p=0.533)
FmtFprintfFloat 563ns × (0.99,1.01) 570ns × (1.00,1.01) +1.37% (p=0.000)
FmtManyArgs 1.89µs × (0.99,1.01) 1.92µs × (0.99,1.02) +1.88% (p=0.000)
GobDecode 15.2ms × (0.99,1.01) 15.2ms × (0.98,1.05) ~ (p=0.609)
GobEncode 11.6ms × (0.98,1.03) 11.9ms × (0.98,1.04) +2.17% (p=0.000)
Gzip 648ms × (0.99,1.01) 648ms × (1.00,1.01) ~ (p=0.835)
Gunzip 142ms × (1.00,1.00) 143ms × (1.00,1.01) ~ (p=0.169)
HTTPClientServer 90.5µs × (0.98,1.03) 91.5µs × (0.98,1.04) +1.04% (p=0.045)
JSONEncode 31.5ms × (0.98,1.03) 31.4ms × (0.98,1.03) ~ (p=0.549)
JSONDecode 111ms × (0.99,1.01) 107ms × (0.99,1.01) -3.21% (p=0.000)
Mandelbrot200 6.01ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~ (p=0.878)
GoParse 6.54ms × (0.99,1.02) 6.61ms × (0.99,1.03) +1.08% (p=0.004)
RegexpMatchEasy0_32 160ns × (1.00,1.01) 161ns × (1.00,1.00) +0.40% (p=0.000)
RegexpMatchEasy0_1K 560ns × (0.99,1.01) 559ns × (0.99,1.01) ~ (p=0.088)
RegexpMatchEasy1_32 138ns × (0.99,1.01) 138ns × (1.00,1.00) ~ (p=0.380)
RegexpMatchEasy1_1K 877ns × (1.00,1.00) 878ns × (1.00,1.00) ~ (p=0.157)
RegexpMatchMedium_32 251ns × (0.99,1.00) 251ns × (1.00,1.01) +0.28% (p=0.021)
RegexpMatchMedium_1K 72.6µs × (1.00,1.00) 72.6µs × (1.00,1.00) ~ (p=0.539)
RegexpMatchHard_32 3.84µs × (1.00,1.00) 3.84µs × (1.00,1.00) ~ (p=0.378)
RegexpMatchHard_1K 117µs × (1.00,1.00) 117µs × (1.00,1.00) ~ (p=0.067)
Revcomp 904ms × (0.99,1.02) 904ms × (0.99,1.01) ~ (p=0.943)
Template 125ms × (0.99,1.02) 127ms × (0.99,1.01) +1.79% (p=0.000)
TimeParse 627ns × (0.99,1.01) 622ns × (0.99,1.01) -0.88% (p=0.000)
TimeFormat 655ns × (0.99,1.02) 655ns × (0.99,1.02) ~ (p=0.976)
For the record, Go 1 benchmarks, Go 1.4 vs this CL:
name old mean new mean delta
BinaryTree17 4.61s × (0.97,1.05) 5.91s × (0.98,1.03) +28.35% (p=0.000)
Fannkuch11 4.40s × (0.99,1.03) 4.41s × (0.99,1.01) ~ (p=0.212)
FmtFprintfEmpty 102ns × (0.99,1.01) 84ns × (0.99,1.02) -18.38% (p=0.000)
FmtFprintfString 302ns × (0.98,1.01) 303ns × (0.99,1.02) ~ (p=0.203)
FmtFprintfInt 313ns × (0.97,1.05) 270ns × (0.99,1.01) -13.69% (p=0.000)
FmtFprintfIntInt 524ns × (0.98,1.02) 477ns × (0.99,1.00) -8.87% (p=0.000)
FmtFprintfPrefixedInt 424ns × (0.98,1.02) 386ns × (0.99,1.01) -8.96% (p=0.000)
FmtFprintfFloat 652ns × (0.98,1.02) 594ns × (0.97,1.05) -8.97% (p=0.000)
FmtManyArgs 2.13µs × (0.99,1.02) 1.94µs × (0.99,1.01) -8.92% (p=0.000)
GobDecode 17.1ms × (0.99,1.02) 14.9ms × (0.98,1.03) -13.07% (p=0.000)
GobEncode 13.5ms × (0.98,1.03) 11.5ms × (0.98,1.03) -15.25% (p=0.000)
Gzip 656ms × (0.99,1.02) 647ms × (0.99,1.01) -1.29% (p=0.000)
Gunzip 143ms × (0.99,1.02) 144ms × (0.99,1.01) ~ (p=0.204)
HTTPClientServer 88.2µs × (0.98,1.02) 90.8µs × (0.98,1.01) +2.93% (p=0.000)
JSONEncode 32.2ms × (0.98,1.02) 30.9ms × (0.97,1.04) -4.06% (p=0.001)
JSONDecode 121ms × (0.98,1.02) 110ms × (0.98,1.05) -8.95% (p=0.000)
Mandelbrot200 6.06ms × (0.99,1.01) 6.11ms × (0.98,1.04) ~ (p=0.184)
GoParse 6.76ms × (0.97,1.04) 6.58ms × (0.98,1.05) -2.63% (p=0.003)
RegexpMatchEasy0_32 195ns × (1.00,1.01) 155ns × (0.99,1.01) -20.43% (p=0.000)
RegexpMatchEasy0_1K 479ns × (0.98,1.03) 535ns × (0.99,1.02) +11.59% (p=0.000)
RegexpMatchEasy1_32 169ns × (0.99,1.02) 131ns × (0.99,1.03) -22.44% (p=0.000)
RegexpMatchEasy1_1K 1.53µs × (0.99,1.01) 0.87µs × (0.99,1.02) -43.07% (p=0.000)
RegexpMatchMedium_32 334ns × (0.99,1.01) 242ns × (0.99,1.01) -27.53% (p=0.000)
RegexpMatchMedium_1K 125µs × (1.00,1.01) 72µs × (0.99,1.03) -42.53% (p=0.000)
RegexpMatchHard_32 6.03µs × (0.99,1.01) 3.79µs × (0.99,1.01) -37.12% (p=0.000)
RegexpMatchHard_1K 189µs × (0.99,1.02) 115µs × (0.99,1.01) -39.20% (p=0.000)
Revcomp 935ms × (0.96,1.03) 926ms × (0.98,1.02) ~ (p=0.083)
Template 146ms × (0.97,1.05) 119ms × (0.99,1.01) -18.37% (p=0.000)
TimeParse 660ns × (0.99,1.01) 624ns × (0.99,1.02) -5.43% (p=0.000)
TimeFormat 670ns × (0.98,1.02) 710ns × (1.00,1.01) +5.97% (p=0.000)
This CL is a bit larger than I would like, but the compiler, linker, runtime,
and package reflect all need to be in sync about the format of these programs,
so there is no easy way to split this into independent changes (at least
while keeping the build working at each change).
Fixes #9625.
Fixes #10524.
Change-Id: I9e3e20d6097099d0f8532d1cb5b1af528804989a
Reviewed-on: https://go-review.googlesource.com/9888
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
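
To make the encoding concrete, here is a minimal sketch of its two operations (emit the following n bits; repeat the last n bits c more times). The names gcOp and runGCProg are invented for illustration and the runtime's actual byte-level program format differs; this only models the semantics:

type gcOp struct {
	emit []byte // literal op: bits to append, one byte per bit (0 or 1)
	n, c int    // repeat op (when emit == nil): repeat the last n bits c more times
}

// runGCProg expands a program into its 1-bit pointer bitmap.
// Repeats copy already-decoded output, so nested repetition
// never re-executes the inner instructions.
func runGCProg(prog []gcOp) []byte {
	var bits []byte
	for _, op := range prog {
		if op.emit != nil {
			bits = append(bits, op.emit...)
			continue
		}
		tail := bits[len(bits)-op.n:]
		for i := 0; i < op.c; i++ {
			bits = append(bits, tail...)
		}
	}
	return bits
}

For example, runGCProg([]gcOp{{emit: []byte{1}}, {n: 1, c: 9999}}) expands to 10000 one-bits for a [10000]-element pointer array without the binary ever storing the full bitmap, which is the space saving the verifyGCBits checks below exercise with 10000-element arrays.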

verifyGCBits(t, TypeOf((*[10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, PtrTo(ArrayOf(10000, Tscalar)), lit(1))
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
hdr := make([]byte, 8/PtrSize)
cmd/compile, runtime: fix placement of map bucket overflow pointer on nacl
On most systems, a pointer is the worst case alignment, so adding
a pointer field at the end of a struct guarantees there will be no
padding added after that field (to satisfy overall struct alignment
due to some more-aligned field also present).
In the runtime, the map implementation needs a quick way to
get to the overflow pointer, which is last in the bucket struct,
so it uses size - sizeof(pointer) as the offset.
NaCl/amd64p32 is the exception, as always.
The worst case alignment is 64 bits but pointers are 32 bits.
There's a long history that is not worth going into, but when
we moved the overflow pointer to the end of the struct,
we didn't get the padding computation right.
The compiler computed the regular struct size and then
on amd64p32 added another 32-bit field.
And the runtime assumed it could step back two 32-bit fields
(one 64-bit register size) to get to the overflow pointer.
But in fact if the struct needed 64-bit alignment, the computation
of the regular struct size would have added a 32-bit pad already,
and then the code unconditionally added a second 32-bit pad.
This placed the overflow pointer three words from the end, not two.
The last two were padding, and since the runtime was consistent
about using the second-to-last word as the overflow pointer,
no harm done in the sense of overwriting useful memory.
But writing the overflow pointer to a non-pointer word of memory
means that the GC can't see the overflow blocks, so it will
collect them prematurely. Then bad things happen.
Correct all this in a few steps:
1. Add an explicit check at the end of the bucket layout in the
compiler that the overflow field is last in the struct, never
followed by padding.
2. When padding is needed on nacl (not always, just when needed),
insert it before the overflow pointer, to preserve the "last in the struct"
property.
3. Let the compiler have the final word on the width of the struct,
by inserting an explicit padding field instead of overwriting the
results of the width computation it does.
4. For the same reason (tell the truth to the compiler), set the type
of the overflow field when we're trying to pretend it's not a pointer
(in this case the runtime maintains a list of the overflow blocks
elsewhere).
5. Make the runtime use "last in the struct" as its location algorithm.
This fixes TestTraceStress on nacl/amd64p32.
The 'bad map state' and 'invalid free list' failures no longer occur.
Fixes #11838.
Change-Id: If918887f8f252d988db0a35159944d2b36512f92
Reviewed-on: https://go-review.googlesource.com/12971
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
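
A hedged sketch of the "last in the struct" rule from step 5 — the helper below is hypothetical, not the runtime's code, but it shows why padding must be inserted before the overflow pointer when the worst-case alignment is wider than a pointer, as on nacl/amd64p32 (alignment 8, pointer size 4):

// overflowOffset returns the offset of a bucket's overflow pointer given
// the size in bytes of the fields preceding it. Padding needed to reach
// the struct's overall alignment goes before the pointer, so the pointer
// stays last and size-ptrSize always locates it.
func overflowOffset(fieldBytes, ptrSize, align uintptr) uintptr {
	size := fieldBytes + ptrSize
	if rem := size % align; rem != 0 {
		size += align - rem // pad before the overflow pointer
	}
	return size - ptrSize
}

With fieldBytes=16, ptrSize=4, align=8 this returns 20 in a 24-byte bucket: the pad occupies bytes 16-20 and the pointer is last, instead of the pointer landing three words from the end as in the bug described above.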

verifyMapBucket := func(t *testing.T, k, e Type, m interface{}, want []byte) {
	verifyGCBits(t, MapBucketOf(k, e), want)
	verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
}
verifyMapBucket(t,
	Tscalar, Tptr,
	map[Xscalar]Xptr(nil),
	join(hdr, rep(8, lit(0)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t,
	Tscalarptr, Tptr,
	map[Xscalarptr]Xptr(nil),
	join(hdr, rep(8, lit(0, 1)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t, Tint64, Tptr,
	map[int64]Xptr(nil),
	join(hdr, rep(8, rep(8/PtrSize, lit(0))), rep(8, lit(1)), naclpad(), lit(1)))
verifyMapBucket(t,
	Tscalar, Tscalar,
	map[Xscalar]Xscalar(nil),
	empty)
verifyMapBucket(t,
	ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
	map[[2]Xscalarptr][3]Xptrscalar(nil),
	join(hdr, rep(8*2, lit(0, 1)), rep(8*3, lit(1, 0)), lit(1)))
verifyMapBucket(t,
	ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
	map[[64 / PtrSize]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
	join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
	ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
	map[[64/PtrSize + 1]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
	join(hdr, rep(8, lit(1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
	ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
	map[[64 / PtrSize]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
	join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t,
	ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
	map[[64/PtrSize + 1]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
	join(hdr, rep(8, lit(1)), rep(8, lit(1)), lit(1)))
}

func naclpad() []byte {
	if runtime.GOARCH == "amd64p32" {
		return lit(0)
	}
	return nil
}

func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
func join(b ...[]byte) []byte { return bytes.Join(b, nil) }
func lit(x ...byte) []byte { return x }
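
Read together with the bucket checks above, these helpers spell out an expected pointer bitmap word by word; for instance the map[Xscalar]Xptr case above is

	join(hdr, rep(8, lit(0)), rep(8, lit(1)), lit(1))

i.e. the bucket's header words, eight scalar key words, eight pointer element words, and the trailing overflow pointer.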

func TestTypeOfTypeOf(t *testing.T) {
	// Check that all the type constructors return concrete *rtype implementations.
	// It's difficult to test directly because the reflect package is only at arm's length.
	// The easiest thing to do is just call a function that crashes if it doesn't get an *rtype.
	check := func(name string, typ Type) {
		if underlying := TypeOf(typ).String(); underlying != "*reflect.rtype" {
			t.Errorf("%v returned %v, not *reflect.rtype", name, underlying)
		}
	}

	type T struct{ int }
	check("TypeOf", TypeOf(T{}))

	check("ArrayOf", ArrayOf(10, TypeOf(T{})))
	check("ChanOf", ChanOf(BothDir, TypeOf(T{})))
	check("FuncOf", FuncOf([]Type{TypeOf(T{})}, nil, false))
	check("MapOf", MapOf(TypeOf(T{}), TypeOf(T{})))
	check("PtrTo", PtrTo(TypeOf(T{})))
	check("SliceOf", SliceOf(TypeOf(T{})))
}

type XM struct{ _ bool }

func (*XM) String() string { return "" }

func TestPtrToMethods(t *testing.T) {
	var y struct{ XM }
	yp := New(TypeOf(y)).Interface()
	_, ok := yp.(fmt.Stringer)
	if !ok {
		t.Fatal("does not implement Stringer, but should")
	}
}

func TestMapAlloc(t *testing.T) {
	m := ValueOf(make(map[int]int, 10))
	k := ValueOf(5)
	v := ValueOf(7)
	allocs := testing.AllocsPerRun(100, func() {
		m.SetMapIndex(k, v)
	})
	if allocs > 0.5 {
		t.Errorf("allocs per map assignment: want 0 got %f", allocs)
	}

	const size = 1000
	tmp := 0
	val := ValueOf(&tmp).Elem()
	allocs = testing.AllocsPerRun(100, func() {
		mv := MakeMapWithSize(TypeOf(map[int]int{}), size)
		// Only add half of the capacity, to not trigger re-allocations due to too many overloaded buckets.
		for i := 0; i < size/2; i++ {
			val.SetInt(int64(i))
			mv.SetMapIndex(val, val)
		}
	})
	if allocs > 10 {
		t.Errorf("allocs per map assignment: want at most 10 got %f", allocs)
	}
	// Empirical testing shows that with a capacity hint a single run triggers 3 allocations
	// and without one it triggers 91. The threshold is set to 10 so the test is not overly
	// brittle if something changes in the initial allocation of the map, but it still catches
	// a regression where we keep re-allocating in the hashmap as new entries are added.
}

func TestChanAlloc(t *testing.T) {
	// Note: for a chan int, the return Value must be allocated, so we
	// use a chan *int instead.
	c := ValueOf(make(chan *int, 1))
	v := ValueOf(new(int))
	allocs := testing.AllocsPerRun(100, func() {
		c.Send(v)
		_, _ = c.Recv()
	})
	if allocs < 0.5 || allocs > 1.5 {
		t.Errorf("allocs per chan send/recv: want 1 got %f", allocs)
	}
	// Note: there is one allocation in reflect.recv which seems to be
	// a limitation of escape analysis. If that is ever fixed the
	// allocs < 0.5 condition will trigger and this test should be fixed.
}

type TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678 int

type nameTest struct {
	v    interface{}
	want string
}

var nameTests = []nameTest{
	{(*int32)(nil), "int32"},
	{(*D1)(nil), "D1"},
	{(*[]D1)(nil), ""},
	{(*chan D1)(nil), ""},
	{(*func() D1)(nil), ""},
	{(*<-chan D1)(nil), ""},
	{(*chan<- D1)(nil), ""},
	{(*interface{})(nil), ""},
	{(*interface {
		F()
	})(nil), ""},
	{(*TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678)(nil), "TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678"},
}

func TestNames(t *testing.T) {
	for _, test := range nameTests {
		typ := TypeOf(test.v).Elem()
		if got := typ.Name(); got != test.want {
			t.Errorf("%v Name()=%q, want %q", typ, got, test.want)
		}
	}
}

func TestExported(t *testing.T) {
|
|
|
|
|
type ΦExported struct{}
|
|
|
|
|
type φUnexported struct{}
|
|
|
|
|
type BigP *big
|
|
|
|
|
type P int
|
|
|
|
|
type p *P
|
|
|
|
|
type P2 p
|
|
|
|
|
type p3 p
|
|
|
|
|
|
|
|
|
|
type exportTest struct {
|
|
|
|
|
v interface{}
|
|
|
|
|
want bool
|
|
|
|
|
}
|
|
|
|
|
exportTests := []exportTest{
|
|
|
|
|
{D1{}, true},
|
|
|
|
|
{(*D1)(nil), true},
|
|
|
|
|
{big{}, false},
|
|
|
|
|
{(*big)(nil), false},
|
|
|
|
|
{(BigP)(nil), true},
|
|
|
|
|
{(*BigP)(nil), true},
|
|
|
|
|
{ΦExported{}, true},
|
|
|
|
|
{φUnexported{}, false},
|
|
|
|
|
{P(0), true},
|
|
|
|
|
{(p)(nil), false},
|
|
|
|
|
{(P2)(nil), true},
|
|
|
|
|
{(p3)(nil), false},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for i, test := range exportTests {
|
|
|
|
|
typ := TypeOf(test.v)
|
|
|
|
|
if got := IsExported(typ); got != test.want {
|
|
|
|
|
t.Errorf("%d: %s exported=%v, want %v", i, typ.Name(), got, test.want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
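
// IsExported above is an export hook available only in the test build. A
// rough public-API approximation (sketch): a defined type is exported iff
// its Name starts with an upper-case letter. Unlike the internal check,
// this reports false for pointer types, whose Name is empty.
func isExportedNameSketch(typ Type) bool {
	return typ.Name() != "" && token.IsExported(typ.Name())
}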
|
|
|
|
|
|
2016-03-25 14:28:15 -04:00
|
|
|
type embed struct {
|
|
|
|
|
EmbedWithUnexpMeth
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestNameBytesAreAligned(t *testing.T) {
|
|
|
|
|
typ := TypeOf(embed{})
|
|
|
|
|
b := FirstMethodNameBytes(typ)
|
|
|
|
|
v := uintptr(unsafe.Pointer(b))
|
|
|
|
|
if v%unsafe.Alignof((*byte)(nil)) != 0 {
|
|
|
|
|
t.Errorf("reflect.name.bytes pointer is not aligned: %x", v)
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-18 04:50:19 -04:00
|
|
|
|
2016-05-13 12:33:27 -04:00
|
|
|
func TestTypeStrings(t *testing.T) {
|
|
|
|
|
type stringTest struct {
|
|
|
|
|
typ Type
|
|
|
|
|
want string
|
|
|
|
|
}
|
|
|
|
|
stringTests := []stringTest{
|
|
|
|
|
{TypeOf(func(int) {}), "func(int)"},
|
|
|
|
|
{FuncOf([]Type{TypeOf(int(0))}, nil, false), "func(int)"},
|
|
|
|
|
{TypeOf(XM{}), "reflect_test.XM"},
|
|
|
|
|
{TypeOf(new(XM)), "*reflect_test.XM"},
|
|
|
|
|
{TypeOf(new(XM).String), "func() string"},
|
|
|
|
|
{TypeOf(new(XM)).Method(0).Type, "func(*reflect_test.XM) string"},
|
2016-08-16 07:44:57 -04:00
|
|
|
{ChanOf(3, TypeOf(XM{})), "chan reflect_test.XM"},
|
|
|
|
|
{MapOf(TypeOf(int(0)), TypeOf(XM{})), "map[int]reflect_test.XM"},
|
2017-05-10 13:10:46 +02:00
|
|
|
{ArrayOf(3, TypeOf(XM{})), "[3]reflect_test.XM"},
|
2017-05-10 13:53:39 +02:00
|
|
|
{ArrayOf(3, TypeOf(struct{}{})), "[3]struct {}"},
|
2016-05-13 12:33:27 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for i, test := range stringTests {
|
|
|
|
|
if got, want := test.typ.String(), test.want; got != want {
|
|
|
|
|
t.Errorf("type %d String()=%q, want %q", i, got, want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
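
// Sketch: the constructed types above are usable, not just printable. A
// func type built with FuncOf can be instantiated with MakeFunc and called
// through a plain Go func value.
func funcOfSketch() string {
	ft := FuncOf([]Type{TypeOf(0)}, []Type{TypeOf("")}, false) // func(int) string
	fv := MakeFunc(ft, func(args []Value) []Value {
		return []Value{ValueOf(strconv.Itoa(int(args[0].Int())))}
	})
	return fv.Interface().(func(int) string)(42) // "42"
}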
|
2016-05-25 13:19:11 -04:00
|
|
|
|
|
|
|
|
func TestOffsetLock(t *testing.T) {
|
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
|
for i := 0; i < 4; i++ {
|
|
|
|
|
i := i
|
|
|
|
|
wg.Add(1)
|
|
|
|
|
go func() {
|
|
|
|
|
for j := 0; j < 50; j++ {
|
|
|
|
|
ResolveReflectName(fmt.Sprintf("OffsetLockName:%d:%d", i, j))
|
|
|
|
|
}
|
|
|
|
|
wg.Done()
|
|
|
|
|
}()
|
|
|
|
|
}
|
|
|
|
|
wg.Wait()
|
|
|
|
|
}
|
2016-06-23 10:59:38 -04:00
|
|
|
|
|
|
|
|
func BenchmarkNew(b *testing.B) {
|
|
|
|
|
v := TypeOf(XM{})
|
2017-02-10 16:33:21 -05:00
|
|
|
b.RunParallel(func(pb *testing.PB) {
|
|
|
|
|
for pb.Next() {
|
|
|
|
|
New(v)
|
|
|
|
|
}
|
|
|
|
|
})
|
2016-06-23 10:59:38 -04:00
|
|
|
}
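
// For reference (sketch): New(t) is the reflected counterpart of the new
// built-in, which is what the benchmark above exercises.
func newSketch() *int {
	return New(TypeOf(0)).Interface().(*int) // like new(int)
}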
|
2016-09-30 18:12:37 +00:00
|
|
|
|
|
|
|
|
func TestSwapper(t *testing.T) {
|
|
|
|
|
type I int
|
|
|
|
|
var a, b, c I
|
|
|
|
|
type pair struct {
|
|
|
|
|
x, y int
|
|
|
|
|
}
|
|
|
|
|
type pairPtr struct {
|
|
|
|
|
x, y int
|
|
|
|
|
p *I
|
|
|
|
|
}
|
|
|
|
|
type S string
|
|
|
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
|
in interface{}
|
|
|
|
|
i, j int
|
|
|
|
|
want interface{}
|
|
|
|
|
}{
|
|
|
|
|
{
|
|
|
|
|
in: []int{1, 20, 300},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []int{300, 20, 1},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []uintptr{1, 20, 300},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []uintptr{300, 20, 1},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []int16{1, 20, 300},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []int16{300, 20, 1},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []int8{1, 20, 100},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []int8{100, 20, 1},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []*I{&a, &b, &c},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []*I{&c, &b, &a},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []string{"eric", "sergey", "larry"},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []string{"larry", "sergey", "eric"},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []S{"eric", "sergey", "larry"},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []S{"larry", "sergey", "eric"},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []pair{{1, 2}, {3, 4}, {5, 6}},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []pair{{5, 6}, {3, 4}, {1, 2}},
|
|
|
|
|
},
|
|
|
|
|
{
|
|
|
|
|
in: []pairPtr{{1, 2, &a}, {3, 4, &b}, {5, 6, &c}},
|
|
|
|
|
i: 0,
|
|
|
|
|
j: 2,
|
|
|
|
|
want: []pairPtr{{5, 6, &c}, {3, 4, &b}, {1, 2, &a}},
|
|
|
|
|
},
|
|
|
|
|
}
|
[dev.typealias] reflect: fix StructOf use of StructField to match StructField docs
The runtime internal structField interprets name=="" as meaning anonymous,
but the exported reflect.StructField has always set Name, even for anonymous
fields, and also set Anonymous=true.
The initial implementation of StructOf confused the internal and public
meanings of the StructField, expecting the runtime representation of
anonymous fields instead of the exported reflect API representation.
It also did not document this fact, so that users had no way to know how
to create an anonymous field.
This CL changes StructOf to use the previously documented interpretation
of reflect.StructField instead of an undocumented one.
The implementation of StructOf also, in some cases, allowed creating
structs with unexported fields (if you knew how to ask) but set the
PkgPath incorrectly on those fields. Rather than try to fix that, this CL
changes StructOf to reject attempts to create unexported fields.
(I think that may be the right design choice, not just a temporary limitation.
In any event, it's not the topic for today's work.)
For #17766.
Fixes #18780.
Change-Id: I585a4e324dc5a90551f49d21ae04d2de9ea04b6c
Reviewed-on: https://go-review.googlesource.com/35731
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
2017-01-25 09:50:36 -05:00
|
|
|
|
2016-09-30 18:12:37 +00:00
|
|
|
for i, tt := range tests {
|
|
|
|
|
inStr := fmt.Sprint(tt.in)
|
|
|
|
|
Swapper(tt.in)(tt.i, tt.j)
|
|
|
|
|
if !DeepEqual(tt.in, tt.want) {
|
|
|
|
|
t.Errorf("%d. swapping %v and %v of %v = %v; want %v", i, tt.i, tt.j, inStr, tt.in, tt.want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
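
// Sketch of Swapper in ordinary use: the returned function exchanges two
// slice elements in place, which is exactly what sort implementations need.
func swapperSketch() []string {
	s := []string{"sergey", "eric"}
	Swapper(s)(0, 1) // s is now ["eric", "sergey"]
	return s
}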
|
2016-11-04 18:22:06 -04:00
|
|
|
|
2016-11-10 19:02:07 -05:00
|
|
|
// TestUnaddressableField tests that the reflect package refuses to assign
|
|
|
|
|
// a value of a type from another package into a look-alike local type
|
|
|
|
|
// when that type has an unexported field.
|
|
|
|
|
//
|
|
|
|
|
// This ensures that unexported fields cannot be modified by other packages.
|
|
|
|
|
func TestUnaddressableField(t *testing.T) {
|
|
|
|
|
var b Buffer // type defined in reflect, a different package
|
2016-11-04 18:22:06 -04:00
|
|
|
var localBuffer struct {
|
|
|
|
|
buf []byte
|
|
|
|
|
}
|
|
|
|
|
lv := ValueOf(&localBuffer).Elem()
|
|
|
|
|
rv := ValueOf(b)
|
|
|
|
|
shouldPanic(func() {
|
|
|
|
|
lv.Set(rv)
|
|
|
|
|
})
|
|
|
|
|
}
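
// shouldPanic is defined elsewhere in this file; a minimal version of the
// pattern looks like this (illustrative name, not the real helper).
func shouldPanicSketch(t *testing.T, f func()) {
	t.Helper()
	defer func() {
		if recover() == nil {
			t.Errorf("function did not panic")
		}
	}()
	f()
}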
|
2017-01-25 10:19:33 -05:00
|
|
|
|
2017-01-24 14:59:22 -05:00
|
|
|
type Tint int
|
|
|
|
|
|
|
|
|
|
type Tint2 = Tint
|
|
|
|
|
|
2017-01-25 10:19:33 -05:00
|
|
|
type Talias1 struct {
|
|
|
|
|
byte
|
|
|
|
|
uint8
|
|
|
|
|
int
|
|
|
|
|
int32
|
|
|
|
|
rune
|
|
|
|
|
}
|
|
|
|
|
|
2017-01-24 14:59:22 -05:00
|
|
|
type Talias2 struct {
|
|
|
|
|
Tint
|
|
|
|
|
Tint2
|
|
|
|
|
}
|
|
|
|
|
|
2017-01-25 10:19:33 -05:00
|
|
|
func TestAliasNames(t *testing.T) {
|
|
|
|
|
t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5}
|
|
|
|
|
out := fmt.Sprintf("%#v", t1)
|
|
|
|
|
want := "reflect_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}"
|
|
|
|
|
if out != want {
|
|
|
|
|
t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want)
|
|
|
|
|
}
|
2017-01-24 14:59:22 -05:00
|
|
|
|
|
|
|
|
t2 := Talias2{Tint: 1, Tint2: 2}
|
|
|
|
|
out = fmt.Sprintf("%#v", t2)
|
|
|
|
|
want = "reflect_test.Talias2{Tint:1, Tint2:2}"
|
|
|
|
|
if out != want {
|
|
|
|
|
t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want)
|
|
|
|
|
}
|
2017-01-25 10:19:33 -05:00
|
|
|
}
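
// Aliases do not create new types (sketch): Tint2 = Tint, so reflect sees
// one and the same type, and Name reports the original name.
func aliasIdentitySketch() (bool, string) {
	return TypeOf(Tint2(0)) == TypeOf(Tint(0)), TypeOf(Tint2(0)).Name() // true, "Tint"
}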
|
2017-09-26 14:55:41 -07:00
|
|
|
|
|
|
|
|
func TestIssue22031(t *testing.T) {
|
|
|
|
|
type s []struct{ C int }
|
|
|
|
|
|
|
|
|
|
type t1 struct{ s }
|
|
|
|
|
type t2 struct{ f s }
|
|
|
|
|
|
|
|
|
|
tests := []Value{
|
|
|
|
|
ValueOf(t1{s{{}}}).Field(0).Index(0).Field(0),
|
|
|
|
|
ValueOf(t2{s{{}}}).Field(0).Index(0).Field(0),
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for i, test := range tests {
|
|
|
|
|
if test.CanSet() {
|
|
|
|
|
t.Errorf("%d: CanSet: got true, want false", i)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-09-27 20:14:54 -07:00
|
|
|
|
|
|
|
|
type NonExportedFirst int
|
|
|
|
|
|
|
|
|
|
func (i NonExportedFirst) ΦExported() {}
|
|
|
|
|
func (i NonExportedFirst) nonexported() int { panic("wrong") }
|
|
|
|
|
|
|
|
|
|
func TestIssue22073(t *testing.T) {
|
|
|
|
|
m := ValueOf(NonExportedFirst(0)).Method(0)
|
|
|
|
|
|
|
|
|
|
if got := m.Type().NumOut(); got != 0 {
|
|
|
|
|
t.Errorf("NumOut: got %v, want 0", got)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Shouldn't panic.
|
|
|
|
|
m.Call(nil)
|
|
|
|
|
}
|
2016-11-23 15:34:08 -05:00
|
|
|
|
|
|
|
|
func TestMapIterNonEmptyMap(t *testing.T) {
|
|
|
|
|
m := map[string]int{"one": 1, "two": 2, "three": 3}
|
|
|
|
|
iter := ValueOf(m).MapRange()
|
|
|
|
|
if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want {
|
|
|
|
|
t.Errorf("iterator returned %s (after sorting), want %s", got, want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestMapIterNilMap(t *testing.T) {
|
|
|
|
|
var m map[string]int
|
|
|
|
|
iter := ValueOf(m).MapRange()
|
|
|
|
|
if got, want := iterateToString(iter), `[]`; got != want {
|
|
|
|
|
t.Errorf("non-empty result iteratoring nil map: %s", got)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestMapIterSafety(t *testing.T) {
|
|
|
|
|
// Using a zero MapIter causes a panic, but not a crash.
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
new(MapIter).Key()
|
|
|
|
|
t.Fatal("Key did not panic")
|
|
|
|
|
}()
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
new(MapIter).Value()
|
|
|
|
|
t.Fatal("Value did not panic")
|
|
|
|
|
}()
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
new(MapIter).Next()
|
|
|
|
|
t.Fatal("Next did not panic")
|
|
|
|
|
}()
|
|
|
|
|
|
|
|
|
|
// Calling Key/Value on a MapIter before Next
|
|
|
|
|
// causes a panic, but not a crash.
|
|
|
|
|
var m map[string]int
|
|
|
|
|
iter := ValueOf(m).MapRange()
|
|
|
|
|
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
iter.Key()
|
|
|
|
|
t.Fatal("Key did not panic")
|
|
|
|
|
}()
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
iter.Value()
|
|
|
|
|
t.Fatal("Value did not panic")
|
|
|
|
|
}()
|
|
|
|
|
|
|
|
|
|
// Calling Next, Key, or Value on an exhausted iterator
|
|
|
|
|
// causes a panic, but not a crash.
|
|
|
|
|
iter.Next() // -> false
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
iter.Key()
|
|
|
|
|
t.Fatal("Key did not panic")
|
|
|
|
|
}()
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
iter.Value()
|
|
|
|
|
t.Fatal("Value did not panic")
|
|
|
|
|
}()
|
|
|
|
|
func() {
|
|
|
|
|
defer func() { recover() }()
|
|
|
|
|
iter.Next()
|
|
|
|
|
t.Fatal("Next did not panic")
|
|
|
|
|
}()
|
|
|
|
|
}
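
// The MapIter contract exercised above, in sketch form: Key and Value are
// valid only after a Next call that returned true, and before the next
// call to Next.
func mapIterContractSketch(m map[string]int) int {
	sum := 0
	it := ValueOf(m).MapRange()
	for it.Next() { // Next must be called before the first Key/Value
		sum += int(it.Value().Int())
	}
	return sum
}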
|
|
|
|
|
|
|
|
|
|
func TestMapIterNext(t *testing.T) {
|
|
|
|
|
// The first call to Next should reflect any
|
|
|
|
|
// insertions to the map since the iterator was created.
|
|
|
|
|
m := map[string]int{}
|
|
|
|
|
iter := ValueOf(m).MapRange()
|
|
|
|
|
m["one"] = 1
|
|
|
|
|
if got, want := iterateToString(iter), `[one: 1]`; got != want {
|
|
|
|
|
t.Errorf("iterator returned deleted elements: got %s, want %s", got, want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestMapIterDelete0(t *testing.T) {
|
|
|
|
|
// Delete all elements before first iteration.
|
|
|
|
|
m := map[string]int{"one": 1, "two": 2, "three": 3}
|
|
|
|
|
iter := ValueOf(m).MapRange()
|
|
|
|
|
delete(m, "one")
|
|
|
|
|
delete(m, "two")
|
|
|
|
|
delete(m, "three")
|
|
|
|
|
if got, want := iterateToString(iter), `[]`; got != want {
|
|
|
|
|
t.Errorf("iterator returned deleted elements: got %s, want %s", got, want)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestMapIterDelete1(t *testing.T) {
|
|
|
|
|
// Delete all elements after first iteration.
|
|
|
|
|
m := map[string]int{"one": 1, "two": 2, "three": 3}
|
|
|
|
|
iter := ValueOf(m).MapRange()
|
|
|
|
|
var got []string
|
|
|
|
|
for iter.Next() {
|
|
|
|
|
got = append(got, fmt.Sprint(iter.Key(), iter.Value()))
|
|
|
|
|
delete(m, "one")
|
|
|
|
|
delete(m, "two")
|
|
|
|
|
delete(m, "three")
|
|
|
|
|
}
|
|
|
|
|
if len(got) != 1 {
|
|
|
|
|
t.Errorf("iterator returned wrong number of elements: got %d, want 1", len(got))
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// iterateToString returns the elements produced by an iterator,
|
|
|
|
|
// sorted, in readable form.
|
|
|
|
|
func iterateToString(it *MapIter) string {
|
|
|
|
|
var got []string
|
|
|
|
|
for it.Next() {
|
|
|
|
|
line := fmt.Sprintf("%v: %v", it.Key(), it.Value())
|
|
|
|
|
got = append(got, line)
|
|
|
|
|
}
|
|
|
|
|
sort.Strings(got)
|
|
|
|
|
return "[" + strings.Join(got, ", ") + "]"
|
|
|
|
|
}
|