go/src/cmd/compile/internal/gc/builtin/runtime.go

170 lines
6.1 KiB
Go
Raw Normal View History

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// NOTE: If you change this file you must run "go generate"
// to update builtin.go. This is not done automatically
// to avoid depending on having a working compiler binary.
// +build ignore
package runtime
// emitted by compiler, not referred to by go programs
//
// NOTE(review): declarations only — no bodies. The implementations live
// in package runtime; this file exists so the compiler knows the
// signatures. "any" appears to be a placeholder the compiler specializes
// per call site, and *byte stands in for pointers to runtime type
// descriptors — confirm against the builtin generator.
func newobject(typ *byte) *any
// Run-time panics: out-of-range index/slice, division by zero, and
// failed package init. panicwrap's three strings are presumably
// package/type/method names for a nil-receiver wrapper — verify.
func panicindex()
func panicslice()
func panicdivide()
func throwinit()
func panicwrap(string, string, string)
// Entry points for the panic and recover built-ins.
func gopanic(interface{})
func gorecover(*int32) interface{}
// Helpers backing the print/println built-ins, one per operand kind.
func printbool(bool)
func printfloat(float64)
func printint(int64)
func printhex(uint64)
func printuint(uint64)
func printcomplex(complex128)
func printstring(string)
func printpointer(any)
func printiface(any)
func printeface(any)
func printslice(any)
func printnl()
func printsp()
func printlock()
func printunlock()
// String concatenation, comparison, and string<->slice conversions.
// The *[32]byte / *[4]byte / *[32]rune first arguments look like
// optional compiler-provided stack scratch buffers used to avoid a
// heap allocation when the result is small and non-escaping — confirm
// against the runtime implementations.
func concatstring2(*[32]byte, string, string) string
func concatstring3(*[32]byte, string, string, string) string
func concatstring4(*[32]byte, string, string, string, string) string
func concatstring5(*[32]byte, string, string, string, string, string) string
func concatstrings(*[32]byte, []string) string
func cmpstring(string, string) int
func eqstring(string, string) bool
func intstring(*[4]byte, int64) string
func slicebytetostring(*[32]byte, []byte) string
// The ...tmp variants presumably return results that must not outlive
// (or be mutated after) the argument — verify their runtime doc comments.
func slicebytetostringtmp([]byte) string
func slicerunetostring(*[32]byte, []rune) string
func stringtoslicebyte(*[32]byte, string) []byte
func stringtoslicebytetmp(string) []byte
func stringtoslicerune(*[32]rune, string) []rune
cmd/compile: improve string iteration performance Generate a for loop for ranging over strings that only needs to call the runtime function charntorune for non ASCII characters. This provides faster iteration over ASCII characters and slightly faster iteration for other characters. The runtime function charntorune is changed to take an index from where to start decoding and returns the index after the last byte belonging to the decoded rune. All call sites of charntorune in the runtime are replaced by a for loop that will be transformed by the compiler instead of calling the charntorune function directly. go binary size decreases by 80 bytes. godoc binary size increases by around 4 kilobytes. runtime: name old time/op new time/op delta RuneIterate/range/ASCII-4 43.7ns ± 3% 10.3ns ± 4% -76.33% (p=0.000 n=44+45) RuneIterate/range/Japanese-4 72.5ns ± 2% 62.8ns ± 2% -13.41% (p=0.000 n=49+50) RuneIterate/range1/ASCII-4 43.5ns ± 2% 10.4ns ± 3% -76.18% (p=0.000 n=50+50) RuneIterate/range1/Japanese-4 72.5ns ± 2% 62.9ns ± 2% -13.26% (p=0.000 n=50+49) RuneIterate/range2/ASCII-4 43.5ns ± 3% 10.3ns ± 2% -76.22% (p=0.000 n=48+47) RuneIterate/range2/Japanese-4 72.4ns ± 2% 62.7ns ± 2% -13.47% (p=0.000 n=50+50) strings: name old time/op new time/op delta IndexRune-4 64.7ns ± 5% 22.4ns ± 3% -65.43% (p=0.000 n=25+21) MapNoChanges-4 269ns ± 2% 157ns ± 2% -41.46% (p=0.000 n=23+24) Fields-4 23.0ms ± 2% 19.7ms ± 2% -14.35% (p=0.000 n=25+25) FieldsFunc-4 23.1ms ± 2% 19.6ms ± 2% -14.94% (p=0.000 n=25+24) name old speed new speed delta Fields-4 45.6MB/s ± 2% 53.2MB/s ± 2% +16.87% (p=0.000 n=24+25) FieldsFunc-4 45.5MB/s ± 2% 53.5MB/s ± 2% +17.57% (p=0.000 n=25+24) Updates #13162 Change-Id: I79ffaf828d82bf9887592f08e5cad883e9f39701 Reviewed-on: https://go-review.googlesource.com/27853 TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Keith Randall <khr@golang.org> Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com> Run-TryBot: Martin Möhrmann <martisch@uos.de>
2016-08-26 15:00:46 +02:00
// charntorune decodes the UTF-8 rune starting at the given byte index
// of the string; retv is the decoded rune and retk is the index just
// past its last byte. Called from compiler-generated string range
// loops only for non-ASCII bytes (the ASCII fast path is inlined).
func charntorune(string, int) (retv rune, retk int)
// Implementations of the copy built-in; wid is the element width in
// bytes. The int result is presumably the number of elements copied,
// matching copy's result — verify.
func slicecopy(to any, fr any, wid uintptr) int
func slicestringcopy(to any, fr any) int
// interface conversions
// Naming convention (presumably): T = concrete type, I = non-empty
// interface, E = empty interface; convX2Y converts an X to a Y.
// *byte arguments are runtime type/itab descriptors — verify.
func convI2E(elem any) (ret any)
func convI2I(typ *byte, elem any) (ret any)
func convT2E(typ *byte, elem *any) (ret any)
func convT2I(tab *byte, elem *any) (ret any)
// interface type assertions x.(T)
// The plain forms are the single-result assertion (presumably panicking
// on failure); the ...2 forms are the two-result "comma, ok" assertions
// and report success via the returned bool instead — confirm.
func assertE2E(typ *byte, iface any, ret *any)
func assertE2E2(typ *byte, iface any, ret *any) bool
func assertE2I(typ *byte, iface any, ret *any)
func assertE2I2(typ *byte, iface any, ret *any) bool
func assertE2T(typ *byte, iface any, ret *any)
func assertE2T2(typ *byte, iface any, ret *any) bool
func assertI2E(typ *byte, iface any, ret *any)
func assertI2E2(typ *byte, iface any, ret *any) bool
func assertI2I(typ *byte, iface any, ret *any)
func assertI2I2(typ *byte, iface any, ret *any) bool
func assertI2T(typ *byte, iface any, ret *any)
func assertI2T2(typ *byte, iface any, ret *any) bool
// panicdottype raises the failed-type-assertion panic; have/want/iface
// look like the runtime type descriptors involved — verify.
func panicdottype(have, want, iface *byte)
// Equality of interface values for the == operator (iface = non-empty,
// eface = empty interface).
func ifaceeq(i1 any, i2 any) (ret bool)
func efaceeq(i1 any, i2 any) (ret bool)
// *byte is really *runtime.Type
// Map runtime calls. mapbuf/bucketbuf look like optional preallocated
// (possibly stack) storage for the header and first bucket — confirm.
// The _fast32/_fast64/_faststr variants take the key by value for those
// key representations; the _fat variants take a pointer to the zero
// value, presumably returned for missing keys — verify.
func makemap(mapType *byte, hint int64, mapbuf *any, bucketbuf *any) (hmap map[any]any)
func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any)
func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
func mapaccess1_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any)
// mapaccess2* are the "comma, ok" lookups: pres reports presence.
func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool)
func mapaccess2_fast32(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
func mapaccess2_fast64(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
// Assignment, deletion, and range-loop iteration over maps; hiter is
// the compiler-known iterator struct — verify its layout elsewhere.
func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
func mapdelete(mapType *byte, hmap map[any]any, key *any)
func mapiternext(hiter *any)
// *byte is really *runtime.Type
// Channel runtime calls. chanrecv2's bool is presumably the "comma, ok"
// received flag (false once the channel is closed and drained) — verify.
func makechan(chanType *byte, hint int64) (hchan chan any)
func chanrecv1(chanType *byte, hchan <-chan any, elem *any)
func chanrecv2(chanType *byte, hchan <-chan any, elem *any) bool
func chansend1(chanType *byte, hchan chan<- any, elem *any)
func closechan(hchan any)
// writeBarrier mirrors the runtime's write-barrier control struct so
// compiled code can test writeBarrier.enabled inline and skip the
// writebarrierptr call when barriers are off. The meanings of needed
// and cgo are not evident from this file — verify in the runtime's GC
// code, and keep the layout in sync with the runtime's declaration.
var writeBarrier struct {
enabled bool
needed bool
cgo bool
}
cmd/internal/gc: inline writeBarrierEnabled check before calling writebarrierptr I believe the benchmarks that get slower are under register pressure, and not making the call unconditionally makes the pressure worse, and the register allocator doesn't do a great job. But part of the point of this sequence is to get the write barriers out of the way so I can work on the register allocator, so that's okay. name old new delta BenchmarkBinaryTree17 17.9s × (1.00,1.01) 18.0s × (0.99,1.01) ~ BenchmarkFannkuch11 4.43s × (1.00,1.00) 4.43s × (1.00,1.00) ~ BenchmarkFmtFprintfEmpty 110ns × (1.00,1.06) 114ns × (0.95,1.05) ~ BenchmarkFmtFprintfString 487ns × (0.99,1.00) 468ns × (0.99,1.01) -4.00% BenchmarkFmtFprintfInt 450ns × (0.99,1.00) 433ns × (1.00,1.01) -3.88% BenchmarkFmtFprintfIntInt 762ns × (1.00,1.00) 748ns × (0.99,1.01) -1.84% BenchmarkFmtFprintfPrefixedInt 584ns × (0.99,1.01) 547ns × (0.99,1.01) -6.26% BenchmarkFmtFprintfFloat 738ns × (1.00,1.00) 756ns × (1.00,1.01) +2.37% BenchmarkFmtManyArgs 2.80µs × (1.00,1.01) 2.79µs × (1.00,1.01) ~ BenchmarkGobDecode 39.0ms × (0.99,1.00) 39.6ms × (0.99,1.00) +1.54% BenchmarkGobEncode 37.8ms × (0.98,1.01) 37.6ms × (1.00,1.01) ~ BenchmarkGzip 661ms × (0.99,1.01) 663ms × (0.99,1.02) ~ BenchmarkGunzip 142ms × (1.00,1.00) 142ms × (1.00,1.00) ~ BenchmarkHTTPClientServer 132µs × (0.99,1.01) 132µs × (0.99,1.01) ~ BenchmarkJSONEncode 56.3ms × (0.99,1.01) 56.2ms × (0.99,1.01) ~ BenchmarkJSONDecode 138ms × (0.99,1.01) 138ms × (1.00,1.00) ~ BenchmarkMandelbrot200 6.01ms × (1.00,1.00) 6.03ms × (1.00,1.01) +0.23% BenchmarkGoParse 10.2ms × (0.87,1.05) 9.8ms × (0.93,1.10) ~ BenchmarkRegexpMatchEasy0_32 208ns × (1.00,1.00) 207ns × (1.00,1.00) ~ BenchmarkRegexpMatchEasy0_1K 588ns × (1.00,1.00) 581ns × (1.00,1.01) -1.27% BenchmarkRegexpMatchEasy1_32 182ns × (0.99,1.01) 185ns × (0.99,1.01) +1.65% BenchmarkRegexpMatchEasy1_1K 986ns × (1.00,1.01) 975ns × (1.00,1.01) -1.17% BenchmarkRegexpMatchMedium_32 323ns × (1.00,1.01) 328ns × (0.99,1.00) +1.55% 
BenchmarkRegexpMatchMedium_1K 89.9µs × (1.00,1.00) 88.6µs × (1.00,1.01) -1.38% BenchmarkRegexpMatchHard_32 4.72µs × (0.95,1.01) 4.69µs × (0.95,1.03) ~ BenchmarkRegexpMatchHard_1K 133µs × (1.00,1.01) 133µs × (1.00,1.01) ~ BenchmarkRevcomp 900ms × (1.00,1.05) 902ms × (0.99,1.05) ~ BenchmarkTemplate 168ms × (0.99,1.01) 174ms × (0.99,1.01) +3.30% BenchmarkTimeParse 637ns × (1.00,1.00) 639ns × (1.00,1.00) +0.31% BenchmarkTimeFormat 738ns × (1.00,1.00) 736ns × (1.00,1.01) ~ Change-Id: I03ce152852edec404538f6c20eb650fac82e2aa2 Reviewed-on: https://go-review.googlesource.com/9224 Reviewed-by: Austin Clements <austin@google.com>
2015-04-24 14:13:06 -04:00
// writebarrierptr performs the GC write barrier for a pointer store
// (*dst = src); the compiler emits the call guarded by an inlined
// writeBarrier.enabled check rather than calling unconditionally.
func writebarrierptr(dst *any, src any)
cmd/internal/gc: inline writeBarrierEnabled check before calling writebarrierptr I believe the benchmarks that get slower are under register pressure, and not making the call unconditionally makes the pressure worse, and the register allocator doesn't do a great job. But part of the point of this sequence is to get the write barriers out of the way so I can work on the register allocator, so that's okay. name old new delta BenchmarkBinaryTree17 17.9s × (1.00,1.01) 18.0s × (0.99,1.01) ~ BenchmarkFannkuch11 4.43s × (1.00,1.00) 4.43s × (1.00,1.00) ~ BenchmarkFmtFprintfEmpty 110ns × (1.00,1.06) 114ns × (0.95,1.05) ~ BenchmarkFmtFprintfString 487ns × (0.99,1.00) 468ns × (0.99,1.01) -4.00% BenchmarkFmtFprintfInt 450ns × (0.99,1.00) 433ns × (1.00,1.01) -3.88% BenchmarkFmtFprintfIntInt 762ns × (1.00,1.00) 748ns × (0.99,1.01) -1.84% BenchmarkFmtFprintfPrefixedInt 584ns × (0.99,1.01) 547ns × (0.99,1.01) -6.26% BenchmarkFmtFprintfFloat 738ns × (1.00,1.00) 756ns × (1.00,1.01) +2.37% BenchmarkFmtManyArgs 2.80µs × (1.00,1.01) 2.79µs × (1.00,1.01) ~ BenchmarkGobDecode 39.0ms × (0.99,1.00) 39.6ms × (0.99,1.00) +1.54% BenchmarkGobEncode 37.8ms × (0.98,1.01) 37.6ms × (1.00,1.01) ~ BenchmarkGzip 661ms × (0.99,1.01) 663ms × (0.99,1.02) ~ BenchmarkGunzip 142ms × (1.00,1.00) 142ms × (1.00,1.00) ~ BenchmarkHTTPClientServer 132µs × (0.99,1.01) 132µs × (0.99,1.01) ~ BenchmarkJSONEncode 56.3ms × (0.99,1.01) 56.2ms × (0.99,1.01) ~ BenchmarkJSONDecode 138ms × (0.99,1.01) 138ms × (1.00,1.00) ~ BenchmarkMandelbrot200 6.01ms × (1.00,1.00) 6.03ms × (1.00,1.01) +0.23% BenchmarkGoParse 10.2ms × (0.87,1.05) 9.8ms × (0.93,1.10) ~ BenchmarkRegexpMatchEasy0_32 208ns × (1.00,1.00) 207ns × (1.00,1.00) ~ BenchmarkRegexpMatchEasy0_1K 588ns × (1.00,1.00) 581ns × (1.00,1.01) -1.27% BenchmarkRegexpMatchEasy1_32 182ns × (0.99,1.01) 185ns × (0.99,1.01) +1.65% BenchmarkRegexpMatchEasy1_1K 986ns × (1.00,1.01) 975ns × (1.00,1.01) -1.17% BenchmarkRegexpMatchMedium_32 323ns × (1.00,1.01) 328ns × (0.99,1.00) +1.55% 
BenchmarkRegexpMatchMedium_1K 89.9µs × (1.00,1.00) 88.6µs × (1.00,1.01) -1.38% BenchmarkRegexpMatchHard_32 4.72µs × (0.95,1.01) 4.69µs × (0.95,1.03) ~ BenchmarkRegexpMatchHard_1K 133µs × (1.00,1.01) 133µs × (1.00,1.01) ~ BenchmarkRevcomp 900ms × (1.00,1.05) 902ms × (0.99,1.05) ~ BenchmarkTemplate 168ms × (0.99,1.01) 174ms × (0.99,1.01) +3.30% BenchmarkTimeParse 637ns × (1.00,1.00) 639ns × (1.00,1.00) +0.31% BenchmarkTimeFormat 738ns × (1.00,1.00) 736ns × (1.00,1.01) ~ Change-Id: I03ce152852edec404538f6c20eb650fac82e2aa2 Reviewed-on: https://go-review.googlesource.com/9224 Reviewed-by: Austin Clements <austin@google.com>
2015-04-24 14:13:06 -04:00
// *byte is really *runtime.Type
// Typed moves/copies carry the element type, presumably so write
// barriers can be applied to pointer-containing elements — verify.
func typedmemmove(typ *byte, dst *any, src *any)
func typedslicecopy(typ *byte, dst any, src any) int
// Non-blocking channel operations; the bool reports whether the
// communication was performed (used for select with a default case,
// presumably — verify the lowering in the compiler's select walk).
func selectnbsend(chanType *byte, hchan chan<- any, elem *any) bool
func selectnbrecv(chanType *byte, elem *any, hchan <-chan any) bool
func selectnbrecv2(chanType *byte, elem *any, received *bool, hchan <-chan any) bool
// General select: newselect initializes a select descriptor of
// selsize bytes for size cases; the selectsend/recv/default calls
// register cases; selectgo executes the select — confirm the exact
// protocol against the runtime's select implementation.
func newselect(sel *byte, selsize int64, size int32)
func selectsend(sel *byte, hchan chan<- any, elem *any) (selected bool)
func selectrecv(sel *byte, hchan <-chan any, elem *any) (selected bool)
func selectrecv2(sel *byte, hchan <-chan any, elem *any, received *bool) (selected bool)
func selectdefault(sel *byte) (selected bool)
func selectgo(sel *byte)
// block parks the calling goroutine forever (presumably emitted for
// an empty select{} — verify).
func block()
// Slice construction and growth; the 64-bit variant accepts len/cap
// values wider than int on 32-bit targets.
func makeslice(typ *byte, len int, cap int) (ary []any)
func makeslice64(typ *byte, len int64, cap int64) (ary []any)
func growslice(typ *byte, old []any, cap int) (ary []any)
// Raw memory move/clear; length is in bytes.
func memmove(to *any, frm *any, length uintptr)
func memclr(ptr *byte, length uintptr)
// Memory equality for ==: a generic byte-count form plus fixed-width
// specializations named by bit width.
func memequal(x, y *any, size uintptr) bool
func memequal8(x, y *any) bool
func memequal16(x, y *any) bool
func memequal32(x, y *any) bool
func memequal64(x, y *any) bool
func memequal128(x, y *any) bool
// only used on 32-bit
// Software fallbacks for 64-bit integer division/modulus and
// float<->64-bit-integer conversions on targets without native support.
func int64div(int64, int64) int64
func uint64div(uint64, uint64) uint64
func int64mod(int64, int64) int64
func uint64mod(uint64, uint64) uint64
func float64toint64(float64) int64
func float64touint64(float64) uint64
func float64touint32(float64) uint32
func int64tofloat64(int64) float64
func uint64tofloat64(uint64) float64
func uint32tofloat64(uint32) float64
// complex128 division has its own runtime helper (special cases around
// infinities/NaN presumably require it — verify).
func complex128div(num complex128, den complex128) (quo complex128)
// race detection
// Instrumentation calls inserted by the compiler under -race;
// arguments are addresses (and byte sizes for the range forms).
func racefuncenter(uintptr)
func racefuncexit()
func raceread(uintptr)
func racewrite(uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
// memory sanitizer
// Instrumentation calls inserted by the compiler under -msan.
func msanread(addr, size uintptr)
func msanwrite(addr, size uintptr)