runtime: fix race builders
Do not run the Syscall benchmarks under the race detector: they split the stack while in syscall status (between Entersyscall and Exitsyscall).

R=golang-dev, rsc
CC=golang-dev
https://golang.org/cl/12093045
parent 3b6de5e847
commit 5c8ad2e13d
2 changed files with 58 additions and 43 deletions
src/pkg/runtime/norace_test.go (new file, 58 additions)
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The file contains tests that can not run under race detector for some reason.
+// +build !race
+
+package runtime_test
+
+import (
+	"runtime"
+	"sync/atomic"
+	"testing"
+)
+
+// Syscall tests split stack between Entersyscall and Exitsyscall under race detector.
+func BenchmarkSyscall(b *testing.B) {
+	benchmarkSyscall(b, 0, 1)
+}
+
+func BenchmarkSyscallWork(b *testing.B) {
+	benchmarkSyscall(b, 100, 1)
+}
+
+func BenchmarkSyscallExcess(b *testing.B) {
+	benchmarkSyscall(b, 0, 4)
+}
+
+func BenchmarkSyscallExcessWork(b *testing.B) {
+	benchmarkSyscall(b, 100, 4)
+}
+
+func benchmarkSyscall(b *testing.B, work, excess int) {
+	const CallsPerSched = 1000
+	procs := runtime.GOMAXPROCS(-1) * excess
+	N := int32(b.N / CallsPerSched)
+	c := make(chan bool, procs)
+	for p := 0; p < procs; p++ {
+		go func() {
+			foo := 42
+			for atomic.AddInt32(&N, -1) >= 0 {
+				runtime.Gosched()
+				for g := 0; g < CallsPerSched; g++ {
+					runtime.Entersyscall()
+					for i := 0; i < work; i++ {
+						foo *= 2
+						foo /= 2
+					}
+					runtime.Exitsyscall()
+				}
+			}
+			c <- foo == 42
+		}()
+	}
+	for p := 0; p < procs; p++ {
+		<-c
+	}
+}
src/pkg/runtime/proc_test.go (43 deletions)

@@ -344,49 +344,6 @@ func BenchmarkStackGrowthDeep(b *testing.B) {
 	benchmarkStackGrowth(b, 1024)
 }
 
-func BenchmarkSyscall(b *testing.B) {
-	benchmarkSyscall(b, 0, 1)
-}
-
-func BenchmarkSyscallWork(b *testing.B) {
-	benchmarkSyscall(b, 100, 1)
-}
-
-func BenchmarkSyscallExcess(b *testing.B) {
-	benchmarkSyscall(b, 0, 4)
-}
-
-func BenchmarkSyscallExcessWork(b *testing.B) {
-	benchmarkSyscall(b, 100, 4)
-}
-
-func benchmarkSyscall(b *testing.B, work, excess int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1) * excess
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 42
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					runtime.Entersyscall()
-					for i := 0; i < work; i++ {
-						foo *= 2
-						foo /= 2
-					}
-					runtime.Exitsyscall()
-				}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
-}
-
 func BenchmarkCreateGoroutines(b *testing.B) {
 	benchmarkCreateGoroutines(b, 1)
 }
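The !race constraint in the new norace_test.go is what keeps these benchmarks off the race builders: the toolchain defines the race build tag when compiling with -race, so the whole file is excluded from go test -race runs while still compiling in ordinary test builds. Below is a minimal sketch of that mechanism; the file name and test name are hypothetical and not part of this change.

// skipped_race_test.go (hypothetical example, not part of this commit).
// The old-style build constraint must appear above the package clause
// and be followed by a blank line. With plain "go test" this file is
// compiled and run; with "go test -race" the toolchain sets the "race"
// tag, the constraint fails, and the file is silently left out.

// +build !race

package example_test

import "testing"

func TestRunsOnlyWithoutRaceDetector(t *testing.T) {
	// Anything here may assume the race runtime is not active.
	t.Log("compiled without -race")
}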