[dev.regabi] all: merge master (1d78139) into dev.regabi

Merge List:

+ 2020-12-26 1d78139128 runtime/cgo: fix Android build with NDK 22
+ 2020-12-25 2018b68a65 net/mail: don't use MDT in test
+ 2020-12-23 b116404444 runtime: shift timeHistogram buckets and allow negative durations
+ 2020-12-23 8db7e2fecd runtime: fix allocs-by-size and frees-by-size buckets
+ 2020-12-23 fb96f07e1a runtime: fix nStackRoots comment about stack roots
+ 2020-12-23 d1502b3c72 lib/time, time/tzdata: update tzdata to 2020e
+ 2020-12-23 30c99cbb7a cmd/go: add the Retract field to 'go help mod edit' definition of the GoMod struct
+ 2020-12-23 49d0b239cb doc: fix a typo in contribute.html
+ 2020-12-23 98a73030b0 cmd/go: in 'go get', promote named implicit dependencies to explicit
+ 2020-12-23 fd6ba1c8a2 os/signal: fix a deadlock with syscall.AllThreadsSyscall() use
+ 2020-12-23 b0b0d98283 runtime: linux iscgo support for not blocking nptl signals
+ 2020-12-22 223331fc0c cmd/go/internal/modload: add hint for missing implicit dependency

Change-Id: I76d79f17c546cab03fab1facc36cc3f834d9d126
commit 07569dac4e
Author: Matthew Dempsky
Date:   2020-12-28 00:12:06 -08:00

33 changed files with 7252 additions and 6948 deletions


@@ -1129,7 +1129,7 @@ sometimes required because the standard library code you're modifying
 might require a newer version than the stable one you have installed).
 <pre>
-$ cd $GODIR/src/hash/sha1
+$ cd $GODIR/src/crypto/sha1
 $ [make changes...]
 $ $GODIR/bin/go test .
 </pre>


@@ -8,8 +8,8 @@
 # Consult https://www.iana.org/time-zones for the latest versions.

 # Versions to use.
-CODE=2020d
-DATA=2020d
+CODE=2020e
+DATA=2020e

 set -e
 rm -rf work

Binary file not shown.


@@ -1192,6 +1192,7 @@
 //     Require []Require
 //     Exclude []Module
 //     Replace []Replace
+//     Retract []Retract
 //     }
 //
 //     type Require struct {


@@ -95,6 +95,7 @@ writing it back to go.mod. The JSON output corresponds to these Go types:
     Require []Require
     Exclude []Module
     Replace []Replace
+    Retract []Retract
 }

 type Require struct {
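
For reference, the retract directive that this new Retract field surfaces looks like the following in a go.mod file (the module path and versions here are invented for illustration; 'go mod edit -json' then reports a Retract array alongside Require, Exclude, and Replace):

    module example.com/m

    go 1.16

    retract (
        v1.0.1 // published accidentally
        [v1.1.0, v1.1.2] // versions with a known bug
    )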


@@ -28,6 +28,11 @@ import (
 //
 var buildList []module.Version

+// additionalExplicitRequirements is a list of module paths for which
+// WriteGoMod should record explicit requirements, even if they would be
+// selected without those requirements. Each path must also appear in buildList.
+var additionalExplicitRequirements []string
+
 // capVersionSlice returns s with its cap reduced to its length.
 func capVersionSlice(s []module.Version) []module.Version {
     return s[:len(s):len(s)]
@@ -121,6 +126,12 @@ func EditBuildList(ctx context.Context, add, mustSelect []module.Version) error
     if !inconsistent {
         buildList = final
+        additionalExplicitRequirements = make([]string, 0, len(mustSelect))
+        for _, m := range mustSelect {
+            if m.Version != "none" {
+                additionalExplicitRequirements = append(additionalExplicitRequirements, m.Path)
+            }
+        }
         return nil
     }


@@ -15,6 +15,7 @@ import (
     "os"
     "path"
     "path/filepath"
+    "sort"
     "strconv"
     "strings"
     "sync"
@@ -27,6 +28,7 @@ import (
     "cmd/go/internal/modfetch"
     "cmd/go/internal/mvs"
     "cmd/go/internal/search"
+    "cmd/go/internal/str"

     "golang.org/x/mod/modfile"
     "golang.org/x/mod/module"
@@ -845,13 +847,15 @@ func AllowWriteGoMod() {
 // MinReqs returns a Reqs with minimal additional dependencies of Target,
 // as will be written to go.mod.
 func MinReqs() mvs.Reqs {
-    var retain []string
+    retain := append([]string{}, additionalExplicitRequirements...)
     for _, m := range buildList[1:] {
         _, explicit := index.require[m]
         if explicit || loaded.direct[m.Path] {
             retain = append(retain, m.Path)
         }
     }
+    sort.Strings(retain)
+    str.Uniq(&retain)
     min, err := mvs.Req(Target, retain, &mvsReqs{buildList: buildList})
     if err != nil {
         base.Fatalf("go: %v", err)


@@ -863,12 +863,21 @@ func loadFromRoots(params loaderParams) *loader {
     for _, pkg := range ld.pkgs {
         if pkg.mod == Target {
             for _, dep := range pkg.imports {
-                if dep.mod.Path != "" {
+                if dep.mod.Path != "" && dep.mod.Path != Target.Path && index != nil {
+                    _, explicit := index.require[dep.mod]
+                    if allowWriteGoMod && cfg.BuildMod == "readonly" && !explicit {
+                        // TODO(#40775): attach error to package instead of using
+                        // base.Errorf. Ideally, 'go list' should not fail because of this,
+                        // but today, LoadPackages calls WriteGoMod unconditionally, which
+                        // would fail with a less clear message.
+                        base.Errorf("go: %[1]s: package %[2]s imported from implicitly required module; try 'go get -d %[1]s' to add missing requirements", pkg.path, dep.path)
+                    }
                     ld.direct[dep.mod.Path] = true
                 }
             }
         }
     }
+    base.ExitIfErrors()

     // If we didn't scan all of the imports from the main module, or didn't use
     // imports.AnyTags, then we didn't necessarily load every package that


@@ -21,6 +21,7 @@ import (
     "cmd/go/internal/imports"
     "cmd/go/internal/modfetch"
     "cmd/go/internal/search"
+    "cmd/go/internal/str"
     "cmd/go/internal/trace"

     "golang.org/x/mod/module"
@@ -1005,13 +1006,8 @@ func (rr *replacementRepo) Versions(prefix string) ([]string, error) {
     sort.Slice(versions, func(i, j int) bool {
         return semver.Compare(versions[i], versions[j]) < 0
     })
-    uniq := versions[:1]
-    for _, v := range versions {
-        if v != uniq[len(uniq)-1] {
-            uniq = append(uniq, v)
-        }
-    }
-    return uniq, nil
+    str.Uniq(&versions)
+    return versions, nil
 }

 func (rr *replacementRepo) Stat(rev string) (*modfetch.RevInfo, error) {


@@ -96,6 +96,20 @@ func Contains(x []string, s string) bool {
     return false
 }

+// Uniq removes consecutive duplicate strings from ss.
+func Uniq(ss *[]string) {
+    if len(*ss) <= 1 {
+        return
+    }
+    uniq := (*ss)[:1]
+    for _, s := range *ss {
+        if s != uniq[len(uniq)-1] {
+            uniq = append(uniq, s)
+        }
+    }
+    *ss = uniq
+}
+
 func isSpaceByte(c byte) bool {
     return c == ' ' || c == '\t' || c == '\n' || c == '\r'
 }
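
Note that str.Uniq only removes consecutive duplicates, so callers are expected to sort first, as the MinReqs change above does with sort.Strings. A minimal standalone sketch of that pattern (the Uniq body is copied from the diff; the example data is made up):

    package main

    import (
        "fmt"
        "sort"
    )

    // Uniq removes consecutive duplicate strings from ss.
    func Uniq(ss *[]string) {
        if len(*ss) <= 1 {
            return
        }
        uniq := (*ss)[:1]
        for _, s := range *ss {
            if s != uniq[len(uniq)-1] {
                uniq = append(uniq, s)
            }
        }
        *ss = uniq
    }

    func main() {
        paths := []string{"b", "a", "b", "a"}
        sort.Strings(paths)
        Uniq(&paths)
        fmt.Println(paths) // [a b]
    }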


@@ -0,0 +1,88 @@
+cp go.mod.orig go.mod
+
+# If we list a package in an implicit dependency imported from the main module,
+# we should get an error because the dependency should have an explicit
+# requirement.
+go list -m indirect-with-pkg
+stdout '^indirect-with-pkg v1.0.0 => ./indirect-with-pkg$'
+! go list ./use-indirect
+stderr '^go: m/use-indirect: package indirect-with-pkg imported from implicitly required module; try ''go get -d m/use-indirect'' to add missing requirements$'
+
+# We can promote the implicit requirement by getting the importing package,
+# as hinted.
+go get -d m/use-indirect
+cmp go.mod go.mod.use
+cp go.mod.orig go.mod
+
+# We can also promote implicit requirements using 'go get' on them, or their
+# packages. This gives us "// indirect" requirements, since 'go get' doesn't
+# know they're needed by the main module. See #43131 for the rationale.
+go get -d indirect-with-pkg indirect-without-pkg
+cmp go.mod go.mod.indirect
+
+-- go.mod.orig --
+module m
+
+go 1.16
+
+require direct v1.0.0
+
+replace (
+    direct v1.0.0 => ./direct
+    indirect-with-pkg v1.0.0 => ./indirect-with-pkg
+    indirect-without-pkg v1.0.0 => ./indirect-without-pkg
+)
+-- go.mod.use --
+module m
+
+go 1.16
+
+require (
+    direct v1.0.0
+    indirect-with-pkg v1.0.0
+)
+
+replace (
+    direct v1.0.0 => ./direct
+    indirect-with-pkg v1.0.0 => ./indirect-with-pkg
+    indirect-without-pkg v1.0.0 => ./indirect-without-pkg
+)
+-- go.mod.indirect --
+module m
+
+go 1.16
+
+require (
+    direct v1.0.0
+    indirect-with-pkg v1.0.0 // indirect
+    indirect-without-pkg v1.0.0 // indirect
+)
+
+replace (
+    direct v1.0.0 => ./direct
+    indirect-with-pkg v1.0.0 => ./indirect-with-pkg
+    indirect-without-pkg v1.0.0 => ./indirect-without-pkg
+)
+-- use-indirect/use-indirect.go --
+package use
+
+import _ "indirect-with-pkg"
+-- direct/go.mod --
+module direct
+
+go 1.16
+
+require (
+    indirect-with-pkg v1.0.0
+    indirect-without-pkg v1.0.0
+)
+-- indirect-with-pkg/go.mod --
+module indirect-with-pkg
+
+go 1.16
+-- indirect-with-pkg/p.go --
+package p
+-- indirect-without-pkg/go.mod --
+module indirect-without-pkg
+
+go 1.16


@@ -107,8 +107,8 @@ func TestDateParsing(t *testing.T) {
             time.Date(1997, 11, 20, 9, 55, 6, 0, time.FixedZone("", -6*60*60)),
         },
         {
-            "Thu, 20 Nov 1997 09:55:06 MDT (MDT)",
-            time.Date(1997, 11, 20, 9, 55, 6, 0, time.FixedZone("MDT", 0)),
+            "Thu, 20 Nov 1997 09:55:06 GMT (GMT)",
+            time.Date(1997, 11, 20, 9, 55, 6, 0, time.UTC),
         },
         {
             "Fri, 21 Nov 1997 09:55:06 +1300 (TOT)",
@@ -278,8 +278,8 @@ func TestDateParsingCFWS(t *testing.T) {
             true,
         },
         {
-            "Fri, 21 Nov 1997 09:55:06 MDT (MDT)",
-            time.Date(1997, 11, 21, 9, 55, 6, 0, time.FixedZone("MDT", 0)),
+            "Fri, 21 Nov 1997 09:55:06 GMT (GMT)",
+            time.Date(1997, 11, 21, 9, 55, 6, 0, time.UTC),
             true,
         },
     }
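
The motivation for dropping MDT here: time.Parse resolves a zone abbreviation against the local zone database, so "MDT" yields a real -6h offset on machines in US Mountain Time but a fabricated zero-offset zone elsewhere, making the expected value machine-dependent; GMT parses identically everywhere. A small sketch of the difference (output for MDT depends on the machine running it):

    package main

    import (
        "fmt"
        "net/mail"
    )

    func main() {
        for _, s := range []string{
            "Thu, 20 Nov 1997 09:55:06 MDT",
            "Thu, 20 Nov 1997 09:55:06 GMT",
        } {
            t, err := mail.ParseDate(s)
            if err != nil {
                fmt.Println(err)
                continue
            }
            name, offset := t.Zone()
            fmt.Println(name, offset) // MDT's offset varies by machine; GMT is always 0
        }
    }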


@@ -0,0 +1,42 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package signal
+
+import (
+    "os"
+    "syscall"
+    "testing"
+    "time"
+)
+
+const prSetKeepCaps = 8
+
+// This test validates that syscall.AllThreadsSyscall() can reliably
+// reach all 'm' (threads) of the nocgo runtime even when one thread
+// is blocked waiting to receive signals from the kernel. This monitors
+// for a regression vs. the fix for #43149.
+func TestAllThreadsSyscallSignals(t *testing.T) {
+    if _, _, err := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepCaps, 0, 0); err == syscall.ENOTSUP {
+        t.Skip("AllThreadsSyscall disabled with cgo")
+    }
+
+    sig := make(chan os.Signal, 1)
+    Notify(sig, os.Interrupt)
+
+    for i := 0; i <= 100; i++ {
+        if _, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepCaps, uintptr(i&1), 0); errno != 0 {
+            t.Fatalf("[%d] failed to set KEEP_CAPS=%d: %v", i, i&1, errno)
+        }
+    }
+
+    select {
+    case <-time.After(10 * time.Millisecond):
+    case <-sig:
+        t.Fatal("unexpected signal")
+    }
+
+    Stop(sig)
+}
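
For context, syscall.AllThreadsSyscall (Linux-only, new in Go 1.16) is the exported entry point this test exercises: it runs the given raw syscall on every thread the Go runtime owns. A minimal sketch of a direct call, reusing the prctl constant from the test above:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        const prSetKeepCaps = 8 // PR_SET_KEEP_CAPS
        // Apply prctl(PR_SET_KEEP_CAPS, 1) on all runtime threads.
        // With cgo enabled this returns ENOTSUP, which is why the
        // test above skips in that configuration.
        if _, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepCaps, 1, 0); errno != 0 {
            fmt.Println("prctl failed:", errno)
            return
        }
        fmt.Println("KEEP_CAPS set on all threads")
    }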


@@ -12,7 +12,7 @@ static void *threadentry(void*);
 static void (*setg_gcc)(void*);

 // This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));

 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)


@@ -14,7 +14,7 @@ static void* threadentry(void*);
 static void (*setg_gcc)(void*);

 // This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));

 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)


@@ -10,7 +10,7 @@
 static void *threadentry(void*);

-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
 static void (*setg_gcc)(void*);

 void


@@ -12,7 +12,7 @@
 static void *threadentry(void*);

-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
 static void (*setg_gcc)(void*);

 void


@@ -1201,12 +1201,12 @@ type TimeHistogram timeHistogram

 // Counts returns the counts for the given bucket, subBucket indices.
 // Returns true if the bucket was valid, otherwise returns the counts
-// for the overflow bucket and false.
+// for the underflow bucket and false.
 func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
     t := (*timeHistogram)(th)
     i := bucket*TimeHistNumSubBuckets + subBucket
     if i >= uint(len(t.counts)) {
-        return t.overflow, false
+        return t.underflow, false
     }
     return t.counts[i], true
 }


@@ -69,17 +69,15 @@ const (
 // for concurrent use. It is also safe to read all the values
 // atomically.
 type timeHistogram struct {
-    counts   [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
-    overflow uint64
+    counts    [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+    underflow uint64
 }

 // record adds the given duration to the distribution.
-//
-// Although the duration is an int64 to facilitate ease-of-use
-// with e.g. nanotime, the duration must be non-negative.
 func (h *timeHistogram) record(duration int64) {
     if duration < 0 {
-        throw("timeHistogram encountered negative duration")
+        atomic.Xadd64(&h.underflow, 1)
+        return
     }
@@ -92,15 +90,17 @@ func (h *timeHistogram) record(duration int64) {
         superBucket = uint(sys.Len64(uint64(duration))) - timeHistSubBucketBits
         if superBucket*timeHistNumSubBuckets >= uint(len(h.counts)) {
             // The bucket index we got is larger than what we support, so
-            // add into the special overflow bucket.
-            atomic.Xadd64(&h.overflow, 1)
-            return
+            // include this count in the highest bucket, which extends to
+            // infinity.
+            superBucket = timeHistNumSuperBuckets - 1
+            subBucket = timeHistNumSubBuckets - 1
+        } else {
+            // The linear subbucket index is just the timeHistSubBucketsBits
+            // bits after the top bit. To extract that value, shift down
+            // the duration such that we leave the top bit and the next bits
+            // intact, then extract the index.
+            subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
         }
-        // The linear subbucket index is just the timeHistSubBucketsBits
-        // bits after the top bit. To extract that value, shift down
-        // the duration such that we leave the top bit and the next bits
-        // intact, then extract the index.
-        subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
     } else {
         subBucket = uint(duration)
     }
@@ -128,7 +128,7 @@ func timeHistogramMetricsBuckets() []float64 {
         // index to combine it with the bucketMin.
         subBucketShift := uint(0)
         if i > 1 {
-            // The first two buckets are exact with respect to integers,
+            // The first two super buckets are exact with respect to integers,
             // so we'll never have to shift the sub-bucket index. Thereafter,
             // we shift up by 1 with each subsequent bucket.
             subBucketShift = uint(i - 2)
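
To make the new bucketing concrete, here is a standalone sketch of the post-change record logic: negative durations count into the underflow bucket, and durations past the last super bucket saturate into the highest (super, sub) pair instead of a separate overflow counter. The constants are illustrative stand-ins, not the runtime's real values:

    package main

    import (
        "fmt"
        "math/bits"
    )

    const (
        subBucketBits   = 4 // stand-in for timeHistSubBucketBits
        numSubBuckets   = 1 << subBucketBits
        numSuperBuckets = 10 // stand-in for timeHistNumSuperBuckets
    )

    // bucketFor returns the (super, sub) bucket for a duration, or
    // underflow=true for negative durations.
    func bucketFor(duration int64) (super, sub int, underflow bool) {
        if duration < 0 {
            return 0, 0, true
        }
        if duration >= numSubBuckets {
            super = bits.Len64(uint64(duration)) - subBucketBits
            if super >= numSuperBuckets {
                // Saturate into the highest bucket, which extends to infinity.
                super = numSuperBuckets - 1
                sub = numSubBuckets - 1
            } else {
                // The sub bucket is the subBucketBits bits after the top bit.
                sub = int((duration >> (super - 1)) % numSubBuckets)
            }
        } else {
            sub = int(duration)
        }
        return super, sub, false
    }

    func main() {
        for _, d := range []int64{-5, 3, 100, 1 << 62} {
            super, sub, uf := bucketFor(d)
            fmt.Printf("d=%d -> super=%d sub=%d underflow=%v\n", d, super, sub, uf)
        }
    }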


@@ -5,6 +5,7 @@
 package runtime_test

 import (
+    "math"
     . "runtime"
     "testing"
 )
@@ -32,8 +33,8 @@ func TestTimeHistogram(t *testing.T) {
             h.Record(base + v)
         }
     }
-    // Hit the overflow bucket.
-    h.Record(int64(^uint64(0) >> 1))
+    // Hit the underflow bucket.
+    h.Record(int64(-1))

     // Check to make sure there's exactly one count in each
     // bucket.
@@ -41,7 +42,7 @@ func TestTimeHistogram(t *testing.T) {
         for j := uint(0); j < TimeHistNumSubBuckets; j++ {
             c, ok := h.Count(i, j)
             if !ok {
-                t.Errorf("hit overflow bucket unexpectedly: (%d, %d)", i, j)
+                t.Errorf("hit underflow bucket unexpectedly: (%d, %d)", i, j)
             } else if c != 1 {
                 t.Errorf("bucket (%d, %d) has count that is not 1: %d", i, j, c)
             }
@@ -49,10 +50,21 @@ func TestTimeHistogram(t *testing.T) {
     }
     c, ok := h.Count(TimeHistNumSuperBuckets, 0)
     if ok {
-        t.Errorf("expected to hit overflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
+        t.Errorf("expected to hit underflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
     }
     if c != 1 {
-        t.Errorf("overflow bucket has count that is not 1: %d", c)
+        t.Errorf("underflow bucket has count that is not 1: %d", c)
     }
+
+    // Check overflow behavior.
+    // By hitting a high value, we should just be adding into the highest bucket.
+    h.Record(math.MaxInt64)
+    c, ok = h.Count(TimeHistNumSuperBuckets-1, TimeHistNumSubBuckets-1)
+    if !ok {
+        t.Error("hit underflow bucket in highest bucket unexpectedly")
+    } else if c != 2 {
+        t.Errorf("highest has count that is not 2: %d", c)
+    }
+
     dummyTimeHistogram = TimeHistogram{}
 }


@@ -43,7 +43,18 @@ func initMetrics() {
     }
     sizeClassBuckets = make([]float64, _NumSizeClasses)
     for i := range sizeClassBuckets {
-        sizeClassBuckets[i] = float64(class_to_size[i])
+        // Size classes have an inclusive upper-bound
+        // and exclusive lower bound (e.g. 48-byte size class is
+        // (32, 48]) whereas we want an inclusive lower-bound
+        // and exclusive upper-bound (e.g. 48-byte size class is
+        // [33, 49). We can achieve this by shifting all bucket
+        // boundaries up by 1.
+        //
+        // Also, a float64 can precisely represent integers with
+        // value up to 2^53 and size classes are relatively small
+        // (nowhere near 2^48 even) so this will give us exact
+        // boundaries.
+        sizeClassBuckets[i] = float64(class_to_size[i] + 1)
     }
     timeHistBuckets = timeHistogramMetricsBuckets()
     metrics = map[string]metricData{
@@ -105,9 +116,9 @@ func initMetrics() {
         "/gc/pauses:seconds": {
             compute: func(_ *statAggregate, out *metricValue) {
                 hist := out.float64HistOrInit(timeHistBuckets)
-                hist.counts[len(hist.counts)-1] = atomic.Load64(&memstats.gcPauseDist.overflow)
+                hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
                 for i := range hist.buckets {
-                    hist.counts[i] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+                    hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
                 }
             },
         },
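
The boundary shift is easiest to see with numbers: runtime size classes are (lo, hi] intervals keyed by their upper bound, while runtime/metrics histogram buckets are [lo, hi), so exporting each class size plus one lines the two up exactly. A small sketch with made-up class sizes (not the real class_to_size table):

    package main

    import "fmt"

    func main() {
        // Illustrative size-class upper bounds.
        classToSize := []uint32{0, 8, 16, 32, 48}

        // Shift every boundary up by one so the exported buckets are
        // [lo+1, hi+1), matching the classes' (lo, hi] semantics.
        buckets := make([]float64, len(classToSize))
        for i, s := range classToSize {
            buckets[i] = float64(s + 1)
        }
        fmt.Println(buckets) // [1 9 17 33 49]
        // e.g. a 33-byte object is in the 48-byte class, i.e. bucket [33, 49).
    }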


@@ -154,6 +154,12 @@ func TestReadMetricsConsistency(t *testing.T) {
     if totalVirtual.got != totalVirtual.want {
         t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want)
     }
+    if objects.alloc.Counts[0] > 0 {
+        t.Error("found counts for objects of non-positive size in allocs-by-size")
+    }
+    if objects.free.Counts[0] > 0 {
+        t.Error("found counts for objects of non-positive size in frees-by-size")
+    }
     if len(objects.alloc.Buckets) != len(objects.free.Buckets) {
         t.Error("allocs-by-size and frees-by-size buckets don't match in length")
     } else if len(objects.alloc.Counts) != len(objects.free.Counts) {


@@ -101,8 +101,7 @@ func gcMarkRootPrepare() {
     // Gs may be created after this point, but it's okay that we
     // ignore them because they begin life without any roots, so
     // there's nothing to scan, and any roots they create during
-    // the concurrent phase will be scanned during mark
-    // termination.
+    // the concurrent phase will be caught by the write barrier.
     work.nStackRoots = int(atomic.Loaduintptr(&allglen))

     work.markrootNext = 0


@@ -72,7 +72,7 @@ func clearSignalHandlers() {
 }

 //go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
 }

 // Called to initialize a new m (including the bootstrap m).


@@ -301,6 +301,24 @@ func getHugePageSize() uintptr {
 func osinit() {
     ncpu = getproccount()
     physHugePageSize = getHugePageSize()
+    if iscgo {
+        // #42494 glibc and musl reserve some signals for
+        // internal use and require they not be blocked by
+        // the rest of a normal C runtime. When the go runtime
+        // blocks...unblocks signals, temporarily, the blocked
+        // interval of time is generally very short. As such,
+        // these expectations of *libc code are mostly met by
+        // the combined go+cgo system of threads. However,
+        // when go causes a thread to exit, via a return from
+        // mstart(), the combined runtime can deadlock if
+        // these signals are blocked. Thus, don't block these
+        // signals when exiting threads.
+        // - glibc: SIGCANCEL (32), SIGSETXID (33)
+        // - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
+        sigdelset(&sigsetAllExiting, 32)
+        sigdelset(&sigsetAllExiting, 33)
+        sigdelset(&sigsetAllExiting, 34)
+    }
     osArchInit()
 }


@@ -195,7 +195,7 @@ func msigrestore(sigmask sigset) {
 func clearSignalHandlers() {
 }

-func sigblock() {
+func sigblock(exiting bool) {
 }

 // Called to initialize a new m (including the bootstrap m).


@@ -886,7 +886,7 @@ func clearSignalHandlers() {
 }

 //go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
 }

 // Called to initialize a new m (including the bootstrap m).


@@ -1313,7 +1313,7 @@ func mexit(osStack bool) {
         throw("locked m0 woke up")
     }

-    sigblock()
+    sigblock(true)
     unminit()

     // Free the gsignal stack.
@@ -1515,6 +1515,7 @@ func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
     if netpollinited() {
         netpollBreak()
     }
+    sigRecvPrepareForFixup()
     _g_ := getg()
     if raceenabled {
         // For m's running without racectx, we loan out the
@@ -1754,7 +1755,7 @@ func needm() {
     // starting a new m to run Go code via newosproc.
     var sigmask sigset
     sigsave(&sigmask)
-    sigblock()
+    sigblock(false)

     // Lock extra list, take head, unlock popped list.
     // nilokay=false is safe here because of the invariant above,
@@ -1903,7 +1904,7 @@ func dropm() {
     // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
     // It's important not to try to handle a signal between those two steps.
     sigmask := mp.sigmask
-    sigblock()
+    sigblock(false)
     unminit()

     mnext := lockextra(true)
@@ -3776,7 +3777,7 @@ func beforefork() {
     // group. See issue #18600.
     gp.m.locks++
     sigsave(&gp.m.sigmask)
-    sigblock()
+    sigblock(false)

     // This function is called before fork in syscall package.
     // Code between fork and exec must not allocate memory nor even try to grow stack.


@@ -1042,15 +1042,26 @@ func msigrestore(sigmask sigset) {
     sigprocmask(_SIG_SETMASK, &sigmask, nil)
 }

-// sigblock blocks all signals in the current thread's signal mask.
+// sigsetAllExiting is used by sigblock(true) when a thread is
+// exiting. sigset_all is defined in OS specific code, and per GOOS
+// behavior may override this default for sigsetAllExiting: see
+// osinit().
+var sigsetAllExiting = sigset_all
+
+// sigblock blocks signals in the current thread's signal mask.
 // This is used to block signals while setting up and tearing down g
-// when a non-Go thread calls a Go function.
-// The OS-specific code is expected to define sigset_all.
+// when a non-Go thread calls a Go function. When a thread is exiting
+// we use the sigsetAllExiting value, otherwise the OS specific
+// definition of sigset_all is used.
 // This is nosplit and nowritebarrierrec because it is called by needm
 // which may be called on a non-Go thread with no g available.
 //go:nosplit
 //go:nowritebarrierrec
-func sigblock() {
+func sigblock(exiting bool) {
+    if exiting {
+        sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
+        return
+    }
     sigprocmask(_SIG_SETMASK, &sigset_all, nil)
 }


@@ -12,12 +12,16 @@
 // sigsend is called by the signal handler to queue a new signal.
 // signal_recv is called by the Go program to receive a newly queued signal.
 // Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending.
+// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
 // sigReceiving means that signal_recv is blocked on sig.Note and there are no
 // new pending signals.
 // sigSending means that sig.mask *may* contain new pending signals,
 // signal_recv can't be blocked in this state.
 // sigIdle means that there are no new pending signals and signal_recv is not blocked.
+// sigFixup is a transient state that can only exist as a short
+// transition from sigReceiving and then on to sigIdle: it is
+// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
+// occurs on the sleeping m, waiting to receive a signal.
 // Transitions between states are done atomically with CAS.
 // When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
 // If several sigsends and signal_recv execute concurrently, it can lead to
@@ -59,6 +63,7 @@ const (
     sigIdle = iota
     sigReceiving
     sigSending
+    sigFixup
 )

 // sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -112,6 +117,9 @@ Send:
                 notewakeup(&sig.note)
                 break Send
             }
+        case sigFixup:
+            // nothing to do - we need to wait for sigIdle.
+            osyield()
         }
     }

@@ -119,6 +127,19 @@ Send:
     return true
 }

+// sigRecvPrepareForFixup is used to temporarily wake up the
+// signal_recv() running thread while it is blocked waiting for the
+// arrival of a signal. If it causes the thread to wake up, the
+// sig.state travels through this sequence: sigReceiving -> sigFixup
+// -> sigIdle -> sigReceiving and resumes. (This is only called while
+// GC is disabled.)
+//go:nosplit
+func sigRecvPrepareForFixup() {
+    if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
+        notewakeup(&sig.note)
+    }
+}
+
 // Called to receive the next queued signal.
 // Must only be called from a single goroutine at a time.
 //go:linkname signal_recv os/signal.signal_recv
@@ -146,7 +167,16 @@ func signal_recv() uint32 {
             }
             notetsleepg(&sig.note, -1)
             noteclear(&sig.note)
-            break Receive
+            if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
+                break Receive
+            }
+            // Getting here, the code will
+            // loop around again to sleep
+            // in state sigReceiving. This
+            // path is taken when
+            // sigRecvPrepareForFixup()
+            // has been called by another
+            // thread.
         }
     case sigSending:
         if atomic.Cas(&sig.state, sigSending, sigIdle) {
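
As a toy model of the new state machine (sync/atomic in place of runtime/internal/atomic, and prints in place of notewakeup/notetsleepg), the fixup path is just two CAS steps around the receiver's sleep:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const (
        sigIdle uint32 = iota
        sigReceiving
        sigSending
        sigFixup
    )

    var state uint32 = sigReceiving // pretend signal_recv is asleep

    // prepareForFixup mirrors sigRecvPrepareForFixup: it only fires if
    // the receiver is actually blocked in sigReceiving.
    func prepareForFixup() {
        if atomic.CompareAndSwapUint32(&state, sigReceiving, sigFixup) {
            fmt.Println("woke receiver for fixup")
        }
    }

    func main() {
        prepareForFixup()
        // The awakened receiver finds sigFixup instead of a real signal,
        // steps to sigIdle, then loops back to sleep in sigReceiving.
        if atomic.CompareAndSwapUint32(&state, sigFixup, sigIdle) {
            atomic.StoreUint32(&state, sigReceiving)
            fmt.Println("fixup handled; receiver back asleep")
        }
    }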


@@ -92,6 +92,13 @@ func sendNote(s *byte) bool {
     return true
 }

+// sigRecvPrepareForFixup is a no-op on plan9. (This would only be
+// called while GC is disabled.)
+//
+//go:nosplit
+func sigRecvPrepareForFixup() {
+}
+
 // Called to receive the next queued signal.
 // Must only be called from a single goroutine at a time.
 //go:linkname signal_recv os/signal.signal_recv


@@ -597,6 +597,14 @@ func compareStatus(filter, expect string) error {
     return nil
 }

+// killAThread locks the goroutine to an OS thread and exits; this
+// causes an OS thread to terminate.
+func killAThread(c <-chan struct{}) {
+    runtime.LockOSThread()
+    <-c
+    return
+}
+
 // TestSetuidEtc performs tests on all of the wrapped system calls
 // that mirror to the 9 glibc syscalls with POSIX semantics. The test
 // here is considered authoritative and should compile and run
@@ -647,6 +655,11 @@ func TestSetuidEtc(t *testing.T) {
     }

     for i, v := range vs {
+        // Generate some thread churn as we execute the tests.
+        c := make(chan struct{})
+        go killAThread(c)
+        close(c)
+
         if err := v.fn(); err != nil {
             t.Errorf("[%d] %q failed: %v", i, v.call, err)
             continue
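
The churn works because a goroutine that exits while still locked to its OS thread (LockOSThread with no matching UnlockOSThread) causes that thread to be terminated rather than reused, a documented runtime behavior since Go 1.10. A minimal standalone sketch:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        done := make(chan struct{})
        go func() {
            runtime.LockOSThread()
            // Returning while still locked terminates this OS thread.
            close(done)
        }()
        <-done
        fmt.Println("goroutine exited; its locked OS thread will be killed")
    }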

File diff suppressed because it is too large.