all: single space after period.

The tree's pretty inconsistent about single space vs double space
after a period in documentation. Make it consistently a single space,
per earlier decisions. This means contributors won't be confused by
misleading precedence.

This CL doesn't use go/doc to parse. It only addresses // comments.
It was generated with:

$ perl -i -npe 's,^(\s*// .+[a-z]\.)  +([A-Z]),$1 $2,' $(git grep -l -E '^\s*//(.+\.)  +([A-Z])')
$ go test go/doc -update
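
For illustration only, a rough Go equivalent of that perl substitution (not part of this CL; it assumes the file list found by the git grep above is passed on the command line and rewrites each file in place):

// Illustrative sketch only; the CL itself was generated with the perl
// one-liner above, not with this program.
package main

import (
	"log"
	"os"
	"regexp"
)

// Same pattern as the one-liner: a // comment sentence ending in a lowercase
// letter and a period, then two or more spaces, then a capital letter.
var doubleSpace = regexp.MustCompile(`(?m)^(\s*// .+[a-z]\.)  +([A-Z])`)

func main() {
	for _, path := range os.Args[1:] {
		src, err := os.ReadFile(path)
		if err != nil {
			log.Fatal(err)
		}
		// Collapse the run of spaces after the period to a single space.
		fixed := doubleSpace.ReplaceAll(src, []byte("$1 $2"))
		if err := os.WriteFile(path, fixed, 0644); err != nil {
			log.Fatal(err)
		}
	}
}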

Change-Id: Iccdb99c37c797ef1f804a94b22ba5ee4b500c4f7
Reviewed-on: https://go-review.googlesource.com/20022
Reviewed-by: Rob Pike <r@golang.org>
Reviewed-by: Dave Day <djd@golang.org>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Brad Fitzpatrick 2016-03-01 23:21:55 +00:00
parent 8b4deb448e
commit 5fea2ccc77
536 changed files with 1732 additions and 1732 deletions

@@ -126,7 +126,7 @@ func main() {
})
// Lock the main goroutine onto this, the main OS thread,
-// during initialization.  Most programs won't care, but a few
+// during initialization. Most programs won't care, but a few
// do require certain calls to be made by the main thread.
// Those can arrange for main.main to run in the main thread
// by calling runtime.LockOSThread during initialization
@@ -237,7 +237,7 @@ func forcegchelper() {
//go:nosplit
-// Gosched yields the processor, allowing other goroutines to run.  It does not
+// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
mcall(gosched_m)
@@ -424,7 +424,7 @@ func schedinit() {
sched.maxmcount = 10000
-// Cache the framepointer experiment.  This affects stack unwinding.
+// Cache the framepointer experiment. This affects stack unwinding.
framepointer_enabled = haveexperiment("framepointer")
tracebackinit()
@@ -454,7 +454,7 @@ func schedinit() {
}
if buildVersion == "" {
-// Condition should never trigger.  This code just serves
+// Condition should never trigger. This code just serves
// to ensure runtime·buildVersion is kept in the resulting binary.
buildVersion = "unknown"
}
@@ -1014,7 +1014,7 @@ func startTheWorldWithSema() {
// in the hope that it will be available next time.
// It would have been even better to start it before the collection,
// but doing so requires allocating memory, so it's tricky to
-// coordinate.  This lazy approach works out in practice:
+// coordinate. This lazy approach works out in practice:
// we don't mind if the first couple gc rounds don't have quite
// the maximum number of procs.
newm(mhelpgc, nil)
@@ -1470,7 +1470,7 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
-// Create a new m.  It will start off with a call to fn, or else the scheduler.
+// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
@@ -2641,7 +2641,7 @@ func newproc(siz int32, fn *funcval) {
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
-// address of the go statement that created this.  The new g is put
+// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
_g_ := getg()
@@ -2792,7 +2792,7 @@ retry:
_p_.gfree = gp.schedlink.ptr()
_p_.gfreecnt--
if gp.stack.lo == 0 {
-// Stack was deallocated in gfput.  Allocate a new one.
+// Stack was deallocated in gfput. Allocate a new one.
systemstack(func() {
gp.stack, gp.stkbar = stackalloc(_FixedStack)
})
@@ -3128,7 +3128,7 @@ func setcpuprofilerate_m(hz int32) {
_g_.m.locks--
}
-// Change number of processors.  The world is stopped, sched is locked.
+// Change number of processors. The world is stopped, sched is locked.
// gcworkbufs are not being modified by either the GC or
// the write barrier code.
// Returns list of Ps with local work, they need to be scheduled by the caller.
@@ -3334,7 +3334,7 @@ func incidlelocked(v int32) {
// The check is based on number of running M's, if 0 -> deadlock.
func checkdead() {
// For -buildmode=c-shared or -buildmode=c-archive it's OK if
-// there are no running goroutines.  The calling program is
+// there are no running goroutines. The calling program is
// assumed to be running.
if islibrary || isarchive {
return
@@ -3579,7 +3579,7 @@ func retake(now int64) uint32 {
}
// Tell all goroutines that they have been preempted and they should stop.
-// This function is purely best-effort.  It can fail to inform a goroutine if a
+// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.
@@ -3598,8 +3598,8 @@ func preemptall() bool {
}
// Tell the goroutine running on processor P to stop.
-// This function is purely best-effort.  It can incorrectly fail to inform the
-// goroutine.  It can send inform the wrong goroutine. Even if it informs the
+// This function is purely best-effort. It can incorrectly fail to inform the
+// goroutine. It can send inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.