mirror of https://github.com/golang/go.git
all: omit unnecessary type conversions
Found by github.com/mdempsky/unconvert
Change-Id: Ib78cceb718146509d96dbb6da87b27dbaeba1306
GitHub-Last-Rev: dedf354811
GitHub-Pull-Request: golang/go#74771
Reviewed-on: https://go-review.googlesource.com/c/go/+/690735
Reviewed-by: Mark Freeman <mark@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent 4569255f8c
commit e151db3e06
20 changed files with 41 additions and 41 deletions
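
For readers unfamiliar with the tool: unconvert reports type conversions whose operand already has the target type. A minimal sketch of the kind of no-op conversion it flags (illustrative names, not from this commit):

package main

import "fmt"

func main() {
	n := int64(10)
	// n already has type int64, so the conversion below is an
	// identity conversion; unconvert would suggest writing n + 5.
	total := int64(n) + 5
	fmt.Println(total)
}

Each hunk below removes one such no-op conversion; the surrounding context lines are unchanged.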
@@ -693,14 +693,14 @@ func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B,
 	for _, r16 := range rt.R16 {
 		for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
 			if r != needle {
-				rs = append(rs, rune(r))
+				rs = append(rs, r)
 			}
 		}
 	}
 	for _, r32 := range rt.R32 {
 		for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
 			if r != needle {
-				rs = append(rs, rune(r))
+				rs = append(rs, r)
 			}
 		}
 	}
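
The removed conversion is redundant because the loop clause itself declares r as a rune. A standalone sketch of the same pattern, using unicode.Latin as an arbitrary table:

package main

import (
	"fmt"
	"unicode"
)

func main() {
	var rs []rune
	for _, r16 := range unicode.Latin.R16 {
		// r is already a rune here, so rune(r) would be an identity
		// conversion; appending r directly is equivalent.
		for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
			rs = append(rs, r)
		}
	}
	fmt.Println(len(rs))
}

Note that the conversions of r16.Lo, r16.Hi, and r16.Stride (uint16 values) to rune are genuine conversions and stay.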
@@ -554,7 +554,7 @@ func (b *buf) entry(cu *Entry, u *unit) *Entry {
 		case formData16:
 			val = b.bytes(16)
 		case formSdata:
-			val = int64(b.int())
+			val = b.int()
 		case formUdata:
 			val = int64(b.uint())
 		case formImplicitConst:
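
This hunk implies that b.int() already returns int64, so wrapping it in int64(...) did nothing, while int64(b.uint()) on the formUdata arm is a genuine uint64-to-int64 conversion and is kept. A hypothetical reader with the same shape:

package main

import "fmt"

type reader struct{}

func (reader) int() int64   { return -7 } // already int64, per the diff
func (reader) uint() uint64 { return 7 }

func main() {
	var b reader
	var val any
	val = b.int()         // no conversion needed
	val = int64(b.uint()) // real conversion: uint64 -> int64
	fmt.Println(val)
}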
@@ -22,7 +22,7 @@ func TestTCPConnKeepAliveConfigDialer(t *testing.T) {
 		oldCfg KeepAliveConfig
 	)
 	testPreHookSetKeepAlive = func(nfd *netFD) {
-		oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+		oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
 	}
 
 	handler := func(ls *localServer, ln Listener) {
@@ -80,7 +80,7 @@ func TestTCPConnKeepAliveConfigListener(t *testing.T) {
 		oldCfg KeepAliveConfig
 	)
 	testPreHookSetKeepAlive = func(nfd *netFD) {
-		oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+		oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
 	}
 
 	ch := make(chan Conn, 1)
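
Both keepalive hunks drop a conversion through fdType, which this change implies is, on the platforms these tests build for, just another name for the type of the Sysfd field, making the conversion an identity. A generic sketch of that situation with a hypothetical alias:

package main

import "fmt"

// Hypothetical: when fd is an alias for int, fd(sysfd) converts a
// value to its own type and does nothing.
type fd = int

func main() {
	sysfd := 3
	fmt.Println(fd(sysfd) == sysfd) // true: identity conversion
}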
@@ -460,7 +460,7 @@ func dumproots() {
 					continue
 				}
 				spf := (*specialfinalizer)(unsafe.Pointer(sp))
-				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+				p := unsafe.Pointer(s.base() + spf.special.offset)
 				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
 			}
 		}
@@ -659,7 +659,7 @@ func dumpmemprof() {
 				continue
 			}
 			spp := (*specialprofile)(unsafe.Pointer(sp))
-			p := s.base() + uintptr(spp.special.offset)
+			p := s.base() + spp.special.offset
 			dumpint(tagAllocSample)
 			dumpint(uint64(p))
 			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
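
The runtime hunks in this commit follow one pattern: the special struct's offset field evidently now has type uintptr, so arithmetic like s.base() + uintptr(spf.special.offset) no longer needs the wrapper. A toy struct showing the shape:

package main

import "fmt"

// Hypothetical mirror of what the diff implies: offset is already a
// uintptr, so uintptr(s.offset) would be an identity conversion.
type special struct {
	offset uintptr
}

func main() {
	base := uintptr(0x1000)
	s := special{offset: 0x40}
	p := base + s.offset // no uintptr(...) wrapper needed
	fmt.Printf("%#x\n", p)
}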
@@ -173,14 +173,14 @@ func (c Cleanup) Stop() {
 			// Reached the end of the linked list. Stop searching at this point.
 			break
 		}
-		if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
+		if offset == s.offset && _KindSpecialCleanup == s.kind &&
 			(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
 			// The special is a cleanup and contains a matching cleanup id.
 			*iter = s.next
 			found = s
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
+		if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
 			// The special is outside the region specified for that kind of
 			// special. The specials are sorted by kind.
 			break
@@ -415,7 +415,7 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) {
 	// Don't mark finalized object, but scan it so we retain everything it points to.
 
 	// A finalizer can be set for an inner byte of an object, find object beginning.
-	p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+	p := s.base() + spf.special.offset/s.elemsize*s.elemsize
 
 	// Mark everything that can be reached from
 	// the object (but *not* the object itself or
@@ -553,7 +553,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 		siter := newSpecialsIter(s)
 		for siter.valid() {
 			// A finalizer can be set for an inner byte of an object, find object beginning.
-			objIndex := uintptr(siter.s.offset) / size
+			objIndex := siter.s.offset / size
 			p := s.base() + objIndex*size
 			mbits := s.markBitsForIndex(objIndex)
 			if !mbits.isMarked() {
@@ -561,7 +561,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 				// Pass 1: see if it has a finalizer.
 				hasFinAndRevived := false
 				endOffset := p - s.base() + size
-				for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+				for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next {
 					if tmp.kind == _KindSpecialFinalizer {
 						// Stop freeing of object if it has a finalizer.
 						mbits.setMarkedNonAtomic()
@@ -573,11 +573,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 					// Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
 					// before finalization as specified by the weak package. See the documentation
 					// for that package for more details.
-					for siter.valid() && uintptr(siter.s.offset) < endOffset {
+					for siter.valid() && siter.s.offset < endOffset {
 						// Find the exact byte for which the special was setup
 						// (as opposed to object beginning).
 						special := siter.s
-						p := s.base() + uintptr(special.offset)
+						p := s.base() + special.offset
 						if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
 							siter.unlinkAndNext()
 							freeSpecial(special, unsafe.Pointer(p), size)
@@ -589,11 +589,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 					}
 				} else {
 					// Pass 2: the object is truly dead, free (and handle) all specials.
-					for siter.valid() && uintptr(siter.s.offset) < endOffset {
+					for siter.valid() && siter.s.offset < endOffset {
 						// Find the exact byte for which the special was setup
 						// (as opposed to object beginning).
 						special := siter.s
-						p := s.base() + uintptr(special.offset)
+						p := s.base() + special.offset
 						siter.unlinkAndNext()
 						freeSpecial(special, unsafe.Pointer(p), size)
 					}
@@ -1488,7 +1488,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 		s.allocBits = newAllocBits(uintptr(s.nelems))
 
 		// Adjust s.limit down to the object-containing part of the span.
-		s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+		s.limit = s.base() + s.elemsize*uintptr(s.nelems)
 
 		// It's safe to access h.sweepgen without the heap lock because it's
 		// only ever updated with the world stopped and we run on the
@@ -2152,11 +2152,11 @@ func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special,
 		if s == nil {
 			break
 		}
-		if offset == uintptr(s.offset) && kind == s.kind {
+		if offset == s.offset && kind == s.kind {
 			found = true
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
+		if offset < s.offset || (offset == s.offset && kind < s.kind) {
 			break
 		}
 		iter = &s.next
@@ -2323,14 +2323,14 @@ func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
 			// Reached the end of the linked list. Stop searching at this point.
 			break
 		}
-		if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+		if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
 			(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
 			// The special is a cleanup and contains a matching cleanup id.
 			*iter = s.next
 			found = (*specialCheckFinalizer)(unsafe.Pointer(s))
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+		if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
 			// The special is outside the region specified for that kind of
 			// special. The specials are sorted by kind.
 			break
@@ -2373,14 +2373,14 @@ func clearCleanupContext(ptr uintptr, cleanupID uint64) {
 			// Reached the end of the linked list. Stop searching at this point.
 			break
 		}
-		if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+		if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
 			(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
 			// The special is a cleanup and contains a matching cleanup id.
 			*iter = s.next
 			found = s
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+		if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
 			// The special is outside the region specified for that kind of
 			// special. The specials are sorted by kind.
 			break
@@ -2476,7 +2476,7 @@ type specialWeakHandle struct {
 
 //go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
 func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
-	return unsafe.Pointer(getOrAddWeakHandle(unsafe.Pointer(p)))
+	return unsafe.Pointer(getOrAddWeakHandle(p))
 }
 
 //go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
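
Here the parameter p already has type unsafe.Pointer, so unsafe.Pointer(p) converted a value to its own type. The same identity holds in any program:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	x := 42
	p := unsafe.Pointer(&x)
	// Converting an unsafe.Pointer to unsafe.Pointer yields the same
	// value; the conversion is a no-op.
	fmt.Println(unsafe.Pointer(p) == p) // true
}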
@@ -397,5 +397,5 @@ func bytealg_MakeNoZero(len int) []byte {
 		panicmakeslicelen()
 	}
 	cap := roundupsize(uintptr(len), true)
-	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
+	return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
 }
@@ -37,7 +37,7 @@ func traceSnapshotMemory(gen uintptr) {
 	}
 
 	// Emit info.
-	w.varint(uint64(trace.minPageHeapAddr))
+	w.varint(trace.minPageHeapAddr)
 	w.varint(uint64(pageSize))
 	w.varint(uint64(gc.MinHeapAlign))
 	w.varint(uint64(fixedStack))
@@ -183,7 +183,7 @@ func (w traceWriter) refill() traceWriter {
 	// Tolerate a nil mp.
 	mID := ^uint64(0)
 	if w.mp != nil {
-		mID = uint64(w.mp.procid)
+		mID = w.mp.procid
 	}
 
 	// Write the buffer's header.
@@ -194,7 +194,7 @@ func (w traceWriter) refill() traceWriter {
 		w.byte(byte(w.exp))
 	}
 	w.varint(uint64(w.gen))
-	w.varint(uint64(mID))
+	w.varint(mID)
 	w.varint(uint64(ts))
 	w.traceBuf.lenPos = w.varintReserve()
 	return w
@@ -258,7 +258,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
 	if gp != nil {
 		hdr[1] = gp.goid
 	}
-	hdr[2] = uint64(mp.procid)
+	hdr[2] = mp.procid
 
 	// Allow only one writer at a time
 	for !trace.signalLock.CompareAndSwap(0, 1) {
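
The trace hunks all drop uint64(...) around values that are already uint64: per this diff, m.procid, g.goid, traceMapNode.id, and the mID local all have that type. A toy mirror of the refill pattern:

package main

import "fmt"

// Hypothetical mirror of what the diff implies: procid is already a
// uint64, so uint64(mp.procid) would be an identity conversion.
type m struct {
	procid uint64
}

func main() {
	mID := ^uint64(0) // sentinel meaning "no m"
	mp := &m{procid: 12}
	if mp != nil { // mirrors the nil check in refill
		mID = mp.procid // direct assignment; no uint64(...) needed
	}
	fmt.Println(mID)
}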
@@ -42,7 +42,7 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.
 		tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
 	}
 	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
-		tl.writer().writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
+		tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
 	}
 	return traceEventWriter{tl}
 }
@@ -457,7 +457,7 @@ func (tl traceLocker) GoPreempt() {
 
 // GoStop emits a GoStop event with the provided reason.
 func (tl traceLocker) GoStop(reason traceGoStopReason) {
-	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0))
+	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
 }
 
 // GoPark emits a GoBlock event with the provided reason.
@@ -465,7 +465,7 @@ func (tl traceLocker) GoStop(reason traceGoStopReason) {
 // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
 // that we have both, and waitReason is way more descriptive.
 func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
-	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
 }
 
 // GoUnpark emits a GoUnblock event.
@@ -190,7 +190,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceW
 
 	// Emit stack event.
 	w.byte(byte(tracev2.EvStack))
-	w.varint(uint64(node.id))
+	w.varint(node.id)
 	w.varint(uint64(len(frames)))
 	for _, frame := range frames {
 		w.varint(uint64(frame.PC))
@@ -64,7 +64,7 @@ func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
 	}
 
 	// Emit type.
-	w.varint(uint64(node.id))
+	w.varint(node.id)
 	w.varint(uint64(uintptr(unsafe.Pointer(typ))))
 	w.varint(uint64(typ.Size()))
 	w.varint(uint64(typ.PtrBytes))
@@ -161,7 +161,7 @@ func TestConcurrentRange(t *testing.T) {
 
 	m := new(sync.Map)
 	for n := int64(1); n <= mapSize; n++ {
-		m.Store(n, int64(n))
+		m.Store(n, n)
 	}
 
 	done := make(chan struct{})
@@ -33,7 +33,7 @@ func readIntBE(b []byte, size uintptr) uint64 {
 	case 4:
 		return uint64(byteorder.BEUint32(b))
 	case 8:
-		return uint64(byteorder.BEUint64(b))
+		return byteorder.BEUint64(b)
 	default:
 		panic("syscall: readInt with unsupported size")
 	}
@@ -48,7 +48,7 @@ func readIntLE(b []byte, size uintptr) uint64 {
 	case 4:
 		return uint64(byteorder.LEUint32(b))
 	case 8:
-		return uint64(byteorder.LEUint64(b))
+		return byteorder.LEUint64(b)
 	default:
 		panic("syscall: readInt with unsupported size")
 	}
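
In both byte-order helpers the 8-byte arm already produces a uint64, so the uint64(...) wrapper was a no-op, while the 4-byte arm's widening from uint32 is a real conversion and stays. The public encoding/binary API, assumed here to mirror the internal byteorder helpers, shows the same distinction:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0, 0, 0, 0, 0, 0, 0, 42}
	// Uint64 already returns uint64; wrapping it in uint64(...)
	// would change nothing.
	fmt.Println(binary.BigEndian.Uint64(b)) // 42
	// Uint32 returns uint32, so widening it is a genuine conversion.
	fmt.Println(uint64(binary.BigEndian.Uint32(b[4:]))) // 42
}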
@@ -735,8 +735,8 @@ func ExampleTime_String() {
 	timeWithoutNanoseconds := time.Date(2000, 2, 1, 12, 13, 14, 0, time.UTC)
 	withoutNanoseconds := timeWithoutNanoseconds.String()
 
-	fmt.Printf("withNanoseconds = %v\n", string(withNanoseconds))
-	fmt.Printf("withoutNanoseconds = %v\n", string(withoutNanoseconds))
+	fmt.Printf("withNanoseconds = %v\n", withNanoseconds)
+	fmt.Printf("withoutNanoseconds = %v\n", withoutNanoseconds)
 
 	// Output:
 	// withNanoseconds = 2000-02-01 12:13:14.000000015 +0000 UTC
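
Time.String already returns a string, so string(withNanoseconds) re-converted a string to itself. Standalone:

package main

import (
	"fmt"
	"time"
)

func main() {
	// String() returns a string, so no string(...) wrapper is needed.
	withNanoseconds := time.Date(2000, 2, 1, 12, 13, 14, 15, time.UTC).String()
	// Prints: withNanoseconds = 2000-02-01 12:13:14.000000015 +0000 UTC
	fmt.Printf("withNanoseconds = %v\n", withNanoseconds)
}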
@@ -142,7 +142,7 @@ func (t *Timer) Stop() bool {
 // in Go 1.27 or later.
 func NewTimer(d Duration) *Timer {
 	c := make(chan Time, 1)
-	t := (*Timer)(newTimer(when(d), 0, sendTime, c, syncTimer(c)))
+	t := newTimer(when(d), 0, sendTime, c, syncTimer(c))
 	t.C = c
 	return t
 }
@@ -208,7 +208,7 @@ func After(d Duration) <-chan Time {
 // be used to cancel the call using its Stop method.
 // The returned Timer's C field is not used and will be nil.
 func AfterFunc(d Duration, f func()) *Timer {
-	return (*Timer)(newTimer(when(d), 0, goFunc, f, nil))
+	return newTimer(when(d), 0, goFunc, f, nil)
 }
 
 func goFunc(arg any, seq uintptr, delta int64) {
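
Both time hunks rely on the internal newTimer helper now returning *Timer directly, making the (*Timer)(...) conversion around it an identity. Converting a value to its own type is always legal and always a no-op, as this standalone sketch using the public API shows:

package main

import (
	"fmt"
	"time"
)

func main() {
	// NewTimer returns *time.Timer; the extra conversion below is an
	// identity conversion, exactly the shape the removed lines had.
	t := (*time.Timer)(time.NewTimer(10 * time.Millisecond))
	<-t.C
	fmt.Println("fired")
}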