all: omit unnecessary type conversions

Found by github.com/mdempsky/unconvert

Change-Id: Ib78cceb718146509d96dbb6da87b27dbaeba1306
GitHub-Last-Rev: dedf354811
GitHub-Pull-Request: golang/go#74771
Reviewed-on: https://go-review.googlesource.com/c/go/+/690735
Reviewed-by: Mark Freeman <mark@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Jes Cok authored on 2025-07-28 11:36:17 +00:00, committed by Gopher Robot
parent 4569255f8c
commit e151db3e06
20 changed files with 41 additions and 41 deletions
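Every hunk below follows the same pattern: an expression is converted to a type it already has, and the no-op conversion is dropped. As a minimal, self-contained sketch of what unconvert flags (illustrative only — the variable names are invented, not taken from this CL):

package main

import "fmt"

func main() {
	// off already has type uintptr, so wrapping it in uintptr(...) is a
	// no-op conversion; unconvert reports it, and the fix is to delete it.
	var base, off uintptr = 0x1000, 64

	p := base + uintptr(off) // before: redundant conversion
	q := base + off          // after: identical value and type

	fmt.Println(p == q) // true
}

To reproduce the report, installing and running the tool over package patterns should look something like `go install github.com/mdempsky/unconvert@latest` followed by `unconvert ./...`; exact flags vary by version, so treat the invocation as an assumption rather than a verified recipe.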

@@ -693,14 +693,14 @@ func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B,
 	for _, r16 := range rt.R16 {
 		for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
 			if r != needle {
-				rs = append(rs, rune(r))
+				rs = append(rs, r)
 			}
 		}
 	}
 	for _, r32 := range rt.R32 {
 		for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
 			if r != needle {
-				rs = append(rs, rune(r))
+				rs = append(rs, r)
 			}
 		}
 	}

@@ -554,7 +554,7 @@ func (b *buf) entry(cu *Entry, u *unit) *Entry {
 		case formData16:
 			val = b.bytes(16)
 		case formSdata:
-			val = int64(b.int())
+			val = b.int()
 		case formUdata:
 			val = int64(b.uint())
 		case formImplicitConst:

@@ -22,7 +22,7 @@ func TestTCPConnKeepAliveConfigDialer(t *testing.T) {
 		oldCfg KeepAliveConfig
 	)
 	testPreHookSetKeepAlive = func(nfd *netFD) {
-		oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+		oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
 	}
 
 	handler := func(ls *localServer, ln Listener) {
@@ -80,7 +80,7 @@ func TestTCPConnKeepAliveConfigListener(t *testing.T) {
 		oldCfg KeepAliveConfig
 	)
 	testPreHookSetKeepAlive = func(nfd *netFD) {
-		oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+		oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
 	}
 
 	ch := make(chan Conn, 1)

@@ -460,7 +460,7 @@ func dumproots() {
 				continue
 			}
 			spf := (*specialfinalizer)(unsafe.Pointer(sp))
-			p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+			p := unsafe.Pointer(s.base() + spf.special.offset)
 			dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
 		}
 	}
@@ -659,7 +659,7 @@ func dumpmemprof() {
 				continue
 			}
 			spp := (*specialprofile)(unsafe.Pointer(sp))
-			p := s.base() + uintptr(spp.special.offset)
+			p := s.base() + spp.special.offset
 			dumpint(tagAllocSample)
 			dumpint(uint64(p))
 			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))

@@ -173,14 +173,14 @@ func (c Cleanup) Stop() {
 			// Reached the end of the linked list. Stop searching at this point.
 			break
 		}
-		if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
+		if offset == s.offset && _KindSpecialCleanup == s.kind &&
 			(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
 			// The special is a cleanup and contains a matching cleanup id.
 			*iter = s.next
 			found = s
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
+		if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
 			// The special is outside the region specified for that kind of
 			// special. The specials are sorted by kind.
 			break

@@ -415,7 +415,7 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) {
 	// Don't mark finalized object, but scan it so we retain everything it points to.
 
 	// A finalizer can be set for an inner byte of an object, find object beginning.
-	p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+	p := s.base() + spf.special.offset/s.elemsize*s.elemsize
 
 	// Mark everything that can be reached from
 	// the object (but *not* the object itself or

@@ -553,7 +553,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 		siter := newSpecialsIter(s)
 		for siter.valid() {
 			// A finalizer can be set for an inner byte of an object, find object beginning.
-			objIndex := uintptr(siter.s.offset) / size
+			objIndex := siter.s.offset / size
 			p := s.base() + objIndex*size
 			mbits := s.markBitsForIndex(objIndex)
 			if !mbits.isMarked() {
@@ -561,7 +561,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 				// Pass 1: see if it has a finalizer.
 				hasFinAndRevived := false
 				endOffset := p - s.base() + size
-				for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+				for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next {
 					if tmp.kind == _KindSpecialFinalizer {
 						// Stop freeing of object if it has a finalizer.
 						mbits.setMarkedNonAtomic()
@@ -573,11 +573,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 				// Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
 				// before finalization as specified by the weak package. See the documentation
 				// for that package for more details.
-				for siter.valid() && uintptr(siter.s.offset) < endOffset {
+				for siter.valid() && siter.s.offset < endOffset {
 					// Find the exact byte for which the special was setup
 					// (as opposed to object beginning).
 					special := siter.s
-					p := s.base() + uintptr(special.offset)
+					p := s.base() + special.offset
 					if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
 						siter.unlinkAndNext()
 						freeSpecial(special, unsafe.Pointer(p), size)
@@ -589,11 +589,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 					}
 				} else {
 					// Pass 2: the object is truly dead, free (and handle) all specials.
-					for siter.valid() && uintptr(siter.s.offset) < endOffset {
+					for siter.valid() && siter.s.offset < endOffset {
 						// Find the exact byte for which the special was setup
 						// (as opposed to object beginning).
 						special := siter.s
-						p := s.base() + uintptr(special.offset)
+						p := s.base() + special.offset
 						siter.unlinkAndNext()
 						freeSpecial(special, unsafe.Pointer(p), size)
 					}

@@ -1488,7 +1488,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 	s.allocBits = newAllocBits(uintptr(s.nelems))
 
 	// Adjust s.limit down to the object-containing part of the span.
-	s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+	s.limit = s.base() + s.elemsize*uintptr(s.nelems)
 
 	// It's safe to access h.sweepgen without the heap lock because it's
 	// only ever updated with the world stopped and we run on the
@@ -2152,11 +2152,11 @@ func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special,
 		if s == nil {
 			break
 		}
-		if offset == uintptr(s.offset) && kind == s.kind {
+		if offset == s.offset && kind == s.kind {
 			found = true
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
+		if offset < s.offset || (offset == s.offset && kind < s.kind) {
 			break
 		}
 		iter = &s.next
@@ -2323,14 +2323,14 @@ func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
 			// Reached the end of the linked list. Stop searching at this point.
 			break
 		}
-		if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+		if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
 			(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
 			// The special is a cleanup and contains a matching cleanup id.
 			*iter = s.next
 			found = (*specialCheckFinalizer)(unsafe.Pointer(s))
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+		if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
 			// The special is outside the region specified for that kind of
 			// special. The specials are sorted by kind.
 			break
@@ -2373,14 +2373,14 @@ func clearCleanupContext(ptr uintptr, cleanupID uint64) {
 			// Reached the end of the linked list. Stop searching at this point.
 			break
 		}
-		if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+		if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
 			(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
 			// The special is a cleanup and contains a matching cleanup id.
 			*iter = s.next
 			found = s
 			break
 		}
-		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+		if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
 			// The special is outside the region specified for that kind of
 			// special. The specials are sorted by kind.
 			break
@@ -2476,7 +2476,7 @@ type specialWeakHandle struct {
 
 //go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
 func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
-	return unsafe.Pointer(getOrAddWeakHandle(unsafe.Pointer(p)))
+	return unsafe.Pointer(getOrAddWeakHandle(p))
 }
 
 //go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak

@@ -397,5 +397,5 @@ func bytealg_MakeNoZero(len int) []byte {
 		panicmakeslicelen()
 	}
 	cap := roundupsize(uintptr(len), true)
-	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
+	return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
 }

@@ -37,7 +37,7 @@ func traceSnapshotMemory(gen uintptr) {
 	}
 
 	// Emit info.
-	w.varint(uint64(trace.minPageHeapAddr))
+	w.varint(trace.minPageHeapAddr)
 	w.varint(uint64(pageSize))
 	w.varint(uint64(gc.MinHeapAlign))
 	w.varint(uint64(fixedStack))

@@ -183,7 +183,7 @@ func (w traceWriter) refill() traceWriter {
 	// Tolerate a nil mp.
 	mID := ^uint64(0)
 	if w.mp != nil {
-		mID = uint64(w.mp.procid)
+		mID = w.mp.procid
 	}
 
 	// Write the buffer's header.
@@ -194,7 +194,7 @@ func (w traceWriter) refill() traceWriter {
 		w.byte(byte(w.exp))
 	}
 	w.varint(uint64(w.gen))
-	w.varint(uint64(mID))
+	w.varint(mID)
 	w.varint(uint64(ts))
 	w.traceBuf.lenPos = w.varintReserve()
 	return w

@@ -258,7 +258,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
 	if gp != nil {
 		hdr[1] = gp.goid
 	}
-	hdr[2] = uint64(mp.procid)
+	hdr[2] = mp.procid
 
 	// Allow only one writer at a time
 	for !trace.signalLock.CompareAndSwap(0, 1) {

@@ -42,7 +42,7 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.
 		tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
 	}
 	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
-		tl.writer().writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
+		tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
 	}
 	return traceEventWriter{tl}
 }

@@ -457,7 +457,7 @@ func (tl traceLocker) GoPreempt() {
 
 // GoStop emits a GoStop event with the provided reason.
 func (tl traceLocker) GoStop(reason traceGoStopReason) {
-	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0))
+	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
 }
 
 // GoPark emits a GoBlock event with the provided reason.
@@ -465,7 +465,7 @@ func (tl traceLocker) GoStop(reason traceGoStopReason) {
 // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
 // that we have both, and waitReason is way more descriptive.
 func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
-	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
 }
 
 // GoUnpark emits a GoUnblock event.

@@ -190,7 +190,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceW
 
 	// Emit stack event.
 	w.byte(byte(tracev2.EvStack))
-	w.varint(uint64(node.id))
+	w.varint(node.id)
 	w.varint(uint64(len(frames)))
 	for _, frame := range frames {
 		w.varint(uint64(frame.PC))

@@ -64,7 +64,7 @@ func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
 	}
 
 	// Emit type.
-	w.varint(uint64(node.id))
+	w.varint(node.id)
 	w.varint(uint64(uintptr(unsafe.Pointer(typ))))
 	w.varint(uint64(typ.Size()))
 	w.varint(uint64(typ.PtrBytes))

@@ -161,7 +161,7 @@ func TestConcurrentRange(t *testing.T) {
 	m := new(sync.Map)
 
 	for n := int64(1); n <= mapSize; n++ {
-		m.Store(n, int64(n))
+		m.Store(n, n)
 	}
 
 	done := make(chan struct{})

@@ -33,7 +33,7 @@ func readIntBE(b []byte, size uintptr) uint64 {
 	case 4:
 		return uint64(byteorder.BEUint32(b))
 	case 8:
-		return uint64(byteorder.BEUint64(b))
+		return byteorder.BEUint64(b)
 	default:
 		panic("syscall: readInt with unsupported size")
 	}
@@ -48,7 +48,7 @@ func readIntLE(b []byte, size uintptr) uint64 {
 	case 4:
 		return uint64(byteorder.LEUint32(b))
 	case 8:
-		return uint64(byteorder.LEUint64(b))
+		return byteorder.LEUint64(b)
 	default:
 		panic("syscall: readInt with unsupported size")
 	}

@@ -735,8 +735,8 @@ func ExampleTime_String() {
 	timeWithoutNanoseconds := time.Date(2000, 2, 1, 12, 13, 14, 0, time.UTC)
 	withoutNanoseconds := timeWithoutNanoseconds.String()
 
-	fmt.Printf("withNanoseconds = %v\n", string(withNanoseconds))
-	fmt.Printf("withoutNanoseconds = %v\n", string(withoutNanoseconds))
+	fmt.Printf("withNanoseconds = %v\n", withNanoseconds)
+	fmt.Printf("withoutNanoseconds = %v\n", withoutNanoseconds)
 
 	// Output:
 	// withNanoseconds = 2000-02-01 12:13:14.000000015 +0000 UTC

@@ -142,7 +142,7 @@ func (t *Timer) Stop() bool {
 // in Go 1.27 or later.
 func NewTimer(d Duration) *Timer {
 	c := make(chan Time, 1)
-	t := (*Timer)(newTimer(when(d), 0, sendTime, c, syncTimer(c)))
+	t := newTimer(when(d), 0, sendTime, c, syncTimer(c))
 	t.C = c
 	return t
 }
@@ -208,7 +208,7 @@ func After(d Duration) <-chan Time {
 // be used to cancel the call using its Stop method.
 // The returned Timer's C field is not used and will be nil.
 func AfterFunc(d Duration, f func()) *Timer {
-	return (*Timer)(newTimer(when(d), 0, goFunc, f, nil))
+	return newTimer(when(d), 0, goFunc, f, nil)
 }
 
 func goFunc(arg any, seq uintptr, delta int64) {