cmd/compile/internal/ssa: expand runtime.memequal for length {3,5,6,7}

This CL slightly speeds up strings.HasPrefix when testing constant
prefixes of length {3,5,6,7}.

goos: linux
goarch: amd64
cpu: Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
                │      old     │                 new                 │
                │    sec/op    │   sec/op     vs base                │
StringPrefix3-8   11.125n ± 2%   8.539n ± 1%  -23.25% (p=0.000 n=20)
StringPrefix5-8   11.170n ± 2%   8.700n ± 1%  -22.11% (p=0.000 n=20)
StringPrefix6-8   11.190n ± 2%   8.655n ± 1%  -22.65% (p=0.000 n=20)
StringPrefix7-8   11.095n ± 1%   8.878n ± 1%  -19.98% (p=0.000 n=20)
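
For context, a minimal sketch of the kind of microbenchmark that could produce numbers like these (the benchmark name mirrors the table above, but the input string and file layout are assumptions, not the CL's actual benchmark source):

	package bench // e.g. in a file prefix_bench_test.go

	import (
		"strings"
		"testing"
	)

	var sink bool

	// Hypothetical reconstruction of the StringPrefix3 case in the
	// table; the input string is an assumption.
	func BenchmarkStringPrefix3(b *testing.B) {
		s := "string under test"
		for i := 0; i < b.N; i++ {
			sink = strings.HasPrefix(s, "str") // 3-byte constant prefix
		}
	}

Run with `go test -bench=StringPrefix -count=20` and compare old/new with benchstat.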

Change-Id: I510a80d59cf78680b57d68780d35d212d24030e2
Reviewed-on: https://go-review.googlesource.com/c/go/+/700816
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Mark Freeman <markfreeman@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
Author: Youlin Feng, 2025-09-04 09:08:14 +08:00
Committed by: Gopher Robot
Commit: a5fa5ea51c (parent: 4c63d798cb)
3 changed files with 519 additions and 1 deletion


@@ -2084,7 +2084,7 @@
(NilCheck ptr:(NilCheck _ _) _ ) => ptr
// for late-expanded calls, recognize memequal applied to a single constant byte
-// Support is limited by 1, 2, 4, 8 byte sizes
+// Support is limited by [1-8] byte sizes
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
@@ -2131,6 +2131,118 @@
&& canLoadUnaligned(config) && config.PtrSize == 8
=> (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) =>
(MakeResult
(Eq32
(Or32 <typ.Int32>
(ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
(Lsh32x32 <typ.Int32>
(ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
(Const32 <typ.Int32> [16])))
(Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) =>
(MakeResult
(Eq32
(Or32 <typ.Int32>
(ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
(Lsh32x32 <typ.Int32>
(ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
(Const32 <typ.Int32> [16])))
(Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} _ _ (Const64 [0]) mem)
&& isSameCall(callAux, "runtime.memequal")
=> (MakeResult (ConstBool <typ.Bool> [true]) mem)
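
In scalar terms, the length-3 rule combines a 16-bit load with an 8-bit load at offset 2 and compares the packed 32-bit value against a constant folded from the read-only symbol via read16/read8. A minimal sketch of the equivalent computation, assuming a little-endian target for concreteness (the rules themselves use config.ctxt.Arch.ByteOrder):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// eq3 is the scalar equivalent of the length-3 rewrite: one 2-byte
	// load, one 1-byte load at offset 2, OR-ed into a 32-bit value.
	func eq3(p, scon []byte) bool {
		v := uint32(binary.LittleEndian.Uint16(p)) | uint32(p[2])<<16
		// The rule folds this side at compile time:
		// read16(scon,0) | read8(scon,2)<<16.
		c := uint32(binary.LittleEndian.Uint16(scon)) | uint32(scon[2])<<16
		return v == c
	}

	func main() {
		fmt.Println(eq3([]byte("strip"), []byte("str"))) // true: first 3 bytes match
		fmt.Println(eq3([]byte("sty"), []byte("str")))   // false: 'y' != 'r'
	}

The length-5 and length-6 rules follow the same shape with a 32-bit base load plus an 8- or 16-bit load at offset 4, widened to 64 bits.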


@@ -30884,6 +30884,390 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
// result: (MakeResult (Eq32 (Or32 <typ.Int32> (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem)) (Lsh32x32 <typ.Int32> (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem)) (Const32 <typ.Int32> [16]))) (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 3 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr32, typ.Int32)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.Int32)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.Int32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.Int32)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(2)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v8.AuxInt = int32ToAuxInt(16)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v9.AuxInt = int32ToAuxInt(int32(uint32(read16(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint32(read8(scon, 2)) << 16)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
// result: (MakeResult (Eq32 (Or32 <typ.Int32> (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem)) (Lsh32x32 <typ.Int32> (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem)) (Const32 <typ.Int32> [16]))) (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 3 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr32, typ.Int32)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.Int32)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.Int32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.Int32)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(2)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v8.AuxInt = int32ToAuxInt(16)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v9.AuxInt = int32ToAuxInt(int32(uint32(read16(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint32(read8(scon, 2)) << 16)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 5 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read8(scon, 4)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 5 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read8(scon, 4)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 6 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read16(scon, 4, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 6 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read16(scon, 4, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 7 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(3)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read32(scon, 3, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 7 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(3)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read32(scon, 3, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} _ _ (Const64 [0]) mem)
// cond: isSameCall(callAux, "runtime.memequal")
// result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
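
Note that the length-7 rules use two overlapping 4-byte loads (offsets 0 and 3) rather than a 4+2+1 combination. The overlap is safe for an equality test: if both packed values match, bytes 0-3 and bytes 3-6 both match, which covers all seven bytes. A quick sanity check of that equivalence (not from the CL; little-endian packing assumed):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
		"math/rand"
	)

	// load7 packs a 7-byte buffer the way the length-7 rewrite does:
	// two 4-byte loads at offsets 0 and 3; byte 3 is read twice.
	func load7(b []byte) uint64 {
		return uint64(binary.LittleEndian.Uint32(b)) |
			uint64(binary.LittleEndian.Uint32(b[3:]))<<32
	}

	func main() {
		r := rand.New(rand.NewSource(1))
		p, q := make([]byte, 7), make([]byte, 7)
		for i := 0; i < 1_000_000; i++ {
			r.Read(p)
			r.Read(q)
			if i%2 == 0 {
				copy(q, p) // exercise the equal path as well
			}
			if (load7(p) == load7(q)) != bytes.Equal(p, q) {
				fmt.Println("mismatch:", p, q)
				return
			}
		}
		fmt.Println("ok")
	}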


@@ -6,6 +6,8 @@
package codegen
import "strings"
// This file contains code generation tests related to the handling of
// string types.
@@ -89,3 +91,23 @@ func NotEqualSelf(s string) bool {
}
var bsink []byte
func HasPrefix3(s string) bool {
// amd64:-`.*memequal.*`
return strings.HasPrefix(s, "str")
}
func HasPrefix5(s string) bool {
// amd64:-`.*memequal.*`
return strings.HasPrefix(s, "strin")
}
func HasPrefix6(s string) bool {
// amd64:-`.*memequal.*`
return strings.HasPrefix(s, "string")
}
func HasPrefix7(s string) bool {
// amd64:-`.*memequal.*`
return strings.HasPrefix(s, "strings")
}
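
Not part of this CL, but for contrast: with lengths 1 through 8 now all handled, a 9-byte constant prefix should still reach runtime.memequal. A hypothetical codegen case sketching that boundary (assuming no other rewrite unrolls it; a bare backquoted pattern asserts the regexp does appear in the generated amd64 assembly):

	// Hypothetical, not in the CL: a 9-byte prefix exceeds the expanded
	// range, so the memequal call is expected to remain.
	func HasPrefix9(s string) bool {
		// amd64:`.*memequal.*`
		return strings.HasPrefix(s, "strings/p")
	}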