diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index d31379a2d67..5d699a876f5 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -622,7 +622,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.From.Offset = v.AuxInt
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
-	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter, ssa.OpARMCALLudiv:
+	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
+		s.Call(v)
+	case ssa.OpARMCALLudiv:
+		v.Aux = gc.Udiv
 		s.Call(v)
 	case ssa.OpARMDUFFZERO:
 		p := s.Prog(obj.ADUFFZERO)
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index fde38e27ff5..7ff45a70b7b 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -270,5 +270,6 @@ var (
 	writeBarrier,
 	writebarrierptr,
 	typedmemmove,
-	typedmemclr *obj.LSym
+	typedmemclr,
+	Udiv *obj.LSym
 )
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 9bd731a39a8..476a3294597 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -91,6 +91,7 @@ func initssaconfig() {
 	writebarrierptr = Sysfunc("writebarrierptr")
 	typedmemmove = Sysfunc("typedmemmove")
 	typedmemclr = Sysfunc("typedmemclr")
+	Udiv = Sysfunc("udiv")
 }
 
 // buildssa builds an SSA function.
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 9a8bbad44e9..087359d3a41 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -34,12 +34,12 @@
 (Mul32uhilo x y) -> (MULLU x y)
 
 (Div32 x y) ->
-	(SUB (XOR                                   // negate the result if one operand is negative
-		(Select0 (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+	(SUB (XOR // negate the result if one operand is negative
+		(Select0 (CALLudiv
 			(SUB (XOR x (Signmask x)) (Signmask x))   // negate x if negative
 			(SUB (XOR y (Signmask y)) (Signmask y)))) // negate y if negative
 		(Signmask (XOR x y))) (Signmask (XOR x y)))
-(Div32u x y) -> (Select0 (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+(Div32u x y) -> (Select0 (CALLudiv x y))
 (Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
 (Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
 (Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
@@ -48,12 +48,12 @@
 (Div64F x y) -> (DIVD x y)
 
 (Mod32 x y) ->
-	(SUB (XOR                                   // negate the result if x is negative
-		(Select1 (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+	(SUB (XOR // negate the result if x is negative
+		(Select1 (CALLudiv
 			(SUB (XOR x (Signmask x)) (Signmask x))   // negate x if negative
 			(SUB (XOR y (Signmask y)) (Signmask y)))) // negate y if negative
 		(Signmask x)) (Signmask x))
-(Mod32u x y) -> (Select1 (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+(Mod32u x y) -> (Select1 (CALLudiv x y))
 (Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
 (Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
 (Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
index 02cb9ce5d72..baf0350aeab 100644
--- a/src/cmd/compile/internal/ssa/gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -152,10 +152,7 @@ func init() {
 			},
 			clobberFlags: true,
 			typ:          "(UInt32,UInt32)",
-			aux:          "SymOff",
-			// TODO(mdempsky): Should this be true?
-			call:      false,
-			symEffect: "None",
+			call:         false, // TODO(mdempsky): Should this be true?
 		},
 
 		{name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index db86a05717e..9ff763431e1 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -8151,10 +8151,8 @@ var opcodeTable = [...]opInfo{
 	},
 	{
 		name:         "CALLudiv",
-		auxType:      auxSymOff,
 		argLen:       2,
 		clobberFlags: true,
-		symEffect:    SymNone,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 2}, // R1
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 3651c6de36e..d7d4ab21081 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -13582,13 +13582,11 @@ func rewriteValueARM_OpDiv16u(v *Value) bool {
 func rewriteValueARM_OpDiv32(v *Value) bool {
 	b := v.Block
 	_ = b
-	config := b.Func.Config
-	_ = config
 	types := &b.Func.Config.Types
 	_ = types
 	// match: (Div32 x y)
 	// cond:
-	// result: (SUB (XOR (Select0 (CALLudiv {config.ctxt.Lookup("udiv", 0)} (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask (XOR x y))) (Signmask (XOR x y)))
+	// result: (SUB (XOR (Select0 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask (XOR x y))) (Signmask (XOR x y)))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
@@ -13596,7 +13594,6 @@ func rewriteValueARM_OpDiv32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
 		v1 := b.NewValue0(v.Pos, OpSelect0, types.UInt32)
 		v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
-		v2.Aux = config.ctxt.Lookup("udiv", 0)
 		v3 := b.NewValue0(v.Pos, OpARMSUB, types.UInt32)
 		v4 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
 		v4.AddArg(x)
@@ -13653,20 +13650,17 @@ func rewriteValueARM_OpDiv32F(v *Value) bool {
 func rewriteValueARM_OpDiv32u(v *Value) bool {
 	b := v.Block
 	_ = b
-	config := b.Func.Config
-	_ = config
 	types := &b.Func.Config.Types
 	_ = types
 	// match: (Div32u x y)
 	// cond:
-	// result: (Select0 (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+	// result: (Select0 (CALLudiv x y))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
 		v.reset(OpSelect0)
 		v.Type = types.UInt32
 		v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
-		v0.Aux = config.ctxt.Lookup("udiv", 0)
 		v0.AddArg(x)
 		v0.AddArg(y)
 		v.AddArg(v0)
@@ -15088,13 +15082,11 @@ func rewriteValueARM_OpMod16u(v *Value) bool {
 func rewriteValueARM_OpMod32(v *Value) bool {
 	b := v.Block
 	_ = b
-	config := b.Func.Config
-	_ = config
 	types := &b.Func.Config.Types
 	_ = types
 	// match: (Mod32 x y)
 	// cond:
-	// result: (SUB (XOR (Select1 (CALLudiv {config.ctxt.Lookup("udiv", 0)} (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
+	// result: (SUB (XOR (Select1 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
@@ -15102,7 +15094,6 @@ func rewriteValueARM_OpMod32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
 		v1 := b.NewValue0(v.Pos, OpSelect1, types.UInt32)
 		v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
-		v2.Aux = config.ctxt.Lookup("udiv", 0)
 		v3 := b.NewValue0(v.Pos, OpARMSUB, types.UInt32)
 		v4 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
 		v4.AddArg(x)
@@ -15140,20 +15131,17 @@ func rewriteValueARM_OpMod32(v *Value) bool {
 func rewriteValueARM_OpMod32u(v *Value) bool {
 	b := v.Block
 	_ = b
-	config := b.Func.Config
-	_ = config
 	types := &b.Func.Config.Types
 	_ = types
 	// match: (Mod32u x y)
 	// cond:
-	// result: (Select1 (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+	// result: (Select1 (CALLudiv x y))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
 		v.reset(OpSelect1)
 		v.Type = types.UInt32
 		v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
-		v0.Aux = config.ctxt.Lookup("udiv", 0)
 		v0.AddArg(x)
 		v0.AddArg(y)
 		v.AddArg(v0)
diff --git a/src/cmd/vet/all/whitelist/arm.txt b/src/cmd/vet/all/whitelist/arm.txt
index c0ab9de67d0..7eb0132a393 100644
--- a/src/cmd/vet/all/whitelist/arm.txt
+++ b/src/cmd/vet/all/whitelist/arm.txt
@@ -19,6 +19,7 @@ runtime/duff_arm.s: [arm] duffcopy: function duffcopy missing Go declaration
 runtime/tls_arm.s: [arm] save_g: function save_g missing Go declaration
 runtime/tls_arm.s: [arm] load_g: function load_g missing Go declaration
 runtime/tls_arm.s: [arm] _initcgo: function _initcgo missing Go declaration
+runtime/vlop_arm.s: [arm] udiv: function udiv missing Go declaration
 
 // Clearer using FP than SP, but that requires named offsets.
 runtime/asm_arm.s: [arm] rt0_go: use of 4(R13) points beyond argument frame
diff --git a/src/runtime/vlop_arm.s b/src/runtime/vlop_arm.s
index 6fc325cb936..3f2aa27f830 100644
--- a/src/runtime/vlop_arm.s
+++ b/src/runtime/vlop_arm.s
@@ -106,7 +106,7 @@ TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
 	MOVW	g_sigpc(g), LR
 	B	runtime·sigpanic(SB)
 
-// func udiv(n, d uint32) (q, r uint32)
+// func runtime·udiv(n, d uint32) (q, r uint32)
 // compiler knowns the register usage of this function
 // Reference:
 // Sloss, Andrew et. al; ARM System Developer's Guide: Designing and Optimizing System Software
@@ -118,7 +118,7 @@ TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
 #define Ra	R11
 
 // Be careful: Ra == R11 will be used by the linker for synthesized instructions.
-TEXT udiv(SB),NOSPLIT,$-4
+TEXT runtime·udiv(SB),NOSPLIT,$-4
 	MOVBU	runtime·hardDiv(SB), Ra
 	CMP	$0, Ra
 	BNE	udiv_hardware
@@ -241,7 +241,7 @@ TEXT _divu(SB), NOSPLIT, $16-0
 	MOVW	Rn, Rr			/* numerator */
 	MOVW	g_m(g), Rq
 	MOVW	m_divmod(Rq), Rq	/* denominator */
-	BL	udiv(SB)
+	BL	runtime·udiv(SB)
 	MOVW	Rq, RTMP
 	MOVW	4(R13), Rq
 	MOVW	8(R13), Rr
@@ -259,7 +259,7 @@ TEXT _modu(SB), NOSPLIT, $16-0
 	MOVW	Rn, Rr			/* numerator */
 	MOVW	g_m(g), Rq
 	MOVW	m_divmod(Rq), Rq	/* denominator */
-	BL	udiv(SB)
+	BL	runtime·udiv(SB)
 	MOVW	Rr, RTMP
 	MOVW	4(R13), Rq
 	MOVW	8(R13), Rr
@@ -283,7 +283,7 @@ TEXT _div(SB),NOSPLIT,$16-0
 	BGE	d2
 	RSB	$0, Rq, Rq
 d0:
-	BL	udiv(SB)		/* none/both neg */
+	BL	runtime·udiv(SB)	/* none/both neg */
 	MOVW	Rq, RTMP
 	B	out1
 d1:
@@ -291,7 +291,7 @@ d1:
 	BGE	d0
 	RSB	$0, Rq, Rq
 d2:
-	BL	udiv(SB)		/* one neg */
+	BL	runtime·udiv(SB)	/* one neg */
 	RSB	$0, Rq, RTMP
 out1:
 	MOVW	4(R13), Rq
@@ -314,11 +314,11 @@ TEXT _mod(SB),NOSPLIT,$16-0
 	CMP	$0, Rr
 	BGE	m1
 	RSB	$0, Rr, Rr
-	BL	udiv(SB)		/* neg numerator */
+	BL	runtime·udiv(SB)	/* neg numerator */
 	RSB	$0, Rr, RTMP
 	B	out
 m1:
-	BL	udiv(SB)		/* pos numerator */
+	BL	runtime·udiv(SB)	/* pos numerator */
 	MOVW	Rr, RTMP
 out:
 	MOVW	4(R13), Rq