diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index c8befa40cdb7d0..95904edd6a063d 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -3327,7 +3327,7 @@ func init() {
 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
 		},
-		sys.AMD64)
+		sys.AMD64, sys.ARM64)
 	addF("math/bits", "Len32",
 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			if s.config.PtrSize == 4 {
@@ -3336,7 +3336,7 @@ func init() {
 			x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
 			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
 		},
-		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+		sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
 	addF("math/bits", "Len16",
 		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			if s.config.PtrSize == 4 {
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 1efce66016910b..fc806f75a0ebd4 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -123,6 +123,7 @@
 (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) -> (FMOVSgpfp val)
 
 (BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ x))
+(BitLen32 x) -> (SUB (MOVDconst [32]) (CLZW x))
 
 (Bswap64 x) -> (REV x)
 (Bswap32 x) -> (REVW x)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 2afd0f335ea645..05b8b9c6973337 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -427,6 +427,8 @@ func rewriteValueARM64(v *Value) bool {
 		return rewriteValueARM64_OpAtomicStorePtrNoWB_0(v)
 	case OpAvg64u:
 		return rewriteValueARM64_OpAvg64u_0(v)
+	case OpBitLen32:
+		return rewriteValueARM64_OpBitLen32_0(v)
 	case OpBitLen64:
 		return rewriteValueARM64_OpBitLen64_0(v)
 	case OpBitRev16:
@@ -32715,6 +32717,26 @@ func rewriteValueARM64_OpAvg64u_0(v *Value) bool {
 		return true
 	}
 }
+func rewriteValueARM64_OpBitLen32_0(v *Value) bool {
+	b := v.Block
+	_ = b
+	typ := &b.Func.Config.Types
+	_ = typ
+	// match: (BitLen32 x)
+	// cond:
+	// result: (SUB (MOVDconst [32]) (CLZW x))
+	for {
+		x := v.Args[0]
+		v.reset(OpARM64SUB)
+		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+		v0.AuxInt = 32
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int)
+		v1.AddArg(x)
+		v.AddArg(v1)
+		return true
+	}
+}
 func rewriteValueARM64_OpBitLen64_0(v *Value) bool {
 	b := v.Block
 	_ = b
diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go
index 44ab2c02b75783..d8b1775b0fb019 100644
--- a/test/codegen/mathbits.go
+++ b/test/codegen/mathbits.go
@@ -31,7 +31,7 @@ func LeadingZeros64(n uint64) int {
 func LeadingZeros32(n uint32) int {
 	// amd64:"BSRQ","LEAQ",-"CMOVQEQ"
 	// s390x:"FLOGR"
-	// arm:"CLZ" arm64:"CLZ"
+	// arm:"CLZ" arm64:"CLZW"
 	// mips:"CLZ"
 	return bits.LeadingZeros32(n)
 }
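
For reference (not part of the patch): the new ARM64 rule lowers BitLen32 to SUB(MOVDconst[32], CLZW x), which mirrors the identity bits.Len32(x) == 32 - bits.LeadingZeros32(x). A minimal Go sketch that checks this identity on a few sample values:

// Illustrative only: demonstrates the identity encoded by the new
// (BitLen32 x) -> (SUB (MOVDconst [32]) (CLZW x)) rewrite rule.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint32{0, 1, 8, 0xffffffff} {
		// Both columns always agree: Len32(x) == 32 - LeadingZeros32(x).
		fmt.Println(bits.Len32(x), 32-bits.LeadingZeros32(x))
	}
}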