diff --git a/llvm/test/CodeGen/RISCV/shl-cttz.ll b/llvm/test/CodeGen/RISCV/shl-cttz.ll new file mode 100644 index 00000000000000..56aa4ee2d96899 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/shl-cttz.ll @@ -0,0 +1,1233 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV32I +; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV32ZBB +; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefixes=RV64I,RV64IILLEGALI32 +; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBILLEGALI32 +; RUN: llc -mtriple=riscv64 -mattr=+m -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefixes=RV64I,RV64ILEGALI32 +; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -riscv-experimental-rv64-legal-i32 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefixes=RV64ZBB,RV64ZBBLEGALI32 + +define i8 @shl_cttz_i8(i8 %x, i8 %y) { +; RV32I-LABEL: shl_cttz_i8: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi a2, a1, -1 +; RV32I-NEXT: not a1, a1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: srli a2, a1, 1 +; RV32I-NEXT: andi a2, a2, 85 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: andi a2, a1, 51 +; RV32I-NEXT: srli a1, a1, 2 +; RV32I-NEXT: andi a1, a1, 51 +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: srli a2, a1, 4 +; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: andi a1, a1, 15 +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_i8: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a1, a1 +; RV32ZBB-NEXT: sll a0, a0, a1 +; RV32ZBB-NEXT: ret +; +; RV64IILLEGALI32-LABEL: shl_cttz_i8: +; RV64IILLEGALI32: # %bb.0: # %entry +; RV64IILLEGALI32-NEXT: addi a2, a1, -1 +; RV64IILLEGALI32-NEXT: not a1, a1 +; RV64IILLEGALI32-NEXT: and a1, a1, a2 +; RV64IILLEGALI32-NEXT: srli a2, a1, 1 +; RV64IILLEGALI32-NEXT: andi a2, a2, 85 +; RV64IILLEGALI32-NEXT: subw a1, a1, a2 +; RV64IILLEGALI32-NEXT: andi a2, a1, 51 +; RV64IILLEGALI32-NEXT: srli a1, a1, 2 +; RV64IILLEGALI32-NEXT: andi a1, a1, 51 +; RV64IILLEGALI32-NEXT: add a1, a2, a1 +; RV64IILLEGALI32-NEXT: srli a2, a1, 4 +; RV64IILLEGALI32-NEXT: add a1, a1, a2 +; RV64IILLEGALI32-NEXT: andi a1, a1, 15 +; RV64IILLEGALI32-NEXT: sll a0, a0, a1 +; RV64IILLEGALI32-NEXT: ret +; +; RV64ZBBILLEGALI32-LABEL: shl_cttz_i8: +; RV64ZBBILLEGALI32: # %bb.0: # %entry +; RV64ZBBILLEGALI32-NEXT: ctz a1, a1 +; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1 +; RV64ZBBILLEGALI32-NEXT: ret +; +; RV64ILEGALI32-LABEL: shl_cttz_i8: +; RV64ILEGALI32: # %bb.0: # %entry +; RV64ILEGALI32-NEXT: addi a2, a1, -1 +; RV64ILEGALI32-NEXT: not a1, a1 +; RV64ILEGALI32-NEXT: and a1, a1, a2 +; RV64ILEGALI32-NEXT: srliw a2, a1, 1 +; RV64ILEGALI32-NEXT: andi a2, a2, 85 +; RV64ILEGALI32-NEXT: subw a1, a1, a2 +; RV64ILEGALI32-NEXT: andi a2, a1, 51 +; RV64ILEGALI32-NEXT: srliw a1, a1, 2 +; RV64ILEGALI32-NEXT: andi a1, a1, 51 +; RV64ILEGALI32-NEXT: add a1, a2, a1 +; RV64ILEGALI32-NEXT: srliw a2, a1, 4 +; RV64ILEGALI32-NEXT: add a1, a1, a2 +; RV64ILEGALI32-NEXT: andi a1, a1, 15 +; RV64ILEGALI32-NEXT: sllw a0, a0, a1 +; RV64ILEGALI32-NEXT: ret +; +; RV64ZBBLEGALI32-LABEL: shl_cttz_i8: +; RV64ZBBLEGALI32: # %bb.0: # %entry +; RV64ZBBLEGALI32-NEXT: ctzw a1, a1 +; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1 +; RV64ZBBLEGALI32-NEXT: ret +entry: + %cttz = call i8 
@llvm.cttz.i8(i8 %y, i1 true) + %res = shl i8 %x, %cttz + ret i8 %res +} + +define i8 @shl_cttz_constant_i8(i8 %y) { +; RV32I-LABEL: shl_cttz_constant_i8: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: not a0, a0 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: srli a1, a0, 1 +; RV32I-NEXT: andi a1, a1, 85 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: andi a1, a0, 51 +; RV32I-NEXT: srli a0, a0, 2 +; RV32I-NEXT: andi a0, a0, 51 +; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: srli a1, a0, 4 +; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: andi a0, a0, 15 +; RV32I-NEXT: li a1, 4 +; RV32I-NEXT: sll a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_constant_i8: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a0, a0 +; RV32ZBB-NEXT: li a1, 4 +; RV32ZBB-NEXT: sll a0, a1, a0 +; RV32ZBB-NEXT: ret +; +; RV64IILLEGALI32-LABEL: shl_cttz_constant_i8: +; RV64IILLEGALI32: # %bb.0: # %entry +; RV64IILLEGALI32-NEXT: addi a1, a0, -1 +; RV64IILLEGALI32-NEXT: not a0, a0 +; RV64IILLEGALI32-NEXT: and a0, a0, a1 +; RV64IILLEGALI32-NEXT: srli a1, a0, 1 +; RV64IILLEGALI32-NEXT: andi a1, a1, 85 +; RV64IILLEGALI32-NEXT: subw a0, a0, a1 +; RV64IILLEGALI32-NEXT: andi a1, a0, 51 +; RV64IILLEGALI32-NEXT: srli a0, a0, 2 +; RV64IILLEGALI32-NEXT: andi a0, a0, 51 +; RV64IILLEGALI32-NEXT: add a0, a1, a0 +; RV64IILLEGALI32-NEXT: srli a1, a0, 4 +; RV64IILLEGALI32-NEXT: add a0, a0, a1 +; RV64IILLEGALI32-NEXT: andi a0, a0, 15 +; RV64IILLEGALI32-NEXT: li a1, 4 +; RV64IILLEGALI32-NEXT: sll a0, a1, a0 +; RV64IILLEGALI32-NEXT: ret +; +; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i8: +; RV64ZBBILLEGALI32: # %bb.0: # %entry +; RV64ZBBILLEGALI32-NEXT: ctz a0, a0 +; RV64ZBBILLEGALI32-NEXT: li a1, 4 +; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0 +; RV64ZBBILLEGALI32-NEXT: ret +; +; RV64ILEGALI32-LABEL: shl_cttz_constant_i8: +; RV64ILEGALI32: # %bb.0: # %entry +; RV64ILEGALI32-NEXT: addi a1, a0, -1 +; RV64ILEGALI32-NEXT: not a0, a0 +; RV64ILEGALI32-NEXT: and a0, a0, a1 +; RV64ILEGALI32-NEXT: srliw a1, a0, 1 +; RV64ILEGALI32-NEXT: andi a1, a1, 85 +; RV64ILEGALI32-NEXT: subw a0, a0, a1 +; RV64ILEGALI32-NEXT: andi a1, a0, 51 +; RV64ILEGALI32-NEXT: srliw a0, a0, 2 +; RV64ILEGALI32-NEXT: andi a0, a0, 51 +; RV64ILEGALI32-NEXT: add a0, a1, a0 +; RV64ILEGALI32-NEXT: srliw a1, a0, 4 +; RV64ILEGALI32-NEXT: add a0, a0, a1 +; RV64ILEGALI32-NEXT: andi a0, a0, 15 +; RV64ILEGALI32-NEXT: li a1, 4 +; RV64ILEGALI32-NEXT: sllw a0, a1, a0 +; RV64ILEGALI32-NEXT: ret +; +; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i8: +; RV64ZBBLEGALI32: # %bb.0: # %entry +; RV64ZBBLEGALI32-NEXT: ctzw a0, a0 +; RV64ZBBLEGALI32-NEXT: li a1, 4 +; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0 +; RV64ZBBLEGALI32-NEXT: ret +entry: + %cttz = call i8 @llvm.cttz.i8(i8 %y, i1 true) + %res = shl i8 4, %cttz + ret i8 %res +} + +define i16 @shl_cttz_i16(i16 %x, i16 %y) { +; RV32I-LABEL: shl_cttz_i16: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi a2, a1, -1 +; RV32I-NEXT: not a1, a1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: srli a2, a1, 1 +; RV32I-NEXT: lui a3, 5 +; RV32I-NEXT: addi a3, a3, 1365 +; RV32I-NEXT: and a2, a2, a3 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: lui a2, 3 +; RV32I-NEXT: addi a2, a2, 819 +; RV32I-NEXT: and a3, a1, a2 +; RV32I-NEXT: srli a1, a1, 2 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: add a1, a3, a1 +; RV32I-NEXT: srli a2, a1, 4 +; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: andi a2, a1, 15 +; RV32I-NEXT: slli a1, a1, 20 +; RV32I-NEXT: srli a1, a1, 28 +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: ret +; +; 
RV32ZBB-LABEL: shl_cttz_i16: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a1, a1 +; RV32ZBB-NEXT: sll a0, a0, a1 +; RV32ZBB-NEXT: ret +; +; RV64IILLEGALI32-LABEL: shl_cttz_i16: +; RV64IILLEGALI32: # %bb.0: # %entry +; RV64IILLEGALI32-NEXT: addi a2, a1, -1 +; RV64IILLEGALI32-NEXT: not a1, a1 +; RV64IILLEGALI32-NEXT: and a1, a1, a2 +; RV64IILLEGALI32-NEXT: srli a2, a1, 1 +; RV64IILLEGALI32-NEXT: lui a3, 5 +; RV64IILLEGALI32-NEXT: addiw a3, a3, 1365 +; RV64IILLEGALI32-NEXT: and a2, a2, a3 +; RV64IILLEGALI32-NEXT: sub a1, a1, a2 +; RV64IILLEGALI32-NEXT: lui a2, 3 +; RV64IILLEGALI32-NEXT: addiw a2, a2, 819 +; RV64IILLEGALI32-NEXT: and a3, a1, a2 +; RV64IILLEGALI32-NEXT: srli a1, a1, 2 +; RV64IILLEGALI32-NEXT: and a1, a1, a2 +; RV64IILLEGALI32-NEXT: add a1, a3, a1 +; RV64IILLEGALI32-NEXT: srli a2, a1, 4 +; RV64IILLEGALI32-NEXT: add a1, a1, a2 +; RV64IILLEGALI32-NEXT: andi a2, a1, 15 +; RV64IILLEGALI32-NEXT: slli a1, a1, 52 +; RV64IILLEGALI32-NEXT: srli a1, a1, 60 +; RV64IILLEGALI32-NEXT: add a1, a2, a1 +; RV64IILLEGALI32-NEXT: sll a0, a0, a1 +; RV64IILLEGALI32-NEXT: ret +; +; RV64ZBBILLEGALI32-LABEL: shl_cttz_i16: +; RV64ZBBILLEGALI32: # %bb.0: # %entry +; RV64ZBBILLEGALI32-NEXT: ctz a1, a1 +; RV64ZBBILLEGALI32-NEXT: sll a0, a0, a1 +; RV64ZBBILLEGALI32-NEXT: ret +; +; RV64ILEGALI32-LABEL: shl_cttz_i16: +; RV64ILEGALI32: # %bb.0: # %entry +; RV64ILEGALI32-NEXT: addi a2, a1, -1 +; RV64ILEGALI32-NEXT: not a1, a1 +; RV64ILEGALI32-NEXT: and a1, a1, a2 +; RV64ILEGALI32-NEXT: srliw a2, a1, 1 +; RV64ILEGALI32-NEXT: lui a3, 5 +; RV64ILEGALI32-NEXT: addi a3, a3, 1365 +; RV64ILEGALI32-NEXT: and a2, a2, a3 +; RV64ILEGALI32-NEXT: subw a1, a1, a2 +; RV64ILEGALI32-NEXT: lui a2, 3 +; RV64ILEGALI32-NEXT: addi a2, a2, 819 +; RV64ILEGALI32-NEXT: and a3, a1, a2 +; RV64ILEGALI32-NEXT: srliw a1, a1, 2 +; RV64ILEGALI32-NEXT: and a1, a1, a2 +; RV64ILEGALI32-NEXT: add a1, a3, a1 +; RV64ILEGALI32-NEXT: srliw a2, a1, 4 +; RV64ILEGALI32-NEXT: add a1, a1, a2 +; RV64ILEGALI32-NEXT: andi a2, a1, 15 +; RV64ILEGALI32-NEXT: slli a1, a1, 52 +; RV64ILEGALI32-NEXT: srli a1, a1, 60 +; RV64ILEGALI32-NEXT: add a1, a2, a1 +; RV64ILEGALI32-NEXT: sllw a0, a0, a1 +; RV64ILEGALI32-NEXT: ret +; +; RV64ZBBLEGALI32-LABEL: shl_cttz_i16: +; RV64ZBBLEGALI32: # %bb.0: # %entry +; RV64ZBBLEGALI32-NEXT: ctzw a1, a1 +; RV64ZBBLEGALI32-NEXT: sllw a0, a0, a1 +; RV64ZBBLEGALI32-NEXT: ret +entry: + %cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true) + %res = shl i16 %x, %cttz + ret i16 %res +} + +define i16 @shl_cttz_constant_i16(i16 %y) { +; RV32I-LABEL: shl_cttz_constant_i16: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: not a0, a0 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: srli a1, a0, 1 +; RV32I-NEXT: lui a2, 5 +; RV32I-NEXT: addi a2, a2, 1365 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: lui a1, 3 +; RV32I-NEXT: addi a1, a1, 819 +; RV32I-NEXT: and a2, a0, a1 +; RV32I-NEXT: srli a0, a0, 2 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: add a0, a2, a0 +; RV32I-NEXT: srli a1, a0, 4 +; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: andi a1, a0, 15 +; RV32I-NEXT: slli a0, a0, 20 +; RV32I-NEXT: srli a0, a0, 28 +; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: li a1, 4 +; RV32I-NEXT: sll a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_constant_i16: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a0, a0 +; RV32ZBB-NEXT: li a1, 4 +; RV32ZBB-NEXT: sll a0, a1, a0 +; RV32ZBB-NEXT: ret +; +; RV64IILLEGALI32-LABEL: shl_cttz_constant_i16: +; RV64IILLEGALI32: # %bb.0: # %entry +; RV64IILLEGALI32-NEXT: 
addi a1, a0, -1 +; RV64IILLEGALI32-NEXT: not a0, a0 +; RV64IILLEGALI32-NEXT: and a0, a0, a1 +; RV64IILLEGALI32-NEXT: srli a1, a0, 1 +; RV64IILLEGALI32-NEXT: lui a2, 5 +; RV64IILLEGALI32-NEXT: addiw a2, a2, 1365 +; RV64IILLEGALI32-NEXT: and a1, a1, a2 +; RV64IILLEGALI32-NEXT: sub a0, a0, a1 +; RV64IILLEGALI32-NEXT: lui a1, 3 +; RV64IILLEGALI32-NEXT: addiw a1, a1, 819 +; RV64IILLEGALI32-NEXT: and a2, a0, a1 +; RV64IILLEGALI32-NEXT: srli a0, a0, 2 +; RV64IILLEGALI32-NEXT: and a0, a0, a1 +; RV64IILLEGALI32-NEXT: add a0, a2, a0 +; RV64IILLEGALI32-NEXT: srli a1, a0, 4 +; RV64IILLEGALI32-NEXT: add a0, a0, a1 +; RV64IILLEGALI32-NEXT: andi a1, a0, 15 +; RV64IILLEGALI32-NEXT: slli a0, a0, 52 +; RV64IILLEGALI32-NEXT: srli a0, a0, 60 +; RV64IILLEGALI32-NEXT: add a0, a1, a0 +; RV64IILLEGALI32-NEXT: li a1, 4 +; RV64IILLEGALI32-NEXT: sll a0, a1, a0 +; RV64IILLEGALI32-NEXT: ret +; +; RV64ZBBILLEGALI32-LABEL: shl_cttz_constant_i16: +; RV64ZBBILLEGALI32: # %bb.0: # %entry +; RV64ZBBILLEGALI32-NEXT: ctz a0, a0 +; RV64ZBBILLEGALI32-NEXT: li a1, 4 +; RV64ZBBILLEGALI32-NEXT: sll a0, a1, a0 +; RV64ZBBILLEGALI32-NEXT: ret +; +; RV64ILEGALI32-LABEL: shl_cttz_constant_i16: +; RV64ILEGALI32: # %bb.0: # %entry +; RV64ILEGALI32-NEXT: addi a1, a0, -1 +; RV64ILEGALI32-NEXT: not a0, a0 +; RV64ILEGALI32-NEXT: and a0, a0, a1 +; RV64ILEGALI32-NEXT: srliw a1, a0, 1 +; RV64ILEGALI32-NEXT: lui a2, 5 +; RV64ILEGALI32-NEXT: addi a2, a2, 1365 +; RV64ILEGALI32-NEXT: and a1, a1, a2 +; RV64ILEGALI32-NEXT: subw a0, a0, a1 +; RV64ILEGALI32-NEXT: lui a1, 3 +; RV64ILEGALI32-NEXT: addi a1, a1, 819 +; RV64ILEGALI32-NEXT: and a2, a0, a1 +; RV64ILEGALI32-NEXT: srliw a0, a0, 2 +; RV64ILEGALI32-NEXT: and a0, a0, a1 +; RV64ILEGALI32-NEXT: add a0, a2, a0 +; RV64ILEGALI32-NEXT: srliw a1, a0, 4 +; RV64ILEGALI32-NEXT: add a0, a0, a1 +; RV64ILEGALI32-NEXT: andi a1, a0, 15 +; RV64ILEGALI32-NEXT: slli a0, a0, 52 +; RV64ILEGALI32-NEXT: srli a0, a0, 60 +; RV64ILEGALI32-NEXT: add a0, a1, a0 +; RV64ILEGALI32-NEXT: li a1, 4 +; RV64ILEGALI32-NEXT: sllw a0, a1, a0 +; RV64ILEGALI32-NEXT: ret +; +; RV64ZBBLEGALI32-LABEL: shl_cttz_constant_i16: +; RV64ZBBLEGALI32: # %bb.0: # %entry +; RV64ZBBLEGALI32-NEXT: ctzw a0, a0 +; RV64ZBBLEGALI32-NEXT: li a1, 4 +; RV64ZBBLEGALI32-NEXT: sllw a0, a1, a0 +; RV64ZBBLEGALI32-NEXT: ret +entry: + %cttz = call i16 @llvm.cttz.i16(i16 %y, i1 true) + %res = shl i16 4, %cttz + ret i16 %res +} + +define i32 @shl_cttz_i32(i32 %x, i32 %y) { +; RV32I-LABEL: shl_cttz_i32: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: neg a2, a1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a2, 30667 +; RV32I-NEXT: addi a2, a2, 1329 +; RV32I-NEXT: mul a1, a1, a2 +; RV32I-NEXT: srli a1, a1, 27 +; RV32I-NEXT: lui a2, %hi(.LCPI4_0) +; RV32I-NEXT: addi a2, a2, %lo(.LCPI4_0) +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: lbu a1, 0(a1) +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_i32: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a1, a1 +; RV32ZBB-NEXT: sll a0, a0, a1 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_i32: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: lui a2, 30667 +; RV64I-NEXT: addi a2, a2, 1329 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srliw a1, a1, 27 +; RV64I-NEXT: lui a2, %hi(.LCPI4_0) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI4_0) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu a1, 0(a1) +; RV64I-NEXT: sllw a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_i32: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctzw a1, a1 +; RV64ZBB-NEXT: sllw 
a0, a0, a1 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true) + %res = shl i32 %x, %cttz + ret i32 %res +} + +define i32 @shl_cttz_constant_i32(i32 %y) { +; RV32I-LABEL: shl_cttz_constant_i32: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: lui a1, 30667 +; RV32I-NEXT: addi a1, a1, 1329 +; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: srli a0, a0, 27 +; RV32I-NEXT: lui a1, %hi(.LCPI5_0) +; RV32I-NEXT: addi a1, a1, %lo(.LCPI5_0) +; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: li a1, 4 +; RV32I-NEXT: sll a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_constant_i32: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a0, a0 +; RV32ZBB-NEXT: li a1, 4 +; RV32ZBB-NEXT: sll a0, a1, a0 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_constant_i32: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: negw a1, a0 +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: lui a1, 30667 +; RV64I-NEXT: addi a1, a1, 1329 +; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: srliw a0, a0, 27 +; RV64I-NEXT: lui a1, %hi(.LCPI5_0) +; RV64I-NEXT: addi a1, a1, %lo(.LCPI5_0) +; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: li a1, 4 +; RV64I-NEXT: sllw a0, a1, a0 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_constant_i32: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctzw a0, a0 +; RV64ZBB-NEXT: li a1, 4 +; RV64ZBB-NEXT: sllw a0, a1, a0 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true) + %res = shl i32 4, %cttz + ret i32 %res +} + +define i32 @shl_cttz_nuw_i32(i32 %x, i32 %y) { +; RV32I-LABEL: shl_cttz_nuw_i32: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: neg a2, a1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a2, 30667 +; RV32I-NEXT: addi a2, a2, 1329 +; RV32I-NEXT: mul a1, a1, a2 +; RV32I-NEXT: srli a1, a1, 27 +; RV32I-NEXT: lui a2, %hi(.LCPI6_0) +; RV32I-NEXT: addi a2, a2, %lo(.LCPI6_0) +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: lbu a1, 0(a1) +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_nuw_i32: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a1, a1 +; RV32ZBB-NEXT: sll a0, a0, a1 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_nuw_i32: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: lui a2, 30667 +; RV64I-NEXT: addi a2, a2, 1329 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srliw a1, a1, 27 +; RV64I-NEXT: lui a2, %hi(.LCPI6_0) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI6_0) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu a1, 0(a1) +; RV64I-NEXT: sllw a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_nuw_i32: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctzw a1, a1 +; RV64ZBB-NEXT: sllw a0, a0, a1 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true) + %res = shl nuw i32 %x, %cttz + ret i32 %res +} + +define i32 @shl_cttz_nsw_i32(i32 %x, i32 %y) { +; RV32I-LABEL: shl_cttz_nsw_i32: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: neg a2, a1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a2, 30667 +; RV32I-NEXT: addi a2, a2, 1329 +; RV32I-NEXT: mul a1, a1, a2 +; RV32I-NEXT: srli a1, a1, 27 +; RV32I-NEXT: lui a2, %hi(.LCPI7_0) +; RV32I-NEXT: addi a2, a2, %lo(.LCPI7_0) +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: lbu a1, 0(a1) +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_nsw_i32: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: ctz a1, a1 +; RV32ZBB-NEXT: sll a0, a0, a1 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: 
shl_cttz_nsw_i32: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: lui a2, 30667 +; RV64I-NEXT: addi a2, a2, 1329 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srliw a1, a1, 27 +; RV64I-NEXT: lui a2, %hi(.LCPI7_0) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI7_0) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu a1, 0(a1) +; RV64I-NEXT: sllw a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_nsw_i32: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctzw a1, a1 +; RV64ZBB-NEXT: sllw a0, a0, a1 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true) + %res = shl nsw i32 %x, %cttz + ret i32 %res +} + +define i32 @shl_cttz_multiuse_i32(i32 %x, i32 %y) { +; RV32I-LABEL: shl_cttz_multiuse_i32: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: .cfi_offset s1, -12 +; RV32I-NEXT: neg a2, a1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a2, 30667 +; RV32I-NEXT: addi a2, a2, 1329 +; RV32I-NEXT: mul a1, a1, a2 +; RV32I-NEXT: srli a1, a1, 27 +; RV32I-NEXT: lui a2, %hi(.LCPI8_0) +; RV32I-NEXT: addi a2, a2, %lo(.LCPI8_0) +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: lbu s0, 0(a1) +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: call use32 +; RV32I-NEXT: sll a0, s1, s0 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_multiuse_i32: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: addi sp, sp, -16 +; RV32ZBB-NEXT: .cfi_def_cfa_offset 16 +; RV32ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: .cfi_offset ra, -4 +; RV32ZBB-NEXT: .cfi_offset s0, -8 +; RV32ZBB-NEXT: .cfi_offset s1, -12 +; RV32ZBB-NEXT: mv s0, a0 +; RV32ZBB-NEXT: ctz s1, a1 +; RV32ZBB-NEXT: mv a0, s1 +; RV32ZBB-NEXT: call use32 +; RV32ZBB-NEXT: sll a0, s0, s1 +; RV32ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: addi sp, sp, 16 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_multiuse_i32: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: .cfi_def_cfa_offset 32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: .cfi_offset s1, -24 +; RV64I-NEXT: negw a2, a1 +; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: lui a2, 30667 +; RV64I-NEXT: addi a2, a2, 1329 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srliw a1, a1, 27 +; RV64I-NEXT: lui a2, %hi(.LCPI8_0) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI8_0) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu s0, 0(a1) +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: call use32 +; RV64I-NEXT: sllw a0, s1, s0 +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret +; +; 
RV64ZBB-LABEL: shl_cttz_multiuse_i32: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: addi sp, sp, -32 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 32 +; RV64ZBB-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: .cfi_offset ra, -8 +; RV64ZBB-NEXT: .cfi_offset s0, -16 +; RV64ZBB-NEXT: .cfi_offset s1, -24 +; RV64ZBB-NEXT: mv s0, a0 +; RV64ZBB-NEXT: ctzw s1, a1 +; RV64ZBB-NEXT: mv a0, s1 +; RV64ZBB-NEXT: call use32 +; RV64ZBB-NEXT: sllw a0, s0, s1 +; RV64ZBB-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: addi sp, sp, 32 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i32 @llvm.cttz.i32(i32 %y, i1 true) + call void @use32(i32 %cttz) + %res = shl i32 %x, %cttz + ret i32 %res +} + +define i64 @shl_cttz_i64(i64 %x, i64 %y) { +; RV32I-LABEL: shl_cttz_i64: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a4, 30667 +; RV32I-NEXT: addi a5, a4, 1329 +; RV32I-NEXT: lui a4, %hi(.LCPI9_0) +; RV32I-NEXT: addi a4, a4, %lo(.LCPI9_0) +; RV32I-NEXT: bnez a2, .LBB9_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: neg a2, a3 +; RV32I-NEXT: and a2, a3, a2 +; RV32I-NEXT: mul a2, a2, a5 +; RV32I-NEXT: srli a2, a2, 27 +; RV32I-NEXT: add a2, a4, a2 +; RV32I-NEXT: lbu a2, 0(a2) +; RV32I-NEXT: addi a4, a2, 32 +; RV32I-NEXT: j .LBB9_3 +; RV32I-NEXT: .LBB9_2: +; RV32I-NEXT: neg a3, a2 +; RV32I-NEXT: and a2, a2, a3 +; RV32I-NEXT: mul a2, a2, a5 +; RV32I-NEXT: srli a2, a2, 27 +; RV32I-NEXT: add a2, a4, a2 +; RV32I-NEXT: lbu a4, 0(a2) +; RV32I-NEXT: .LBB9_3: # %entry +; RV32I-NEXT: addi a3, a4, -32 +; RV32I-NEXT: sll a2, a0, a4 +; RV32I-NEXT: bltz a3, .LBB9_5 +; RV32I-NEXT: # %bb.4: # %entry +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: j .LBB9_6 +; RV32I-NEXT: .LBB9_5: +; RV32I-NEXT: sll a1, a1, a4 +; RV32I-NEXT: not a4, a4 +; RV32I-NEXT: srli a0, a0, 1 +; RV32I-NEXT: srl a0, a0, a4 +; RV32I-NEXT: or a1, a1, a0 +; RV32I-NEXT: .LBB9_6: # %entry +; RV32I-NEXT: srai a0, a3, 31 +; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_i64: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: bnez a2, .LBB9_2 +; RV32ZBB-NEXT: # %bb.1: # %entry +; RV32ZBB-NEXT: ctz a2, a3 +; RV32ZBB-NEXT: addi a4, a2, 32 +; RV32ZBB-NEXT: j .LBB9_3 +; RV32ZBB-NEXT: .LBB9_2: +; RV32ZBB-NEXT: ctz a4, a2 +; RV32ZBB-NEXT: .LBB9_3: # %entry +; RV32ZBB-NEXT: addi a3, a4, -32 +; RV32ZBB-NEXT: sll a2, a0, a4 +; RV32ZBB-NEXT: bltz a3, .LBB9_5 +; RV32ZBB-NEXT: # %bb.4: # %entry +; RV32ZBB-NEXT: mv a1, a2 +; RV32ZBB-NEXT: j .LBB9_6 +; RV32ZBB-NEXT: .LBB9_5: +; RV32ZBB-NEXT: sll a1, a1, a4 +; RV32ZBB-NEXT: not a4, a4 +; RV32ZBB-NEXT: srli a0, a0, 1 +; RV32ZBB-NEXT: srl a0, a0, a4 +; RV32ZBB-NEXT: or a1, a1, a0 +; RV32ZBB-NEXT: .LBB9_6: # %entry +; RV32ZBB-NEXT: srai a0, a3, 31 +; RV32ZBB-NEXT: and a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_i64: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: lui a2, %hi(.LCPI9_0) +; RV64I-NEXT: ld a2, %lo(.LCPI9_0)(a2) +; RV64I-NEXT: neg a3, a1 +; RV64I-NEXT: and a1, a1, a3 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srli a1, a1, 58 +; RV64I-NEXT: lui a2, %hi(.LCPI9_1) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI9_1) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu a1, 0(a1) +; RV64I-NEXT: sll a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_i64: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctz a1, a1 +; RV64ZBB-NEXT: sll a0, a0, a1 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i64 
@llvm.cttz.i64(i64 %y, i1 true) + %res = shl i64 %x, %cttz + ret i64 %res +} + +define i64 @shl_cttz_constant_i64(i64 %y) { +; RV32I-LABEL: shl_cttz_constant_i64: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a2, 30667 +; RV32I-NEXT: addi a3, a2, 1329 +; RV32I-NEXT: lui a2, %hi(.LCPI10_0) +; RV32I-NEXT: addi a2, a2, %lo(.LCPI10_0) +; RV32I-NEXT: bnez a0, .LBB10_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: neg a0, a1 +; RV32I-NEXT: and a0, a1, a0 +; RV32I-NEXT: mul a0, a0, a3 +; RV32I-NEXT: srli a0, a0, 27 +; RV32I-NEXT: add a0, a2, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: addi a1, a0, 32 +; RV32I-NEXT: j .LBB10_3 +; RV32I-NEXT: .LBB10_2: +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: mul a0, a0, a3 +; RV32I-NEXT: srli a0, a0, 27 +; RV32I-NEXT: add a0, a2, a0 +; RV32I-NEXT: lbu a1, 0(a0) +; RV32I-NEXT: .LBB10_3: # %entry +; RV32I-NEXT: li a0, 4 +; RV32I-NEXT: addi a2, a1, -32 +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: bltz a2, .LBB10_5 +; RV32I-NEXT: # %bb.4: # %entry +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: j .LBB10_6 +; RV32I-NEXT: .LBB10_5: +; RV32I-NEXT: not a1, a1 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: srl a1, a3, a1 +; RV32I-NEXT: .LBB10_6: # %entry +; RV32I-NEXT: srai a2, a2, 31 +; RV32I-NEXT: and a0, a2, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_constant_i64: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: bnez a0, .LBB10_2 +; RV32ZBB-NEXT: # %bb.1: # %entry +; RV32ZBB-NEXT: ctz a0, a1 +; RV32ZBB-NEXT: addi a1, a0, 32 +; RV32ZBB-NEXT: j .LBB10_3 +; RV32ZBB-NEXT: .LBB10_2: +; RV32ZBB-NEXT: ctz a1, a0 +; RV32ZBB-NEXT: .LBB10_3: # %entry +; RV32ZBB-NEXT: li a0, 4 +; RV32ZBB-NEXT: addi a2, a1, -32 +; RV32ZBB-NEXT: sll a0, a0, a1 +; RV32ZBB-NEXT: bltz a2, .LBB10_5 +; RV32ZBB-NEXT: # %bb.4: # %entry +; RV32ZBB-NEXT: mv a1, a0 +; RV32ZBB-NEXT: j .LBB10_6 +; RV32ZBB-NEXT: .LBB10_5: +; RV32ZBB-NEXT: not a1, a1 +; RV32ZBB-NEXT: li a3, 2 +; RV32ZBB-NEXT: srl a1, a3, a1 +; RV32ZBB-NEXT: .LBB10_6: # %entry +; RV32ZBB-NEXT: srai a2, a2, 31 +; RV32ZBB-NEXT: and a0, a2, a0 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_constant_i64: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: lui a1, %hi(.LCPI10_0) +; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1) +; RV64I-NEXT: neg a2, a0 +; RV64I-NEXT: and a0, a0, a2 +; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: srli a0, a0, 58 +; RV64I-NEXT: lui a1, %hi(.LCPI10_1) +; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1) +; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: li a1, 4 +; RV64I-NEXT: sll a0, a1, a0 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_constant_i64: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctz a0, a0 +; RV64ZBB-NEXT: li a1, 4 +; RV64ZBB-NEXT: sll a0, a1, a0 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true) + %res = shl i64 4, %cttz + ret i64 %res +} + +define i64 @shl_cttz_nuw_i64(i64 %x, i64 %y) { +; RV32I-LABEL: shl_cttz_nuw_i64: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a4, 30667 +; RV32I-NEXT: addi a5, a4, 1329 +; RV32I-NEXT: lui a4, %hi(.LCPI11_0) +; RV32I-NEXT: addi a4, a4, %lo(.LCPI11_0) +; RV32I-NEXT: bnez a2, .LBB11_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: neg a2, a3 +; RV32I-NEXT: and a2, a3, a2 +; RV32I-NEXT: mul a2, a2, a5 +; RV32I-NEXT: srli a2, a2, 27 +; RV32I-NEXT: add a2, a4, a2 +; RV32I-NEXT: lbu a2, 0(a2) +; RV32I-NEXT: addi a4, a2, 32 +; RV32I-NEXT: j .LBB11_3 +; RV32I-NEXT: .LBB11_2: +; RV32I-NEXT: neg a3, a2 +; RV32I-NEXT: and a2, a2, a3 +; RV32I-NEXT: mul a2, a2, a5 +; RV32I-NEXT: srli a2, a2, 27 +; RV32I-NEXT: add a2, a4, a2 
+; RV32I-NEXT: lbu a4, 0(a2) +; RV32I-NEXT: .LBB11_3: # %entry +; RV32I-NEXT: addi a3, a4, -32 +; RV32I-NEXT: sll a2, a0, a4 +; RV32I-NEXT: bltz a3, .LBB11_5 +; RV32I-NEXT: # %bb.4: # %entry +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: j .LBB11_6 +; RV32I-NEXT: .LBB11_5: +; RV32I-NEXT: sll a1, a1, a4 +; RV32I-NEXT: not a4, a4 +; RV32I-NEXT: srli a0, a0, 1 +; RV32I-NEXT: srl a0, a0, a4 +; RV32I-NEXT: or a1, a1, a0 +; RV32I-NEXT: .LBB11_6: # %entry +; RV32I-NEXT: srai a0, a3, 31 +; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_nuw_i64: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: bnez a2, .LBB11_2 +; RV32ZBB-NEXT: # %bb.1: # %entry +; RV32ZBB-NEXT: ctz a2, a3 +; RV32ZBB-NEXT: addi a4, a2, 32 +; RV32ZBB-NEXT: j .LBB11_3 +; RV32ZBB-NEXT: .LBB11_2: +; RV32ZBB-NEXT: ctz a4, a2 +; RV32ZBB-NEXT: .LBB11_3: # %entry +; RV32ZBB-NEXT: addi a3, a4, -32 +; RV32ZBB-NEXT: sll a2, a0, a4 +; RV32ZBB-NEXT: bltz a3, .LBB11_5 +; RV32ZBB-NEXT: # %bb.4: # %entry +; RV32ZBB-NEXT: mv a1, a2 +; RV32ZBB-NEXT: j .LBB11_6 +; RV32ZBB-NEXT: .LBB11_5: +; RV32ZBB-NEXT: sll a1, a1, a4 +; RV32ZBB-NEXT: not a4, a4 +; RV32ZBB-NEXT: srli a0, a0, 1 +; RV32ZBB-NEXT: srl a0, a0, a4 +; RV32ZBB-NEXT: or a1, a1, a0 +; RV32ZBB-NEXT: .LBB11_6: # %entry +; RV32ZBB-NEXT: srai a0, a3, 31 +; RV32ZBB-NEXT: and a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_nuw_i64: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: lui a2, %hi(.LCPI11_0) +; RV64I-NEXT: ld a2, %lo(.LCPI11_0)(a2) +; RV64I-NEXT: neg a3, a1 +; RV64I-NEXT: and a1, a1, a3 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srli a1, a1, 58 +; RV64I-NEXT: lui a2, %hi(.LCPI11_1) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI11_1) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu a1, 0(a1) +; RV64I-NEXT: sll a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_nuw_i64: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctz a1, a1 +; RV64ZBB-NEXT: sll a0, a0, a1 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true) + %res = shl nuw i64 %x, %cttz + ret i64 %res +} + +define i64 @shl_cttz_nsw_i64(i64 %x, i64 %y) { +; RV32I-LABEL: shl_cttz_nsw_i64: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a4, 30667 +; RV32I-NEXT: addi a5, a4, 1329 +; RV32I-NEXT: lui a4, %hi(.LCPI12_0) +; RV32I-NEXT: addi a4, a4, %lo(.LCPI12_0) +; RV32I-NEXT: bnez a2, .LBB12_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: neg a2, a3 +; RV32I-NEXT: and a2, a3, a2 +; RV32I-NEXT: mul a2, a2, a5 +; RV32I-NEXT: srli a2, a2, 27 +; RV32I-NEXT: add a2, a4, a2 +; RV32I-NEXT: lbu a2, 0(a2) +; RV32I-NEXT: addi a4, a2, 32 +; RV32I-NEXT: j .LBB12_3 +; RV32I-NEXT: .LBB12_2: +; RV32I-NEXT: neg a3, a2 +; RV32I-NEXT: and a2, a2, a3 +; RV32I-NEXT: mul a2, a2, a5 +; RV32I-NEXT: srli a2, a2, 27 +; RV32I-NEXT: add a2, a4, a2 +; RV32I-NEXT: lbu a4, 0(a2) +; RV32I-NEXT: .LBB12_3: # %entry +; RV32I-NEXT: addi a3, a4, -32 +; RV32I-NEXT: sll a2, a0, a4 +; RV32I-NEXT: bltz a3, .LBB12_5 +; RV32I-NEXT: # %bb.4: # %entry +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: j .LBB12_6 +; RV32I-NEXT: .LBB12_5: +; RV32I-NEXT: sll a1, a1, a4 +; RV32I-NEXT: not a4, a4 +; RV32I-NEXT: srli a0, a0, 1 +; RV32I-NEXT: srl a0, a0, a4 +; RV32I-NEXT: or a1, a1, a0 +; RV32I-NEXT: .LBB12_6: # %entry +; RV32I-NEXT: srai a0, a3, 31 +; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_nsw_i64: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: bnez a2, .LBB12_2 +; RV32ZBB-NEXT: # %bb.1: # %entry +; RV32ZBB-NEXT: ctz a2, a3 +; RV32ZBB-NEXT: addi a4, a2, 32 +; RV32ZBB-NEXT: j .LBB12_3 +; RV32ZBB-NEXT: .LBB12_2: 
+; RV32ZBB-NEXT: ctz a4, a2 +; RV32ZBB-NEXT: .LBB12_3: # %entry +; RV32ZBB-NEXT: addi a3, a4, -32 +; RV32ZBB-NEXT: sll a2, a0, a4 +; RV32ZBB-NEXT: bltz a3, .LBB12_5 +; RV32ZBB-NEXT: # %bb.4: # %entry +; RV32ZBB-NEXT: mv a1, a2 +; RV32ZBB-NEXT: j .LBB12_6 +; RV32ZBB-NEXT: .LBB12_5: +; RV32ZBB-NEXT: sll a1, a1, a4 +; RV32ZBB-NEXT: not a4, a4 +; RV32ZBB-NEXT: srli a0, a0, 1 +; RV32ZBB-NEXT: srl a0, a0, a4 +; RV32ZBB-NEXT: or a1, a1, a0 +; RV32ZBB-NEXT: .LBB12_6: # %entry +; RV32ZBB-NEXT: srai a0, a3, 31 +; RV32ZBB-NEXT: and a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_nsw_i64: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: lui a2, %hi(.LCPI12_0) +; RV64I-NEXT: ld a2, %lo(.LCPI12_0)(a2) +; RV64I-NEXT: neg a3, a1 +; RV64I-NEXT: and a1, a1, a3 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srli a1, a1, 58 +; RV64I-NEXT: lui a2, %hi(.LCPI12_1) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI12_1) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu a1, 0(a1) +; RV64I-NEXT: sll a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_nsw_i64: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: ctz a1, a1 +; RV64ZBB-NEXT: sll a0, a0, a1 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true) + %res = shl nsw i64 %x, %cttz + ret i64 %res +} + +define i64 @shl_cttz_multiuse_i64(i64 %x, i64 %y) { +; RV32I-LABEL: shl_cttz_multiuse_i64: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32I-NEXT: .cfi_offset ra, -4 +; RV32I-NEXT: .cfi_offset s0, -8 +; RV32I-NEXT: .cfi_offset s1, -12 +; RV32I-NEXT: .cfi_offset s2, -16 +; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: lui a0, 30667 +; RV32I-NEXT: addi a1, a0, 1329 +; RV32I-NEXT: lui a0, %hi(.LCPI13_0) +; RV32I-NEXT: addi a0, a0, %lo(.LCPI13_0) +; RV32I-NEXT: bnez a2, .LBB13_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: neg a2, a3 +; RV32I-NEXT: and a2, a3, a2 +; RV32I-NEXT: mul a1, a2, a1 +; RV32I-NEXT: srli a1, a1, 27 +; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: addi s2, a0, 32 +; RV32I-NEXT: j .LBB13_3 +; RV32I-NEXT: .LBB13_2: +; RV32I-NEXT: neg a3, a2 +; RV32I-NEXT: and a2, a2, a3 +; RV32I-NEXT: mul a1, a2, a1 +; RV32I-NEXT: srli a1, a1, 27 +; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: lbu s2, 0(a0) +; RV32I-NEXT: .LBB13_3: # %entry +; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call use64 +; RV32I-NEXT: addi a2, s2, -32 +; RV32I-NEXT: sll a0, s0, s2 +; RV32I-NEXT: bltz a2, .LBB13_5 +; RV32I-NEXT: # %bb.4: # %entry +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: j .LBB13_6 +; RV32I-NEXT: .LBB13_5: +; RV32I-NEXT: sll a1, s1, s2 +; RV32I-NEXT: not a3, s2 +; RV32I-NEXT: srli s0, s0, 1 +; RV32I-NEXT: srl a3, s0, a3 +; RV32I-NEXT: or a1, a1, a3 +; RV32I-NEXT: .LBB13_6: # %entry +; RV32I-NEXT: srai a2, a2, 31 +; RV32I-NEXT: and a0, a2, a0 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: shl_cttz_multiuse_i64: +; RV32ZBB: # %bb.0: # %entry +; RV32ZBB-NEXT: addi sp, sp, -16 +; RV32ZBB-NEXT: .cfi_def_cfa_offset 16 +; RV32ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; 
RV32ZBB-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: .cfi_offset ra, -4 +; RV32ZBB-NEXT: .cfi_offset s0, -8 +; RV32ZBB-NEXT: .cfi_offset s1, -12 +; RV32ZBB-NEXT: .cfi_offset s2, -16 +; RV32ZBB-NEXT: mv s1, a1 +; RV32ZBB-NEXT: mv s0, a0 +; RV32ZBB-NEXT: bnez a2, .LBB13_2 +; RV32ZBB-NEXT: # %bb.1: # %entry +; RV32ZBB-NEXT: ctz a0, a3 +; RV32ZBB-NEXT: addi s2, a0, 32 +; RV32ZBB-NEXT: j .LBB13_3 +; RV32ZBB-NEXT: .LBB13_2: +; RV32ZBB-NEXT: ctz s2, a2 +; RV32ZBB-NEXT: .LBB13_3: # %entry +; RV32ZBB-NEXT: mv a0, s2 +; RV32ZBB-NEXT: li a1, 0 +; RV32ZBB-NEXT: call use64 +; RV32ZBB-NEXT: addi a2, s2, -32 +; RV32ZBB-NEXT: sll a0, s0, s2 +; RV32ZBB-NEXT: bltz a2, .LBB13_5 +; RV32ZBB-NEXT: # %bb.4: # %entry +; RV32ZBB-NEXT: mv a1, a0 +; RV32ZBB-NEXT: j .LBB13_6 +; RV32ZBB-NEXT: .LBB13_5: +; RV32ZBB-NEXT: sll a1, s1, s2 +; RV32ZBB-NEXT: not a3, s2 +; RV32ZBB-NEXT: srli s0, s0, 1 +; RV32ZBB-NEXT: srl a3, s0, a3 +; RV32ZBB-NEXT: or a1, a1, a3 +; RV32ZBB-NEXT: .LBB13_6: # %entry +; RV32ZBB-NEXT: srai a2, a2, 31 +; RV32ZBB-NEXT: and a0, a2, a0 +; RV32ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32ZBB-NEXT: addi sp, sp, 16 +; RV32ZBB-NEXT: ret +; +; RV64I-LABEL: shl_cttz_multiuse_i64: +; RV64I: # %bb.0: # %entry +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: .cfi_def_cfa_offset 32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: .cfi_offset ra, -8 +; RV64I-NEXT: .cfi_offset s0, -16 +; RV64I-NEXT: .cfi_offset s1, -24 +; RV64I-NEXT: lui a2, %hi(.LCPI13_0) +; RV64I-NEXT: ld a2, %lo(.LCPI13_0)(a2) +; RV64I-NEXT: neg a3, a1 +; RV64I-NEXT: and a1, a1, a3 +; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: srli a1, a1, 58 +; RV64I-NEXT: lui a2, %hi(.LCPI13_1) +; RV64I-NEXT: addi a2, a2, %lo(.LCPI13_1) +; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: lbu s0, 0(a1) +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: call use64 +; RV64I-NEXT: sll a0, s1, s0 +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: shl_cttz_multiuse_i64: +; RV64ZBB: # %bb.0: # %entry +; RV64ZBB-NEXT: addi sp, sp, -32 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 32 +; RV64ZBB-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: .cfi_offset ra, -8 +; RV64ZBB-NEXT: .cfi_offset s0, -16 +; RV64ZBB-NEXT: .cfi_offset s1, -24 +; RV64ZBB-NEXT: mv s0, a0 +; RV64ZBB-NEXT: ctz s1, a1 +; RV64ZBB-NEXT: mv a0, s1 +; RV64ZBB-NEXT: call use64 +; RV64ZBB-NEXT: sll a0, s0, s1 +; RV64ZBB-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: addi sp, sp, 32 +; RV64ZBB-NEXT: ret +entry: + %cttz = call i64 @llvm.cttz.i64(i64 %y, i1 true) + call void @use64(i64 %cttz) + %res = shl i64 %x, %cttz + ret i64 %res +} + +declare void @use32(i32 signext) +declare void @use64(i64)
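
For reference: where Zbb is unavailable, the scalar cttz in the checks above is the classic de Bruijn lookup. The neg/and pair isolates the lowest set bit, the mul by 0x077CB531 (materialized as "lui a2, 30667; addi a2, a2, 1329") followed by "srli 27" produces a 5-bit table index, and the lbu from .LCPI*_0 reads the bit position; the i64 variants do the same with a 64-bit constant loaded via ld and "srli 58". A minimal C sketch of that expansion, assuming the standard permutation table for this constant rather than the exact .LCPI bytes emitted by the backend:

    /* Illustrative only; the bytes of .LCPI*_0 encode this same permutation
       for the de Bruijn constant 0x077CB531. */
    static const unsigned char DeBruijnTable[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};

    unsigned cttz32(unsigned y) {
        /* y & -y isolates the lowest set bit (the neg/and in the checks). */
        unsigned lsb = y & (0u - y);
        /* Multiply by the de Bruijn constant and keep the top five bits as
           a table index (the mul/srli-27/lbu sequence in the checks). */
        return DeBruijnTable[(lsb * 0x077CB531u) >> 27];
    }

With +zbb the entire sequence collapses to a single ctz (or ctzw under -riscv-experimental-rv64-legal-i32), which is what the RV32ZBB/RV64ZBB prefixes verify.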