Skip to content

Commit

Permalink
[KnownBits] Return zero instead of unknown for always poison shifts
Browse files Browse the repository at this point in the history
For always-poison shifts, any KnownBits return value is valid.
Currently we return unknown, but returning zero is generally more
profitable. We had some code in ValueTracking that tried to do this,
but it was actually dead code.

Differential Revision: https://reviews.llvm.org/D150648
  • Loading branch information
nikic committed May 23, 2023
1 parent 35ce741 commit f7d1baa
Show file tree
Hide file tree
Showing 12 changed files with 39 additions and 84 deletions.
7 changes: 0 additions & 7 deletions llvm/lib/Analysis/ValueTracking.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -994,13 +994,6 @@ static void computeKnownBitsFromShiftOperator(

if (ShiftAmtIsConstant) {
Known = KF(Known2, Known);

// If the known bits conflict, this must be an overflowing left shift, so
// the shift result is poison. We can return anything we want. Choose 0 for
// the best folding opportunity.
if (Known.hasConflict())
Known.setAllZero();

return;
}

Expand Down
18 changes: 12 additions & 6 deletions llvm/lib/Support/KnownBits.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -183,9 +183,11 @@ KnownBits KnownBits::shl(const KnownBits &LHS, const KnownBits &RHS) {
unsigned MinTrailingZeros = LHS.countMinTrailingZeros();

APInt MinShiftAmount = RHS.getMinValue();
if (MinShiftAmount.uge(BitWidth))
// Always poison. Return unknown because we don't like returning conflict.
if (MinShiftAmount.uge(BitWidth)) {
// Always poison. Return zero because we don't like returning conflict.
Known.setAllZero();
return Known;
}

// Minimum shift amount low bits are known zero.
MinTrailingZeros += MinShiftAmount.getZExtValue();
Expand Down Expand Up @@ -240,9 +242,11 @@ KnownBits KnownBits::lshr(const KnownBits &LHS, const KnownBits &RHS) {

// Minimum shift amount high bits are known zero.
APInt MinShiftAmount = RHS.getMinValue();
if (MinShiftAmount.uge(BitWidth))
// Always poison. Return unknown because we don't like returning conflict.
if (MinShiftAmount.uge(BitWidth)) {
// Always poison. Return zero because we don't like returning conflict.
Known.setAllZero();
return Known;
}

MinLeadingZeros += MinShiftAmount.getZExtValue();
MinLeadingZeros = std::min(MinLeadingZeros, BitWidth);
Expand Down Expand Up @@ -295,9 +299,11 @@ KnownBits KnownBits::ashr(const KnownBits &LHS, const KnownBits &RHS) {

// Minimum shift amount high bits are known sign bits.
APInt MinShiftAmount = RHS.getMinValue();
if (MinShiftAmount.uge(BitWidth))
// Always poison. Return unknown because we don't like returning conflict.
if (MinShiftAmount.uge(BitWidth)) {
// Always poison. Return zero because we don't like returning conflict.
Known.setAllZero();
return Known;
}

if (MinLeadingZeros) {
MinLeadingZeros += MinShiftAmount.getZExtValue();
Expand Down
2 changes: 1 addition & 1 deletion llvm/test/Analysis/ScalarEvolution/ashr.ll
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ define i32 @t4(i32 %x, i32 %y) {
; CHECK-LABEL: 't4'
; CHECK-NEXT: Classifying expressions for: @t4
; CHECK-NEXT: %i0 = ashr exact i32 %x, 32
; CHECK-NEXT: --> %i0 U: full-set S: full-set
; CHECK-NEXT: --> %i0 U: [0,1) S: [0,1)
; CHECK-NEXT: Determining loop execution counts for: @t4
;
%i0 = ashr exact i32 %x, 32
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,7 @@ body: |
; CHECK-NEXT: %x:_(s32) = COPY $w0
; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 32
; CHECK-NEXT: %shift:_(s32) = G_ASHR %x, %lsb(s32)
; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %shift, 1
; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
; CHECK-NEXT: $w0 = COPY %shift(s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%lsb:_(s32) = G_CONSTANT i32 32
Expand All @@ -122,8 +121,7 @@ body: |
; CHECK-NEXT: %x:_(s32) = COPY $w0
; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 -1
; CHECK-NEXT: %shift:_(s32) = G_ASHR %x, %lsb(s32)
; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %shift, 1
; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
; CHECK-NEXT: $w0 = COPY %shift(s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%x:_(s32) = COPY $w0
%lsb:_(s32) = G_CONSTANT i32 -1
Expand Down
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/LoongArch/rotl-rotr.ll
Original file line number Diff line number Diff line change
Expand Up @@ -374,10 +374,9 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
; LA32-LABEL: rotl_64_mask_or_128_or_64:
; LA32: # %bb.0:
; LA32-NEXT: sll.w $a3, $a0, $a2
; LA32-NEXT: sub.w $a0, $zero, $a2
; LA32-NEXT: srl.w $a0, $a1, $a0
; LA32-NEXT: move $a1, $a3
; LA32-NEXT: move $a1, $zero
; LA32-NEXT: ret
;
; LA64-LABEL: rotl_64_mask_or_128_or_64:
Expand Down Expand Up @@ -499,10 +498,8 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
; LA32-LABEL: rotr_64_mask_or_128_or_64:
; LA32: # %bb.0:
; LA32-NEXT: srl.w $a3, $a1, $a2
; LA32-NEXT: sub.w $a1, $zero, $a2
; LA32-NEXT: sll.w $a1, $a0, $a1
; LA32-NEXT: move $a0, $a3
; LA32-NEXT: srl.w $a0, $a1, $a2
; LA32-NEXT: move $a1, $zero
; LA32-NEXT: ret
;
; LA64-LABEL: rotr_64_mask_or_128_or_64:
Expand Down
56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rotl-rotr.ll
Original file line number Diff line number Diff line change
Expand Up @@ -516,20 +516,15 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotl_32_mask_or_64_or_32:
; RV32I: # %bb.0:
; RV32I-NEXT: ori a2, a1, 64
; RV32I-NEXT: sll a2, a0, a2
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: ori a1, a1, 32
; RV32I-NEXT: srl a0, a0, a1
; RV32I-NEXT: or a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: rotl_32_mask_or_64_or_32:
; RV64I: # %bb.0:
; RV64I-NEXT: sllw a2, a0, a1
; RV64I-NEXT: negw a1, a1
; RV64I-NEXT: srlw a0, a0, a1
; RV64I-NEXT: or a0, a2, a0
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: rotl_32_mask_or_64_or_32:
Expand Down Expand Up @@ -670,20 +665,13 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotr_32_mask_or_64_or_32:
; RV32I: # %bb.0:
; RV32I-NEXT: ori a2, a1, 64
; RV32I-NEXT: srl a2, a0, a2
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: ori a1, a1, 32
; RV32I-NEXT: sll a0, a0, a1
; RV32I-NEXT: or a0, a2, a0
; RV32I-NEXT: ori a1, a1, 64
; RV32I-NEXT: srl a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: rotr_32_mask_or_64_or_32:
; RV64I: # %bb.0:
; RV64I-NEXT: srlw a2, a0, a1
; RV64I-NEXT: negw a1, a1
; RV64I-NEXT: sllw a0, a0, a1
; RV64I-NEXT: or a0, a2, a0
; RV64I-NEXT: srlw a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: rotr_32_mask_or_64_or_32:
Expand Down Expand Up @@ -1013,28 +1001,23 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotl_64_mask_or_128_or_64:
; RV32I: # %bb.0:
; RV32I-NEXT: sll a3, a0, a2
; RV32I-NEXT: neg a0, a2
; RV32I-NEXT: srl a0, a1, a0
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: rotl_64_mask_or_128_or_64:
; RV64I: # %bb.0:
; RV64I-NEXT: ori a2, a1, 128
; RV64I-NEXT: sll a2, a0, a2
; RV64I-NEXT: negw a1, a1
; RV64I-NEXT: ori a1, a1, 64
; RV64I-NEXT: srl a0, a0, a1
; RV64I-NEXT: or a0, a2, a0
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: rotl_64_mask_or_128_or_64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sll a3, a0, a2
; RV32ZBB-NEXT: neg a0, a2
; RV32ZBB-NEXT: srl a0, a1, a0
; RV32ZBB-NEXT: mv a1, a3
; RV32ZBB-NEXT: li a1, 0
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: rotl_64_mask_or_128_or_64:
Expand All @@ -1044,10 +1027,9 @@ define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
;
; RV32XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
; RV32XTHEADBB: # %bb.0:
; RV32XTHEADBB-NEXT: sll a3, a0, a2
; RV32XTHEADBB-NEXT: neg a0, a2
; RV32XTHEADBB-NEXT: srl a0, a1, a0
; RV32XTHEADBB-NEXT: mv a1, a3
; RV32XTHEADBB-NEXT: li a1, 0
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
Expand Down Expand Up @@ -1359,28 +1341,20 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotr_64_mask_or_128_or_64:
; RV32I: # %bb.0:
; RV32I-NEXT: srl a3, a1, a2
; RV32I-NEXT: neg a1, a2
; RV32I-NEXT: sll a1, a0, a1
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: srl a0, a1, a2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: rotr_64_mask_or_128_or_64:
; RV64I: # %bb.0:
; RV64I-NEXT: ori a2, a1, 128
; RV64I-NEXT: srl a2, a0, a2
; RV64I-NEXT: negw a1, a1
; RV64I-NEXT: ori a1, a1, 64
; RV64I-NEXT: sll a0, a0, a1
; RV64I-NEXT: or a0, a2, a0
; RV64I-NEXT: ori a1, a1, 128
; RV64I-NEXT: srl a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: rotr_64_mask_or_128_or_64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: srl a3, a1, a2
; RV32ZBB-NEXT: neg a1, a2
; RV32ZBB-NEXT: sll a1, a0, a1
; RV32ZBB-NEXT: mv a0, a3
; RV32ZBB-NEXT: srl a0, a1, a2
; RV32ZBB-NEXT: li a1, 0
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: rotr_64_mask_or_128_or_64:
Expand All @@ -1390,10 +1364,8 @@ define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
;
; RV32XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
; RV32XTHEADBB: # %bb.0:
; RV32XTHEADBB-NEXT: srl a3, a1, a2
; RV32XTHEADBB-NEXT: neg a1, a2
; RV32XTHEADBB-NEXT: sll a1, a0, a1
; RV32XTHEADBB-NEXT: mv a0, a3
; RV32XTHEADBB-NEXT: srl a0, a1, a2
; RV32XTHEADBB-NEXT: li a1, 0
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
Expand Down
5 changes: 0 additions & 5 deletions llvm/test/Transforms/InstCombine/not-add.ll
Original file line number Diff line number Diff line change
Expand Up @@ -170,11 +170,6 @@ cond.end:
define void @pr50370(i32 %x) {
; CHECK-LABEL: @pr50370(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[X:%.*]], 1
; CHECK-NEXT: [[B15:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537)), [[XOR]]
; CHECK-NEXT: [[B12:%.*]] = add nsw i32 [[B15]], ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537))
; CHECK-NEXT: [[B:%.*]] = xor i32 [[B12]], -1
; CHECK-NEXT: store i32 [[B]], ptr undef, align 4
; CHECK-NEXT: ret void
;
entry:
Expand Down
2 changes: 1 addition & 1 deletion llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ define i1 @oss_fuzz_32759(i1 %y, i1 %c1) {
; CHECK: cond.true:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: ret i1 [[C1]]
;
entry:
br i1 %c1, label %cond.true, label %end
Expand Down
6 changes: 0 additions & 6 deletions llvm/test/Transforms/InstCombine/shift.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1743,12 +1743,6 @@ define void @ashr_out_of_range(ptr %A) {
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26135
define void @ashr_out_of_range_1(ptr %A) {
; CHECK-LABEL: @ashr_out_of_range_1(
; CHECK-NEXT: [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[G11:%.*]] = getelementptr i177, ptr [[A]], i64 -1
; CHECK-NEXT: [[B24_LOBIT:%.*]] = ashr i177 [[L]], 175
; CHECK-NEXT: [[TMP1:%.*]] = trunc i177 [[B24_LOBIT]] to i64
; CHECK-NEXT: [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP1]]
; CHECK-NEXT: store i177 0, ptr [[G62]], align 4
; CHECK-NEXT: ret void
;
%L = load i177, ptr %A, align 4
Expand Down
2 changes: 1 addition & 1 deletion llvm/unittests/Analysis/ValueTrackingTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -694,7 +694,7 @@ TEST_F(ValueTrackingTest, ComputeNumSignBits_PR32045) {
" %A = ashr i32 %a, -1\n"
" ret i32 %A\n"
"}\n");
EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u);
EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 32u);
}

// No guarantees for canonical IR in this analysis, so this just bails out.
Expand Down
6 changes: 3 additions & 3 deletions llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1630,12 +1630,12 @@ TEST_F(AArch64GISelMITest, TestInvalidQueries) {
KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);


// We don't know what the result of the shift is, but we should not crash
// Result can be anything, but we should not crash.
EXPECT_TRUE(EqSizeRes.One.isZero());
EXPECT_TRUE(EqSizeRes.Zero.isZero());
EXPECT_TRUE(EqSizeRes.Zero.isAllOnes());

EXPECT_TRUE(BiggerSizeRes.One.isZero());
EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
EXPECT_TRUE(BiggerSizeRes.Zero.isAllOnes());
}

TEST_F(AArch64GISelMITest, TestKnownBitsAssertZext) {
Expand Down
4 changes: 2 additions & 2 deletions llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1456,10 +1456,10 @@ TEST_F(AArch64GISelMITest, TestVectorInvalidQueries) {
KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);

EXPECT_TRUE(EqSizeRes.One.isZero());
EXPECT_TRUE(EqSizeRes.Zero.isZero());
EXPECT_TRUE(EqSizeRes.Zero.isAllOnes());

EXPECT_TRUE(BiggerSizeRes.One.isZero());
EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
EXPECT_TRUE(BiggerSizeRes.Zero.isAllOnes());
}

TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertZext) {
Expand Down

0 comments on commit f7d1baa

Please sign in to comment.