JIT: Slightly refactor handling of conditions/conditional branches in the backend #82020

Merged 26 commits on Feb 16, 2023.
The diff below shows changes from 21 of the commits.

Commits:
5d60b0f
JIT: Refactor representation of conditions/conditional branches in th…
jakobbotsch Feb 12, 2023
265c2a6
Remove comment that no longer describes a special case
jakobbotsch Feb 13, 2023
44b955a
Remove unnecessary assert
jakobbotsch Feb 13, 2023
5d39258
Handle a peephole
jakobbotsch Feb 13, 2023
34a0130
Do not remove new nodes
jakobbotsch Feb 13, 2023
1375915
Remove some dead code
jakobbotsch Feb 13, 2023
4476e53
Bash JTRUE to JCC instead of inserting new node
jakobbotsch Feb 13, 2023
8d6a8fb
Move an FP peephole to lowering
jakobbotsch Feb 13, 2023
8c5ba95
Run jit-format
jakobbotsch Feb 13, 2023
6b99b98
Remove more dead code
jakobbotsch Feb 13, 2023
35ca679
Support removing JCCs when optimizing branches after lowering
jakobbotsch Feb 13, 2023
3684a16
Refactor a bit, increase lookahead for JCC in emitter peephole
jakobbotsch Feb 13, 2023
d3a0de7
Allow liveness to remove CMP/TEST/BT when GTF_SET_FLAGS is not set
jakobbotsch Feb 13, 2023
c92db68
Slightly more refactoring
jakobbotsch Feb 13, 2023
ebe7bd7
Merge branch 'main' of github.com:dotnet/runtime into cpu-flag-nodes-…
jakobbotsch Feb 13, 2023
40b4734
Switch to IsInvariantInRange
jakobbotsch Feb 13, 2023
ce2aa81
Set GTF_SET_FLAGS on a GT_CMP node created by decomposition
jakobbotsch Feb 13, 2023
c0f2ac8
Make peephole more robust
jakobbotsch Feb 13, 2023
a23e52a
Make another peephole more robust
jakobbotsch Feb 13, 2023
106d478
Fix reversing/swapping for new bittest compares
jakobbotsch Feb 13, 2023
cb0c29d
Oops
jakobbotsch Feb 13, 2023
46e4f8a
Update src/coreclr/jit/codegenxarch.cpp
jakobbotsch Feb 14, 2023
29c9547
Update src/coreclr/jit/fgopt.cpp
jakobbotsch Feb 14, 2023
5e1b1df
Guard optimizations moved out of OptimizeConstCompare
jakobbotsch Feb 14, 2023
ddeb70c
Reformat a comment
jakobbotsch Feb 14, 2023
53fa297
Add missing early out in genTryFindFlagsConsumer
jakobbotsch Feb 14, 2023
src/coreclr/jit/codegen.h (6 changes: 4 additions & 2 deletions)
@@ -902,6 +902,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

void genCompareFloat(GenTree* treeNode);
void genCompareInt(GenTree* treeNode);
#ifdef TARGET_XARCH
bool genCanAvoidEmittingCompareAgainstZero(GenTree* tree, var_types opType);
GenTreeCC* genTryFindFlagsConsumer(GenTree* flagsProducer);
#endif

#ifdef FEATURE_SIMD
#ifdef TARGET_ARM64
@@ -1077,7 +1081,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

#ifdef TARGET_XARCH
void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
void genCodeForBT(GenTreeOp* bt);
#endif // TARGET_XARCH

void genCodeForCast(GenTreeOp* tree);
@@ -1192,7 +1195,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes));
void genJmpMethod(GenTree* jmp);
BasicBlock* genCallFinally(BasicBlock* block);
void genCodeForJumpTrue(GenTreeOp* jtrue);
#if defined(TARGET_LOONGARCH64)
// TODO: refactor for LA.
void genCodeForJumpCompare(GenTreeOp* tree);
src/coreclr/jit/codegenarm.cpp (4 changes: 0 additions & 4 deletions)
@@ -1257,13 +1257,9 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();

genConsumeIfReg(op1);
genConsumeIfReg(op2);

if (varTypeIsFloating(op1Type))
{
assert(op1Type == op2Type);
assert(!tree->OperIs(GT_CMP));
emit->emitInsBinary(INS_vcmp, emitTypeSize(op1Type), op1, op2);
// vmrs with register 0xf has special meaning of transferring flags
emit->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
src/coreclr/jit/codegenarm64.cpp (2 changes: 1 addition & 1 deletion)
@@ -4578,7 +4578,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
// We don't support swapping op1 and op2 to generate cmp reg, imm
assert(!op1->isContainedIntOrIImmed());

instruction ins = tree->OperIs(GT_TEST_EQ, GT_TEST_NE) ? INS_tst : INS_cmp;
instruction ins = tree->OperIs(GT_TEST_EQ, GT_TEST_NE, GT_TEST) ? INS_tst : INS_cmp;

if (op2->isContainedIntOrIImmed())
{
src/coreclr/jit/codegenarmarch.cpp (15 changes: 3 additions & 12 deletions)
@@ -349,16 +349,11 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
#ifdef TARGET_ARM64
case GT_TEST_EQ:
case GT_TEST_NE:
// On ARM64 genCodeForCompare does not consume its own operands because
// genCodeForBinary also has this behavior and it can end up calling
// genCodeForCompare when generating compare chains for GT_AND.
// Thus, we must do it here.
case GT_TEST_EQ:
case GT_CMP:
case GT_TEST:
genConsumeOperands(treeNode->AsOp());
#endif // TARGET_ARM64
genCodeForCompare(treeNode->AsOp());
break;

@@ -368,10 +363,6 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
break;
#endif

case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;

#ifdef TARGET_ARM64
case GT_JCMP:
genCodeForJumpCompare(treeNode->AsOp());
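The comment visible in the codegenarmarch.cpp hunk above describes why the ARM64 path consumes compare operands at the dispatch site rather than inside genCodeForCompare: genCodeForBinary also consumes its operands and can end up calling genCodeForCompare when generating compare chains for GT_AND, so the callee must not consume them a second time. The following toy model is a standalone sketch of that single-consumption invariant; the names and structure are hypothetical and are not the actual JIT sources.

```cpp
#include <cassert>
#include <cstdio>

// Toy stand-in for a GenTree operand; only tracks whether it was consumed.
struct Operand
{
    bool consumed = false;
};

// Models genConsumeOperands: each operand may be consumed exactly once.
void ConsumeOperands(Operand& op)
{
    assert(!op.consumed && "operand consumed twice");
    op.consumed = true;
}

// Models genCodeForCompare on ARM64: it expects its caller to have consumed
// the operands already, so it does not consume them itself.
void EmitCompare(Operand& op)
{
    assert(op.consumed && "caller must consume operands before emitting");
    std::puts("cmp ...");
}

// Models genCodeForBinary handling a GT_AND that lowering turned into a
// compare chain: it consumes the operands and then reuses the compare path.
void EmitBinary(Operand& op, bool isCompareChain)
{
    ConsumeOperands(op);
    if (isCompareChain)
    {
        EmitCompare(op); // would double-consume if EmitCompare consumed again
        return;
    }
    std::puts("and ...");
}

int main()
{
    Operand relop, chain;
    ConsumeOperands(relop); // models the dispatch site in genCodeForTreeNode
    EmitCompare(relop);
    EmitBinary(chain, /* isCompareChain */ true);
    return 0;
}
```

The asserts make the invariant explicit: whichever path reaches the compare emitter, the operands are consumed exactly once, by the caller.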
src/coreclr/jit/codegenlinear.cpp (45 changes: 0 additions & 45 deletions)
@@ -2588,51 +2588,6 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode)
#endif // !defined(TARGET_64BIT)

#ifndef TARGET_LOONGARCH64
//------------------------------------------------------------------------
// genCodeForJumpTrue: Generate code for a GT_JTRUE node.
//
// Arguments:
// jtrue - The node
//
void CodeGen::genCodeForJumpTrue(GenTreeOp* jtrue)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
assert(jtrue->OperIs(GT_JTRUE));

GenTreeOp* relop = jtrue->gtGetOp1()->AsOp();
GenCondition condition = GenCondition::FromRelop(relop);

if (condition.PreferSwap())
{
condition = GenCondition::Swap(condition);
}

#if defined(TARGET_XARCH)
if ((condition.GetCode() == GenCondition::FNEU) &&
(relop->gtGetOp1()->GetRegNum() == relop->gtGetOp2()->GetRegNum()) &&
!relop->gtGetOp1()->isUsedFromSpillTemp() && !relop->gtGetOp2()->isUsedFromSpillTemp())
{
// For floating point, `x != x` is a common way of
// checking for NaN. So, in the case where both
// operands are the same, we can optimize codegen
// to only do a single check.

condition = GenCondition(GenCondition::P);
}

if (relop->MarkedForSignJumpOpt())
{
// If relop was previously marked for a signed jump check optimization because of SF flag
// reuse, replace jge/jl with jns/js.

assert(relop->OperGet() == GT_LT || relop->OperGet() == GT_GE);
condition = (relop->OperGet() == GT_LT) ? GenCondition(GenCondition::S) : GenCondition(GenCondition::NS);
}

#endif

inst_JCC(condition, compiler->compCurBB->bbJumpDest);
}

//------------------------------------------------------------------------
// genCodeForJcc: Generate code for a GT_JCC node.
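The genCodeForJumpTrue removed above carried two xarch-specific tweaks, as its comments explain: narrowing the FNEU condition to a plain parity check when both operands of a floating-point inequality are the same register (the `x != x` NaN idiom), and replacing jge/jl with jns/js when a relop was marked for the signed-jump optimization that reuses the sign flag. The following standalone C++ sketch is illustrative only and is not part of the PR; it just shows the two source patterns those checks target.

```cpp
#include <cstdio>
#include <limits>

// Pattern 1: `x != x` is the usual NaN test. When both compare operands are
// the same register, an xarch ucomiss/ucomisd can only report "not equal"
// through the unordered (parity) flag, so a single jp/setp suffices instead
// of the generic two-check FNEU sequence.
bool IsNaN(float x)
{
    return x != x;
}

// Pattern 2: when a relop such as (x - y) < 0 immediately feeds a branch, the
// sign flag already produced by the subtraction can be reused, letting the
// JIT emit js/jns instead of a separate compare followed by jl/jge.
const char* Sign(int x, int y)
{
    return (x - y) < 0 ? "negative" : "non-negative";
}

int main()
{
    std::printf("%d\n", IsNaN(std::numeric_limits<float>::quiet_NaN())); // 1
    std::printf("%d\n", IsNaN(1.0f));                                    // 0
    std::printf("%s\n", Sign(3, 5));                                     // negative
    return 0;
}
```

Per the commit list above ("Move an FP peephole to lowering"), the first of these checks appears to have moved into lowering rather than being dropped along with genCodeForJumpTrue.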