diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index 755724e74f3ce..023e097c80981 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -5260,7 +5260,7 @@ class AssertionPropFlowCallback
    {
        ASSERT_TP pAssertionOut;

-        if (predBlock->KindIs(BBJ_COND) && (predBlock->bbJumpDest == block))
+        if (predBlock->KindIs(BBJ_COND) && predBlock->HasJumpTo(block))
        {
            pAssertionOut = mJumpDestOut[predBlock->bbNum];

@@ -5462,7 +5462,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen()
            optPrintAssertionIndices(block->bbAssertionGen);
            if (block->KindIs(BBJ_COND))
            {
-                printf(" => " FMT_BB " valueGen = ", block->bbJumpDest->bbNum);
+                printf(" => " FMT_BB " valueGen = ", block->GetJumpDest()->bbNum);
                optPrintAssertionIndices(jumpDestGen[block->bbNum]);
            }
            printf("\n");
@@ -6022,7 +6022,7 @@ PhaseStatus Compiler::optAssertionPropMain()
            optDumpAssertionIndices(" out = ", block->bbAssertionOut, "\n");
            if (block->KindIs(BBJ_COND))
            {
-                printf(" " FMT_BB " = ", block->bbJumpDest->bbNum);
+                printf(" " FMT_BB " = ", block->GetJumpDest()->bbNum);
                optDumpAssertionIndices(bbJtrueAssertionOut[block->bbNum], "\n");
            }
        }
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 34d1156b0c3c9..d24e360eb82c4 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -1449,7 +1449,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)

    /* Record the jump kind in the block */

-    block->SetBBJumpKind(jumpKind DEBUG_ARG(this));
+    block->SetJumpKind(jumpKind DEBUG_ARG(this));

    if (jumpKind == BBJ_THROW)
    {
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index fb44614fec95e..fb48792ca856c 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -514,21 +514,28 @@ struct BasicBlock : private LIR::Range
    BBjumpKinds bbJumpKind; // jump (if any) at the end of this block

+    /* The following union describes the jump target(s) of this block */
+    union {
+        unsigned    bbJumpOffs; // PC offset (temporary only)
+        BasicBlock* bbJumpDest; // basic block
+        BBswtDesc*  bbJumpSwt;  // switch descriptor
+    };
+
public:
-    BBjumpKinds GetBBJumpKind() const
+    BBjumpKinds GetJumpKind() const
    {
        return bbJumpKind;
    }

-    void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* compiler))
+    void SetJumpKind(BBjumpKinds jumpKind DEBUG_ARG(Compiler* compiler))
    {
#ifdef DEBUG
        // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout
        // TODO: Change assert to check if compiler is in appropriate optimization phase to use BBJ_NONE
        // (right now, this assertion does the null check to avoid unused variable warnings)
-        assert((kind != BBJ_NONE) || (compiler != nullptr));
+        assert((jumpKind != BBJ_NONE) || (compiler != nullptr));
#endif // DEBUG
-        bbJumpKind = kind;
+        bbJumpKind = jumpKind;
    }

    BasicBlock* Prev() const
@@ -569,12 +576,12 @@ struct BasicBlock : private LIR::Range
        return (bbNext == nullptr);
    }

-    bool PrevIs(BasicBlock* block) const
+    bool PrevIs(const BasicBlock* block) const
    {
        return (bbPrev == block);
    }

-    bool NextIs(BasicBlock* block) const
+    bool NextIs(const BasicBlock* block) const
    {
        return (bbNext == block);
    }

@@ -583,12 +590,61 @@ struct BasicBlock : private LIR::Range

    bool IsFirstColdBlock(Compiler* compiler) const;

-    /* The following union describes the jump target(s) of this block */
-    union {
-        unsigned    bbJumpOffs; // PC offset (temporary only)
-        BasicBlock* bbJumpDest; // basic block
-        BBswtDesc*  bbJumpSwt;  // switch descriptor
-    };
+    unsigned GetJumpOffs() const
+    {
+        return bbJumpOffs;
+    }
+
+    void SetJumpOffs(unsigned jumpOffs)
+    {
+        bbJumpOffs = jumpOffs;
+    }
+
+    BasicBlock* GetJumpDest() const
+    {
+        return bbJumpDest;
+    }
+
+    void SetJumpDest(BasicBlock* jumpDest)
+    {
+        bbJumpDest = jumpDest;
+    }
+
+    void SetJumpKindAndTarget(BBjumpKinds jumpKind, BasicBlock* jumpDest)
+    {
+        assert(jumpDest != nullptr);
+        bbJumpKind = jumpKind;
+        bbJumpDest = jumpDest;
+        assert(KindIs(BBJ_ALWAYS, BBJ_CALLFINALLY, BBJ_COND, BBJ_EHCATCHRET, BBJ_LEAVE));
+    }
+
+    bool HasJumpTo(const BasicBlock* jumpDest) const
+    {
+        return (bbJumpDest == jumpDest);
+    }
+
+    bool JumpsToNext() const
+    {
+        return (bbJumpDest == bbNext);
+    }
+
+    BBswtDesc* GetJumpSwt() const
+    {
+        return bbJumpSwt;
+    }
+
+    void SetJumpSwt(BBswtDesc* jumpSwt)
+    {
+        bbJumpSwt = jumpSwt;
+    }
+
+    void SetJumpKindAndTarget(BBjumpKinds jumpKind, BBswtDesc* jumpSwt)
+    {
+        assert(jumpKind == BBJ_SWITCH);
+        assert(jumpSwt != nullptr);
+        bbJumpKind = jumpKind;
+        bbJumpSwt  = jumpSwt;
+    }

    BasicBlockFlags bbFlags;

@@ -1617,7 +1673,7 @@ inline BBArrayIterator BBSwitchTargetList::end() const
inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
{
    assert(block != nullptr);
-    switch (block->GetBBJumpKind())
+    switch (block->GetJumpKind())
    {
        case BBJ_THROW:
        case BBJ_RETURN:
@@ -1633,7 +1689,7 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
        case BBJ_ALWAYS:
        case BBJ_EHCATCHRET:
        case BBJ_LEAVE:
-            m_succs[0] = block->bbJumpDest;
+            m_succs[0] = block->GetJumpDest();
            m_begin    = &m_succs[0];
            m_end      = &m_succs[1];
            break;
@@ -1650,23 +1706,23 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)

            // If both fall-through and branch successors are identical, then only include
            // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
-            if (block->NextIs(block->bbJumpDest))
+            if (block->JumpsToNext())
            {
                m_end = &m_succs[1];
            }
            else
            {
-                m_succs[1] = block->bbJumpDest;
+                m_succs[1] = block->GetJumpDest();
                m_end      = &m_succs[2];
            }
            break;

        case BBJ_SWITCH:
            // We don't use the m_succs in-line data for switches; use the existing jump table in the block.
-            assert(block->bbJumpSwt != nullptr);
-            assert(block->bbJumpSwt->bbsDstTab != nullptr);
-            m_begin = block->bbJumpSwt->bbsDstTab;
-            m_end   = block->bbJumpSwt->bbsDstTab + block->bbJumpSwt->bbsCount;
+            assert(block->GetJumpSwt() != nullptr);
+            assert(block->GetJumpSwt()->bbsDstTab != nullptr);
+            m_begin = block->GetJumpSwt()->bbsDstTab;
+            m_end   = block->GetJumpSwt()->bbsDstTab + block->GetJumpSwt()->bbsCount;
            break;

        default:
diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp
index 874284cab4dad..d6df2be573d9c 100644
--- a/src/coreclr/jit/codegenarm.cpp
+++ b/src/coreclr/jit/codegenarm.cpp
@@ -125,17 +125,17 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)

    assert(!block->IsLast());
    assert(block->Next()->KindIs(BBJ_ALWAYS));
-    assert(block->Next()->bbJumpDest != NULL);
-    assert(block->Next()->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
+    assert(!block->Next()->HasJumpTo(nullptr));
+    assert(block->Next()->GetJumpDest()->bbFlags & BBF_FINALLY_TARGET);

-    bbFinallyRet = block->Next()->bbJumpDest;
+    bbFinallyRet = block->Next()->GetJumpDest();

    // Load the address where the finally funclet should return into LR.
    // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
    genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);

    // Jump to the finally BB
-    inst_JMP(EJ_jmp, block->bbJumpDest);
+    inst_JMP(EJ_jmp, block->GetJumpDest());

    // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
    // jump target using bbJumpDest - that is already used to point
@@ -150,7 +150,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
// genEHCatchRet:
void CodeGen::genEHCatchRet(BasicBlock* block)
{
-    genMov32RelocatableDisplacement(block->bbJumpDest, REG_INTRET);
+    genMov32RelocatableDisplacement(block->GetJumpDest(), REG_INTRET);
}

//------------------------------------------------------------------------
@@ -633,8 +633,8 @@ void CodeGen::genJumpTable(GenTree* treeNode)
    noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
    assert(treeNode->OperGet() == GT_JMPTABLE);

-    unsigned     jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
-    BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
+    unsigned     jumpCount = compiler->compCurBB->GetJumpSwt()->bbsCount;
+    BasicBlock** jumpTable = compiler->compCurBB->GetJumpSwt()->bbsDstTab;
    unsigned     jmpTabBase;

    jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, false);
@@ -1299,7 +1299,7 @@ void CodeGen::genCodeForJTrue(GenTreeOp* jtrue)
    GenTree*  op  = jtrue->gtGetOp1();
    regNumber reg = genConsumeReg(op);
    inst_RV_RV(INS_tst, reg, reg, genActualType(op));
-    inst_JMP(EJ_ne, compiler->compCurBB->bbJumpDest);
+    inst_JMP(EJ_ne, compiler->compCurBB->GetJumpDest());
}

//------------------------------------------------------------------------
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 4fc3436df5155..b4b411377163f 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -2158,7 +2158,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
    {
        GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_R0, REG_SPBASE, /* canSkip */ false);
    }
-    GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
+    GetEmitter()->emitIns_J(INS_bl_local, block->GetJumpDest());

    BasicBlock* const nextBlock = block->Next();

@@ -2181,7 +2181,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
        // handler. So turn off GC reporting for this single instruction.
        GetEmitter()->emitDisableGC();

-        BasicBlock* const jumpDest = nextBlock->bbJumpDest;
+        BasicBlock* const jumpDest = nextBlock->GetJumpDest();

        // Now go to where the finally funclet needs to return to.
        if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
@@ -2216,7 +2216,7 @@ void CodeGen::genEHCatchRet(BasicBlock* block)
{
    // For long address (default): `adrp + add` will be emitted.
    // For short address (proven later): `adr` will be emitted.
-    GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
+    GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->GetJumpDest(), REG_INTRET);
}

// move an immediate value into an integer register
@@ -3752,8 +3752,8 @@ void CodeGen::genJumpTable(GenTree* treeNode)
    noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
    assert(treeNode->OperGet() == GT_JMPTABLE);

-    unsigned     jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
-    BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
+    unsigned     jumpCount = compiler->compCurBB->GetJumpSwt()->bbsCount;
+    BasicBlock** jumpTable = compiler->compCurBB->GetJumpSwt()->bbsDstTab;

    unsigned jmpTabOffs;
    unsigned jmpTabBase;
@@ -4654,7 +4654,7 @@ void CodeGen::genCodeForJTrue(GenTreeOp* jtrue)

    GenTree*  op  = jtrue->gtGetOp1();
    regNumber reg = genConsumeReg(op);
-    GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(op), compiler->compCurBB->bbJumpDest, reg);
+    GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(op), compiler->compCurBB->GetJumpDest(), reg);
}

//------------------------------------------------------------------------
@@ -4872,7 +4872,7 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)
        instruction ins = (cc.GetCode() == GenCondition::EQ) ? INS_tbz : INS_tbnz;
        int         imm = genLog2((size_t)compareImm);

-        GetEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->bbJumpDest, reg, imm);
+        GetEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->GetJumpDest(), reg, imm);
    }
    else
    {
@@ -4880,7 +4880,7 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)

        instruction ins = (cc.GetCode() == GenCondition::EQ) ? INS_cbz : INS_cbnz;

-        GetEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->bbJumpDest, reg);
+        GetEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->GetJumpDest(), reg);
    }
}

diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index b6659f970a542..a293aa706f5f7 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -376,13 +376,13 @@ void CodeGen::genMarkLabelsForCodegen()

    for (BasicBlock* const block : compiler->Blocks())
    {
-        switch (block->GetBBJumpKind())
+        switch (block->GetJumpKind())
        {
            case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair.
            case BBJ_COND:
            case BBJ_EHCATCHRET:
-                JITDUMP(" " FMT_BB " : branch target\n", block->bbJumpDest->bbNum);
-                block->bbJumpDest->bbFlags |= BBF_HAS_LABEL;
+                JITDUMP(" " FMT_BB " : branch target\n", block->GetJumpDest()->bbNum);
+                block->GetJumpDest()->bbFlags |= BBF_HAS_LABEL;
                break;

            case BBJ_SWITCH:
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 5ff4d9cc1b6f1..d828c62495b07 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -622,7 +622,7 @@ void CodeGen::genCodeForBBlist()
        {
            // We only need the NOP if we're not going to generate any more code as part of the block end.

-            switch (block->GetBBJumpKind())
+            switch (block->GetJumpKind())
            {
                case BBJ_ALWAYS:
                case BBJ_THROW:
@@ -665,7 +665,7 @@ void CodeGen::genCodeForBBlist()

        /* Do we need to generate a jump or return? */

-        switch (block->GetBBJumpKind())
+        switch (block->GetJumpKind())
        {
            case BBJ_RETURN:
                genExitCode(block);
@@ -749,7 +749,7 @@ void CodeGen::genCodeForBBlist()
                // with a jump, do not remove jumps from such blocks.
                // Do not remove a jump between hot and cold regions.
                bool isRemovableJmpCandidate =
-                    !block->hasAlign() && !compiler->fgInDifferentRegions(block, block->bbJumpDest);
+                    !block->hasAlign() && !compiler->fgInDifferentRegions(block, block->GetJumpDest());

#ifdef TARGET_AMD64
                // AMD64 requires an instruction after a call instruction for unwinding
@@ -758,10 +758,10 @@ void CodeGen::genCodeForBBlist()
                isRemovableJmpCandidate = isRemovableJmpCandidate && !GetEmitter()->emitIsLastInsCall();
#endif // TARGET_AMD64

-                inst_JMP(EJ_jmp, block->bbJumpDest, isRemovableJmpCandidate);
+                inst_JMP(EJ_jmp, block->GetJumpDest(), isRemovableJmpCandidate);
            }
#else
-            inst_JMP(EJ_jmp, block->bbJumpDest);
+            inst_JMP(EJ_jmp, block->GetJumpDest());
#endif // TARGET_XARCH

                FALLTHROUGH;
@@ -782,9 +782,9 @@ void CodeGen::genCodeForBBlist()
                // block, even if one is not otherwise needed, to be able to calculate the size of this
                // loop (loop size is calculated by walking the instruction groups; see emitter::getLoopSize()).

-                if (block->bbJumpDest->isLoopAlign())
+                if (block->GetJumpDest()->isLoopAlign())
                {
-                    GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest);
+                    GetEmitter()->emitSetLoopBackEdge(block->GetJumpDest());

                    if (!block->IsLast())
                    {
@@ -2621,7 +2621,7 @@ void CodeGen::genCodeForJcc(GenTreeCC* jcc)
    assert(compiler->compCurBB->KindIs(BBJ_COND));
    assert(jcc->OperIs(GT_JCC));

-    inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest);
+    inst_JCC(jcc->gtCondition, compiler->compCurBB->GetJumpDest());
}

//------------------------------------------------------------------------
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index ee1bc3f0ac1d0..787dddf12daf2 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -1518,7 +1518,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
    {
        GetEmitter()->emitIns_R_R_I(INS_ori, EA_PTRSIZE, REG_A0, REG_SPBASE, 0);
    }
-    GetEmitter()->emitIns_J(INS_bl, block->bbJumpDest);
+    GetEmitter()->emitIns_J(INS_bl, block->GetJumpDest());

    BasicBlock* const nextBlock = block->Next();

@@ -1541,7 +1541,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
        // handler. So turn off GC reporting for this single instruction.
        GetEmitter()->emitDisableGC();

-        BasicBlock* const jumpDest = nextBlock->bbJumpDest;
+        BasicBlock* const jumpDest = nextBlock->GetJumpDest();

        // Now go to where the finally funclet needs to return to.
        if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
@@ -1574,7 +1574,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)

void CodeGen::genEHCatchRet(BasicBlock* block)
{
-    GetEmitter()->emitIns_R_L(INS_lea, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
+    GetEmitter()->emitIns_R_L(INS_lea, EA_PTRSIZE, block->GetJumpDest(), REG_INTRET);
}

// move an immediate value into an integer register
@@ -2935,8 +2935,8 @@ void CodeGen::genJumpTable(GenTree* treeNode)
    noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
    assert(treeNode->OperGet() == GT_JMPTABLE);

-    unsigned     jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
-    BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
+    unsigned     jumpCount = compiler->compCurBB->GetJumpSwt()->bbsCount;
+    BasicBlock** jumpTable = compiler->compCurBB->GetJumpSwt()->bbsDstTab;

    unsigned jmpTabOffs;
    unsigned jmpTabBase;
@@ -4327,7 +4327,7 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)
    assert(ins != INS_invalid);
    assert(regs != 0);

-    emit->emitIns_J(ins, compiler->compCurBB->bbJumpDest, regs); // 5-bits;
+    emit->emitIns_J(ins, compiler->compCurBB->GetJumpDest(), regs); // 5-bits;
}

//---------------------------------------------------------------------
@@ -4903,7 +4903,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)

        case GT_JCC:
        {
-            BasicBlock* tgtBlock = compiler->compCurBB->bbJumpDest;
+            BasicBlock* tgtBlock = compiler->compCurBB->GetJumpDest();
#if !FEATURE_FIXED_OUT_ARGS
            assert((tgtBlock->bbTgtStkDepth * sizeof(int) == genStackLevel) || isFramePointerUsed());
#endif // !FEATURE_FIXED_OUT_ARGS
diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index 4f5fe303cf101..8b9fb2b8ddf5b 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -1156,7 +1156,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
    {
        GetEmitter()->emitIns_R_R_I(INS_ori, EA_PTRSIZE, REG_A0, REG_SPBASE, 0);
    }
-    GetEmitter()->emitIns_J(INS_jal, block->bbJumpDest);
+    GetEmitter()->emitIns_J(INS_jal, block->GetJumpDest());

    BasicBlock* const nextBlock = block->Next();

@@ -1179,7 +1179,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
        // handler. So turn off GC reporting for this single instruction.
        GetEmitter()->emitDisableGC();

-        BasicBlock* const jumpDest = nextBlock->bbJumpDest;
+        BasicBlock* const jumpDest = nextBlock->GetJumpDest();

        // Now go to where the finally funclet needs to return to.
        if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
@@ -1212,7 +1212,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)

void CodeGen::genEHCatchRet(BasicBlock* block)
{
-    GetEmitter()->emitIns_R_L(INS_lea, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
+    GetEmitter()->emitIns_R_L(INS_lea, EA_PTRSIZE, block->GetJumpDest(), REG_INTRET);
}

// move an immediate value into an integer register
@@ -2581,8 +2581,8 @@ void CodeGen::genJumpTable(GenTree* treeNode)
    noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
    assert(treeNode->OperGet() == GT_JMPTABLE);

-    unsigned     jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
-    BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
+    unsigned     jumpCount = compiler->compCurBB->GetJumpSwt()->bbsCount;
+    BasicBlock** jumpTable = compiler->compCurBB->GetJumpSwt()->bbsDstTab;

    unsigned jmpTabOffs;
    unsigned jmpTabBase;
@@ -3968,7 +3968,7 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)
    assert(ins != INS_invalid);
    assert(regs != 0);

-    emit->emitIns_J(ins, compiler->compCurBB->bbJumpDest, regs); // 5-bits;
+    emit->emitIns_J(ins, compiler->compCurBB->GetJumpDest(), regs); // 5-bits;
}

//---------------------------------------------------------------------
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 869872e5062d1..0264d9e3e8586 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -228,7 +228,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
    {
        GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
    }
-    GetEmitter()->emitIns_J(INS_call, block->bbJumpDest);
+    GetEmitter()->emitIns_J(INS_call, block->GetJumpDest());

    if (block->bbFlags & BBF_RETLESS_CALL)
    {
@@ -253,7 +253,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
        GetEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER

-        BasicBlock* const jumpDest = nextBlock->bbJumpDest;
+        BasicBlock* const jumpDest = nextBlock->GetJumpDest();

        // Now go to where the finally funclet needs to return to.
        if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
@@ -316,7 +316,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
-        GetEmitter()->emitIns_J(INS_push_hide, nextBlock->bbJumpDest);
+        GetEmitter()->emitIns_J(INS_push_hide, nextBlock->GetJumpDest());
    }
    else
    {
@@ -325,7 +325,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
    }

    // Jump to the finally BB
-    inst_JMP(EJ_jmp, block->bbJumpDest);
+    inst_JMP(EJ_jmp, block->GetJumpDest());

#endif // !FEATURE_EH_FUNCLETS

@@ -348,7 +348,7 @@ void CodeGen::genEHCatchRet(BasicBlock* block)
    // Generate a RIP-relative
    //    lea reg, [rip + disp32] ; the RIP is implicit
    // which will be position-independent.
-    GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
+    GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->GetJumpDest(), REG_INTRET);
}

#else // !FEATURE_EH_FUNCLETS

@@ -1450,7 +1450,7 @@ void CodeGen::genCodeForJTrue(GenTreeOp* jtrue)
    GenTree*  op  = jtrue->gtGetOp1();
    regNumber reg = genConsumeReg(op);
    inst_RV_RV(INS_test, reg, reg, genActualType(op));
-    inst_JMP(EJ_jne, compiler->compCurBB->bbJumpDest);
+    inst_JMP(EJ_jne, compiler->compCurBB->GetJumpDest());
}

//------------------------------------------------------------------------
@@ -4270,8 +4270,8 @@ void CodeGen::genJumpTable(GenTree* treeNode)
    noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
    assert(treeNode->OperGet() == GT_JMPTABLE);

-    unsigned     jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
-    BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
+    unsigned     jumpCount = compiler->compCurBB->GetJumpSwt()->bbsCount;
+    BasicBlock** jumpTable = compiler->compCurBB->GetJumpSwt()->bbsDstTab;

    unsigned jmpTabOffs;
    unsigned jmpTabBase;
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 080d1fb4be13d..a3bed975f9dfa 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -521,7 +521,7 @@ static BasicBlockVisit VisitEHSuccessors(Compiler* comp, BasicBlock* block, TFun
            // will be yielded as a normal successor. Don't also yield as
            // an exceptional successor.
            BasicBlock* flowBlock = eh->ExFlowBlock();
-            if (!block->KindIs(BBJ_CALLFINALLY) || (block->bbJumpDest != flowBlock))
+            if (!block->KindIs(BBJ_CALLFINALLY) || !block->HasJumpTo(flowBlock))
            {
                RETURN_ON_ABORT(func(flowBlock));
            }
@@ -635,7 +635,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)

            for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
            {
-                if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
+                if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg))
                {
                    continue;
                }
@@ -649,7 +649,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)

                for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
                {
-                    if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
+                    if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg))
                    {
                        continue;
                    }
@@ -769,7 +769,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func)

            for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
            {
-                if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg))
+                if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg))
                {
                    continue;
                }
@@ -3224,7 +3224,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block)
    fgRemoveBlockAsPred(block);

    // Update jump kind after the scrub.
-    block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this));
+    block->SetJumpKind(BBJ_THROW DEBUG_ARG(this));

    // Any block with a throw is rare
    block->bbSetRunRarely();
@@ -3244,7 +3244,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block)
        leaveBlk->bbPreds = nullptr;

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
-        fgClearFinallyTargetBit(leaveBlk->bbJumpDest);
+        fgClearFinallyTargetBit(leaveBlk->GetJumpDest());
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
    }
}
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 9533447691a3a..315ab36e896dc 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -5972,7 +5972,7 @@ void emitter::emitSetLoopBackEdge(BasicBlock* loopTopBlock)
    // With (dstIG != nullptr), ensure that only back edges are tracked.
    // If there is forward jump, dstIG is not yet generated.
    //
-    // We don't rely on (block->bbJumpDest->bbNum <= block->bbNum) because the basic
+    // We don't rely on (block->GetJumpDest()->bbNum <= block->bbNum) because the basic
    // block numbering is not guaranteed to be sequential.
    if ((dstIG != nullptr) && (dstIG->igNum <= emitCurIG->igNum))
    {
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 4165d10147125..75078fc57f8d6 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -464,8 +464,8 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne

    // replace predecessor 'blockSwitch' with 'newTarget'
    //
-    unsigned     jumpCnt = blockSwitch->bbJumpSwt->bbsCount;
-    BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab;
+    unsigned     jumpCnt = blockSwitch->GetJumpSwt()->bbsCount;
+    BasicBlock** jumpTab = blockSwitch->GetJumpSwt()->bbsDstTab;

    unsigned i = 0;

@@ -537,7 +537,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas
    assert(block != nullptr);
    assert(fgPredsComputed);

-    switch (block->GetBBJumpKind())
+    switch (block->GetJumpKind())
    {
        case BBJ_CALLFINALLY:
        case BBJ_COND:
@@ -546,9 +546,9 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas
        case BBJ_EHFILTERRET:
        case BBJ_LEAVE: // This function can be called before import, so we still have BBJ_LEAVE

-            if (block->bbJumpDest == oldTarget)
+            if (block->HasJumpTo(oldTarget))
            {
-                block->bbJumpDest = newTarget;
+                block->SetJumpDest(newTarget);
                fgRemoveRefPred(oldTarget, block);
                fgAddRefPred(newTarget, block);
            }
            break;

        case BBJ_SWITCH:
        {
-            unsigned const     jumpCnt = block->bbJumpSwt->bbsCount;
-            BasicBlock** const jumpTab = block->bbJumpSwt->bbsDstTab;
+            unsigned const     jumpCnt = block->GetJumpSwt()->bbsCount;
+            BasicBlock** const jumpTab = block->GetJumpSwt()->bbsDstTab;
            bool               changed = false;

            for (unsigned i = 0; i < jumpCnt; i++)
@@ -2771,19 +2771,19 @@ void Compiler::fgLinkBasicBlocks()

    for (BasicBlock* const curBBdesc : Blocks())
    {
-        switch (curBBdesc->GetBBJumpKind())
+        switch (curBBdesc->GetJumpKind())
        {
            case BBJ_COND:
            case BBJ_ALWAYS:
            case BBJ_LEAVE:
            {
-                BasicBlock* const jumpDest = fgLookupBB(curBBdesc->bbJumpOffs);
-                curBBdesc->bbJumpDest      = jumpDest;
+                BasicBlock* const jumpDest = fgLookupBB(curBBdesc->GetJumpOffs());
+                curBBdesc->SetJumpDest(jumpDest);
                fgAddRefPred<initializingPreds>(jumpDest, curBBdesc, oldEdge);

-                if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum)
+                if (curBBdesc->GetJumpDest()->bbNum <= curBBdesc->bbNum)
                {
-                    fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc);
+                    fgMarkBackwardJump(curBBdesc->GetJumpDest(), curBBdesc);
                }

                // Is the next block reachable?
@@ -2823,8 +2823,8 @@ void Compiler::fgLinkBasicBlocks()

            case BBJ_SWITCH:
            {
-                unsigned     jumpCnt = curBBdesc->bbJumpSwt->bbsCount;
-                BasicBlock** jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab;
+                unsigned     jumpCnt = curBBdesc->GetJumpSwt()->bbsCount;
+                BasicBlock** jumpPtr = curBBdesc->GetJumpSwt()->bbsDstTab;

                do
                {
@@ -3329,14 +3329,14 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F
        switch (jmpKind)
        {
            case BBJ_SWITCH:
-                curBBdesc->bbJumpSwt = swtDsc;
+                curBBdesc->SetJumpSwt(swtDsc);
                break;

            case BBJ_COND:
            case BBJ_ALWAYS:
            case BBJ_LEAVE:
                noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET));
-                curBBdesc->bbJumpOffs = jmpAddr;
+                curBBdesc->SetJumpOffs(jmpAddr);
                break;

            default:
@@ -3678,9 +3678,9 @@ void Compiler::fgFindBasicBlocks()
                if (block->KindIs(BBJ_EHFILTERRET))
                {
                    // Mark catch handler as successor.
-                    block->bbJumpDest = hndBegBB;
+                    block->SetJumpDest(hndBegBB);
                    fgAddRefPred(hndBegBB, block);
-                    assert(block->bbJumpDest->bbCatchTyp == BBCT_FILTER_HANDLER);
+                    assert(block->GetJumpDest()->bbCatchTyp == BBCT_FILTER_HANDLER);
                    break;
                }
            }
@@ -3808,7 +3808,7 @@ void Compiler::fgFindBasicBlocks()
            // BBJ_EHFINALLYRET that were imported to BBJ_EHFAULTRET.
            if ((hndBegBB->bbCatchTyp == BBCT_FAULT) && block->KindIs(BBJ_EHFINALLYRET))
            {
-                block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this));
+                block->SetJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this));
            }
        }

@@ -4017,9 +4017,8 @@ void Compiler::fgFixEntryFlowForOSR()
    fgEnsureFirstBBisScratch();
    assert(fgFirstBB->KindIs(BBJ_NONE));
    fgRemoveRefPred(fgFirstBB->Next(), fgFirstBB);
-    fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-    fgFirstBB->bbJumpDest = fgOSREntryBB;
-    FlowEdge* const edge  = fgAddRefPred(fgOSREntryBB, fgFirstBB);
+    fgFirstBB->SetJumpKindAndTarget(BBJ_ALWAYS, fgOSREntryBB);
+    FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB);
    edge->setLikelihood(1.0);

    // We don't know the right weight for this block, since
@@ -4057,7 +4056,7 @@ void Compiler::fgCheckBasicBlockControlFlow()
            continue;
        }

-        switch (blk->GetBBJumpKind())
+        switch (blk->GetJumpKind())
        {
            case BBJ_NONE: // block flows into the next one (no jump)

@@ -4067,7 +4066,7 @@ void Compiler::fgCheckBasicBlockControlFlow()

            case BBJ_ALWAYS: // block does unconditional jump to target

-                fgControlFlowPermitted(blk, blk->bbJumpDest);
+                fgControlFlowPermitted(blk, blk->GetJumpDest());

                break;

@@ -4075,7 +4074,7 @@ void Compiler::fgCheckBasicBlockControlFlow()

                fgControlFlowPermitted(blk, blk->Next());

-                fgControlFlowPermitted(blk, blk->bbJumpDest);
+                fgControlFlowPermitted(blk, blk->GetJumpDest());

                break;

@@ -4139,7 +4138,7 @@ void Compiler::fgCheckBasicBlockControlFlow()
            case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded
                            // region. Used temporarily until importing
-                fgControlFlowPermitted(blk, blk->bbJumpDest, true);
+                fgControlFlowPermitted(blk, blk->GetJumpDest(), true);

                break;

@@ -4560,7 +4559,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
{
    // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block.
    // (We need the successors of 'curr' to be correct when we do this.)
-    BasicBlock* newBlock = bbNewBasicBlock(curr->GetBBJumpKind());
+    BasicBlock* newBlock = bbNewBasicBlock(curr->GetJumpKind());

    // Start the new block with no refs. When we set the preds below, this will get updated correctly.
    newBlock->bbRefs = 0;

@@ -4580,18 +4579,18 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
            }
        }

-        newBlock->bbJumpDest = curr->bbJumpDest;
-        curr->bbJumpDest     = nullptr;
+        newBlock->SetJumpDest(curr->GetJumpDest());
+        curr->SetJumpDest(nullptr);
    }
    else
    {
        // In the case of a switch statement there's more complicated logic in order to wire up the predecessor lists
        // but fortunately there's an existing method that implements this functionality.

-        newBlock->bbJumpSwt = curr->bbJumpSwt;
+        newBlock->SetJumpSwt(curr->GetJumpSwt());

        fgChangeSwitchBlock(curr, newBlock);

-        curr->bbJumpSwt = nullptr;
+        curr->SetJumpSwt(nullptr);
    }

    newBlock->inheritWeight(curr);
@@ -4628,7 +4627,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
    curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL);

    // Default to fallthru, and add the arc for that.
-    curr->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+    curr->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
    fgAddRefPred(newBlock, curr);

    return newBlock;
@@ -4867,7 +4866,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
    {
        newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, curr->isRunRarely());
        // The new block always jumps to 'succ'
-        newBlock->bbJumpDest = succ;
+        newBlock->SetJumpDest(succ);
    }
    newBlock->bbFlags |= (curr->bbFlags & succ->bbFlags & (BBF_BACKWARD_JUMP));

@@ -4877,10 +4876,10 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
    if (curr->KindIs(BBJ_COND))
    {
        fgReplacePred(succ, curr, newBlock);
-        if (curr->bbJumpDest == succ)
+        if (curr->HasJumpTo(succ))
        {
            // Now 'curr' jumps to newBlock
-            curr->bbJumpDest = newBlock;
+            curr->SetJumpDest(newBlock);
        }
        fgAddRefPred(newBlock, curr);
    }
@@ -4896,7 +4895,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
    {
        assert(curr->KindIs(BBJ_ALWAYS));
        fgReplacePred(succ, curr, newBlock);
-        curr->bbJumpDest = newBlock;
+        curr->SetJumpDest(newBlock);
        fgAddRefPred(newBlock, curr);
    }

@@ -5059,7 +5058,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
            NO_WAY("No retless call finally blocks; need unwind target instead");
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
        }
-        else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->bbJumpDest) &&
+        else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->GetJumpDest()) &&
                 !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) &&
                 !block->IsLastHotBlock(this))
        {
            // It's safe to change the jump type to BBJ_NONE
            // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS),
            // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by
            // BBJ_ALWAYS blocks.
-            bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+            bPrev->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
        }

        // If this is the first Cold basic block update fgFirstColdBlock
@@ -5097,7 +5096,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
            fgRemoveBlock(leaveBlk, /* unreachable */ true);

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
-            fgClearFinallyTargetBit(leaveBlk->bbJumpDest);
+            fgClearFinallyTargetBit(leaveBlk->GetJumpDest());
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
        }
        else if (block->KindIs(BBJ_RETURN))
@@ -5125,14 +5124,14 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
#ifdef DEBUG
        /* Some extra checks for the empty case */

-        switch (block->GetBBJumpKind())
+        switch (block->GetJumpKind())
        {
            case BBJ_NONE:
                break;

            case BBJ_ALWAYS:
                /* Do not remove a block that jumps to itself - used for while (true){} */
-                noway_assert(block->bbJumpDest != block);
+                noway_assert(!block->HasJumpTo(block));

                /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
                noway_assert(bPrev && bPrev->KindIs(BBJ_NONE));
@@ -5152,7 +5151,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)

        if (block->KindIs(BBJ_ALWAYS))
        {
-            succBlock = block->bbJumpDest;
+            succBlock = block->GetJumpDest();
        }
        else
        {
@@ -5242,7 +5241,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
            }

            /* change all jumps to the removed block */
-            switch (predBlock->GetBBJumpKind())
+            switch (predBlock->GetJumpKind())
            {
                default:
                    noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()");
@@ -5256,14 +5255,13 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
                    if (block->KindIs(BBJ_ALWAYS))
                    {
                        /* bPrev now becomes a BBJ_ALWAYS */
-                        bPrev->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-                        bPrev->bbJumpDest = succBlock;
+                        bPrev->SetJumpKindAndTarget(BBJ_ALWAYS, succBlock);
                    }
                    break;

                case BBJ_COND:
                    /* The links for the direct predecessor case have already been updated above */
-                    if (predBlock->bbJumpDest != block)
+                    if (!predBlock->HasJumpTo(block))
                    {
                        break;
                    }

                    /* Check if both sides of the BBJ_COND now jump to the same block */
                    if (predBlock->NextIs(succBlock))
                    {
                        // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest.
-                        noway_assert(predBlock->bbJumpDest == block);
-                        predBlock->bbJumpDest = succBlock;
+                        noway_assert(predBlock->HasJumpTo(block));
+                        predBlock->SetJumpDest(succBlock);
                        fgRemoveConditionalJump(predBlock);
                        break;
                    }
@@ -5284,8 +5282,8 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
                case BBJ_CALLFINALLY:
                case BBJ_ALWAYS:
                case BBJ_EHCATCHRET:
-                    noway_assert(predBlock->bbJumpDest == block);
-                    predBlock->bbJumpDest = succBlock;
+                    noway_assert(predBlock->HasJumpTo(block));
+                    predBlock->SetJumpDest(succBlock);
                    break;

                case BBJ_SWITCH:
@@ -5309,7 +5307,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)

    if (bPrev != nullptr)
    {
-        switch (bPrev->GetBBJumpKind())
+        switch (bPrev->GetJumpKind())
        {
            case BBJ_CALLFINALLY:
                // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS
@@ -5323,20 +5321,20 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
                // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS
                // pairing.
- if (bPrev->NextIs(bPrev->bbJumpDest) && - !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold + if (bPrev->JumpsToNext() && + !fgInDifferentRegions(bPrev, bPrev->GetJumpDest())) // We don't remove a branch from Hot -> Cold { if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type - bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bPrev->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); } } break; case BBJ_COND: /* Check for branch to next block */ - if (bPrev->NextIs(bPrev->bbJumpDest)) + if (bPrev->JumpsToNext()) { fgRemoveConditionalJump(bPrev); } @@ -5374,14 +5372,13 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->bbFallsThrough() && !bSrc->NextIs(bDst)) { - switch (bSrc->GetBBJumpKind()) + switch (bSrc->GetJumpKind()) { case BBJ_NONE: - bSrc->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); - bSrc->bbJumpDest = bDst; + bSrc->SetJumpKindAndTarget(BBJ_ALWAYS, bDst); JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", - bSrc->bbNum, bSrc->bbJumpDest->bbNum); + bSrc->bbNum, bSrc->GetJumpDest()->bbNum); break; case BBJ_CALLFINALLY: @@ -5437,12 +5434,12 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) } } - jmpBlk->bbJumpDest = bDst; + jmpBlk->SetJumpDest(bDst); fgReplacePred(bDst, bSrc, jmpBlk); JITDUMP("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", - jmpBlk->bbJumpDest->bbNum, bSrc->bbNum); + jmpBlk->GetJumpDest()->bbNum, bSrc->bbNum); break; default: @@ -5455,9 +5452,9 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && bSrc->NextIs(bSrc->bbJumpDest)) + if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && bSrc->JumpsToNext()) { - bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bSrc->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->Next()->bbNum); @@ -5584,7 +5581,7 @@ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL * if (bJump->KindIs(BBJ_COND, BBJ_ALWAYS)) { - BasicBlock* bDest = bJump->bbJumpDest; + BasicBlock* bDest = bJump->GetJumpDest(); BasicBlock* bTemp = (bSrc == nullptr) ? 
bJump : bSrc; while (true) @@ -6234,7 +6231,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } // if bAlt doesn't jump to bCur it can't be a better fall through than bCur - if (bAlt->bbJumpDest != bCur) + if (!bAlt->HasJumpTo(bCur)) { return false; } diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index c848f1c862faa..60aad57559d3e 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -101,7 +101,7 @@ void Compiler::fgDebugCheckUpdate() if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_CALLFINALLY: case BBJ_EHFINALLYRET: @@ -176,7 +176,7 @@ void Compiler::fgDebugCheckUpdate() if (doAssertOnJumpToNextBlock) { - if (block->NextIs(block->bbJumpDest)) + if (block->JumpsToNext()) { noway_assert(!"Unnecessary jump to the next block!"); } @@ -1035,7 +1035,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n bbNum); fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal); - fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->GetBBJumpKind()]); + fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->GetJumpKind()]); if (block->hasTryIndex()) { fprintf(fgxFile, "\n inTry=\"%s\"", "true"); @@ -1158,7 +1158,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(fgxFile, "\n switchCases=\"%d\"", edge->getDupCount()); } - if (bSource->bbJumpSwt->getDefault() == bTarget) + if (bSource->GetJumpSwt()->getDefault() == bTarget) { fprintf(fgxFile, "\n switchDefault=\"true\""); } @@ -2004,34 +2004,34 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * } else { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_COND: - printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, - maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); + printf("-> " FMT_BB "%*s ( cond )", block->GetJumpDest()->bbNum, + maxBlockNumWidth - max(CountDigits(block->GetJumpDest()->bbNum), 2), ""); break; case BBJ_CALLFINALLY: - printf("-> " FMT_BB "%*s (callf )", block->bbJumpDest->bbNum, - maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); + printf("-> " FMT_BB "%*s (callf )", block->GetJumpDest()->bbNum, + maxBlockNumWidth - max(CountDigits(block->GetJumpDest()->bbNum), 2), ""); break; case BBJ_ALWAYS: if (flags & BBF_KEEP_BBJ_ALWAYS) { - printf("-> " FMT_BB "%*s (ALWAYS)", block->bbJumpDest->bbNum, - maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); + printf("-> " FMT_BB "%*s (ALWAYS)", block->GetJumpDest()->bbNum, + maxBlockNumWidth - max(CountDigits(block->GetJumpDest()->bbNum), 2), ""); } else { - printf("-> " FMT_BB "%*s (always)", block->bbJumpDest->bbNum, - maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); + printf("-> " FMT_BB "%*s (always)", block->GetJumpDest()->bbNum, + maxBlockNumWidth - max(CountDigits(block->GetJumpDest()->bbNum), 2), ""); } break; case BBJ_LEAVE: - printf("-> " FMT_BB "%*s (leave )", block->bbJumpDest->bbNum, - maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); + printf("-> " FMT_BB "%*s (leave )", block->GetJumpDest()->bbNum, + maxBlockNumWidth - max(CountDigits(block->GetJumpDest()->bbNum), 2), ""); break; case BBJ_EHFINALLYRET: @@ -2047,8 +2047,8 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * break; case BBJ_EHCATCHRET: - printf("-> " FMT_BB "%*s ( cret )", 
block->bbJumpDest->bbNum, - maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), ""); + printf("-> " FMT_BB "%*s ( cret )", block->GetJumpDest()->bbNum, + maxBlockNumWidth - max(CountDigits(block->GetJumpDest()->bbNum), 2), ""); break; case BBJ_THROW: @@ -2067,7 +2067,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * { printf("->"); - const BBswtDesc* const bbJumpSwt = block->bbJumpSwt; + const BBswtDesc* const bbJumpSwt = block->GetJumpSwt(); const unsigned jumpCnt = bbJumpSwt->bbsCount; BasicBlock** const jumpTab = bbJumpSwt->bbsDstTab; int switchWidth = 0; @@ -2659,10 +2659,10 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { - switch (blockPred->GetBBJumpKind()) + switch (blockPred->GetJumpKind()) { case BBJ_COND: - assert(blockPred->NextIs(block) || blockPred->bbJumpDest == block); + assert(blockPred->NextIs(block) || blockPred->HasJumpTo(block)); return true; case BBJ_NONE: @@ -2673,7 +2673,7 @@ bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: - assert(blockPred->bbJumpDest == block); + assert(blockPred->HasJumpTo(block)); return true; case BBJ_EHFINALLYRET: @@ -2733,7 +2733,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { - if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg)) { continue; } @@ -2755,7 +2755,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { - if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg)) { continue; } @@ -2988,7 +2988,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef { if (succBlock->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* finallyBlock = succBlock->bbJumpDest; + BasicBlock* finallyBlock = succBlock->GetJumpDest(); assert(finallyBlock->hasHndIndex()); unsigned finallyIndex = finallyBlock->getHndIndex(); @@ -4793,7 +4793,7 @@ void Compiler::fgDebugCheckLoopTable() BasicBlock* e = loop.lpEntry; if (h->KindIs(BBJ_ALWAYS)) { - assert(h->bbJumpDest == e); + assert(h->HasJumpTo(e)); } else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 829a5ae4d9c98..8fd44c5e6f89c 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -100,7 +100,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() } // If the finally's block jumps back to itself, then it is not empty. - if (firstBlock->KindIs(BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) + if (firstBlock->KindIs(BBJ_ALWAYS) && firstBlock->HasJumpTo(firstBlock)) { JITDUMP("EH#%u finally has basic block that jumps to itself; skipping.\n", XTnum); XTnum++; @@ -142,7 +142,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() { BasicBlock* nextBlock = currentBlock->Next(); - if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) + if (currentBlock->KindIs(BBJ_CALLFINALLY) && currentBlock->HasJumpTo(firstBlock)) { // Retarget the call finally to jump to the return // point. 
@@ -152,7 +152,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
                noway_assert(currentBlock->isBBCallAlwaysPair());

                BasicBlock* const leaveBlock          = currentBlock->Next();
-                BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest;
+                BasicBlock* const postTryFinallyBlock = leaveBlock->GetJumpDest();

                JITDUMP("Modifying callfinally " FMT_BB " leave " FMT_BB " finally " FMT_BB " continuation " FMT_BB
                        "\n",
@@ -162,8 +162,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()

                noway_assert(leaveBlock->KindIs(BBJ_ALWAYS));

-                currentBlock->bbJumpDest = postTryFinallyBlock;
-                currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+                currentBlock->SetJumpKindAndTarget(BBJ_ALWAYS, postTryFinallyBlock);

                // Ref count updates.
                fgAddRefPred(postTryFinallyBlock, currentBlock);
@@ -381,12 +380,12 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
            continue;
        }

-        BasicBlock* const callFinally = firstTryBlock->bbJumpDest;
+        BasicBlock* const callFinally = firstTryBlock->GetJumpDest();

        // Look for call always pair. Note this will also disqualify
        // empty try removal in cases where the finally doesn't
        // return.
-        if (!callFinally->isBBCallAlwaysPair() || (callFinally->bbJumpDest != firstHandlerBlock))
+        if (!callFinally->isBBCallAlwaysPair() || !callFinally->HasJumpTo(firstHandlerBlock))
        {
            JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n", XTnum,
                    firstTryBlock->bbNum);
@@ -407,7 +406,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
        // Look for call always pair within the try itself. Note this
        // will also disqualify empty try removal in cases where the
        // finally doesn't return.
-        if (!firstTryBlock->isBBCallAlwaysPair() || (firstTryBlock->bbJumpDest != firstHandlerBlock))
+        if (!firstTryBlock->isBBCallAlwaysPair() || !firstTryBlock->HasJumpTo(firstHandlerBlock))
        {
            JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum);
            XTnum++;
@@ -437,7 +436,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
        for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock;
             block = block->Next())
        {
-            if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock))
+            if (block->KindIs(BBJ_CALLFINALLY) && block->HasJumpTo(firstHandlerBlock))
            {
                assert(block->isBBCallAlwaysPair());

@@ -463,11 +462,11 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
        // Time to optimize.
        //
        // (1) Convert the callfinally to a normal jump to the handler
-        callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+        callFinally->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(this));

        // Identify the leave block and the continuation
        BasicBlock* const leave        = callFinally->Next();
-        BasicBlock* const continuation = leave->bbJumpDest;
+        BasicBlock* const continuation = leave->GetJumpDest();

        // (2) Cleanup the leave so it can be deleted by subsequent opts
        assert((leave->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0);
@@ -542,8 +541,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
                    GenTree* finallyRetExpr = finallyRet->GetRootNode();
                    assert(finallyRetExpr->gtOper == GT_RETFILT);
                    fgRemoveStmt(block, finallyRet);
-                    block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-                    block->bbJumpDest = continuation;
+                    block->SetJumpKindAndTarget(BBJ_ALWAYS, continuation);
                    fgAddRefPred(continuation, block);
                    fgRemoveRefPred(leave, block);
                }
@@ -827,7 +825,7 @@ PhaseStatus Compiler::fgCloneFinally()
        }
        else if (block->KindIs(BBJ_ALWAYS))
        {
-            jumpDest = block->bbJumpDest;
+            jumpDest = block->GetJumpDest();
        }

        if (jumpDest == nullptr)
@@ -837,13 +835,13 @@ PhaseStatus Compiler::fgCloneFinally()

        // The jumpDest must be a callfinally that in turn invokes the
        // finally of interest.
-        if (!jumpDest->isBBCallAlwaysPair() || (jumpDest->bbJumpDest != firstBlock))
+        if (!jumpDest->isBBCallAlwaysPair() || !jumpDest->HasJumpTo(firstBlock))
        {
            continue;
        }
#else
        // Look for call finally pair directly within the try
-        if (!block->isBBCallAlwaysPair() || (block->bbJumpDest != firstBlock))
+        if (!block->isBBCallAlwaysPair() || !block->HasJumpTo(firstBlock))
        {
            continue;
        }
@@ -854,7 +852,7 @@ PhaseStatus Compiler::fgCloneFinally()
        // Found a block that invokes the finally.
        //
        BasicBlock* const finallyReturnBlock  = jumpDest->Next();
-        BasicBlock* const postTryFinallyBlock = finallyReturnBlock->bbJumpDest;
+        BasicBlock* const postTryFinallyBlock = finallyReturnBlock->GetJumpDest();
        bool              isUpdate            = false;

        // See if this is the one we want to use to inspire cloning.
@@ -971,7 +969,7 @@ PhaseStatus Compiler::fgCloneFinally()
        {
            if (block->isBBCallAlwaysPair())
            {
-                if (block->bbJumpDest == firstBlock)
+                if (block->HasJumpTo(firstBlock))
                {
                    firstCallFinallyBlock = block;
                    break;
                }
@@ -989,7 +987,7 @@ PhaseStatus Compiler::fgCloneFinally()
        {
            BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->Prev();

-            if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock))
+            if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && placeToMoveAfter->HasJumpTo(normalCallFinallyBlock))
            {
                JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n",
                        normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum);
@@ -1049,7 +1047,7 @@ PhaseStatus Compiler::fgCloneFinally()

            // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally
            // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here).
-            BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->GetBBJumpKind();
+            BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->GetJumpKind();

            if (block == firstBlock)
            {
@@ -1108,7 +1106,7 @@ PhaseStatus Compiler::fgCloneFinally()
                newBlock->clearHndIndex();

                // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them.
-                assert(newBlock->bbJumpDest == nullptr);
+                assert(newBlock->HasJumpTo(nullptr));
            }

            if (!clonedOk)
@@ -1138,7 +1136,7 @@ PhaseStatus Compiler::fgCloneFinally()
                assert(finallyRetExpr->gtOper == GT_RETFILT);
                fgRemoveStmt(newBlock, finallyRet);
                assert(newBlock->KindIs(BBJ_ALWAYS)); // we mapped this above already
-                newBlock->bbJumpDest = normalCallFinallyReturn;
+                newBlock->SetJumpDest(normalCallFinallyReturn);

                fgAddRefPred(normalCallFinallyReturn, newBlock);
            }
@@ -1163,10 +1161,10 @@ PhaseStatus Compiler::fgCloneFinally()

            if (currentBlock->isBBCallAlwaysPair())
            {
-                if (currentBlock->bbJumpDest == firstBlock)
+                if (currentBlock->HasJumpTo(firstBlock))
                {
                    BasicBlock* const leaveBlock          = currentBlock->Next();
-                    BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest;
+                    BasicBlock* const postTryFinallyBlock = leaveBlock->GetJumpDest();

                    // Note we must retarget all callfinallies that have this
                    // continuation, or we can't clean up the continuation
@@ -1179,8 +1177,7 @@ PhaseStatus Compiler::fgCloneFinally()

                        // This call returns to the expected spot, so
                        // retarget it to branch to the clone.
-                        currentBlock->bbJumpDest = firstCloneBlock;
-                        currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+                        currentBlock->SetJumpKindAndTarget(BBJ_ALWAYS, firstCloneBlock);

                        // Ref count updates.
                        fgAddRefPred(firstCloneBlock, currentBlock);
@@ -1242,7 +1239,7 @@ PhaseStatus Compiler::fgCloneFinally()
        {
            if (block->KindIs(BBJ_EHFINALLYRET))
            {
-                block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this));
+                block->SetJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this));
            }
        }
    }
@@ -1435,13 +1432,13 @@ void Compiler::fgDebugCheckTryFinallyExits()
            if (succBlock->KindIs(BBJ_CALLFINALLY))
            {
                // case (a1)
-                isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock);
+                isCallToFinally = isFinally && succBlock->HasJumpTo(finallyBlock);
            }
#else
            if (block->KindIs(BBJ_CALLFINALLY))
            {
                // case (a2)
-                isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock);
+                isCallToFinally = isFinally && block->HasJumpTo(finallyBlock);
            }
#endif // FEATURE_EH_CALLFINALLY_THUNKS
@@ -1457,7 +1454,7 @@ void Compiler::fgDebugCheckTryFinallyExits()
                if (succBlock->isEmpty())
                {
                    // case (c)
-                    BasicBlock* const succSuccBlock = succBlock->bbJumpDest;
+                    BasicBlock* const succSuccBlock = succBlock->GetJumpDest();

                    if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN)
                    {
@@ -1621,7 +1618,7 @@ void Compiler::fgAddFinallyTargetFlags()
        if (block->isBBCallAlwaysPair())
        {
            BasicBlock* const leave        = block->Next();
-            BasicBlock* const continuation = leave->bbJumpDest;
+            BasicBlock* const continuation = leave->GetJumpDest();

            if ((continuation->bbFlags & BBF_FINALLY_TARGET) == 0)
            {
@@ -1792,7 +1789,7 @@ PhaseStatus Compiler::fgMergeFinallyChains()
             currentBlock = currentBlock->Next())
        {
            // Ignore "retless" callfinallys (where the finally doesn't return).
-            if (currentBlock->isBBCallAlwaysPair() && (currentBlock->bbJumpDest == beginHandlerBlock))
+            if (currentBlock->isBBCallAlwaysPair() && currentBlock->HasJumpTo(beginHandlerBlock))
            {
                // The callfinally must be empty, so that we can
                // safely retarget anything that branches here to
@@ -1804,7 +1801,7 @@ PhaseStatus Compiler::fgMergeFinallyChains()

                // Locate the continuation
                BasicBlock* const leaveBlock        = currentBlock->Next();
-                BasicBlock* const continuationBlock = leaveBlock->bbJumpDest;
+                BasicBlock* const continuationBlock = leaveBlock->GetJumpDest();

                // If this is the first time we've seen this
                // continuation, register this callfinally as the
@@ -1907,14 +1904,14 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,

    // Screen out cases that are not callfinallys to the right
    // handler.

-    BasicBlock* const callFinally = block->bbJumpDest;
+    BasicBlock* const callFinally = block->GetJumpDest();

    if (!callFinally->isBBCallAlwaysPair())
    {
        return false;
    }

-    if (callFinally->bbJumpDest != handler)
+    if (!callFinally->HasJumpTo(handler))
    {
        return false;
    }
@@ -1922,14 +1919,14 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
    // Ok, this is a callfinally that invokes the right handler.
    // Get its continuation.
    BasicBlock* const leaveBlock        = callFinally->Next();
-    BasicBlock* const continuationBlock = leaveBlock->bbJumpDest;
+    BasicBlock* const continuationBlock = leaveBlock->GetJumpDest();

    // Find the canonical callfinally for that continuation.
    BasicBlock* const canonicalCallFinally = continuationMap[continuationBlock];
    assert(canonicalCallFinally != nullptr);

    // If the block already jumps to the canonical call finally, no work needed.
-    if (block->bbJumpDest == canonicalCallFinally)
+    if (block->HasJumpTo(canonicalCallFinally))
    {
        JITDUMP(FMT_BB " already canonical\n", block->bbNum);
        return false;
@@ -1939,7 +1936,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
    JITDUMP("Redirecting branch in " FMT_BB " from " FMT_BB " to " FMT_BB ".\n", block->bbNum, callFinally->bbNum,
            canonicalCallFinally->bbNum);

-    block->bbJumpDest = canonicalCallFinally;
+    block->SetJumpDest(canonicalCallFinally);
    fgAddRefPred(canonicalCallFinally, block);
    assert(callFinally->bbRefs > 0);
    fgRemoveRefPred(callFinally, block);
@@ -2194,7 +2191,7 @@ PhaseStatus Compiler::fgTailMergeThrows()
        BasicBlock* const predBlock = predEdge->getSourceBlock();
        nextPredEdge                = predEdge->getNextPredEdge();

-        switch (predBlock->GetBBJumpKind())
+        switch (predBlock->GetJumpKind())
        {
            case BBJ_NONE:
            {
@@ -2218,7 +2215,7 @@ PhaseStatus Compiler::fgTailMergeThrows()
                    fgTailMergeThrowsFallThroughHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge);
                }

-                if (predBlock->bbJumpDest == nonCanonicalBlock)
+                if (predBlock->HasJumpTo(nonCanonicalBlock))
                {
                    fgTailMergeThrowsJumpToHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge);
                }
@@ -2302,7 +2299,7 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,

    // Wire up the new flow
    fgAddRefPred(newBlock, predBlock, predEdge);
-    newBlock->bbJumpDest = canonicalBlock;
+    newBlock->SetJumpDest(canonicalBlock);
    fgAddRefPred(canonicalBlock, newBlock, predEdge);

    // If nonCanonicalBlock has only one pred, all its flow transfers.
@@ -2331,7 +2328,7 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
                                              BasicBlock* canonicalBlock,
                                              FlowEdge*   predEdge)
 {
-    assert(predBlock->bbJumpDest == nonCanonicalBlock);
+    assert(predBlock->HasJumpTo(nonCanonicalBlock));
 
     JITDUMP("*** " FMT_BB " now branching to " FMT_BB "\n", predBlock->bbNum, canonicalBlock->bbNum);
 
@@ -2339,6 +2336,6 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
     fgRemoveRefPred(nonCanonicalBlock, predBlock);
 
     // Wire up the new flow
-    predBlock->bbJumpDest = canonicalBlock;
+    predBlock->SetJumpDest(canonicalBlock);
     fgAddRefPred(canonicalBlock, predBlock, predEdge);
 }
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index 23f122177606b..d051d8374a7d5 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -343,7 +343,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
     BasicBlock* bNext;
 
-    switch (block->GetBBJumpKind())
+    switch (block->GetJumpKind())
     {
         case BBJ_CALLFINALLY:
             if (!(block->bbFlags & BBF_RETLESS_CALL))
             {
@@ -361,12 +361,12 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
                     fgRemoveRefPred(bNext, bNext->bbPreds->getSourceBlock());
                 }
             }
-            fgRemoveRefPred(block->bbJumpDest, block);
+            fgRemoveRefPred(block->GetJumpDest(), block);
             break;
 
         case BBJ_ALWAYS:
         case BBJ_EHCATCHRET:
-            fgRemoveRefPred(block->bbJumpDest, block);
+            fgRemoveRefPred(block->GetJumpDest(), block);
             break;
 
        case BBJ_NONE:
@@ -374,14 +374,14 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
            break;
 
        case BBJ_COND:
-            fgRemoveRefPred(block->bbJumpDest, block);
+            fgRemoveRefPred(block->GetJumpDest(), block);
            fgRemoveRefPred(block->Next(), block);
            break;
 
        case BBJ_EHFILTERRET:
-            block->bbJumpDest->bbRefs++; // To compensate the bbRefs-- inside fgRemoveRefPred
-            fgRemoveRefPred(block->bbJumpDest, block);
+            block->GetJumpDest()->bbRefs++; // To compensate the bbRefs-- inside fgRemoveRefPred
+            fgRemoveRefPred(block->GetJumpDest(), block);
            break;
 
        case BBJ_EHFINALLYRET:
@@ -403,8 +403,7 @@
            for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
            {
-                if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) ||
-                    bcall->bbJumpDest != finBeg)
+                if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg))
                {
                    continue;
                }
@@ -470,7 +469,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock*
     for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next())
     {
-        if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg)
+        if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg))
         {
             continue;
         }
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index 3f99cb6a7fb39..b1481c9d29c17 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -675,13 +675,13 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker>
 
                 if (!condTree->IsIntegralConst(0))
                 {
-                    block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
+                    block->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler));
                     m_compiler->fgRemoveRefPred(block->Next(), block);
                 }
                 else
                 {
-                    block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler));
-                    m_compiler->fgRemoveRefPred(block->bbJumpDest, block);
+                    block->SetJumpKind(BBJ_NONE DEBUG_ARG(m_compiler));
+                    m_compiler->fgRemoveRefPred(block->GetJumpDest(), block);
                 }
             }
         }
@@ -1529,14 +1529,13 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
             if (block->IsLast())
             {
                 JITDUMP("\nConvert bbJumpKind of " FMT_BB " to
BBJ_NONE\n", block->bbNum); - block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); - block->bbJumpDest = bottomBlock; + block->SetJumpKindAndTarget(BBJ_ALWAYS, bottomBlock); } fgAddRefPred(bottomBlock, block); diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 22436d28c834e..bb9cb076a801b 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -139,7 +139,7 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2) return true; } - if (b1->KindIs(BBJ_ALWAYS, BBJ_COND) && fgReachable(b1->bbJumpDest, b2)) + if (b1->KindIs(BBJ_ALWAYS, BBJ_COND) && fgReachable(b1->GetJumpDest(), b2)) { return true; } @@ -466,7 +466,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; - block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -475,7 +475,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) if (bIsBBCallAlwaysPair) { noway_assert(block->Next()->KindIs(BBJ_ALWAYS)); - fgClearFinallyTargetBit(block->Next()->bbJumpDest); + fgClearFinallyTargetBit(block->Next()->GetJumpDest()); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } @@ -1650,7 +1650,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. if (bbIsHandlerBeg(newTryEntry->Next())) { - newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + newTryEntry->SetJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { @@ -1787,8 +1787,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); - fromBlock->bbJumpDest = toBlock; + fromBlock->SetJumpKindAndTarget(BBJ_COND, toBlock); fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); @@ -1826,12 +1825,12 @@ PhaseStatus Compiler::fgPostImportationCleanup() // Note even if the OSR is in a nested try, if it's a mutual protect try // it can be reached directly from "outside". // - assert(fgFirstBB->bbJumpDest == osrEntry); + assert(fgFirstBB->HasJumpTo(osrEntry)); assert(fgFirstBB->KindIs(BBJ_ALWAYS)); if (entryJumpTarget != osrEntry) { - fgFirstBB->bbJumpDest = entryJumpTarget; + fgFirstBB->SetJumpDest(entryJumpTarget); fgRemoveRefPred(osrEntry, fgFirstBB); fgAddRefPred(entryJumpTarget, fgFirstBB); @@ -2268,7 +2267,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* set the right links */ - block->SetBBJumpKind(bNext->GetBBJumpKind() DEBUG_ARG(this)); + block->SetJumpKind(bNext->GetJumpKind() DEBUG_ARG(this)); VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). 
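Two helpers carry most of this refactor. Call sites that previously set bbJumpKind and bbJumpDest in two steps now use a single SetJumpKindAndTarget call, and the fgCompactBlocks hunks below rewrite bbJumpDest-versus-bbNext comparisons as JumpsToNext. Plausible shapes for both, inferred from how they are used here (the assert is an assumption):

    // Sketches only; the real block.h bodies may differ in their asserts.
    void SetJumpKindAndTarget(BBjumpKinds jumpKind, BasicBlock* jumpDest)
    {
        assert(jumpDest != nullptr); // every call site in this diff passes a real block
        bbJumpKind = jumpKind;
        bbJumpDest = jumpDest;
    }

    bool JumpsToNext() const
    {
        // True when the branch target is also the lexical successor.
        return NextIs(bbJumpDest);
    }
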
@@ -2328,7 +2327,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Set the jump targets */ - switch (bNext->GetBBJumpKind()) + switch (bNext->GetJumpKind()) { case BBJ_CALLFINALLY: // Propagate RETLESS property @@ -2339,13 +2338,13 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) case BBJ_COND: case BBJ_ALWAYS: case BBJ_EHCATCHRET: - block->bbJumpDest = bNext->bbJumpDest; + block->SetJumpDest(bNext->GetJumpDest()); /* Update the predecessor list for 'bNext->bbJumpDest' */ - fgReplacePred(bNext->bbJumpDest, bNext, block); + fgReplacePred(bNext->GetJumpDest(), bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->Next()) + if (bNext->KindIs(BBJ_COND) && !bNext->JumpsToNext()) { fgReplacePred(bNext->Next(), bNext, block); } @@ -2357,7 +2356,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) break; case BBJ_EHFILTERRET: - fgReplacePred(bNext->bbJumpDest, bNext, block); + fgReplacePred(bNext->GetJumpDest(), bNext, block); break; case BBJ_EHFINALLYRET: @@ -2375,7 +2374,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->Next()) { - if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || !bcall->HasJumpTo(finBeg)) { continue; } @@ -2394,7 +2393,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) break; case BBJ_SWITCH: - block->bbJumpSwt = bNext->bbJumpSwt; + block->SetJumpSwt(bNext->GetJumpSwt()); // We are moving the switch jump from bNext to block. Examine the jump targets // of the BBJ_SWITCH at bNext and replace the predecessor to 'bNext' with ones to 'block' fgChangeSwitchBlock(bNext, block); @@ -2405,7 +2404,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) break; } - if ((bNext->bbJumpDest != nullptr) && bNext->bbJumpDest->isLoopAlign()) + if (!bNext->HasJumpTo(nullptr) && bNext->GetJumpDest()->isLoopAlign()) { // `bNext` has a backward target to some block which mean bNext is part of a loop. // `block` into which `bNext` is compacted should be updated with its loop number @@ -2627,19 +2626,19 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->KindIs(BBJ_COND) && block->NextIs(block->bbJumpDest)); + noway_assert(block->KindIs(BBJ_COND) && block->JumpsToNext()); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->Next(), block); noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. 
- block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); --block->Next()->bbRefs; flow->decrementDupCount(); #ifdef DEBUG - block->bbJumpDest = nullptr; + block->SetJumpDest(nullptr); if (verbose) { printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition" @@ -2746,7 +2745,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc } // Don't optimize a jump to a removed block - if (bDest->bbJumpDest->bbFlags & BBF_REMOVED) + if (bDest->GetJumpDest()->bbFlags & BBF_REMOVED) { optimizeJump = false; } @@ -2782,7 +2781,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc if (verbose) { printf("\nOptimizing a jump to an unconditional jump (" FMT_BB " -> " FMT_BB " -> " FMT_BB ")\n", - block->bbNum, bDest->bbNum, bDest->bbJumpDest->bbNum); + block->bbNum, bDest->bbNum, bDest->GetJumpDest()->bbNum); } #endif // DEBUG @@ -2829,7 +2828,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc bDest->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag } - FlowEdge* edge2 = fgGetPredForBlock(bDest->bbJumpDest, bDest); + FlowEdge* edge2 = fgGetPredForBlock(bDest->GetJumpDest(), bDest); if (edge2 != nullptr) { @@ -2861,9 +2860,9 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc } // Optimize the JUMP to empty unconditional JUMP to go to the new target - block->bbJumpDest = bDest->bbJumpDest; + block->SetJumpDest(bDest->GetJumpDest()); - fgAddRefPred(bDest->bbJumpDest, block, fgRemoveRefPred(bDest, block)); + fgAddRefPred(bDest->GetJumpDest(), block, fgRemoveRefPred(bDest, block)); return true; } @@ -2886,7 +2885,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool madeChanges = false; BasicBlock* bPrev = block->Prev(); - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_COND: case BBJ_SWITCH: @@ -2914,7 +2913,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // A GOTO cannot be to the next block since that // should have been fixed by the optimization above // An exception is made for a jump from Hot to Cold - noway_assert(!block->NextIs(block->bbJumpDest) || block->isBBCallAlwaysPairTail() || + noway_assert(!block->JumpsToNext() || block->isBBCallAlwaysPairTail() || fgInDifferentRegions(block, block->Next())); /* Cannot remove the first BB */ @@ -2924,7 +2923,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } /* Do not remove a block that jumps to itself - used for while (true){} */ - if (block->bbJumpDest == block) + if (block->HasJumpTo(block)) { break; } @@ -2982,7 +2981,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) if (block->KindIs(BBJ_ALWAYS)) { - succBlock = block->bbJumpDest; + succBlock = block->GetJumpDest(); } else { @@ -2999,7 +2998,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { if (predBlock->KindIs(BBJ_EHCATCHRET)) { - assert(predBlock->bbJumpDest == block); + assert(predBlock->HasJumpTo(block)); okToMerge = false; // we can't get rid of the empty block break; } @@ -3121,8 +3120,8 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { assert(block->KindIs(BBJ_SWITCH)); - unsigned jmpCnt = block->bbJumpSwt->bbsCount; - BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; + unsigned jmpCnt = block->GetJumpSwt()->bbsCount; + BasicBlock** jmpTab = block->GetJumpSwt()->bbsDstTab; BasicBlock* bNewDest; // the new jump target for the current switch case BasicBlock* bDest; bool returnvalue = 
false; @@ -3134,8 +3133,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? - if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && - (bDest != bDest->bbJumpDest)) // special case for self jumps + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && !bDest->HasJumpTo(bDest)) // special case for self jumps { bool optimizeJump = true; @@ -3149,7 +3147,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) if (optimizeJump) { - bNewDest = bDest->bbJumpDest; + bNewDest = bDest->GetJumpDest(); #ifdef DEBUG if (verbose) { @@ -3225,8 +3223,8 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // At this point all of the case jump targets have been updated such // that none of them go to block that is an empty unconditional block // - jmpTab = block->bbJumpSwt->bbsDstTab; - jmpCnt = block->bbJumpSwt->bbsCount; + jmpTab = block->GetJumpSwt()->bbsDstTab; + jmpCnt = block->GetJumpSwt()->bbsCount; // Now check for two trivial switch jumps. // @@ -3311,8 +3309,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } // Change the switch jump into a BBJ_ALWAYS - block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + block->SetJumpKindAndTarget(BBJ_ALWAYS, block->GetJumpSwt()->bbsDstTab[0]); if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) @@ -3323,7 +3320,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) return true; } - else if ((block->bbJumpSwt->bbsCount == 2) && block->NextIs(block->bbJumpSwt->bbsDstTab[1])) + else if ((block->GetJumpSwt()->bbsCount == 2) && block->NextIs(block->GetJumpSwt()->bbsDstTab[1])) { /* Use a BBJ_COND(switchVal==0) for a switch with only one significant clause besides the default clause, if the @@ -3376,8 +3373,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) fgSetStmtSeq(switchStmt); } - block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); + block->SetJumpKindAndTarget(BBJ_COND, block->GetJumpSwt()->bbsDstTab[0]); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -3750,10 +3746,10 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* return false; } - if ((target->bbJumpDest->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) + if ((target->GetJumpDest()->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum, - target->bbJumpDest->bbNum); + target->GetJumpDest()->bbNum); return false; } } @@ -3788,9 +3784,8 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); - block->bbJumpDest = target->bbJumpDest; - fgAddRefPred(block->bbJumpDest, block); + block->SetJumpKindAndTarget(BBJ_COND, target->GetJumpDest()); + fgAddRefPred(block->GetJumpDest(), block); fgRemoveRefPred(target, block); // add an unconditional block after this block to jump to the target block's fallthrough block @@ -3800,9 +3795,9 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // The new block 'next' will inherit its weight from 'block' // next->inheritWeight(block); - next->bbJumpDest = target->Next(); + next->SetJumpDest(target->Next()); fgAddRefPred(next, block); - fgAddRefPred(next->bbJumpDest, next); + fgAddRefPred(next->GetJumpDest(), next); JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), 
created new uncond " FMT_BB "\n", block->bbNum, target->bbNum, next->bbNum); @@ -3825,7 +3820,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev) { assert(block->KindIs(BBJ_COND, BBJ_ALWAYS)); - assert(block->bbJumpDest == bNext); + assert(block->HasJumpTo(bNext)); assert(block->NextIs(bNext)); assert(block->PrevIs(bPrev)); @@ -3841,7 +3836,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ - block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); #ifdef DEBUG if (verbose) { @@ -3967,7 +3962,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi /* Conditional is gone - simply fall into the next block */ - block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + block->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ @@ -4019,14 +4014,14 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - BasicBlock* bDest = bJump->bbJumpDest; + BasicBlock* bDest = bJump->GetJumpDest(); if (!bDest->KindIs(BBJ_COND)) { return false; } - if (!bJump->NextIs(bDest->bbJumpDest)) + if (!bJump->NextIs(bDest->GetJumpDest())) { return false; } @@ -4232,8 +4227,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; - bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); - bJump->bbJumpDest = bDest->Next(); + bJump->SetJumpKindAndTarget(BBJ_COND, bDest->Next()); /* Update bbRefs and bbPreds */ @@ -4334,7 +4328,7 @@ bool Compiler::fgOptimizeSwitchJumps() continue; } - if (!block->bbJumpSwt->bbsHasDominantCase) + if (!block->GetJumpSwt()->bbsHasDominantCase) { continue; } @@ -4343,14 +4337,14 @@ bool Compiler::fgOptimizeSwitchJumps() // assert(block->hasProfileWeight()); - const unsigned dominantCase = block->bbJumpSwt->bbsDominantCase; + const unsigned dominantCase = block->GetJumpSwt()->bbsDominantCase; JITDUMP(FMT_BB " has switch with dominant case %u, considering peeling\n", block->bbNum, dominantCase); // The dominant case should not be the default case, as we already peel that one. // - assert(dominantCase < (block->bbJumpSwt->bbsCount - 1)); - BasicBlock* const dominantTarget = block->bbJumpSwt->bbsDstTab[dominantCase]; + assert(dominantCase < (block->GetJumpSwt()->bbsCount - 1)); + BasicBlock* const dominantTarget = block->GetJumpSwt()->bbsDstTab[dominantCase]; Statement* const switchStmt = block->lastStmt(); GenTree* const switchTree = switchStmt->GetRootNode(); assert(switchTree->OperIs(GT_SWITCH)); @@ -4393,8 +4387,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. 
// - block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); - block->bbJumpDest = dominantTarget; + block->SetJumpKindAndTarget(BBJ_COND, dominantTarget); FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; assert(blockToNewBlockEdge->getSourceBlock() == block); @@ -4402,7 +4395,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Update profile data // - const weight_t fraction = newBlock->bbJumpSwt->bbsDominantFraction; + const weight_t fraction = newBlock->GetJumpSwt()->bbsDominantFraction; const weight_t blockToTargetWeight = block->bbWeight * fraction; const weight_t blockToNewBlockWeight = block->bbWeight - blockToTargetWeight; @@ -4452,7 +4445,7 @@ bool Compiler::fgOptimizeSwitchJumps() // // But it no longer has a dominant case. // - newBlock->bbJumpSwt->bbsHasDominantCase = false; + newBlock->GetJumpSwt()->bbsHasDominantCase = false; if (fgNodeThreading == NodeThreading::AllTrees) { @@ -4610,11 +4603,11 @@ bool Compiler::fgExpandRarelyRunBlocks() const char* reason = nullptr; - switch (bPrev->GetBBJumpKind()) + switch (bPrev->GetJumpKind()) { case BBJ_ALWAYS: - if (bPrev->bbJumpDest->isRunRarely()) + if (bPrev->GetJumpDest()->isRunRarely()) { reason = "Unconditional jump to a rarely run block"; } @@ -4638,7 +4631,7 @@ bool Compiler::fgExpandRarelyRunBlocks() case BBJ_COND: - if (block->isRunRarely() && bPrev->bbJumpDest->isRunRarely()) + if (block->isRunRarely() && bPrev->GetJumpDest()->isRunRarely()) { reason = "Both sides of a conditional jump are rarely run"; } @@ -4909,7 +4902,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // Setup bDest if (bPrev->KindIs(BBJ_COND, BBJ_ALWAYS)) { - bDest = bPrev->bbJumpDest; + bDest = bPrev->GetJumpDest(); forwardBranch = fgIsForwardBranch(bPrev); backwardBranch = !forwardBranch; } @@ -5159,7 +5152,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // candidateBlock and have it fall into bTmp // if ((candidateBlock == nullptr) || !candidateBlock->KindIs(BBJ_COND, BBJ_ALWAYS) || - (candidateBlock->bbJumpDest != bTmp)) + !candidateBlock->HasJumpTo(bTmp)) { // otherwise we have a new candidateBlock // @@ -5715,7 +5708,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; - if (bEnd->KindIs(BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && + if (bEnd->KindIs(BBJ_ALWAYS) && (!isRare || bEnd->GetJumpDest()->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] @@ -5731,7 +5724,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (nearBlk != bPrev) { // Check if nearBlk satisfies our requirement - if (nearBlk->NextIs(bEnd->bbJumpDest)) + if (nearBlk->NextIs(bEnd->GetJumpDest())) { break; } @@ -5875,7 +5868,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) if (bStart2 == nullptr) { /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ - bPrev->bbJumpDest = bStart; + bPrev->SetJumpDest(bStart); } else { @@ -5883,7 +5876,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(insertAfterBlk->NextIs(block)); /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ - bPrev->bbJumpDest = block; + bPrev->SetJumpDest(block); } } @@ -6104,12 +6097,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (block->KindIs(BBJ_ALWAYS)) { - bDest = block->bbJumpDest; + bDest = block->GetJumpDest(); if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) { change = true; 
modified = true; - bDest = block->bbJumpDest; + bDest = block->GetJumpDest(); bNext = block->Next(); } } @@ -6121,7 +6114,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { change = true; modified = true; - bDest = block->bbJumpDest; + bDest = block->GetJumpDest(); bNext = block->Next(); } } @@ -6131,7 +6124,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (block->KindIs(BBJ_COND, BBJ_ALWAYS)) { - bDest = block->bbJumpDest; + bDest = block->GetJumpDest(); if (bDest == bNext) { if (fgOptimizeBranchToNext(block, bNext, bPrev)) @@ -6147,7 +6140,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { // Do we have a JUMP to an empty unconditional JUMP block? if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && - (bDest != bDest->bbJumpDest)) // special case for self jumps + !bDest->HasJumpTo(bDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) { @@ -6165,14 +6158,14 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // - if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block - bNext->isEmpty() && // and it is an empty block - (bNext != bNext->bbJumpDest) && // special case for self jumps + if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + bNext->isEmpty() && // and it is an empty block + !bNext->HasJumpTo(bNext) && // special case for self jumps !bDest->IsFirstColdBlock(this) && - (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections + !fgInDifferentRegions(block, bDest)) // do not cross hot/cold sections { // case (a) // @@ -6192,7 +6185,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // * don't consider lexical predecessors, or we may confuse loop recognition // * don't consider blocks of different rarities // - BasicBlock* const bNextJumpDest = bNext->bbJumpDest; + BasicBlock* const bNextJumpDest = bNext->GetJumpDest(); const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && (block->isRunRarely() == bDest->isRunRarely()); @@ -6272,7 +6265,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) { BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); bFixup->inheritWeight(bDestNext); - bFixup->bbJumpDest = bDestNext; + bFixup->SetJumpDest(bDestNext); fgRemoveRefPred(bDestNext, bDest); fgAddRefPred(bFixup, bDest); @@ -6304,9 +6297,9 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } // Optimize the Conditional JUMP to go to the new target - block->bbJumpDest = bNext->bbJumpDest; + block->SetJumpDest(bNext->GetJumpDest()); - fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); + fgAddRefPred(bNext->GetJumpDest(), block, fgRemoveRefPred(bNext->GetJumpDest(), bNext)); /* Unlink bNext from the BasicBlock list; note that we can @@ -6370,7 +6363,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) (that we will later connect to 'block'), it is not 
really unreachable. */ - if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) + if ((bNext->bbRefs > 0) && bNext->HasJumpTo(block) && (block->bbRefs == 1)) { continue; } @@ -6454,11 +6447,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->countOfInEdges() == 1) { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: - if (block->bbJumpDest == block) + if (block->HasJumpTo(block)) { fgRemoveBlock(block, /* unreachable */ true); @@ -6551,7 +6544,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_NONE: costSz = 0; @@ -6976,8 +6969,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); - predBlock->bbJumpDest = crossJumpTarget; + predBlock->SetJumpKindAndTarget(BBJ_ALWAYS, crossJumpTarget); fgRemoveRefPred(block, predBlock); fgAddRefPred(crossJumpTarget, predBlock); @@ -7067,7 +7059,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. - if (!block->KindIs(BBJ_COND) || block->NextIs(block->bbJumpDest)) + if (!block->KindIs(BBJ_COND) || block->JumpsToNext()) { return false; } @@ -7116,7 +7108,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) Statement* nextFirstStmt; Statement* destFirstStmt; - if (!getSuccCandidate(block->Next(), &nextFirstStmt) || !getSuccCandidate(block->bbJumpDest, &destFirstStmt)) + if (!getSuccCandidate(block->Next(), &nextFirstStmt) || !getSuccCandidate(block->GetJumpDest(), &destFirstStmt)) { return false; } @@ -7146,7 +7138,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) fgUnlinkStmt(block->Next(), nextFirstStmt); fgInsertStmtNearEnd(block, nextFirstStmt); - fgUnlinkStmt(block->bbJumpDest, destFirstStmt); + fgUnlinkStmt(block->GetJumpDest(), destFirstStmt); block->bbFlags |= block->Next()->bbFlags & BBF_COPY_PROPAGATE; return true; diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 5b61db49a9099..33dd937057db5 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -501,8 +501,7 @@ void BlockCountInstrumentor::RelocateProbes() // if (pred->KindIs(BBJ_NONE)) { - pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); - pred->bbJumpDest = block; + pred->SetJumpKindAndTarget(BBJ_ALWAYS, block); } assert(pred->KindIs(BBJ_ALWAYS)); } @@ -945,7 +944,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) visitor->VisitBlock(block); nBlocks++; - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_CALLFINALLY: { @@ -1018,7 +1017,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) // We're leaving a try or catch, not a handler. // Treat this as a normal edge. // - BasicBlock* const target = block->bbJumpDest; + BasicBlock* const target = block->GetJumpDest(); // In some bad IL cases we may not have a target. // In others we may see something other than LEAVE be most-nested in a try. 
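The probe-relocation and edge-splitting hunks in this file repeat one small idiom: a BBJ_NONE predecessor is first turned into an explicit BBJ_ALWAYS to its lexical successor, so the edge about to be instrumented carries a concrete jump target. A hypothetical helper, for illustration only, showing the idiom with the new accessors:

    // Hypothetical helper (not part of this diff): make a fall-through edge
    // explicit before fgSplitEdge needs a jump target to redirect.
    static void MakeFallThroughExplicit(BasicBlock* pred, BasicBlock* succ)
    {
        assert(pred->NextIs(succ));
        if (pred->KindIs(BBJ_NONE))
        {
            pred->SetJumpKindAndTarget(BBJ_ALWAYS, succ);
        }
        assert(pred->KindIs(BBJ_ALWAYS));
    }
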
@@ -1554,8 +1553,7 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // if (block->KindIs(BBJ_NONE)) { - block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); - block->bbJumpDest = target; + block->SetJumpKindAndTarget(BBJ_ALWAYS, target); } instrumentedBlock = m_comp->fgSplitEdge(block, target); @@ -1697,8 +1695,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // if (pred->KindIs(BBJ_NONE)) { - pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); - pred->bbJumpDest = block; + pred->SetJumpKindAndTarget(BBJ_ALWAYS, block); } assert(pred->KindIs(BBJ_ALWAYS)); } @@ -3803,8 +3800,8 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf { assert(nSucc == 1); assert(block == pseudoEdge->m_sourceBlock); - assert(block->bbJumpDest != nullptr); - FlowEdge* const flowEdge = m_comp->fgGetPredForBlock(block->bbJumpDest, block); + assert(!block->HasJumpTo(nullptr)); + FlowEdge* const flowEdge = m_comp->fgGetPredForBlock(block->GetJumpDest(), block); assert(flowEdge != nullptr); flowEdge->setLikelihood(1.0); return; @@ -3922,7 +3919,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, BlockInfo* info) { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_SWITCH: MarkInterestingSwitches(block, info); @@ -4016,8 +4013,8 @@ void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, // If it turns out often we fail at this stage, we might consider building a histogram of switch case // values at runtime, similar to what we do for classes at virtual call sites. // - const unsigned caseCount = block->bbJumpSwt->bbsCount; - BasicBlock** const jumpTab = block->bbJumpSwt->bbsDstTab; + const unsigned caseCount = block->GetJumpSwt()->bbsCount; + BasicBlock** const jumpTab = block->GetJumpSwt()->bbsDstTab; unsigned dominantCase = caseCount; for (unsigned i = 0; i < caseCount; i++) @@ -4043,7 +4040,7 @@ void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, return; } - if (block->bbJumpSwt->bbsHasDefault && (dominantCase == caseCount - 1)) + if (block->GetJumpSwt()->bbsHasDefault && (dominantCase == caseCount - 1)) { // Dominant case is the default case. // This effectively gets peeled already, so defer. 
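These reconstruction hunks reach the switch descriptor through GetJumpSwt rather than touching bbJumpSwt directly. The accessors are presumably trivial wrappers over the bbJumpSwt union member, sketched here as an assumption:

    // Sketches only: presumed thin wrappers over the bbJumpSwt union member.
    BBswtDesc* GetJumpSwt() const
    {
        return bbJumpSwt;
    }

    void SetJumpSwt(BBswtDesc* jumpSwt)
    {
        bbJumpSwt = jumpSwt;
    }
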
@@ -4057,9 +4054,9 @@ void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, "; marking for peeling\n", dominantCase, dominantEdge->m_targetBlock->bbNum, fraction); - block->bbJumpSwt->bbsHasDominantCase = true; - block->bbJumpSwt->bbsDominantCase = dominantCase; - block->bbJumpSwt->bbsDominantFraction = fraction; + block->GetJumpSwt()->bbsHasDominantCase = true; + block->GetJumpSwt()->bbsDominantCase = dominantCase; + block->GetJumpSwt()->bbsDominantFraction = fraction; } //------------------------------------------------------------------------ @@ -4435,7 +4432,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } else if (bSrc->KindIs(BBJ_ALWAYS)) { - bOnlyNext = bSrc->bbJumpDest; + bOnlyNext = bSrc->GetJumpDest(); } else { @@ -4456,7 +4453,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } else if (bDst->KindIs(BBJ_ALWAYS)) { - bOnlyNext = bDst->bbJumpDest; + bOnlyNext = bDst->GetJumpDest(); } else { @@ -4687,7 +4684,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - switch (bSrc->GetBBJumpKind()) + switch (bSrc->GetJumpKind()) { case BBJ_ALWAYS: case BBJ_EHCATCHRET: @@ -4763,7 +4760,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() BasicBlock* otherDst; if (bSrc->NextIs(bDst)) { - otherDst = bSrc->bbJumpDest; + otherDst = bSrc->GetJumpDest(); } else { diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index d50a03260a130..fbea28ed4107c 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -132,7 +132,7 @@ void ProfileSynthesis::AssignLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -303,7 +303,7 @@ void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block) // void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block) { - FlowEdge* const edge = m_comp->fgGetPredForBlock(block->bbJumpDest, block); + FlowEdge* const edge = m_comp->fgGetPredForBlock(block->GetJumpDest(), block); edge->setLikelihood(1.0); } @@ -316,7 +316,7 @@ void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block) // void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) { - BasicBlock* const jump = block->bbJumpDest; + BasicBlock* const jump = block->GetJumpDest(); BasicBlock* const next = block->Next(); // Watch for degenerate case @@ -499,7 +499,7 @@ void ProfileSynthesis::RepairLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -591,7 +591,7 @@ void ProfileSynthesis::BlendLikelihoods() { weight_t sum = SumOutgoingLikelihoods(block, &likelihoods); - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -1220,7 +1220,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) " to reflect capping; current likelihood is " FMT_WT "\n", exitBlock->bbNum, exitEdge->getLikelihood()); - BasicBlock* const jump = exitBlock->bbJumpDest; + BasicBlock* const jump = exitBlock->GetJumpDest(); BasicBlock* const next = exitBlock->Next(); FlowEdge* const jumpEdge = m_comp->fgGetPredForBlock(jump, exitBlock); FlowEdge* const nextEdge = m_comp->fgGetPredForBlock(next, exitBlock); diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 7fd5a41b4f8ef..3f7a622716024 100644 --- 
a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -120,7 +120,7 @@ PhaseStatus Compiler::fgInsertGCPolls() JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); pollType = GCPOLL_CALL; } - else if (BBJ_SWITCH == block->GetBBJumpKind()) + else if (BBJ_SWITCH == block->GetJumpKind()) { // We don't want to deal with all the outgoing edges of a switch block. // @@ -261,8 +261,8 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); - bottom = fgNewBBafter(top->GetBBJumpKind(), poll, true); - BBjumpKinds oldJumpKind = top->GetBBJumpKind(); + bottom = fgNewBBafter(top->GetJumpKind(), poll, true); + BBjumpKinds oldJumpKind = top->GetJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; // Update block flags @@ -284,7 +284,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) poll->bbNatLoopNum = lpIndex; // Set the bbNatLoopNum in case we are in a loop // Bottom gets all the outgoing edges and inherited flags of Original. - bottom->bbJumpDest = top->bbJumpDest; + bottom->SetJumpDest(top->GetJumpDest()); bottom->bbNatLoopNum = lpIndex; // Set the bbNatLoopNum in case we are in a loop if (lpIndex != BasicBlock::NOT_IN_LOOP) { @@ -371,8 +371,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) } #endif - top->bbJumpDest = bottom; - top->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); + top->SetJumpKindAndTarget(BBJ_COND, bottom); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. fgAddRefPred(bottom, poll); @@ -400,7 +399,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) case BBJ_ALWAYS: case BBJ_CALLFINALLY: - fgReplacePred(bottom->bbJumpDest, top, bottom); + fgReplacePred(bottom->GetJumpDest(), top, bottom); break; case BBJ_SWITCH: NO_WAY("SWITCH should be a call rather than an inlined poll."); @@ -1287,13 +1286,13 @@ void Compiler::fgLoopCallMark() for (BasicBlock* const block : Blocks()) { - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: - fgLoopCallTest(block, block->bbJumpDest); + fgLoopCallTest(block, block->GetJumpDest()); break; case BBJ_SWITCH: @@ -1837,15 +1836,14 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); - block->bbJumpDest = genReturnBB; + block->SetJumpKindAndTarget(BBJ_ALWAYS, genReturnBB); fgAddRefPred(genReturnBB, block); #ifdef DEBUG if (verbose) { printf("Synchronized method - convert block " FMT_BB " to BBJ_ALWAYS [targets " FMT_BB "]\n", block->bbNum, - block->bbJumpDest->bbNum); + block->GetJumpDest()->bbNum); } #endif } @@ -2309,8 +2307,7 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); - returnBlock->bbJumpDest = constReturnBlock; + returnBlock->SetJumpKindAndTarget(BBJ_ALWAYS, constReturnBlock); comp->fgAddRefPred(constReturnBlock, returnBlock); // Remove GT_RETURN since constReturnBlock returns the constant. 
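Taken together, the fgCreateGCPoll hunks above boil down to the following rewiring (a condensed sketch assembled from lines in this diff; statement motion, flag propagation, and loop-number bookkeeping omitted):

    BasicBlock* poll   = fgNewBBafter(BBJ_NONE, top, true);
    BasicBlock* bottom = fgNewBBafter(top->GetJumpKind(), poll, true);

    bottom->SetJumpDest(top->GetJumpDest());     // bottom takes over top's outgoing edge
    top->SetJumpKindAndTarget(BBJ_COND, bottom); // top now branches around the poll

    fgAddRefPred(bottom, poll);                  // poll falls into bottom
    fgAddRefPred(poll, top);                     // top can fall into poll
    fgAddRefPred(bottom, top);                   // or jump straight to bottom
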
@@ -3125,11 +3122,11 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // It's a jump from outside the handler; add it to the newHead preds list and remove // it from the block preds list. - switch (predBlock->GetBBJumpKind()) + switch (predBlock->GetJumpKind()) { case BBJ_CALLFINALLY: - noway_assert(predBlock->bbJumpDest == block); - predBlock->bbJumpDest = newHead; + noway_assert(predBlock->HasJumpTo(block)); + predBlock->SetJumpDest(newHead); fgRemoveRefPred(block, predBlock); fgAddRefPred(newHead, predBlock); break; @@ -3503,7 +3500,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // if (prevToFirstColdBlock->bbFallsThrough()) { - switch (prevToFirstColdBlock->GetBBJumpKind()) + switch (prevToFirstColdBlock->GetJumpKind()) { default: noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()"); @@ -3532,7 +3529,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() else { BasicBlock* transitionBlock = fgNewBBafter(BBJ_ALWAYS, prevToFirstColdBlock, true); - transitionBlock->bbJumpDest = firstColdBlock; + transitionBlock->SetJumpDest(firstColdBlock); transitionBlock->inheritWeight(firstColdBlock); // Update the predecessor list for firstColdBlock @@ -3547,8 +3544,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // If the block preceding the first cold block is BBJ_NONE, // convert it to BBJ_ALWAYS to force an explicit jump. - prevToFirstColdBlock->bbJumpDest = firstColdBlock; - prevToFirstColdBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + prevToFirstColdBlock->SetJumpKindAndTarget(BBJ_ALWAYS, firstColdBlock); break; } } @@ -3981,11 +3977,11 @@ PhaseStatus Compiler::fgSetBlockOrder() (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) bool partiallyInterruptible = true; - switch (block->GetBBJumpKind()) + switch (block->GetJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: - partiallyInterruptible = EDGE_IS_GC_SAFE(block, block->bbJumpDest); + partiallyInterruptible = EDGE_IS_GC_SAFE(block, block->GetJumpDest()); break; case BBJ_SWITCH: diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 0ab8e106a3d32..5ff68eb5333df 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -336,8 +336,8 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm fgRemoveRefPred(block, prevBb); fgAddRefPred(block, fastPathBb); fgAddRefPred(block, fallbackBb); - nullcheckBb->bbJumpDest = fallbackBb; - fastPathBb->bbJumpDest = block; + nullcheckBb->SetJumpDest(fallbackBb); + fastPathBb->SetJumpDest(block); if (needsSizeCheck) { @@ -351,7 +351,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // fastPathBb is only reachable from successful nullcheckBb fgAddRefPred(fastPathBb, nullcheckBb); // sizeCheckBb fails - jump to fallbackBb - sizeCheckBb->bbJumpDest = fallbackBb; + sizeCheckBb->SetJumpDest(fallbackBb); } else { @@ -783,10 +783,10 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* fgAddRefPred(block, fastPathBb); fgAddRefPred(block, fallbackBb); - maxThreadStaticBlocksCondBB->bbJumpDest = fallbackBb; - threadStaticBlockNullCondBB->bbJumpDest = fastPathBb; - fastPathBb->bbJumpDest = block; - fallbackBb->bbJumpDest = block; + maxThreadStaticBlocksCondBB->SetJumpDest(fallbackBb); + threadStaticBlockNullCondBB->SetJumpDest(fastPathBb); + fastPathBb->SetJumpDest(block); + fallbackBb->SetJumpDest(block); // Inherit the weights block->inheritWeight(prevBb); @@ -1153,7 
+1153,7 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G fgAddRefPred(helperCallBb, isInitedBb); // helperCallBb unconditionally jumps to the last block (jumps over fastPathBb) - isInitedBb->bbJumpDest = block; + isInitedBb->SetJumpDest(block); // // Re-distribute weights @@ -1495,7 +1495,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // fastpathBb flows into block fgAddRefPred(block, fastpathBb); // lengthCheckBb jumps to block if condition is met - lengthCheckBb->bbJumpDest = block; + lengthCheckBb->SetJumpDest(block); // // Re-distribute weights diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index 9a09dd1540770..fc49718cfe934 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -175,7 +175,7 @@ void OptIfConversionDsc::IfConvertFindFlow() { // First check for flow with no else case. The final block is the destination of the jump. m_doElseConversion = false; - m_finalBlock = m_startBlock->bbJumpDest; + m_finalBlock = m_startBlock->GetJumpDest(); assert(m_finalBlock != nullptr); if (!IfConvertCheckThenFlow() || m_flowFound) { @@ -392,7 +392,7 @@ void OptIfConversionDsc::IfConvertDump() } if (m_doElseConversion) { - for (BasicBlock* dumpBlock = m_startBlock->bbJumpDest; dumpBlock != m_finalBlock; + for (BasicBlock* dumpBlock = m_startBlock->GetJumpDest(); dumpBlock != m_finalBlock; dumpBlock = dumpBlock->GetUniqueSucc()) { m_comp->fgDumpBlock(dumpBlock); @@ -582,7 +582,7 @@ bool OptIfConversionDsc::optIfConvert() assert(m_thenOperation.node->OperIs(GT_STORE_LCL_VAR, GT_RETURN)); if (m_doElseConversion) { - if (!IfConvertCheckStmts(m_startBlock->bbJumpDest, &m_elseOperation)) + if (!IfConvertCheckStmts(m_startBlock->GetJumpDest(), &m_elseOperation)) { return false; } @@ -743,7 +743,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. 
m_comp->fgRemoveAllRefPreds(m_startBlock->Next(), m_startBlock); - m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); + m_startBlock->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 076fb70d2fc1d..8d735c3e99f83 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2455,7 +2455,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); + block->SetJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; @@ -4242,7 +4242,7 @@ void Compiler::impImportLeave(BasicBlock* block) #endif // DEBUG unsigned const blkAddr = block->bbCodeOffs; - BasicBlock* const leaveTarget = block->bbJumpDest; + BasicBlock* const leaveTarget = block->GetJumpDest(); unsigned const jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 @@ -4322,7 +4322,7 @@ void Compiler::impImportLeave(BasicBlock* block) { assert(step == DUMMY_INIT(NULL)); callBlock = block; - callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock->SetJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) { @@ -4345,11 +4345,11 @@ void Compiler::impImportLeave(BasicBlock* block) /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->KindIs(BBJ_ALWAYS)); - if (step->bbJumpDest != nullptr) + if (!step->HasJumpTo(nullptr)) { - fgRemoveRefPred(step->bbJumpDest, step); + fgRemoveRefPred(step->GetJumpDest(), step); } - step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next + step->SetJumpDest(callBlock); // the previous call to a finally returns to this call (to the next // finally in the chain) fgAddRefPred(callBlock, step); @@ -4397,11 +4397,11 @@ void Compiler::impImportLeave(BasicBlock* block) unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); - if (callBlock->bbJumpDest != nullptr) + if (!callBlock->HasJumpTo(nullptr)) { - fgRemoveRefPred(callBlock->bbJumpDest, callBlock); + fgRemoveRefPred(callBlock->GetJumpDest(), callBlock); } - callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. + callBlock->SetJumpDest(HBtab->ebdHndBeg); // This callBlock will call the "finally" handler. fgAddRefPred(HBtab->ebdHndBeg, callBlock); GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); @@ -4419,7 +4419,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) { @@ -4447,11 +4447,11 @@ void Compiler::impImportLeave(BasicBlock* block) // depending on which is the inner region. 
BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; - if (step->bbJumpDest != nullptr) + if (!step->HasJumpTo(nullptr)) { - fgRemoveRefPred(step->bbJumpDest, step); + fgRemoveRefPred(step->GetJumpDest(), step); } - step->bbJumpDest = finalStep; + step->SetJumpDest(finalStep); fgAddRefPred(finalStep, step); /* The new block will inherit this block's weight */ @@ -4480,7 +4480,7 @@ void Compiler::impImportLeave(BasicBlock* block) impEndTreeList(finalStep, endLFinStmt, lastStmt); - finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE + finalStep->SetJumpDest(leaveTarget); // this is the ultimate destination of the LEAVE fgAddRefPred(leaveTarget, finalStep); // Queue up the jump target for importing @@ -4508,14 +4508,14 @@ void Compiler::impImportLeave(BasicBlock* block) if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targeting " FMT_BB "):\n", block->bbNum, - block->bbJumpDest->bbNum); + block->GetJumpDest()->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG unsigned blkAddr = block->bbCodeOffs; - BasicBlock* leaveTarget = block->bbJumpDest; + BasicBlock* leaveTarget = block->GetJumpDest(); unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 @@ -4573,7 +4573,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { step = block; - step->SetBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET + step->SetJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG @@ -4595,11 +4595,11 @@ void Compiler::impImportLeave(BasicBlock* block) exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); - if (step->bbJumpDest != nullptr) + if (!step->HasJumpTo(nullptr)) { - fgRemoveRefPred(step->bbJumpDest, step); + fgRemoveRefPred(step->GetJumpDest(), step); } - step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch + step->SetJumpDest(exitBlock); // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block fgAddRefPred(exitBlock, step); @@ -4608,7 +4608,7 @@ void Compiler::impImportLeave(BasicBlock* block) { assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return - step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; + step->GetJumpDest()->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) @@ -4651,9 +4651,8 @@ void Compiler::impImportLeave(BasicBlock* block) // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
-            block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-            fgRemoveRefPred(block->bbJumpDest, block);
-            block->bbJumpDest = callBlock;
+            fgRemoveRefPred(block->GetJumpDest(), block);
+            block->SetJumpKindAndTarget(BBJ_ALWAYS, callBlock);
             fgAddRefPred(callBlock, block);
 
             /* The new block will inherit this block's weight */
@@ -4673,7 +4672,7 @@ void Compiler::impImportLeave(BasicBlock* block)
 #else // !FEATURE_EH_CALLFINALLY_THUNKS
 
             callBlock = block;
-            callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY
+            callBlock->SetJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY
 
 #ifdef DEBUG
             if (verbose)
@@ -4713,11 +4712,11 @@ void Compiler::impImportLeave(BasicBlock* block)
                 // Need to create another step block in the 'try' region that will actually branch to the
                 // call-to-finally thunk.
                 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
-                if (step->bbJumpDest != nullptr)
+                if (!step->HasJumpTo(nullptr))
                 {
-                    fgRemoveRefPred(step->bbJumpDest, step);
+                    fgRemoveRefPred(step->GetJumpDest(), step);
                 }
-                step->bbJumpDest = step2;
+                step->SetJumpDest(step2);
                 fgAddRefPred(step2, step);
                 step2->inheritWeight(block);
                 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
@@ -4747,11 +4746,11 @@ void Compiler::impImportLeave(BasicBlock* block)
 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
 
             callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
-            if (step->bbJumpDest != nullptr)
+            if (!step->HasJumpTo(nullptr))
             {
-                fgRemoveRefPred(step->bbJumpDest, step);
+                fgRemoveRefPred(step->GetJumpDest(), step);
             }
-            step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
+            step->SetJumpDest(callBlock); // the previous call to a finally returns to this call (to the next
                                           // finally in the chain)
             fgAddRefPred(callBlock, step);
 
@@ -4760,7 +4759,7 @@ void Compiler::impImportLeave(BasicBlock* block)
             {
                 assert(step->KindIs(BBJ_ALWAYS));
                 // Mark the target of a finally return
-                step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
+                step->GetJumpDest()->bbFlags |= BBF_FINALLY_TARGET;
             }
 #endif // defined(TARGET_ARM)
 
@@ -4794,11 +4793,11 @@ void Compiler::impImportLeave(BasicBlock* block)
             }
 #endif
 
-            if (callBlock->bbJumpDest != nullptr)
+            if (!callBlock->HasJumpTo(nullptr))
             {
-                fgRemoveRefPred(callBlock->bbJumpDest, callBlock);
+                fgRemoveRefPred(callBlock->GetJumpDest(), callBlock);
             }
-            callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
+            callBlock->SetJumpDest(HBtab->ebdHndBeg); // This callBlock will call the "finally" handler.
             fgAddRefPred(HBtab->ebdHndBeg, callBlock);
         }
         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
@@ -4861,18 +4860,18 @@ void Compiler::impImportLeave(BasicBlock* block)
                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
                 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
-                if (step->bbJumpDest != nullptr)
+                if (!step->HasJumpTo(nullptr))
                 {
-                    fgRemoveRefPred(step->bbJumpDest, step);
+                    fgRemoveRefPred(step->GetJumpDest(), step);
                 }
-                step->bbJumpDest = catchStep;
+                step->SetJumpDest(catchStep);
                 fgAddRefPred(catchStep, step);
 
 #if defined(TARGET_ARM)
                 if (stepType == ST_FinallyReturn)
                 {
                     // Mark the target of a finally return
-                    step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
+                    step->GetJumpDest()->bbFlags |= BBF_FINALLY_TARGET;
                 }
 #endif // defined(TARGET_ARM)
 
@@ -4908,7 +4907,7 @@ void Compiler::impImportLeave(BasicBlock* block)
 
     if (step == nullptr)
    {
-        block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS
+        block->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS
 
 #ifdef DEBUG
         if (verbose)
@@ -4921,11 +4920,11 @@ void Compiler::impImportLeave(BasicBlock* block)
     }
     else
     {
-        if (step->bbJumpDest != nullptr)
+        if (!step->HasJumpTo(nullptr))
         {
-            fgRemoveRefPred(step->bbJumpDest, step);
+            fgRemoveRefPred(step->GetJumpDest(), step);
         }
-        step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
+        step->SetJumpDest(leaveTarget); // this is the ultimate destination of the LEAVE
         fgAddRefPred(leaveTarget, step);
 
 #if defined(TARGET_ARM)
         if (stepType == ST_FinallyReturn)
         {
             assert(step->KindIs(BBJ_ALWAYS));
             // Mark the target of a finally return
-            step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
+            step->GetJumpDest()->bbFlags |= BBF_FINALLY_TARGET;
         }
 #endif // defined(TARGET_ARM)
 
@@ -4994,10 +4993,10 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
     // will be treated as pair and handled correctly.
     if (block->KindIs(BBJ_CALLFINALLY))
     {
-        BasicBlock* dupBlock = bbNewBasicBlock(block->GetBBJumpKind());
+        BasicBlock* dupBlock = bbNewBasicBlock(block->GetJumpKind());
         dupBlock->bbFlags    = block->bbFlags;
-        dupBlock->bbJumpDest = block->bbJumpDest;
-        fgAddRefPred(dupBlock->bbJumpDest, dupBlock);
+        dupBlock->SetJumpDest(block->GetJumpDest());
+        fgAddRefPred(dupBlock->GetJumpDest(), dupBlock);
         dupBlock->copyEHRegion(block);
         dupBlock->bbCatchTyp = block->bbCatchTyp;
 
@@ -5024,12 +5023,11 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
     }
 #endif // FEATURE_EH_FUNCLETS
 
-    block->SetBBJumpKind(BBJ_LEAVE DEBUG_ARG(this));
     fgInitBBLookup();
 
-    fgRemoveRefPred(block->bbJumpDest, block);
-    block->bbJumpDest = fgLookupBB(jmpAddr);
-    fgAddRefPred(block->bbJumpDest, block);
+    fgRemoveRefPred(block->GetJumpDest(), block);
+    block->SetJumpKindAndTarget(BBJ_LEAVE, fgLookupBB(jmpAddr));
+    fgAddRefPred(block->GetJumpDest(), block);
 
     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
     // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
@@ -6002,7 +6000,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 // Change block to BBJ_THROW so we won't trigger importation of successors.
                 //
-                block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this));
+                block->SetJumpKind(BBJ_THROW DEBUG_ARG(this));
 
                 // If this method has a explicit generic context, the only uses of it may be in
                 // the IL for this block. So assume it's used.
@@ -7261,7 +7259,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     impResetLeaveBlock(block, jmpAddr);
                 }
 
-                assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
+                assert(jmpAddr == block->GetJumpDest()->bbCodeOffs);
                 impImportLeave(block);
                 impNoteBranchOffs();
 
@@ -7298,7 +7296,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     BADCODE("invalid type for brtrue/brfalse");
                 }
 
-                if (opts.OptimizationEnabled() && block->NextIs(block->bbJumpDest))
+                if (opts.OptimizationEnabled() && block->JumpsToNext())
                 {
                     // We may have already modified `block`'s jump kind, if this is a re-importation.
                     //
@@ -7306,8 +7304,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     {
                         JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n",
                                 block->bbNum, block->Next()->bbNum);
-                        fgRemoveRefPred(block->bbJumpDest, block);
-                        block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                        fgRemoveRefPred(block->GetJumpDest(), block);
+                        block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
                     }
                     else
                     {
@@ -7372,15 +7370,15 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                         if (foldedJumpKind == BBJ_NONE)
                         {
                             JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->Next()->bbNum);
-                            fgRemoveRefPred(block->bbJumpDest, block);
+                            fgRemoveRefPred(block->GetJumpDest(), block);
                         }
                         else
                         {
                             JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
-                                    block->bbJumpDest->bbNum);
+                                    block->GetJumpDest()->bbNum);
                             fgRemoveRefPred(block->Next(), block);
                         }
-                        block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this));
+                        block->SetJumpKind(foldedJumpKind DEBUG_ARG(this));
                     }
 
                     break;
@@ -7544,7 +7542,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 assertImp((genActualType(op1) == genActualType(op2)) || (varTypeIsI(op1) && varTypeIsI(op2)) ||
                           (varTypeIsFloating(op1) && varTypeIsFloating(op2)));
 
-                if (opts.OptimizationEnabled() && block->NextIs(block->bbJumpDest))
+                if (opts.OptimizationEnabled() && block->JumpsToNext())
                 {
                     // We may have already modified `block`'s jump kind, if this is a re-importation.
                     //
@@ -7552,8 +7550,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     {
                         JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n",
                                 block->bbNum, block->Next()->bbNum);
-                        fgRemoveRefPred(block->bbJumpDest, block);
-                        block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                        fgRemoveRefPred(block->GetJumpDest(), block);
+                        block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
                     }
                     else
                     {
@@ -7615,8 +7613,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 {
                     // Find the jump target
                     size_t       switchVal = (size_t)op1->AsIntCon()->gtIconVal;
-                    unsigned     jumpCnt   = block->bbJumpSwt->bbsCount;
-                    BasicBlock** jumpTab   = block->bbJumpSwt->bbsDstTab;
+                    unsigned     jumpCnt   = block->GetJumpSwt()->bbsCount;
+                    BasicBlock** jumpTab   = block->GetJumpSwt()->bbsDstTab;
                     bool         foundVal  = false;
 
                     for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
@@ -7633,13 +7631,12 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                             if (!block->NextIs(curJump))
                             {
                                 // transform the basic block into a BBJ_ALWAYS
-                                block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-                                block->bbJumpDest = curJump;
+                                block->SetJumpKindAndTarget(BBJ_ALWAYS, curJump);
                             }
                             else
                             {
                                 // transform the basic block into a BBJ_NONE
-                                block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                                block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
                             }
                             foundVal = true;
                         }
@@ -7660,7 +7657,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                         printf(FMT_BB " becomes a %s", block->bbNum,
                                block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE");
                         if (block->KindIs(BBJ_ALWAYS))
                         {
-                            printf(" to " FMT_BB, block->bbJumpDest->bbNum);
+                            printf(" to " FMT_BB, block->GetJumpDest()->bbNum);
                         }
                         printf("\n");
                     }
@@ -11279,7 +11276,7 @@ void Compiler::impImportBlock(BasicBlock* block)
 
         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
 
-        switch (block->GetBBJumpKind())
+        switch (block->GetJumpKind())
         {
             case BBJ_COND:
 
@@ -11303,15 +11300,15 @@ void Compiler::impImportBlock(BasicBlock* block)
 
                 /* Try the target of the jump then */
 
-                multRef |= block->bbJumpDest->bbRefs;
-                baseTmp  = block->bbJumpDest->bbStkTempsIn;
-                tgtBlock = block->bbJumpDest;
+                multRef |= block->GetJumpDest()->bbRefs;
+                baseTmp  = block->GetJumpDest()->bbStkTempsIn;
+                tgtBlock = block->GetJumpDest();
                 break;
 
             case BBJ_ALWAYS:
-                multRef |= block->bbJumpDest->bbRefs;
-                baseTmp  = block->bbJumpDest->bbStkTempsIn;
-                tgtBlock = block->bbJumpDest;
+                multRef |= block->GetJumpDest()->bbRefs;
+                baseTmp  = block->GetJumpDest()->bbStkTempsIn;
+                tgtBlock = block->GetJumpDest();
                 break;
 
             case BBJ_NONE:
@@ -12123,7 +12120,7 @@ void Compiler::impImport()
     }
     else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS))
     {
-        entryBlock = entryBlock->bbJumpDest;
+        entryBlock = entryBlock->GetJumpDest();
     }
     else
     {
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index 000e99f47d486..bc88174a40ff0 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -274,12 +274,12 @@ class IndirectCallTransformer
         }
 
         // checkBlock
-        checkBlock->bbJumpDest = elseBlock;
+        checkBlock->SetJumpDest(elseBlock);
         compiler->fgAddRefPred(elseBlock, checkBlock);
         compiler->fgAddRefPred(thenBlock, checkBlock);
 
         // thenBlock
-        thenBlock->bbJumpDest = remainderBlock;
+        thenBlock->SetJumpDest(remainderBlock);
         compiler->fgAddRefPred(remainderBlock, thenBlock);
 
         // elseBlock
@@ -573,7 +573,7 @@ class IndirectCallTransformer
                 // There's no need for a new block here. We can just append to currBlock.
                 //
                 checkBlock = currBlock;
-                checkBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler));
+                checkBlock->SetJumpKind(BBJ_COND DEBUG_ARG(compiler));
             }
             else
             {
@@ -582,7 +582,7 @@ class IndirectCallTransformer
                 checkBlock = CreateAndInsertBasicBlock(BBJ_COND, thenBlock);
 
                 // prevCheckBlock is expected to jump to this new check (if its type check doesn't succeed)
-                prevCheckBlock->bbJumpDest = checkBlock;
+                prevCheckBlock->SetJumpDest(checkBlock);
                 compiler->fgAddRefPred(checkBlock, prevCheckBlock);
 
                 // Calculate the total likelihood for this check as a sum of likelihoods
@@ -651,8 +651,8 @@ class IndirectCallTransformer
             const bool isLastCheck = (checkIdx == origCall->GetInlineCandidatesCount() - 1);
             if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0))
             {
-                checkBlock->bbJumpDest = nullptr;
-                checkBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(compiler));
+                checkBlock->SetJumpDest(nullptr);
+                checkBlock->SetJumpKind(BBJ_NONE DEBUG_ARG(compiler));
                 return;
             }
 
@@ -980,11 +980,11 @@ class IndirectCallTransformer
         {
             thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock);
             thenBlock->bbFlags |= currBlock->bbFlags & BBF_SPLIT_GAINED;
-            thenBlock->bbJumpDest = remainderBlock;
+            thenBlock->SetJumpDest(remainderBlock);
             thenBlock->inheritWeightPercentage(currBlock, origCall->GetGDVCandidateInfo(checkIdx)->likelihood);
 
             // thenBlock always jumps to remainderBlock. Also, it has a single pred - last checkBlock
-            thenBlock->bbJumpDest = remainderBlock;
+            thenBlock->SetJumpDest(remainderBlock);
             compiler->fgAddRefPred(thenBlock, checkBlock);
             compiler->fgAddRefPred(remainderBlock, thenBlock);
 
@@ -1003,7 +1003,7 @@ class IndirectCallTransformer
             // where we know the last check is always true (in case of "exact" GDV)
             if (checkBlock->KindIs(BBJ_COND))
             {
-                checkBlock->bbJumpDest = elseBlock;
+                checkBlock->SetJumpDest(elseBlock);
                 compiler->fgAddRefPred(elseBlock, checkBlock);
             }
             else
@@ -1081,7 +1081,7 @@ class IndirectCallTransformer
 
         BasicBlock* const hotBlock = coldBlock->Prev();
 
-        if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock))
+        if (!hotBlock->KindIs(BBJ_ALWAYS) || !hotBlock->HasJumpTo(checkBlock))
         {
             JITDUMP("Unexpected flow from hot path " FMT_BB "\n", hotBlock->bbNum);
             return;
@@ -1126,8 +1126,7 @@ class IndirectCallTransformer
         // not fall through to the check block.
        //
         compiler->fgRemoveRefPred(checkBlock, coldBlock);
-        coldBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler));
-        coldBlock->bbJumpDest = elseBlock;
+        coldBlock->SetJumpKindAndTarget(BBJ_ALWAYS, elseBlock);
         compiler->fgAddRefPred(elseBlock, coldBlock);
     }
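Note: SetJumpKindAndTarget collapses the old two-step "set kind, then set bbJumpDest"
sequence (see coldBlock above) into one call, so the kind and target cannot be observed
out of sync in between. A plausible shape for the setter (sketch; the real definition
may carry extra asserts, and an overload taking a BBswtDesc* presumably exists for the
BBJ_SWITCH use in switchrecognition.cpp further down):

    void SetJumpKindAndTarget(BBjumpKinds jumpKind, BasicBlock* jumpDest)
    {
        bbJumpKind = jumpKind;
        bbJumpDest = jumpDest;
    }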
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index 97ba02897703c..66169d8eb380b 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -2410,7 +2410,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions()
             filterBb->bbCodeOffs = handlerBb->bbCodeOffs;
             filterBb->bbHndIndex = handlerBb->bbHndIndex;
             filterBb->bbTryIndex = handlerBb->bbTryIndex;
-            filterBb->bbJumpDest = handlerBb;
+            filterBb->SetJumpDest(handlerBb);
             filterBb->bbSetRunRarely();
             filterBb->bbFlags |= BBF_INTERNAL | BBF_DONT_REMOVE;
 
@@ -3506,7 +3506,7 @@ void Compiler::fgVerifyHandlerTab()
         }
 
         // Check for legal block types
-        switch (block->GetBBJumpKind())
+        switch (block->GetJumpKind())
         {
             case BBJ_EHFINALLYRET:
             {
@@ -4056,7 +4056,7 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
 
     for (BasicBlock* const predBlock : block->PredBlocks())
     {
-        if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block)
+        if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->HasJumpTo(block))
         {
             BasicBlock* pPrev = predBlock->Prev();
             if (pPrev != nullptr)
@@ -4115,7 +4115,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block)
     // trying to decide how to split up the predecessor edges.
     if (predBlock->KindIs(BBJ_CALLFINALLY))
     {
-        assert(predBlock->bbJumpDest == block);
+        assert(predBlock->HasJumpTo(block));
 
         // A BBJ_CALLFINALLY predecessor of the handler can only come from the corresponding try,
         // not from any EH clauses nested in this handler. However, we represent the BBJ_CALLFINALLY
@@ -4414,7 +4414,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
         BasicBlock* bFilterLast = HBtab->BBFilterLast();
         assert(bFilterLast != nullptr);
         assert(bFilterLast->KindIs(BBJ_EHFILTERRET));
-        assert(bFilterLast->bbJumpDest == block);
+        assert(bFilterLast->HasJumpTo(block));
 #ifdef DEBUG
         if (verbose)
         {
@@ -4423,8 +4423,8 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
         }
 #endif // DEBUG
         // Change the bbJumpDest for bFilterLast from the old first 'block' to the new first 'bPrev'
-        fgRemoveRefPred(bFilterLast->bbJumpDest, bFilterLast);
-        bFilterLast->bbJumpDest = bPrev;
+        fgRemoveRefPred(bFilterLast->GetJumpDest(), bFilterLast);
+        bFilterLast->SetJumpDest(bPrev);
         fgAddRefPred(bPrev, bFilterLast);
     }
 }
diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp
index 44e810592a006..b589564f3c793 100644
--- a/src/coreclr/jit/lir.cpp
+++ b/src/coreclr/jit/lir.cpp
@@ -1770,7 +1770,7 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range)
     assert(insertionPoint != nullptr);
 
 #if DEBUG
-    switch (block->GetBBJumpKind())
+    switch (block->GetJumpKind())
     {
         case BBJ_COND:
             assert(insertionPoint->OperIsConditionalJump());
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index 9447bf7a8dc54..3c914b2875f68 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -378,7 +378,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
             block->bbMemoryLiveIn  = fullMemoryKindSet;
             block->bbMemoryLiveOut = fullMemoryKindSet;
 
-            switch (block->GetBBJumpKind())
+            switch (block->GetJumpKind())
             {
                 case BBJ_EHFINALLYRET:
                 case BBJ_EHFAULTRET:
@@ -886,7 +886,7 @@ void Compiler::fgExtendDbgLifetimes()
         {
             VarSetOps::ClearD(this, initVars);
 
-            switch (block->GetBBJumpKind())
+            switch (block->GetJumpKind())
             {
                 case BBJ_NONE:
                     PREFIX_ASSUME(!block->IsLast());
@@ -896,7 +896,7 @@ void Compiler::fgExtendDbgLifetimes()
                 case BBJ_ALWAYS:
                 case BBJ_EHCATCHRET:
                 case BBJ_EHFILTERRET:
-                    VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
+                    VarSetOps::UnionD(this, initVars, block->GetJumpDest()->bbScope);
                     break;
 
                 case BBJ_CALLFINALLY:
@@ -906,13 +906,13 @@ void Compiler::fgExtendDbgLifetimes()
                         PREFIX_ASSUME(!block->IsLast());
                         VarSetOps::UnionD(this, initVars, block->Next()->bbScope);
                     }
-                    VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
+                    VarSetOps::UnionD(this, initVars, block->GetJumpDest()->bbScope);
                     break;
 
                 case BBJ_COND:
                     PREFIX_ASSUME(!block->IsLast());
                     VarSetOps::UnionD(this, initVars, block->Next()->bbScope);
-                    VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
+                    VarSetOps::UnionD(this, initVars, block->GetJumpDest()->bbScope);
                     break;
 
                 case BBJ_SWITCH:
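Note: the read side is uniform across jiteh.cpp, lir.cpp, and liveness.cpp above:
switches dispatch on GetJumpKind() and single-target reads go through GetJumpDest().
The getter is presumably a trivial const accessor over the jump-target union (sketch,
inferred from usage):

    BasicBlock* GetJumpDest() const
    {
        // Only meaningful for kinds that carry a single target (BBJ_ALWAYS,
        // BBJ_COND, BBJ_CALLFINALLY, BBJ_LEAVE, BBJ_EHCATCHRET, ...)
        return bbJumpDest;
    }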
diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp
index f29a5178b0c5d..727c4f3233a2b 100644
--- a/src/coreclr/jit/loopcloning.cpp
+++ b/src/coreclr/jit/loopcloning.cpp
@@ -838,10 +838,10 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler*
     newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true);
     newBlk->inheritWeight(insertAfter);
     newBlk->bbNatLoopNum = insertAfter->bbNatLoopNum;
-    newBlk->bbJumpDest   = slowHead;
+    newBlk->SetJumpDest(slowHead);
 
-    JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->bbJumpDest->bbNum);
-    comp->fgAddRefPred(newBlk->bbJumpDest, newBlk);
+    JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetJumpDest()->bbNum);
+    comp->fgAddRefPred(newBlk->GetJumpDest(), newBlk);
 
     JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum);
     comp->fgAddRefPred(newBlk, insertAfter);
@@ -870,10 +870,10 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler*
     BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true);
     newBlk->inheritWeight(insertAfter);
     newBlk->bbNatLoopNum = insertAfter->bbNatLoopNum;
-    newBlk->bbJumpDest   = slowHead;
+    newBlk->SetJumpDest(slowHead);
 
-    JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->bbJumpDest->bbNum);
-    comp->fgAddRefPred(newBlk->bbJumpDest, newBlk);
+    JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetJumpDest()->bbNum);
+    comp->fgAddRefPred(newBlk->GetJumpDest(), newBlk);
 
     JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum);
     comp->fgAddRefPred(newBlk, insertAfter);
@@ -1862,7 +1862,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd)
         return false;
     }
 
-    if (bottom->bbJumpDest != top)
+    if (!bottom->HasJumpTo(top))
     {
         JITDUMP("Loop cloning: rejecting loop " FMT_LP ". Branch at loop 'bottom' not looping to 'top'.\n", loopInd);
         return false;
@@ -2047,9 +2047,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
         if (!h->KindIs(BBJ_NONE))
         {
             assert(h->KindIs(BBJ_ALWAYS));
-            assert(h->bbJumpDest == loop.lpEntry);
-            h2->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-            h2->bbJumpDest = loop.lpEntry;
+            assert(h->HasJumpTo(loop.lpEntry));
+            h2->SetJumpKindAndTarget(BBJ_ALWAYS, loop.lpEntry);
         }
 
         fgReplacePred(loop.lpEntry, h, h2);
@@ -2063,8 +2062,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
     // Make 'h' fall through to 'h2' (if it didn't already).
     // Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and
     // optInsertLoopChoiceConditions() will add the edge.
-    h->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
-    h->bbJumpDest = nullptr;
+    h->SetJumpDest(nullptr);
+    h->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
 
     // Make X2 after B, if necessary.  (Not necessary if B is a BBJ_ALWAYS.)
     // "newPred" will be the predecessor of the blocks of the cloned loop.
@@ -2085,7 +2084,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
         // This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning.
         x2->bbNatLoopNum = ambientLoop;
 
-        x2->bbJumpDest = x;
+        x2->SetJumpDest(x);
         BlockSetOps::Assign(this, x2->bbReach, h->bbReach);
 
         fgAddRefPred(x2, b); // Add b->x2 pred edge
@@ -2117,7 +2116,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
     BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
     for (BasicBlock* const blk : loop.LoopBlocks())
     {
-        BasicBlock* newBlk = fgNewBBafter(blk->GetBBJumpKind(), newPred, /*extendRegion*/ true);
+        BasicBlock* newBlk = fgNewBBafter(blk->GetJumpKind(), newPred, /*extendRegion*/ true);
         JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum);
 
         // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it
@@ -2176,7 +2175,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
         bool b = blockMap->Lookup(blk, &newblk);
         assert(b && newblk != nullptr);
 
-        assert(blk->KindIs(newblk->GetBBJumpKind()));
+        assert(blk->KindIs(newblk->GetJumpKind()));
 
         // First copy the jump destination(s) from "blk".
         optCopyBlkDest(blk, newblk);
@@ -2185,7 +2184,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
         optRedirectBlock(newblk, blockMap);
 
         // Add predecessor edges for the new successors, as well as the fall-through paths.
-        switch (newblk->GetBBJumpKind())
+        switch (newblk->GetJumpKind())
         {
             case BBJ_NONE:
                 fgAddRefPred(newblk->Next(), newblk);
@@ -2193,12 +2192,12 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
 
             case BBJ_ALWAYS:
             case BBJ_CALLFINALLY:
-                fgAddRefPred(newblk->bbJumpDest, newblk);
+                fgAddRefPred(newblk->GetJumpDest(), newblk);
                 break;
 
             case BBJ_COND:
                 fgAddRefPred(newblk->Next(), newblk);
-                fgAddRefPred(newblk->bbJumpDest, newblk);
+                fgAddRefPred(newblk->GetJumpDest(), newblk);
                 break;
 
             case BBJ_SWITCH:
@@ -2256,8 +2255,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
     {
         // We can't just fall through to the slow path entry, so make it an unconditional branch.
         assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above.
-        slowHead->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-        slowHead->bbJumpDest = e2;
+        slowHead->SetJumpKindAndTarget(BBJ_ALWAYS, e2);
     }
 
     fgAddRefPred(e2, slowHead);
@@ -2921,8 +2919,8 @@ bool Compiler::optCheckLoopCloningGDVTestProfitable(GenTreeOp* guard, LoopCloneV
     // Check for (4)
     //
-    BasicBlock* const hotSuccessor  = guard->OperIs(GT_EQ) ? typeTestBlock->bbJumpDest : typeTestBlock->Next();
-    BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->Next() : typeTestBlock->bbJumpDest;
+    BasicBlock* const hotSuccessor  = guard->OperIs(GT_EQ) ? typeTestBlock->GetJumpDest() : typeTestBlock->Next();
+    BasicBlock* const coldSuccessor = guard->OperIs(GT_EQ) ? typeTestBlock->Next() : typeTestBlock->GetJumpDest();
 
     if (!hotSuccessor->hasProfileWeight() || !coldSuccessor->hasProfileWeight())
     {
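Note: the clone-and-redirect step in optCloneLoop above reduces to this per-block
skeleton (a condensed restatement of the hunks, not new logic):

    // blk = original loop block, newblk = its clone; blockMap maps originals to clones
    assert(blk->KindIs(newblk->GetJumpKind()));
    optCopyBlkDest(blk, newblk);        // copy bbJumpDest / bbJumpSwt as appropriate
    optRedirectBlock(newblk, blockMap); // remap any target that points into the loop
    // ...then fgAddRefPred is called for each successor of newblk, per jump kind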
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index d4732bdf0476e..0aada0795e9b1 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -778,8 +778,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
     // jumpCnt is the number of elements in the jump table array.
     // jumpTab is the actual pointer to the jump table array.
     // targetCnt is the number of unique targets in the jump table array.
-    jumpCnt   = originalSwitchBB->bbJumpSwt->bbsCount;
-    jumpTab   = originalSwitchBB->bbJumpSwt->bbsDstTab;
+    jumpCnt   = originalSwitchBB->GetJumpSwt()->bbsCount;
+    jumpTab   = originalSwitchBB->GetJumpSwt()->bbsDstTab;
     targetCnt = originalSwitchBB->NumSucc(comp);
 
     // GT_SWITCH must be a top-level node with no use.
@@ -801,13 +801,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
         noway_assert(comp->opts.OptimizationDisabled());
         if (originalSwitchBB->NextIs(jumpTab[0]))
         {
-            originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
-            originalSwitchBB->bbJumpDest = nullptr;
+            originalSwitchBB->SetJumpKind(BBJ_NONE DEBUG_ARG(comp));
+            originalSwitchBB->SetJumpDest(nullptr);
         }
         else
         {
-            originalSwitchBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp));
-            originalSwitchBB->bbJumpDest = jumpTab[0];
+            originalSwitchBB->SetJumpKindAndTarget(BBJ_ALWAYS, jumpTab[0]);
         }
 
         // Remove extra predecessor links if there was more than one case.
         for (unsigned i = 1; i < jumpCnt; ++i)
@@ -894,14 +893,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
     assert(originalSwitchBB->KindIs(BBJ_NONE));
     assert(originalSwitchBB->NextIs(afterDefaultCondBlock));
     assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH));
-    assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
+    assert(afterDefaultCondBlock->GetJumpSwt()->bbsHasDefault);
     assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
 
     // The GT_SWITCH code is still in originalSwitchBB (it will be removed later).
 
     // Turn originalSwitchBB into a BBJ_COND.
-    originalSwitchBB->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp));
-    originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];
+    originalSwitchBB->SetJumpKindAndTarget(BBJ_COND, jumpTab[jumpCnt - 1]);
 
     // Fix the pred for the default case: the default block target still has originalSwitchBB
     // as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point
@@ -957,13 +955,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
         }
         if (afterDefaultCondBlock->NextIs(uniqueSucc))
         {
-            afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
-            afterDefaultCondBlock->bbJumpDest = nullptr;
+            afterDefaultCondBlock->SetJumpKind(BBJ_NONE DEBUG_ARG(comp));
+            afterDefaultCondBlock->SetJumpDest(nullptr);
         }
         else
         {
-            afterDefaultCondBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp));
-            afterDefaultCondBlock->bbJumpDest = uniqueSucc;
+            afterDefaultCondBlock->SetJumpKindAndTarget(BBJ_ALWAYS, uniqueSucc);
         }
     }
     // If the number of possible destinations is small enough, we proceed to expand the switch
@@ -1022,10 +1019,6 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
                 fUsedAfterDefaultCondBlock = true;
             }
 
-            // We're going to have a branch, either a conditional or unconditional,
-            // to the target. Set the target.
-            currentBlock->bbJumpDest = jumpTab[i];
-
             // Wire up the predecessor list for the "branch" case.
             comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge);
 
@@ -1036,13 +1029,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
                 // case: there is no need to compare against the case index, since it's
                 // guaranteed to be taken (since the default case was handled first, above).
 
-                currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp));
+                currentBlock->SetJumpKindAndTarget(BBJ_ALWAYS, jumpTab[i]);
             }
             else
             {
                 // Otherwise, it's a conditional branch. Set the branch kind, then add the
                 // condition statement.
-                currentBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp));
+                currentBlock->SetJumpKindAndTarget(BBJ_COND, jumpTab[i]);
 
                 // Now, build the conditional statement for the current case that is
                 // being evaluated:
@@ -1075,7 +1068,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
         JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
         assert(currentBlock == afterDefaultCondBlock);
         assert(currentBlock->KindIs(BBJ_SWITCH));
-        currentBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp));
+        currentBlock->SetJumpKind(BBJ_NONE DEBUG_ARG(comp));
         currentBlock->bbFlags &= ~BBF_DONT_REMOVE;
         comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
     }
@@ -1110,7 +1103,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
         switchBlockRange.InsertAfter(switchValue, switchTable, switchJump);
 
         // this block no longer branches to the default block
-        afterDefaultCondBlock->bbJumpSwt->removeDefault();
+        afterDefaultCondBlock->GetJumpSwt()->removeDefault();
     }
 
     comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock);
@@ -1247,16 +1240,14 @@ bool Lowering::TryLowerSwitchToBitTest(
     //
     GenCondition bbSwitchCondition;
-    bbSwitch->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp));
-
     comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
     comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
 
     if (bbSwitch->NextIs(bbCase0))
     {
         // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
-        bbSwitchCondition    = GenCondition::C;
-        bbSwitch->bbJumpDest = bbCase1;
+        bbSwitchCondition = GenCondition::C;
+        bbSwitch->SetJumpKindAndTarget(BBJ_COND, bbCase1);
 
         comp->fgAddRefPred(bbCase0, bbSwitch);
         comp->fgAddRefPred(bbCase1, bbSwitch);
@@ -1266,8 +1257,8 @@ bool Lowering::TryLowerSwitchToBitTest(
         assert(bbSwitch->NextIs(bbCase1));
 
         // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
-        bbSwitchCondition    = GenCondition::NC;
-        bbSwitch->bbJumpDest = bbCase0;
+        bbSwitchCondition = GenCondition::NC;
+        bbSwitch->SetJumpKindAndTarget(BBJ_COND, bbCase0);
 
         comp->fgAddRefPred(bbCase0, bbSwitch);
         comp->fgAddRefPred(bbCase1, bbSwitch);
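Note: LowerSwitch reads and mutates the switch descriptor exclusively through
GetJumpSwt(), and later hunks (optimizebools.cpp, optimizer.cpp) also use SetJumpSwt.
The presumable accessor pair, wrapping the existing bbJumpSwt union member (sketch):

    BBswtDesc* GetJumpSwt() const
    {
        return bbJumpSwt; // valid only while the block is BBJ_SWITCH
    }

    void SetJumpSwt(BBswtDesc* jumpSwt)
    {
        bbJumpSwt = jumpSwt;
    }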
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 2ab7d537d5828..391682418ed1e 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -2545,7 +2545,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
                 if (predBlock->KindIs(BBJ_COND))
                 {
                     // Special handling to improve matching on backedges.
-                    BasicBlock* otherBlock = predBlock->NextIs(block) ? predBlock->bbJumpDest : predBlock->Next();
+                    BasicBlock* otherBlock = predBlock->NextIs(block) ? predBlock->GetJumpDest() : predBlock->Next();
                     noway_assert(otherBlock != nullptr);
                     if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
                     {
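Note: findPredBlockForLiveIn above picks "the other successor" of a BBJ_COND
predecessor. With the accessors, the two successors of a BBJ_COND block read
explicitly as follows (hypothetical helpers for illustration only; the diff
spells this out inline instead):

    BasicBlock* TrueSuccessorOf(BasicBlock* condBlock)
    {
        assert(condBlock->KindIs(BBJ_COND));
        return condBlock->GetJumpDest(); // taken side of the conditional jump
    }

    BasicBlock* FalseSuccessorOf(BasicBlock* condBlock)
    {
        assert(condBlock->KindIs(BBJ_COND));
        return condBlock->Next(); // fall-through side
    }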
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 52fca3a01b6d2..59baa0b4d1561 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -6215,7 +6215,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
         // Many tailcalls will have call and ret in the same block, and thus be
         // BBJ_RETURN, but if the call falls through to a ret, and we are doing a
         // tailcall, change it here.
-        compCurBB->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(this));
+        compCurBB->SetJumpKind(BBJ_RETURN DEBUG_ARG(this));
     }
 
     GenTree* stmtExpr = fgMorphStmt->GetRootNode();
@@ -6363,7 +6363,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
     {
         // We call CORINFO_HELP_TAILCALL which does not return, so we will
        // not need epilogue.
-        compCurBB->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this));
+        compCurBB->SetJumpKind(BBJ_THROW DEBUG_ARG(this));
     }
 
     if (isRootReplaced)
@@ -7501,7 +7501,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
     {
         // Todo: this may not look like a viable loop header.
        // Might need the moral equivalent of a scratch BB.
-        block->bbJumpDest = fgEntryBB;
+        block->SetJumpKindAndTarget(BBJ_ALWAYS, fgEntryBB);
     }
     else
     {
@@ -7511,12 +7511,11 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
         // block removal on it.
         fgEnsureFirstBBisScratch();
         fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
-        block->bbJumpDest = fgFirstBB->Next();
+        block->SetJumpKindAndTarget(BBJ_ALWAYS, fgFirstBB->Next());
     }
 
     // Finish hooking things up.
-    block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-    fgAddRefPred(block->bbJumpDest, block);
+    fgAddRefPred(block->GetJumpDest(), block);
     block->bbFlags &= ~BBF_HAS_JMP;
 }
 
@@ -13177,7 +13176,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
         * Remove the conditional statement */
 
        noway_assert(cond->gtOper == GT_CNS_INT);
        noway_assert((block->Next()->countOfInEdges() > 0) && (block->GetJumpDest()->countOfInEdges() > 0));
 
        if (condTree != cond)
        {
@@ -13202,8 +13201,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
        if (cond->AsIntCon()->gtIconVal != 0)
        {
            /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
-           block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-           bTaken    = block->bbJumpDest;
+           block->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+           bTaken    = block->GetJumpDest();
            bNotTaken = block->Next();
        }
        else
@@ -13211,16 +13210,16 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
            /* Unmark the loop if we are removing a backwards branch */
            /* dest block must also be marked as a loop head and     */
            /* We must be able to reach the backedge block           */
-           if (block->bbJumpDest->isLoopHead() && (block->bbJumpDest->bbNum <= block->bbNum) &&
-               fgReachable(block->bbJumpDest, block))
+           if (block->GetJumpDest()->isLoopHead() && (block->GetJumpDest()->bbNum <= block->bbNum) &&
+               fgReachable(block->GetJumpDest(), block))
            {
-               optUnmarkLoopBlocks(block->bbJumpDest, block);
+               optUnmarkLoopBlocks(block->GetJumpDest(), block);
            }
 
            /* JTRUE 0 - transform the basic block into a BBJ_NONE */
-           block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+           block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
            bTaken    = block->Next();
-           bNotTaken = block->bbJumpDest;
+           bNotTaken = block->GetJumpDest();
        }
 
        if (fgHaveValidEdgeWeights)
@@ -13273,7 +13272,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                FlowEdge* edge;
 
                // Now fix the weights of the edges out of 'bUpdated'
-               switch (bUpdated->GetBBJumpKind())
+               switch (bUpdated->GetJumpKind())
                {
                    case BBJ_NONE:
                        edge         = fgGetPredForBlock(bUpdated->Next(), bUpdated);
@@ -13290,7 +13289,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                        FALLTHROUGH;
 
                    case BBJ_ALWAYS:
-                       edge         = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
+                       edge         = fgGetPredForBlock(bUpdated->GetJumpDest(), bUpdated);
                        newMaxWeight = bUpdated->bbWeight;
                        newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                        edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next());
@@ -13315,7 +13314,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
            printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE");
            if (block->KindIs(BBJ_ALWAYS))
            {
-               printf(" to " FMT_BB, block->bbJumpDest->bbNum);
+               printf(" to " FMT_BB, block->GetJumpDest()->bbNum);
            }
            printf("\n");
        }
@@ -13429,8 +13428,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
 
        // Find the actual jump target
        size_t       switchVal = (size_t)cond->AsIntCon()->gtIconVal;
-       unsigned     jumpCnt   = block->bbJumpSwt->bbsCount;
-       BasicBlock** jumpTab   = block->bbJumpSwt->bbsDstTab;
+       unsigned     jumpCnt   = block->GetJumpSwt()->bbsCount;
+       BasicBlock** jumpTab   = block->GetJumpSwt()->bbsDstTab;
        bool         foundVal  = false;
 
        for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
@@ -13447,13 +13446,12 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
                if (!block->NextIs(curJump))
                {
                    // transform the basic block into a BBJ_ALWAYS
-                   block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-                   block->bbJumpDest = curJump;
+                   block->SetJumpKindAndTarget(BBJ_ALWAYS, curJump);
                }
                else
                {
                    // transform the basic block into a BBJ_NONE
-                   block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                   block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
                }
                foundVal = true;
            }
@@ -13473,7 +13471,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
            printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE");
            if (block->KindIs(BBJ_ALWAYS))
            {
-               printf(" to " FMT_BB, block->bbJumpDest->bbNum);
+               printf(" to " FMT_BB, block->GetJumpDest()->bbNum);
            }
            printf("\n");
        }
@@ -14021,8 +14019,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block)
    else
 #endif // !TARGET_X86
    {
-       block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-       block->bbJumpDest = genReturnBB;
+       block->SetJumpKindAndTarget(BBJ_ALWAYS, genReturnBB);
        fgAddRefPred(genReturnBB, block);
        fgReturnCount--;
    }
@@ -14513,8 +14510,8 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
    fgAddRefPred(remainderBlock, cond1Block);
    fgAddRefPred(remainderBlock, cond2Block);
 
-   cond1Block->bbJumpDest = remainderBlock;
-   cond2Block->bbJumpDest = remainderBlock;
+   cond1Block->SetJumpDest(remainderBlock);
+   cond2Block->SetJumpDest(remainderBlock);
 
    // Set the weights; some are guesses.
    asgBlock->inheritWeight(block);
@@ -14704,10 +14701,10 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
        //     bbj_cond(true)
        //
        gtReverseCond(condExpr);
-       condBlock->bbJumpDest = elseBlock;
+       condBlock->SetJumpDest(elseBlock);
 
-       thenBlock             = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
-       thenBlock->bbJumpDest = remainderBlock;
+       thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
+       thenBlock->SetJumpDest(remainderBlock);
        thenBlock->bbFlags |= propagateFlagsToAll;
        if ((block->bbFlags & BBF_INTERNAL) == 0)
        {
@@ -14730,7 +14727,7 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
        //     bbj_cond(true)
        //
        gtReverseCond(condExpr);
-       condBlock->bbJumpDest = remainderBlock;
+       condBlock->SetJumpDest(remainderBlock);
        fgAddRefPred(remainderBlock, condBlock);
        // Since we have no false expr, use the one we'd already created.
        thenBlock = elseBlock;
@@ -14746,7 +14743,7 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
        //     +-->------------+
        //     bbj_cond(true)
        //
-       condBlock->bbJumpDest = remainderBlock;
+       condBlock->SetJumpDest(remainderBlock);
        fgAddRefPred(remainderBlock, condBlock);
 
        elseBlock->inheritWeightPercentage(condBlock, 50);
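Note: fgFoldConditional above settles a JTRUE on a constant by picking the
taken/not-taken pair and downgrading the jump kind; reduced to its skeleton
(condensed restatement of the hunks above):

    // cond is a GT_CNS_INT and block is currently BBJ_COND
    if (cond->AsIntCon()->gtIconVal != 0)
    {
        block->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // JTRUE 1: always taken
        bTaken    = block->GetJumpDest();
        bNotTaken = block->Next();
    }
    else
    {
        block->SetJumpKind(BBJ_NONE DEBUG_ARG(this)); // JTRUE 0: never taken
        bTaken    = block->Next();
        bNotTaken = block->GetJumpDest();
    }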
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index b9728f7899179..580a64d2e6727 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -124,7 +124,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()
 
     // Check if m_b1 and m_b2 have the same bbJumpDest
 
-    if (m_b1->bbJumpDest == m_b2->bbJumpDest)
+    if (m_b1->HasJumpTo(m_b2->GetJumpDest()))
     {
         // Given the following sequence of blocks :
         //        B1: brtrue(t1, BX)
@@ -136,7 +136,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()
 
         m_sameTarget = true;
     }
-    else if (m_b2->NextIs(m_b1->bbJumpDest))
+    else if (m_b2->NextIs(m_b1->GetJumpDest()))
     {
         // Given the following sequence of blocks :
         //        B1: brtrue(t1, B3)
@@ -480,13 +480,13 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock()
     m_t3 = nullptr;
 
     bool foundEndOfOrConditions = false;
-    if (m_b1->NextIs(m_b2) && m_b2->NextIs(m_b1->bbJumpDest))
+    if (m_b1->NextIs(m_b2) && m_b2->NextIs(m_b1->GetJumpDest()))
     {
         // Found the end of two (or more) conditions being ORed together.
         // The final condition has been inverted.
         foundEndOfOrConditions = true;
     }
-    else if (m_b1->NextIs(m_b2) && (m_b1->bbJumpDest == m_b2->bbJumpDest))
+    else if (m_b1->NextIs(m_b2) && m_b1->HasJumpTo(m_b2->GetJumpDest()))
     {
         // Found two conditions connected together.
     }
@@ -586,8 +586,8 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock()
     m_comp->fgSetStmtSeq(s2);
 
     // Update the flow.
-    m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);
-    m_b1->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp));
+    m_comp->fgRemoveRefPred(m_b1->GetJumpDest(), m_b1);
+    m_b1->SetJumpKind(BBJ_NONE DEBUG_ARG(m_comp));
 
     // Fixup flags.
     m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE);
 
@@ -839,22 +839,22 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
     {
         // Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND
 
-        FlowEdge* edge1 = m_comp->fgGetPredForBlock(m_b1->bbJumpDest, m_b1);
+        FlowEdge* edge1 = m_comp->fgGetPredForBlock(m_b1->GetJumpDest(), m_b1);
         FlowEdge* edge2;
 
         if (m_sameTarget)
         {
-            edge2 = m_comp->fgGetPredForBlock(m_b2->bbJumpDest, m_b2);
+            edge2 = m_comp->fgGetPredForBlock(m_b2->GetJumpDest(), m_b2);
         }
         else
         {
             edge2 = m_comp->fgGetPredForBlock(m_b2->Next(), m_b2);
 
-            m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1);
+            m_comp->fgRemoveRefPred(m_b1->GetJumpDest(), m_b1);
 
-            m_b1->bbJumpDest = m_b2->bbJumpDest;
+            m_b1->SetJumpDest(m_b2->GetJumpDest());
 
-            m_comp->fgAddRefPred(m_b2->bbJumpDest, m_b1);
+            m_comp->fgAddRefPred(m_b2->GetJumpDest(), m_b1);
         }
 
         assert(edge1 != nullptr);
@@ -864,11 +864,11 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
         weight_t edgeSumMax = edge1->edgeWeightMax() + edge2->edgeWeightMax();
         if ((edgeSumMax >= edge1->edgeWeightMax()) && (edgeSumMax >= edge2->edgeWeightMax()))
         {
-            edge1->setEdgeWeights(edgeSumMin, edgeSumMax, m_b1->bbJumpDest);
+            edge1->setEdgeWeights(edgeSumMin, edgeSumMax, m_b1->GetJumpDest());
         }
         else
         {
-            edge1->setEdgeWeights(BB_ZERO_WEIGHT, BB_MAX_WEIGHT, m_b1->bbJumpDest);
+            edge1->setEdgeWeights(BB_ZERO_WEIGHT, BB_MAX_WEIGHT, m_b1->GetJumpDest());
         }
     }
 
@@ -876,10 +876,10 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
 
     if (optReturnBlock)
     {
-        m_b1->bbJumpDest = nullptr;
-        m_b1->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp));
+        m_b1->SetJumpDest(nullptr);
+        m_b1->SetJumpKind(BBJ_RETURN DEBUG_ARG(m_comp));
 #ifdef DEBUG
-        m_b1->bbJumpSwt = m_b2->bbJumpSwt;
+        m_b1->SetJumpSwt(m_b2->GetJumpSwt());
 #endif
         assert(m_b2->KindIs(BBJ_RETURN));
         assert(m_b1->NextIs(m_b2));
@@ -889,7 +889,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
     {
         assert(m_b1->KindIs(BBJ_COND));
         assert(m_b2->KindIs(BBJ_COND));
-        assert(m_b1->bbJumpDest == m_b2->bbJumpDest);
+        assert(m_b1->HasJumpTo(m_b2->GetJumpDest()));
         assert(m_b1->NextIs(m_b2));
         assert(!m_b2->IsLast());
     }
@@ -901,7 +901,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
         // Replace pred 'm_b2' for 'm_b2->bbNext' with 'm_b1'
         // Remove pred 'm_b2' for 'm_b2->bbJumpDest'
         m_comp->fgReplacePred(m_b2->Next(), m_b2, m_b1);
-        m_comp->fgRemoveRefPred(m_b2->bbJumpDest, m_b2);
+        m_comp->fgRemoveRefPred(m_b2->GetJumpDest(), m_b2);
     }
 
     // Get rid of the second block
@@ -1494,7 +1494,7 @@ PhaseStatus Compiler::optOptimizeBools()
 
             if (b2->KindIs(BBJ_COND))
             {
-                if ((b1->bbJumpDest != b2->bbJumpDest) && !b2->NextIs(b1->bbJumpDest))
+                if (!b1->HasJumpTo(b2->GetJumpDest()) && !b2->NextIs(b1->GetJumpDest()))
                 {
                     continue;
                 }
@@ -1520,7 +1520,7 @@ PhaseStatus Compiler::optOptimizeBools()
             else if (b2->KindIs(BBJ_RETURN))
            {
                 // Set b3 to b1 jump destination
-                BasicBlock* b3 = b1->bbJumpDest;
+                BasicBlock* b3 = b1->GetJumpDest();
 
                 // b3 must not be marked as BBF_DONT_REMOVE
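Note: optRedirectBlock below is the one spot that rewrites targets through a
block-to-block map; its single-target arm reduces to this idiom (condensed from
the hunk that follows):

    BasicBlock* newJumpDest = nullptr;
    if (redirectMap->Lookup(blk->GetJumpDest(), &newJumpDest))
    {
        if (updatePreds)
        {
            fgRemoveRefPred(blk->GetJumpDest(), blk); // old target loses this pred
        }
        if (updatePreds || addPreds)
        {
            fgAddRefPred(newJumpDest, blk); // new target gains it
        }
        blk->SetJumpDest(newJumpDest); // retarget last, while the old dest is still readable
    }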
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index fa0ff10e8a9e7..a7125734a718d 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -497,16 +497,16 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
         reportAfter();
     }
 
-    if ((skipUnmarkLoop == false) &&                  // If we want to unmark this loop...
-        block->KindIs(BBJ_ALWAYS, BBJ_COND) &&        // This block reaches conditionally or always
-        block->bbJumpDest->isLoopHead() &&            // to a loop head...
-        (fgCurBBEpochSize == fgBBNumMax + 1) &&       // We didn't add new blocks since last renumber...
-        (block->bbJumpDest->bbNum <= block->bbNum) && // This is a backedge...
-        fgDomsComputed &&                             // Given the doms are computed and valid...
-        (fgCurBBEpochSize == fgDomBBcount + 1) &&     //
-        fgReachable(block->bbJumpDest, block))        // Block's destination (target of back edge) can reach block...
+    if ((skipUnmarkLoop == false) &&                     // If we want to unmark this loop...
+        block->KindIs(BBJ_ALWAYS, BBJ_COND) &&           // This block reaches conditionally or always
+        block->GetJumpDest()->isLoopHead() &&            // to a loop head...
+        (fgCurBBEpochSize == fgBBNumMax + 1) &&          // We didn't add new blocks since last renumber...
+        (block->GetJumpDest()->bbNum <= block->bbNum) && // This is a backedge...
+        fgDomsComputed &&                                // Given the doms are computed and valid...
+        (fgCurBBEpochSize == fgDomBBcount + 1) &&        //
+        fgReachable(block->GetJumpDest(), block))        // Block's destination (target of back edge) can reach block...
     {
-        optUnmarkLoopBlocks(block->bbJumpDest, block); // Unscale the blocks in such loop.
+        optUnmarkLoopBlocks(block->GetJumpDest(), block); // Unscale the blocks in such loop.
     }
 }
@@ -1386,10 +1386,10 @@ void Compiler::optCheckPreds()
             }
         }
         noway_assert(bb);
-        switch (bb->GetBBJumpKind())
+        switch (bb->GetJumpKind())
         {
             case BBJ_COND:
-                if (bb->bbJumpDest == block)
+                if (bb->HasJumpTo(block))
                 {
                     break;
                 }
@@ -1400,7 +1400,7 @@ void Compiler::optCheckPreds()
             case BBJ_EHFILTERRET:
             case BBJ_ALWAYS:
             case BBJ_EHCATCHRET:
-                noway_assert(bb->bbJumpDest == block);
+                noway_assert(bb->HasJumpTo(block));
                 break;
             default:
                 break;
@@ -1804,10 +1804,10 @@ class LoopSearch
     {
         if (head->KindIs(BBJ_ALWAYS))
         {
-            if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum)
+            if (head->GetJumpDest()->bbNum <= bottom->bbNum && head->GetJumpDest()->bbNum >= top->bbNum)
             {
                 // OK - we enter somewhere within the loop.
-                return head->bbJumpDest;
+                return head->GetJumpDest();
             }
             else
             {
@@ -2145,7 +2145,7 @@ class LoopSearch
 
             if (newMoveAfter->KindIs(BBJ_ALWAYS, BBJ_COND))
             {
-                unsigned int destNum = newMoveAfter->bbJumpDest->bbNum;
+                unsigned int destNum = newMoveAfter->GetJumpDest()->bbNum;
                 if ((destNum >= top->bbNum) && (destNum <= bottom->bbNum) && !loopBlocks.IsMember(destNum))
                 {
                     // Reversing this branch out of block `newMoveAfter` could confuse this algorithm
@@ -2294,7 +2294,7 @@ class LoopSearch
         {
             // Need to reconnect the flow from `block` to `oldNext`.
 
-            if (block->KindIs(BBJ_COND) && (block->bbJumpDest == newNext))
+            if (block->KindIs(BBJ_COND) && block->HasJumpTo(newNext))
             {
                 // Reverse the jump condition
                 GenTree* test = block->lastNode();
@@ -2312,7 +2312,7 @@ class LoopSearch
                 }
 
                 // Redirect the Conditional JUMP to go to `oldNext`
-                block->bbJumpDest = oldNext;
+                block->SetJumpDest(oldNext);
             }
             else
             {
@@ -2321,7 +2321,7 @@ class LoopSearch
                 noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum));
             }
         }
-        else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext))
+        else if (block->KindIs(BBJ_ALWAYS) && block->HasJumpTo(newNext))
         {
             // We've made `block`'s jump target its bbNext, so remove the jump.
             if (!comp->fgOptimizeBranchToNext(block, newNext, block->Prev()))
@@ -2378,7 +2378,7 @@ class LoopSearch
         }
 
         // Make sure we don't leave around a goto-next unless it's marked KEEP_BBJ_ALWAYS.
-        assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || (block->bbJumpDest != newNext) ||
+        assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || !block->HasJumpTo(newNext) ||
               ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0));
        return newBlock;
     }
@@ -2398,14 +2398,14 @@ class LoopSearch
         {
             BasicBlock* exitPoint;
 
-            switch (block->GetBBJumpKind())
+            switch (block->GetJumpKind())
             {
                 case BBJ_COND:
                 case BBJ_CALLFINALLY:
                 case BBJ_ALWAYS:
                 case BBJ_EHCATCHRET:
-                    assert(block->bbJumpDest);
-                    exitPoint = block->bbJumpDest;
+                    assert(!block->HasJumpTo(nullptr));
+                    exitPoint = block->GetJumpDest();
 
                     if (!loopBlocks.IsMember(exitPoint->bbNum))
                     {
@@ -2738,7 +2738,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
 
     BasicBlock* newJumpDest = nullptr;
 
-    switch (blk->GetBBJumpKind())
+    switch (blk->GetJumpKind())
     {
         case BBJ_NONE:
         case BBJ_THROW:
@@ -2755,30 +2755,30 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
         case BBJ_CALLFINALLY:
         case BBJ_COND:
             // All of these have a single jump destination to update.
-            if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest))
+            if (redirectMap->Lookup(blk->GetJumpDest(), &newJumpDest))
            {
                 if (updatePreds)
                 {
-                    fgRemoveRefPred(blk->bbJumpDest, blk);
+                    fgRemoveRefPred(blk->GetJumpDest(), blk);
                 }
                 if (updatePreds || addPreds)
                 {
                     fgAddRefPred(newJumpDest, blk);
                 }
-                blk->bbJumpDest = newJumpDest;
+                blk->SetJumpDest(newJumpDest);
             }
             else if (addPreds)
             {
-                fgAddRefPred(blk->bbJumpDest, blk);
+                fgAddRefPred(blk->GetJumpDest(), blk);
             }
             break;
 
         case BBJ_SWITCH:
         {
             bool redirected = false;
-            for (unsigned i = 0; i < blk->bbJumpSwt->bbsCount; i++)
+            for (unsigned i = 0; i < blk->GetJumpSwt()->bbsCount; i++)
             {
-                BasicBlock* switchDest = blk->bbJumpSwt->bbsDstTab[i];
+                BasicBlock* switchDest = blk->GetJumpSwt()->bbsDstTab[i];
                 if (redirectMap->Lookup(switchDest, &newJumpDest))
                 {
                     if (updatePreds)
@@ -2789,8 +2789,8 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
                     {
                         fgAddRefPred(newJumpDest, blk);
                     }
-                    blk->bbJumpSwt->bbsDstTab[i] = newJumpDest;
-                    redirected                   = true;
+                    blk->GetJumpSwt()->bbsDstTab[i] = newJumpDest;
+                    redirected                      = true;
                 }
                 else if (addPreds)
                 {
@@ -2818,21 +2818,21 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R
 // TODO-Cleanup: This should be a static member of the BasicBlock class.
 void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
 {
-    assert(from->KindIs(to->GetBBJumpKind())); // Precondition.
+    assert(from->KindIs(to->GetJumpKind())); // Precondition.
 
     // copy the jump destination(s) from "from" to "to".
-    switch (to->GetBBJumpKind())
+    switch (to->GetJumpKind())
     {
         case BBJ_ALWAYS:
         case BBJ_LEAVE:
         case BBJ_CALLFINALLY:
         case BBJ_COND:
             // All of these have a single jump destination to update.
-            to->bbJumpDest = from->bbJumpDest;
+            to->SetJumpDest(from->GetJumpDest());
             break;
 
         case BBJ_SWITCH:
-            to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt);
+            to->SetJumpSwt(new (this, CMK_BasicBlock) BBswtDesc(this, from->GetJumpSwt()));
             break;
 
         default:
@@ -2936,14 +2936,14 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
     // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a
     // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through
     // head block. See FindEntry() for the logic that allows this.
-    if (h->KindIs(BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS))
+    if (h->KindIs(BBJ_ALWAYS) && h->HasJumpTo(t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS))
     {
         // Insert new head
 
         BasicBlock* const newH = fgNewBBafter(BBJ_NONE, h, /*extendRegion*/ true);
         newH->inheritWeight(h);
         newH->bbNatLoopNum = h->bbNatLoopNum;
-        h->bbJumpDest      = newH;
+        h->SetJumpDest(newH);
 
         fgRemoveRefPred(t, h);
         fgAddRefPred(newH, h);
@@ -3210,7 +3210,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati
     assert(h->KindIs(BBJ_NONE, BBJ_COND));
     if (h->KindIs(BBJ_COND))
     {
-        BasicBlock* const hj = h->bbJumpDest;
+        BasicBlock* const hj = h->GetJumpDest();
         assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum));
     }
 
@@ -3439,7 +3439,7 @@ BasicBlock* Compiler::optLoopEntry(BasicBlock* preHeader)
     else
     {
         assert(preHeader->KindIs(BBJ_ALWAYS));
-        return preHeader->bbJumpDest;
+        return preHeader->GetJumpDest();
     }
 }
 
@@ -4361,7 +4361,7 @@ PhaseStatus Compiler::optUnrollLoops()
             for (BasicBlock* block = loop.lpTop; !loop.lpBottom->NextIs(block); block = block->Next())
             {
                 BasicBlock* newBlock = insertAfter =
-                    fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true);
+                    fgNewBBafter(block->GetJumpKind(), insertAfter, /*extendRegion*/ true);
                 blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite);
 
                 if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval))
@@ -4395,7 +4395,7 @@ PhaseStatus Compiler::optUnrollLoops()
                 newBlock->scaleBBWeight(1.0 / BB_LOOP_WEIGHT_SCALE);
 
                 // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them.
-                assert(newBlock->bbJumpDest == nullptr);
+                assert(newBlock->HasJumpTo(nullptr));
 
                 if (block == bottom)
                 {
@@ -4414,7 +4414,7 @@ PhaseStatus Compiler::optUnrollLoops()
                     {
                         testCopyStmt->SetRootNode(sideEffList);
                     }
-                    newBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                    newBlock->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
                 }
             }
 
@@ -4485,9 +4485,9 @@ PhaseStatus Compiler::optUnrollLoops()
                     fgRemoveAllRefPreds(succ, block);
                 }
 
-                block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
+                block->SetJumpDest(nullptr);
                 block->bbStmtList   = nullptr;
-                block->bbJumpDest   = nullptr;
                 block->bbNatLoopNum = newLoopNum;
 
                 // Remove a few unnecessary flags (this list is not comprehensive).
@@ -4529,8 +4529,8 @@ PhaseStatus Compiler::optUnrollLoops()
                 Statement* initBlockBranchStmt = initBlock->lastStmt();
                 noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE));
                 fgRemoveStmt(initBlock, initBlockBranchStmt);
-                fgRemoveRefPred(initBlock->bbJumpDest, initBlock);
-                initBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+                fgRemoveRefPred(initBlock->GetJumpDest(), initBlock);
+                initBlock->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
             }
             else
             {
@@ -4846,7 +4846,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
     }
 
     // Get hold of the jump target
-    BasicBlock* const bTest = block->bbJumpDest;
+    BasicBlock* const bTest = block->GetJumpDest();
 
     // Does the bTest consist of 'jtrue(cond) block' ?
     if (!bTest->KindIs(BBJ_COND))
@@ -4857,7 +4857,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
     // bTest must be a backwards jump to block->bbNext
     // This will be the top of the loop.
     //
-    BasicBlock* const bTop = bTest->bbJumpDest;
+    BasicBlock* const bTop = bTest->GetJumpDest();
 
     if (!block->NextIs(bTop))
     {
@@ -5076,8 +5076,8 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
     bool foundCondTree = false;
 
     // Create a new block after `block` to put the copied condition code.
-    block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
-    block->bbJumpDest = nullptr;
+    block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
+    block->SetJumpDest(nullptr);
     BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true);
 
     // Clone each statement in bTest and append to bNewCond.
@@ -5123,7 +5123,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
 
     // Fix flow and profile
     //
-    bNewCond->bbJumpDest = bJoin;
+    bNewCond->SetJumpDest(bJoin);
     bNewCond->inheritWeight(block);
 
     if (allProfileWeightsAreValid)
@@ -5236,15 +5236,15 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
         weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood;
 
         FlowEdge* const edgeBlockToNext  = fgGetPredForBlock(bNewCond->Next(), bNewCond);
-        FlowEdge* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->bbJumpDest, bNewCond);
+        FlowEdge* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->GetJumpDest(), bNewCond);
 
         JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum,
                 bNewCond->Next()->bbNum, blockToNextWeight);
         JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (avoid loop)\n", bNewCond->bbNum,
-                bNewCond->bbJumpDest->bbNum, blockToAfterWeight);
+                bNewCond->GetJumpDest()->bbNum, blockToAfterWeight);
 
         edgeBlockToNext->setEdgeWeights(blockToNextWeight, blockToNextWeight, bNewCond->Next());
-        edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->bbJumpDest);
+        edgeBlockToAfter->setEdgeWeights(blockToAfterWeight, blockToAfterWeight, bNewCond->GetJumpDest());
 
 #ifdef DEBUG
         // If we're checkig profile data, see if profile for the two target blocks is consistent.
@@ -5253,7 +5253,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
         {
             const ProfileChecks checks        = (ProfileChecks)JitConfig.JitProfileChecks();
             const bool          nextProfileOk = fgDebugCheckIncomingProfileData(bNewCond->Next(), checks);
-            const bool          jumpProfileOk = fgDebugCheckIncomingProfileData(bNewCond->bbJumpDest, checks);
+            const bool          jumpProfileOk = fgDebugCheckIncomingProfileData(bNewCond->GetJumpDest(), checks);
 
             if (hasFlag(checks, ProfileChecks::RAISE_ASSERT))
             {
@@ -8153,7 +8153,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
 
     if (!isTopEntryLoop)
     {
-        preHead->bbJumpDest = entry;
+        preHead->SetJumpDest(entry);
     }
 
     // Must set IL code offset
@@ -8201,7 +8201,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
         BasicBlock* skipLoopBlock;
         if (head->NextIs(entry))
        {
-            skipLoopBlock = head->bbJumpDest;
+            skipLoopBlock = head->GetJumpDest();
         }
         else
         {
@@ -8296,7 +8296,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
             continue;
         }
 
-        switch (predBlock->GetBBJumpKind())
+        switch (predBlock->GetJumpKind())
        {
             case BBJ_NONE:
                 // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop,
@@ -8307,9 +8307,9 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
                 break;
 
             case BBJ_COND:
-                if (predBlock->bbJumpDest == entry)
+                if (predBlock->HasJumpTo(entry))
                 {
-                    predBlock->bbJumpDest = preHead;
+                    predBlock->SetJumpDest(preHead);
                     noway_assert(!predBlock->NextIs(preHead));
                 }
                 else
@@ -8322,17 +8322,17 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum)
 
             case BBJ_ALWAYS:
             case BBJ_EHCATCHRET:
-                noway_assert(predBlock->bbJumpDest == entry);
-                predBlock->bbJumpDest = preHead;
+                noway_assert(predBlock->HasJumpTo(entry));
+                predBlock->SetJumpDest(preHead);
                 fgRemoveRefPred(entry, predBlock);
                 fgAddRefPred(preHead, predBlock);
                 break;
 
             case BBJ_SWITCH:
                 unsigned jumpCnt;
-                jumpCnt = predBlock->bbJumpSwt->bbsCount;
+                jumpCnt = predBlock->GetJumpSwt()->bbsCount;
                 BasicBlock** jumpTab;
-                jumpTab = predBlock->bbJumpSwt->bbsDstTab;
+                jumpTab = predBlock->GetJumpSwt()->bbsDstTab;
 
                 do
                {
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 7c99c264439ca..3c7cb1f35fc85 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -145,8 +145,7 @@ class PatchpointTransformer
         BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block);
 
         // Update flow and flags
-        block->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler));
-        block->bbJumpDest = remainderBlock;
+        block->SetJumpKindAndTarget(BBJ_COND, remainderBlock);
         block->bbFlags |= BBF_INTERNAL;
 
         helperBlock->bbFlags |= BBF_BACKWARD_JUMP;
@@ -233,8 +232,8 @@ class PatchpointTransformer
         }
 
         // Update flow
-        block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(compiler));
-        block->bbJumpDest = nullptr;
+        block->SetJumpDest(nullptr);
+        block->SetJumpKind(BBJ_THROW DEBUG_ARG(compiler));
 
         // Add helper call
         //
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index 7dcae117530c0..89720f6c7748b 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -941,7 +941,7 @@ void RangeCheck::MergeAssertion(BasicBlock* block, GenTree* op, Range* pRange DE
             JITDUMP("Merge assertions from pred " FMT_BB " edge: ", pred->bbNum);
             Compiler::optDumpAssertionIndices(assertions, "\n");
         }
-        else if (pred->KindIs(BBJ_COND, BBJ_ALWAYS) && (pred->bbJumpDest == block))
+        else if (pred->KindIs(BBJ_COND, BBJ_ALWAYS) && pred->HasJumpTo(block))
        {
             if (m_pCompiler->bbJtrueAssertionOut != nullptr)
             {
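Note: a recurring ordering detail, visible in patchpoint.cpp above and in the loop
optimizer changes: when a block transitions to a kind that carries no target, the
diff consistently clears the target before flipping the kind:

    block->SetJumpDest(nullptr);                       // drop the stale target first
    block->SetJumpKind(BBJ_THROW DEBUG_ARG(compiler)); // then change the kind

The rationale is an inference from the consistent ordering (presumably so no debug
check keyed off the jump kind ever observes a dangling target); the diff itself
does not state it.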
diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp
index a8365f7b93bdf..5ec4d1da1d46e 100644
--- a/src/coreclr/jit/redundantbranchopts.cpp
+++ b/src/coreclr/jit/redundantbranchopts.cpp
@@ -49,7 +49,7 @@ PhaseStatus Compiler::optRedundantBranches()
 
                 bool madeChangesThisBlock = m_compiler->optRedundantRelop(block);
 
                 BasicBlock* const bbNext = block->Next();
-                BasicBlock* const bbJump = block->bbJumpDest;
+                BasicBlock* const bbJump = block->GetJumpDest();
 
                 madeChangesThisBlock |= m_compiler->optRedundantBranch(block);
 
@@ -567,7 +567,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block)
                 const bool domIsSameRelop = (rii.vnRelation == ValueNumStore::VN_RELATION_KIND::VRK_Same) ||
                                             (rii.vnRelation == ValueNumStore::VN_RELATION_KIND::VRK_Swap);
 
-                BasicBlock* const trueSuccessor  = domBlock->bbJumpDest;
+                BasicBlock* const trueSuccessor  = domBlock->GetJumpDest();
                 BasicBlock* const falseSuccessor = domBlock->Next();
 
                 // If we can trace the flow from the dominating relop, we can infer its value.
@@ -602,7 +602,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block)
                     //
                     const bool relopIsTrue = rii.reverseSense ^ (domIsSameRelop | domIsInferredRelop);
                     JITDUMP("Jump successor " FMT_BB " of " FMT_BB " reaches, relop [%06u] must be %s\n",
-                            domBlock->bbJumpDest->bbNum, domBlock->bbNum, dspTreeID(tree),
+                            domBlock->GetJumpDest()->bbNum, domBlock->bbNum, dspTreeID(tree),
                             relopIsTrue ? "true" : "false");
                     relopValue = relopIsTrue ? 1 : 0;
                     break;
@@ -710,7 +710,7 @@ struct JumpThreadInfo
 {
     JumpThreadInfo(Compiler* comp, BasicBlock* block)
         : m_block(block)
-        , m_trueTarget(block->bbJumpDest)
+        , m_trueTarget(block->GetJumpDest())
         , m_falseTarget(block->Next())
         , m_fallThroughPred(nullptr)
         , m_ambiguousVNBlock(nullptr)
@@ -1072,8 +1072,8 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl
     // latter should prove useful in subsequent work, where we aim to enable jump
     // threading in cases where block has side effects.
     //
-    BasicBlock* const domTrueSuccessor  = domIsSameRelop ? domBlock->bbJumpDest : domBlock->Next();
-    BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->Next() : domBlock->bbJumpDest;
+    BasicBlock* const domTrueSuccessor  = domIsSameRelop ? domBlock->GetJumpDest() : domBlock->Next();
+    BasicBlock* const domFalseSuccessor = domIsSameRelop ? domBlock->Next() : domBlock->GetJumpDest();
     JumpThreadInfo    jti(this, block);
 
     for (BasicBlock* const predBlock : block->PredBlocks())
@@ -1460,9 +1460,8 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti)
         // Possibly defer this until after early out below.
         //
-        jti.m_fallThroughPred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
-        jti.m_fallThroughPred->bbJumpDest = jti.m_block;
-        modifiedFlow = true;
+        jti.m_fallThroughPred->SetJumpKindAndTarget(BBJ_ALWAYS, jti.m_block);
+        modifiedFlow = true;
     }
     else
     {
@@ -1532,7 +1531,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti)
         fgRemoveStmt(jti.m_block, lastStmt);
         JITDUMP("  repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_trueTarget->bbNum);
         fgRemoveRefPred(jti.m_falseTarget, jti.m_block);
-        jti.m_block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
+        jti.m_block->SetJumpKind(BBJ_ALWAYS DEBUG_ARG(this));
     }
     else if (falsePredsWillReuseBlock)
     {
@@ -1541,7 +1540,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti)
         JITDUMP("  repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum,
                 jti.m_falseTarget->bbNum);
         fgRemoveRefPred(jti.m_trueTarget, jti.m_block);
-        jti.m_block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this));
+        jti.m_block->SetJumpKind(BBJ_NONE DEBUG_ARG(this));
     }
 
     // Now reroute the flow from the predecessors.
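Note: the switchrecognition.cpp changes below use JumpsToNext() for the
"branches to the lexically next block" test that earlier hunks phrased as
block->NextIs(block->bbJumpDest). Its presumable definition (sketch, not shown
in this diff):

    bool JumpsToNext() const
    {
        return NextIs(bbJumpDest);
    }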
diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp
index fe1ecb8d39d74..7ffbb8207ab0b 100644
--- a/src/coreclr/jit/switchrecognition.cpp
+++ b/src/coreclr/jit/switchrecognition.cpp
@@ -95,10 +95,10 @@ bool IsConstantTestCondBlock(const BasicBlock* block,
         }
 
         *isReversed   = rootNode->gtGetOp1()->OperIs(GT_NE);
-        *blockIfTrue  = *isReversed ? block->Next() : block->bbJumpDest;
-        *blockIfFalse = *isReversed ? block->bbJumpDest : block->Next();
+        *blockIfTrue  = *isReversed ? block->Next() : block->GetJumpDest();
+        *blockIfFalse = *isReversed ? block->GetJumpDest() : block->Next();
 
-        if (block->NextIs(block->bbJumpDest) || (block->bbJumpDest == block))
+        if (block->JumpsToNext() || block->HasJumpTo(block))
         {
             // Ignoring weird cases like a condition jumping to itself
             return false;
@@ -319,8 +319,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
     assert(isTest);
 
     // Convert firstBlock to a switch block
-    firstBlock->SetBBJumpKind(BBJ_SWITCH DEBUG_ARG(this));
-    firstBlock->bbJumpDest = nullptr;
+    firstBlock->SetJumpKindAndTarget(BBJ_SWITCH, new (this, CMK_BasicBlock) BBswtDesc);
     firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd;
     firstBlock->lastStmt()->GetRootNode()->ChangeOper(GT_SWITCH);
 
@@ -351,11 +350,10 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
     assert((jumpCount > 0) && (jumpCount <= SWITCH_MAX_DISTANCE + 1));
     const auto jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jumpCount + 1 /*default case*/];
 
-    fgHasSwitch                          = true;
-    firstBlock->bbJumpSwt                = new (this, CMK_BasicBlock) BBswtDesc;
-    firstBlock->bbJumpSwt->bbsCount      = jumpCount + 1;
-    firstBlock->bbJumpSwt->bbsHasDefault = true;
-    firstBlock->bbJumpSwt->bbsDstTab     = jmpTab;
+    fgHasSwitch                             = true;
+    firstBlock->GetJumpSwt()->bbsCount      = jumpCount + 1;
+    firstBlock->GetJumpSwt()->bbsHasDefault = true;
+    firstBlock->GetJumpSwt()->bbsDstTab     = jmpTab;
 
     firstBlock->SetNext(isReversed ? blockIfTrue : blockIfFalse);
 
     // Splitting doesn't work well with jump-tables currently