This repository has been archived by the owner on Jan 23, 2023. It is now read-only.

Commit 851a089: Cleanup
CarolEidt committed Sep 25, 2019
1 parent 9a924ee commit 851a089
Showing 5 changed files with 113 additions and 117 deletions.
src/jit/block.cpp (1 addition & 2 deletions)
@@ -1405,8 +1405,7 @@ bool BasicBlock::hasEHFlowOut()
 {
     // If a predecessor is marked BBF_KEEP_BBJ_ALWAYS, then we must keep all live incoming
     // vars on the stack.
-    if (((bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) || (bbJumpKind == BBJ_EHFILTERRET) ||
-        (bbJumpKind == BBJ_EHFINALLYRET))
+    if (((bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) || (bbJumpKind == BBJ_EHFILTERRET) || (bbJumpKind == BBJ_EHFINALLYRET))
     {
         return true;
     }
src/jit/block.h (1 addition & 1 deletion)
@@ -865,7 +865,7 @@ struct BasicBlock : private LIR::Range
     bool hasEHFlowIn();
     bool hasEHFlowOut();

-// Some non-zero value that will not collide with real tokens for bbCatchTyp
+// Some non-zero value that will not collide with real tokens for bbCatchTyp
 #define BBCT_NONE 0x00000000
 #define BBCT_FAULT 0xFFFFFFFC
 #define BBCT_FINALLY 0xFFFFFFFD
src/jit/lsra.cpp (61 additions & 60 deletions)
@@ -832,21 +832,12 @@ void LinearScan::setBlockSequence()
         blockInfo[block->bbNum].splitEdgeCount = 0;
 #endif // TRACK_LSRA_STATS

-        BasicBlock* uniquePred = block->GetUniquePred(compiler);
-        if (uniquePred != nullptr)
-        {
-            if (uniquePred->hasEHFlowOut())
-            {
-                // If we have a unique predecessor that has EH flow out, we treat this as if it has
-                // EH flow in, as there's no point in attempting to enregister anything on this edge.
-                blockInfo[block->bbNum].hasEHBoundaryIn = true;
-            }
-        }
-        else
+        bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr);
+        for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
         {
-            for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+            BasicBlock* predBlock = pred->flBlock;
+            if (!hasUniquePred)
             {
-                BasicBlock* predBlock = pred->flBlock;
                 if (predBlock->NumSucc(compiler) > 1)
                 {
                     blockInfo[block->bbNum].hasCriticalInEdge = true;
@@ -857,7 +848,18 @@ void LinearScan::setBlockSequence()
                     assert(!"Switch with single successor");
                 }
             }
+            // Treat the following cases as having incoming EH flow:
+            // - there's a unique predecessor that has EH flow out,
+            // - this is the first block and some predecessor has EH flow out, or
+            // - there's a predecessor that's the ALWAYS block of a BBCallAlwaysPair,
+            //   since we can't insert resolution moves into it.
+            //
+            if ((predBlock->hasEHFlowOut() && (hasUniquePred || block == compiler->fgFirstBB)) ||
+                ((predBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0))
+            {
+                blockInfo[block->bbNum].hasEHBoundaryIn = true;
+            }
+        }

        // Determine which block to schedule next.
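
Taken together, the two hunks above replace the unique-predecessor special case with a single walk over the predecessor list that computes both hasCriticalInEdge and hasEHBoundaryIn. Pieced together, the reworked loop reads roughly as follows (a simplified sketch assembled from the hunks, eliding the hidden switch-handling lines; not the verbatim source):

    bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr);
    for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
    {
        BasicBlock* predBlock = pred->flBlock;
        // Critical edges only matter when there are multiple predecessors.
        if (!hasUniquePred && (predBlock->NumSucc(compiler) > 1))
        {
            blockInfo[block->bbNum].hasCriticalInEdge = true;
        }
        // Incoming EH flow: a unique predecessor with EH flow out, the first
        // block with such a predecessor, or a predecessor that is the ALWAYS
        // half of a BBCallAlwaysPair (no resolution moves can go there).
        if ((predBlock->hasEHFlowOut() && (hasUniquePred || (block == compiler->fgFirstBB))) ||
            ((predBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0))
        {
            blockInfo[block->bbNum].hasEHBoundaryIn = true;
        }
    }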

@@ -1349,13 +1351,13 @@ void Interval::setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* l
 // LinearScan::identifyCandidatesExceptionDataflow: Build the set of variables exposed on EH flow edges
 //
 // Notes:
-//    This logic cloned from fgInterBlockLocalVarLiveness.
+//    This logic was originally cloned from fgInterBlockLocalVarLiveness.
 //
 void LinearScan::identifyCandidatesExceptionDataflow()
 {
     VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
 #ifdef DEBUG
-    VARSET_TP finallyVars(VarSetOps::MakeEmpty(compiler));
+    VARSET_TP finallyVars(VarSetOps::MakeEmpty(compiler));
 #endif
     BasicBlock* block;
@@ -1382,9 +1384,6 @@ void LinearScan::identifyCandidatesExceptionDataflow()
         }
     }

-    // finallyVars are handled as exceptVars (they are also must-init, see below).
-    VarSetOps::UnionD(compiler, exceptVars, finallyVars);
-
 #ifdef DEBUG
     if (VERBOSE)
     {
@@ -5159,58 +5158,66 @@ void LinearScan::freeRegister(RegRecord* physRegRecord)
     }
 }

-bool LinearScan::updateRegAndCheckIfFree(Interval* currentInterval, RefPosition* currentRefPosition)
+//------------------------------------------------------------------------
+// LinearScan::RegsToFree::updateRegs: Update register state after allocating a RefPosition
+//
+// Arguments:
+//    interval    - the Interval for the RefPosition
+//    refPosition - the RefPosition that has been allocated
+//    regMask     - the registers to accumulate for freeing if the register is unassigned
+//
+void LinearScan::RegsToFree::updateRegs(Interval* interval, RefPosition* refPosition, regMaskTP regMask)
 {
     bool unassign = false;
-    RefPosition* nextRefPosition = currentRefPosition->nextRefPosition;
-    regMaskTP assignedRegBit = genRegMask(currentInterval->physReg);
-    if (currentInterval->isWriteThru)
+    RefPosition* nextRefPosition = refPosition->nextRefPosition;
+    if (interval->isWriteThru)
     {
-        if (currentRefPosition->refType == RefTypeDef)
+        if (refPosition->refType == RefTypeDef)
         {
-            currentRefPosition->writeThru = true;
+            refPosition->writeThru = true;
         }
-        if (!currentRefPosition->lastUse)
+        if (!refPosition->lastUse)
         {
-            if (nextRefPosition == nullptr)
-            {
-                currentRefPosition->spillAfter = true;
-            }
-            else if (currentRefPosition->spillAfter)
+            if (refPosition->spillAfter)
             {
-                unassignPhysReg(getRegisterRecord(currentInterval->physReg) ARM_ARG(currentInterval->registerType));
                 unassign = true;
             }
         }
     }
-    if (currentRefPosition->lastUse || nextRefPosition == nullptr)
+    if (refPosition->lastUse || nextRefPosition == nullptr)
    {
-        assert(currentRefPosition->isIntervalRef());
+        assert(refPosition->isIntervalRef());

-        if (currentRefPosition->copyReg || (currentRefPosition->refType != RefTypeExpUse && nextRefPosition == nullptr))
+        if (refPosition->copyReg || (refPosition->refType != RefTypeExpUse && nextRefPosition == nullptr))
         {
             unassign = true;
         }
         else
         {
-            currentInterval->isActive = false;
+            interval->isActive = false;
         }
     }
-    return unassign;
-}
-
-void LinearScan::addRegsToFree(RefPosition* currentRefPosition, regMaskTP regs, RegsToFree& regsToFree)
-{
-    if (currentRefPosition->delayRegFree)
-    {
-        regsToFree.delayed |= regs;
-    }
-    else
+    if (unassign)
     {
-        regsToFree.current |= regs;
+        if (refPosition->delayRegFree)
+        {
+            delayed |= regMask;
+        }
+        else
+        {
+            current |= regMask;
+        }
     }
 }

+//------------------------------------------------------------------------
+// LinearScan::freeRegisters: Free the registers in 'regsToFree'
+//
+// Arguments:
+//    regsToFree - the 'RegsToFree' struct
+//
 void LinearScan::freeRegisters(RegsToFree& regsToFree)
 {
     INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FREE_REGS));
@@ -5226,9 +5233,9 @@ void LinearScan::freeRegisters(RegsToFree& regsToFree)
     regsToFree.delayed = RBM_NONE;
 }

-// Actual register allocation, accomplished by iterating over all of the previously
-// constructed Intervals
-// Loosely based on raAssignVars()
+//------------------------------------------------------------------------
+// LinearScan::allocateRegisters: Perform the actual register allocation by iterating over
+// all of the previously constructed Intervals
+//
 void LinearScan::allocateRegisters()
 {
@@ -5807,12 +5814,8 @@ void LinearScan::allocateRegisters()
                     assert(copyReg != REG_NA);
                     INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, currentInterval, copyReg));
                     lastAllocatedRefPosition = currentRefPosition;
-                    if (updateRegAndCheckIfFree(currentInterval, currentRefPosition))
-                    {
-                        addRegsToFree(currentRefPosition,
-                                      genRegMask(assignedRegister) | currentRefPosition->registerAssignment,
-                                      regsToFree);
-                    }
+                    regsToFree.updateRegs(currentInterval, currentRefPosition,
+                                          genRegMask(assignedRegister) | currentRefPosition->registerAssignment);
                     // If this is a tree temp (non-localVar) interval, we will need an explicit move.
                     if (!currentInterval->isLocalVar)
                     {
@@ -5974,10 +5977,8 @@ void LinearScan::allocateRegisters()
                 // (it will be freed when it is used).
                 if (!currentInterval->IsUpperVector())
                 {
-                    if (updateRegAndCheckIfFree(currentInterval, currentRefPosition))
-                    {
-                        addRegsToFree(currentRefPosition, assignedRegBit, regsToFree);
-                    }
+                    regsToFree.updateRegs(currentInterval, currentRefPosition, assignedRegBit);
+
                     // Update the register preferences for the relatedInterval, if this is 'preferencedToDef'.
                     // Don't propagate to subsequent relatedIntervals; that will happen as they are allocated, and we
                     // don't know yet whether the register will be retained.
src/jit/lsra.h (1 addition & 2 deletions)
@@ -1061,7 +1061,6 @@ class LinearScan : public LinearScanInterface
                                LsraLocation* nextRefLocationPtr,
                                RegisterType regType);
     void freeRegister(RegRecord* physRegRecord);
-    bool updateRegAndCheckIfFree(Interval* currentInterval, RefPosition* currentRefPosition);

     struct RegsToFree
     {
@@ -1072,9 +1071,9 @@
         {
             return current == RBM_NONE && delayed == RBM_NONE;
         }
+        void updateRegs(Interval* interval, RefPosition* refPosition, regMaskTP regMask);
     };
     void freeRegisters(RegsToFree& regsToFree);
-    void addRegsToFree(RefPosition* currentRefPosition, regMaskTP regs, RegsToFree& regsToFree);

     // Get the type that this tree defines.
     var_types getDefType(GenTree* tree)
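
For context, RegsToFree (declared just above) is a two-mask accumulator: registers released by an ordinary last use go into 'current' and become available at the next location, while delayRegFree uses, which must hold their register until after the consuming node, go into 'delayed' and become available one location later. A minimal standalone sketch of that pattern, using plain 64-bit masks in place of regMaskTP and a simplified staging scheme (illustrative only, not the coreclr implementation):

    #include <cstdint>
    #include <cstdio>

    using RegMask = uint64_t;

    struct RegsToFree
    {
        RegMask current = 0; // free at the next location boundary
        RegMask delayed = 0; // free one location boundary later

        bool IsEmpty() const
        {
            return (current == 0) && (delayed == 0);
        }
        void add(RegMask mask, bool delayFree)
        {
            (delayFree ? delayed : current) |= mask;
        }
    };

    // At each location boundary, release 'current' and stage 'delayed' so that
    // it is released at the following boundary.
    static void advanceLocation(RegsToFree& regs, RegMask& freeRegs)
    {
        freeRegs |= regs.current;
        regs.current = regs.delayed;
        regs.delayed = 0;
    }

    int main()
    {
        RegsToFree regs;
        RegMask freeRegs = 0;
        regs.add(1u << 3, /* delayFree */ false); // normal last use of r3
        regs.add(1u << 5, /* delayFree */ true);  // delayed free of r5
        advanceLocation(regs, freeRegs);          // r3 is free here, r5 is not
        printf("after loc 1: 0x%llx\n", (unsigned long long)freeRegs);
        advanceLocation(regs, freeRegs);          // now r5 is free too
        printf("after loc 2: 0x%llx\n", (unsigned long long)freeRegs);
        return 0;
    }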
src/jit/lsrabuild.cpp (49 additions & 52 deletions)
@@ -2251,65 +2251,62 @@ void LinearScan::buildIntervals()

         if (enregisterLocalVars)
         {
-            if (!blockInfo[block->bbNum].hasEHBoundaryOut)
-            {
-                // Insert exposed uses for a lclVar that is live-out of 'block' but not live-in to the
-                // next block, or any unvisited successors.
-                // This will address lclVars that are live on a backedge, as well as those that are kept
-                // live at a GT_JMP.
-                //
-                // Blocks ending with "jmp method" are marked as BBJ_HAS_JMP,
-                // and jmp call is represented using GT_JMP node which is a leaf node.
-                // Liveness phase keeps all the arguments of the method live till the end of
-                // block by adding them to liveout set of the block containing GT_JMP.
-                //
-                // The target of a GT_JMP implicitly uses all the current method arguments, however
-                // there are no actual references to them. This can cause LSRA to assert, because
-                // the variables are live but it sees no references. In order to correctly model the
-                // liveness of these arguments, we add dummy exposed uses, in the same manner as for
-                // backward branches. This will happen automatically via expUseSet.
-                //
-                // Note that a block ending with GT_JMP has no successors and hence the variables
-                // for which dummy use ref positions are added are arguments of the method.
-
-                VARSET_TP expUseSet(VarSetOps::MakeCopy(compiler, block->bbLiveOut));
-                VarSetOps::IntersectionD(compiler, expUseSet, registerCandidateVars);
-                BasicBlock* nextBlock = getNextBlock();
-                if (nextBlock != nullptr)
-                {
-                    VarSetOps::DiffD(compiler, expUseSet, nextBlock->bbLiveIn);
-                }
-                for (BasicBlock* succ : block->GetAllSuccs(compiler))
-                {
-                    if (VarSetOps::IsEmpty(compiler, expUseSet))
-                    {
-                        break;
-                    }
-
-                    if (isBlockVisited(succ))
-                    {
-                        continue;
-                    }
-                    VarSetOps::DiffD(compiler, expUseSet, succ->bbLiveIn);
-                }
-                if (!VarSetOps::IsEmpty(compiler, expUseSet))
-                {
-                    JITDUMP("Exposed uses:");
-                    VarSetOps::Iter iter(compiler, expUseSet);
-                    unsigned varIndex = 0;
-                    while (iter.NextElem(&varIndex))
-                    {
-                        unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
-                        LclVarDsc* varDsc = compiler->lvaTable + varNum;
-                        assert(isCandidateVar(varDsc));
-                        Interval* interval = getIntervalForLocalVar(varIndex);
-                        RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr,
-                                                          allRegs(interval->registerType));
-                        pos->setRegOptional(true);
-                        JITDUMP(" V%02u", varNum);
-                    }
-                    JITDUMP("\n");
-                }
-            }
+            // Insert exposed uses for a lclVar that is live-out of 'block' but not live-in to the
+            // next block, or any unvisited successors.
+            // This will address lclVars that are live on a backedge, as well as those that are kept
+            // live at a GT_JMP.
+            //
+            // Blocks ending with "jmp method" are marked as BBJ_HAS_JMP,
+            // and jmp call is represented using GT_JMP node which is a leaf node.
+            // Liveness phase keeps all the arguments of the method live till the end of
+            // block by adding them to liveout set of the block containing GT_JMP.
+            //
+            // The target of a GT_JMP implicitly uses all the current method arguments, however
+            // there are no actual references to them. This can cause LSRA to assert, because
+            // the variables are live but it sees no references. In order to correctly model the
+            // liveness of these arguments, we add dummy exposed uses, in the same manner as for
+            // backward branches. This will happen automatically via expUseSet.
+            //
+            // Note that a block ending with GT_JMP has no successors and hence the variables
+            // for which dummy use ref positions are added are arguments of the method.
+
+            VARSET_TP expUseSet(VarSetOps::MakeCopy(compiler, block->bbLiveOut));
+            VarSetOps::IntersectionD(compiler, expUseSet, registerCandidateVars);
+            BasicBlock* nextBlock = getNextBlock();
+            if (nextBlock != nullptr)
+            {
+                VarSetOps::DiffD(compiler, expUseSet, nextBlock->bbLiveIn);
+            }
+            for (BasicBlock* succ : block->GetAllSuccs(compiler))
+            {
+                if (VarSetOps::IsEmpty(compiler, expUseSet))
+                {
+                    break;
+                }
+
+                if (isBlockVisited(succ))
+                {
+                    continue;
+                }
+                VarSetOps::DiffD(compiler, expUseSet, succ->bbLiveIn);
+            }
+            if (!VarSetOps::IsEmpty(compiler, expUseSet))
+            {
+                JITDUMP("Exposed uses:");
+                VarSetOps::Iter iter(compiler, expUseSet);
+                unsigned varIndex = 0;
+                while (iter.NextElem(&varIndex))
+                {
+                    unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+                    LclVarDsc* varDsc = compiler->lvaTable + varNum;
+                    assert(isCandidateVar(varDsc));
+                    Interval* interval = getIntervalForLocalVar(varIndex);
+                    RefPosition* pos =
+                        newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
+                    pos->setRegOptional(true);
+                    JITDUMP(" V%02u", varNum);
+                }
+                JITDUMP("\n");
+            }

             // Clear the "last use" flag on any vars that are live-out from this block.
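
The expUseSet computation in this hunk is plain set algebra: start from the block's live-out set, keep only register candidates, then subtract the live-in sets of the next block and of every unvisited successor; each variable that survives gets a dummy RefTypeExpUse position. A standalone sketch of the same calculation, using std::set and stand-in block/liveness types instead of the JIT's VARSET_TP (illustrative only):

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <vector>

    using VarSet = std::set<unsigned>;

    static VarSet intersect(const VarSet& a, const VarSet& b)
    {
        VarSet out;
        std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                              std::inserter(out, out.begin()));
        return out;
    }

    static VarSet subtract(const VarSet& a, const VarSet& b)
    {
        VarSet out;
        std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                            std::inserter(out, out.begin()));
        return out;
    }

    struct Block
    {
        VarSet liveIn;
        bool   visited;
    };

    // expUseSet = (liveOut intersect candidates) - liveIn(next) - liveIn(each unvisited succ)
    VarSet computeExposedUses(const VarSet& liveOut, const VarSet& candidates,
                              const Block* next, const std::vector<const Block*>& succs)
    {
        VarSet expUseSet = intersect(liveOut, candidates);
        if (next != nullptr)
        {
            expUseSet = subtract(expUseSet, next->liveIn);
        }
        for (const Block* succ : succs)
        {
            if (expUseSet.empty())
            {
                break; // nothing left to expose
            }
            if (succ->visited)
            {
                continue; // already-sequenced successors see real references
            }
            expUseSet = subtract(expUseSet, succ->liveIn);
        }
        return expUseSet; // each survivor would get a dummy, reg-optional exposed use
    }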
