
Commit

Merge pull request #19193 from hrydgard/ir-interpreter-opts
IRInterpreter: Enable some optimizations that accidentally were only enabled on non-ARM64.
hrydgard authored May 26, 2024
2 parents 9d11c35 + f2837e3 commit 84d9e30
Showing 12 changed files with 53 additions and 21 deletions.
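
In short: the patch threads a new optimizeForInterpreter flag from JitOptions into IROptions. The pure IR-interpreter path (IRJit) turns it on, the native ARM64/x86/RISC-V backends explicitly turn it off, and the IR frontend can use it to append interpreter-only passes. It also widens the #elif PPSSPP_ARCH(ARM) guard in IRJit.cpp to include ARM64, so ARM64 no longer falls through to the conservative #else branch — the "accidentally only enabled on non-ARM64" part of the title. Below is a minimal, self-contained sketch of the flag's path, using stand-in structs rather than PPSSPP's real JitOptions/IROptions:

    #include <cstdio>

    // Stand-ins for PPSSPP's JitOptions (JitState.h) and IROptions (IRInst.h); illustration only.
    struct JitOptions { bool optimizeForInterpreter = false; };
    struct IROptions  { bool optimizeForInterpreter = false; };

    IROptions BuildIROptions(bool pureIRInterpreter) {
        JitOptions jo;
        // IRJit (the IR interpreter path) sets this to true; the ARM64/x86/RISC-V
        // native backends set it to false in their constructors.
        jo.optimizeForInterpreter = pureIRInterpreter;

        IROptions opts;
        opts.optimizeForInterpreter = jo.optimizeForInterpreter;  // copied across, as in IRJit.cpp
        return opts;
    }

    int main() {
        IROptions opts = BuildIROptions(true);
        // IRFrontend::DoJit() can now gate interpreter-only IR passes on this flag.
        printf("optimizeForInterpreter = %d\n", opts.optimizeForInterpreter ? 1 : 0);
    }
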
1 change: 1 addition & 0 deletions Core/MIPS/ARM64/Arm64IRJit.cpp
@@ -44,6 +44,7 @@ Arm64JitBackend::Arm64JitBackend(JitOptions &jitopt, IRBlockCache &blocks)
     if (((intptr_t)Memory::base & 0x00000000FFFFFFFFUL) != 0) {
         jo.enablePointerify = false;
     }
+    jo.optimizeForInterpreter = false;
 #ifdef MASKED_PSP_MEMORY
     jo.enablePointerify = false;
 #endif

9 changes: 7 additions & 2 deletions Core/MIPS/IR/IRFrontend.cpp
@@ -277,7 +277,7 @@ void IRFrontend::DoJit(u32 em_address, std::vector<IRInst> &instructions, u32 &m
     IRWriter simplified;
     IRWriter *code = &ir;
     if (!js.hadBreakpoints) {
-        static const IRPassFunc passes[] = {
+        std::vector<IRPassFunc> passes{
             &ApplyMemoryValidation,
             &RemoveLoadStoreLeftRight,
             &OptimizeFPMoves,
@@ -288,7 +288,12 @@ void IRFrontend::DoJit(u32 em_address, std::vector<IRInst> &instructions, u32 &m
             // &MergeLoadStore,
             // &ThreeOpToTwoOp,
         };
-        if (IRApplyPasses(passes, ARRAY_SIZE(passes), ir, simplified, opts))
+
+        if (opts.optimizeForInterpreter) {
+            // Add special passes here.
+            // passes.push_back(&ReorderLoadStore);
+        }
+        if (IRApplyPasses(passes.data(), passes.size(), ir, simplified, opts))
             logBlocks = 1;
         code = &simplified;
         //if (ir.GetInstructions().size() >= 24)

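The switch from a fixed passes[] array to std::vector above is what makes interpreter-only passes possible; ReorderLoadStore itself is still only hinted at in a comment. A minimal, self-contained sketch of the pattern, with stand-in types and a hypothetical no-op pass (not PPSSPP's real IRWriter/IRPassFunc/IRApplyPasses API):

    #include <cstdio>
    #include <vector>

    // Stand-ins for illustration only.
    struct IRWriter {};
    struct IROptions { bool optimizeForInterpreter = false; };
    typedef bool (*IRPassFunc)(const IRWriter &in, IRWriter &out, const IROptions &opts);

    // Hypothetical interpreter-only pass (the real ReorderLoadStore is still commented out in the patch).
    bool DummyReorderLoadStore(const IRWriter &in, IRWriter &out, const IROptions &opts) {
        (void)in; (void)out; (void)opts;
        return false;
    }

    int main() {
        IROptions opts;
        opts.optimizeForInterpreter = true;

        std::vector<IRPassFunc> passes{
            // ...the always-on passes would go here...
        };
        if (opts.optimizeForInterpreter) {
            // Interpreter-only passes are appended after the common ones.
            passes.push_back(&DummyReorderLoadStore);
        }

        IRWriter in, out;
        for (IRPassFunc pass : passes)
            pass(in, out, opts);  // stand-in for IRApplyPasses(passes.data(), passes.size(), ...)
        printf("ran %zu passes\n", passes.size());
    }
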
1 change: 1 addition & 0 deletions Core/MIPS/IR/IRInst.h
@@ -405,6 +405,7 @@ struct IROptions {
     bool unalignedLoadStoreVec4;
     bool preferVec4;
     bool preferVec4Dot;
+    bool optimizeForInterpreter;
 };

 const IRMeta *GetIRMeta(IROp op);

7 changes: 5 additions & 2 deletions Core/MIPS/IR/IRJit.cpp
@@ -48,14 +48,16 @@ IRJit::IRJit(MIPSState *mipsState) : frontend_(mipsState->HasDefaultPrefix()), m
     // blTrampolines_ = kernelMemory.Alloc(size, true, "trampoline");
     InitIR();

+    jo.optimizeForInterpreter = true;
+
     IROptions opts{};
     opts.disableFlags = g_Config.uJitDisableFlags;
 #if PPSSPP_ARCH(RISCV64)
     // Assume RISC-V always has very slow unaligned memory accesses.
     opts.unalignedLoadStore = false;
     opts.unalignedLoadStoreVec4 = true;
     opts.preferVec4 = cpu_info.RiscV_V;
-#elif PPSSPP_ARCH(ARM)
+#elif PPSSPP_ARCH(ARM) || PPSSPP_ARCH(ARM64)
     opts.unalignedLoadStore = (opts.disableFlags & (uint32_t)JitDisable::LSU_UNALIGNED) == 0;
     opts.unalignedLoadStoreVec4 = true;
     opts.preferVec4 = cpu_info.bASIMD || cpu_info.bNEON;
@@ -65,6 +67,7 @@ IRJit::IRJit(MIPSState *mipsState) : frontend_(mipsState->HasDefaultPrefix()), m
     opts.unalignedLoadStoreVec4 = false;
     opts.preferVec4 = true;
 #endif
+    opts.optimizeForInterpreter = jo.optimizeForInterpreter;
     frontend_.SetOptions(opts);
 }

@@ -143,7 +146,7 @@ bool IRJit::CompileBlock(u32 em_address, std::vector<IRInst> &instructions, u32

     IRBlock *b = blocks_.GetBlock(block_num);
     b->SetInstructions(instructions);
-    b->SetOriginalSize(mipsBytes);
+    b->SetOriginalAddrSize(em_address, mipsBytes);
     if (preload) {
         // Hash, then only update page stats, don't link yet.
         // TODO: Should we always hash? Then we can reuse blocks.

24 changes: 14 additions & 10 deletions Core/MIPS/IR/IRJit.h
@@ -69,7 +69,8 @@ class IRBlock {
     bool HasOriginalFirstOp() const;
     bool RestoreOriginalFirstOp(int number);
     bool IsValid() const { return origAddr_ != 0 && origFirstOpcode_.encoding != 0x68FFFFFF; }
-    void SetOriginalSize(u32 size) {
+    void SetOriginalAddrSize(u32 address, u32 size) {
+        origAddr_ = address;
         origSize_ = size;
     }
     void SetTargetOffset(int offset) {
@@ -114,25 +115,28 @@ class IRBlockCache : public JitBlockCacheDebugInterface {
     IRBlockCache() {}
     void Clear();
     std::vector<int> FindInvalidatedBlockNumbers(u32 address, u32 length);
-    void FinalizeBlock(int i, bool preload = false);
+    void FinalizeBlock(int blockNum, bool preload = false);
     int GetNumBlocks() const override { return (int)blocks_.size(); }
     int AllocateBlock(int emAddr) {
         blocks_.push_back(IRBlock(emAddr));
         return (int)blocks_.size() - 1;
     }
-    IRBlock *GetBlock(int i) {
-        if (i >= 0 && i < (int)blocks_.size()) {
-            return &blocks_[i];
+    IRBlock *GetBlock(int blockNum) {
+        if (blockNum >= 0 && blockNum < (int)blocks_.size()) {
+            return &blocks_[blockNum];
         } else {
             return nullptr;
         }
     }
-    IRBlock *GetBlockUnchecked(int i) {
-        return &blocks_[i];
+    bool IsValidBlock(int blockNum) const override {
+        return blockNum < (int)blocks_.size() && blocks_[blockNum].IsValid();
     }
-    const IRBlock *GetBlock(int i) const {
-        if (i >= 0 && i < (int)blocks_.size()) {
-            return &blocks_[i];
+    IRBlock *GetBlockUnchecked(int blockNum) {
+        return &blocks_[blockNum];
+    }
+    const IRBlock *GetBlock(int blockNum) const {
+        if (blockNum >= 0 && blockNum < (int)blocks_.size()) {
+            return &blocks_[blockNum];
         } else {
             return nullptr;
         }

4 changes: 4 additions & 0 deletions Core/MIPS/IR/IRNativeCommon.cpp
@@ -714,6 +714,10 @@ void IRNativeBlockCacheDebugInterface::Init(const IRNativeBackend *backend) {
     backend_ = backend;
 }

+bool IRNativeBlockCacheDebugInterface::IsValidBlock(int blockNum) const {
+    return irBlocks_.IsValidBlock(blockNum);
+}
+
 int IRNativeBlockCacheDebugInterface::GetNumBlocks() const {
     return irBlocks_.GetNumBlocks();
 }

9 changes: 5 additions & 4 deletions Core/MIPS/IR/IRNativeCommon.h
@@ -162,10 +162,11 @@ class IRNativeBlockCacheDebugInterface : public JitBlockCacheDebugInterface {
 public:
     IRNativeBlockCacheDebugInterface(const MIPSComp::IRBlockCache &irBlocks);
     void Init(const IRNativeBackend *backend);
-    int GetNumBlocks() const;
-    int GetBlockNumberFromStartAddress(u32 em_address, bool realBlocksOnly = true) const;
-    JitBlockDebugInfo GetBlockDebugInfo(int blockNum) const;
-    void ComputeStats(BlockCacheStats &bcStats) const;
+    int GetNumBlocks() const override;
+    int GetBlockNumberFromStartAddress(u32 em_address, bool realBlocksOnly = true) const override;
+    JitBlockDebugInfo GetBlockDebugInfo(int blockNum) const override;
+    void ComputeStats(BlockCacheStats &bcStats) const override;
+    bool IsValidBlock(int blockNum) const override;

 private:
     void GetBlockCodeRange(int blockNum, int *startOffset, int *size) const;

2 changes: 2 additions & 0 deletions Core/MIPS/JitCommon/JitBlockCache.h
@@ -109,6 +109,7 @@ class JitBlockCacheDebugInterface {
     virtual int GetBlockNumberFromStartAddress(u32 em_address, bool realBlocksOnly = true) const = 0;
     virtual JitBlockDebugInfo GetBlockDebugInfo(int blockNum) const = 0;
     virtual void ComputeStats(BlockCacheStats &bcStats) const = 0;
+    virtual bool IsValidBlock(int blockNum) const = 0;

     virtual ~JitBlockCacheDebugInterface() {}
 };
@@ -164,6 +165,7 @@ class JitBlockCache : public JitBlockCacheDebugInterface {
     void RestoreSavedEmuHackOps(const std::vector<u32> &saved);

     int GetNumBlocks() const override { return num_blocks_; }
+    bool IsValidBlock(int blockNum) const override { return blockNum < num_blocks_ && !blocks_[blockNum].invalid; }

     static int GetBlockExitSize();

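With IsValidBlock() now a pure virtual on JitBlockCacheDebugInterface, every debug-interface implementation (JitBlockCache above, IRBlockCache, IRNativeBlockCacheDebugInterface) has to provide it, and DevScreens.cpp below becomes the first caller. A small, self-contained sketch of the intended usage pattern, with a stub cache rather than the real interface:

    #include <cstdio>
    #include <vector>

    // Stub mirroring just the two methods used here; the real interface lives in JitBlockCache.h.
    struct BlockCacheDebugStub {
        std::vector<bool> valid;
        int GetNumBlocks() const { return (int)valid.size(); }
        bool IsValidBlock(int blockNum) const {
            return blockNum >= 0 && blockNum < (int)valid.size() && valid[blockNum];
        }
    };

    // Typical consumer: only treat a block number as live after checking IsValidBlock(),
    // so invalidated blocks are never used for per-block debug queries.
    int CountValidBlocks(const BlockCacheDebugStub &cache) {
        int count = 0;
        for (int i = 0; i < cache.GetNumBlocks(); i++) {
            if (cache.IsValidBlock(i))
                count++;
        }
        return count;
    }

    int main() {
        BlockCacheDebugStub cache{{true, false, true, true}};
        printf("%d of %d blocks valid\n", CountValidBlocks(cache), cache.GetNumBlocks());
    }
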
4 changes: 2 additions & 2 deletions Core/MIPS/JitCommon/JitState.h
@@ -237,6 +237,8 @@ namespace MIPSComp {
         // ARM64 and RV64
         bool useStaticAlloc;
         bool enablePointerify;
+        // IR Interpreter
+        bool optimizeForInterpreter;

         // Common
         bool enableBlocklink;
@@ -245,6 +247,4 @@
         bool continueJumps;
         int continueMaxInstructions;
     };
-
 }
-

1 change: 1 addition & 0 deletions Core/MIPS/RiscV/RiscVJit.cpp
@@ -39,6 +39,7 @@ RiscVJitBackend::RiscVJitBackend(JitOptions &jitopt, IRBlockCache &blocks)
     if (((intptr_t)Memory::base & 0x00000000FFFFFFFFUL) != 0) {
         jo.enablePointerify = false;
     }
+    jo.optimizeForInterpreter = false;

     // Since we store the offset, this is as big as it can be.
     // We could shift off one bit to double it, would need to change RiscVAsm.

1 change: 1 addition & 0 deletions Core/MIPS/x86/X64IRJit.cpp
@@ -41,6 +41,7 @@ X64JitBackend::X64JitBackend(JitOptions &jitopt, IRBlockCache &blocks)
     if (((intptr_t)Memory::base & 0x00000000FFFFFFFFUL) != 0) {
         jo.enablePointerify = false;
     }
+    jo.optimizeForInterpreter = false;

     // Since we store the offset, this is as big as it can be.
     AllocCodeSpace(1024 * 1024 * 16);

11 changes: 10 additions & 1 deletion UI/DevScreens.cpp
@@ -1060,6 +1060,9 @@ void JitCompareScreen::UpdateDisasm() {
     }

     JitBlockCacheDebugInterface *blockCacheDebug = MIPSComp::jit->GetBlockCacheDebugInterface();
+    if (!blockCacheDebug->IsValidBlock(currentBlock_)) {
+        return;
+    }

     char temp[256];
     snprintf(temp, sizeof(temp), "%i/%i", currentBlock_, blockCacheDebug->GetNumBlocks());
@@ -1205,7 +1208,13 @@ UI::EventReturn JitCompareScreen::OnRandomBlock(UI::EventParams &e) {

     int numBlocks = blockCache->GetNumBlocks();
     if (numBlocks > 0) {
-        currentBlock_ = rand() % numBlocks;
+        int tries = 100;
+        while (tries-- > 0) {
+            currentBlock_ = rand() % numBlocks;
+            if (blockCache->IsValidBlock(currentBlock_)) {
+                break;
+            }
+        }
     }
     UpdateDisasm();
     return UI::EVENT_DONE;

