From 7bb642e3b29f30fbf6a7eb474f6cdd8082a79e6a Mon Sep 17 00:00:00 2001
From: Billy Laws
Date: Mon, 18 Nov 2024 22:32:35 +0000
Subject: [PATCH] ARM64EC: Implement inline SMC support using context
 reconstruction

When an SMC trap happens, reconstruct the context before the SMC write,
then compile the write as a single-instruction block to reduce it to
regular SMC.

SMC where the writing instruction is itself the instruction being
patched will hit the signal handler at most twice: the first hit
triggers the write to be recompiled as a single-instruction block, and
the second detects SMC coming from a single-instruction block and just
takes the usual invalidate+reprotect+continue path, avoiding a
potential infinite loop of recompilation.
---
 Source/Windows/ARM64EC/Module.S   |  3 ++-
 Source/Windows/ARM64EC/Module.cpp | 14 +++++++++++++-
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/Source/Windows/ARM64EC/Module.S b/Source/Windows/ARM64EC/Module.S
index 46c18d884d..dcb01943cc 100644
--- a/Source/Windows/ARM64EC/Module.S
+++ b/Source/Windows/ARM64EC/Module.S
@@ -49,7 +49,8 @@ BeginSimulation:
   bl "#SyncThreadContext"
   ldr x17, [x18, #0x1788] // TEB->ChpeV2CpuAreaInfo
   ldr x16, [x17, #0x48] // ChpeV2CpuAreaInfo->EmulatorData[3] - DispatcherLoopTopEnterECFillSRA
-  br x16 // DispatcherLoopTopEnterECFillSRA(CPUArea:x17)
+  mov x10, #0 // Zero ENTRY_FILL_SRA_SINGLE_INST_REG to avoid single step
+  br x16 // DispatcherLoopTopEnterECFillSRA(SingleInst:x10, CPUArea:x17)
 
 // Called into by FEXCore
 // Expects the target code address in x9
diff --git a/Source/Windows/ARM64EC/Module.cpp b/Source/Windows/ARM64EC/Module.cpp
index 5f9ab34999..620c3f2822 100644
--- a/Source/Windows/ARM64EC/Module.cpp
+++ b/Source/Windows/ARM64EC/Module.cpp
@@ -584,7 +584,19 @@ bool ResetToConsistentStateImpl(EXCEPTION_RECORD* Exception, CONTEXT* GuestConte
 
     std::scoped_lock Lock(ThreadCreationMutex);
     if (InvalidationTracker->HandleRWXAccessViolation(FaultAddress)) {
-      LogMan::Msg::DFmt("Handled self-modifying code: pc: {:X} fault: {:X}", NativeContext->Pc, FaultAddress);
+      if (CTX->IsAddressInCodeBuffer(CPUArea.ThreadState(), NativeContext->Pc) && !CTX->IsCurrentBlockSingleInst(CPUArea.ThreadState()) &&
+          CTX->IsAddressInCurrentBlock(CPUArea.ThreadState(), FaultAddress, 8)) {
+        // If we are not patching ourselves (the single-inst block case) and are patching the current block, this is inline SMC. Reconstruct the current context (before the SMC write), then single step the write to reduce it to regular SMC.
+        Exception::ReconstructThreadState(CPUArea.ThreadState(), *NativeContext);
+        LogMan::Msg::DFmt("Handled inline self-modifying code: pc: {:X} rip: {:X} fault: {:X}", NativeContext->Pc,
+                          CPUArea.ThreadState()->CurrentFrame->State.rip, FaultAddress);
+        NativeContext->Pc = CPUArea.DispatcherLoopTopEnterECFillSRA();
+        NativeContext->Sp = CPUArea.EmulatorStackBase();
+        NativeContext->X10 = 1; // Set ENTRY_FILL_SRA_SINGLE_INST_REG to force a single step
+      } else {
+        LogMan::Msg::DFmt("Handled self-modifying code: pc: {:X} fault: {:X}", NativeContext->Pc, FaultAddress);
+      }
+
       return true;
     }
   }
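
Note for reviewers: the snippet below is a minimal stand-alone C++ sketch of the
two-fault flow described in the commit message. It only mirrors the condition added
to ResetToConsistentStateImpl; JITQuery, SMCAction and ClassifySMC are hypothetical
illustration-only names, not FEX APIs.

// Stand-alone illustration only; build with any C++11 compiler.
#include <cstdio>

struct JITQuery {
  bool PcInJITCodeBuffer;      // faulting PC lies within JIT-compiled guest code
  bool CurrentBlockSingleInst; // the current block was compiled as a single instruction
  bool FaultInCurrentBlock;    // the write targets the currently executing block
};

enum class SMCAction {
  RegularInvalidate,     // usual invalidate + reprotect + continue
  ReenterSingleInstStep, // reconstruct context, re-enter dispatcher with the single-inst flag
};

// Inline SMC (a block overwriting itself) is reduced to regular SMC by
// re-running the writing instruction as its own single-instruction block;
// a block that is already single-inst never takes this path again.
static SMCAction ClassifySMC(const JITQuery& Q) {
  if (Q.PcInJITCodeBuffer && !Q.CurrentBlockSingleInst && Q.FaultInCurrentBlock) {
    return SMCAction::ReenterSingleInstStep;
  }
  return SMCAction::RegularInvalidate;
}

int main() {
  // First fault: a normal block patches itself -> re-enter with the single-inst flag.
  const JITQuery FirstFault {true, false, true};
  // Second fault: the recompiled single-inst block repeats the same write ->
  // falls through to the regular invalidate path, so no recompilation loop.
  const JITQuery SecondFault {true, true, true};

  std::printf("first fault re-enters single-inst: %d\n",
              ClassifySMC(FirstFault) == SMCAction::ReenterSingleInstStep);
  std::printf("second fault re-enters single-inst: %d\n",
              ClassifySMC(SecondFault) == SMCAction::ReenterSingleInstStep);
  return 0;
}

On the first fault the classifier requests single-inst re-entry (the handler sets
X10 = 1, i.e. ENTRY_FILL_SRA_SINGLE_INST_REG); on the second fault the block is
already single-inst, so the regular invalidate+reprotect+continue path runs and no
recompilation loop can form.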