core: refactor emulated cpu core activation
@@ -6,6 +6,7 @@
 #include "common/signal_chain.h"
 #include "core/arm/nce/arm_nce.h"
 #include "core/arm/nce/guest_context.h"
 #include "core/arm/nce/patcher.h"
 #include "core/core.h"
 #include "core/memory.h"
@@ -38,7 +39,7 @@ fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx) {

 } // namespace

-void* ARM_NCE::RestoreGuestContext(void* raw_context) {
+void* ArmNce::RestoreGuestContext(void* raw_context) {
     // Retrieve the host context.
     auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;

@@ -71,7 +72,7 @@ void* ARM_NCE::RestoreGuestContext(void* raw_context) {
     return tpidr;
 }

-void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
+void ArmNce::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
     // Retrieve the host context.
     auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;

@@ -103,7 +104,7 @@ void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
     host_ctx.regs[0] = guest_ctx->esr_el1.exchange(0);
 }

-bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
+bool ArmNce::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
     auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
     auto* info = static_cast<siginfo_t*>(raw_info);

@@ -134,7 +135,7 @@ bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
     // - If we lose the race, then SignalInterrupt will send us a signal we are masking,
     //   and it will do nothing when it is unmasked, as we have already left guest code.
     // - If we win the race, then SignalInterrupt will wait for us to unlock first.
-    auto& thread_params = guest_ctx->parent->running_thread->GetNativeExecutionParameters();
+    auto& thread_params = guest_ctx->parent->m_running_thread->GetNativeExecutionParameters();
     thread_params.lock.store(SpinLockLocked);

     // Return to host.
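
The comments above describe a race between the faulting thread and SignalInterrupt that is resolved with a per-thread spin lock. Below is a minimal standalone sketch of that handoff; SpinLockLocked, lock, and is_running echo names from the diff, while the surrounding types and functions are assumptions made for illustration, not yuzu code.

#include <atomic>
#include <cstdint>

constexpr std::uint32_t SpinLockUnlocked = 0;
constexpr std::uint32_t SpinLockLocked = 1;

struct NativeExecutionParameters {
    std::atomic<std::uint32_t> lock{SpinLockLocked};
    bool is_running{};
};

// Interrupter side: acquire the per-thread lock, then only signal if the
// thread is still inside guest code.
void SignalIfRunning(NativeExecutionParameters& params) {
    std::uint32_t expected = SpinLockUnlocked;
    while (!params.lock.compare_exchange_weak(expected, SpinLockLocked)) {
        expected = SpinLockUnlocked; // spin until the current holder releases it
    }
    if (!params.is_running) {
        params.lock.store(SpinLockUnlocked); // nothing to interrupt; release
        return;
    }
    // Otherwise deliver the break signal; the running thread releases the
    // lock once it has left guest code.
}

// Faulting thread: take the lock back unconditionally before returning to
// host code, so either outcome of the race above is harmless.
void OnLeaveGuest(NativeExecutionParameters& params) {
    params.lock.store(SpinLockLocked);
    params.is_running = false;
}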
@@ -142,97 +143,93 @@ bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
     return false;
 }

-void ARM_NCE::HandleHostFault(int sig, void* raw_info, void* raw_context) {
+void ArmNce::HandleHostFault(int sig, void* raw_info, void* raw_context) {
     return g_orig_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
 }

-HaltReason ARM_NCE::RunJit() {
-    // Get the thread parameters.
-    // TODO: pass the current thread down from ::Run
-    auto* thread = Kernel::GetCurrentThreadPointer(system.Kernel());
+void ArmNce::LockThread(Kernel::KThread* thread) {
+    auto* thread_params = &thread->GetNativeExecutionParameters();
+    LockThreadParameters(thread_params);
+}

-    {
-        // Lock our core context.
-        std::scoped_lock lk{lock};
+void ArmNce::UnlockThread(Kernel::KThread* thread) {
+    auto* thread_params = &thread->GetNativeExecutionParameters();
+    UnlockThreadParameters(thread_params);
+}

-        // We should not be running.
-        ASSERT(running_thread == nullptr);
-
-        // Check if we need to run. If we have already been halted, we are done.
-        u64 halt = guest_ctx.esr_el1.exchange(0);
-        if (halt != 0) {
-            return static_cast<HaltReason>(halt);
-        }
-
-        // Mark that we are running.
-        running_thread = thread;
-
-        // Acquire the lock on the thread parameters.
-        // This allows us to force synchronization with SignalInterrupt.
-        LockThreadParameters(thread_params);
+HaltReason ArmNce::RunThread(Kernel::KThread* thread) {
+    // Check if we're already interrupted.
+    // If we are, we can just return immediately.
+    HaltReason hr = static_cast<HaltReason>(m_guest_ctx.esr_el1.exchange(0));
+    if (True(hr)) {
+        return hr;
     }

-    // Assign current members.
-    guest_ctx.parent = this;
-    thread_params->native_context = &guest_ctx;
-    thread_params->tpidr_el0 = guest_ctx.tpidr_el0;
-    thread_params->tpidrro_el0 = guest_ctx.tpidrro_el0;
-    thread_params->is_running = true;
+    // Get the thread context.
+    auto* thread_params = &thread->GetNativeExecutionParameters();
+    auto* process = thread->GetOwnerProcess();

-    HaltReason halt{};
+    // Assign current members.
+    m_running_thread = thread;
+    m_guest_ctx.parent = this;
+    thread_params->native_context = &m_guest_ctx;
+    thread_params->tpidr_el0 = m_guest_ctx.tpidr_el0;
+    thread_params->tpidrro_el0 = m_guest_ctx.tpidrro_el0;
+    thread_params->is_running = true;

     // TODO: finding and creating the post handler needs to be locked
     // to deal with dynamic loading of NROs.
-    const auto& post_handlers = system.ApplicationProcess()->GetPostHandlers();
-    if (auto it = post_handlers.find(guest_ctx.pc); it != post_handlers.end()) {
-        halt = ReturnToRunCodeByTrampoline(thread_params, &guest_ctx, it->second);
+    const auto& post_handlers = process->GetPostHandlers();
+    if (auto it = post_handlers.find(m_guest_ctx.pc); it != post_handlers.end()) {
+        hr = ReturnToRunCodeByTrampoline(thread_params, &m_guest_ctx, it->second);
     } else {
-        halt = ReturnToRunCodeByExceptionLevelChange(thread_id, thread_params);
+        hr = ReturnToRunCodeByExceptionLevelChange(m_thread_id, thread_params);
     }

     // Unload members.
-    // The thread does not change, so we can persist the old reference.
-    guest_ctx.tpidr_el0 = thread_params->tpidr_el0;
+    m_running_thread = nullptr;
+    m_guest_ctx.tpidr_el0 = thread_params->tpidr_el0;
     thread_params->native_context = nullptr;
     thread_params->is_running = false;

-    // Unlock the thread parameters.
-    UnlockThreadParameters(thread_params);
-
-    {
-        // Lock the core context.
-        std::scoped_lock lk{lock};
-
-        // On exit, we no longer have an active thread.
-        running_thread = nullptr;
-    }
-
     // Return the halt reason.
-    return halt;
+    return hr;
 }
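
With this change the core no longer looks up the current thread itself: the caller passes a KThread and brackets execution with LockThread/UnlockThread. The sketch below shows the assumed call pattern; the driver function is hypothetical, requires the project's headers, and the real call sites live in the kernel/CPU management code.

// Hypothetical caller, assuming core/arm/nce/arm_nce.h and the kernel types.
Core::HaltReason RunGuestThreadOnce(Core::ArmNce& core, Kernel::KThread* thread) {
    core.LockThread(thread);                          // pin the thread's native parameters
    const Core::HaltReason hr = core.RunThread(thread); // run until halted or interrupted
    core.UnlockThread(thread);                        // release them again
    return hr;
}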

-HaltReason ARM_NCE::StepJit() {
+HaltReason ArmNce::StepThread(Kernel::KThread* thread) {
     return HaltReason::StepThread;
 }

-u32 ARM_NCE::GetSvcNumber() const {
-    return guest_ctx.svc_swi;
+u32 ArmNce::GetSvcNumber() const {
+    return m_guest_ctx.svc;
 }

-ARM_NCE::ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_)
-    : ARM_Interface{system_, uses_wall_clock_}, core_index{core_index_} {
-    guest_ctx.system = &system_;
+void ArmNce::GetSvcArguments(std::span<uint64_t, 8> args) const {
+    for (size_t i = 0; i < 8; i++) {
+        args[i] = m_guest_ctx.cpu_registers[i];
+    }
 }

-ARM_NCE::~ARM_NCE() = default;
+void ArmNce::SetSvcArguments(std::span<const uint64_t, 8> args) {
+    for (size_t i = 0; i < 8; i++) {
+        m_guest_ctx.cpu_registers[i] = args[i];
+    }
+}
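
GetSvcArguments and SetSvcArguments expose the first eight guest GPRs, matching the AArch64 convention of passing SVC arguments and results in x0..x7. A hypothetical dispatcher sketch follows; the SvcInterface type and handler lookup are illustrative stand-ins, not part of this commit.

#include <array>
#include <cstdint>
#include <span>

// Illustrative stand-in for the interface shown in the diff.
struct SvcInterface {
    virtual std::uint32_t GetSvcNumber() const = 0;
    virtual void GetSvcArguments(std::span<std::uint64_t, 8> args) const = 0;
    virtual void SetSvcArguments(std::span<const std::uint64_t, 8> args) = 0;
    virtual ~SvcInterface() = default;
};

void DispatchSvc(SvcInterface& cpu) {
    std::array<std::uint64_t, 8> args{};
    cpu.GetSvcArguments(args);   // read x0..x7 from the guest context
    // ... look up and invoke the handler for cpu.GetSvcNumber() here ...
    args[0] = 0;                 // e.g. write a result code back into x0
    cpu.SetSvcArguments(args);   // write x0..x7 back to the guest context
}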

-void ARM_NCE::Initialize() {
-    thread_id = gettid();
+ArmNce::ArmNce(System& system, bool uses_wall_clock, std::size_t core_index)
+    : ArmInterface{uses_wall_clock}, m_system{system}, m_core_index{core_index} {
+    m_guest_ctx.system = &m_system;
+}
+
+ArmNce::~ArmNce() = default;
+
+void ArmNce::Initialize() {
+    m_thread_id = gettid();

     // Setup our signals
-    static std::once_flag flag;
-    std::call_once(flag, [] {
+    static std::once_flag signals;
+    std::call_once(signals, [] {
         using HandlerType = decltype(sigaction::sa_sigaction);

         sigset_t signal_mask;
@@ -244,7 +241,7 @@ void ARM_NCE::Initialize() {
         struct sigaction return_to_run_code_action {};
         return_to_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
         return_to_run_code_action.sa_sigaction = reinterpret_cast<HandlerType>(
-            &ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler);
+            &ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler);
         return_to_run_code_action.sa_mask = signal_mask;
         Common::SigAction(ReturnToRunCodeByExceptionLevelChangeSignal, &return_to_run_code_action,
                           nullptr);
@@ -252,14 +249,13 @@ void ARM_NCE::Initialize() {
         struct sigaction break_from_run_code_action {};
         break_from_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
         break_from_run_code_action.sa_sigaction =
-            reinterpret_cast<HandlerType>(&ARM_NCE::BreakFromRunCodeSignalHandler);
+            reinterpret_cast<HandlerType>(&ArmNce::BreakFromRunCodeSignalHandler);
         break_from_run_code_action.sa_mask = signal_mask;
         Common::SigAction(BreakFromRunCodeSignal, &break_from_run_code_action, nullptr);

         struct sigaction fault_action {};
         fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
-        fault_action.sa_sigaction =
-            reinterpret_cast<HandlerType>(&ARM_NCE::GuestFaultSignalHandler);
+        fault_action.sa_sigaction = reinterpret_cast<HandlerType>(&ArmNce::GuestFaultSignalHandler);
         fault_action.sa_mask = signal_mask;
         Common::SigAction(GuestFaultSignal, &fault_action, &g_orig_action);

@@ -272,111 +268,59 @@ void ARM_NCE::Initialize() {
     });
 }
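
Initialize registers SA_SIGINFO handlers that run on an alternate stack and preserves the previous fault handler in g_orig_action so that HandleHostFault can forward non-guest faults. The standalone POSIX sketch below shows the same install-and-chain pattern; the signal number and handler body are placeholders, and it uses raw sigaction rather than the project's Common::SigAction wrapper.

#include <cstring>
#include <signal.h>

static struct sigaction g_previous_action{};

static void FaultHandler(int sig, siginfo_t* info, void* raw_context) {
    const bool handled_as_guest_fault = false; // placeholder for guest-fault logic
    if (!handled_as_guest_fault && g_previous_action.sa_sigaction != nullptr) {
        // Chain to whoever owned the signal before us (e.g. a crash reporter).
        g_previous_action.sa_sigaction(sig, info, raw_context);
    }
}

void InstallFaultHandler() {
    struct sigaction action{};
    std::memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART; // run on the alternate stack
    action.sa_sigaction = FaultHandler;
    sigemptyset(&action.sa_mask);
    sigaction(SIGSEGV, &action, &g_previous_action); // keep the old handler for chaining
}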

-void ARM_NCE::SetPC(u64 pc) {
-    guest_ctx.pc = pc;
+void ArmNce::SetTpidrroEl0(u64 value) {
+    m_guest_ctx.tpidrro_el0 = value;
 }

-u64 ARM_NCE::GetPC() const {
-    return guest_ctx.pc;
-}
-
-u64 ARM_NCE::GetSP() const {
-    return guest_ctx.sp;
-}
-
-u64 ARM_NCE::GetReg(int index) const {
-    return guest_ctx.cpu_registers[index];
-}
-
-void ARM_NCE::SetReg(int index, u64 value) {
-    guest_ctx.cpu_registers[index] = value;
-}
-
-u128 ARM_NCE::GetVectorReg(int index) const {
-    return guest_ctx.vector_registers[index];
-}
-
-void ARM_NCE::SetVectorReg(int index, u128 value) {
-    guest_ctx.vector_registers[index] = value;
-}
-
-u32 ARM_NCE::GetPSTATE() const {
-    return guest_ctx.pstate;
-}
-
-void ARM_NCE::SetPSTATE(u32 pstate) {
-    guest_ctx.pstate = pstate;
-}
-
-u64 ARM_NCE::GetTlsAddress() const {
-    return guest_ctx.tpidrro_el0;
-}
-
-void ARM_NCE::SetTlsAddress(u64 address) {
-    guest_ctx.tpidrro_el0 = address;
-}
-
-u64 ARM_NCE::GetTPIDR_EL0() const {
-    return guest_ctx.tpidr_el0;
-}
-
-void ARM_NCE::SetTPIDR_EL0(u64 value) {
-    guest_ctx.tpidr_el0 = value;
-}
-
-void ARM_NCE::SaveContext(ThreadContext64& ctx) const {
-    ctx.cpu_registers = guest_ctx.cpu_registers;
-    ctx.sp = guest_ctx.sp;
-    ctx.pc = guest_ctx.pc;
-    ctx.pstate = guest_ctx.pstate;
-    ctx.vector_registers = guest_ctx.vector_registers;
-    ctx.fpcr = guest_ctx.fpcr;
-    ctx.fpsr = guest_ctx.fpsr;
-    ctx.tpidr = guest_ctx.tpidr_el0;
-}
-
-void ARM_NCE::LoadContext(const ThreadContext64& ctx) {
-    guest_ctx.cpu_registers = ctx.cpu_registers;
-    guest_ctx.sp = ctx.sp;
-    guest_ctx.pc = ctx.pc;
-    guest_ctx.pstate = ctx.pstate;
-    guest_ctx.vector_registers = ctx.vector_registers;
-    guest_ctx.fpcr = ctx.fpcr;
-    guest_ctx.fpsr = ctx.fpsr;
-    guest_ctx.tpidr_el0 = ctx.tpidr;
-}
-
-void ARM_NCE::SignalInterrupt() {
-    // Lock core context.
-    std::scoped_lock lk{lock};
-
-    // Add break loop condition.
-    guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop));
-
-    // If there is no thread running, we are done.
-    if (running_thread == nullptr) {
-        return;
-    }
+void ArmNce::GetContext(Kernel::Svc::ThreadContext& ctx) const {
+    for (size_t i = 0; i < 29; i++) {
+        ctx.r[i] = m_guest_ctx.cpu_registers[i];
     }
+    ctx.fp = m_guest_ctx.cpu_registers[29];
+    ctx.lr = m_guest_ctx.cpu_registers[30];
+    ctx.sp = m_guest_ctx.sp;
+    ctx.pc = m_guest_ctx.pc;
+    ctx.pstate = m_guest_ctx.pstate;
+    ctx.v = m_guest_ctx.vector_registers;
+    ctx.fpcr = m_guest_ctx.fpcr;
+    ctx.fpsr = m_guest_ctx.fpsr;
+    ctx.tpidr = m_guest_ctx.tpidr_el0;
+}
+
+void ArmNce::SetContext(const Kernel::Svc::ThreadContext& ctx) {
+    for (size_t i = 0; i < 29; i++) {
+        m_guest_ctx.cpu_registers[i] = ctx.r[i];
+    }
+    m_guest_ctx.cpu_registers[29] = ctx.fp;
+    m_guest_ctx.cpu_registers[30] = ctx.lr;
+    m_guest_ctx.sp = ctx.sp;
+    m_guest_ctx.pc = ctx.pc;
+    m_guest_ctx.pstate = ctx.pstate;
+    m_guest_ctx.vector_registers = ctx.v;
+    m_guest_ctx.fpcr = ctx.fpcr;
+    m_guest_ctx.fpsr = ctx.fpsr;
+    m_guest_ctx.tpidr_el0 = ctx.tpidr;
+}
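
GetContext and SetContext translate between the flat 31-entry GPR array in GuestContext and the kernel-facing Svc::ThreadContext, where x29 and x30 appear under their AArch64 ABI roles as frame pointer and link register. A minimal standalone illustration of that mapping, with made-up struct names:

#include <array>
#include <cstddef>
#include <cstdint>

// Illustrative only: x0..x30 as a flat array versus the ABI-oriented view.
struct FlatRegs {
    std::array<std::uint64_t, 31> x{};
};

struct AbiView {
    std::array<std::uint64_t, 29> r{};
    std::uint64_t fp{}; // x29, frame pointer
    std::uint64_t lr{}; // x30, link register
};

AbiView ToAbiView(const FlatRegs& regs) {
    AbiView out{};
    for (std::size_t i = 0; i < 29; i++) {
        out.r[i] = regs.x[i];
    }
    out.fp = regs.x[29];
    out.lr = regs.x[30];
    return out;
}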

+void ArmNce::SignalInterrupt(Kernel::KThread* thread) {
+    // Add break loop condition.
+    m_guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop));

     // Lock the thread context.
-    auto* params = &running_thread->GetNativeExecutionParameters();
+    auto* params = &thread->GetNativeExecutionParameters();
     LockThreadParameters(params);

     if (params->is_running) {
         // We should signal to the running thread.
         // The running thread will unlock the thread context.
-        syscall(SYS_tkill, thread_id, BreakFromRunCodeSignal);
+        syscall(SYS_tkill, m_thread_id, BreakFromRunCodeSignal);
     } else {
         // If the thread is no longer running, we have nothing to do.
         UnlockThreadParameters(params);
     }
 }
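
SignalInterrupt now takes the target thread explicitly, but delivery still works by sending a directed signal to the host thread id saved in Initialize. A standalone sketch of that mechanism; SIGUSR1 stands in for the commit's BreakFromRunCodeSignal.

#include <csignal>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

// Record the kernel thread id of the thread that will run guest code.
static pid_t g_host_tid = 0;

void RememberSelf() {
    g_host_tid = static_cast<pid_t>(syscall(SYS_gettid));
}

// From another thread: interrupt that specific thread (not the whole process)
// so its signal handler can save the guest context and return to host code.
void KickGuestThread() {
    if (g_host_tid != 0) {
        syscall(SYS_tkill, g_host_tid, SIGUSR1);
    }
}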

-void ARM_NCE::ClearInterrupt() {
-    guest_ctx.esr_el1 = {};
-}
-
-void ARM_NCE::ClearInstructionCache() {
+void ArmNce::ClearInstructionCache() {
     // TODO: This is not possible to implement correctly on Linux because
     // we do not have any access to ic iallu.

@@ -384,17 +328,8 @@ void ARM_NCE::ClearInstructionCache() {
     std::atomic_thread_fence(std::memory_order_seq_cst);
 }

-void ARM_NCE::InvalidateCacheRange(u64 addr, std::size_t size) {
+void ArmNce::InvalidateCacheRange(u64 addr, std::size_t size) {
     this->ClearInstructionCache();
 }
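
InvalidateCacheRange falls back to ClearInstructionCache because full instruction-cache invalidation (ic iallu) is privileged and unavailable from user space. A hedged sketch of ranged maintenance using the compiler builtin, which is a common user-space approach rather than code from this commit:

#include <atomic>
#include <cstddef>

// Flush a specific range after writing code into it: __builtin___clear_cache
// performs the necessary data/instruction cache maintenance for [begin, end).
void InvalidateRange(void* addr, std::size_t size) {
    char* begin = static_cast<char*>(addr);
    __builtin___clear_cache(begin, begin + size);
    // Order the maintenance against subsequent execution on other threads.
    std::atomic_thread_fence(std::memory_order_seq_cst);
}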

-void ARM_NCE::ClearExclusiveState() {
-    // No-op.
-}
-
-void ARM_NCE::PageTableChanged(Common::PageTable& page_table,
-                               std::size_t new_address_space_size_in_bits) {
-    // No-op. Page table is never used.
-}
-
 } // namespace Core