// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <condition_variable>
#include <mutex>

#include "common/logging/log.h"
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic.h"
#endif
#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/lock.h"
#include "core/settings.h"

namespace Core {
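
// Flags the end of emulation and wakes every core currently blocked in Rendezvous().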
void CpuBarrier::NotifyEnd() {
    std::unique_lock lock{mutex};
    end = true;
    condition.notify_all();
}
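
// Keeps the cores in lock-step between slices; returns false once NotifyEnd() has been called.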
bool CpuBarrier::Rendezvous() {
    if (!Settings::values.use_multi_core) {
        // Meaningless when running in single-core mode
        return true;
    }

    if (!end) {
        std::unique_lock lock{mutex};

        --cores_waiting;
        if (!cores_waiting) {
            cores_waiting = NUM_CPU_CORES;
            condition.notify_all();
            return true;
        }

        condition.wait(lock);
        return true;
    }

    return false;
}
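
// Constructs the per-core ARM interface (Dynarmic JIT when available, Unicorn otherwise) and the
// core's scheduler.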
Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
         std::size_t core_index)
    : cpu_barrier{cpu_barrier}, global_scheduler{system.GlobalScheduler()},
      core_timing{system.CoreTiming()}, core_index{core_index} {
#ifdef ARCHITECTURE_x86_64
    arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index);
#else
    arm_interface = std::make_unique<ARM_Unicorn>(system);
    LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif

    scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface, core_index);
}

Cpu::~Cpu() = default;
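
// Creates the exclusive memory monitor; only the Dynarmic backend currently provides one.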
std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(
    [[maybe_unused]] Memory::Memory& memory, [[maybe_unused]] std::size_t num_cores) {
#ifdef ARCHITECTURE_x86_64
    return std::make_unique<DynarmicExclusiveMonitor>(memory, num_cores);
#else
    // TODO(merry): Passthrough exclusive monitor
    return nullptr;
#endif
}
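
// Runs one scheduling slice on this core: a full block of guest code when tight_loop is true,
// or a single instruction otherwise, then advances core timing and reschedules.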
void Cpu::RunLoop(bool tight_loop) {
    // Wait for all other CPU cores to complete the previous slice, such that they run in lock-step
    if (!cpu_barrier.Rendezvous()) {
        // If rendezvous failed, session has been killed
        return;
    }

    Reschedule();

    // If we don't have a currently active thread then don't execute instructions,
    // instead advance to the next event and try to yield to the next thread
    if (Kernel::GetCurrentThread() == nullptr) {
        LOG_TRACE(Core, "Core-{} idling", core_index);
        core_timing.Idle();
    } else {
        if (tight_loop) {
            arm_interface->Run();
        } else {
            arm_interface->Step();
        }
    }
    core_timing.Advance();

    Reschedule();
}
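
// Executes exactly one guest instruction on this core.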
void Cpu::SingleStep() {
    return RunLoop(false);
}

void Cpu::PrepareReschedule() {
    arm_interface->PrepareReschedule();
}
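
// Selects the next thread for this core and performs a context switch if one is required.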
void Cpu::Reschedule() {
    // Lock the global kernel mutex when we manipulate the HLE state
    std::lock_guard lock(HLE::g_hle_lock);

    global_scheduler.SelectThread(core_index);
    scheduler->TryDoContextSwitch();
}
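
// Shuts down this core's scheduler.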
void Cpu::Shutdown() {
    scheduler->Shutdown();
}

} // namespace Core