2014-12-17 05:38:14 +00:00
|
|
|
// Copyright 2014 Citra Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
2014-11-19 08:49:13 +00:00
|
|
|
// Refer to the license.txt file included.
|
2014-05-10 03:11:18 +01:00
|
|
|
|
2020-02-29 17:58:50 +00:00
|
|
|
#include <array>
|
2018-08-28 17:30:33 +01:00
|
|
|
#include <atomic>
|
2020-02-22 14:27:40 +00:00
|
|
|
#include <bitset>
|
2020-01-26 14:28:23 +00:00
|
|
|
#include <functional>
|
2018-08-28 17:30:33 +01:00
|
|
|
#include <memory>
|
2020-02-14 13:30:53 +00:00
|
|
|
#include <thread>
|
|
|
|
#include <unordered_map>
|
2018-08-28 17:30:33 +01:00
|
|
|
#include <utility>
|
|
|
|
|
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/logging/log.h"
|
2020-03-12 20:48:43 +00:00
|
|
|
#include "common/microprofile.h"
|
2020-02-25 02:04:12 +00:00
|
|
|
#include "common/thread.h"
|
2020-01-25 22:55:32 +00:00
|
|
|
#include "core/arm/arm_interface.h"
|
2020-02-29 17:58:50 +00:00
|
|
|
#include "core/arm/cpu_interrupt_handler.h"
|
2020-01-25 22:55:32 +00:00
|
|
|
#include "core/arm/exclusive_monitor.h"
|
2018-08-28 17:30:33 +01:00
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/core_timing.h"
|
2019-09-10 16:04:40 +01:00
|
|
|
#include "core/core_timing_util.h"
|
2020-02-25 02:04:12 +00:00
|
|
|
#include "core/cpu_manager.h"
|
2020-04-09 02:06:37 +01:00
|
|
|
#include "core/device_memory.h"
|
2020-02-22 14:27:40 +00:00
|
|
|
#include "core/hardware_properties.h"
|
2018-09-02 16:58:58 +01:00
|
|
|
#include "core/hle/kernel/client_port.h"
|
2019-10-07 23:57:13 +01:00
|
|
|
#include "core/hle/kernel/errors.h"
|
2017-05-30 00:45:42 +01:00
|
|
|
#include "core/hle/kernel/handle_table.h"
|
2020-12-03 02:08:35 +00:00
|
|
|
#include "core/hle/kernel/k_scheduler.h"
|
2016-09-21 07:52:38 +01:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
2020-04-09 02:06:37 +01:00
|
|
|
#include "core/hle/kernel/memory/memory_layout.h"
|
|
|
|
#include "core/hle/kernel/memory/memory_manager.h"
|
|
|
|
#include "core/hle/kernel/memory/slab_heap.h"
|
2020-01-25 22:55:32 +00:00
|
|
|
#include "core/hle/kernel/physical_core.h"
|
2015-05-04 04:01:16 +01:00
|
|
|
#include "core/hle/kernel/process.h"
|
2015-08-06 01:26:52 +01:00
|
|
|
#include "core/hle/kernel/resource_limit.h"
|
2020-04-09 02:06:37 +01:00
|
|
|
#include "core/hle/kernel/shared_memory.h"
|
2020-02-11 21:36:39 +00:00
|
|
|
#include "core/hle/kernel/synchronization.h"
|
2014-05-10 03:11:18 +01:00
|
|
|
#include "core/hle/kernel/thread.h"
|
2020-02-14 14:56:27 +00:00
|
|
|
#include "core/hle/kernel/time_manager.h"
|
2018-08-28 17:30:33 +01:00
|
|
|
#include "core/hle/lock.h"
|
|
|
|
#include "core/hle/result.h"
|
2019-04-07 06:10:44 +01:00
|
|
|
#include "core/memory.h"
|
2014-05-10 03:11:18 +01:00
|
|
|
|
2020-03-12 20:48:43 +00:00
|
|
|
// Profiling scope used by Enter/ExitSVCProfile to time guest SVC handling.
MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
|
|
|
|
|
2014-05-20 23:13:25 +01:00
|
|
|
namespace Kernel {
|
2014-05-10 03:11:18 +01:00
|
|
|
|
2018-08-28 17:30:33 +01:00
|
|
|
struct KernelCore::Impl {
    // Private implementation (pimpl) state for KernelCore. The constructor only
    // wires up references; all real setup is deferred to Initialize().
    explicit Impl(Core::System& system, KernelCore& kernel)
        : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
                                                                                          system} {}

    // Selects single-core vs multi-core emulation. Must be set before
    // Initialize()/RegisterCoreThread() for host-thread lookups to behave.
    void SetMulticore(bool is_multicore) {
        this->is_multicore = is_multicore;
    }

    // One-time kernel bring-up. Ordering matters: the calling host thread is
    // registered first, then cores/monitor, resource limits, memory layout,
    // preemption timing, schedulers, and finally the per-core suspend threads.
    void Initialize(KernelCore& kernel) {
        RegisterHostThread();

        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);

        InitializePhysicalCores();
        InitializeSystemResourceLimit(kernel);
        InitializeMemoryLayout();
        InitializePreemption(kernel);
        InitializeSchedulers();
        InitializeSuspendThreads();
    }

    // Initializes each physical core's ARM interface for the current process'
    // address width. Requires current_process to be set beforehand.
    void InitializeCores() {
        for (auto& core : cores) {
            core.Initialize(current_process->Is64BitProcess());
        }
    }

    // Tears down all kernel state and resets ID counters so the kernel can be
    // re-initialized for a new emulation session.
    void Shutdown() {
        next_object_id = 0;
        next_kernel_process_id = Process::InitialKIPIDMin;
        next_user_process_id = Process::ProcessIDMin;
        next_thread_id = 1;

        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            if (suspend_threads[i]) {
                suspend_threads[i].reset();
            }
        }

        cores.clear();

        process_list.clear();

        current_process = nullptr;

        system_resource_limit = nullptr;

        global_handle_table.Clear();

        preemption_event = nullptr;

        named_ports.clear();

        exclusive_monitor.reset();

        // Reset the host-thread registry so stale std::thread::ids from the
        // previous session cannot be matched by GetCurrentHostThreadID().
        num_host_threads = 0;
        std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(),
                  std::thread::id{});
        std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0);
    }

    // Creates the exclusive monitor plus one scheduler and one physical core
    // per emulated CPU core.
    void InitializePhysicalCores() {
        exclusive_monitor =
            Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
            cores.emplace_back(i, system, *schedulers[i], interrupts);
        }
    }

    // Second-phase scheduler setup; runs after all cores exist.
    void InitializeSchedulers() {
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            cores[i].Scheduler().Initialize();
        }
    }

    // Creates the default system resource limit
    void InitializeSystemResourceLimit(KernelCore& kernel) {
        system_resource_limit = ResourceLimit::Create(kernel);

        // If setting the default system values fails, then something seriously wrong has occurred.
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess());

        // Pre-reserve physical memory for the system itself; failure here is
        // unrecoverable. NOTE(review): the 0x60000 reservation size appears to
        // be a fixed system carve-out — confirm against the resource-limit docs.
        if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) ||
            !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) {
            UNREACHABLE();
        }
    }

    // Schedules a repeating core-timing event that preempts guest threads.
    // The callback re-schedules itself, giving a ~10ms preemption period
    // (interval converted via Core::Timing::msToCycles).
    void InitializePreemption(KernelCore& kernel) {
        preemption_event = Core::Timing::CreateEvent(
            "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                {
                    // Scheduler lock must be held while rotating thread queues.
                    KScopedSchedulerLock lock(kernel);
                    global_scheduler_context->PreemptThreads();
                }
                const auto time_interval = std::chrono::nanoseconds{
                    Core::Timing::msToCycles(std::chrono::milliseconds(10))};
                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
            });

        // Kick off the first occurrence; subsequent ones are chained above.
        const auto time_interval =
            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
    }

    // Creates one HLE kernel thread per core used to park cores while the
    // emulated system is suspended (see KernelCore::Suspend).
    void InitializeSuspendThreads() {
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            std::string name = "Suspend Thread Id:" + std::to_string(i);
            std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
            const auto type =
                static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
            auto thread_res =
                Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
                               nullptr, std::move(init_func), init_func_parameter);

            // Thread creation for kernel-internal threads is expected to
            // succeed; Unwrap() asserts on failure.
            suspend_threads[i] = std::move(thread_res).Unwrap();
        }
    }

    // Switches the active process. When called from a registered core thread,
    // also swaps that core's page table to the new process' address space.
    void MakeCurrentProcess(Process* process) {
        current_process = process;
        if (process == nullptr) {
            return;
        }

        const u32 core_id = GetCurrentHostThreadID();
        if (core_id < Core::Hardware::NUM_CPU_CORES) {
            system.Memory().SetCurrentPageTable(*process, core_id);
        }
    }

    // Registers the calling host thread as the emulator thread for a specific
    // guest core (IDs 0..NUM_CPU_CORES-1). Each host thread may register once.
    void RegisterCoreThread(std::size_t core_id) {
        const std::thread::id this_id = std::this_thread::get_id();
        if (!is_multicore) {
            // In single-core mode one host thread services every guest core.
            single_core_thread_id = this_id;
        }
        const auto end =
            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
        const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
        ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
        ASSERT(it == end);
        InsertHostThread(static_cast<u32>(core_id));
    }

    // Registers the calling host thread as a non-core thread (ID >= NUM_CPU_CORES).
    // Idempotent: re-registration of an already-known thread is a no-op.
    void RegisterHostThread() {
        const std::thread::id this_id = std::this_thread::get_id();
        const auto end =
            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
        const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
        if (it == end) {
            InsertHostThread(registered_thread_ids++);
        }
    }

    // Appends (thread-id, value) to the registry. The slot index is claimed
    // atomically, so concurrent registrations get distinct slots.
    void InsertHostThread(u32 value) {
        const size_t index = num_host_threads++;
        ASSERT_MSG(index < NUM_REGISTRABLE_HOST_THREADS, "Too many host threads");
        register_host_thread_values[index] = value;
        register_host_thread_keys[index] = std::this_thread::get_id();
    }

    // Looks up the calling host thread's registered ID, or
    // Core::INVALID_HOST_THREAD_ID if the thread was never registered.
    [[nodiscard]] u32 GetCurrentHostThreadID() const {
        const std::thread::id this_id = std::this_thread::get_id();
        if (!is_multicore && single_core_thread_id == this_id) {
            // Single-core mode: the one emulation thread impersonates whichever
            // guest core the CpuManager is currently running.
            return static_cast<u32>(system.GetCpuManager().CurrentCore());
        }
        const auto end =
            register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
        const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
        if (it == end) {
            return Core::INVALID_HOST_THREAD_ID;
        }
        return register_host_thread_values[static_cast<size_t>(
            std::distance(register_host_thread_keys.begin(), it))];
    }

    // Builds the (host, guest) thread-handle pair for the caller. The guest
    // handle is only filled in when the caller is a core thread currently
    // running a real (non-phantom) guest thread.
    Core::EmuThreadHandle GetCurrentEmuThreadID() const {
        Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
        result.host_handle = GetCurrentHostThreadID();
        if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
            return result;
        }
        const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
        const Kernel::Thread* current = sched.GetCurrentThread();
        if (current != nullptr && !current->IsPhantomMode()) {
            result.guest_handle = current->GetGlobalHandle();
        } else {
            result.guest_handle = InvalidHandle;
        }
        return result;
    }

    // Carves the system memory region into the service shared-memory blocks,
    // sets up the per-pool memory managers, and allocates the user slab heap.
    void InitializeMemoryLayout() {
        // Initialize memory layout
        constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()};
        constexpr std::size_t hid_size{0x40000};
        constexpr std::size_t font_size{0x1100000};
        constexpr std::size_t irs_size{0x8000};
        constexpr std::size_t time_size{0x1000};
        // The shared-memory blocks are laid out back-to-back from the start of
        // the System pool: HID, then font, then IRS, then time.
        constexpr PAddr hid_addr{layout.System().StartAddress()};
        constexpr PAddr font_pa{layout.System().StartAddress() + hid_size};
        constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size};
        constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};

        // Initialize memory manager
        memory_manager = std::make_unique<Memory::MemoryManager>();
        memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application,
                                          layout.Application().StartAddress(),
                                          layout.Application().EndAddress());
        memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet,
                                          layout.Applet().StartAddress(),
                                          layout.Applet().EndAddress());
        memory_manager->InitializeManager(Memory::MemoryManager::Pool::System,
                                          layout.System().StartAddress(),
                                          layout.System().EndAddress());

        // Kernel-owned (owner process == nullptr) shared memory, mapped
        // read-only into user processes.
        hid_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory");
        font_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory");
        irs_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory");
        time_shared_mem = Kernel::SharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr,
            {time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None,
            Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory");

        // Allocate slab heaps
        user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();

        // Initialize slab heaps
        constexpr u64 user_slab_heap_size{0x3de000};
        user_slab_heap_pages->Initialize(
            system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
            user_slab_heap_size);
    }

    // Monotonic ID generators for kernel objects; reset by Shutdown().
    std::atomic<u32> next_object_id{0};
    std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
    std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
    std::atomic<u64> next_thread_id{1};

    // Lists all processes that exist in the current session.
    std::vector<std::shared_ptr<Process>> process_list;
    // Non-owning; the owning shared_ptr lives in process_list.
    Process* current_process = nullptr;
    std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
    Kernel::Synchronization synchronization;
    Kernel::TimeManager time_manager;

    std::shared_ptr<ResourceLimit> system_resource_limit;

    std::shared_ptr<Core::Timing::EventType> preemption_event;

    // This is the kernel's handle table or supervisor handle table which
    // stores all the objects in place.
    HandleTable global_handle_table;

    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
    NamedPortTable named_ports;

    std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
    std::vector<Kernel::PhysicalCore> cores;

    // 0-3 IDs represent core threads, >3 represent others
    std::atomic<u32> registered_thread_ids{Core::Hardware::NUM_CPU_CORES};

    // Number of host threads is a relatively high number to avoid overflowing
    static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 1024;
    // Append-only parallel arrays mapping host std::thread::id -> emulator
    // thread ID; scanned linearly by GetCurrentHostThreadID().
    std::atomic<size_t> num_host_threads{0};
    std::array<std::atomic<std::thread::id>, NUM_REGISTRABLE_HOST_THREADS>
        register_host_thread_keys{};
    std::array<std::atomic<u32>, NUM_REGISTRABLE_HOST_THREADS> register_host_thread_values{};

    // Kernel memory management
    std::unique_ptr<Memory::MemoryManager> memory_manager;
    std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages;

    // Shared memory for services
    std::shared_ptr<Kernel::SharedMemory> hid_shared_mem;
    std::shared_ptr<Kernel::SharedMemory> font_shared_mem;
    std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
    std::shared_ptr<Kernel::SharedMemory> time_shared_mem;

    std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};

    bool is_multicore{};
    std::thread::id single_core_thread_id{};

    // Per-core MicroProfile tokens saved between Enter/ExitSVCProfile.
    std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};

    // System context
    Core::System& system;
};
|
|
|
|
|
2020-02-14 02:04:10 +00:00
|
|
|
// Construction only allocates the pimpl; call Initialize() before use.
KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}

// Tears down all kernel state on destruction.
KernelCore::~KernelCore() {
    Shutdown();
}
|
|
|
|
|
2020-03-09 02:39:41 +00:00
|
|
|
// Selects single-core vs multi-core emulation; forwarded to the pimpl.
void KernelCore::SetMulticore(bool is_multicore) {
    impl->SetMulticore(is_multicore);
}

// Performs full kernel bring-up (cores, resource limits, memory layout, ...).
void KernelCore::Initialize() {
    impl->Initialize(*this);
}

// Initializes each core's ARM interface for the current process.
void KernelCore::InitializeCores() {
    impl->InitializeCores();
}

// Destroys all kernel state; safe to call before re-initializing.
void KernelCore::Shutdown() {
    impl->Shutdown();
}
|
|
|
|
|
2019-11-25 01:15:51 +00:00
|
|
|
// Returns the default system-wide resource limit created at initialization.
std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
    return impl->system_resource_limit;
}

// Looks up a thread by its global handle; null if the handle is invalid.
std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
    return impl->global_handle_table.Get<Thread>(handle);
}
|
|
|
|
|
2019-11-25 01:15:51 +00:00
|
|
|
// Transfers ownership of a newly-created process into the session-wide list.
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
    impl->process_list.emplace_back(std::move(process));
}
|
|
|
|
|
2018-10-10 05:42:10 +01:00
|
|
|
// Switches the active process (and, on core threads, the active page table).
void KernelCore::MakeCurrentProcess(Process* process) {
    impl->MakeCurrentProcess(process);
}

// Returns the currently active process; may be null before boot.
Process* KernelCore::CurrentProcess() {
    return impl->current_process;
}

const Process* KernelCore::CurrentProcess() const {
    return impl->current_process;
}
|
|
|
|
|
2019-11-25 01:15:51 +00:00
|
|
|
// Returns every process that exists in the current emulation session.
const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const {
    return impl->process_list;
}
|
|
|
|
|
2020-12-03 02:08:35 +00:00
|
|
|
// Accessors for the global scheduler context shared by all cores.
Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
    return *impl->global_scheduler_context;
}

const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
    return *impl->global_scheduler_context;
}

// Per-core scheduler accessors; id must be a valid core index.
Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
    return *impl->schedulers[id];
}

const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
    return *impl->schedulers[id];
}
|
|
|
|
|
2020-01-25 22:55:32 +00:00
|
|
|
// Per-core accessors; id must be a valid core index (no bounds check here).
Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
    return impl->cores[id];
}

const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
    return impl->cores[id];
}
|
|
|
|
|
2020-02-25 02:04:12 +00:00
|
|
|
// Returns the physical core assigned to the calling host thread. Asserts if
// the caller is not a registered core thread.
Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
    const u32 core_index = impl->GetCurrentHostThreadID();
    ASSERT(core_index < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_index];
}

const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
    const u32 core_index = impl->GetCurrentHostThreadID();
    ASSERT(core_index < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_index];
}
|
|
|
|
|
2020-12-03 02:08:35 +00:00
|
|
|
// Returns the scheduler for the calling host thread's core, or nullptr when
// the caller is not a guest-core thread (an expected situation).
Kernel::KScheduler* KernelCore::CurrentScheduler() {
    const u32 core_id = impl->GetCurrentHostThreadID();
    if (core_id < Core::Hardware::NUM_CPU_CORES) {
        return impl->schedulers[core_id].get();
    }
    // This is expected when called from a non-guest thread.
    return nullptr;
}
|
|
|
|
|
|
|
|
// Accessors for the per-core CPU interrupt handlers.
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
    return impl->interrupts;
}

const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
    const {
    return impl->interrupts;
}
|
|
|
|
|
2020-02-11 21:36:39 +00:00
|
|
|
// Accessors for the kernel's synchronization subsystem.
Kernel::Synchronization& KernelCore::Synchronization() {
    return impl->synchronization;
}

const Kernel::Synchronization& KernelCore::Synchronization() const {
    return impl->synchronization;
}

// Accessors for the kernel's thread time-out manager.
Kernel::TimeManager& KernelCore::TimeManager() {
    return impl->time_manager;
}

const Kernel::TimeManager& KernelCore::TimeManager() const {
    return impl->time_manager;
}
|
|
|
|
|
2020-01-25 22:55:32 +00:00
|
|
|
// Accessors for the exclusive (load/store-exclusive) monitor shared by cores.
Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() {
    return *impl->exclusive_monitor;
}

const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
    return *impl->exclusive_monitor;
}
|
|
|
|
|
|
|
|
void KernelCore::InvalidateAllInstructionCaches() {
|
2020-11-14 07:20:32 +00:00
|
|
|
for (auto& physical_core : impl->cores) {
|
|
|
|
physical_core.ArmInterface().ClearInstructionCache();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Invalidates [addr, addr + size) in the instruction cache of every core that
// has already been initialized; uninitialized cores are skipped.
void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
    for (auto& core : impl->cores) {
        if (core.IsInitialized()) {
            core.ArmInterface().InvalidateCacheRange(addr, size);
        }
    }
}
|
|
|
|
|
|
|
|
// Intentionally a no-op stub kept for callers; rescheduling is pending a
// reimplementation under the new scheduler.
void KernelCore::PrepareReschedule(std::size_t id) {
    // TODO: Reimplement, this
}
|
|
|
|
|
2019-11-25 01:15:51 +00:00
|
|
|
// Registers a named service port, retrievable via the ConnectToPort SVC.
void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
    impl->named_ports.emplace(std::move(name), std::move(port));
}

// Finds a named port; compare the result with IsValidNamedPort().
KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) {
    return impl->named_ports.find(name);
}

KernelCore::NamedPortTable::const_iterator KernelCore::FindNamedPort(
    const std::string& name) const {
    return impl->named_ports.find(name);
}

// True when the iterator returned by FindNamedPort refers to a real port.
bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
    return port != impl->named_ports.cend();
}
|
|
|
|
|
2018-08-28 17:30:33 +01:00
|
|
|
// Atomically allocates the next kernel object ID.
u32 KernelCore::CreateNewObjectID() {
    return impl->next_object_id++;
}

// Atomically allocates the next thread ID.
u64 KernelCore::CreateNewThreadID() {
    return impl->next_thread_id++;
}

// Atomically allocates the next process ID from the kernel (KIP) range.
u64 KernelCore::CreateNewKernelProcessID() {
    return impl->next_kernel_process_id++;
}

// Atomically allocates the next process ID from the user range.
u64 KernelCore::CreateNewUserProcessID() {
    return impl->next_user_process_id++;
}
|
2015-08-06 01:26:52 +01:00
|
|
|
|
2020-02-14 13:30:53 +00:00
|
|
|
// Accessors for the supervisor (kernel-global) handle table.
Kernel::HandleTable& KernelCore::GlobalHandleTable() {
    return impl->global_handle_table;
}

const Kernel::HandleTable& KernelCore::GlobalHandleTable() const {
    return impl->global_handle_table;
}
|
|
|
|
|
|
|
|
// Registers the calling host thread as the emulator thread for a guest core.
void KernelCore::RegisterCoreThread(std::size_t core_id) {
    impl->RegisterCoreThread(core_id);
}

// Registers the calling host thread as a non-core (auxiliary) thread.
void KernelCore::RegisterHostThread() {
    impl->RegisterHostThread();
}
|
|
|
|
|
2020-02-22 14:27:40 +00:00
|
|
|
// Returns the calling host thread's registered ID, or
// Core::INVALID_HOST_THREAD_ID if it was never registered.
u32 KernelCore::GetCurrentHostThreadID() const {
    return impl->GetCurrentHostThreadID();
}

// Returns the (host, guest) thread-handle pair for the calling thread.
Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
    return impl->GetCurrentEmuThreadID();
}
|
2014-05-23 00:06:12 +01:00
|
|
|
|
2020-04-09 02:06:37 +01:00
|
|
|
// Accessors for the kernel page/memory-pool manager.
Memory::MemoryManager& KernelCore::MemoryManager() {
    return *impl->memory_manager;
}

const Memory::MemoryManager& KernelCore::MemoryManager() const {
    return *impl->memory_manager;
}

// Accessors for the slab heap backing user page allocations.
Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() {
    return *impl->user_slab_heap_pages;
}

const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const {
    return *impl->user_slab_heap_pages;
}
|
|
|
|
|
|
|
|
// Accessors for the kernel-owned service shared-memory blocks created in
// InitializeMemoryLayout(): HID, shared font, IRS, and time services.
Kernel::SharedMemory& KernelCore::GetHidSharedMem() {
    return *impl->hid_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const {
    return *impl->hid_shared_mem;
}

Kernel::SharedMemory& KernelCore::GetFontSharedMem() {
    return *impl->font_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const {
    return *impl->font_shared_mem;
}

Kernel::SharedMemory& KernelCore::GetIrsSharedMem() {
    return *impl->irs_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const {
    return *impl->irs_shared_mem;
}

Kernel::SharedMemory& KernelCore::GetTimeSharedMem() {
    return *impl->time_shared_mem;
}

const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
    return *impl->time_shared_mem;
}
|
|
|
|
|
2020-02-25 02:04:12 +00:00
|
|
|
void KernelCore::Suspend(bool in_suspention) {
|
|
|
|
const bool should_suspend = exception_exited || in_suspention;
|
|
|
|
{
|
2020-12-04 06:26:42 +00:00
|
|
|
KScopedSchedulerLock lock(*this);
|
2020-02-25 02:04:12 +00:00
|
|
|
ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
|
|
|
|
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
|
|
|
|
impl->suspend_threads[i]->SetStatus(status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:13:39 +00:00
|
|
|
// True when the kernel is running in multi-core emulation mode.
bool KernelCore::IsMulticore() const {
    return impl->is_multicore;
}
|
|
|
|
|
2020-02-25 02:04:12 +00:00
|
|
|
// Marks the session as exceptionally exited and suspends emulation; the flag
// also makes later Suspend(false) calls keep the system suspended.
void KernelCore::ExceptionalExit() {
    exception_exited = true;
    Suspend(true);
}
|
|
|
|
|
2020-03-12 20:48:43 +00:00
|
|
|
// Starts the MicroProfile SVC scope for the calling core thread, saving the
// enter token in the per-core slot for the matching ExitSVCProfile().
// NOTE(review): the host-thread ID is used unchecked as an index into
// svc_ticks (sized NUM_CPU_CORES) — callers are presumably always core
// threads; confirm no non-core thread reaches this path.
void KernelCore::EnterSVCProfile() {
    std::size_t core = impl->GetCurrentHostThreadID();
    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}

// Ends the MicroProfile SVC scope opened by EnterSVCProfile() on this core.
void KernelCore::ExitSVCProfile() {
    std::size_t core = impl->GetCurrentHostThreadID();
    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
}
|
|
|
|
|
2018-01-01 18:25:37 +00:00
|
|
|
} // namespace Kernel
|