Mirror of https://github.com/yuzu-mirror/yuzu.git (synced 2024-11-19 22:09:57 +00:00)

Merge pull request #11843 from liamwhite/sync-process

kernel: update KProcess

Commit 6aee148b17: 39 changed files with 2013 additions and 1214 deletions
@@ -86,9 +86,9 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
     std::map<std::string, Symbols::Symbols> symbols;
     for (const auto& module : modules) {
-        symbols.insert_or_assign(
-            module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(),
-                                               system.ApplicationProcess()->Is64BitProcess()));
+        symbols.insert_or_assign(module.second,
+                                 Symbols::GetSymbols(module.first, system.ApplicationMemory(),
+                                                     system.ApplicationProcess()->Is64Bit()));
     }

     for (auto& entry : out) {
@@ -309,17 +309,10 @@ struct System::Impl {

     telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);

-    // Create a resource limit for the process.
-    const auto physical_memory_size =
-        kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
-    auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size);
-
     // Create the process.
     auto main_process = Kernel::KProcess::Create(system.Kernel());
-    ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
-                                        Kernel::KProcess::ProcessType::Userland, resource_limit)
-               .IsSuccess());
     Kernel::KProcess::Register(system.Kernel(), main_process);
+    kernel.AppendNewProcess(main_process);
     kernel.MakeApplicationProcess(main_process);
     const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
     if (load_result != Loader::ResultStatus::Success) {
@@ -418,6 +411,7 @@ struct System::Impl {
         services->KillNVNFlinger();
     }
     kernel.CloseServices();
+    kernel.ShutdownCores();
     services.reset();
     service_manager.reset();
     cheat_engine.reset();
@@ -429,7 +423,6 @@ struct System::Impl {
     gpu_core.reset();
     host1x_core.reset();
     perf_stats.reset();
-    kernel.ShutdownCores();
     cpu_manager.Shutdown();
     debugger.reset();
     kernel.Shutdown();
@@ -258,20 +258,20 @@ private:
         Kernel::KScopedSchedulerLock sl{system.Kernel()};

         // Put all threads to sleep on next scheduler round.
-        for (auto* thread : ThreadList()) {
-            thread->RequestSuspend(Kernel::SuspendType::Debug);
+        for (auto& thread : ThreadList()) {
+            thread.RequestSuspend(Kernel::SuspendType::Debug);
         }
     }

     void ResumeEmulation(Kernel::KThread* except = nullptr) {
         // Wake up all threads.
-        for (auto* thread : ThreadList()) {
-            if (thread == except) {
+        for (auto& thread : ThreadList()) {
+            if (std::addressof(thread) == except) {
                 continue;
             }

-            thread->SetStepState(Kernel::StepState::NotStepping);
-            thread->Resume(Kernel::SuspendType::Debug);
+            thread.SetStepState(Kernel::StepState::NotStepping);
+            thread.Resume(Kernel::SuspendType::Debug);
         }
     }

@@ -283,13 +283,17 @@ private:
     }

     void UpdateActiveThread() {
-        const auto& threads{ThreadList()};
-        if (std::find(threads.begin(), threads.end(), state->active_thread) == threads.end()) {
-            state->active_thread = threads.front();
+        auto& threads{ThreadList()};
+        for (auto& thread : threads) {
+            if (std::addressof(thread) == state->active_thread) {
+                // Thread is still alive, no need to update.
+                return;
+            }
         }
+        state->active_thread = std::addressof(threads.front());
     }

-    const std::list<Kernel::KThread*>& ThreadList() {
+    Kernel::KProcess::ThreadList& ThreadList() {
         return system.ApplicationProcess()->GetThreadList();
     }

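A minimal standalone sketch (not from the patch) of the iteration pattern the debugger hunks above switch to: the process-owned intrusive list yields references to the threads themselves rather than KThread* elements, so comparisons against a stored pointer go through std::addressof. The sketch uses std::list<Thread> as a stand-in for the intrusive container.

    #include <cstdio>
    #include <list>
    #include <memory>

    struct Thread {
        int id;
    };

    // Resume every thread except the one identified by address, iterating by reference.
    void ResumeAllExcept(std::list<Thread>& threads, Thread* except) {
        for (auto& thread : threads) {
            if (std::addressof(thread) == except) {
                continue; // skip the excepted thread, identified by its address
            }
            std::printf("resuming thread %d\n", thread.id);
        }
    }

    int main() {
        std::list<Thread> threads{{1}, {2}, {3}};
        ResumeAllExcept(threads, std::addressof(threads.front()));
    }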
@@ -109,7 +109,7 @@ static std::string EscapeXML(std::string_view data) {

 GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
     : DebuggerFrontend(backend_), system{system_} {
-    if (system.ApplicationProcess()->Is64BitProcess()) {
+    if (system.ApplicationProcess()->Is64Bit()) {
         arch = std::make_unique<GDBStubA64>();
     } else {
         arch = std::make_unique<GDBStubA32>();
@@ -446,10 +446,10 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
 // See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp

 static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
-                                                          const Kernel::KThread* thread) {
+                                                          const Kernel::KThread& thread) {
     // Read thread type from TLS
-    const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
-    const VAddr argument_thread_type{thread->GetArgument()};
+    const VAddr tls_thread_type{memory.Read32(thread.GetTlsAddress() + 0x1fc)};
+    const VAddr argument_thread_type{thread.GetArgument()};

     if (argument_thread_type && tls_thread_type != argument_thread_type) {
         // Probably not created by nnsdk, no name available.
@@ -477,10 +477,10 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
 }

 static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
-                                                          const Kernel::KThread* thread) {
+                                                          const Kernel::KThread& thread) {
     // Read thread type from TLS
-    const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
-    const VAddr argument_thread_type{thread->GetArgument()};
+    const VAddr tls_thread_type{memory.Read64(thread.GetTlsAddress() + 0x1f8)};
+    const VAddr argument_thread_type{thread.GetArgument()};

     if (argument_thread_type && tls_thread_type != argument_thread_type) {
         // Probably not created by nnsdk, no name available.
@@ -508,16 +508,16 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
 }

 static std::optional<std::string> GetThreadName(Core::System& system,
-                                                const Kernel::KThread* thread) {
-    if (system.ApplicationProcess()->Is64BitProcess()) {
+                                                const Kernel::KThread& thread) {
+    if (system.ApplicationProcess()->Is64Bit()) {
         return GetNameFromThreadType64(system.ApplicationMemory(), thread);
     } else {
         return GetNameFromThreadType32(system.ApplicationMemory(), thread);
     }
 }

-static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
-    switch (thread->GetWaitReasonForDebugging()) {
+static std::string_view GetThreadWaitReason(const Kernel::KThread& thread) {
+    switch (thread.GetWaitReasonForDebugging()) {
     case Kernel::ThreadWaitReasonForDebugging::Sleep:
         return "Sleep";
     case Kernel::ThreadWaitReasonForDebugging::IPC:
@@ -535,8 +535,8 @@ static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
     }
 }

-static std::string GetThreadState(const Kernel::KThread* thread) {
-    switch (thread->GetState()) {
+static std::string GetThreadState(const Kernel::KThread& thread) {
+    switch (thread.GetState()) {
     case Kernel::ThreadState::Initialized:
         return "Initialized";
     case Kernel::ThreadState::Waiting:
@@ -604,7 +604,7 @@ void GDBStub::HandleQuery(std::string_view command) {
         const auto& threads = system.ApplicationProcess()->GetThreadList();
         std::vector<std::string> thread_ids;
         for (const auto& thread : threads) {
-            thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
+            thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
         }
         SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
     } else if (command.starts_with("sThreadInfo")) {
@@ -616,14 +616,14 @@ void GDBStub::HandleQuery(std::string_view command) {
         buffer += "<threads>";

         const auto& threads = system.ApplicationProcess()->GetThreadList();
-        for (const auto* thread : threads) {
+        for (const auto& thread : threads) {
             auto thread_name{GetThreadName(system, thread)};
             if (!thread_name) {
-                thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
+                thread_name = fmt::format("Thread {:d}", thread.GetThreadId());
             }

             buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
-                                  thread->GetThreadId(), thread->GetActiveCore(),
+                                  thread.GetThreadId(), thread.GetActiveCore(),
                                   EscapeXML(*thread_name), GetThreadState(thread));
         }

@@ -850,10 +850,10 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 }

 Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
-    const auto& threads{system.ApplicationProcess()->GetThreadList()};
-    for (auto* thread : threads) {
-        if (thread->GetThreadId() == thread_id) {
-            return thread;
+    auto& threads{system.ApplicationProcess()->GetThreadList()};
+    for (auto& thread : threads) {
+        if (thread.GetThreadId() == thread_id) {
+            return std::addressof(thread);
         }
     }

@@ -104,16 +104,16 @@ Loader::ResultStatus ProgramMetadata::Reload(VirtualFile file) {
 }

 /*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
-    // Allow use of cores 0~3 and thread priorities 1~63.
-    constexpr u32 default_thread_info_capability = 0x30007F7;
+    // Allow use of cores 0~3 and thread priorities 16~63.
+    constexpr u32 default_thread_info_capability = 0x30043F7;

     ProgramMetadata result;

     result.LoadManual(
         true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
-        0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/,
-        0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/,
-        0x1FE00000 /*system_resource_size*/, {default_thread_info_capability} /*capabilities*/);
+        0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x100000 /*main_thread_stack_size*/,
+        0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, 0 /*system_resource_size*/,
+        {default_thread_info_capability} /*capabilities*/);

     return result;
 }
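Not part of the patch: a small sketch decoding why the new default constant matches the updated comment, under the assumption that the standard Horizon ThreadInfo capability layout applies (lowest/highest allowed priority in bits 4-9 and 10-15, minimum/maximum core number in bits 16-23 and 24-31).

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::uint32_t cap = 0x30043F7; // new default_thread_info_capability
        const std::uint32_t lowest_priority = (cap >> 4) & 0x3F;   // 63
        const std::uint32_t highest_priority = (cap >> 10) & 0x3F; // 16
        const std::uint32_t min_core = (cap >> 16) & 0xFF;         // 0
        const std::uint32_t max_core = (cap >> 24) & 0xFF;         // 3
        std::printf("priorities %u~%u, cores %u~%u\n", highest_priority, lowest_priority,
                    min_core, max_core);
    }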
@@ -73,6 +73,9 @@ public:
     u64 GetFilesystemPermissions() const;
     u32 GetSystemResourceSize() const;
     const KernelCapabilityDescriptors& GetKernelCapabilities() const;
+    const std::array<u8, 0x10>& GetName() const {
+        return npdm_header.application_name;
+    }

     void Print() const;

@@ -164,14 +167,14 @@ private:
         u32_le unk_size_2;
     };

-    Header npdm_header;
-    AciHeader aci_header;
-    AcidHeader acid_header;
+    Header npdm_header{};
+    AciHeader aci_header{};
+    AcidHeader acid_header{};

-    FileAccessControl acid_file_access;
-    FileAccessHeader aci_file_access;
+    FileAccessControl acid_file_access{};
+    FileAccessHeader aci_file_access{};

-    KernelCapabilityDescriptors aci_kernel_capabilities;
+    KernelCapabilityDescriptors aci_kernel_capabilities{};
 };

 } // namespace FileSys
@@ -8,7 +8,11 @@

 #include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
 #include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_trace.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"

 namespace Kernel::Board::Nintendo::Nx {

@@ -30,6 +34,8 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
 constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
     RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;

+constexpr const std::size_t SecureAlignment = 128_KiB;
+
 namespace {

 using namespace Common::Literals;
@@ -183,4 +189,57 @@ u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
     return GenerateUniformRange(min, max, GenerateRandomU64);
 }

+size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
+    if (pool == static_cast<u32>(KMemoryManager::Pool::Applet)) {
+        return 0;
+    } else {
+        // return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
+        return size;
+    }
+}
+
+Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
+                                            u32 pool) {
+    // Applet secure memory is handled separately.
+    UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
+
+    // Ensure the size is aligned.
+    const size_t alignment =
+        (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
+    R_UNLESS(Common::IsAligned(size, alignment), ResultInvalidSize);
+
+    // Allocate the memory.
+    const size_t num_pages = size / PageSize;
+    const KPhysicalAddress paddr = kernel.MemoryManager().AllocateAndOpenContinuous(
+        num_pages, alignment / PageSize,
+        KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool),
+                                     KMemoryManager::Direction::FromFront));
+    R_UNLESS(paddr != 0, ResultOutOfMemory);
+
+    // Ensure we don't leak references to the memory on error.
+    ON_RESULT_FAILURE {
+        kernel.MemoryManager().Close(paddr, num_pages);
+    };
+
+    // We succeeded.
+    *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
+    R_SUCCEED();
+}
+
+void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
+                                      u32 pool) {
+    // Applet secure memory is handled separately.
+    UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
+
+    // Ensure the size is aligned.
+    const size_t alignment =
+        (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
+    ASSERT(Common::IsAligned(GetInteger(address), alignment));
+    ASSERT(Common::IsAligned(size, alignment));
+
+    // Close the secure region's pages.
+    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
+                                 size / PageSize);
+}
+
 } // namespace Kernel::Board::Nintendo::Nx
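Aside, not from the diff: the R_UNLESS/ASSERT alignment checks above reject sizes and addresses that are not multiples of the pool-dependent alignment (PageSize for the System pool, otherwise the new 128 KiB SecureAlignment). For power-of-two alignments, which is the assumption here, the test reduces to a mask, as this small sketch shows.

    #include <cassert>
    #include <cstddef>

    // Equivalent alignment test, assuming alignment is a power of two.
    constexpr bool IsAlignedPow2(std::size_t value, std::size_t alignment) {
        return (value & (alignment - 1)) == 0;
    }

    int main() {
        constexpr std::size_t PageSize = 0x1000;         // 4 KiB
        constexpr std::size_t SecureAlignment = 0x20000; // 128 KiB
        static_assert(IsAlignedPow2(0x40000, SecureAlignment));
        static_assert(!IsAlignedPow2(0x41000, SecureAlignment));
        static_assert(IsAlignedPow2(0x3000, PageSize));
        assert(IsAlignedPow2(0x20000, SecureAlignment));
    }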
@@ -4,6 +4,11 @@
 #pragma once

 #include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+class KernelCore;
+}

 namespace Kernel::Board::Nintendo::Nx {

@@ -25,8 +30,16 @@ public:
         static std::size_t GetMinimumNonSecureSystemPoolSize();
     };

+    // Randomness.
     static u64 GenerateRandomRange(u64 min, u64 max);
     static u64 GenerateRandomU64();
+
+    // Secure Memory.
+    static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
+    static Result AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
+                                       u32 pool);
+    static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
+                                 u32 pool);
 };

 } // namespace Kernel::Board::Nintendo::Nx
@@ -200,8 +200,8 @@ private:

         RawCapabilityValue raw;
         BitField<0, 15, CapabilityType> id;
-        BitField<15, 4, u32> major_version;
-        BitField<19, 13, u32> minor_version;
+        BitField<15, 4, u32> minor_version;
+        BitField<19, 13, u32> major_version;
     };

     union HandleTable {
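Not part of the patch: a plain-integer sketch of the corrected field layout in the capability union above, which keeps the 4-bit field at bit offset 15 as the minor version and the 13-bit field at bit offset 19 as the major version. The raw value below is hypothetical, chosen only to illustrate the extraction.

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical raw capability value: major 15, minor 3.
        const std::uint32_t raw = (15u << 19) | (3u << 15);
        const std::uint32_t minor_version = (raw >> 15) & 0xF;    // 4 bits at offset 15
        const std::uint32_t major_version = (raw >> 19) & 0x1FFF; // 13 bits at offset 19
        std::printf("version %u.%u\n", major_version, minor_version);
    }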
@@ -107,12 +107,12 @@ KConditionVariable::KConditionVariable(Core::System& system)

 KConditionVariable::~KConditionVariable() = default;

-Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
-    KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
+Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress addr) {
+    KThread* owner_thread = GetCurrentThreadPointer(kernel);

     // Signal the address.
     {
-        KScopedSchedulerLock sl(m_kernel);
+        KScopedSchedulerLock sl(kernel);

         // Remove waiter thread.
         bool has_waiters{};
|
||||||
|
|
||||||
// Write the value to userspace.
|
// Write the value to userspace.
|
||||||
Result result{ResultSuccess};
|
Result result{ResultSuccess};
|
||||||
if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
|
if (WriteToUser(kernel, addr, std::addressof(next_value))) [[likely]] {
|
||||||
result = ResultSuccess;
|
result = ResultSuccess;
|
||||||
} else {
|
} else {
|
||||||
result = ResultInvalidCurrentMemory;
|
result = ResultInvalidCurrentMemory;
|
||||||
|
@@ -148,28 +148,28 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
     }
 }

-Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
-    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
-    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
+Result KConditionVariable::WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
+                                          u32 value) {
+    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

     // Wait for the address.
     KThread* owner_thread{};
     {
-        KScopedSchedulerLock sl(m_kernel);
+        KScopedSchedulerLock sl(kernel);

         // Check if the thread should terminate.
         R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

         // Read the tag from userspace.
         u32 test_tag{};
-        R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
-                 ResultInvalidCurrentMemory);
+        R_UNLESS(ReadFromUser(kernel, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);

         // If the tag isn't the handle (with wait mask), we're done.
         R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));

         // Get the lock owner thread.
-        owner_thread = GetCurrentProcess(m_kernel)
+        owner_thread = GetCurrentProcess(kernel)
                            .GetHandleTable()
                            .GetObjectWithoutPseudoHandle<KThread>(handle)
                            .ReleasePointerUnsafe();

@@ -24,11 +24,12 @@ public:
     explicit KConditionVariable(Core::System& system);
     ~KConditionVariable();

-    // Arbitration
-    Result SignalToAddress(KProcessAddress addr);
-    Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
+    // Arbitration.
+    static Result SignalToAddress(KernelCore& kernel, KProcessAddress addr);
+    static Result WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
+                                 u32 value);

-    // Condition variable
+    // Condition variable.
     void Signal(u64 cv_key, s32 count);
     Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);

@@ -22,7 +22,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
     KScopedSchedulerLock sl{kernel};

     // Pin the current thread.
-    process->PinCurrentThread(core_id);
+    process->PinCurrentThread();

     // Set the interrupt flag for the thread.
     GetCurrentThread(kernel).SetInterruptFlag();
@@ -11,6 +11,7 @@
 #include "core/hle/kernel/initial_process.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"

@@ -168,11 +169,37 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
 }

 Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
-    UNREACHABLE();
+    const u32 pool_index = static_cast<u32>(pool);
+
+    // Lock the pool.
+    KScopedLightLock lk(m_pool_locks[pool_index]);
+
+    // Check that we don't already have an optimized process.
+    R_UNLESS(!m_has_optimized_process[pool_index], ResultBusy);
+
+    // Set the optimized process id.
+    m_optimized_process_ids[pool_index] = process_id;
+    m_has_optimized_process[pool_index] = true;
+
+    // Clear the management area for the optimized process.
+    for (auto* manager = this->GetFirstManager(pool, Direction::FromFront); manager != nullptr;
+         manager = this->GetNextManager(manager, Direction::FromFront)) {
+        manager->InitializeOptimizedMemory(m_system.Kernel());
+    }
+
+    R_SUCCEED();
 }

 void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
-    UNREACHABLE();
+    const u32 pool_index = static_cast<u32>(pool);
+
+    // Lock the pool.
+    KScopedLightLock lk(m_pool_locks[pool_index]);
+
+    // If the process was optimized, clear it.
+    if (m_has_optimized_process[pool_index] && m_optimized_process_ids[pool_index] == process_id) {
+        m_has_optimized_process[pool_index] = false;
+    }
 }

 KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
@@ -207,7 +234,7 @@ KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, siz

     // Maintain the optimized memory bitmap, if we should.
     if (m_has_optimized_process[static_cast<size_t>(pool)]) {
-        UNIMPLEMENTED();
+        chosen_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block, num_pages);
     }

     // Open the first reference to the pages.
@@ -255,7 +282,8 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,

         // Maintain the optimized memory bitmap, if we should.
         if (unoptimized) {
-            UNIMPLEMENTED();
+            cur_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block,
+                                                    pages_per_alloc);
         }

         num_pages -= pages_per_alloc;
@@ -358,8 +386,8 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
             // Process part or all of the block.
             const size_t cur_pages =
                 std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-            any_new =
-                manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+            any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
+                                                         cur_pages, fill_pattern);

             // Advance.
             cur_address += cur_pages * PageSize;
@@ -382,7 +410,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
             // Track some or all of the current pages.
             const size_t cur_pages =
                 std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-            manager.TrackOptimizedAllocation(cur_address, cur_pages);
+            manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);

             // Advance.
             cur_address += cur_pages * PageSize;
@@ -427,17 +455,86 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
     return total_management_size;
 }

-void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
-    UNREACHABLE();
+void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
+    auto optimize_pa =
+        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+    std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
 }

-void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
-    UNREACHABLE();
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                                      size_t num_pages) {
+    auto optimize_pa =
+        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+    // Get the range we're tracking.
+    size_t offset = this->GetPageOffset(block);
+    const size_t last = offset + num_pages - 1;
+
+    // Track.
+    while (offset <= last) {
+        // Mark the page as not being optimized-allocated.
+        optimize_map[offset / Common::BitSize<u64>()] &=
+            ~(u64(1) << (offset % Common::BitSize<u64>()));
+
+        offset++;
+    }
 }

-bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
-                                                      u8 fill_pattern) {
-    UNREACHABLE();
+void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                                    size_t num_pages) {
+    auto optimize_pa =
+        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+    // Get the range we're tracking.
+    size_t offset = this->GetPageOffset(block);
+    const size_t last = offset + num_pages - 1;
+
+    // Track.
+    while (offset <= last) {
+        // Mark the page as being optimized-allocated.
+        optimize_map[offset / Common::BitSize<u64>()] |=
+            (u64(1) << (offset % Common::BitSize<u64>()));
+
+        offset++;
+    }
+}
+
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                                      size_t num_pages, u8 fill_pattern) {
+    auto& device_memory = kernel.System().DeviceMemory();
+    auto optimize_pa =
+        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
+
+    // We want to return whether any pages were newly allocated.
+    bool any_new = false;
+
+    // Get the range we're processing.
+    size_t offset = this->GetPageOffset(block);
+    const size_t last = offset + num_pages - 1;
+
+    // Process.
+    while (offset <= last) {
+        // Check if the page has been optimized-allocated before.
+        if ((optimize_map[offset / Common::BitSize<u64>()] &
+             (u64(1) << (offset % Common::BitSize<u64>()))) == 0) {
+            // If not, it's new.
+            any_new = true;
+
+            // Fill the page.
+            auto* ptr = device_memory.GetPointer<u8>(m_heap.GetAddress());
+            std::memset(ptr + offset * PageSize, fill_pattern, PageSize);
+        }
+
+        offset++;
+    }
+
+    // Return the number of pages we processed.
+    return any_new;
 }

 size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
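Not from the patch: a standalone sketch of the per-page bitmap arithmetic the tracking functions above rely on, under the assumption of 64-bit map words where each word covers 64 pages (word index = page offset / 64, bit index = page offset % 64).

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        constexpr std::size_t num_pages = 256;
        std::vector<std::uint64_t> optimize_map((num_pages + 63) / 64, 0);

        auto set_page = [&](std::size_t offset) {
            // Mark the page as optimized-allocated.
            optimize_map[offset / 64] |= (std::uint64_t(1) << (offset % 64));
        };
        auto clear_page = [&](std::size_t offset) {
            // Mark the page as not optimized-allocated.
            optimize_map[offset / 64] &= ~(std::uint64_t(1) << (offset % 64));
        };
        auto test_page = [&](std::size_t offset) {
            return (optimize_map[offset / 64] & (std::uint64_t(1) << (offset % 64))) != 0;
        };

        set_page(70);
        std::printf("page 70: %d\n", test_page(70) ? 1 : 0); // 1
        clear_page(70);
        std::printf("page 70: %d\n", test_page(70) ? 1 : 0); // 0
    }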
|
@ -216,14 +216,14 @@ private:
|
||||||
m_heap.SetInitialUsedSize(reserved_size);
|
m_heap.SetInitialUsedSize(reserved_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void InitializeOptimizedMemory() {
|
void InitializeOptimizedMemory(KernelCore& kernel);
|
||||||
UNIMPLEMENTED();
|
|
||||||
}
|
|
||||||
|
|
||||||
void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
|
void TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
|
||||||
void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
|
size_t num_pages);
|
||||||
|
void TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages);
|
||||||
|
|
||||||
bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
|
bool ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
|
||||||
|
size_t num_pages, u8 fill_pattern);
|
||||||
|
|
||||||
constexpr Pool GetPool() const {
|
constexpr Pool GetPool() const {
|
||||||
return m_pool;
|
return m_pool;
|
||||||
|
|
|
@ -82,14 +82,14 @@ public:
|
||||||
|
|
||||||
using namespace Common::Literals;
|
using namespace Common::Literals;
|
||||||
|
|
||||||
constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
|
constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
|
||||||
switch (as_type) {
|
switch (as_type) {
|
||||||
case FileSys::ProgramAddressSpaceType::Is32Bit:
|
case Svc::CreateProcessFlag::AddressSpace32Bit:
|
||||||
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
|
case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
|
||||||
return 32;
|
return 32;
|
||||||
case FileSys::ProgramAddressSpaceType::Is36Bit:
|
case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
|
||||||
return 36;
|
return 36;
|
||||||
case FileSys::ProgramAddressSpaceType::Is39Bit:
|
case Svc::CreateProcessFlag::AddressSpace64Bit:
|
||||||
return 39;
|
return 39;
|
||||||
default:
|
default:
|
||||||
ASSERT(false);
|
ASSERT(false);
|
||||||
|
@@ -105,7 +105,7 @@ KPageTable::KPageTable(Core::System& system_)

 KPageTable::~KPageTable() = default;

-Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
                                         bool enable_das_merge, bool from_back,
                                         KMemoryManager::Pool pool, KProcessAddress code_addr,
                                         size_t code_size, KSystemResource* system_resource,
@@ -133,7 +133,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     ASSERT(code_addr + code_size - 1 <= end - 1);

     // Adjust heap/alias size if we don't have an alias region
-    if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
+    if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
         heap_region_size += alias_region_size;
         alias_region_size = 0;
     }
@@ -63,7 +63,7 @@ public:
     explicit KPageTable(Core::System& system_);
    ~KPageTable();

-    Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
                                 bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
                                 KProcessAddress code_addr, size_t code_size,
                                 KSystemResource* system_resource, KResourceLimit* resource_limit,
@@ -400,7 +400,7 @@ public:
     constexpr size_t GetAliasCodeRegionSize() const {
         return m_alias_code_region_end - m_alias_code_region_start;
     }
-    size_t GetNormalMemorySize() {
+    size_t GetNormalMemorySize() const {
         KScopedLightLock lk(m_general_lock);
         return GetHeapSize() + m_mapped_physical_memory_size;
     }
(File diff suppressed because it is too large.)
@@ -1,59 +1,23 @@
-// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once

-#include <array>
-#include <cstddef>
-#include <list>
 #include <map>
-#include <string>
+
+#include "core/hle/kernel/code_set.h"
 #include "core/hle/kernel/k_address_arbiter.h"
-#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_capabilities.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_system_resource.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_local_page.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/kernel/k_worker_task.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/slab_helpers.h"
-#include "core/hle/result.h"
-
-namespace Core {
-namespace Memory {
-class Memory;
-};
-
-class System;
-} // namespace Core
-
-namespace FileSys {
-class ProgramMetadata;
-}

 namespace Kernel {

-class KernelCore;
-class KResourceLimit;
-class KThread;
-class KSharedMemoryInfo;
-class TLSPage;
-
-struct CodeSet;
-
-enum class MemoryRegion : u16 {
-    APPLICATION = 1,
-    SYSTEM = 2,
-    BASE = 3,
-};
-
-enum class ProcessActivity : u32 {
-    Runnable,
-    Paused,
-};
-
 enum class DebugWatchpointType : u8 {
     None = 0,
     Read = 1 << 0,
@@ -72,9 +36,6 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
     KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);

 public:
-    explicit KProcess(KernelCore& kernel);
-    ~KProcess() override;
-
     enum class State {
         Created = static_cast<u32>(Svc::ProcessState::Created),
         CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
@ -86,337 +47,83 @@ public:
|
||||||
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
|
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
|
||||||
};
|
};
|
||||||
|
|
||||||
enum : u64 {
|
using ThreadList = Common::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
|
||||||
/// Lowest allowed process ID for a kernel initial process.
|
|
||||||
InitialKIPIDMin = 1,
|
|
||||||
/// Highest allowed process ID for a kernel initial process.
|
|
||||||
InitialKIPIDMax = 80,
|
|
||||||
|
|
||||||
/// Lowest allowed process ID for a userland process.
|
static constexpr size_t AslrAlignment = 2_MiB;
|
||||||
ProcessIDMin = 81,
|
|
||||||
/// Highest allowed process ID for a userland process.
|
|
||||||
ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Used to determine how process IDs are assigned.
|
public:
|
||||||
enum class ProcessType {
|
static constexpr u64 InitialProcessIdMin = 1;
|
||||||
KernelInternal,
|
static constexpr u64 InitialProcessIdMax = 0x50;
|
||||||
Userland,
|
|
||||||
};
|
|
||||||
|
|
||||||
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
|
static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
|
||||||
|
static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
|
||||||
static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
|
|
||||||
ProcessType type, KResourceLimit* res_limit);
|
|
||||||
|
|
||||||
/// Gets a reference to the process' page table.
|
|
||||||
KPageTable& GetPageTable() {
|
|
||||||
return m_page_table;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets const a reference to the process' page table.
|
|
||||||
const KPageTable& GetPageTable() const {
|
|
||||||
return m_page_table;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets a reference to the process' handle table.
|
|
||||||
KHandleTable& GetHandleTable() {
|
|
||||||
return m_handle_table;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets a const reference to the process' handle table.
|
|
||||||
const KHandleTable& GetHandleTable() const {
|
|
||||||
return m_handle_table;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets a reference to process's memory.
|
|
||||||
Core::Memory::Memory& GetMemory() const;
|
|
||||||
|
|
||||||
Result SignalToAddress(KProcessAddress address) {
|
|
||||||
return m_condition_var.SignalToAddress(address);
|
|
||||||
}
|
|
||||||
|
|
||||||
Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
|
|
||||||
return m_condition_var.WaitForAddress(handle, address, tag);
|
|
||||||
}
|
|
||||||
|
|
||||||
void SignalConditionVariable(u64 cv_key, int32_t count) {
|
|
||||||
return m_condition_var.Signal(cv_key, count);
|
|
||||||
}
|
|
||||||
|
|
||||||
Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
|
|
||||||
R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
|
|
||||||
}
|
|
||||||
|
|
||||||
Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
|
|
||||||
s32 count) {
|
|
||||||
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
|
|
||||||
}
|
|
||||||
|
|
||||||
Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
|
|
||||||
s64 timeout) {
|
|
||||||
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
|
|
||||||
}
|
|
||||||
|
|
||||||
KProcessAddress GetProcessLocalRegionAddress() const {
|
|
||||||
return m_plr_address;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the current status of the process
|
|
||||||
State GetState() const {
|
|
||||||
return m_state;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the unique ID that identifies this particular process.
|
|
||||||
u64 GetProcessId() const {
|
|
||||||
return m_process_id;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the program ID corresponding to this process.
|
|
||||||
u64 GetProgramId() const {
|
|
||||||
return m_program_id;
|
|
||||||
}
|
|
||||||
|
|
||||||
KProcessAddress GetEntryPoint() const {
|
|
||||||
return m_code_address;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the resource limit descriptor for this process
|
|
||||||
KResourceLimit* GetResourceLimit() const;
|
|
||||||
|
|
||||||
/// Gets the ideal CPU core ID for this process
|
|
||||||
u8 GetIdealCoreId() const {
|
|
||||||
return m_ideal_core;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Checks if the specified thread priority is valid.
|
|
||||||
bool CheckThreadPriority(s32 prio) const {
|
|
||||||
return ((1ULL << prio) & GetPriorityMask()) != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the bitmask of allowed cores that this process' threads can run on.
|
|
||||||
u64 GetCoreMask() const {
|
|
||||||
return m_capabilities.GetCoreMask();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the bitmask of allowed thread priorities.
|
|
||||||
u64 GetPriorityMask() const {
|
|
||||||
return m_capabilities.GetPriorityMask();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the amount of secure memory to allocate for memory management.
|
|
||||||
u32 GetSystemResourceSize() const {
|
|
||||||
return m_system_resource_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the amount of secure memory currently in use for memory management.
|
|
||||||
u32 GetSystemResourceUsage() const {
|
|
||||||
// On hardware, this returns the amount of system resource memory that has
|
|
||||||
// been used by the kernel. This is problematic for Yuzu to emulate, because
|
|
||||||
// system resource memory is used for page tables -- and yuzu doesn't really
|
|
||||||
// have a way to calculate how much memory is required for page tables for
|
|
||||||
// the current process at any given time.
|
|
||||||
// TODO: Is this even worth implementing? Games may retrieve this value via
|
|
||||||
// an SDK function that gets used + available system resource size for debug
|
|
||||||
// or diagnostic purposes. However, it seems unlikely that a game would make
|
|
||||||
// decisions based on how much system memory is dedicated to its page tables.
|
|
||||||
// Is returning a value other than zero wise?
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Whether this process is an AArch64 or AArch32 process.
|
|
||||||
bool Is64BitProcess() const {
|
|
||||||
return m_is_64bit_process;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool IsSuspended() const {
|
|
||||||
return m_is_suspended;
|
|
||||||
}
|
|
||||||
|
|
||||||
void SetSuspended(bool suspended) {
|
|
||||||
m_is_suspended = suspended;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the total running time of the process instance in ticks.
|
|
||||||
u64 GetCPUTimeTicks() const {
|
|
||||||
return m_total_process_running_time_ticks;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Updates the total running time, adding the given ticks to it.
|
|
||||||
void UpdateCPUTimeTicks(u64 ticks) {
|
|
||||||
m_total_process_running_time_ticks += ticks;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets the process schedule count, used for thread yielding
|
|
||||||
s64 GetScheduledCount() const {
|
|
||||||
return m_schedule_count;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Increments the process schedule count, used for thread yielding.
|
|
||||||
void IncrementScheduledCount() {
|
|
||||||
++m_schedule_count;
|
|
||||||
}
|
|
||||||
|
|
||||||
void IncrementRunningThreadCount();
|
|
||||||
void DecrementRunningThreadCount();
|
|
||||||
|
|
||||||
void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
|
|
||||||
m_running_threads[core] = thread;
|
|
||||||
m_running_thread_idle_counts[core] = idle_count;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ClearRunningThread(KThread* thread) {
|
|
||||||
for (size_t i = 0; i < m_running_threads.size(); ++i) {
|
|
||||||
if (m_running_threads[i] == thread) {
|
|
||||||
m_running_threads[i] = nullptr;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
[[nodiscard]] KThread* GetRunningThread(s32 core) const {
|
|
||||||
return m_running_threads[core];
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ReleaseUserException(KThread* thread);
|
|
||||||
|
|
||||||
[[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
|
|
||||||
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
|
|
||||||
return m_pinned_threads[core_id];
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets 8 bytes of random data for svcGetInfo RandomEntropy
|
|
||||||
u64 GetRandomEntropy(std::size_t index) const {
|
|
||||||
return m_random_entropy.at(index);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Retrieves the total physical memory available to this process in bytes.
|
|
||||||
u64 GetTotalPhysicalMemoryAvailable();
|
|
||||||
|
|
||||||
/// Retrieves the total physical memory available to this process in bytes,
|
|
||||||
/// without the size of the personal system resource heap added to it.
|
|
||||||
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
|
|
||||||
|
|
||||||
/// Retrieves the total physical memory used by this process in bytes.
|
|
||||||
u64 GetTotalPhysicalMemoryUsed();
|
|
||||||
|
|
||||||
/// Retrieves the total physical memory used by this process in bytes,
|
|
||||||
/// without the size of the personal system resource heap added to it.
|
|
||||||
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
|
|
||||||
|
|
||||||
/// Gets the list of all threads created with this process as their owner.
|
|
||||||
std::list<KThread*>& GetThreadList() {
|
|
||||||
return m_thread_list;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Registers a thread as being created under this process,
|
|
||||||
/// adding it to this process' thread list.
|
|
||||||
void RegisterThread(KThread* thread);
|
|
||||||
|
|
||||||
-    /// Unregisters a thread from this process, removing it
-    /// from this process' thread list.
-    void UnregisterThread(KThread* thread);
-
-    /// Retrieves the number of available threads for this process.
-    u64 GetFreeThreadCount() const;
-
-    /// Clears the signaled state of the process if and only if it's signaled.
-    ///
-    /// @pre The process must not be already terminated. If this is called on a
-    ///      terminated process, then ResultInvalidState will be returned.
-    ///
-    /// @pre The process must be in a signaled state. If this is called on a
-    ///      process instance that is not signaled, ResultInvalidState will be
-    ///      returned.
-    Result Reset();
-
-    /**
-     * Loads process-specifics configuration info with metadata provided
-     * by an executable.
-     *
-     * @param metadata The provided metadata to load process specific info from.
-     *
-     * @returns ResultSuccess if all relevant metadata was able to be
-     *          loaded and parsed. Otherwise, an error code is returned.
-     */
-    Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
-                            bool is_hbl);
-
-    /**
-     * Starts the main application thread for this process.
-     *
-     * @param main_thread_priority The priority for the main thread.
-     * @param stack_size The stack size for the main thread in bytes.
-     */
-    void Run(s32 main_thread_priority, u64 stack_size);
-
-    /**
-     * Prepares a process for termination by stopping all of its threads
-     * and clearing any other resources.
-     */
-    void PrepareForTermination();
-
-    void LoadModule(CodeSet code_set, KProcessAddress base_addr);
-
-    bool IsInitialized() const override {
-        return m_is_initialized;
-    }
-
-    static void PostDestroy(uintptr_t arg) {}
-
-    void Finalize() override;
-
-    u64 GetId() const override {
-        return GetProcessId();
-    }
-
-    bool IsHbl() const {
-        return m_is_hbl;
-    }
-
-    bool IsSignaled() const override;
-
-    void DoWorkerTaskImpl();
-
-    Result SetActivity(ProcessActivity activity);
-
-    void PinCurrentThread(s32 core_id);
-    void UnpinCurrentThread(s32 core_id);
-    void UnpinThread(KThread* thread);
-
-    KLightLock& GetStateLock() {
-        return m_state_lock;
-    }
-
-    Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
-    void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
-
-    ///////////////////////////////////////////////////////////////////////////////////////////////
-    // Thread-local storage management
-
-    // Marks the next available region as used and returns the address of the slot.
-    [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
-
-    // Frees a used TLS slot identified by the given address
-    Result DeleteThreadLocalRegion(KProcessAddress addr);
-
-    ///////////////////////////////////////////////////////////////////////////////////////////////
-    // Debug watchpoint management
-
-    // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
-    bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
-
-    // Attempts to remove the watchpoint specified by the given parameters.
-    bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
-
-    const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
-        return m_watchpoints;
-    }
-
-    const std::string& GetName() {
-        return name;
-    }
-
 private:
+    using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
+    using TLPTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
+    using TLPIterator = TLPTree::iterator;
+
+private:
+    KPageTable m_page_table;
+    std::atomic<size_t> m_used_kernel_memory_size{};
+    TLPTree m_fully_used_tlp_tree{};
+    TLPTree m_partially_used_tlp_tree{};
+    s32 m_ideal_core_id{};
+    KResourceLimit* m_resource_limit{};
+    KSystemResource* m_system_resource{};
+    size_t m_memory_release_hint{};
+    State m_state{};
+    KLightLock m_state_lock;
+    KLightLock m_list_lock;
+    KConditionVariable m_cond_var;
+    KAddressArbiter m_address_arbiter;
+    std::array<u64, 4> m_entropy{};
+    bool m_is_signaled{};
+    bool m_is_initialized{};
+    bool m_is_application{};
+    bool m_is_default_application_system_resource{};
+    bool m_is_hbl{};
+    std::array<char, 13> m_name{};
+    std::atomic<u16> m_num_running_threads{};
+    Svc::CreateProcessFlag m_flags{};
+    KMemoryManager::Pool m_memory_pool{};
+    s64 m_schedule_count{};
+    KCapabilities m_capabilities{};
+    u64 m_program_id{};
+    u64 m_process_id{};
+    KProcessAddress m_code_address{};
+    size_t m_code_size{};
+    size_t m_main_thread_stack_size{};
+    size_t m_max_process_memory{};
+    u32 m_version{};
+    KHandleTable m_handle_table;
+    KProcessAddress m_plr_address{};
+    KThread* m_exception_thread{};
+    ThreadList m_thread_list{};
+    SharedMemoryInfoList m_shared_memory_list{};
+    bool m_is_suspended{};
+    bool m_is_immortal{};
+    bool m_is_handle_table_initialized{};
+    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
+    std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
+    std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_switch_counts{};
+    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
+    std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
+    std::map<KProcessAddress, u64> m_debug_page_refcounts{};
+    std::atomic<s64> m_cpu_time{};
+    std::atomic<s64> m_num_process_switches{};
+    std::atomic<s64> m_num_thread_switches{};
+    std::atomic<s64> m_num_fpu_switches{};
+    std::atomic<s64> m_num_supervisor_calls{};
+    std::atomic<s64> m_num_ipc_messages{};
+    std::atomic<s64> m_num_ipc_replies{};
+    std::atomic<s64> m_num_ipc_receives{};
+
+private:
+    Result StartTermination();
+    void FinishTermination();
+
     void PinThread(s32 core_id, KThread* thread) {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
         ASSERT(thread != nullptr);
@@ -431,6 +138,395 @@ private:
         m_pinned_threads[core_id] = nullptr;
     }
 
+public:
+    explicit KProcess(KernelCore& kernel);
+    ~KProcess() override;
+
+    Result Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
+                      bool is_real);
+
+    Result Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
+                      std::span<const u32> caps, KResourceLimit* res_limit,
+                      KMemoryManager::Pool pool, bool immortal);
+    Result Initialize(const Svc::CreateProcessParameter& params, std::span<const u32> user_caps,
+                      KResourceLimit* res_limit, KMemoryManager::Pool pool);
+    void Exit();
+
+    const char* GetName() const {
+        return m_name.data();
+    }
+
+    u64 GetProgramId() const {
+        return m_program_id;
+    }
+
+    u64 GetProcessId() const {
+        return m_process_id;
+    }
+
+    State GetState() const {
+        return m_state;
+    }
+
+    u64 GetCoreMask() const {
+        return m_capabilities.GetCoreMask();
+    }
+    u64 GetPhysicalCoreMask() const {
+        return m_capabilities.GetPhysicalCoreMask();
+    }
+    u64 GetPriorityMask() const {
+        return m_capabilities.GetPriorityMask();
+    }
+
+    s32 GetIdealCoreId() const {
+        return m_ideal_core_id;
+    }
+    void SetIdealCoreId(s32 core_id) {
+        m_ideal_core_id = core_id;
+    }
+
+    bool CheckThreadPriority(s32 prio) const {
+        return ((1ULL << prio) & this->GetPriorityMask()) != 0;
+    }
+
+    u32 GetCreateProcessFlags() const {
+        return static_cast<u32>(m_flags);
+    }
+
+    bool Is64Bit() const {
+        return True(m_flags & Svc::CreateProcessFlag::Is64Bit);
+    }
+
+    KProcessAddress GetEntryPoint() const {
+        return m_code_address;
+    }
+
+    size_t GetMainStackSize() const {
+        return m_main_thread_stack_size;
+    }
+
+    KMemoryManager::Pool GetMemoryPool() const {
+        return m_memory_pool;
+    }
+
+    u64 GetRandomEntropy(size_t i) const {
+        return m_entropy[i];
+    }
+
+    bool IsApplication() const {
+        return m_is_application;
+    }
+
+    bool IsDefaultApplicationSystemResource() const {
+        return m_is_default_application_system_resource;
+    }
+
+    bool IsSuspended() const {
+        return m_is_suspended;
+    }
+    void SetSuspended(bool suspended) {
+        m_is_suspended = suspended;
+    }
+
+    Result Terminate();
+
+    bool IsTerminated() const {
+        return m_state == State::Terminated;
+    }
+
+    bool IsPermittedSvc(u32 svc_id) const {
+        return m_capabilities.IsPermittedSvc(svc_id);
+    }
+
+    bool IsPermittedInterrupt(s32 interrupt_id) const {
+        return m_capabilities.IsPermittedInterrupt(interrupt_id);
+    }
+
+    bool IsPermittedDebug() const {
+        return m_capabilities.IsPermittedDebug();
+    }
+
+    bool CanForceDebug() const {
+        return m_capabilities.CanForceDebug();
+    }
+
+    bool IsHbl() const {
+        return m_is_hbl;
+    }
+
+    Kernel::KMemoryManager::Direction GetAllocateOption() const {
+        // TODO: property of the KPageTableBase
+        return KMemoryManager::Direction::FromFront;
+    }
+
+    ThreadList& GetThreadList() {
+        return m_thread_list;
+    }
+    const ThreadList& GetThreadList() const {
+        return m_thread_list;
+    }
+
+    bool EnterUserException();
+    bool LeaveUserException();
+    bool ReleaseUserException(KThread* thread);
+
+    KThread* GetPinnedThread(s32 core_id) const {
+        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+        return m_pinned_threads[core_id];
+    }
+
+    const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
+        return m_capabilities.GetSvcPermissions();
+    }
+
+    KResourceLimit* GetResourceLimit() const {
+        return m_resource_limit;
+    }
+
+    bool ReserveResource(Svc::LimitableResource which, s64 value);
+    bool ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout);
+    void ReleaseResource(Svc::LimitableResource which, s64 value);
+    void ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint);
+
+    KLightLock& GetStateLock() {
+        return m_state_lock;
+    }
+    KLightLock& GetListLock() {
+        return m_list_lock;
+    }
+
+    KPageTable& GetPageTable() {
+        return m_page_table;
+    }
+    const KPageTable& GetPageTable() const {
+        return m_page_table;
+    }
+
+    KHandleTable& GetHandleTable() {
+        return m_handle_table;
+    }
+    const KHandleTable& GetHandleTable() const {
+        return m_handle_table;
+    }
+
+    size_t GetUsedUserPhysicalMemorySize() const;
+    size_t GetTotalUserPhysicalMemorySize() const;
+    size_t GetUsedNonSystemUserPhysicalMemorySize() const;
+    size_t GetTotalNonSystemUserPhysicalMemorySize() const;
+
+    Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+    void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+
+    Result CreateThreadLocalRegion(KProcessAddress* out);
+    Result DeleteThreadLocalRegion(KProcessAddress addr);
+
+    KProcessAddress GetProcessLocalRegionAddress() const {
+        return m_plr_address;
+    }
+
+    KThread* GetExceptionThread() const {
+        return m_exception_thread;
+    }
+
+    void AddCpuTime(s64 diff) {
+        m_cpu_time += diff;
+    }
+    s64 GetCpuTime() {
+        return m_cpu_time.load();
+    }
+
+    s64 GetScheduledCount() const {
+        return m_schedule_count;
+    }
+    void IncrementScheduledCount() {
+        ++m_schedule_count;
+    }
+
+    void IncrementRunningThreadCount();
+    void DecrementRunningThreadCount();
+
+    size_t GetRequiredSecureMemorySizeNonDefault() const {
+        if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+            return secure_system_resource->CalculateRequiredSecureMemorySize();
+        }
+
+        return 0;
+    }
+
+    size_t GetRequiredSecureMemorySize() const {
+        if (m_system_resource->IsSecureResource()) {
+            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+            return secure_system_resource->CalculateRequiredSecureMemorySize();
+        }
+
+        return 0;
+    }
+
+    size_t GetTotalSystemResourceSize() const {
+        if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+            return secure_system_resource->GetSize();
+        }
+
+        return 0;
+    }
+
+    size_t GetUsedSystemResourceSize() const {
+        if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+            return secure_system_resource->GetUsedSize();
+        }
+
+        return 0;
+    }
+
+    void SetRunningThread(s32 core, KThread* thread, u64 idle_count, u64 switch_count) {
+        m_running_threads[core] = thread;
+        m_running_thread_idle_counts[core] = idle_count;
+        m_running_thread_switch_counts[core] = switch_count;
+    }
+
+    void ClearRunningThread(KThread* thread) {
+        for (size_t i = 0; i < m_running_threads.size(); ++i) {
+            if (m_running_threads[i] == thread) {
+                m_running_threads[i] = nullptr;
+            }
+        }
+    }
+
+    const KSystemResource& GetSystemResource() const {
+        return *m_system_resource;
+    }
+
+    const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
+        return m_system_resource->GetMemoryBlockSlabManager();
+    }
+    const KBlockInfoManager& GetBlockInfoManager() const {
+        return m_system_resource->GetBlockInfoManager();
+    }
+    const KPageTableManager& GetPageTableManager() const {
+        return m_system_resource->GetPageTableManager();
+    }
+
+    KThread* GetRunningThread(s32 core) const {
+        return m_running_threads[core];
+    }
+    u64 GetRunningThreadIdleCount(s32 core) const {
+        return m_running_thread_idle_counts[core];
+    }
+    u64 GetRunningThreadSwitchCount(s32 core) const {
+        return m_running_thread_switch_counts[core];
+    }
+
+    void RegisterThread(KThread* thread);
+    void UnregisterThread(KThread* thread);
+
+    Result Run(s32 priority, size_t stack_size);
+
+    Result Reset();
+
+    void SetDebugBreak() {
+        if (m_state == State::RunningAttached) {
+            this->ChangeState(State::DebugBreak);
+        }
+    }
+
+    void SetAttached() {
+        if (m_state == State::DebugBreak) {
+            this->ChangeState(State::RunningAttached);
+        }
+    }
+
+    Result SetActivity(Svc::ProcessActivity activity);
+
+    void PinCurrentThread();
+    void UnpinCurrentThread();
+    void UnpinThread(KThread* thread);
+
+    void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
+        return m_cond_var.Signal(cv_key, count);
+    }
+
+    Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
+        R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
+    }
+
+    Result SignalAddressArbiter(uintptr_t address, Svc::SignalType signal_type, s32 value,
+                                s32 count) {
+        R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
+    }
+
+    Result WaitAddressArbiter(uintptr_t address, Svc::ArbitrationType arb_type, s32 value,
+                              s64 timeout) {
+        R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+    }
+
+    Result GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids, s32 max_out_count);
+
+    static void Switch(KProcess* cur_process, KProcess* next_process);
+
+public:
+    // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
+    bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
+
+    // Attempts to remove the watchpoint specified by the given parameters.
+    bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
+
+    const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
+        return m_watchpoints;
+    }
+
+public:
+    Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
+                            bool is_hbl);
+
+    void LoadModule(CodeSet code_set, KProcessAddress base_addr);
+
+    Core::Memory::Memory& GetMemory() const;
+
+public:
+    // Overridden parent functions.
+    bool IsInitialized() const override {
+        return m_is_initialized;
+    }
+
+    static void PostDestroy(uintptr_t arg) {}
+
+    void Finalize() override;
+
+    u64 GetIdImpl() const {
+        return this->GetProcessId();
+    }
+    u64 GetId() const override {
+        return this->GetIdImpl();
+    }
+
+    virtual bool IsSignaled() const override {
+        ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+        return m_is_signaled;
+    }
+
+    void DoWorkerTaskImpl();
+
+private:
+    void ChangeState(State new_state) {
+        if (m_state != new_state) {
+            m_state = new_state;
+            m_is_signaled = true;
+            this->NotifyAvailable();
+        }
+    }
+
+    Result InitializeHandleTable(s32 size) {
+        // Try to initialize the handle table.
+        R_TRY(m_handle_table.Initialize(size));
+
+        // We succeeded, so note that we did.
+        m_is_handle_table_initialized = true;
+        R_SUCCEED();
+    }
+
     void FinalizeHandleTable() {
         // Finalize the table.
         m_handle_table.Finalize();
@@ -438,118 +534,6 @@ private:
         // Note that the table is finalized.
         m_is_handle_table_initialized = false;
     }
 
-    void ChangeState(State new_state);
-
-    /// Allocates the main thread stack for the process, given the stack size in bytes.
-    Result AllocateMainThreadStack(std::size_t stack_size);
-
-    /// Memory manager for this process
-    KPageTable m_page_table;
-
-    /// Current status of the process
-    State m_state{};
-
-    /// The ID of this process
-    u64 m_process_id = 0;
-
-    /// Title ID corresponding to the process
-    u64 m_program_id = 0;
-
-    /// Specifies additional memory to be reserved for the process's memory management by the
-    /// system. When this is non-zero, secure memory is allocated and used for page table allocation
-    /// instead of using the normal global page tables/memory block management.
-    u32 m_system_resource_size = 0;
-
-    /// Resource limit descriptor for this process
-    KResourceLimit* m_resource_limit{};
-
-    KVirtualAddress m_system_resource_address{};
-
-    /// The ideal CPU core for this process, threads are scheduled on this core by default.
-    u8 m_ideal_core = 0;
-
-    /// Contains the parsed process capability descriptors.
-    ProcessCapabilities m_capabilities;
-
-    /// Whether or not this process is AArch64, or AArch32.
-    /// By default, we currently assume this is true, unless otherwise
-    /// specified by metadata provided to the process during loading.
-    bool m_is_64bit_process = true;
-
-    /// Total running time for the process in ticks.
-    std::atomic<u64> m_total_process_running_time_ticks = 0;
-
-    /// Per-process handle table for storing created object handles in.
-    KHandleTable m_handle_table;
-
-    /// Per-process address arbiter.
-    KAddressArbiter m_address_arbiter;
-
-    /// The per-process mutex lock instance used for handling various
-    /// forms of services, such as lock arbitration, and condition
-    /// variable related facilities.
-    KConditionVariable m_condition_var;
-
-    /// Address indicating the location of the process' dedicated TLS region.
-    KProcessAddress m_plr_address = 0;
-
-    /// Address indicating the location of the process's entry point.
-    KProcessAddress m_code_address = 0;
-
-    /// Random values for svcGetInfo RandomEntropy
-    std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
-
-    /// List of threads that are running with this process as their owner.
-    std::list<KThread*> m_thread_list;
-
-    /// List of shared memory that are running with this process as their owner.
-    std::list<KSharedMemoryInfo*> m_shared_memory_list;
-
-    /// Address of the top of the main thread's stack
-    KProcessAddress m_main_thread_stack_top{};
-
-    /// Size of the main thread's stack
-    std::size_t m_main_thread_stack_size{};
-
-    /// Memory usage capacity for the process
-    std::size_t m_memory_usage_capacity{};
-
-    /// Process total image size
-    std::size_t m_image_size{};
-
-    /// Schedule count of this process
-    s64 m_schedule_count{};
-
-    size_t m_memory_release_hint{};
-
-    std::string name{};
-
-    bool m_is_signaled{};
-    bool m_is_suspended{};
-    bool m_is_immortal{};
-    bool m_is_handle_table_initialized{};
-    bool m_is_initialized{};
-    bool m_is_hbl{};
-
-    std::atomic<u16> m_num_running_threads{};
-
-    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
-    std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
-    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
-    std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
-    std::map<KProcessAddress, u64> m_debug_page_refcounts;
-
-    KThread* m_exception_thread{};
-
-    KLightLock m_state_lock;
-    KLightLock m_list_lock;
-
-    using TLPTree =
-        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
-    using TLPIterator = TLPTree::iterator;
-    TLPTree m_fully_used_tlp_tree;
-    TLPTree m_partially_used_tlp_tree;
 };
 
 } // namespace Kernel
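
Note on the header changes above: the old static `KProcess::Initialize(process, system, name, type, resource_limit)` entry point is gone, and construction now goes through `KProcess::Create` plus the new instance `Initialize` overloads keyed on `Svc::CreateProcessParameter`. A minimal usage sketch follows; it assumes a `KernelCore& kernel` and the usual kernel headers, and simply mirrors the call sequence the KernelCore changes later in this diff switch to (error handling elided):

```cpp
// Sketch only, not part of this change: create, initialize, and register a process.
Kernel::KProcess* MakeKernelProcess(Kernel::KernelCore& kernel) {
    // Allocate the object.
    Kernel::KProcess* process = Kernel::KProcess::Create(kernel);

    // Initialize from a default CreateProcessParameter and the system resource limit;
    // the trailing argument is the new `is_real` flag.
    ASSERT(R_SUCCEEDED(process->Initialize(Kernel::Svc::CreateProcessParameter{},
                                           kernel.GetSystemResourceLimit(), false)));

    // Make it visible to the rest of the kernel.
    Kernel::KProcess::Register(kernel, process);
    return process;
}
```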
@@ -190,7 +190,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     if (m_state.should_count_idle) {
         if (highest_thread != nullptr) [[likely]] {
             if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
-                process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
+                process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, 0);
             }
         } else {
             m_state.idle_count++;
@@ -356,7 +356,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
         const s64 tick_diff = cur_tick - prev_tick;
         cur_thread->AddCpuTime(m_core_id, tick_diff);
         if (cur_process != nullptr) {
-            cur_process->UpdateCPUTimeTicks(tick_diff);
+            cur_process->AddCpuTime(tick_diff);
         }
         m_last_context_switch_time = cur_tick;
@@ -1,25 +1,100 @@
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include "core/core.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/k_system_resource.h"
 
 namespace Kernel {
 
 Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
                                          KMemoryManager::Pool pool) {
-    // Unimplemented
-    UNREACHABLE();
+    // Set members.
+    m_resource_limit = resource_limit;
+    m_resource_size = size;
+    m_resource_pool = pool;
+
+    // Determine required size for our secure resource.
+    const size_t secure_size = this->CalculateRequiredSecureMemorySize();
+
+    // Reserve memory for our secure resource.
+    KScopedResourceReservation memory_reservation(
+        m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, secure_size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+    // Allocate secure memory.
+    R_TRY(KSystemControl::AllocateSecureMemory(m_kernel, std::addressof(m_resource_address),
+                                               m_resource_size, static_cast<u32>(m_resource_pool)));
+    ASSERT(m_resource_address != 0);
+
+    // Ensure we clean up the secure memory, if we fail past this point.
+    ON_RESULT_FAILURE {
+        KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
+                                         static_cast<u32>(m_resource_pool));
+    };
+
+    // Check that our allocation is bigger than the reference counts needed for it.
+    const size_t rc_size =
+        Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
+    R_UNLESS(m_resource_size > rc_size, ResultOutOfMemory);
+
+    // Get resource pointer.
+    KPhysicalAddress resource_paddr =
+        KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
+    auto* resource =
+        m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
+
+    // Initialize slab heaps.
+    m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size,
+                                      PageSize);
+    m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, resource);
+    m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+    m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+
+    // Initialize managers.
+    m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager),
+                                    std::addressof(m_page_table_heap));
+    m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager),
+                                           std::addressof(m_memory_block_heap));
+    m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager),
+                                    std::addressof(m_block_info_heap));
+
+    // Set our managers.
+    this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);
+
+    // Commit the memory reservation.
+    memory_reservation.Commit();
+
+    // Open reference to our resource limit.
+    m_resource_limit->Open();
+
+    // Set ourselves as initialized.
+    m_is_initialized = true;
+
+    R_SUCCEED();
 }
 
 void KSecureSystemResource::Finalize() {
-    // Unimplemented
-    UNREACHABLE();
+    // Check that we have no outstanding allocations.
+    ASSERT(m_memory_block_slab_manager.GetUsed() == 0);
+    ASSERT(m_block_info_manager.GetUsed() == 0);
+    ASSERT(m_page_table_manager.GetUsed() == 0);
+
+    // Free our secure memory.
+    KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
+                                     static_cast<u32>(m_resource_pool));
+
+    // Release the memory reservation.
+    m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+                              this->CalculateRequiredSecureMemorySize());
+
+    // Close reference to our resource limit.
+    m_resource_limit->Close();
 }
 
 size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
                                                                 KMemoryManager::Pool pool) {
-    // Unimplemented
-    UNREACHABLE();
+    return KSystemControl::CalculateRequiredSecureMemorySize(size, static_cast<u32>(pool));
 }
 
 } // namespace Kernel
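
The new `KSecureSystemResource::Initialize` above follows a reserve, allocate, commit pattern: the resource-limit reservation is taken up front, rolled back automatically if any later step fails (`ON_RESULT_FAILURE` frees the secure memory), and only committed once everything succeeded. A simplified, standalone illustration of that commit/rollback shape, with hypothetical `Limit`/`ScopedReservation` types standing in for `KResourceLimit`/`KScopedResourceReservation`:

```cpp
#include <cstdio>

struct Limit {
    long current = 0;
    long maximum = 0;
    bool Reserve(long n) {
        if (current + n > maximum) {
            return false;
        }
        current += n;
        return true;
    }
    void Release(long n) { current -= n; }
};

class ScopedReservation {
public:
    ScopedReservation(Limit& limit, long amount) : m_limit(limit), m_amount(amount) {
        m_succeeded = m_limit.Reserve(m_amount);
    }
    ~ScopedReservation() {
        // Roll back unless the caller committed after all fallible steps succeeded.
        if (m_succeeded && !m_committed) {
            m_limit.Release(m_amount);
        }
    }
    bool Succeeded() const { return m_succeeded; }
    void Commit() { m_committed = true; }

private:
    Limit& m_limit;
    long m_amount;
    bool m_succeeded = false;
    bool m_committed = false;
};

int main() {
    Limit physical_memory{0, 4096};
    ScopedReservation reservation(physical_memory, 1024);
    if (!reservation.Succeeded()) {
        return 1; // Analogous to returning ResultLimitReached.
    }
    // ... fallible allocation steps would go here; an early return releases the reservation ...
    reservation.Commit(); // Everything succeeded, so keep the reservation.
    std::printf("reserved: %ld\n", physical_memory.current);
    return 0;
}
```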
@@ -122,16 +122,15 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
     case ThreadType::Main:
         ASSERT(arg == 0);
         [[fallthrough]];
-    case ThreadType::HighPriority:
-        [[fallthrough]];
-    case ThreadType::Dummy:
-        [[fallthrough]];
     case ThreadType::User:
         ASSERT(((owner == nullptr) ||
                 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
         ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
                 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
         break;
+    case ThreadType::HighPriority:
+    case ThreadType::Dummy:
+        break;
     case ThreadType::Kernel:
         UNIMPLEMENTED();
         break;
@@ -216,6 +215,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
     // Setup the TLS, if needed.
     if (type == ThreadType::User) {
         R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
+        owner->GetMemory().ZeroBlock(m_tls_address, Svc::ThreadLocalRegionSize);
     }
 
     m_parent = owner;
@@ -403,7 +403,7 @@ void KThread::StartTermination() {
     if (m_parent != nullptr) {
         m_parent->ReleaseUserException(this);
         if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
-            m_parent->UnpinCurrentThread(m_core_id);
+            m_parent->UnpinCurrentThread();
         }
     }
 
@@ -415,10 +415,6 @@ void KThread::StartTermination() {
         m_parent->ClearRunningThread(this);
     }
 
-    // Signal.
-    m_signaled = true;
-    KSynchronizationObject::NotifyAvailable();
-
     // Clear previous thread in KScheduler.
     KScheduler::ClearPreviousThread(m_kernel, this);
 
@@ -437,6 +433,13 @@ void KThread::FinishTermination() {
         }
     }
 
+    // Acquire the scheduler lock.
+    KScopedSchedulerLock sl{m_kernel};
+
+    // Signal.
+    m_signaled = true;
+    KSynchronizationObject::NotifyAvailable();
+
     // Close the thread.
     this->Close();
 }
@@ -820,7 +823,7 @@ void KThread::CloneFpuStatus() {
     ASSERT(this->GetOwnerProcess() != nullptr);
     ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
 
-    if (this->GetOwnerProcess()->Is64BitProcess()) {
+    if (this->GetOwnerProcess()->Is64Bit()) {
         // Clone FPSR and FPCR.
         ThreadContext64 cur_ctx{};
         m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
@@ -923,7 +926,7 @@ Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
 
     // If we're not terminating, get the thread's user context.
     if (!this->IsTerminationRequested()) {
-        if (m_parent->Is64BitProcess()) {
+        if (m_parent->Is64Bit()) {
             // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
             auto context = GetContext64();
             context.pstate &= 0xFF0FFE20;
@@ -1174,6 +1177,9 @@ Result KThread::Run() {
         owner->IncrementRunningThreadCount();
     }
 
+    // Open a reference, now that we're running.
+    this->Open();
+
     // Set our state and finish.
     this->SetState(ThreadState::Runnable);
 
@@ -721,6 +721,7 @@ private:
     // For core KThread implementation
     ThreadContext32 m_thread_context_32{};
    ThreadContext64 m_thread_context_64{};
+    Common::IntrusiveListNode m_process_list_node;
     Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
     s32 m_priority{};
     using ConditionVariableThreadTreeTraits =
@@ -101,35 +101,31 @@ struct KernelCore::Impl {
 
     void InitializeCores() {
         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id]->Initialize((*application_process).Is64BitProcess());
+            cores[core_id]->Initialize((*application_process).Is64Bit());
             system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
         }
     }
 
-    void CloseApplicationProcess() {
-        KProcess* old_process = application_process.exchange(nullptr);
-        if (old_process == nullptr) {
-            return;
-        }
-
-        // old_process->Close();
-        // TODO: The process should be destroyed based on accurate ref counting after
-        // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
-        old_process->Finalize();
-        old_process->Destroy();
+    void TerminateApplicationProcess() {
+        application_process.load()->Terminate();
     }
 
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
         SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
 
-        process_list.clear();
-
         CloseServices();
 
+        auto* old_process = application_process.exchange(nullptr);
+        if (old_process) {
+            old_process->Close();
+        }
+
+        process_list.clear();
+
         next_object_id = 0;
-        next_kernel_process_id = KProcess::InitialKIPIDMin;
-        next_user_process_id = KProcess::ProcessIDMin;
+        next_kernel_process_id = KProcess::InitialProcessIdMin;
+        next_user_process_id = KProcess::ProcessIdMin;
         next_thread_id = 1;
 
         global_handle_table->Finalize();
@@ -176,8 +172,6 @@ struct KernelCore::Impl {
             }
         }
 
-        CloseApplicationProcess();
-
         // Track kernel objects that were not freed on shutdown
         {
             std::scoped_lock lk{registered_objects_lock};
@@ -344,6 +338,8 @@ struct KernelCore::Impl {
         // Create the system page table managers.
         app_system_resource = std::make_unique<KSystemResource>(kernel);
         sys_system_resource = std::make_unique<KSystemResource>(kernel);
+        KAutoObject::Create(std::addressof(*app_system_resource));
+        KAutoObject::Create(std::addressof(*sys_system_resource));
 
         // Set the managers for the system resources.
         app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
@@ -792,8 +788,8 @@ struct KernelCore::Impl {
     std::mutex registered_in_use_objects_lock;
 
     std::atomic<u32> next_object_id{0};
-    std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
-    std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
+    std::atomic<u64> next_kernel_process_id{KProcess::InitialProcessIdMin};
+    std::atomic<u64> next_user_process_id{KProcess::ProcessIdMin};
     std::atomic<u64> next_thread_id{1};
 
     // Lists all processes that exist in the current session.
@@ -924,10 +920,6 @@ const KProcess* KernelCore::ApplicationProcess() const {
     return impl->application_process;
 }
 
-void KernelCore::CloseApplicationProcess() {
-    impl->CloseApplicationProcess();
-}
-
 const std::vector<KProcess*>& KernelCore::GetProcessList() const {
     return impl->process_list;
 }
@@ -1128,8 +1120,8 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
                                               std::function<void()> func) {
     // Make a new process.
     KProcess* process = KProcess::Create(*this);
-    ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
-                                            GetSystemResourceLimit())));
+    ASSERT(R_SUCCEEDED(
+        process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
     SCOPE_EXIT({ process->Close(); });
@@ -1156,8 +1148,8 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
 
     // Make a new process.
     KProcess* process = KProcess::Create(*this);
-    ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
-                                            GetSystemResourceLimit())));
+    ASSERT(R_SUCCEEDED(
+        process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
     SCOPE_EXIT({ process->Close(); });
@@ -1266,7 +1258,8 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
 
 void KernelCore::SuspendApplication(bool suspended) {
     const bool should_suspend{exception_exited || suspended};
-    const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
+    const auto activity =
+        should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
 
     // Get the application process.
     KScopedAutoObject<KProcess> process = ApplicationProcess();
@@ -1300,6 +1293,8 @@ void KernelCore::SuspendApplication(bool suspended) {
 }
 
 void KernelCore::ShutdownCores() {
+    impl->TerminateApplicationProcess();
+
     KScopedSchedulerLock lk{*this};
 
     for (auto* thread : impl->shutdown_threads) {
@@ -134,9 +134,6 @@ public:
     /// Retrieves a const pointer to the application process.
     const KProcess* ApplicationProcess() const;
 
-    /// Closes the application process.
-    void CloseApplicationProcess();
-
     /// Retrieves the list of processes.
     const std::vector<KProcess*>& GetProcessList() const;
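
The KernelCore changes above move application-process teardown out of `CloseApplicationProcess()` and into `ShutdownCores()`/`Shutdown()`: the process is terminated first, and the final reference is dropped via an atomic `exchange` followed by `Close()`. A simplified, standalone sketch of that exchange-then-close shape (hypothetical `Process` type, not yuzu code):

```cpp
#include <atomic>
#include <cstdio>

struct Process {
    void Close() { std::puts("reference closed"); }
};

std::atomic<Process*> application_process{nullptr};

void Shutdown() {
    // exchange() guarantees only one caller observes the non-null pointer, so Close() runs once.
    if (Process* old_process = application_process.exchange(nullptr); old_process != nullptr) {
        old_process->Close();
    }
}

int main() {
    Process p;
    application_process.store(&p);
    Shutdown();
    Shutdown(); // Second call is a no-op.
    return 0;
}
```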
@@ -4426,7 +4426,7 @@ void Call(Core::System& system, u32 imm) {
     auto& kernel = system.Kernel();
     kernel.EnterSVCProfile();
 
-    if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+    if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
         Call64(system, imm);
     } else {
         Call32(system, imm);
@@ -86,20 +86,19 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::TotalMemorySize:
-        *result = process->GetTotalPhysicalMemoryAvailable();
+        *result = process->GetTotalUserPhysicalMemorySize();
         R_SUCCEED();
 
     case InfoType::UsedMemorySize:
-        *result = process->GetTotalPhysicalMemoryUsed();
+        *result = process->GetUsedUserPhysicalMemorySize();
         R_SUCCEED();
 
     case InfoType::SystemResourceSizeTotal:
-        *result = process->GetSystemResourceSize();
+        *result = process->GetTotalSystemResourceSize();
         R_SUCCEED();
 
     case InfoType::SystemResourceSizeUsed:
-        LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
-        *result = process->GetSystemResourceUsage();
+        *result = process->GetUsedSystemResourceSize();
         R_SUCCEED();
 
     case InfoType::ProgramId:
@@ -111,20 +110,29 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
         R_SUCCEED();
 
     case InfoType::TotalNonSystemMemorySize:
-        *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+        *result = process->GetTotalNonSystemUserPhysicalMemorySize();
         R_SUCCEED();
 
     case InfoType::UsedNonSystemMemorySize:
-        *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
+        *result = process->GetUsedNonSystemUserPhysicalMemorySize();
         R_SUCCEED();
 
     case InfoType::IsApplication:
         LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
-        *result = true;
+        *result = process->IsApplication();
         R_SUCCEED();
 
     case InfoType::FreeThreadCount:
-        *result = process->GetFreeThreadCount();
+        if (KResourceLimit* resource_limit = process->GetResourceLimit();
+            resource_limit != nullptr) {
+            const auto current_value =
+                resource_limit->GetCurrentValue(Svc::LimitableResource::ThreadCountMax);
+            const auto limit_value =
+                resource_limit->GetLimitValue(Svc::LimitableResource::ThreadCountMax);
+            *result = limit_value - current_value;
+        } else {
+            *result = 0;
+        }
         R_SUCCEED();
 
     default:
@@ -161,7 +169,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
 
     case InfoType::RandomEntropy:
         R_UNLESS(handle == 0, ResultInvalidHandle);
-        R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);
+        R_UNLESS(info_sub_id < 4, ResultInvalidCombination);
 
         *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
         R_SUCCEED();
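
With the `GetInfo` change above, `InfoType::FreeThreadCount` is no longer read from a per-process counter; it is derived from the process resource limit as limit minus current for `ThreadCountMax`, or 0 when no resource limit is attached. A hypothetical standalone mirror of that computation (types simplified, not the real kernel classes):

```cpp
#include <cstdint>
#include <cstdio>

struct ResourceLimit {
    std::int64_t current_threads;
    std::int64_t max_threads;
    std::int64_t GetCurrentValue() const { return current_threads; }
    std::int64_t GetLimitValue() const { return max_threads; }
};

std::uint64_t GetFreeThreadCount(const ResourceLimit* limit) {
    if (limit == nullptr) {
        return 0; // Mirrors the diff: no resource limit means no free threads reported.
    }
    return static_cast<std::uint64_t>(limit->GetLimitValue() - limit->GetCurrentValue());
}

int main() {
    const ResourceLimit limit{5, 32};
    std::printf("free threads: %llu\n",
                static_cast<unsigned long long>(GetFreeThreadCount(&limit)));
    return 0;
}
```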
@@ -17,7 +17,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u3
     R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
     R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
 
-    R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
+    R_RETURN(KConditionVariable::WaitForAddress(system.Kernel(), thread_handle, address, tag));
 }
 
 /// Unlock a mutex
@@ -28,7 +28,7 @@ Result ArbitrateUnlock(Core::System& system, u64 address) {
     R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
     R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
 
-    R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
+    R_RETURN(KConditionVariable::SignalToAddress(system.Kernel(), address));
 }
 
 Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
@@ -46,7 +46,7 @@ Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
     KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
     auto& page_table{current_process->GetPageTable()};
 
-    if (current_process->GetSystemResourceSize() == 0) {
+    if (current_process->GetTotalSystemResourceSize() == 0) {
         LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
         R_THROW(ResultInvalidState);
     }
@@ -95,7 +95,7 @@ Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
     KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
     auto& page_table{current_process->GetPageTable()};
 
-    if (current_process->GetSystemResourceSize() == 0) {
+    if (current_process->GetTotalSystemResourceSize() == 0) {
         LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
         R_THROW(ResultInvalidState);
     }
@@ -132,7 +132,7 @@ void SynchronizePreemptionState(Core::System& system) {
         GetCurrentThread(kernel).ClearInterruptFlag();
 
         // Unpin the current thread.
-        cur_process->UnpinCurrentThread(core_id);
+        cur_process->UnpinCurrentThread();
     }
 }
 
@@ -85,10 +85,6 @@ Result StartThread(Core::System& system, Handle thread_handle) {
     // Try to start the thread.
     R_TRY(thread->Run());
 
-    // If we succeeded, persist a reference to the thread.
-    thread->Open();
-    system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
-
     R_SUCCEED();
 }
 
@@ -99,7 +95,6 @@ void ExitThread(Core::System& system) {
     auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
     system.GlobalSchedulerContext().RemoveThread(current_thread);
     current_thread->Exit();
-    system.Kernel().UnregisterInUseObject(current_thread);
 }
 
 /// Sleep the current thread
@@ -260,7 +255,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_
 
     auto list_iter = thread_list.cbegin();
     for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
-        memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
+        memory.Write64(out_thread_ids, list_iter->GetThreadId());
         out_thread_ids += sizeof(u64);
     }
 
@@ -592,7 +592,7 @@ void Call(Core::System& system, u32 imm) {
     auto& kernel = system.Kernel();
     kernel.EnterSVCProfile();
 
-    if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+    if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
         Call64(system, imm);
     } else {
         Call32(system, imm);
@@ -604,13 +604,57 @@ enum class ProcessActivity : u32 {
     Paused,
 };
 
+enum class CreateProcessFlag : u32 {
+    // Is 64 bit?
+    Is64Bit = (1 << 0),
+
+    // What kind of address space?
+    AddressSpaceShift = 1,
+    AddressSpaceMask = (7 << AddressSpaceShift),
+    AddressSpace32Bit = (0 << AddressSpaceShift),
+    AddressSpace64BitDeprecated = (1 << AddressSpaceShift),
+    AddressSpace32BitWithoutAlias = (2 << AddressSpaceShift),
+    AddressSpace64Bit = (3 << AddressSpaceShift),
+
+    // Should JIT debug be done on crash?
+    EnableDebug = (1 << 4),
+
+    // Should ASLR be enabled for the process?
+    EnableAslr = (1 << 5),
+
+    // Is the process an application?
+    IsApplication = (1 << 6),
+
+    // 4.x deprecated: Should use secure memory?
+    DeprecatedUseSecureMemory = (1 << 7),
+
+    // 5.x+ Pool partition type.
+    PoolPartitionShift = 7,
+    PoolPartitionMask = (0xF << PoolPartitionShift),
+    PoolPartitionApplication = (0 << PoolPartitionShift),
+    PoolPartitionApplet = (1 << PoolPartitionShift),
+    PoolPartitionSystem = (2 << PoolPartitionShift),
+    PoolPartitionSystemNonSecure = (3 << PoolPartitionShift),
+
+    // 7.x+ Should memory allocation be optimized? This requires IsApplication.
+    OptimizeMemoryAllocation = (1 << 11),
+
+    // 11.x+ DisableDeviceAddressSpaceMerge.
+    DisableDeviceAddressSpaceMerge = (1 << 12),
+
+    // Mask of all flags.
+    All = Is64Bit | AddressSpaceMask | EnableDebug | EnableAslr | IsApplication |
+          PoolPartitionMask | OptimizeMemoryAllocation | DisableDeviceAddressSpaceMerge,
+};
+DECLARE_ENUM_FLAG_OPERATORS(CreateProcessFlag);
+
 struct CreateProcessParameter {
     std::array<char, 12> name;
     u32 version;
     u64 program_id;
     u64 code_address;
     s32 code_num_pages;
-    u32 flags;
+    CreateProcessFlag flags;
     Handle reslimit;
     s32 system_resource_num_pages;
 };
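
The new `Svc::CreateProcessFlag` above packs properties that used to live in separate `KProcess` fields (64-bit, address space, ASLR, application, pool partition) into the single bitfield consumed by `CreateProcessParameter::flags`. A standalone sketch of composing and testing those bits follows; the enum values are copied from the diff, while the operators and the `True` helper here are stand-ins for the project's `DECLARE_ENUM_FLAG_OPERATORS` machinery (the same test `KProcess::Is64Bit()` now performs on `m_flags`):

```cpp
#include <cstdint>
#include <cstdio>

enum class CreateProcessFlag : std::uint32_t {
    Is64Bit = (1 << 0),
    AddressSpace64Bit = (3 << 1),
    EnableAslr = (1 << 5),
    IsApplication = (1 << 6),
    PoolPartitionApplication = (0 << 7),
};

constexpr CreateProcessFlag operator|(CreateProcessFlag a, CreateProcessFlag b) {
    return static_cast<CreateProcessFlag>(static_cast<std::uint32_t>(a) |
                                          static_cast<std::uint32_t>(b));
}
constexpr CreateProcessFlag operator&(CreateProcessFlag a, CreateProcessFlag b) {
    return static_cast<CreateProcessFlag>(static_cast<std::uint32_t>(a) &
                                          static_cast<std::uint32_t>(b));
}
constexpr bool True(CreateProcessFlag f) {
    return static_cast<std::uint32_t>(f) != 0;
}

int main() {
    // A 64-bit, ASLR-enabled application in the application pool partition.
    const auto flags = CreateProcessFlag::Is64Bit | CreateProcessFlag::AddressSpace64Bit |
                       CreateProcessFlag::EnableAslr | CreateProcessFlag::IsApplication |
                       CreateProcessFlag::PoolPartitionApplication;
    std::printf("is 64-bit: %d\n", True(flags & CreateProcessFlag::Is64Bit));
    return 0;
}
```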
@@ -21,10 +21,8 @@ ServiceContext::ServiceContext(Core::System& system_, std::string name_)
 
     // Create the process.
     process = Kernel::KProcess::Create(kernel);
-    ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
-                                        Kernel::KProcess::ProcessType::KernelInternal,
-                                        kernel.GetSystemResourceLimit())
-               .IsSuccess());
+    ASSERT(R_SUCCEEDED(process->Initialize(Kernel::Svc::CreateProcessParameter{},
+                                           kernel.GetSystemResourceLimit(), false)));
 
     // Register the process.
     Kernel::KProcess::Register(kernel, process);
@@ -66,7 +66,6 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
         "ScreenComposition",
         [this](std::uintptr_t, s64 time,
                std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
-            { const auto lock_guard = Lock(); }
             vsync_signal.Set();
             return std::chrono::nanoseconds(GetNextTicks());
         });
@@ -99,6 +98,7 @@ Nvnflinger::~Nvnflinger() {
     }

     ShutdownLayers();
+    vsync_thread = {};

     if (nvdrv) {
         nvdrv->Close(disp_fd);
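The new vsync_thread = {}; line sits between ShutdownLayers() and nvdrv->Close(disp_fd), so the vsync worker is torn down before the driver it talks to goes away. Below is a generic sketch of that join-before-release ordering using std::jthread; yuzu's own thread wrapper may behave differently, and this only illustrates the idea.

// Sketch only: stop and join the worker before releasing what it uses.
#include <chrono>
#include <cstdio>
#include <memory>
#include <stop_token>
#include <thread>

struct Driver {
    void Close() { std::puts("driver closed"); }
};

int main() {
    auto driver = std::make_unique<Driver>();

    std::jthread vsync_thread{[&](std::stop_token stop) {
        while (!stop.stop_requested()) {
            // Pretend to signal vsync and poke *driver here.
            std::this_thread::sleep_for(std::chrono::milliseconds(16));
        }
    }};

    // Teardown: assigning a default-constructed jthread requests stop and joins,
    // so the worker is guaranteed gone before the driver is closed below.
    vsync_thread = {};
    driver->Close();
    return 0;
}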
@@ -106,6 +106,7 @@ Nvnflinger::~Nvnflinger() {
 }

 void Nvnflinger::ShutdownLayers() {
+    const auto lock_guard = Lock();
     for (auto& display : displays) {
         for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
             display.GetLayer(layer).Core().NotifyShutdown();
@@ -229,16 +230,6 @@ VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) {
     return display->FindLayer(layer_id);
 }

-const VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) const {
-    const auto* const display = FindDisplay(display_id);
-
-    if (display == nullptr) {
-        return nullptr;
-    }
-
-    return display->FindLayer(layer_id);
-}
-
 VI::Layer* Nvnflinger::FindOrCreateLayer(u64 display_id, u64 layer_id) {
     auto* const display = FindDisplay(display_id);

@@ -288,7 +279,6 @@ void Nvnflinger::Compose() {
         auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
         ASSERT(nvdisp);

-        guard->unlock();
         Common::Rectangle<int> crop_rect{
             static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
             static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
@@ -299,7 +289,6 @@ void Nvnflinger::Compose() {
                        buffer.fence.fences, buffer.fence.num_fences);

         MicroProfileFlip();
-        guard->lock();

         swap_interval = buffer.swap_interval;
@@ -117,9 +117,6 @@ private:
     /// Finds the layer identified by the specified ID in the desired display.
     [[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id);

-    /// Finds the layer identified by the specified ID in the desired display.
-    [[nodiscard]] const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
-
     /// Finds the layer identified by the specified ID in the desired display,
     /// or creates the layer if it is not found.
     /// To be used when the system expects the specified ID to already exist.
@@ -37,7 +37,7 @@ std::optional<Kernel::KProcess*> SearchProcessList(
 void GetApplicationPidGeneric(HLERequestContext& ctx,
                               const std::vector<Kernel::KProcess*>& process_list) {
     const auto process = SearchProcessList(process_list, [](const auto& proc) {
-        return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin;
+        return proc->GetProcessId() == Kernel::KProcess::ProcessIdMin;
     });

     IPC::ResponseBuilder rb{ctx, 4};
@@ -116,7 +116,7 @@ json GetProcessorStateDataAuto(Core::System& system) {
     Core::ARM_Interface::ThreadContext64 context{};
     arm.SaveContext(context);

-    return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32",
+    return GetProcessorStateData(process->Is64Bit() ? "AArch64" : "AArch32",
                                  GetInteger(process->GetEntryPoint()), context.sp, context.pc,
                                  context.pstate, context.cpu_registers);
 }
@@ -127,7 +127,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons
         return list;
     }

-    if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64BitProcess()) {
+    if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64Bit()) {
         return list;
     }
@@ -2019,7 +2019,7 @@ void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t
             std::filesystem::path{Common::U16StringFromBuffer(filename.utf16(), filename.size())}
                 .filename());
     }
-    const bool is_64bit = system->Kernel().ApplicationProcess()->Is64BitProcess();
+    const bool is_64bit = system->Kernel().ApplicationProcess()->Is64Bit();
     const auto instruction_set_suffix = is_64bit ? tr("(64-bit)") : tr("(32-bit)");
     title_name = tr("%1 %2", "%1 is the title name. %2 indicates if the title is 64-bit or 32-bit")
                      .arg(QString::fromStdString(title_name), instruction_set_suffix)