Merge branch 'yuzu-emu:master' into new-shortcut

Commit b76a1d987f: 34 changed files with 587 additions and 288 deletions
@@ -3,4 +3,4 @@
 [codespell]
 skip = ./.git,./build,./dist,./Doxyfile,./externals,./LICENSES,./src/android/app/src/main/res
-ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,zink
+ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,vas,zink
@@ -822,11 +822,13 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
             const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
             const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
             const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+            const char p =
+                True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';

-            reply +=
-                fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{} [{}, {}]\n",
-                            mem_info.base_address, mem_info.base_address + mem_info.size - 1,
-                            perm, state, l, i, d, u, mem_info.ipc_count, mem_info.device_count);
+            reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
+                                 mem_info.base_address,
+                                 mem_info.base_address + mem_info.size - 1, perm, state, l, i,
+                                 d, u, p, mem_info.ipc_count, mem_info.device_count);
         }

         const uintptr_t next_address = mem_info.base_address + mem_info.size;
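[Note, not part of the diff] The reworked reply line carries one extra attribute column ('P' for PermissionLocked), so the format string now has nine flag placeholders instead of eight. A minimal, self-contained sketch of how one such line is produced, using illustrative stand-in values (FormatExampleLine and all values are hypothetical, only the format string comes from the change above):

    #include <fmt/format.h>
    #include <string>

    std::string FormatExampleLine() {
        const char perm[] = "r-x";   // stand-in permission string
        const char state[] = "Code"; // stand-in state name
        const char l = '-', i = '-', d = '-', u = '-', p = 'P';
        // Eleven placeholders, eleven arguments: two addresses, perm, state,
        // five attribute flag characters, then the two reference counts.
        return fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
                           0x80000000ULL, 0x80003fffULL, perm, state, l, i, d, u, p, 0, 0);
    }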
@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
 /// memory.
 static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
                                                     KVirtualAddress slab_addr) {
-    slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+    slab_addr -= memory_layout.GetSlabRegion().GetAddress();
     return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
 }

@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();

     // Get the start of the slab region, since that's where we'll be working.
-    KVirtualAddress address = memory_layout.GetSlabRegionAddress();
+    const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
+    KVirtualAddress address = slab_region.GetAddress();
+
+    // Clear the slab region.
+    // TODO: implement access to kernel VAs.
+    // std::memset(device_ptr, 0, slab_region.GetSize());

     // Initialize slab type array to be in sorted order.
     std::array<KSlabType, KSlabType_Count> slab_types;
@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
                                MainMemoryAddress);
 }

+static inline size_t GetInitialProcessBinarySize() {
+    return InitialProcessBinarySizeMax;
+}
+
 } // namespace Kernel
@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
     FlagCanChangeAttribute = (1 << 24),
     FlagCanCodeMemory = (1 << 25),
     FlagLinearMapped = (1 << 26),
+    FlagCanPermissionLock = (1 << 27),

     FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                 FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
                 FlagLinearMapped,

     Free = static_cast<u32>(Svc::MemoryState::Free),
-    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
-         FlagCanAlignedDeviceMap,
+    IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
+               FlagCanAlignedDeviceMap,
+    IoRegister =
+        static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
+
     Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
     Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
     CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
-               FlagCanCodeMemory,
+               FlagCanCodeMemory | FlagCanPermissionLock,
     Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
     Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
              FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
     AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                 FlagCanCodeAlias,
     AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
-                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
+                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
+                    FlagCanPermissionLock,

     Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
           FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
     Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
             FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

-    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
+    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,

     Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
                  FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
     NonDeviceIpc =
         static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,

-    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
+    Kernel = static_cast<u32>(Svc::MemoryState::Kernel),

     GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
                     FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {

     Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
                FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
-               FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+               FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
+               FlagCanUseNonDeviceIpc,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);

 static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
-static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
 static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
 static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
-static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
+static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
 static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
 static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);

 static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
-static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
+static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
 static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
 static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
-static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
+static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
 static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
 static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
 static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
 static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
 static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
 static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
-static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
+static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
 static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
 static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
 static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
-static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
+static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);

 enum class KMemoryPermission : u8 {
     None = 0,
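[Note, not part of the diff] The new CodeData and AliasCodeData assert values differ from the old ones by exactly the new FlagCanPermissionLock bit, (1 << 27) == 0x08000000. A standalone check of that arithmetic:

    // Old value OR'd with bit 27 yields the new asserted value.
    static_assert((0x07FEBD04u | (1u << 27)) == 0x0FFEBD04u); // CodeData
    static_assert((0x07FFBD09u | (1u << 27)) == 0x0FFFBD09u); // AliasCodeData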
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
     IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
     DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
     Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
+    PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),

-    SetMask = Uncached,
+    SetMask = Uncached | PermissionLocked,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);

@@ -261,6 +270,10 @@ struct KMemoryInfo {
         return m_state;
     }

+    constexpr Svc::MemoryState GetSvcState() const {
+        return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
+    }
+
     constexpr KMemoryPermission GetPermission() const {
         return m_permission;
     }
@@ -326,6 +339,10 @@ public:
         return this->GetEndAddress() - 1;
     }

+    constexpr KMemoryState GetState() const {
+        return m_memory_state;
+    }
+
     constexpr u16 GetIpcLockCount() const {
         return m_ipc_lock_count;
     }
@@ -443,6 +460,13 @@ public:
         }
     }

+    constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
+        ASSERT(False(mask & KMemoryAttribute::IpcLocked));
+        ASSERT(False(mask & KMemoryAttribute::DeviceShared));
+
+        m_attribute = (m_attribute & ~mask) | attr;
+    }
+
     constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
         ASSERT(this->GetAddress() < addr);
         ASSERT(this->Contains(addr));
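[Note, not part of the diff] The new UpdateAttribute helper is a masked read-modify-write: only the bits selected by mask are replaced, everything else is preserved. A small sketch of the same expression on a plain integer (UpdateBits and the bit values are illustrative only):

    #include <cstdint>

    constexpr std::uint8_t UpdateBits(std::uint8_t current, std::uint8_t mask, std::uint8_t attr) {
        // Same shape as m_attribute = (m_attribute & ~mask) | attr;
        return static_cast<std::uint8_t>((current & ~mask) | attr);
    }
    // Setting a bit: attr == mask. Clearing it: attr == 0 with the same mask.
    static_assert(UpdateBits(0b0001, /*mask=*/0b1000, /*attr=*/0b1000) == 0b1001);
    static_assert(UpdateBits(0b1001, /*mask=*/0b1000, /*attr=*/0b0000) == 0b0001);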
@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
         }

         // Update block state.
-        it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
-                   static_cast<u8>(clear_disable_attr));
+        it->Update(state, perm, attr, it->GetAddress() == address,
+                   static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
         cur_address += cur_info.GetSize();
         remaining_pages -= cur_info.GetNumPages();
     }
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
                                         KProcessAddress address, size_t num_pages,
                                         KMemoryState test_state, KMemoryPermission test_perm,
                                         KMemoryAttribute test_attr, KMemoryState state,
-                                        KMemoryPermission perm, KMemoryAttribute attr) {
+                                        KMemoryPermission perm, KMemoryAttribute attr,
+                                        KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                        KMemoryBlockDisableMergeAttribute clear_disable_attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
     ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
             }

             // Update block state.
-            it->Update(state, perm, attr, false, 0, 0);
+            it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
             cur_address += cur_info.GetSize();
             remaining_pages -= cur_info.GetNumPages();
         } else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocat
     this->CoalesceForUpdate(allocator, address, num_pages);
 }

+void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
+                                          KProcessAddress address, size_t num_pages,
+                                          KMemoryAttribute mask, KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+
+    KProcessAddress cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        if ((it->GetAttribute() & mask) != attr) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != GetInteger(cur_address)) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->UpdateAttribute(mask, attr);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right attributes, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
 // Debug.
 bool KMemoryBlockManager::CheckState() const {
     // Loop over every block, ensuring that we are sorted and coalesced.
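[Note, not part of the diff] The new UpdateAttribute walk is driven from KPageTable::SetMemoryAttribute later in this change. A condensed sketch of that call pattern, with the allocator setup elided (the wrapper function name is hypothetical; the UpdateAttribute signature is the one declared in this diff):

    void SetAttributeSketch(KMemoryBlockManager& manager,
                            KMemoryBlockManagerUpdateAllocator* allocator,
                            KProcessAddress addr, size_t num_pages, u32 mask, u32 attr) {
        // Only the bits selected by mask are rewritten; blocks are split at the
        // range boundaries so pages outside the range keep their old attributes.
        manager.UpdateAttribute(allocator, addr, num_pages,
                                static_cast<KMemoryAttribute>(mask),
                                static_cast<KMemoryAttribute>(attr));
    }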
@@ -115,7 +115,11 @@ public:
     void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                        size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                        KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
-                       KMemoryAttribute attr);
+                       KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
+                       KMemoryBlockDisableMergeAttribute clear_disable_attr);
+
+    void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                         size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);

     iterator FindIterator(KProcessAddress address) const {
         return m_memory_block_tree.find(KMemoryBlock(
@@ -137,11 +137,9 @@ public:
         return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
     }

-    KVirtualAddress GetSlabRegionAddress() const {
-        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
-            .GetAddress();
+    const KMemoryRegion& GetSlabRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
     }

     const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
         return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
     }
@@ -119,7 +119,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
     // Free each region to its corresponding heap.
     size_t reserved_sizes[MaxManagerCount] = {};
     const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
-    const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+    const size_t ini_size = GetInitialProcessBinarySize();
+    const KPhysicalAddress ini_end = ini_start + ini_size;
     const KPhysicalAddress ini_last = ini_end - 1;
     for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +138,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
                 }

                 // Open/reserve the ini memory.
-                manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
-                reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+                manager.OpenFirst(ini_start, ini_size / PageSize);
+                reserved_sizes[it.GetAttributes()] += ini_size;

                 // Free memory after the ini to the heap.
                 if (ini_last != cur_last) {
                     ASSERT(cur_end != 0);
-                    manager.Free(ini_end, cur_end - ini_end);
+                    manager.Free(ini_end, (cur_end - ini_end) / PageSize);
                 }
             } else {
                 // Ensure there's no partial overlap with the ini image.
@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
 constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
     KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
         KMemoryRegionAttr_LinearMapped);
+constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
+    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
+        KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
               (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_LinearMapped));
+static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
+              (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
+               KMemoryRegionAttr_LinearMapped));

 constexpr inline auto KMemoryRegionType_DramReservedEarly =
     KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
 static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
               (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

-constexpr inline auto KMemoryRegionType_DramPoolManagement =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
+// UNUSED: .Derive(4, 1);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_DramPoolManagement =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
         KMemoryRegionAttr_CarveoutProtected);
-constexpr inline auto KMemoryRegionType_DramUserPool =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
+constexpr inline const auto KMemoryRegionType_DramUserPool =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 3);
 static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
-              (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_CarveoutProtected));
 static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
-              (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

 constexpr inline auto KMemoryRegionType_DramApplicationPool =
     KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
 constexpr inline auto KMemoryRegionType_DramSystemPool =
     KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
 static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
-              (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
-              (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
-              (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
-              (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_CarveoutProtected));

 constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
 static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
 static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);

-// UNUSED: .DeriveSparse(2, 2, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
-    KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
-static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));

-constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
-    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
-static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
-static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
-static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
-static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
+// UNUSED: .Derive(4, 3);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
+static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
+static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
+static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);

 // NOTE: For unknown reason, the pools are derived out-of-order here.
 // It's worth eventually trying to understand why Nintendo made this choice.
 // UNUSED: .Derive(6, 0);
 // UNUSED: .Derive(6, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
-static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
-static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
-static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
-static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
+constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
+static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
+static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
+static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);

 constexpr inline auto KMemoryRegionType_ArchDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
 static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);

 constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
-    if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
-        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
-    } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
+    if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelPtHeap;
     } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
+    } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureUnknown;
+    } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
     } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
         return KMemoryRegionType_VirtualDramUnknownDebug;
     } else {
@@ -183,12 +183,17 @@ private:

 class KScopedPageGroup {
 public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+    explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
         if (m_pg) {
-            m_pg->Open();
+            if (not_first) {
+                m_pg->Open();
+            } else {
+                m_pg->OpenFirst();
+            }
         }
     }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
+        : KScopedPageGroup(std::addressof(gp), not_first) {}
     ~KScopedPageGroup() {
         if (m_pg) {
             m_pg->Close();
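[Note, not part of the diff] The new not_first parameter lets the same RAII wrapper either take an additional reference (Open) or take the very first reference (OpenFirst) to the pages in the group. The call site added later in this diff selects the behaviour from the operation type:

    // From KPageTable::Operate later in this change: MapFirstGroup takes the
    // first reference to freshly allocated pages, everything else adds a reference.
    KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);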
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
         KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::All, KMemoryAttribute::None));
+        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));

     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
                                 PageSize;

         // While we have pages to map, map them.
-        while (map_pages > 0) {
-            // Check if we're at the end of the physical block.
-            if (pg_pages == 0) {
-                // Ensure there are more pages to map.
-                ASSERT(pg_it != pg.end());
+        {
+            // Create a page group for the current mapping range.
+            KPageGroup cur_pg(m_kernel, m_block_info_manager);
+            {
+                ON_RESULT_FAILURE_2 {
+                    cur_pg.OpenFirst();
+                    cur_pg.Close();
+                };

-                // Advance our physical block.
-                ++pg_it;
-                pg_phys_addr = pg_it->GetAddress();
-                pg_pages = pg_it->GetNumPages();
-            }
+                size_t remain_pages = map_pages;
+                while (remain_pages > 0) {
+                    // Check if we're at the end of the physical block.
+                    if (pg_pages == 0) {
+                        // Ensure there are more pages to map.
+                        ASSERT(pg_it != pg.end());
+
+                        // Advance our physical block.
+                        ++pg_it;
+                        pg_phys_addr = pg_it->GetAddress();
+                        pg_pages = pg_it->GetNumPages();
+                    }
+
+                    // Add whatever we can to the current block.
+                    const size_t cur_pages = std::min(pg_pages, remain_pages);
+                    R_TRY(cur_pg.AddBlock(pg_phys_addr +
+                                              ((pg_pages - cur_pages) * PageSize),
+                                          cur_pages));
+
+                    // Advance.
+                    remain_pages -= cur_pages;
+                    pg_pages -= cur_pages;
+                }
+            }

-            // Map whatever we can.
-            const size_t cur_pages = std::min(pg_pages, map_pages);
-            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                          OperationType::MapFirst, pg_phys_addr));
-
-            // Advance.
-            cur_address += cur_pages * PageSize;
-            map_pages -= cur_pages;
-
-            pg_phys_addr += cur_pages * PageSize;
-            pg_pages -= cur_pages;
+            // Map the pages.
+            R_TRY(this->Operate(cur_address, map_pages, cur_pg,
+                                OperationType::MapFirstGroup));
         }
     }
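[Note, not part of the diff] The restructured loop first accumulates the physical ranges into one KPageGroup and then maps them with a single MapFirstGroup operation. The ON_RESULT_FAILURE_2 handler appears intended to pair OpenFirst with an immediate Close so that, if building or mapping the group fails, the pages already collected in cur_pg are released again; on success the first reference is instead taken by the MapFirstGroup path. A stripped-down sketch of that shape, assuming the result macros used in this codebase:

    // Sketch only: cleanup-on-failure idiom around a locally built page group.
    KPageGroup cur_pg(m_kernel, m_block_info_manager);
    {
        ON_RESULT_FAILURE_2 {
            cur_pg.OpenFirst(); // take the first reference to the collected pages...
            cur_pg.Close();     // ...and drop it, releasing them on failure
        };
        // ... cur_pg.AddBlock(...) calls populate the group here ...
    }
    R_TRY(this->Operate(cur_address, map_pages, cur_pg, OperationType::MapFirstGroup));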
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
         m_memory_block_manager.UpdateIfMatch(
             std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
             KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            address == this->GetAliasRegionStart()
+                ? KMemoryBlockDisableMergeAttribute::Normal
+                : KMemoryBlockDisableMergeAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None);

         R_SUCCEED();
     }
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {

     // Iterate over the memory, unmapping as we go.
     auto it = m_memory_block_manager.FindIterator(cur_address);
+
+    const auto clear_merge_attr =
+        (it->GetState() == KMemoryState::Normal &&
+         it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+            ? KMemoryBlockDisableMergeAttribute::Normal
+            : KMemoryBlockDisableMergeAttribute::None;
+
     while (true) {
         // Check that the iterator is valid.
         ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
     m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
                                   KMemoryState::Free, KMemoryPermission::None,
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+                                  clear_merge_attr);

     // We succeeded.
     R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
     KScopedPageTableUpdater updater(this);

     // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));

     // Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
     KScopedPageTableUpdater updater(this);

     // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));

     // Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
     size_t num_allocator_blocks;
     constexpr auto AttributeTestMask =
         ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
-        std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
-        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-        AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+    const KMemoryState state_test_mask =
+        static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
+                                       ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
+                                       : 0) |
+                                  ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
+                                       ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
+                                       : 0));
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));

     // Create an update allocator.
     Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
                                              m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);

-    // Determine the new attribute.
-    const KMemoryAttribute new_attr =
-        static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
-                                       static_cast<KMemoryAttribute>(attr & mask)));
-
-    // Perform operation.
-    this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
+    // If we need to, perform a change attribute operation.
+    if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
+        // Perform operation.
+        R_TRY(this->Operate(addr, num_pages, old_perm,
+                            OperationType::ChangePermissionsAndRefreshAndFlush, 0));
+    }

     // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
+                                           static_cast<KMemoryAttribute>(mask),
+                                           static_cast<KMemoryAttribute>(attr));

     R_SUCCEED();
 }
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
                                   &KMemoryBlock::ShareToDevice, KMemoryPermission::None);

     // Set whether the locked memory was io.
-    *out_is_io = old_state == KMemoryState::Io;
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;

     R_SUCCEED();
 }
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
     ASSERT(num_pages == page_group.GetNumPages());

     switch (operation) {
-    case OperationType::MapGroup: {
+    case OperationType::MapGroup:
+    case OperationType::MapFirstGroup: {
         // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
+        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);

         for (const auto& node : page_group) {
             const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
         m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
     }
-    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3073,11 +3103,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis

         // Open references to pages, if we should.
         if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
+            m_kernel.MemoryManager().Open(map_addr, num_pages);
         }
         break;
     }
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
     }
     case OperationType::ChangePermissions:
     case OperationType::ChangePermissionsAndRefresh:
+    case OperationType::ChangePermissionsAndRefreshAndFlush:
         break;
     default:
         ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
     }
 }

-KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         return m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         return m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
         return m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
         return m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
         return m_code_region_start;
     default:
         UNREACHABLE();
     }
 }

-size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return m_address_space_end - m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         return m_heap_region_end - m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         return m_alias_region_end - m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
         return m_stack_region_end - m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
         return m_kernel_map_region_end - m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return m_alias_code_region_end - m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
         return m_code_region_end - m_code_region_start;
     default:
         UNREACHABLE();
     }
 }

-bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
     const KProcessAddress end = addr + size;
     const KProcessAddress last = end - 1;

@@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat
     const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
                                m_alias_region_start == m_alias_region_end);
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return is_in_region;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Stack:
-    case KMemoryState::ThreadLocal:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Stack:
+    case Svc::MemoryState::ThreadLocal:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return is_in_region && !is_in_heap && !is_in_alias;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         ASSERT(is_in_heap);
         return is_in_region && !is_in_alias;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         ASSERT(is_in_alias);
         return is_in_region && !is_in_heap;
     default:
@@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces
 
 Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                                     KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryBlockManager::const_iterator it,
+                                    KProcessAddress last_addr, KMemoryState state_mask,
                                     KMemoryState state, KMemoryPermission perm_mask,
                                     KMemoryPermission perm, KMemoryAttribute attr_mask,
                                     KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
     ASSERT(this->IsLockedByCurrentThread());
 
     // Get information about the first block.
-    const KProcessAddress last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
 
-    // If the start address isn't aligned, we need a block.
-    const size_t blocks_for_start_align =
-        (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
     // Validate all blocks in the range have correct state.
     const KMemoryState first_state = info.m_state;
     const KMemoryPermission first_perm = info.m_permission;
|
||||||
info = it->GetMemoryInfo();
|
info = it->GetMemoryInfo();
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the end address isn't aligned, we need a block.
|
|
||||||
const size_t blocks_for_end_align =
|
|
||||||
(Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
|
|
||||||
|
|
||||||
// Write output state.
|
// Write output state.
|
||||||
if (out_state != nullptr) {
|
if (out_state != nullptr) {
|
||||||
*out_state = first_state;
|
*out_state = first_state;
|
||||||
|
@@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
     if (out_attr != nullptr) {
         *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
     }
 
+    // If the end address isn't aligned, we need a block.
     if (out_blocks_needed != nullptr) {
-        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+        const size_t blocks_for_end_align =
+            (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+                ? 1
+                : 0;
+        *out_blocks_needed = blocks_for_end_align;
     }
 
+    R_SUCCEED();
+}
+
+Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Check memory state.
+    const KProcessAddress last_addr = addr + size - 1;
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+    R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
+
+    // If the start address isn't aligned, we need a block.
+    if (out_blocks_needed != nullptr &&
+        Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+        ++(*out_blocks_needed);
+    }
+
     R_SUCCEED();
 }
@@ -126,8 +126,6 @@ public:
         return m_block_info_manager;
     }
 
-    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
-
     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
                     KPhysicalAddress phys_addr, KProcessAddress region_start,
                     size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -162,6 +160,21 @@ public:
     void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
                         const KPageGroup& pg);
 
+    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+    size_t GetRegionSize(Svc::MemoryState state) const;
+    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+    KProcessAddress GetRegionAddress(KMemoryState state) const {
+        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    size_t GetRegionSize(KMemoryState state) const {
+        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        return this->CanContain(addr, size,
+                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+
 protected:
     struct PageLinkedList {
     private:
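The KMemoryState overloads added above forward to the new Svc::MemoryState versions by masking off the kernel-internal bits (`state & KMemoryState::Mask`) and casting. A minimal, self-contained sketch of that masking idea, using hypothetical enum values rather than the real definitions in the kernel headers:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical, simplified stand-ins for the real enums: the low bits of the
// kernel-side KMemoryState encode the SVC-visible state.
enum class SvcMemoryState : uint32_t { Free = 0x00, Io = 0x01, Normal = 0x05 };
enum class KMemoryState : uint32_t {
    Mask = 0xFF,     // low byte carries the Svc-visible state value
    Normal = 0x0105, // 0x05 (Svc Normal) plus illustrative kernel-only flag bits
};

constexpr SvcMemoryState ToSvcState(KMemoryState state) {
    return static_cast<SvcMemoryState>(static_cast<uint32_t>(state) &
                                       static_cast<uint32_t>(KMemoryState::Mask));
}

int main() {
    // Prints 5: the kernel-internal flag bits are masked away.
    std::printf("%u\n", static_cast<uint32_t>(ToSvcState(KMemoryState::Normal)));
}
```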
@@ -204,12 +217,13 @@
 private:
     enum class OperationType : u32 {
         Map = 0,
-        MapFirst = 1,
-        MapGroup = 2,
+        MapGroup = 1,
+        MapFirstGroup = 2,
         Unmap = 3,
         ChangePermissions = 4,
         ChangePermissionsAndRefresh = 5,
-        Separate = 6,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
     };
 
     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -228,8 +242,6 @@ private:
     Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
                    OperationType operation, KPhysicalAddress map_addr = 0);
     void FinalizeUpdate(PageLinkedList* page_list);
-    KProcessAddress GetRegionAddress(KMemoryState state) const;
-    size_t GetRegionSize(KMemoryState state) const;
 
     KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
                                  size_t num_pages, size_t alignment, size_t offset,
@@ -250,6 +262,13 @@ private:
     Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                            KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+                            KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                             KMemoryAttribute* out_attr, size_t* out_blocks_needed,
                             KProcessAddress addr, size_t size, KMemoryState state_mask,
@@ -149,7 +149,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsed() {
 }
 
 u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
-    return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage();
+    return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize();
 }
 
 bool KProcess::ReleaseUserException(KThread* thread) {
@@ -623,14 +623,33 @@ struct KernelCore::Impl {
     ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
         GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
 
+    // Insert a physical region for the secure applet memory.
+    const auto secure_applet_end_phys_addr =
+        slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
+    if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
+            GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
+            KMemoryRegionType_DramKernelSecureAppletMemory));
+    }
+
+    // Insert a physical region for the unknown debug2 region.
+    constexpr size_t SecureUnknownRegionSize = 0;
+    const size_t secure_unknown_size = SecureUnknownRegionSize;
+    const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size;
+    if constexpr (SecureUnknownRegionSize > 0) {
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
+            GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
+            KMemoryRegionType_DramKernelSecureUnknown));
+    }
+
     // Determine size available for kernel page table heaps, requiring > 8 MB.
     const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
-    const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
+    const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
     ASSERT(page_table_heap_size / 4_MiB > 2);
 
     // Insert a physical region for the kernel page table heap region
     ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
-        GetInteger(slab_end_phys_addr), page_table_heap_size,
+        GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
         KMemoryRegionType_DramKernelPtHeap));
 
     // All DRAM regions that we haven't tagged by this point will be mapped under the linear
@@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s
 } // namespace
 
 Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
-    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
+    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
               perm);
 
     // Validate address / size.
@@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
     R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
 
     // Validate the attribute and mask.
-    constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
+    constexpr u32 SupportedMask =
+        static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
     R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
     R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
 
+    // Check that permission locked is either being set or not masked.
+    R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
+                 (static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
+             ResultInvalidCombination);
+
     // Validate that the region is in range for the current process.
     auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
     R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
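The attribute validation above boils down to three bitwise conditions on mask and attr. A small self-contained sketch of the same checks, using illustrative constants in place of the Svc::MemoryAttribute definitions:

```cpp
#include <cstdint>

// Illustrative bit values mirroring the attribute bits used in the diff above.
constexpr uint32_t Uncached = 1u << 3;
constexpr uint32_t PermissionLocked = 1u << 4;
constexpr uint32_t SupportedMask = Uncached | PermissionLocked;

// Returns true when (mask, attr) would pass the R_UNLESS checks in SetMemoryAttribute.
constexpr bool IsValidSetMemoryAttributeCombination(uint32_t mask, uint32_t attr) {
    const bool attr_subset_of_mask = (mask | attr) == mask;
    const bool only_supported_bits = (mask | attr | SupportedMask) == SupportedMask;
    // PermissionLocked must either be set or left out of the mask entirely.
    const bool permission_locked_consistent =
        (mask & PermissionLocked) == (attr & PermissionLocked);
    return attr_subset_of_mask && only_supported_bits && permission_locked_consistent;
}

static_assert(IsValidSetMemoryAttributeCombination(Uncached, Uncached));
static_assert(!IsValidSetMemoryAttributeCombination(PermissionLocked, 0));
```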
@@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 {
     IpcLocked = (1 << 1),
     DeviceShared = (1 << 2),
     Uncached = (1 << 3),
+    PermissionLocked = (1 << 4),
 };
 DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
@@ -407,13 +407,13 @@ protected:
         IPC::RequestParser rp{ctx};
         const auto base = rp.PopRaw<ProfileBase>();
 
-        const auto user_data = ctx.ReadBuffer();
-        const auto image_data = ctx.ReadBuffer(1);
+        const auto image_data = ctx.ReadBufferA(0);
+        const auto user_data = ctx.ReadBufferX(0);
 
-        LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
+        LOG_INFO(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
                  Common::StringFromFixedZeroTerminatedBuffer(
                      reinterpret_cast<const char*>(base.username.data()), base.username.size()),
                  base.timestamp, base.user_uuid.RawString());
 
         if (user_data.size() < sizeof(UserData)) {
             LOG_ERROR(Service_ACC, "UserData buffer too small!");
@@ -23,6 +23,17 @@
 #include "core/hle/service/ipc_helpers.h"
 #include "core/memory.h"
 
+namespace {
+static thread_local std::array read_buffer_data_a{
+    Common::ScratchBuffer<u8>(),
+    Common::ScratchBuffer<u8>(),
+};
+static thread_local std::array read_buffer_data_x{
+    Common::ScratchBuffer<u8>(),
+    Common::ScratchBuffer<u8>(),
+};
+} // Anonymous namespace
+
 namespace Service {
 
 SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -328,26 +339,57 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
     }
 }
 
+std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
+    static thread_local std::array read_buffer_a{
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+    };
+
+    ASSERT_OR_EXECUTE_MSG(
+        BufferDescriptorA().size() > buffer_index, { return {}; },
+        "BufferDescriptorA invalid buffer_index {}", buffer_index);
+    auto& read_buffer = read_buffer_a[buffer_index];
+    return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
+                            BufferDescriptorA()[buffer_index].Size(),
+                            &read_buffer_data_a[buffer_index]);
+}
+
+std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
+    static thread_local std::array read_buffer_x{
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+    };
+
+    ASSERT_OR_EXECUTE_MSG(
+        BufferDescriptorX().size() > buffer_index, { return {}; },
+        "BufferDescriptorX invalid buffer_index {}", buffer_index);
+    auto& read_buffer = read_buffer_x[buffer_index];
+    return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
+                            BufferDescriptorX()[buffer_index].Size(),
+                            &read_buffer_data_x[buffer_index]);
+}
+
 std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
     static thread_local std::array read_buffer_a{
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
     };
-    static thread_local std::array read_buffer_data_a{
-        Common::ScratchBuffer<u8>(),
-        Common::ScratchBuffer<u8>(),
-    };
     static thread_local std::array read_buffer_x{
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
     };
-    static thread_local std::array read_buffer_data_x{
-        Common::ScratchBuffer<u8>(),
-        Common::ScratchBuffer<u8>(),
-    };
 
     const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                            BufferDescriptorA()[buffer_index].Size()};
+    const bool is_buffer_x{BufferDescriptorX().size() > buffer_index &&
+                           BufferDescriptorX()[buffer_index].Size()};
+
+    if (is_buffer_a && is_buffer_x) {
+        LOG_WARNING(Input, "Both buffer descriptors are available a.size={}, x.size={}",
+                    BufferDescriptorA()[buffer_index].Size(),
+                    BufferDescriptorX()[buffer_index].Size());
+    }
+
     if (is_buffer_a) {
         ASSERT_OR_EXECUTE_MSG(
             BufferDescriptorA().size() > buffer_index, { return {}; },
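Later hunks in this diff (the acc and prepo services) switch from ReadBuffer()'s descriptor guessing to these explicit helpers. A hedged usage sketch with a stand-in context type, since a real HLERequestContext cannot be constructed outside the emulator:

```cpp
#include <cstdint>
#include <span>
#include <vector>

// Minimal stand-in for HLERequestContext, only to illustrate the call pattern;
// the real class lives in core/hle/service/hle_ipc.h.
struct FakeRequestContext {
    std::vector<uint8_t> a_buffer;
    std::vector<uint8_t> x_buffer;
    std::span<const uint8_t> ReadBufferA(std::size_t) const { return a_buffer; }
    std::span<const uint8_t> ReadBufferX(std::size_t) const { return x_buffer; }
};

// A handler that, like the prepo changes later in this diff, reads the A (send)
// and X (pointer) descriptors explicitly instead of relying on ReadBuffer().
void HandleSaveReport(const FakeRequestContext& ctx) {
    const auto data1 = ctx.ReadBufferA(0); // first A descriptor
    const auto data2 = ctx.ReadBufferX(0); // first X descriptor
    (void)data1.size();
    (void)data2.size();
}

int main() {
    FakeRequestContext ctx{{1, 2, 3}, {4, 5}};
    HandleSaveReport(ctx);
}
```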
@@ -253,6 +253,12 @@
         return domain_message_header.has_value();
     }
 
+    /// Helper function to get a span of a buffer using the buffer descriptor A
+    [[nodiscard]] std::span<const u8> ReadBufferA(std::size_t buffer_index = 0) const;
+
+    /// Helper function to get a span of a buffer using the buffer descriptor X
+    [[nodiscard]] std::span<const u8> ReadBufferX(std::size_t buffer_index = 0) const;
+
     /// Helper function to get a span of a buffer using the appropriate buffer descriptor
     [[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
@@ -41,6 +41,7 @@ void CoreData::BuildRandom(Age age, Gender gender, Race race) {
         }
     }
 
+    SetDefault();
     SetGender(gender);
     SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max));
     SetRegionMove(0);
@@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
     // Get bounds of where mapping is possible.
     const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
     const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
-    const auto state = Kernel::KMemoryState::Io;
+    const auto state = Kernel::KMemoryState::IoMemory;
     const auto perm = Kernel::KMemoryPermission::UserReadWrite;
     std::mt19937_64 rng{process->GetRandomEntropy(0)};
@@ -58,14 +58,8 @@ private:
         IPC::RequestParser rp{ctx};
         const auto process_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO,
                   "called, type={:02X}, process_id={:016X}, data1_size={:016X}, data2_size={:016X}",
@@ -85,14 +79,8 @@ private:
         const auto user_id = rp.PopRaw<u128>();
         const auto process_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO,
                   "called, type={:02X}, user_id={:016X}{:016X}, process_id={:016X}, "
@@ -137,14 +125,8 @@ private:
         IPC::RequestParser rp{ctx};
         const auto title_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}",
                   title_id, data1.size(), data2.size());
@@ -161,14 +143,8 @@ private:
         const auto user_id = rp.PopRaw<u128>();
         const auto title_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO,
                   "called, user_id={:016X}{:016X}, title_id={:016X}, data1_size={:016X}, "
@@ -46,8 +46,8 @@ TEST_CASE("UniqueFunction", "[common]") {
         Noisy noisy;
         REQUIRE(noisy.state == "Default constructed");
 
-        Common::UniqueFunction<void> func = [noisy = std::move(noisy)] {
-            REQUIRE(noisy.state == "Move constructed");
+        Common::UniqueFunction<void> func = [noisy_inner = std::move(noisy)] {
+            REQUIRE(noisy_inner.state == "Move constructed");
         };
         REQUIRE(noisy.state == "Moved away");
         func();
@@ -101,7 +101,7 @@ TEST_CASE("UniqueFunction", "[common]") {
     };
     Foo object{&num_destroyed};
     {
-        Common::UniqueFunction<void> func = [object = std::move(object)] {};
+        Common::UniqueFunction<void> func = [object_inner = std::move(object)] {};
         REQUIRE(num_destroyed == 0);
     }
     REQUIRE(num_destroyed == 1);
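The capture renames above read as a shadowing fix: `[noisy = std::move(noisy)]` introduces an init-capture with the same name as the enclosing local, which some compilers report under their shadowing warnings. A standalone illustration of the pattern (not taken from the test suite):

```cpp
#include <string>
#include <utility>

int main() {
    std::string noisy = "Default constructed";
    // An init-capture named "noisy" would reuse the name of the local above,
    // which some compilers flag as shadowing; renaming the capture, as the
    // test now does, keeps the outer variable unambiguous.
    auto func = [noisy_inner = std::move(noisy)] { return noisy_inner.size(); };
    return static_cast<int>(func());
}
```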
@@ -86,7 +86,10 @@
             uncommitted_operations.emplace_back(std::move(func));
         }
         pending_operations.emplace_back(std::move(uncommitted_operations));
-        QueueFence(new_fence);
+        {
+            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+            QueueFence(new_fence);
+        }
         if (!delay_fence) {
             func();
         }
@@ -19,6 +19,7 @@ set(SHADER_FILES
     block_linear_unswizzle_2d.comp
     block_linear_unswizzle_3d.comp
     convert_abgr8_to_d24s8.frag
+    convert_abgr8_to_d32f.frag
    convert_d32f_to_abgr8.frag
     convert_d24s8_to_abgr8.frag
     convert_depth_to_float.frag

src/video_core/host_shaders/convert_abgr8_to_d32f.frag (new file, 15 lines)
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#version 450
+
+layout(binding = 0) uniform sampler2D color_texture;
+
+void main() {
+    ivec2 coord = ivec2(gl_FragCoord.xy);
+    vec4 color = texelFetch(color_texture, coord, 0).abgr;
+
+    float value = color.a * (color.r + color.g + color.b) / 3.0f;
+
+    gl_FragDepth = value;
+}
@@ -9,6 +9,6 @@ layout(location = 0) out vec4 color;
 
 void main() {
     ivec2 coord = ivec2(gl_FragCoord.xy);
-    float depth = textureLod(depth_tex, coord, 0).r;
+    float depth = texelFetch(depth_tex, coord, 0).r;
     color = vec4(depth, depth, depth, 1.0);
 }
@@ -8,6 +8,7 @@
 #include "common/settings.h"
 #include "video_core/host_shaders/blit_color_float_frag_spv.h"
 #include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h"
+#include "video_core/host_shaders/convert_abgr8_to_d32f_frag_spv.h"
 #include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h"
 #include "video_core/host_shaders/convert_d32f_to_abgr8_frag_spv.h"
 #include "video_core/host_shaders/convert_depth_to_float_frag_spv.h"
@@ -434,6 +435,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
       convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)),
       convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)),
       convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)),
+      convert_abgr8_to_d32f_frag(BuildShader(device, CONVERT_ABGR8_TO_D32F_FRAG_SPV)),
       convert_d32f_to_abgr8_frag(BuildShader(device, CONVERT_D32F_TO_ABGR8_FRAG_SPV)),
       convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)),
       convert_s8d24_to_abgr8_frag(BuildShader(device, CONVERT_S8D24_TO_ABGR8_FRAG_SPV)),
|
||||||
Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view);
|
Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void BlitImageHelper::ConvertABGR8ToD32F(const Framebuffer* dst_framebuffer,
|
||||||
|
const ImageView& src_image_view) {
|
||||||
|
ConvertPipelineDepthTargetEx(convert_abgr8_to_d32f_pipeline, dst_framebuffer->RenderPass(),
|
||||||
|
convert_abgr8_to_d32f_frag);
|
||||||
|
Convert(*convert_abgr8_to_d32f_pipeline, dst_framebuffer, src_image_view);
|
||||||
|
}
|
||||||
|
|
||||||
void BlitImageHelper::ConvertD32FToABGR8(const Framebuffer* dst_framebuffer,
|
void BlitImageHelper::ConvertD32FToABGR8(const Framebuffer* dst_framebuffer,
|
||||||
ImageView& src_image_view) {
|
ImageView& src_image_view) {
|
||||||
ConvertPipelineColorTargetEx(convert_d32f_to_abgr8_pipeline, dst_framebuffer->RenderPass(),
|
ConvertPipelineColorTargetEx(convert_d32f_to_abgr8_pipeline, dst_framebuffer->RenderPass(),
|
||||||
|
|
|
@@ -67,6 +67,8 @@ public:
 
     void ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);
 
+    void ConvertABGR8ToD32F(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);
+
     void ConvertD32FToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view);
 
     void ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view);
@@ -130,6 +132,7 @@ private:
     vk::ShaderModule convert_depth_to_float_frag;
     vk::ShaderModule convert_float_to_depth_frag;
     vk::ShaderModule convert_abgr8_to_d24s8_frag;
+    vk::ShaderModule convert_abgr8_to_d32f_frag;
     vk::ShaderModule convert_d32f_to_abgr8_frag;
     vk::ShaderModule convert_d24s8_to_abgr8_frag;
     vk::ShaderModule convert_s8d24_to_abgr8_frag;
@@ -149,6 +152,7 @@ private:
     vk::Pipeline convert_d16_to_r16_pipeline;
     vk::Pipeline convert_r16_to_d16_pipeline;
     vk::Pipeline convert_abgr8_to_d24s8_pipeline;
+    vk::Pipeline convert_abgr8_to_d32f_pipeline;
     vk::Pipeline convert_d32f_to_abgr8_pipeline;
     vk::Pipeline convert_d24s8_to_abgr8_pipeline;
     vk::Pipeline convert_s8d24_to_abgr8_pipeline;
@@ -132,12 +132,16 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     const bool use_accelerated =
         rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
     const bool is_srgb = use_accelerated && screen_info.is_srgb;
-    RenderScreenshot(*framebuffer, use_accelerated);
 
-    Frame* frame = present_manager.GetRenderFrame();
-    blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
-    scheduler.Flush(*frame->render_ready);
-    present_manager.Present(frame);
+    {
+        std::scoped_lock lock{rasterizer.LockCaches()};
+        RenderScreenshot(*framebuffer, use_accelerated);
+
+        Frame* frame = present_manager.GetRenderFrame();
+        blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
+        scheduler.Flush(*frame->render_ready);
+        present_manager.Present(frame);
+    }
 
     gpu.RendererFrameEndNotify();
     rasterizer.TickFrame();
@@ -198,7 +198,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
     if (!pipeline) {
         return;
     }
-    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    std::scoped_lock lock{LockCaches()};
     // update engine as channel may be different.
     pipeline->SetEngine(maxwell3d, gpu_memory);
     pipeline->Configure(is_indexed);
@@ -708,6 +708,7 @@ void RasterizerVulkan::TiledCacheBarrier() {
 }
 
 void RasterizerVulkan::FlushCommands() {
+    std::scoped_lock lock{LockCaches()};
     if (draw_counter == 0) {
         return;
     }
@@ -805,6 +806,7 @@ void RasterizerVulkan::FlushWork() {
     if ((++draw_counter & 7) != 7) {
         return;
     }
+    std::scoped_lock lock{LockCaches()};
     if (draw_counter < DRAWS_TO_DISPATCH) {
         // Send recorded tasks to the worker thread
         scheduler.DispatchWork();
|
||||||
void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
|
void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
|
||||||
CreateChannel(channel);
|
CreateChannel(channel);
|
||||||
{
|
{
|
||||||
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
|
std::scoped_lock lock{LockCaches()};
|
||||||
texture_cache.CreateChannel(channel);
|
texture_cache.CreateChannel(channel);
|
||||||
buffer_cache.CreateChannel(channel);
|
buffer_cache.CreateChannel(channel);
|
||||||
}
|
}
|
||||||
|
@ -1512,7 +1514,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
|
||||||
const s32 channel_id = channel.bind_id;
|
const s32 channel_id = channel.bind_id;
|
||||||
BindToChannel(channel_id);
|
BindToChannel(channel_id);
|
||||||
{
|
{
|
||||||
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
|
std::scoped_lock lock{LockCaches()};
|
||||||
texture_cache.BindToChannel(channel_id);
|
texture_cache.BindToChannel(channel_id);
|
||||||
buffer_cache.BindToChannel(channel_id);
|
buffer_cache.BindToChannel(channel_id);
|
||||||
}
|
}
|
||||||
|
@ -1525,7 +1527,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
|
||||||
void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
|
void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
|
||||||
EraseChannel(channel_id);
|
EraseChannel(channel_id);
|
||||||
{
|
{
|
||||||
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
|
std::scoped_lock lock{LockCaches()};
|
||||||
texture_cache.EraseChannel(channel_id);
|
texture_cache.EraseChannel(channel_id);
|
||||||
buffer_cache.EraseChannel(channel_id);
|
buffer_cache.EraseChannel(channel_id);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -133,6 +133,10 @@ public:
 
     void ReleaseChannel(s32 channel_id) override;
 
+    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
+        return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
+    }
+
 private:
     static constexpr size_t MAX_TEXTURES = 192;
     static constexpr size_t MAX_IMAGES = 48;
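The LockCaches() helper added above gives every call site one way to take both cache locks: std::scoped_lock acquires the two recursive mutexes with its deadlock-avoiding constructor, and C++17 guaranteed copy elision lets the lock be returned by value even though scoped_lock is not movable. A reduced sketch of the pattern, with the class and cache types replaced by illustrative stand-ins:

```cpp
#include <mutex>

struct FakeCache {
    std::recursive_mutex mutex;
};

class FakeRasterizer {
public:
    // Both mutexes are acquired together through std::scoped_lock, which uses a
    // deadlock-avoidance algorithm, so call sites cannot get the order wrong.
    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
        return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
    }

    void FlushCommands() {
        std::scoped_lock lock{LockCaches()}; // prvalue, so no move is required
        // ... work that touches both caches ...
    }

private:
    FakeCache buffer_cache;
    FakeCache texture_cache;
};

int main() {
    FakeRasterizer rasterizer;
    rasterizer.FlushCommands();
}
```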
@@ -1194,6 +1194,11 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
             return blit_image_helper.ConvertD16ToR16(dst, src_view);
         }
         break;
+    case PixelFormat::A8B8G8R8_SRGB:
+        if (src_view.format == PixelFormat::D32_FLOAT) {
+            return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
+        }
+        break;
     case PixelFormat::A8B8G8R8_UNORM:
         if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
             return blit_image_helper.ConvertD24S8ToABGR8(dst, src_view);
@@ -1205,6 +1210,16 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
             return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
         }
         break;
+    case PixelFormat::B8G8R8A8_SRGB:
+        if (src_view.format == PixelFormat::D32_FLOAT) {
+            return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
+        }
+        break;
+    case PixelFormat::B8G8R8A8_UNORM:
+        if (src_view.format == PixelFormat::D32_FLOAT) {
+            return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
+        }
+        break;
     case PixelFormat::R32_FLOAT:
         if (src_view.format == PixelFormat::D32_FLOAT) {
             return blit_image_helper.ConvertD32ToR32(dst, src_view);
@@ -1222,6 +1237,12 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
         }
         break;
     case PixelFormat::D32_FLOAT:
+        if (src_view.format == PixelFormat::A8B8G8R8_UNORM ||
+            src_view.format == PixelFormat::B8G8R8A8_UNORM ||
+            src_view.format == PixelFormat::A8B8G8R8_SRGB ||
+            src_view.format == PixelFormat::B8G8R8A8_SRGB) {
+            return blit_image_helper.ConvertABGR8ToD32F(dst, src_view);
+        }
         if (src_view.format == PixelFormat::R32_FLOAT) {
             return blit_image_helper.ConvertR32ToD32(dst, src_view);
         }
@@ -2034,7 +2055,7 @@ void TextureCacheRuntime::TransitionImageLayout(Image& image) {
         },
     };
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([barrier = barrier](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([barrier](vk::CommandBuffer cmdbuf) {
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, barrier);
     });