// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cstring>
#include <optional>
#include <utility>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/swap.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
#include "video_core/gpu.h"

namespace Core::Memory {

// Implementation class used to keep the specifics of the memory subsystem hidden
// from outside classes. This also allows modification to the internals of the memory
// subsystem without needing to rebuild all files that make use of the memory interface.
struct Memory::Impl {
    explicit Impl(Core::System& system_) : system{system_} {}
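
    // Installs the given process' page table as the one the emulated cores translate
    // through, and notifies each of the four ARM interfaces so they can pick up the
    // new table and address space width.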
    void SetCurrentPageTable(Kernel::Process& process) {
        current_page_table = &process.VMManager().page_table;

        const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();

        system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
        system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
        system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
        system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
    }
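
    // Maps a contiguous range of host memory into the guest address space. The
    // PhysicalMemory overload resolves the backing store to a raw pointer at the
    // given offset and defers to the u8* overload, which fills the page table.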
    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
                         Kernel::PhysicalMemory& memory, VAddr offset) {
        MapMemoryRegion(page_table, base, size, memory.data() + offset);
    }

    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
    }
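
    // Marks a virtual range as MMIO: the pages receive no backing pointer
    // (PageType::Special) and the handler is recorded as a special region
    // spanning [base, base + size - 1].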
    void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
                     Common::MemoryHookPointer mmio_handler) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
                 Common::PageType::Special);

        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
        const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice,
                                           std::move(mmio_handler)};
        page_table.special_regions.add(
            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
    }

    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
                 Common::PageType::Unmapped);

        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
        page_table.special_regions.erase(interval);
    }
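
    // Debug hooks are tracked the same way as MMIO handlers: as entries in the page
    // table's special-region interval map. Adding and removing are symmetric.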
    void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
                      Common::MemoryHookPointer hook) {
        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
        const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
        page_table.special_regions.add(
            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
    }

    void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
                         Common::MemoryHookPointer hook) {
        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
        const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
        page_table.special_regions.subtract(
            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
    }
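
    // A virtual address is considered valid if its page either has a direct host
    // pointer or is currently owned by the rasterizer cache.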
    bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
        const auto& page_table = process.VMManager().page_table;

        const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
        if (page_pointer != nullptr) {
            return true;
        }

        if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) {
            return true;
        }

        // Special (MMIO) pages carry no direct host pointer, so they, like unmapped
        // pages, are not considered directly addressable.
        return false;
    }

    bool IsValidVirtualAddress(VAddr vaddr) const {
        return IsValidVirtualAddress(*system.CurrentProcess(), vaddr);
    }

    /**
     * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
     * using a VMA from the given process.
     */
    u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
        const auto& vm_manager = process.VMManager();

        const auto it = vm_manager.FindVMA(vaddr);
        DEBUG_ASSERT(vm_manager.IsValidHandle(it));

        u8* direct_pointer = nullptr;
        const auto& vma = it->second;
        switch (vma.type) {
        case Kernel::VMAType::AllocatedMemoryBlock:
            direct_pointer = vma.backing_block->data() + vma.offset;
            break;
        case Kernel::VMAType::BackingMemory:
            direct_pointer = vma.backing_memory;
            break;
        case Kernel::VMAType::Free:
            return nullptr;
        default:
            UNREACHABLE();
        }

        return direct_pointer + (vaddr - vma.base);
    }

    /**
     * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
     * using a VMA from the current process.
     */
    u8* GetPointerFromVMA(VAddr vaddr) {
        return GetPointerFromVMA(*system.CurrentProcess(), vaddr);
    }
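
    // Resolves a virtual address to a host pointer: first through the flat page-pointer
    // array (the fast path), then through a VMA walk for rasterizer-cached pages.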
    u8* GetPointer(const VAddr vaddr) {
        u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
        if (page_pointer != nullptr) {
            // Page pointers are stored pre-biased by their page's base address (see
            // MapPages), so adding the full virtual address yields the host address.
            return page_pointer + vaddr;
        }

        if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
            Common::PageType::RasterizerCachedMemory) {
            return GetPointerFromVMA(vaddr);
        }

        LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr);
        return nullptr;
    }
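
    // Width-specific accessors; each forwards to the templated Read/Write further
    // below with the matching little-endian type.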
    u8 Read8(const VAddr addr) {
        return Read<u8>(addr);
    }

    u16 Read16(const VAddr addr) {
        return Read<u16_le>(addr);
    }

    u32 Read32(const VAddr addr) {
        return Read<u32_le>(addr);
    }

    u64 Read64(const VAddr addr) {
        return Read<u64_le>(addr);
    }

    void Write8(const VAddr addr, const u8 data) {
        Write<u8>(addr, data);
    }

    void Write16(const VAddr addr, const u16 data) {
        Write<u16_le>(addr, data);
    }

    void Write32(const VAddr addr, const u32 data) {
        Write<u32_le>(addr, data);
    }

    void Write64(const VAddr addr, const u64 data) {
        Write<u64_le>(addr, data);
    }
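
    // Reads a NUL-terminated string one byte at a time, stopping at the terminator
    // or after max_length bytes, whichever comes first.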
    std::string ReadCString(VAddr vaddr, std::size_t max_length) {
        std::string string;
        string.reserve(max_length);
        for (std::size_t i = 0; i < max_length; ++i) {
            const char c = Read8(vaddr);
            if (c == '\0') {
                break;
            }
            string.push_back(c);
            ++vaddr;
        }
        string.shrink_to_fit();
        return string;
    }
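
    // The block operations below all share one walking pattern: clamp each copy to the
    // end of the current page, dispatch on the page's attribute, then advance to the
    // next page. Unmapped pages are logged (reads yield zeros), while rasterizer-cached
    // pages are flushed or invalidated on the GPU before being touched through the VMA.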
    void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
                   const std::size_t size) {
        const auto& page_table = process.VMManager().page_table;

        std::size_t remaining_size = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            switch (page_table.attributes[page_index]) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                std::memset(dest_buffer, 0, copy_amount);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(page_table.pointers[page_index]);

                const u8* const src_ptr =
                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_buffer, src_ptr, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
                system.GPU().FlushRegion(current_vaddr, copy_amount);
                std::memcpy(dest_buffer, host_ptr, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }
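
    // Identical walk to ReadBlock, but without the GPU flush on rasterizer-cached
    // pages; hence the "Unsafe" name, as the cached data may not be coherent.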
    void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
                         const std::size_t size) {
        const auto& page_table = process.VMManager().page_table;

        std::size_t remaining_size = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            switch (page_table.attributes[page_index]) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped ReadBlockUnsafe @ 0x{:016X} (start address = 0x{:016X}, "
                          "size = {})",
                          current_vaddr, src_addr, size);
                std::memset(dest_buffer, 0, copy_amount);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(page_table.pointers[page_index]);

                const u8* const src_ptr =
                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_buffer, src_ptr, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
                std::memcpy(dest_buffer, host_ptr, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }

    void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
        ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size);
    }

    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
        ReadBlockUnsafe(*system.CurrentProcess(), src_addr, dest_buffer, size);
    }

    void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
                    const std::size_t size) {
        const auto& page_table = process.VMManager().page_table;
        std::size_t remaining_size = size;
        std::size_t page_index = dest_addr >> PAGE_BITS;
        std::size_t page_offset = dest_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            switch (page_table.attributes[page_index]) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(page_table.pointers[page_index]);

                u8* const dest_ptr =
                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_ptr, src_buffer, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                std::memcpy(host_ptr, src_buffer, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }
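
    // WriteBlock without the GPU invalidate on rasterizer-cached pages; only safe
    // when the caller guarantees the GPU's view of the range is handled separately.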
    void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr,
                          const void* src_buffer, const std::size_t size) {
        const auto& page_table = process.VMManager().page_table;
        std::size_t remaining_size = size;
        std::size_t page_index = dest_addr >> PAGE_BITS;
        std::size_t page_offset = dest_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            switch (page_table.attributes[page_index]) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped WriteBlockUnsafe @ 0x{:016X} (start address = 0x{:016X}, "
                          "size = {})",
                          current_vaddr, dest_addr, size);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(page_table.pointers[page_index]);

                u8* const dest_ptr =
                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_ptr, src_buffer, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
                std::memcpy(host_ptr, src_buffer, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }

    void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
        WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size);
    }

    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
        WriteBlockUnsafe(*system.CurrentProcess(), dest_addr, src_buffer, size);
    }

    void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) {
        const auto& page_table = process.VMManager().page_table;
        std::size_t remaining_size = size;
        std::size_t page_index = dest_addr >> PAGE_BITS;
        std::size_t page_offset = dest_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            switch (page_table.attributes[page_index]) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(page_table.pointers[page_index]);

                u8* dest_ptr =
                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
                std::memset(dest_ptr, 0, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                std::memset(host_ptr, 0, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            remaining_size -= copy_amount;
        }
    }

    void ZeroBlock(const VAddr dest_addr, const std::size_t size) {
        ZeroBlock(*system.CurrentProcess(), dest_addr, size);
    }
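
    // Copies by reading each source page directly and delegating the store to
    // WriteBlock, so destination-side attributes (unmapped, cached) are handled
    // by the write path.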
    void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
                   const std::size_t size) {
        const auto& page_table = process.VMManager().page_table;
        std::size_t remaining_size = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            switch (page_table.attributes[page_index]) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                ZeroBlock(process, dest_addr, copy_amount);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(page_table.pointers[page_index]);
                const u8* src_ptr =
                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
                WriteBlock(process, dest_addr, src_ptr, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
                system.GPU().FlushRegion(current_vaddr, copy_amount);
                WriteBlock(process, dest_addr, host_ptr, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            dest_addr += static_cast<VAddr>(copy_amount);
            src_addr += static_cast<VAddr>(copy_amount);
            remaining_size -= copy_amount;
        }
    }

    void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
        return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size);
    }
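
    // Toggles ownership of a virtual range between normal memory and the rasterizer
    // cache, clearing or restoring the direct page pointers accordingly.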
    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
        if (vaddr == 0) {
            return;
        }

        // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
        // address space, marking the region as un/cached. The region is marked un/cached at a
        // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
        // is different). This assumes the specified GPU address region is contiguous as well.

        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
            Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];

            if (cached) {
                // Switch page type to cached if now cached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its
                    // address space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::Memory:
                    page_type = Common::PageType::RasterizerCachedMemory;
                    current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
                    break;
                case Common::PageType::RasterizerCachedMemory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already marked as cached.
                    break;
                default:
                    UNREACHABLE();
                }
            } else {
                // Switch page type to uncached if now uncached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its
                    // address space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::Memory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already unmarked as cached.
                    break;
                case Common::PageType::RasterizerCachedMemory: {
                    u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
                    if (pointer == nullptr) {
                        // It's possible that this function has been called while updating the
                        // pagetable after unmapping a VMA. In that case the underlying VMA will
                        // no longer exist, and we should just leave the pagetable entry blank.
                        page_type = Common::PageType::Unmapped;
                    } else {
                        page_type = Common::PageType::Memory;
                        current_page_table->pointers[vaddr >> PAGE_BITS] =
                            pointer - (vaddr & ~PAGE_MASK);
                    }
                    break;
                }
                default:
                    UNREACHABLE();
                }
            }
        }
    }

    /**
     * Maps a region of pages as a specific type.
     *
     * @param page_table The page table to use to perform the mapping.
     * @param base       The base page index to begin mapping at.
     * @param size       The total size of the range, in pages.
     * @param memory     The memory to map.
     * @param type       The page type to map the memory as.
     */
    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
                  Common::PageType type) {
        LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
                  (base + size) * PAGE_SIZE);

        // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
            auto& gpu = system.GPU();
            for (u64 i = 0; i < size; i++) {
                const auto page = base + i;
                if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
                }
            }
        }

        const VAddr end = base + size;
        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                   base + page_table.pointers.size());

        std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);

        if (memory == nullptr) {
            std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end,
                      memory);
        } else {
            while (base != end) {
                page_table.pointers[base] = memory - (base << PAGE_BITS);
                ASSERT_MSG(page_table.pointers[base],
                           "memory mapping base yields a nullptr within the table");

                base += 1;
                memory += PAGE_SIZE;
            }
        }
    }
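
    // A worked example of the biased page pointers used by the fast paths below,
    // assuming 4 KiB pages (PAGE_BITS == 12): mapping a host block H at page index
    // 0x10 stores pointers[0x10] = H - 0x10000, so an access at virtual address
    // 0x10a34 computes (H - 0x10000) + 0x10a34 == H + 0xa34 with a single add.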
    /**
     * Reads a particular data type out of memory at the given virtual address.
     *
     * @param vaddr The virtual address to read the data type from.
     *
     * @tparam T The data type to read out of memory. This type *must* be
     *           trivially copyable, otherwise the behavior of this function
     *           is undefined.
     *
     * @returns The instance of T read from the specified virtual address.
     */
    template <typename T>
    T Read(const VAddr vaddr) {
        const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
        if (page_pointer != nullptr) {
            // NOTE: Avoid adding any extra logic to this fast-path block
            T value;
            std::memcpy(&value, &page_pointer[vaddr], sizeof(T));
            return value;
        }

        const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
        switch (type) {
        case Common::PageType::Unmapped:
            LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
            return 0;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
            break;
        case Common::PageType::RasterizerCachedMemory: {
            const u8* const host_ptr = GetPointerFromVMA(vaddr);
            system.GPU().FlushRegion(vaddr, sizeof(T));
            T value;
            std::memcpy(&value, host_ptr, sizeof(T));
            return value;
        }
        default:
            UNREACHABLE();
        }
        return {};
    }

    /**
     * Writes a particular data type to memory at the given virtual address.
     *
     * @param vaddr The virtual address to write the data type to.
     * @param data  The data to write.
     *
     * @tparam T The data type to write to memory. This type *must* be
     *           trivially copyable, otherwise the behavior of this function
     *           is undefined.
     */
    template <typename T>
    void Write(const VAddr vaddr, const T data) {
        u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
        if (page_pointer != nullptr) {
            // NOTE: Avoid adding any extra logic to this fast-path block
            std::memcpy(&page_pointer[vaddr], &data, sizeof(T));
            return;
        }

        const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
        switch (type) {
        case Common::PageType::Unmapped:
            LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
                      static_cast<u32>(data), vaddr);
            return;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
            break;
        case Common::PageType::RasterizerCachedMemory: {
            u8* const host_ptr{GetPointerFromVMA(vaddr)};
            system.GPU().InvalidateRegion(vaddr, sizeof(T));
            std::memcpy(host_ptr, &data, sizeof(T));
            break;
        }
        default:
            UNREACHABLE();
        }
    }

    Common::PageTable* current_page_table = nullptr;
    Core::System& system;
};

Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
Memory::~Memory() = default;
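
// The public Memory interface below forwards each call to the pimpl'd
// implementation above.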
void Memory::SetCurrentPageTable(Kernel::Process& process) {
    impl->SetCurrentPageTable(process);
}

void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
                             Kernel::PhysicalMemory& memory, VAddr offset) {
    impl->MapMemoryRegion(page_table, base, size, memory, offset);
}

void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
    impl->MapMemoryRegion(page_table, base, size, target);
}

void Memory::MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
                         Common::MemoryHookPointer mmio_handler) {
    impl->MapIoRegion(page_table, base, size, std::move(mmio_handler));
}

void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
    impl->UnmapRegion(page_table, base, size);
}

void Memory::AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
                          Common::MemoryHookPointer hook) {
    impl->AddDebugHook(page_table, base, size, std::move(hook));
}

void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
                             Common::MemoryHookPointer hook) {
    impl->RemoveDebugHook(page_table, base, size, std::move(hook));
}

bool Memory::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
    return impl->IsValidVirtualAddress(process, vaddr);
}

bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
    return impl->IsValidVirtualAddress(vaddr);
}

u8* Memory::GetPointer(VAddr vaddr) {
    return impl->GetPointer(vaddr);
}

const u8* Memory::GetPointer(VAddr vaddr) const {
    return impl->GetPointer(vaddr);
}

u8 Memory::Read8(const VAddr addr) {
    return impl->Read8(addr);
}

u16 Memory::Read16(const VAddr addr) {
    return impl->Read16(addr);
}

u32 Memory::Read32(const VAddr addr) {
    return impl->Read32(addr);
}

u64 Memory::Read64(const VAddr addr) {
    return impl->Read64(addr);
}

void Memory::Write8(VAddr addr, u8 data) {
    impl->Write8(addr, data);
}

void Memory::Write16(VAddr addr, u16 data) {
    impl->Write16(addr, data);
}

void Memory::Write32(VAddr addr, u32 data) {
    impl->Write32(addr, data);
}

void Memory::Write64(VAddr addr, u64 data) {
    impl->Write64(addr, data);
}

std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
    return impl->ReadCString(vaddr, max_length);
}

void Memory::ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
                       const std::size_t size) {
    impl->ReadBlock(process, src_addr, dest_buffer, size);
}

void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
    impl->ReadBlock(src_addr, dest_buffer, size);
}

void Memory::ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr,
                             void* dest_buffer, const std::size_t size) {
    impl->ReadBlockUnsafe(process, src_addr, dest_buffer, size);
}

void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
    impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
}

void Memory::WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
                        std::size_t size) {
    impl->WriteBlock(process, dest_addr, src_buffer, size);
}

void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
    impl->WriteBlock(dest_addr, src_buffer, size);
}

void Memory::WriteBlockUnsafe(const Kernel::Process& process, VAddr dest_addr,
                              const void* src_buffer, std::size_t size) {
    impl->WriteBlockUnsafe(process, dest_addr, src_buffer, size);
}

void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
                              const std::size_t size) {
    impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
}

void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) {
    impl->ZeroBlock(process, dest_addr, size);
}

void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) {
    impl->ZeroBlock(dest_addr, size);
}

void Memory::CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
                       const std::size_t size) {
    impl->CopyBlock(process, dest_addr, src_addr, size);
}

void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
    impl->CopyBlock(dest_addr, src_addr, size);
}

void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
    impl->RasterizerMarkRegionCached(vaddr, size, cached);
}

bool IsKernelVirtualAddress(const VAddr vaddr) {
    return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
}

} // namespace Core::Memory