chore: make yuzu REUSE compliant
[REUSE] is a specification that aims at making file copyright
information consistent, so that it can be both human and machine
readable. It basically requires that all files have a header containing
copyright and licensing information. When this isn't possible, like
when dealing with binary assets, generated files or embedded third-party
dependencies, it is permitted to insert copyright information in the
`.reuse/dep5` file.
Oh, and it also requires that all the licenses used in the project are
present in the `LICENSES` folder, that's why the diff is so huge.
This can be done automatically with `reuse download --all`.
The `reuse` tool also contains a handy subcommand that analyzes the
project and tells whether or not the project is (still) compliant,
`reuse lint`.
Following REUSE has a few advantages over the current approach:
- Copyright information is easy to access for users / downstream
- Files like `dist/license.md` do not need to exist anymore, as
`.reuse/dep5` is used instead
- `reuse lint` makes it easy to ensure that copyright information of
files like binary assets / images is always accurate and up to date
To add copyright information of files that didn't have it I looked up
who committed what and when, for each file. As yuzu contributors do not
have to sign a CLA or similar, I couldn't assume that copyright ownership
belonged to the "yuzu Emulator Project", so I used the name and/or email of
the commit author instead.
[REUSE]: https://reuse.software
Follow-up to 01cf05bc75b1e47beb08937439f3ed9339e7b254
2022-05-15 01:06:02 +01:00
|
|
|
// SPDX-FileCopyrightText: 2015 Citra Emulator Project
|
2023-11-10 15:43:56 +00:00
|
|
|
// SPDX-FileCopyrightText: 2018 yuzu Emulator Project
|
chore: make yuzu REUSE compliant
[REUSE] is a specification that aims at making file copyright
information consistent, so that it can be both human and machine
readable. It basically requires that all files have a header containing
copyright and licensing information. When this isn't possible, like
when dealing with binary assets, generated files or embedded third-party
dependencies, it is permitted to insert copyright information in the
`.reuse/dep5` file.
Oh, and it also requires that all the licenses used in the project are
present in the `LICENSES` folder, that's why the diff is so huge.
This can be done automatically with `reuse download --all`.
The `reuse` tool also contains a handy subcommand that analyzes the
project and tells whether or not the project is (still) compliant,
`reuse lint`.
Following REUSE has a few advantages over the current approach:
- Copyright information is easy to access for users / downstream
- Files like `dist/license.md` do not need to exist anymore, as
`.reuse/dep5` is used instead
- `reuse lint` makes it easy to ensure that copyright information of
files like binary assets / images is always accurate and up to date
To add copyright information of files that didn't have it I looked up
who committed what and when, for each file. As yuzu contributors do not
have to sign a CLA or similar, I couldn't assume that copyright ownership
belonged to the "yuzu Emulator Project", so I used the name and/or email of
the commit author instead.
[REUSE]: https://reuse.software
Follow-up to 01cf05bc75b1e47beb08937439f3ed9339e7b254
2022-05-15 01:06:02 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2013-09-19 04:52:51 +01:00
|
|
|
|
2018-01-27 15:16:39 +00:00
|
|
|
#include <algorithm>
|
2015-09-10 04:23:44 +01:00
|
|
|
#include <cstring>
|
2023-11-10 15:43:56 +00:00
|
|
|
#include <mutex>
|
2023-06-28 18:32:50 +01:00
|
|
|
#include <span>
|
2018-07-19 00:02:47 +01:00
|
|
|
|
2015-05-13 03:38:56 +01:00
|
|
|
#include "common/assert.h"
|
2020-03-07 22:59:42 +00:00
|
|
|
#include "common/atomic_ops.h"
|
2015-05-06 08:06:12 +01:00
|
|
|
#include "common/common_types.h"
|
2023-12-26 04:21:08 +00:00
|
|
|
#include "common/heap_tracker.h"
|
2015-05-06 08:06:12 +01:00
|
|
|
#include "common/logging/log.h"
|
2019-03-02 20:20:28 +00:00
|
|
|
#include "common/page_table.h"
|
2023-11-10 15:43:56 +00:00
|
|
|
#include "common/scope_exit.h"
|
2020-01-19 00:49:30 +00:00
|
|
|
#include "common/settings.h"
|
2015-05-06 08:06:12 +01:00
|
|
|
#include "common/swap.h"
|
2017-09-24 22:44:13 +01:00
|
|
|
#include "core/core.h"
|
2020-04-09 03:50:46 +01:00
|
|
|
#include "core/device_memory.h"
|
2023-06-28 05:28:13 +01:00
|
|
|
#include "core/gpu_dirty_memory_manager.h"
|
2023-04-30 16:14:06 +01:00
|
|
|
#include "core/hardware_properties.h"
|
2021-02-13 01:58:31 +00:00
|
|
|
#include "core/hle/kernel/k_page_table.h"
|
2021-04-24 06:04:28 +01:00
|
|
|
#include "core/hle/kernel/k_process.h"
|
2016-09-21 07:52:38 +01:00
|
|
|
#include "core/memory.h"
|
2019-02-23 04:38:45 +00:00
|
|
|
#include "video_core/gpu.h"
|
2023-12-25 06:32:16 +00:00
|
|
|
#include "video_core/host1x/gpu_device_memory_manager.h"
|
|
|
|
#include "video_core/host1x/host1x.h"
|
2023-04-30 16:14:06 +01:00
|
|
|
#include "video_core/rasterizer_download_area.h"
|
|
|
|
|
2020-03-31 20:10:44 +01:00
|
|
|
namespace Core::Memory {
|
2015-05-13 03:38:56 +01:00
|
|
|
|
2023-07-25 14:51:06 +01:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Returns true when [addr, addr + size) fits entirely inside the page
// table's virtual address space. The first comparison also rejects ranges
// whose end wraps around the integer range.
bool AddressSpaceContains(const Common::PageTable& table, const Common::ProcessAddress addr,
                          const std::size_t size) {
    const Common::ProcessAddress range_end = addr + size;
    const Common::ProcessAddress space_end = 1ULL << table.GetAddressSpaceBits();
    return range_end >= addr && range_end <= space_end;
}
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2019-11-26 17:33:20 +00:00
|
|
|
// Implementation class used to keep the specifics of the memory subsystem hidden
|
|
|
|
// from outside classes. This also allows modification to the internals of the memory
|
|
|
|
// subsystem without needing to rebuild all files that make use of the memory interface.
|
|
|
|
struct Memory::Impl {
|
|
|
|
// Binds this implementation to its owning system instance; all state is
// initialized lazily by SetCurrentPageTable.
explicit Impl(Core::System& system_) : system{system_} {}
|
|
|
|
|
2023-11-28 19:30:39 +00:00
|
|
|
// Makes the given process' page table the active one and configures the
// fastmem arena and the host memory buffer used for mapping operations.
void SetCurrentPageTable(Kernel::KProcess& process) {
    current_page_table = &process.GetPageTable().GetImpl();

    // Only the application process gets a fastmem arena, and only when the
    // fastmem setting is enabled.
    const bool is_application = std::addressof(process) == system.ApplicationProcess();
    if (is_application && Settings::IsFastmemEnabled()) {
        current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
    } else {
        current_page_table->fastmem_arena = nullptr;
    }

#ifdef __linux__
    // On Linux, route buffer operations through the heap tracker.
    heap_tracker.emplace(system.DeviceMemory().buffer);
    buffer = std::addressof(*heap_tracker);
#else
    buffer = std::addressof(system.DeviceMemory().buffer);
#endif
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Maps [base, base + size) to physical memory at target. Both base and size
// must be page aligned, and target must lie inside DRAM.
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
                     Common::PhysicalAddress target, Common::MemoryPermission perms,
                     bool separate_heap) {
    ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
    ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
    ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
               GetInteger(target));

    MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
             Common::PageType::Memory);

    // Mirror the mapping into the fastmem arena when one is active.
    if (current_page_table->fastmem_arena) {
        buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
                    separate_heap);
    }
}
|
2019-11-26 17:33:20 +00:00
|
|
|
|
2023-12-26 04:21:08 +00:00
|
|
|
// Unmaps [base, base + size), marking the pages unmapped in the page table
// and removing any corresponding fastmem arena mapping.
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
                 bool separate_heap) {
    ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
    ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));

    MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
             Common::PageType::Unmapped);

    if (current_page_table->fastmem_arena) {
        buffer->Unmap(GetInteger(base), size, separate_heap);
    }
}
|
2018-05-03 03:36:51 +01:00
|
|
|
|
2023-11-23 09:26:06 +00:00
|
|
|
// Applies the given permissions to [vaddr, vaddr + size) in the fastmem
// arena. Rasterizer-cached pages are skipped; consecutive other pages are
// coalesced into as few Protect() calls as possible.
void ProtectRegion(Common::PageTable& page_table, VAddr vaddr, u64 size,
                   Common::MemoryPermission perms) {
    ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
    ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);

    // Nothing to protect when fastmem is not in use.
    if (!current_page_table->fastmem_arena) {
        return;
    }

    u64 run_bytes{};
    u64 run_begin{};
    // Flushes the pending run of pages, if any, with a single Protect call.
    const auto flush_run = [&] {
        if (run_bytes > 0) {
            buffer->Protect(run_begin, run_bytes, perms);
            run_bytes = 0;
        }
    };

    for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
        const Common::PageType page_type{
            current_page_table->pointers[addr >> YUZU_PAGEBITS].Type()};
        if (page_type == Common::PageType::RasterizerCachedMemory) {
            // Rasterizer-cached pages keep their current protection.
            flush_run();
        } else {
            if (run_bytes == 0) {
                run_begin = addr;
            }
            run_bytes += YUZU_PAGESIZE;
        }
    }
    flush_run();
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Resolves a virtual address that is rasterizer-cached to a host pointer via
// the backing physical address, or nullptr when there is no backing page.
[[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
    const Common::PhysicalAddress paddr{
        current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
    return paddr ? system.DeviceMemory().GetPointer<u8>(paddr + vaddr) : nullptr;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Resolves a virtual address in debug memory to a host pointer via the
// backing physical address, or nullptr when there is no backing page.
[[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
    const Common::PhysicalAddress paddr{
        current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
    return paddr != 0 ? system.DeviceMemory().GetPointer<u8>(paddr + vaddr) : nullptr;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Reads one byte; alignment is irrelevant for byte accesses.
u8 Read8(const Common::ProcessAddress addr) {
    return Read<u8>(addr);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Reads a little-endian u16. Unaligned reads are assembled from two byte
// reads so they cannot fault on an alignment boundary.
u16 Read16(const Common::ProcessAddress addr) {
    if ((addr & 1) == 0) {
        return Read<u16_le>(addr);
    }
    const u32 lo{Read<u8>(addr)};
    const u32 hi{Read<u8>(addr + sizeof(u8))};
    return static_cast<u16>((hi << 8) | lo);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Reads a little-endian u32. Unaligned reads are assembled from two u16
// reads (which themselves handle misalignment).
u32 Read32(const Common::ProcessAddress addr) {
    if ((addr & 3) == 0) {
        return Read<u32_le>(addr);
    }
    const u32 lo{Read16(addr)};
    const u32 hi{Read16(addr + sizeof(u16))};
    return (hi << 16) | lo;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Reads a little-endian u64. Unaligned reads are assembled from two u32
// reads (which themselves handle misalignment).
u64 Read64(const Common::ProcessAddress addr) {
    if ((addr & 7) == 0) {
        return Read<u64_le>(addr);
    }
    const u32 lo{Read32(addr)};
    const u32 hi{Read32(addr + sizeof(u32))};
    return (static_cast<u64>(hi) << 32) | lo;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Writes one byte; alignment is irrelevant for byte accesses.
void Write8(const Common::ProcessAddress addr, const u8 data) {
    Write<u8>(addr, data);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Writes a little-endian u16. Unaligned writes are split into two byte
// writes so they cannot fault on an alignment boundary.
void Write16(const Common::ProcessAddress addr, const u16 data) {
    if ((addr & 1) == 0) {
        Write<u16_le>(addr, data);
        return;
    }
    Write<u8>(addr, static_cast<u8>(data));
    Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8));
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Writes a little-endian u32. Unaligned writes are split into two u16
// writes (which themselves handle misalignment).
void Write32(const Common::ProcessAddress addr, const u32 data) {
    if ((addr & 3) == 0) {
        Write<u32_le>(addr, data);
        return;
    }
    Write16(addr, static_cast<u16>(data));
    Write16(addr + sizeof(u16), static_cast<u16>(data >> 16));
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Writes a little-endian u64. Unaligned writes are split into two u32
// writes (which themselves handle misalignment).
void Write64(const Common::ProcessAddress addr, const u64 data) {
    if ((addr & 7) == 0) {
        Write<u64_le>(addr, data);
        return;
    }
    Write32(addr, static_cast<u32>(data));
    Write32(addr + sizeof(u32), static_cast<u32>(data >> 32));
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Atomic compare-and-exchange of one byte; returns true on success.
bool WriteExclusive8(const Common::ProcessAddress addr, const u8 data, const u8 expected) {
    return WriteExclusive<u8>(addr, data, expected);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Atomic compare-and-exchange of a little-endian u16; returns true on success.
bool WriteExclusive16(const Common::ProcessAddress addr, const u16 data, const u16 expected) {
    return WriteExclusive<u16_le>(addr, data, expected);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Atomic compare-and-exchange of a little-endian u32; returns true on success.
bool WriteExclusive32(const Common::ProcessAddress addr, const u32 data, const u32 expected) {
    return WriteExclusive<u32_le>(addr, data, expected);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Atomic compare-and-exchange of a little-endian u64; returns true on success.
bool WriteExclusive64(const Common::ProcessAddress addr, const u64 data, const u64 expected) {
    return WriteExclusive<u64_le>(addr, data, expected);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
// Reads a NUL-terminated string from guest memory, stopping at the first
// '\0' or after max_length bytes, whichever comes first.
std::string ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
    std::string result;
    result.reserve(max_length);
    for (std::size_t i = 0; i < max_length; ++i) {
        const char c = Read<s8>(vaddr);
        if (c == '\0') {
            break;
        }
        result.push_back(c);
        ++vaddr;
    }
    result.shrink_to_fit();
    return result;
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
|
|
|
|
auto on_memory, auto on_rasterizer, auto increment) {
|
2023-12-18 01:46:41 +00:00
|
|
|
const auto& page_table = *current_page_table;
|
2019-11-26 21:29:34 +00:00
|
|
|
std::size_t remaining_size = size;
|
2022-08-19 00:28:55 +01:00
|
|
|
std::size_t page_index = addr >> YUZU_PAGEBITS;
|
|
|
|
std::size_t page_offset = addr & YUZU_PAGEMASK;
|
2023-07-15 03:19:59 +01:00
|
|
|
bool user_accessible = true;
|
2019-11-26 21:29:34 +00:00
|
|
|
|
2023-07-25 14:51:06 +01:00
|
|
|
if (!AddressSpaceContains(page_table, addr, size)) [[unlikely]] {
|
|
|
|
on_unmapped(size, addr);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-08-05 21:11:14 +01:00
|
|
|
while (remaining_size) {
|
2019-11-26 21:29:34 +00:00
|
|
|
const std::size_t copy_amount =
|
2022-08-19 00:28:55 +01:00
|
|
|
std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
|
|
|
|
const auto current_vaddr =
|
2023-03-18 01:26:04 +00:00
|
|
|
static_cast<u64>((page_index << YUZU_PAGEBITS) + page_offset);
|
2019-11-26 21:29:34 +00:00
|
|
|
|
core/memory: Read and write page table atomically
Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.
Configure Dynarmic to mask out these bits on pointer reads.
While we are at it, remove some unused attributes carried over from
Citra.
Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:
mov rdi,rcx
shr rdx,0Ch
mov r8,qword ptr [rax+8]
mov rax,qword ptr [r8+rdx*8]
mov rdx,rax
-and al,3
and rdx,0FFFFFFFFFFFFFFFCh
je Core::Memory::Memory::Impl::Read<unsigned char>
mov rax,qword ptr [vaddr]
movzx eax,byte ptr [rdx+rax]
2020-12-30 00:16:57 +00:00
|
|
|
const auto [pointer, type] = page_table.pointers[page_index].PointerType();
|
|
|
|
switch (type) {
|
2019-11-26 21:29:34 +00:00
|
|
|
case Common::PageType::Unmapped: {
|
2023-07-15 03:19:59 +01:00
|
|
|
user_accessible = false;
|
2021-08-05 21:11:14 +01:00
|
|
|
on_unmapped(copy_amount, current_vaddr);
|
2019-11-26 21:29:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Common::PageType::Memory: {
|
Fixes and workarounds to make UBSan happier on macOS
There are still some other issues not addressed here, but it's a start.
Workarounds for false-positive reports:
- `RasterizerAccelerated`: Put a gigantic array behind a `unique_ptr`,
because UBSan has a [hardcoded limit](https://stackoverflow.com/questions/64531383/c-runtime-error-using-fsanitize-undefined-object-has-a-possibly-invalid-vp)
of how big it thinks objects can be, specifically when dealing with
offset-to-top values used with multiple inheritance. Hopefully this
doesn't have a performance impact.
- `QueryCacheBase::QueryCacheBase`: Avoid an operation that UBSan thinks
is UB even though it at least arguably isn't. See the link in the
comment for more information.
Fixes for correct reports:
- `PageTable`, `Memory`: Use `uintptr_t` values instead of pointers to
avoid UB from pointer overflow (when pointer arithmetic wraps around
the address space).
- `KScheduler::Reload`: `thread->GetOwnerProcess()` can be `nullptr`;
avoid calling methods on it in this case. (The existing code returns
a garbage reference to a field, which is then passed into
`LoadWatchpointArray`, and apparently it's never used, so it's
harmless in practice but still triggers UBSan.)
- `KAutoObject::Close`: This function calls `this->Destroy()`, which
overwrites the beginning of the object with junk (specifically a free
list pointer). Then it calls `this->UnregisterWithKernel()`. UBSan
complains about a type mismatch because the vtable has been
overwritten, and I believe this is indeed UB. `UnregisterWithKernel`
also loads `m_kernel` from the 'freed' object, which seems to be
technically safe (the overwriting doesn't extend as far as that
field), but seems dubious. Switch to a `static` method and load
`m_kernel` in advance.
2023-07-01 23:00:39 +01:00
|
|
|
u8* mem_ptr =
|
|
|
|
reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS));
|
2021-08-05 21:11:14 +01:00
|
|
|
on_memory(copy_amount, mem_ptr);
|
2019-11-26 21:29:34 +00:00
|
|
|
break;
|
|
|
|
}
|
2022-06-06 17:56:01 +01:00
|
|
|
case Common::PageType::DebugMemory: {
|
|
|
|
u8* const mem_ptr{GetPointerFromDebugMemory(current_vaddr)};
|
|
|
|
on_memory(copy_amount, mem_ptr);
|
|
|
|
break;
|
|
|
|
}
|
2019-11-26 21:29:34 +00:00
|
|
|
case Common::PageType::RasterizerCachedMemory: {
|
2021-08-05 21:11:14 +01:00
|
|
|
u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
|
|
|
|
on_rasterizer(current_vaddr, copy_amount, host_ptr);
|
2019-11-26 21:29:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
page_index++;
|
|
|
|
page_offset = 0;
|
2021-08-05 21:11:14 +01:00
|
|
|
increment(copy_amount);
|
2019-11-26 21:29:34 +00:00
|
|
|
remaining_size -= copy_amount;
|
|
|
|
}
|
2023-07-15 03:19:59 +01:00
|
|
|
|
|
|
|
return user_accessible;
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
2021-08-05 21:11:14 +01:00
|
|
|
template <bool UNSAFE>
|
2023-07-15 03:19:59 +01:00
|
|
|
bool ReadBlockImpl(const Common::ProcessAddress src_addr, void* dest_buffer,
|
|
|
|
const std::size_t size) {
|
|
|
|
return WalkBlock(
|
|
|
|
src_addr, size,
|
2021-08-05 21:11:14 +01:00
|
|
|
[src_addr, size, &dest_buffer](const std::size_t copy_amount,
|
2023-03-18 01:26:04 +00:00
|
|
|
const Common::ProcessAddress current_vaddr) {
|
2020-04-05 22:23:49 +01:00
|
|
|
LOG_ERROR(HW_Memory,
|
|
|
|
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
|
2023-03-18 01:26:04 +00:00
|
|
|
GetInteger(current_vaddr), GetInteger(src_addr), size);
|
2020-04-05 22:23:49 +01:00
|
|
|
std::memset(dest_buffer, 0, copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
},
|
2022-10-21 07:34:07 +01:00
|
|
|
[&](const std::size_t copy_amount, const u8* const src_ptr) {
|
2020-04-05 22:23:49 +01:00
|
|
|
std::memcpy(dest_buffer, src_ptr, copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
},
|
2023-03-18 01:26:04 +00:00
|
|
|
[&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
|
2022-10-21 07:34:07 +01:00
|
|
|
const u8* const host_ptr) {
|
2021-08-05 21:29:43 +01:00
|
|
|
if constexpr (!UNSAFE) {
|
2023-04-30 16:14:06 +01:00
|
|
|
HandleRasterizerDownload(GetInteger(current_vaddr), copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
}
|
2020-04-05 22:23:49 +01:00
|
|
|
std::memcpy(dest_buffer, host_ptr, copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
},
|
2022-10-21 07:34:07 +01:00
|
|
|
[&](const std::size_t copy_amount) {
|
2021-08-05 21:11:14 +01:00
|
|
|
dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
|
|
|
|
});
|
2020-04-05 22:23:49 +01:00
|
|
|
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
// Safe block read: rasterizer-cached pages are downloaded first.
bool ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
               const std::size_t size) {
    return ReadBlockImpl<false>(src_addr, dest_buffer, size);
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
// Unsafe block read: skips the rasterizer download, so GPU-modified pages
// may be stale. Callers must know the range is CPU-coherent.
bool ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
                     const std::size_t size) {
    return ReadBlockImpl<true>(src_addr, dest_buffer, size);
}
|
|
|
|
|
2023-05-29 00:35:51 +01:00
|
|
|
// Returns a direct host pointer to [src_addr, src_addr + size) when the
// range stays inside one contiguous block, otherwise nullptr.
// NOTE(review): the end check uses (src_addr + size) rather than the last
// byte, so ranges ending exactly on a block boundary are rejected even when
// contiguous — conservative, but confirm this is intended.
const u8* GetSpan(const VAddr src_addr, const std::size_t size) const {
    const auto begin_block = current_page_table->blocks[src_addr >> YUZU_PAGEBITS];
    const auto end_block = current_page_table->blocks[(src_addr + size) >> YUZU_PAGEBITS];
    if (begin_block == end_block) {
        return GetPointerSilent(src_addr);
    }
    return nullptr;
}
|
|
|
|
|
|
|
|
// Mutable overload of GetSpan: returns a direct host pointer to the range
// when it stays inside one contiguous block, otherwise nullptr.
u8* GetSpan(const VAddr src_addr, const std::size_t size) {
    const auto begin_block = current_page_table->blocks[src_addr >> YUZU_PAGEBITS];
    const auto end_block = current_page_table->blocks[(src_addr + size) >> YUZU_PAGEBITS];
    if (begin_block == end_block) {
        return GetPointerSilent(src_addr);
    }
    return nullptr;
}
|
|
|
|
|
2021-08-05 21:11:14 +01:00
|
|
|
template <bool UNSAFE>
|
2023-07-15 03:19:59 +01:00
|
|
|
bool WriteBlockImpl(const Common::ProcessAddress dest_addr, const void* src_buffer,
|
|
|
|
const std::size_t size) {
|
|
|
|
return WalkBlock(
|
|
|
|
dest_addr, size,
|
2023-03-18 01:26:04 +00:00
|
|
|
[dest_addr, size](const std::size_t copy_amount,
|
|
|
|
const Common::ProcessAddress current_vaddr) {
|
2019-11-26 22:39:57 +00:00
|
|
|
LOG_ERROR(HW_Memory,
|
|
|
|
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
|
2023-03-18 01:26:04 +00:00
|
|
|
GetInteger(current_vaddr), GetInteger(dest_addr), size);
|
2021-08-05 21:11:14 +01:00
|
|
|
},
|
2022-10-21 07:34:07 +01:00
|
|
|
[&](const std::size_t copy_amount, u8* const dest_ptr) {
|
2019-11-26 22:39:57 +00:00
|
|
|
std::memcpy(dest_ptr, src_buffer, copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
},
|
2023-03-18 01:26:04 +00:00
|
|
|
[&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
|
|
|
|
u8* const host_ptr) {
|
2021-08-05 21:29:43 +01:00
|
|
|
if constexpr (!UNSAFE) {
|
2023-11-10 15:43:56 +00:00
|
|
|
HandleRasterizerWrite(GetInteger(current_vaddr), copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
}
|
2020-04-05 22:23:49 +01:00
|
|
|
std::memcpy(host_ptr, src_buffer, copy_amount);
|
2021-08-05 21:11:14 +01:00
|
|
|
},
|
2022-10-21 07:34:07 +01:00
|
|
|
[&](const std::size_t copy_amount) {
|
2021-08-05 21:11:14 +01:00
|
|
|
src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
|
|
|
|
});
|
2020-04-05 22:23:49 +01:00
|
|
|
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
// Safe block write: rasterizer-cached pages are notified of the write.
bool WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
                const std::size_t size) {
    return WriteBlockImpl<false>(dest_addr, src_buffer, size);
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
// Unsafe block write: skips rasterizer notification; GPU caches may become
// stale. Callers must know the range is CPU-coherent.
bool WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
                      const std::size_t size) {
    return WriteBlockImpl<true>(dest_addr, src_buffer, size);
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
// Fills size bytes of guest memory at dest_addr with zeros. Unmapped spans
// are logged and skipped; rasterizer-cached pages are notified of the write.
bool ZeroBlock(const Common::ProcessAddress dest_addr, const std::size_t size) {
    return WalkBlock(
        dest_addr, size,
        [&](const std::size_t copy_amount, const Common::ProcessAddress current_vaddr) {
            LOG_ERROR(HW_Memory,
                      "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                      GetInteger(current_vaddr), GetInteger(dest_addr), size);
        },
        [](const std::size_t copy_amount, u8* const dest_ptr) {
            std::memset(dest_ptr, 0, copy_amount);
        },
        [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
            u8* const host_ptr) {
            HandleRasterizerWrite(GetInteger(current_vaddr), copy_amount);
            std::memset(host_ptr, 0, copy_amount);
        },
        [](const std::size_t copy_amount) {});
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
// Copies size bytes within guest memory from src_addr to dest_addr.
// Unmapped source spans are logged and the destination is zero-filled;
// rasterizer-cached source pages are downloaded before copying.
bool CopyBlock(Common::ProcessAddress dest_addr, Common::ProcessAddress src_addr,
               const std::size_t size) {
    return WalkBlock(
        dest_addr, size,
        [&](const std::size_t copy_amount, const Common::ProcessAddress current_vaddr) {
            LOG_ERROR(HW_Memory,
                      "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                      GetInteger(current_vaddr), GetInteger(src_addr), size);
            ZeroBlock(dest_addr, copy_amount);
        },
        [&](const std::size_t copy_amount, const u8* const src_ptr) {
            WriteBlockImpl<false>(dest_addr, src_ptr, copy_amount);
        },
        [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
            u8* const host_ptr) {
            HandleRasterizerDownload(GetInteger(current_vaddr), copy_amount);
            WriteBlockImpl<false>(dest_addr, host_ptr, copy_amount);
        },
        [&](const std::size_t copy_amount) {
            // Advance both cursors in lock step.
            dest_addr += copy_amount;
            src_addr += copy_amount;
        });
}
|
|
|
|
|
2022-11-12 16:02:07 +00:00
|
|
|
template <typename Callback>
|
2023-07-15 03:19:59 +01:00
|
|
|
Result PerformCacheOperation(Common::ProcessAddress dest_addr, std::size_t size,
|
|
|
|
Callback&& cb) {
|
2022-11-12 16:02:07 +00:00
|
|
|
class InvalidMemoryException : public std::exception {};
|
|
|
|
|
|
|
|
try {
|
|
|
|
WalkBlock(
|
2023-07-15 03:19:59 +01:00
|
|
|
dest_addr, size,
|
2023-03-18 01:26:04 +00:00
|
|
|
[&](const std::size_t block_size, const Common::ProcessAddress current_vaddr) {
|
|
|
|
LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}",
|
|
|
|
GetInteger(current_vaddr));
|
2022-11-12 16:02:07 +00:00
|
|
|
throw InvalidMemoryException();
|
|
|
|
},
|
2022-12-10 17:24:33 +00:00
|
|
|
[&](const std::size_t block_size, u8* const host_ptr) {},
|
2023-03-18 01:26:04 +00:00
|
|
|
[&](const Common::ProcessAddress current_vaddr, const std::size_t block_size,
|
|
|
|
u8* const host_ptr) { cb(current_vaddr, block_size); },
|
2022-11-12 16:02:07 +00:00
|
|
|
[](const std::size_t block_size) {});
|
|
|
|
} catch (InvalidMemoryException&) {
|
|
|
|
return Kernel::ResultInvalidCurrentMemory;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ResultSuccess;
|
|
|
|
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
/**
 * Emulates the `dc ivac` instruction: invalidate to point of coherency.
 * For rasterizer-cached chunks this is a GPU flush followed by a CPU-side invalidate.
 *
 * @returns ResultSuccess, or Kernel::ResultInvalidCurrentMemory if the range is unmapped.
 */
Result InvalidateDataCache(Common::ProcessAddress dest_addr, std::size_t size) {
    return PerformCacheOperation(
        dest_addr, size,
        [&](const Common::ProcessAddress current_vaddr, const std::size_t block_size) {
            // GPU flush -> CPU invalidate
            HandleRasterizerDownload(GetInteger(current_vaddr), block_size);
        });
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
/**
 * Emulates the `dc cvac` instruction: store to point of coherency.
 * For rasterizer-cached chunks this is a CPU flush followed by a GPU-side invalidate.
 *
 * @returns ResultSuccess, or Kernel::ResultInvalidCurrentMemory if the range is unmapped.
 */
Result StoreDataCache(Common::ProcessAddress dest_addr, std::size_t size) {
    return PerformCacheOperation(
        dest_addr, size,
        [&](const Common::ProcessAddress current_vaddr, const std::size_t block_size) {
            // CPU flush -> GPU invalidate
            HandleRasterizerWrite(GetInteger(current_vaddr), block_size);
        });
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
/**
 * Emulates the `dc civac` instruction: store to point of coherency, then invalidate
 * from the cache. For rasterizer-cached chunks this is a CPU flush followed by a
 * GPU-side invalidate (same handling as StoreDataCache).
 *
 * @returns ResultSuccess, or Kernel::ResultInvalidCurrentMemory if the range is unmapped.
 */
Result FlushDataCache(Common::ProcessAddress dest_addr, std::size_t size) {
    return PerformCacheOperation(
        dest_addr, size,
        [&](const Common::ProcessAddress current_vaddr, const std::size_t block_size) {
            // CPU flush -> GPU invalidate
            HandleRasterizerWrite(GetInteger(current_vaddr), block_size);
        });
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/**
 * Marks or unmarks a guest region as debug memory (used for watchpoint handling).
 *
 * Marked pages lose their direct host pointer (Store(0, DebugMemory)) so accesses take
 * the slow path; unmarking restores the pointer via GetPointerFromDebugMemory.
 *
 * @param vaddr Guest virtual address of the start of the region.
 * @param size  Size of the region in bytes.
 * @param debug Whether to mark (true) or unmark (false) the region.
 */
void MarkRegionDebug(u64 vaddr, u64 size, bool debug) {
    // Null/out-of-range regions are silently ignored.
    if (vaddr == 0 || !AddressSpaceContains(*current_page_table, vaddr, size)) {
        return;
    }

    if (current_page_table->fastmem_arena) {
        // Debug pages get no permissions in the fastmem arena, forcing a fault (and thus
        // the slow path) on any access; unmarking restores full ReadWrite.
        const auto perm{debug ? Common::MemoryPermission{}
                              : Common::MemoryPermission::ReadWrite};
        buffer->Protect(vaddr, size, perm);
    }

    // Iterate over a contiguous CPU address space, marking/unmarking the region.
    // The region is at a granularity of CPU pages.
    const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
    for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
        const Common::PageType page_type{
            current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
        if (debug) {
            // Switch page type to debug if now debug
            switch (page_type) {
            case Common::PageType::Unmapped:
                ASSERT_MSG(false, "Attempted to mark unmapped pages as debug");
                break;
            case Common::PageType::RasterizerCachedMemory:
            case Common::PageType::DebugMemory:
                // Page is already marked.
                break;
            case Common::PageType::Memory:
                // Drop the host pointer so reads/writes fall back to the slow path.
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                    0, Common::PageType::DebugMemory);
                break;
            default:
                UNREACHABLE();
            }
        } else {
            // Switch page type to non-debug if now non-debug
            switch (page_type) {
            case Common::PageType::Unmapped:
                ASSERT_MSG(false, "Attempted to mark unmapped pages as non-debug");
                break;
            case Common::PageType::RasterizerCachedMemory:
            case Common::PageType::Memory:
                // Don't mess with already non-debug or rasterizer memory.
                break;
            case Common::PageType::DebugMemory: {
                // Restore the fast-path host pointer. The stored value is biased by the
                // page-aligned vaddr so that (stored + vaddr) yields the host address.
                u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                    reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
                    Common::PageType::Memory);
                break;
            }
            default:
                UNREACHABLE();
            }
        }
    }
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/**
 * Marks or unmarks a guest region as rasterizer (GPU) cached.
 *
 * Cached pages lose their direct host pointer (Store(0, RasterizerCachedMemory)) so CPU
 * accesses are routed through the rasterizer; uncaching restores the pointer via
 * GetPointerFromRasterizerCachedMemory.
 *
 * @param vaddr  Guest virtual address of the start of the region.
 * @param size   Size of the region in bytes.
 * @param cached Whether to mark (true) or unmark (false) the region as cached.
 */
void RasterizerMarkRegionCached(u64 vaddr, u64 size, bool cached) {
    // Null/out-of-range regions are silently ignored.
    if (vaddr == 0 || !AddressSpaceContains(*current_page_table, vaddr, size)) {
        return;
    }

    if (current_page_table->fastmem_arena) {
        // Choose fastmem protections so that accesses which must be observed by the
        // rasterizer fault into the slow path:
        // - Read stays allowed unless reactive flushing needs to see reads of cached pages.
        // - Write is removed for cached pages so writes always reach the rasterizer.
        Common::MemoryPermission perm{};
        if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
            perm |= Common::MemoryPermission::Read;
        }
        if (!cached) {
            perm |= Common::MemoryPermission::Write;
        }
        buffer->Protect(vaddr, size, perm);
    }

    // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
    // address space, marking the region as un/cached. The region is marked un/cached at a
    // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
    // is different). This assumes the specified GPU address region is contiguous as well.
    const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
    for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
        const Common::PageType page_type{
            current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
        if (cached) {
            // Switch page type to cached if now cached
            switch (page_type) {
            case Common::PageType::Unmapped:
                // It is not necessary for a process to have this region mapped into its address
                // space, for example, a system module need not have a VRAM mapping.
                break;
            case Common::PageType::DebugMemory:
            case Common::PageType::Memory:
                // Drop the host pointer so CPU accesses go through the rasterizer path.
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                    0, Common::PageType::RasterizerCachedMemory);
                break;
            case Common::PageType::RasterizerCachedMemory:
                // There can be more than one GPU region mapped per CPU region, so it's common
                // that this area is already marked as cached.
                break;
            default:
                UNREACHABLE();
            }
        } else {
            // Switch page type to uncached if now uncached
            switch (page_type) {
            case Common::PageType::Unmapped: // NOLINT(bugprone-branch-clone)
                // It is not necessary for a process to have this region mapped into its address
                // space, for example, a system module need not have a VRAM mapping.
                break;
            case Common::PageType::DebugMemory:
            case Common::PageType::Memory:
                // There can be more than one GPU region mapped per CPU region, so it's common
                // that this area is already unmarked as cached.
                break;
            case Common::PageType::RasterizerCachedMemory: {
                u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                if (pointer == nullptr) {
                    // It's possible that this function has been called while updating the
                    // pagetable after unmapping a VMA. In that case the underlying VMA will no
                    // longer exist, and we should just leave the pagetable entry blank.
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        0, Common::PageType::Unmapped);
                } else {
                    // Restore the fast-path pointer, biased by the page-aligned vaddr so that
                    // (stored + vaddr) yields the host address.
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
                        Common::PageType::Memory);
                }
                break;
            }
            default:
                UNREACHABLE();
            }
        }
    }
}
|
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
/**
 * Maps a region of pages as a specific type.
 *
 * @param page_table The page table to use to perform the mapping.
 * @param base The base address to begin mapping at.
 * @param size The total size of the range in bytes.
 * @param target The target address to begin mapping from.
 * @param type The page type to map the memory as.
 */
void MapPages(Common::PageTable& page_table, Common::ProcessAddress base_address, u64 size,
              Common::PhysicalAddress target, Common::PageType type) {
    // NOTE: despite the doc above, `base` and `size` here are in page units, not bytes —
    // see the YUZU_PAGESIZE scaling below. TODO(review): confirm against callers.
    auto base = GetInteger(base_address);

    LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
              base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);

    // During boot, current_page_table might not be set yet, in which case we need not flush
    /*if (system.IsPoweredOn()) {
        auto& gpu = system.GPU();
        for (u64 i = 0; i < size; i++) {
            const auto page = base + i;
            if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
            }
        }
    }*/

    const auto end = base + size;
    ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
               base + page_table.pointers.size());

    if (!target) {
        // A null physical target means there is no backing memory; only non-Memory page
        // types (e.g. Unmapped, RasterizerCachedMemory) may be stored without a pointer.
        ASSERT_MSG(type != Common::PageType::Memory,
                   "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);

        while (base != end) {
            page_table.pointers[base].Store(0, type);
            page_table.backing_addr[base] = 0;
            page_table.blocks[base] = 0;
            base += 1;
        }
    } else {
        // Remember the first page of this mapping; blocks[] records which mapping each
        // page belongs to (as a byte address of the mapping's start).
        auto orig_base = base;
        while (base != end) {
            // Store host pointer and backing address biased by the page's byte address,
            // so lookups can recover the host/physical address by adding the vaddr back.
            auto host_ptr =
                reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) -
                (base << YUZU_PAGEBITS);
            auto backing = GetInteger(target) - (base << YUZU_PAGEBITS);
            page_table.pointers[base].Store(host_ptr, type);
            page_table.backing_addr[base] = backing;
            page_table.blocks[base] = orig_base << YUZU_PAGEBITS;

            ASSERT_MSG(page_table.pointers[base].Pointer(),
                       "memory mapping base yield a nullptr within the table");

            base += 1;
            target += YUZU_PAGESIZE;
        }
    }
}
|
2013-09-19 04:52:51 +01:00
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/**
 * Resolves a guest virtual address to a host pointer, dispatching on the page type.
 *
 * @param vaddr         Guest virtual address (upper 16 bits are masked off).
 * @param on_unmapped   Invoked when the address has no mapping; nullptr is returned.
 * @param on_rasterizer Invoked when the page is rasterizer-cached, so callers can
 *                      synchronize with the GPU before touching the memory.
 *
 * @returns Host pointer backing vaddr, or nullptr when unmapped.
 */
[[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
    // AARCH64 masks the upper 16 bit of all memory accesses
    vaddr = vaddr & 0xffffffffffffULL;

    if (!AddressSpaceContains(*current_page_table, vaddr, 1)) [[unlikely]] {
        on_unmapped();
        return nullptr;
    }

    // Avoid adding any extra logic to this fast-path block
    const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
    // The page entry packs a host pointer and a 2-bit type into one uintptr_t;
    // a non-zero pointer means a directly-mapped page (the common case).
    if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
        return reinterpret_cast<u8*>(pointer + vaddr);
    }
    switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
    case Common::PageType::Unmapped:
        on_unmapped();
        return nullptr;
    case Common::PageType::Memory:
        // A Memory-typed page must always carry a pointer; reaching here is a bug.
        ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr);
        return nullptr;
    case Common::PageType::DebugMemory:
        return GetPointerFromDebugMemory(vaddr);
    case Common::PageType::RasterizerCachedMemory: {
        u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
        on_rasterizer();
        return host_ptr;
    }
    default:
        UNREACHABLE();
    }
    return nullptr;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/// Resolves vaddr to a host pointer, logging an error for unmapped addresses.
/// Rasterizer-cached pages require no extra handling for plain pointer queries.
[[nodiscard]] u8* GetPointer(const Common::ProcessAddress vaddr) const {
    const u64 addr = GetInteger(vaddr);
    const auto log_unmapped = [addr]() {
        LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", addr);
    };
    return GetPointerImpl(addr, log_unmapped, []() {});
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/// Same as GetPointer, but stays silent when the address is unmapped.
[[nodiscard]] u8* GetPointerSilent(const Common::ProcessAddress vaddr) const {
    const auto no_op = []() {};
    return GetPointerImpl(GetInteger(vaddr), no_op, no_op);
}
|
|
|
|
|
2021-08-07 02:32:06 +01:00
|
|
|
/**
|
|
|
|
* Reads a particular data type out of memory at the given virtual address.
|
|
|
|
*
|
|
|
|
* @param vaddr The virtual address to read the data type from.
|
|
|
|
*
|
|
|
|
* @tparam T The data type to read out of memory. This type *must* be
|
|
|
|
* trivially copyable, otherwise the behavior of this function
|
|
|
|
* is undefined.
|
|
|
|
*
|
|
|
|
* @returns The instance of T read from the specified virtual address.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2023-03-18 01:26:04 +00:00
|
|
|
T Read(Common::ProcessAddress vaddr) {
|
2021-08-07 02:32:06 +01:00
|
|
|
T result = 0;
|
|
|
|
const u8* const ptr = GetPointerImpl(
|
2023-03-18 01:26:04 +00:00
|
|
|
GetInteger(vaddr),
|
2021-08-07 02:32:06 +01:00
|
|
|
[vaddr]() {
|
2023-03-18 01:26:04 +00:00
|
|
|
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8,
|
|
|
|
GetInteger(vaddr));
|
2021-08-07 02:32:06 +01:00
|
|
|
},
|
2023-05-04 02:16:57 +01:00
|
|
|
[&]() { HandleRasterizerDownload(GetInteger(vaddr), sizeof(T)); });
|
2021-08-07 02:32:06 +01:00
|
|
|
if (ptr) {
|
|
|
|
std::memcpy(&result, ptr, sizeof(T));
|
|
|
|
}
|
|
|
|
return result;
|
2019-11-26 21:29:34 +00:00
|
|
|
}
|
|
|
|
|
2019-11-26 22:39:57 +00:00
|
|
|
/**
|
|
|
|
* Writes a particular data type to memory at the given virtual address.
|
|
|
|
*
|
|
|
|
* @param vaddr The virtual address to write the data type to.
|
|
|
|
*
|
|
|
|
* @tparam T The data type to write to memory. This type *must* be
|
|
|
|
* trivially copyable, otherwise the behavior of this function
|
|
|
|
* is undefined.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2023-03-18 01:26:04 +00:00
|
|
|
void Write(Common::ProcessAddress vaddr, const T data) {
|
2021-08-07 02:32:06 +01:00
|
|
|
u8* const ptr = GetPointerImpl(
|
2023-03-18 01:26:04 +00:00
|
|
|
GetInteger(vaddr),
|
2021-08-07 02:32:06 +01:00
|
|
|
[vaddr, data]() {
|
2021-08-07 04:03:21 +01:00
|
|
|
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
|
2023-03-18 01:26:04 +00:00
|
|
|
GetInteger(vaddr), static_cast<u64>(data));
|
2021-08-07 02:32:06 +01:00
|
|
|
},
|
2023-06-28 18:32:50 +01:00
|
|
|
[&]() { HandleRasterizerWrite(GetInteger(vaddr), sizeof(T)); });
|
2021-08-07 02:32:06 +01:00
|
|
|
if (ptr) {
|
|
|
|
std::memcpy(ptr, &data, sizeof(T));
|
2019-11-26 22:39:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-07 22:59:42 +00:00
|
|
|
/**
 * Conditionally writes data to memory via an atomic compare-and-swap, backing
 * exclusive-store (LDXR/STXR-style) instructions.
 *
 * @param vaddr    Virtual address to store to.
 * @param data     Value to store.
 * @param expected Value the location must currently hold for the swap to occur.
 *
 * @returns The result of Common::AtomicCompareAndSwap, or true when the address
 *          could not be resolved to a host pointer.
 *          NOTE(review): confirm the boolean polarity against the Dynarmic
 *          exclusive-write callback that consumes this result.
 */
template <typename T>
bool WriteExclusive(Common::ProcessAddress vaddr, const T data, const T expected) {
    u8* const ptr = GetPointerImpl(
        GetInteger(vaddr),
        [vaddr, data]() {
            LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
                      sizeof(T) * 8, GetInteger(vaddr), static_cast<u64>(data));
        },
        [&]() { HandleRasterizerWrite(GetInteger(vaddr), sizeof(T)); });
    if (ptr) {
        // volatile access keeps the compiler from reordering/eliding the CAS target.
        const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
        return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
    }
    return true;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/**
 * 128-bit variant of WriteExclusive, used for paired exclusive stores.
 *
 * @param vaddr    Virtual address to store to.
 * @param data     128-bit value to store (as a pair of u64 halves).
 * @param expected Value the location must currently hold for the swap to occur.
 *
 * @returns The result of the 128-bit Common::AtomicCompareAndSwap, or true when
 *          the address could not be resolved to a host pointer.
 */
bool WriteExclusive128(Common::ProcessAddress vaddr, const u128 data, const u128 expected) {
    u8* const ptr = GetPointerImpl(
        GetInteger(vaddr),
        [vaddr, data]() {
            LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
                      GetInteger(vaddr), static_cast<u64>(data[1]), static_cast<u64>(data[0]));
        },
        [&]() { HandleRasterizerWrite(GetInteger(vaddr), sizeof(u128)); });
    if (ptr) {
        // The u128 overload of AtomicCompareAndSwap operates on a volatile u64*.
        const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
        return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
    }
    return true;
}
|
|
|
|
|
2023-12-25 06:32:16 +00:00
|
|
|
// Ensures GPU-written data is downloaded to CPU-visible memory before a read.
// A per-host-thread "current area" caches the last synchronized range so that
// repeated reads inside it skip the GPU query entirely.
void HandleRasterizerDownload(VAddr v_address, size_t size) {
    const auto* p = GetPointerImpl(
        v_address, []() {}, []() {});
    // NOTE(review): assumes v_address resolves to a host pointer; a nullptr here
    // would be fed into GetRawPhysicalAddr — confirm callers only reach this for
    // mapped, rasterizer-cached pages.
    auto& gpu_device_memory = system.Host1x().MemoryManager();
    // Translate host pointer -> physical -> GPU device address.
    DAddr address =
        gpu_device_memory.GetAddressFromPAddr(system.DeviceMemory().GetRawPhysicalAddr(p));
    const size_t core = system.GetCurrentHostThreadID();
    auto& current_area = rasterizer_read_areas[core];
    const DAddr end_address = address + size;
    // Fast path: the requested range is already covered by the cached area.
    if (current_area.start_address <= address && end_address <= current_area.end_address)
        [[likely]] {
        return;
    }
    current_area = system.GPU().OnCPURead(address, size);
}
|
|
|
|
|
2023-12-25 06:32:16 +00:00
|
|
|
// Notifies GPU dirty-memory tracking that the CPU wrote guest memory.
// Each host CPU core owns one dirty manager; any extra host thread is funneled
// onto the last ("system") core slot, which therefore needs a mutex since
// several such threads may share it.
void HandleRasterizerWrite(VAddr v_address, size_t size) {
    const auto* p = GetPointerImpl(
        v_address, []() {}, []() {});
    // NOTE(review): assumes v_address resolves to a host pointer — see
    // HandleRasterizerDownload for the same caveat.
    PAddr address = system.DeviceMemory().GetRawPhysicalAddr(p);
    constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
    const size_t core = std::min(system.GetCurrentHostThreadID(),
                                 sys_core); // any other calls threads go to syscore.
    // Guard on sys_core;
    if (core == sys_core) [[unlikely]] {
        sys_core_guard.lock();
    }
    // Unlock on every exit path (early return included).
    SCOPE_EXIT({
        if (core == sys_core) [[unlikely]] {
            sys_core_guard.unlock();
        }
    });
    auto& current_area = rasterizer_write_areas[core];
    PAddr subaddress = address >> YUZU_PAGEBITS;
    // Writes landing on the same page as the previous one are collected without
    // re-querying the GPU.
    bool do_collection = current_area.last_address == subaddress;
    if (!do_collection) [[unlikely]] {
        do_collection = system.GPU().OnCPUWrite(address, size);
        if (!do_collection) {
            return;
        }
        current_area.last_address = subaddress;
    }
    gpu_dirty_managers[core].Collect(address, size);
}
|
|
|
|
|
|
|
|
struct GPUDirtyState {
|
2023-12-25 06:32:16 +00:00
|
|
|
PAddr last_address;
|
2023-06-28 18:32:50 +01:00
|
|
|
};
|
|
|
|
|
2023-05-29 00:35:51 +01:00
|
|
|
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
|
|
|
|
system.GPU().InvalidateRegion(GetInteger(dest_addr), size);
|
|
|
|
}
|
|
|
|
|
|
|
|
void FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
|
|
|
|
system.GPU().FlushRegion(GetInteger(dest_addr), size);
|
|
|
|
}
|
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
Core::System& system;
|
2023-06-28 18:32:50 +01:00
|
|
|
Common::PageTable* current_page_table = nullptr;
|
|
|
|
std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES>
|
|
|
|
rasterizer_read_areas{};
|
|
|
|
std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
|
|
|
|
std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
|
2023-11-10 15:43:56 +00:00
|
|
|
std::mutex sys_core_guard;
|
2023-12-26 04:21:08 +00:00
|
|
|
|
|
|
|
std::optional<Common::HeapTracker> heap_tracker;
|
|
|
|
#ifdef __linux__
|
|
|
|
Common::HeapTracker* buffer{};
|
|
|
|
#else
|
|
|
|
Common::HostMemory* buffer{};
|
|
|
|
#endif
|
2019-11-26 18:09:12 +00:00
|
|
|
};
|
2014-04-26 06:27:25 +01:00
|
|
|
|
2021-04-03 01:06:21 +01:00
|
|
|
Memory::Memory(Core::System& system_) : system{system_} {
|
|
|
|
Reset();
|
|
|
|
}
|
|
|
|
|
2019-11-26 18:09:12 +00:00
|
|
|
Memory::~Memory() = default;
|
2016-01-30 18:41:04 +00:00
|
|
|
|
2021-04-03 01:06:21 +01:00
|
|
|
void Memory::Reset() {
|
|
|
|
impl = std::make_unique<Impl>(system);
|
|
|
|
}
|
|
|
|
|
2023-11-28 19:30:39 +00:00
|
|
|
void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
|
|
|
|
impl->SetCurrentPageTable(process);
|
2019-11-26 23:34:30 +00:00
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
|
2023-12-26 04:21:08 +00:00
|
|
|
Common::PhysicalAddress target, Common::MemoryPermission perms,
|
|
|
|
bool separate_heap) {
|
|
|
|
impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
|
2015-05-13 03:38:56 +01:00
|
|
|
}
|
2014-12-30 03:35:06 +00:00
|
|
|
|
2023-12-26 04:21:08 +00:00
|
|
|
void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
|
|
|
|
bool separate_heap) {
|
|
|
|
impl->UnmapRegion(page_table, base, size, separate_heap);
|
2019-11-26 18:09:12 +00:00
|
|
|
}
|
2016-04-16 23:57:57 +01:00
|
|
|
|
2023-11-23 09:26:06 +00:00
|
|
|
void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
|
|
|
|
Common::MemoryPermission perms) {
|
|
|
|
impl->ProtectRegion(page_table, GetInteger(vaddr), size, perms);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
|
2023-12-18 01:46:41 +00:00
|
|
|
const auto& page_table = *impl->current_page_table;
|
2022-08-19 00:28:55 +01:00
|
|
|
const size_t page = vaddr >> YUZU_PAGEBITS;
|
2021-09-29 12:54:59 +01:00
|
|
|
if (page >= page_table.pointers.size()) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
const auto [pointer, type] = page_table.pointers[page].PointerType();
|
Fixes and workarounds to make UBSan happier on macOS
There are still some other issues not addressed here, but it's a start.
Workarounds for false-positive reports:
- `RasterizerAccelerated`: Put a gigantic array behind a `unique_ptr`,
because UBSan has a [hardcoded limit](https://stackoverflow.com/questions/64531383/c-runtime-error-using-fsanitize-undefined-object-has-a-possibly-invalid-vp)
of how big it thinks objects can be, specifically when dealing with
offset-to-top values used with multiple inheritance. Hopefully this
doesn't have a performance impact.
- `QueryCacheBase::QueryCacheBase`: Avoid an operation that UBSan thinks
is UB even though it at least arguably isn't. See the link in the
comment for more information.
Fixes for correct reports:
- `PageTable`, `Memory`: Use `uintptr_t` values instead of pointers to
avoid UB from pointer overflow (when pointer arithmetic wraps around
the address space).
- `KScheduler::Reload`: `thread->GetOwnerProcess()` can be `nullptr`;
avoid calling methods on it in this case. (The existing code returns
a garbage reference to a field, which is then passed into
`LoadWatchpointArray`, and apparently it's never used, so it's
harmless in practice but still triggers UBSan.)
- `KAutoObject::Close`: This function calls `this->Destroy()`, which
overwrites the beginning of the object with junk (specifically a free
list pointer). Then it calls `this->UnregisterWithKernel()`. UBSan
complains about a type mismatch because the vtable has been
overwritten, and I believe this is indeed UB. `UnregisterWithKernel`
also loads `m_kernel` from the 'freed' object, which seems to be
technically safe (the overwriting doesn't extend as far as that
field), but seems dubious. Switch to a `static` method and load
`m_kernel` in advance.
2023-07-01 23:00:39 +01:00
|
|
|
return pointer != 0 || type == Common::PageType::RasterizerCachedMemory ||
|
2022-06-06 17:56:01 +01:00
|
|
|
type == Common::PageType::DebugMemory;
|
2019-11-26 18:46:41 +00:00
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/// Checks every page overlapped by [base, base + size); the range is valid only
/// if each page is individually valid. An empty range is trivially valid.
bool Memory::IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const {
    const Common::ProcessAddress end = base + size;
    for (Common::ProcessAddress page = Common::AlignDown(GetInteger(base), YUZU_PAGESIZE);
         page < end; page += YUZU_PAGESIZE) {
        if (!IsValidVirtualAddress(page)) {
            return false;
        }
    }
    return true;
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/// Returns the host pointer for vaddr, logging an error if unmapped.
u8* Memory::GetPointer(Common::ProcessAddress vaddr) {
    return impl->GetPointer(vaddr);
}

/// Returns the host pointer for vaddr without logging on failure.
u8* Memory::GetPointerSilent(Common::ProcessAddress vaddr) {
    return impl->GetPointerSilent(vaddr);
}

/// Const overload of GetPointer.
const u8* Memory::GetPointer(Common::ProcessAddress vaddr) const {
    return impl->GetPointer(vaddr);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
u8 Memory::Read8(const Common::ProcessAddress addr) {
|
2019-11-26 21:29:34 +00:00
|
|
|
return impl->Read8(addr);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
u16 Memory::Read16(const Common::ProcessAddress addr) {
|
2019-11-26 21:29:34 +00:00
|
|
|
return impl->Read16(addr);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
u32 Memory::Read32(const Common::ProcessAddress addr) {
|
2019-11-26 21:29:34 +00:00
|
|
|
return impl->Read32(addr);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
u64 Memory::Read64(const Common::ProcessAddress addr) {
|
2019-11-26 21:29:34 +00:00
|
|
|
return impl->Read64(addr);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::Write8(Common::ProcessAddress addr, u8 data) {
|
2019-11-26 22:39:57 +00:00
|
|
|
impl->Write8(addr, data);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::Write16(Common::ProcessAddress addr, u16 data) {
|
2019-11-26 22:39:57 +00:00
|
|
|
impl->Write16(addr, data);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::Write32(Common::ProcessAddress addr, u32 data) {
|
2019-11-26 22:39:57 +00:00
|
|
|
impl->Write32(addr, data);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::Write64(Common::ProcessAddress addr, u64 data) {
|
2019-11-26 22:39:57 +00:00
|
|
|
impl->Write64(addr, data);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
bool Memory::WriteExclusive8(Common::ProcessAddress addr, u8 data, u8 expected) {
|
2020-03-07 22:59:42 +00:00
|
|
|
return impl->WriteExclusive8(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
bool Memory::WriteExclusive16(Common::ProcessAddress addr, u16 data, u16 expected) {
|
2020-03-07 22:59:42 +00:00
|
|
|
return impl->WriteExclusive16(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
bool Memory::WriteExclusive32(Common::ProcessAddress addr, u32 data, u32 expected) {
|
2020-03-07 22:59:42 +00:00
|
|
|
return impl->WriteExclusive32(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
bool Memory::WriteExclusive64(Common::ProcessAddress addr, u64 data, u64 expected) {
|
2020-03-07 22:59:42 +00:00
|
|
|
return impl->WriteExclusive64(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
bool Memory::WriteExclusive128(Common::ProcessAddress addr, u128 data, u128 expected) {
|
2020-03-07 22:59:42 +00:00
|
|
|
return impl->WriteExclusive128(addr, data, expected);
|
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
/// Reads a NUL-terminated string starting at vaddr, up to max_length bytes.
std::string Memory::ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
    return impl->ReadCString(vaddr, max_length);
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
/// Copies size bytes from guest memory at src_addr into dest_buffer,
/// synchronizing rasterizer-cached pages first. Returns false on any
/// unmapped page in the range.
bool Memory::ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
                       const std::size_t size) {
    return impl->ReadBlock(src_addr, dest_buffer, size);
}

/// As ReadBlock, but skips rasterizer synchronization ("unsafe" fast path).
bool Memory::ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
                             const std::size_t size) {
    return impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
}
|
|
|
|
|
2023-05-29 00:35:51 +01:00
|
|
|
/// Returns a direct host view of [src_addr, src_addr + size) when contiguous,
/// as resolved by Impl::GetSpan (const overload).
const u8* Memory::GetSpan(const VAddr src_addr, const std::size_t size) const {
    return impl->GetSpan(src_addr, size);
}

/// Mutable overload of GetSpan.
u8* Memory::GetSpan(const VAddr src_addr, const std::size_t size) {
    return impl->GetSpan(src_addr, size);
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
/// Copies size bytes from src_buffer into guest memory at dest_addr, notifying
/// GPU dirty tracking. Returns false on any unmapped page in the range.
bool Memory::WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
                        const std::size_t size) {
    return impl->WriteBlock(dest_addr, src_buffer, size);
}

/// As WriteBlock, but skips rasterizer notification ("unsafe" fast path).
bool Memory::WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
                              const std::size_t size) {
    return impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
bool Memory::CopyBlock(Common::ProcessAddress dest_addr, Common::ProcessAddress src_addr,
|
2023-03-23 23:58:48 +00:00
|
|
|
const std::size_t size) {
|
2023-07-15 03:19:59 +01:00
|
|
|
return impl->CopyBlock(dest_addr, src_addr, size);
|
2019-11-26 21:06:49 +00:00
|
|
|
}
|
|
|
|
|
2023-07-15 03:19:59 +01:00
|
|
|
bool Memory::ZeroBlock(Common::ProcessAddress dest_addr, const std::size_t size) {
|
|
|
|
return impl->ZeroBlock(dest_addr, size);
|
2022-07-16 23:48:45 +01:00
|
|
|
}
|
|
|
|
|
2023-06-28 18:32:50 +01:00
|
|
|
/// Installs the per-core GPU dirty-memory managers used by rasterizer writes.
void Memory::SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers) {
    impl->gpu_dirty_managers = managers;
}
|
|
|
|
|
2023-03-23 23:58:48 +00:00
|
|
|
// Guest cache-maintenance operations over [dest_addr, dest_addr + size),
// forwarded to the implementation.

Result Memory::InvalidateDataCache(Common::ProcessAddress dest_addr, const std::size_t size) {
    return impl->InvalidateDataCache(dest_addr, size);
}

Result Memory::StoreDataCache(Common::ProcessAddress dest_addr, const std::size_t size) {
    return impl->StoreDataCache(dest_addr, size);
}

Result Memory::FlushDataCache(Common::ProcessAddress dest_addr, const std::size_t size) {
    return impl->FlushDataCache(dest_addr, size);
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::RasterizerMarkRegionCached(Common::ProcessAddress vaddr, u64 size, bool cached) {
|
|
|
|
impl->RasterizerMarkRegionCached(GetInteger(vaddr), size, cached);
|
2019-11-26 20:56:13 +00:00
|
|
|
}
|
|
|
|
|
2023-03-18 01:26:04 +00:00
|
|
|
void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug) {
|
|
|
|
impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
|
2022-06-06 17:56:01 +01:00
|
|
|
}
|
|
|
|
|
2023-05-29 00:35:51 +01:00
|
|
|
void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
|
|
|
|
impl->InvalidateRegion(dest_addr, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
|
|
|
|
impl->FlushRegion(dest_addr, size);
|
|
|
|
}
|
|
|
|
|
2023-11-17 21:44:53 +00:00
|
|
|
// Invalidates [vaddr, vaddr + size) on behalf of native code execution (NCE).
// Returns true only when the range is mapped and resolved to a host pointer;
// rasterizer-cached ranges are forwarded to the GPU for invalidation.
bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
    // [[maybe_unused]]: 'rasterizer' is only read inside the Linux-only block below.
    [[maybe_unused]] bool mapped = true;
    [[maybe_unused]] bool rasterizer = false;

    u8* const ptr = impl->GetPointerImpl(
        GetInteger(vaddr),
        [&] {
            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size,
                      GetInteger(vaddr));
            mapped = false;
        },
        [&] {
            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
            rasterizer = true;
        });

#ifdef __linux__
    // Plain mapped memory may belong to the separate heap; let the heap tracker
    // lazily (re)map it.
    if (!rasterizer && mapped) {
        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
    }
#endif

    return mapped && ptr != nullptr;
}
|
|
|
|
|
2023-12-26 04:21:08 +00:00
|
|
|
bool Memory::InvalidateSeparateHeap(void* fault_address) {
|
|
|
|
#ifdef __linux__
|
|
|
|
return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
|
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2020-03-31 20:10:44 +01:00
|
|
|
} // namespace Core::Memory
|