memory_manager: Use GPUVAddr, not PAddr, for GPU addresses.
parent e8c2bb24b2
commit 9e11a76e92
7 changed files with 57 additions and 60 deletions
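For readers skimming the diff: GPUVAddr names an address inside the GPU's own virtual address space, while VAddr is a guest (CPU) virtual address; neither is a physical address, which is what the old PAddr/PhysicalToVirtualAddress names suggested. A minimal sketch of the distinction, assuming both aliases are plain 64-bit integers as elsewhere in the codebase (hedged sketch only, not the real header):

#include <cstdint>

using VAddr = std::uint64_t;    // guest (CPU) virtual address
using GPUVAddr = std::uint64_t; // GPU virtual address, distinct from VAddr and from any physical address

// After this commit a translation call site reads:
//     VAddr cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
// rather than the misleading PhysicalToVirtualAddress(paddr).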
@@ -90,9 +90,7 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
 }
 
 void GPU::ProcessCommandList(GPUVAddr address, u32 size) {
-    // TODO(Subv): PhysicalToVirtualAddress is a misnomer, it converts a GPU VAddr into an
-    // application VAddr.
-    const VAddr head_address = memory_manager->PhysicalToVirtualAddress(address);
+    const VAddr head_address = memory_manager->GpuToCpuAddress(address);
     VAddr current_addr = head_address;
     while (current_addr < head_address + size * sizeof(CommandHeader)) {
         const CommandHeader header = {Memory::Read32(current_addr)};
@@ -145,7 +145,7 @@ void Maxwell3D::ProcessQueryGet() {
     GPUVAddr sequence_address = regs.query.QueryAddress();
     // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
     // VAddr before writing.
-    VAddr address = memory_manager.PhysicalToVirtualAddress(sequence_address);
+    VAddr address = memory_manager.GpuToCpuAddress(sequence_address);
 
     // TODO(Subv): Support the other query units.
     ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,
@@ -225,8 +225,7 @@ void Maxwell3D::ProcessCBData(u32 value) {
     // Don't allow writing past the end of the buffer.
     ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
 
-    VAddr address =
-        memory_manager.PhysicalToVirtualAddress(buffer_address + regs.const_buffer.cb_pos);
+    VAddr address = memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
 
     Memory::Write32(address, value);
 
@@ -238,7 +237,7 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
     GPUVAddr tic_base_address = regs.tic.TICAddress();
 
     GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
-    VAddr tic_address_cpu = memory_manager.PhysicalToVirtualAddress(tic_address_gpu);
+    VAddr tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
 
     Texture::TICEntry tic_entry;
     Memory::ReadBlock(tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
@@ -268,7 +267,7 @@ Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
     GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
 
     GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
-    VAddr tsc_address_cpu = memory_manager.PhysicalToVirtualAddress(tsc_address_gpu);
+    VAddr tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
 
     Texture::TSCEntry tsc_entry;
     Memory::ReadBlock(tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
@@ -293,7 +292,7 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
          current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) {
 
         Texture::TextureHandle tex_handle{
-            Memory::Read32(memory_manager.PhysicalToVirtualAddress(current_texture))};
+            Memory::Read32(memory_manager.GpuToCpuAddress(current_texture))};
 
         Texture::FullTextureInfo tex_info{};
         // TODO(Subv): Use the shader to determine which textures are actually accessed.
@@ -8,90 +8,90 @@
 
 namespace Tegra {
 
-PAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
-    boost::optional<PAddr> paddr = FindFreeBlock(size, align);
-    ASSERT(paddr);
+GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
+    boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, align);
+    ASSERT(gpu_addr);
 
     for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
-        ASSERT(PageSlot(*paddr + offset) == static_cast<u64>(PageStatus::Unmapped));
-        PageSlot(*paddr + offset) = static_cast<u64>(PageStatus::Allocated);
+        ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
+        PageSlot(*gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
     }
 
-    return *paddr;
+    return *gpu_addr;
 }
 
-PAddr MemoryManager::AllocateSpace(PAddr paddr, u64 size, u64 align) {
+GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
     for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
-        ASSERT(PageSlot(paddr + offset) == static_cast<u64>(PageStatus::Unmapped));
-        PageSlot(paddr + offset) = static_cast<u64>(PageStatus::Allocated);
+        ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
+        PageSlot(gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
     }
 
-    return paddr;
+    return gpu_addr;
 }
 
-PAddr MemoryManager::MapBufferEx(VAddr vaddr, u64 size) {
-    boost::optional<PAddr> paddr = FindFreeBlock(size, PAGE_SIZE);
-    ASSERT(paddr);
+GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
+    boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, PAGE_SIZE);
+    ASSERT(gpu_addr);
 
     for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
-        ASSERT(PageSlot(*paddr + offset) == static_cast<u64>(PageStatus::Unmapped));
-        PageSlot(*paddr + offset) = vaddr + offset;
+        ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
+        PageSlot(*gpu_addr + offset) = cpu_addr + offset;
     }
 
-    return *paddr;
+    return *gpu_addr;
 }
 
-PAddr MemoryManager::MapBufferEx(VAddr vaddr, PAddr paddr, u64 size) {
-    ASSERT((paddr & PAGE_MASK) == 0);
+GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
+    ASSERT((gpu_addr & PAGE_MASK) == 0);
 
     for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
-        ASSERT(PageSlot(paddr + offset) == static_cast<u64>(PageStatus::Allocated));
-        PageSlot(paddr + offset) = vaddr + offset;
+        ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Allocated));
+        PageSlot(gpu_addr + offset) = cpu_addr + offset;
     }
 
-    return paddr;
+    return gpu_addr;
 }
 
-boost::optional<PAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
-    PAddr paddr = 0;
+boost::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
+    GPUVAddr gpu_addr = 0;
     u64 free_space = 0;
     align = (align + PAGE_MASK) & ~PAGE_MASK;
 
-    while (paddr + free_space < MAX_ADDRESS) {
-        if (!IsPageMapped(paddr + free_space)) {
+    while (gpu_addr + free_space < MAX_ADDRESS) {
+        if (!IsPageMapped(gpu_addr + free_space)) {
             free_space += PAGE_SIZE;
             if (free_space >= size) {
-                return paddr;
+                return gpu_addr;
             }
         } else {
-            paddr += free_space + PAGE_SIZE;
+            gpu_addr += free_space + PAGE_SIZE;
             free_space = 0;
-            paddr = Common::AlignUp(paddr, align);
+            gpu_addr = Common::AlignUp(gpu_addr, align);
         }
     }
 
     return {};
 }
 
-VAddr MemoryManager::PhysicalToVirtualAddress(PAddr paddr) {
-    VAddr base_addr = PageSlot(paddr);
+VAddr MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
+    VAddr base_addr = PageSlot(gpu_addr);
     ASSERT(base_addr != static_cast<u64>(PageStatus::Unmapped));
-    return base_addr + (paddr & PAGE_MASK);
+    return base_addr + (gpu_addr & PAGE_MASK);
 }
 
-bool MemoryManager::IsPageMapped(PAddr paddr) {
-    return PageSlot(paddr) != static_cast<u64>(PageStatus::Unmapped);
+bool MemoryManager::IsPageMapped(GPUVAddr gpu_addr) {
+    return PageSlot(gpu_addr) != static_cast<u64>(PageStatus::Unmapped);
 }
 
-VAddr& MemoryManager::PageSlot(PAddr paddr) {
-    auto& block = page_table[(paddr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
+VAddr& MemoryManager::PageSlot(GPUVAddr gpu_addr) {
+    auto& block = page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
     if (!block) {
         block = std::make_unique<PageBlock>();
         for (unsigned index = 0; index < PAGE_BLOCK_SIZE; index++) {
             (*block)[index] = static_cast<u64>(PageStatus::Unmapped);
         }
     }
-    return (*block)[(paddr >> PAGE_BITS) & PAGE_BLOCK_MASK];
+    return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
 }
 
 } // namespace Tegra
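To make the translation path above easier to follow, here is a standalone sketch of the same two-level lookup: the upper bits of the GPU virtual address pick a page block, the middle bits pick a slot inside it, and the slot stores the base CPU VAddr of the mapped 64 KiB page (or an "unmapped" sentinel). Only PAGE_BITS = 16 is given by this diff; the block and table widths below are invented for illustration, and this is not the yuzu source verbatim.

// Standalone, compilable sketch of the GPU -> CPU page-table walk.
#include <array>
#include <cassert>
#include <cstdint>
#include <memory>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

constexpr std::uint64_t PAGE_BITS = 16;                // 64 KiB pages (from the diff)
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;
constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;
constexpr std::uint64_t BLOCK_BITS = 14;               // assumed: slots per block
constexpr std::uint64_t TABLE_BITS = 14;               // assumed: blocks in the table
constexpr std::uint64_t UNMAPPED = ~0ULL;              // stands in for PageStatus::Unmapped

using PageBlock = std::array<VAddr, 1ULL << BLOCK_BITS>;
std::array<std::unique_ptr<PageBlock>, 1ULL << TABLE_BITS> page_table;

// Returns the slot holding the CPU base address for the page containing gpu_addr,
// lazily allocating the block and marking every slot unmapped, as PageSlot() does.
VAddr& PageSlot(GPUVAddr gpu_addr) {
    auto& block = page_table[(gpu_addr >> (PAGE_BITS + BLOCK_BITS)) & ((1ULL << TABLE_BITS) - 1)];
    if (!block) {
        block = std::make_unique<PageBlock>();
        block->fill(UNMAPPED);
    }
    return (*block)[(gpu_addr >> PAGE_BITS) & ((1ULL << BLOCK_BITS) - 1)];
}

// GPU VAddr -> CPU VAddr: page base from the slot, plus the offset within the page.
VAddr GpuToCpuAddress(GPUVAddr gpu_addr) {
    const VAddr base = PageSlot(gpu_addr);
    assert(base != UNMAPPED);
    return base + (gpu_addr & PAGE_MASK);
}

int main() {
    const GPUVAddr gpu_addr = 3 * PAGE_SIZE + 0x42;
    PageSlot(gpu_addr) = 0x800000000; // pretend this page maps to CPU VAddr 0x8'0000'0000
    assert(GpuToCpuAddress(gpu_addr) == 0x800000000 + 0x42);
}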
@@ -18,20 +18,20 @@ class MemoryManager final {
 public:
     MemoryManager() = default;
 
-    PAddr AllocateSpace(u64 size, u64 align);
-    PAddr AllocateSpace(PAddr paddr, u64 size, u64 align);
-    PAddr MapBufferEx(VAddr vaddr, u64 size);
-    PAddr MapBufferEx(VAddr vaddr, PAddr paddr, u64 size);
-    VAddr PhysicalToVirtualAddress(PAddr paddr);
+    GPUVAddr AllocateSpace(u64 size, u64 align);
+    GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align);
+    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
+    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
+    VAddr GpuToCpuAddress(GPUVAddr gpu_addr);
 
     static constexpr u64 PAGE_BITS = 16;
     static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS;
     static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
 
 private:
-    boost::optional<PAddr> FindFreeBlock(u64 size, u64 align = 1);
-    bool IsPageMapped(PAddr paddr);
-    VAddr& PageSlot(PAddr paddr);
+    boost::optional<GPUVAddr> FindFreeBlock(u64 size, u64 align = 1);
+    bool IsPageMapped(GPUVAddr gpu_addr);
+    VAddr& PageSlot(GPUVAddr gpu_addr);
 
     enum class PageStatus : u64 {
         Unmapped = 0xFFFFFFFFFFFFFFFFULL,
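A hedged caller-side sketch of the renamed interface: map a guest buffer into the GPU address space, then translate a GPU virtual address back before touching guest memory. The include path and all address values are assumptions for illustration; only the method names and signatures come from the header hunk above.

// Illustrative fragment, assuming the header is reachable as video_core/memory_manager.h.
#include "video_core/memory_manager.h"

void ExampleMapping() {
    Tegra::MemoryManager memory_manager;

    // Map two 64 KiB pages of guest memory (CPU VAddr picked arbitrarily) into GPU space.
    const VAddr cpu_addr = 0x10000000;
    const auto gpu_addr = memory_manager.MapBufferEx(cpu_addr, 2 * Tegra::MemoryManager::PAGE_SIZE);

    // When a GPU register later reports a GPU virtual address, translate it back
    // before reading through the usual guest-memory accessors.
    const VAddr cpu_side = memory_manager.GpuToCpuAddress(gpu_addr);
    (void)cpu_side; // e.g. Memory::ReadBlock(cpu_side, dest, size);
}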
@@ -233,7 +233,7 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset) {
         // Fetch program code from memory
         GLShader::ProgramCode program_code;
         const u64 gpu_address{gpu.regs.code_address.CodeAddress() + shader_config.offset};
-        const VAddr cpu_address{gpu.memory_manager.PhysicalToVirtualAddress(gpu_address)};
+        const VAddr cpu_address{gpu.memory_manager.GpuToCpuAddress(gpu_address)};
         Memory::ReadBlock(cpu_address, program_code.data(), program_code.size() * sizeof(u64));
         GLShader::ShaderSetup setup{std::move(program_code)};
 
@@ -395,7 +395,7 @@ void RasterizerOpenGL::DrawArrays() {
     if (is_indexed) {
         const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
         const VAddr index_data_addr{
-            memory_manager->PhysicalToVirtualAddress(regs.index_array.StartAddress())};
+            memory_manager->GpuToCpuAddress(regs.index_array.StartAddress())};
         Memory::ReadBlock(index_data_addr, offseted_buffer, index_buffer_size);
 
         index_buffer_offset = buffer_offset;
@@ -659,7 +659,7 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, GLuint progr
         buffer_draw_state.enabled = true;
         buffer_draw_state.bindpoint = current_bindpoint + bindpoint;
 
-        VAddr addr = gpu.memory_manager->PhysicalToVirtualAddress(buffer.address);
+        VAddr addr = gpu.memory_manager->GpuToCpuAddress(buffer.address);
         std::vector<u8> data(used_buffer.GetSize() * sizeof(float));
         Memory::ReadBlock(addr, data.data(), data.size());
 
@@ -1028,7 +1028,7 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
     auto& gpu = Core::System::GetInstance().GPU();
 
     SurfaceParams params;
-    params.addr = gpu.memory_manager->PhysicalToVirtualAddress(config.tic.Address());
+    params.addr = gpu.memory_manager->GpuToCpuAddress(config.tic.Address());
     params.width = config.tic.Width();
     params.height = config.tic.Height();
     params.is_tiled = config.tic.IsTiled();
@@ -1106,7 +1106,7 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
     color_params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
     SurfaceParams depth_params = color_params;
 
-    color_params.addr = memory_manager->PhysicalToVirtualAddress(config.Address());
+    color_params.addr = memory_manager->GpuToCpuAddress(config.Address());
     color_params.pixel_format = SurfaceParams::PixelFormatFromRenderTargetFormat(config.format);
     color_params.component_type = SurfaceParams::ComponentTypeFromRenderTarget(config.format);
     color_params.UpdateParams();
@@ -378,7 +378,7 @@ void GraphicsSurfaceWidget::OnUpdate() {
     // TODO: Implement a good way to visualize alpha components!
 
     QImage decoded_image(surface_width, surface_height, QImage::Format_ARGB32);
-    VAddr address = gpu.memory_manager->PhysicalToVirtualAddress(surface_address);
+    VAddr address = gpu.memory_manager->GpuToCpuAddress(surface_address);
 
     auto unswizzled_data =
         Tegra::Texture::UnswizzleTexture(address, surface_format, surface_width, surface_height);
@@ -437,7 +437,7 @@ void GraphicsSurfaceWidget::SaveSurface() {
         pixmap->save(&file, "PNG");
     } else if (selectedFilter == bin_filter) {
         auto& gpu = Core::System::GetInstance().GPU();
-        VAddr address = gpu.memory_manager->PhysicalToVirtualAddress(surface_address);
+        VAddr address = gpu.memory_manager->GpuToCpuAddress(surface_address);
 
         const u8* buffer = Memory::GetPointer(address);
         ASSERT_MSG(buffer != nullptr, "Memory not accessible");