From 9b929e934b1d2cc2ae5e0be7fcafe22c6994fdba Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 9 Oct 2018 19:15:05 -0400 Subject: [PATCH 01/16] gl_rasterizer_cache: Reintroduce code for handling swizzle and flush to guest RAM. --- .../renderer_opengl/gl_rasterizer_cache.cpp | 134 ++++++++++++++---- .../renderer_opengl/gl_rasterizer_cache.h | 13 ++ 2 files changed, 119 insertions(+), 28 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 801d45144..1bb842fe7 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -336,20 +336,22 @@ void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 d constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT; constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format); + // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual + // pixel values. + const u32 tile_size{IsFormatBCn(format) ? 4U : 1U}; + if (morton_to_gl) { - // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual - // pixel values. - const u32 tile_size{IsFormatBCn(format) ? 4U : 1U}; const std::vector data = Tegra::Texture::UnswizzleTexture( addr, tile_size, bytes_per_pixel, stride, height, depth, block_height, block_depth); const std::size_t size_to_copy{std::min(gl_buffer_size, data.size())}; memcpy(gl_buffer, data.data(), size_to_copy); } else { - // TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). We should - // check the configuration for this and perform more generic un/swizzle - LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!"); - VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel, - Memory::GetPointer(addr), gl_buffer, morton_to_gl); + std::vector data(height * stride * bytes_per_pixel); + Tegra::Texture::CopySwizzledData(stride / tile_size, height / tile_size, depth, + bytes_per_pixel, bytes_per_pixel, data.data(), gl_buffer, + false, block_height, block_depth); + const std::size_t size_to_copy{std::min(gl_buffer_size, data.size())}; + memcpy(Memory::GetPointer(addr), data.data(), size_to_copy); } } @@ -430,17 +432,16 @@ static constexpr std::array, MortonCopy, MortonCopy, - // TODO(Subv): Swizzling DXT1/DXT23/DXT45/DXN1/DXN2/BC7U/BC6H_UF16/BC6H_SF16/ASTC_2D_4X4 - // formats are not supported - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, + MortonCopy, + MortonCopy, + MortonCopy, + MortonCopy, + MortonCopy, + MortonCopy, + MortonCopy, + MortonCopy, + MortonCopy, + // TODO(Subv): Swizzling ASTC formats are not supported nullptr, MortonCopy, MortonCopy, @@ -754,7 +755,7 @@ CachedSurface::CachedSurface(const SurfaceParams& params) SurfaceParams::SurfaceTargetName(params.target)); } -static void ConvertS8Z24ToZ24S8(std::vector& data, u32 width, u32 height) { +static void ConvertS8Z24ToZ24S8(std::vector& data, u32 width, u32 height, bool reverse) { union S8Z24 { BitField<0, 24, u32> z24; BitField<24, 8, u32> s8; @@ -767,16 +768,23 @@ static void ConvertS8Z24ToZ24S8(std::vector& data, u32 width, u32 height) { }; static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size"); - S8Z24 input_pixel{}; - Z24S8 output_pixel{}; + S8Z24 s8z24_pixel{}; + Z24S8 z24s8_pixel{}; constexpr auto bpp{CachedSurface::GetGLBytesPerPixel(PixelFormat::S8Z24)}; for (std::size_t y = 0; y 
< height; ++y) { for (std::size_t x = 0; x < width; ++x) { const std::size_t offset{bpp * (y * width + x)}; - std::memcpy(&input_pixel, &data[offset], sizeof(S8Z24)); - output_pixel.s8.Assign(input_pixel.s8); - output_pixel.z24.Assign(input_pixel.z24); - std::memcpy(&data[offset], &output_pixel, sizeof(Z24S8)); + if (reverse) { + std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8)); + s8z24_pixel.s8.Assign(z24s8_pixel.s8); + s8z24_pixel.z24.Assign(z24s8_pixel.z24); + std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24)); + } else { + std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24)); + z24s8_pixel.s8.Assign(s8z24_pixel.s8); + z24s8_pixel.z24.Assign(s8z24_pixel.z24); + std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8)); + } } } } @@ -814,7 +822,7 @@ static void ConvertFormatAsNeeded_LoadGLBuffer(std::vector& data, PixelForma } case PixelFormat::S8Z24: // Convert the S8Z24 depth format to Z24S8, as OpenGL does not support S8Z24. - ConvertS8Z24ToZ24S8(data, width, height); + ConvertS8Z24ToZ24S8(data, width, height, false); break; case PixelFormat::G8R8U: @@ -825,6 +833,30 @@ static void ConvertFormatAsNeeded_LoadGLBuffer(std::vector& data, PixelForma } } +/** + * Helper function to perform software conversion (as needed) when flushing a buffer from OpenGL to + * Switch memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or + * with typical desktop GPUs. + */ +static void ConvertFormatAsNeeded_FlushGLBuffer(std::vector& data, PixelFormat pixel_format, + u32 width, u32 height) { + switch (pixel_format) { + case PixelFormat::G8R8U: + case PixelFormat::G8R8S: + case PixelFormat::ASTC_2D_4X4: + case PixelFormat::ASTC_2D_8X8: { + LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented", + static_cast(pixel_format)); + UNREACHABLE(); + break; + } + case PixelFormat::S8Z24: + // Convert the Z24S8 depth format to S8Z24, as OpenGL does not support S8Z24. 
+ ConvertS8Z24ToZ24S8(data, width, height, true); + break; + } +} + MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64, 192)); void CachedSurface::LoadGLBuffer() { ASSERT(params.type != SurfaceType::Fill); @@ -864,11 +896,57 @@ void CachedSurface::LoadGLBuffer() { } ConvertFormatAsNeeded_LoadGLBuffer(gl_buffer, params.pixel_format, params.width, params.height); + + dirty = false; } MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64)); void CachedSurface::FlushGLBuffer() { - ASSERT_MSG(false, "Unimplemented"); + MICROPROFILE_SCOPE(OpenGL_SurfaceFlush); + const auto& rect{params.GetRect()}; + // Load data from memory to the surface + const GLint x0 = static_cast(rect.left); + const GLint y0 = static_cast(rect.bottom); + const size_t buffer_offset = + static_cast(static_cast(y0) * params.width + static_cast(x0)) * + GetGLBytesPerPixel(params.pixel_format); + const u32 bytes_per_pixel = GetGLBytesPerPixel(params.pixel_format); + const u32 copy_size = params.width * params.height * bytes_per_pixel; + gl_buffer.resize(static_cast(params.depth) * copy_size); + const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type); + // Ensure no bad interactions with GL_UNPACK_ALIGNMENT + ASSERT(params.width * GetGLBytesPerPixel(params.pixel_format) % 4 == 0); + glPixelStorei(GL_PACK_ROW_LENGTH, static_cast(params.width)); + ASSERT(!tuple.compressed); + ASSERT(x0 == 0 && y0 == 0); + glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); + glGetTextureImage(texture.handle, 0, tuple.format, tuple.type, gl_buffer.size(), + gl_buffer.data()); + glPixelStorei(GL_PACK_ROW_LENGTH, 0); + ConvertFormatAsNeeded_FlushGLBuffer(gl_buffer, params.pixel_format, params.width, + params.height); + ASSERT(params.type != SurfaceType::Fill); + const u8* const texture_src_data = Memory::GetPointer(params.addr); + ASSERT(texture_src_data); + if (params.is_tiled) { + u32 depth = params.depth; + u32 block_depth = params.block_depth; + + ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}", + params.block_width, static_cast(params.target)); + + if (params.target == SurfaceParams::SurfaceTarget::Texture2D) { + // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented. + depth = 1U; + block_depth = 1U; + } + gl_to_morton_fns[static_cast(params.pixel_format)]( + params.width, params.block_height, params.height, block_depth, depth, + &gl_buffer[buffer_offset], copy_size, params.addr + buffer_offset); + } else { + Memory::WriteBlock(params.addr + buffer_offset, &gl_buffer[buffer_offset], + gl_buffer.size() - buffer_offset); + } } MICROPROFILE_DEFINE(OpenGL_TextureUL, "OpenGL", "Texture Upload", MP_RGB(128, 64, 192)); diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 0b8ae3eb4..a15fb7b07 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -802,6 +802,18 @@ public: return params.size_in_bytes_total; } + void Flush() { + // There is no need to flush the surface if it hasn't been modified by us. 
+ if (!dirty) + return; + FlushGLBuffer(); + dirty = false; + } + + void MarkAsDirty() { + dirty = true; + } + const OGLTexture& Texture() const { return texture; } @@ -833,6 +845,7 @@ private: std::vector gl_buffer; SurfaceParams params; GLenum gl_target; + bool dirty = false; }; class RasterizerCacheOpenGL final : public RasterizerCache { From 0be7e8228952c2e08644e4ebc56aa0274042bdae Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 9 Oct 2018 19:28:58 -0400 Subject: [PATCH 02/16] rasterizer_cache: Reintroduce method for flushing. --- src/video_core/rasterizer_cache.h | 17 +++++++++++++++++ .../renderer_opengl/gl_buffer_cache.h | 3 +++ .../renderer_opengl/gl_shader_cache.h | 3 +++ 3 files changed, 23 insertions(+) diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index 083b283b0..4a34491a9 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h @@ -17,6 +17,22 @@ template class RasterizerCache : NonCopyable { public: + /// Write any cached resources overlapping the region back to memory (if dirty) + void FlushRegion(Tegra::GPUVAddr addr, size_t size) { + if (size == 0) + return; + + const ObjectInterval interval{addr, addr + size}; + for (auto& pair : boost::make_iterator_range(object_cache.equal_range(interval))) { + for (auto& cached_object : pair.second) { + if (!cached_object) + continue; + + cached_object->Flush(); + } + } + } + /// Mark the specified region as being invalidated void InvalidateRegion(VAddr addr, u64 size) { if (size == 0) @@ -71,6 +87,7 @@ protected: void Unregister(const T& object) { auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer(); rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1); + object->Flush(); object_cache.subtract({GetInterval(object), ObjectSet{object}}); } diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index 965976334..b389ca684 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -24,6 +24,9 @@ struct CachedBufferEntry final { return size; } + // We do not have to flush this cache as things in it are never modified by us. + void Flush() {} + VAddr addr; std::size_t size; GLintptr offset; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 7bb287f56..d9157ec3c 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -33,6 +33,9 @@ public: return GLShader::MAX_PROGRAM_CODE_LENGTH * sizeof(u64); } + // We do not have to flush this cache as things in it are never modified by us. + void Flush() {} + /// Gets the shader entries for the shader const GLShader::ShaderEntries& GetShaderEntries() const { return entries; From 37575eae654c1cbcf63724107a77a03ffc114e5a Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Oct 2018 21:52:16 -0400 Subject: [PATCH 03/16] memory_manager: Add a method for querying the end of a mapped GPU region. 
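The query walks the list of mapped regions and returns the end address of the region containing the given GPU virtual address, or 0 when the address is not mapped. As a rough usage sketch (illustrative only; the caller code here is not part of this patch), a consumer can bound how many bytes are safe to touch starting at gpu_addr:

    const GPUVAddr region_end = memory_manager.GetRegionEnd(gpu_addr);
    if (region_end == 0) {
        // gpu_addr is not inside any mapped region; nothing is safe to access.
    }
    const u64 max_size = region_end - gpu_addr;                   // bytes remaining in the mapped region
    const u64 safe_size = std::min<u64>(surface_size, max_size);  // clamp a surface copy/flush

This is the bound the next patch uses to clamp cached surface sizes so a flush never writes past the mapped region.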
--- src/video_core/memory_manager.cpp | 10 ++++++++++ src/video_core/memory_manager.h | 1 + 2 files changed, 11 insertions(+) diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index ca923d17d..022d4ab74 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -87,6 +87,16 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { return gpu_addr; } +GPUVAddr MemoryManager::GetRegionEnd(GPUVAddr region_start) const { + for (const auto& region : mapped_regions) { + const GPUVAddr region_end{region.gpu_addr + region.size}; + if (region_start >= region.gpu_addr && region_start < region_end) { + return region_end; + } + } + return {}; +} + boost::optional MemoryManager::FindFreeBlock(u64 size, u64 align) { GPUVAddr gpu_addr = 0; u64 free_space = 0; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 86765e72a..caf80093f 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -26,6 +26,7 @@ public: GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size); GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size); GPUVAddr UnmapBuffer(GPUVAddr gpu_addr, u64 size); + GPUVAddr GetRegionEnd(GPUVAddr region_start) const; boost::optional GpuToCpuAddress(GPUVAddr gpu_addr); std::vector CpuToGpuAddress(VAddr cpu_addr) const; From 4e9683e9d5518a2e5dc69ddd12f20685a94646e2 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Oct 2018 22:24:40 -0400 Subject: [PATCH 04/16] gl_rasterizer_cache: Clamp cached surface size to mapped GPU region size. --- .../renderer_opengl/gl_rasterizer_cache.cpp | 45 ++++++++++++------- .../renderer_opengl/gl_rasterizer_cache.h | 11 +++-- 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 1bb842fe7..24781a3c1 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -34,16 +34,29 @@ struct FormatTuple { bool compressed; }; -static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { - auto& gpu{Core::System::GetInstance().GPU()}; - const auto cpu_addr{gpu.MemoryManager().GpuToCpuAddress(gpu_addr)}; - return cpu_addr ? *cpu_addr : 0; +void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr) { + auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; + const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)}; + const auto max_size{memory_manager.GetRegionEnd(gpu_addr) - gpu_addr}; + + addr = cpu_addr ? *cpu_addr : 0; + size_in_bytes_total = SizeInBytesTotal(); + size_in_bytes_2d = SizeInBytes2D(); + + // Clamp sizes to mapped GPU memory region + if (size_in_bytes_2d > max_size) { + LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes_2d, max_size); + size_in_bytes_total = max_size; + size_in_bytes_2d = max_size; + } else if (size_in_bytes_total > max_size) { + LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes_total, max_size); + size_in_bytes_total = max_size; + } } /*static*/ SurfaceParams SurfaceParams::CreateForTexture( const Tegra::Texture::FullTextureInfo& config, const GLShader::SamplerEntry& entry) { SurfaceParams params{}; - params.addr = TryGetCpuAddr(config.tic.Address()); params.is_tiled = config.tic.IsTiled(); params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0, params.block_height = params.is_tiled ? 
config.tic.BlockHeight() : 0, @@ -87,18 +100,18 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { break; } - params.size_in_bytes_total = params.SizeInBytesTotal(); - params.size_in_bytes_2d = params.SizeInBytes2D(); params.max_mip_level = config.tic.max_mip_level + 1; params.rt = {}; + params.InitCacheParameters(config.tic.Address()); + return params; } /*static*/ SurfaceParams SurfaceParams::CreateForFramebuffer(std::size_t index) { const auto& config{Core::System::GetInstance().GPU().Maxwell3D().regs.rt[index]}; SurfaceParams params{}; - params.addr = TryGetCpuAddr(config.Address()); + params.is_tiled = config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear; params.block_width = 1 << config.memory_layout.block_width; @@ -112,8 +125,6 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { params.unaligned_height = config.height; params.target = SurfaceTarget::Texture2D; params.depth = 1; - params.size_in_bytes_total = params.SizeInBytesTotal(); - params.size_in_bytes_2d = params.SizeInBytes2D(); params.max_mip_level = 0; // Render target specific parameters, not used for caching @@ -122,6 +133,8 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { params.rt.layer_stride = config.layer_stride; params.rt.base_layer = config.base_layer; + params.InitCacheParameters(config.Address()); + return params; } @@ -130,7 +143,7 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { u32 block_width, u32 block_height, u32 block_depth, Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) { SurfaceParams params{}; - params.addr = TryGetCpuAddr(zeta_address); + params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear; params.block_width = 1 << std::min(block_width, 5U); params.block_height = 1 << std::min(block_height, 5U); @@ -143,18 +156,18 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { params.unaligned_height = zeta_height; params.target = SurfaceTarget::Texture2D; params.depth = 1; - params.size_in_bytes_total = params.SizeInBytesTotal(); - params.size_in_bytes_2d = params.SizeInBytes2D(); params.max_mip_level = 0; params.rt = {}; + params.InitCacheParameters(zeta_address); + return params; } /*static*/ SurfaceParams SurfaceParams::CreateForFermiCopySurface( const Tegra::Engines::Fermi2D::Regs::Surface& config) { SurfaceParams params{}; - params.addr = TryGetCpuAddr(config.Address()); + params.is_tiled = !config.linear; params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 32U) : 0, params.block_height = params.is_tiled ? 
std::min(config.BlockHeight(), 32U) : 0, @@ -167,11 +180,11 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) { params.unaligned_height = config.height; params.target = SurfaceTarget::Texture2D; params.depth = 1; - params.size_in_bytes_total = params.SizeInBytesTotal(); - params.size_in_bytes_2d = params.SizeInBytes2D(); params.max_mip_level = 0; params.rt = {}; + params.InitCacheParameters(config.Address()); + return params; } diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index a15fb7b07..f6f7aad82 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -742,7 +742,9 @@ struct SurfaceParams { other.depth); } - VAddr addr; + /// Initializes parameters for caching, should be called after everything has been initialized + void InitCacheParameters(Tegra::GPUVAddr gpu_addr); + bool is_tiled; u32 block_width; u32 block_height; @@ -754,11 +756,14 @@ struct SurfaceParams { u32 height; u32 depth; u32 unaligned_height; - std::size_t size_in_bytes_total; - std::size_t size_in_bytes_2d; SurfaceTarget target; u32 max_mip_level; + // Parameters used for caching + VAddr addr; + std::size_t size_in_bytes_total; + std::size_t size_in_bytes_2d; + // Render target specific parameters, not used in caching struct { u32 index; From b4e29ccb8143ed54d9efeb378db9f45d0153b2e2 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Oct 2018 22:28:02 -0400 Subject: [PATCH 05/16] gl_rasterizer_cache: Remove usage of Memory::Read/Write functions. - These cannot be used within the cache, as they change cache state. --- .../renderer_opengl/gl_rasterizer_cache.cpp | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 24781a3c1..3eedb0d50 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -668,8 +668,10 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface, std::size_t remaining_size = dst_params.size_in_bytes_total - src_params.size_in_bytes_total; std::vector data(remaining_size); - Memory::ReadBlock(dst_params.addr + src_params.size_in_bytes_total, data.data(), - data.size()); + std::memcpy(data.data(), + Memory::GetPointer(dst_params.addr + src_params.size_in_bytes_total), + data.size()); + glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes_total, remaining_size, data.data()); } @@ -916,13 +918,8 @@ void CachedSurface::LoadGLBuffer() { MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64)); void CachedSurface::FlushGLBuffer() { MICROPROFILE_SCOPE(OpenGL_SurfaceFlush); - const auto& rect{params.GetRect()}; + // Load data from memory to the surface - const GLint x0 = static_cast(rect.left); - const GLint y0 = static_cast(rect.bottom); - const size_t buffer_offset = - static_cast(static_cast(y0) * params.width + static_cast(x0)) * - GetGLBytesPerPixel(params.pixel_format); const u32 bytes_per_pixel = GetGLBytesPerPixel(params.pixel_format); const u32 copy_size = params.width * params.height * bytes_per_pixel; gl_buffer.resize(static_cast(params.depth) * copy_size); @@ -931,7 +928,6 @@ void CachedSurface::FlushGLBuffer() { ASSERT(params.width * GetGLBytesPerPixel(params.pixel_format) % 4 == 0); glPixelStorei(GL_PACK_ROW_LENGTH, static_cast(params.width)); 
ASSERT(!tuple.compressed); - ASSERT(x0 == 0 && y0 == 0); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); glGetTextureImage(texture.handle, 0, tuple.format, tuple.type, gl_buffer.size(), gl_buffer.data()); @@ -954,11 +950,10 @@ void CachedSurface::FlushGLBuffer() { block_depth = 1U; } gl_to_morton_fns[static_cast(params.pixel_format)]( - params.width, params.block_height, params.height, block_depth, depth, - &gl_buffer[buffer_offset], copy_size, params.addr + buffer_offset); + params.width, params.block_height, params.height, block_depth, depth, gl_buffer.data(), + copy_size, GetAddr()); } else { - Memory::WriteBlock(params.addr + buffer_offset, &gl_buffer[buffer_offset], - gl_buffer.size() - buffer_offset); + std::memcpy(Memory::GetPointer(GetAddr()), gl_buffer.data(), GetSizeInBytes()); } } From 3afdfd7bfa50399ace417114786fedf429d44e70 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Oct 2018 22:31:04 -0400 Subject: [PATCH 06/16] gl_rasterizer: Implement flushing. --- .../renderer_opengl/gl_rasterizer.cpp | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 8d5f277e2..18db07217 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -424,6 +424,13 @@ void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_dep // Used when just a single color attachment is enabled, e.g. for clearing a color buffer Surface color_surface = res_cache.GetColorBufferSurface(*single_color_target, preserve_contents); + + if (color_surface) { + // Assume that a surface will be written to if it is used as a framebuffer, even if + // the shader doesn't actually write to it. + color_surface->MarkAsDirty(); + } + glFramebufferTexture2D( GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + static_cast(*single_color_target), GL_TEXTURE_2D, @@ -434,6 +441,13 @@ void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_dep std::array buffers; for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) { Surface color_surface = res_cache.GetColorBufferSurface(index, preserve_contents); + + if (color_surface) { + // Assume that a surface will be written to if it is used as a framebuffer, even + // if the shader doesn't actually write to it. + color_surface->MarkAsDirty(); + } + buffers[index] = GL_COLOR_ATTACHMENT0 + regs.rt_control.GetMap(index); glFramebufferTexture2D( GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + static_cast(index), @@ -453,6 +467,10 @@ void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_dep } if (depth_surface) { + // Assume that a surface will be written to if it is used as a framebuffer, even if + // the shader doesn't actually write to it. 
+ depth_surface->MarkAsDirty(); + if (regs.stencil_enable) { // Attach both depth and stencil glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, @@ -617,7 +635,12 @@ void RasterizerOpenGL::DrawArrays() { void RasterizerOpenGL::FlushAll() {} -void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {} +void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { + MICROPROFILE_SCOPE(OpenGL_CacheManagement); + res_cache.FlushRegion(addr, size); + shader_cache.FlushRegion(addr, size); + buffer_cache.FlushRegion(addr, size); +} void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); @@ -627,6 +650,7 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { } void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) { + FlushRegion(addr, size); InvalidateRegion(addr, size); } From cf7b46c101ec56b24968c041e62c22dbeaa8e468 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Oct 2018 22:32:28 -0400 Subject: [PATCH 07/16] gl_rasterizer_cache: Remove unused FlushSurface method. --- src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 4 ---- src/video_core/renderer_opengl/gl_rasterizer_cache.h | 3 --- 2 files changed, 7 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 3eedb0d50..6abb89f36 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -1121,10 +1121,6 @@ void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) { surface->UploadGLTexture(read_framebuffer.handle, draw_framebuffer.handle); } -void RasterizerCacheOpenGL::FlushSurface(const Surface& surface) { - surface->FlushGLBuffer(); -} - Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) { if (params.addr == 0 || params.height * params.width == 0) { return {}; diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index f6f7aad82..247b58297 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -867,9 +867,6 @@ public: /// Get the color surface based on the framebuffer configuration and the specified render target Surface GetColorBufferSurface(std::size_t index, bool preserve_contents); - /// Flushes the surface to Switch memory - void FlushSurface(const Surface& surface); - /// Tries to find a framebuffer using on the provided CPU address Surface TryFindFramebufferSurface(VAddr addr) const; From 58be4dff79ab48a4e052714250386bc61ef08982 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Oct 2018 23:44:18 -0400 Subject: [PATCH 08/16] gl_rasterizer_cache: Rename GetGLBytesPerPixel to GetBytesPerPixel. - This does not really have anything to do with OpenGL. 
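For context, the helper being renamed and moved simply converts the per-format bit count into bytes, roughly (as it appears on SurfaceParams after this patch):

    static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) {
        if (pixel_format == PixelFormat::Invalid) {
            return 0;
        }
        return GetFormatBpp(pixel_format) / CHAR_BIT; // bits per pixel -> bytes per pixel
    }

so it belongs on the generic SurfaceParams rather than on the OpenGL-specific CachedSurface.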
--- .../renderer_opengl/gl_rasterizer_cache.cpp | 20 +++++++++---------- .../renderer_opengl/gl_rasterizer_cache.h | 15 +++++++------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 6abb89f36..c2cd67f02 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -346,8 +346,7 @@ static bool IsFormatBCn(PixelFormat format) { template void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, u8* gl_buffer, std::size_t gl_buffer_size, VAddr addr) { - constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT; - constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format); + constexpr u32 bytes_per_pixel = SurfaceParams::GetBytesPerPixel(format); // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual // pixel values. @@ -785,7 +784,7 @@ static void ConvertS8Z24ToZ24S8(std::vector& data, u32 width, u32 height, bo S8Z24 s8z24_pixel{}; Z24S8 z24s8_pixel{}; - constexpr auto bpp{CachedSurface::GetGLBytesPerPixel(PixelFormat::S8Z24)}; + constexpr auto bpp{SurfaceParams::GetBytesPerPixel(PixelFormat::S8Z24)}; for (std::size_t y = 0; y < height; ++y) { for (std::size_t x = 0; x < width; ++x) { const std::size_t offset{bpp * (y * width + x)}; @@ -805,7 +804,7 @@ static void ConvertS8Z24ToZ24S8(std::vector& data, u32 width, u32 height, bo } static void ConvertG8R8ToR8G8(std::vector& data, u32 width, u32 height) { - constexpr auto bpp{CachedSurface::GetGLBytesPerPixel(PixelFormat::G8R8U)}; + constexpr auto bpp{SurfaceParams::GetBytesPerPixel(PixelFormat::G8R8U)}; for (std::size_t y = 0; y < height; ++y) { for (std::size_t x = 0; x < width; ++x) { const std::size_t offset{bpp * (y * width + x)}; @@ -880,7 +879,7 @@ void CachedSurface::LoadGLBuffer() { ASSERT(texture_src_data); - const u32 bytes_per_pixel = GetGLBytesPerPixel(params.pixel_format); + const u32 bytes_per_pixel = SurfaceParams::GetBytesPerPixel(params.pixel_format); const u32 copy_size = params.width * params.height * bytes_per_pixel; const std::size_t total_size = copy_size * params.depth; @@ -920,12 +919,12 @@ void CachedSurface::FlushGLBuffer() { MICROPROFILE_SCOPE(OpenGL_SurfaceFlush); // Load data from memory to the surface - const u32 bytes_per_pixel = GetGLBytesPerPixel(params.pixel_format); + const u32 bytes_per_pixel = SurfaceParams::GetBytesPerPixel(params.pixel_format); const u32 copy_size = params.width * params.height * bytes_per_pixel; gl_buffer.resize(static_cast(params.depth) * copy_size); const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type); // Ensure no bad interactions with GL_UNPACK_ALIGNMENT - ASSERT(params.width * GetGLBytesPerPixel(params.pixel_format) % 4 == 0); + ASSERT(params.width * SurfaceParams::GetBytesPerPixel(params.pixel_format) % 4 == 0); glPixelStorei(GL_PACK_ROW_LENGTH, static_cast(params.width)); ASSERT(!tuple.compressed); glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); @@ -965,7 +964,8 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle MICROPROFILE_SCOPE(OpenGL_TextureUL); ASSERT(gl_buffer.size() == static_cast(params.width) * params.height * - GetGLBytesPerPixel(params.pixel_format) * params.depth); + SurfaceParams::GetBytesPerPixel(params.pixel_format) * + params.depth); const auto& rect{params.GetRect()}; @@ -975,7 +975,7 @@ void 
CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle std::size_t buffer_offset = static_cast(static_cast(y0) * params.width + static_cast(x0)) * - GetGLBytesPerPixel(params.pixel_format); + SurfaceParams::GetBytesPerPixel(params.pixel_format); const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type); const GLuint target_tex = texture.handle; @@ -991,7 +991,7 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle cur_state.Apply(); // Ensure no bad interactions with GL_UNPACK_ALIGNMENT - ASSERT(params.width * GetGLBytesPerPixel(params.pixel_format) % 4 == 0); + ASSERT(params.width * SurfaceParams::GetBytesPerPixel(params.pixel_format) % 4 == 0); glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast(params.width)); glActiveTexture(GL_TEXTURE0); diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 247b58297..5dbef0c89 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -701,6 +701,14 @@ struct SurfaceParams { return SurfaceType::Invalid; } + /// Returns the sizer in bytes of the specified pixel format + static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) { + if (pixel_format == SurfaceParams::PixelFormat::Invalid) { + return 0; + } + return GetFormatBpp(pixel_format) / CHAR_BIT; + } + /// Returns the rectangle corresponding to this surface MathUtil::Rectangle GetRect() const; @@ -827,13 +835,6 @@ public: return gl_target; } - static constexpr unsigned int GetGLBytesPerPixel(SurfaceParams::PixelFormat format) { - if (format == SurfaceParams::PixelFormat::Invalid) - return 0; - - return SurfaceParams::GetFormatBpp(format) / CHAR_BIT; - } - const SurfaceParams& GetSurfaceParams() const { return params; } From 5f79ba04bd617254f47e1d707479ab2468f8aaf9 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 13 Oct 2018 02:08:04 -0400 Subject: [PATCH 09/16] gl_rasterizer_cache: Separate guest and host surface size managment. --- .../renderer_opengl/gl_rasterizer_cache.cpp | 146 ++++++++---------- .../renderer_opengl/gl_rasterizer_cache.h | 38 +++-- 2 files changed, 93 insertions(+), 91 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index c2cd67f02..4ba34ebc4 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -34,23 +34,56 @@ struct FormatTuple { bool compressed; }; +static bool IsPixelFormatASTC(PixelFormat format) { + switch (format) { + case PixelFormat::ASTC_2D_4X4: + case PixelFormat::ASTC_2D_5X4: + case PixelFormat::ASTC_2D_8X8: + case PixelFormat::ASTC_2D_8X5: + return true; + default: + return false; + } +} + +static std::pair GetASTCBlockSize(PixelFormat format) { + switch (format) { + case PixelFormat::ASTC_2D_4X4: + return {4, 4}; + case PixelFormat::ASTC_2D_5X4: + return {5, 4}; + case PixelFormat::ASTC_2D_8X8: + return {8, 8}; + case PixelFormat::ASTC_2D_8X5: + return {8, 5}; + default: + LOG_CRITICAL(HW_GPU, "Unhandled format: {}", static_cast(format)); + UNREACHABLE(); + } +} + void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr) { auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)}; const auto max_size{memory_manager.GetRegionEnd(gpu_addr) - gpu_addr}; addr = cpu_addr ? 
*cpu_addr : 0; - size_in_bytes_total = SizeInBytesTotal(); - size_in_bytes_2d = SizeInBytes2D(); + size_in_bytes = SizeInBytesRaw(); - // Clamp sizes to mapped GPU memory region - if (size_in_bytes_2d > max_size) { - LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes_2d, max_size); - size_in_bytes_total = max_size; - size_in_bytes_2d = max_size; - } else if (size_in_bytes_total > max_size) { - LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes_total, max_size); - size_in_bytes_total = max_size; + if (IsPixelFormatASTC(pixel_format)) { + // ASTC is uncompressed in software, in emulated as RGBA8 + size_in_bytes_gl = width * height * depth * 4; + } else { + size_in_bytes_gl = SizeInBytesGL(); + } + + // Clamp size to mapped GPU memory region + // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000 + // R32F render buffer. We do not yet know if this is a game bug or something else, but this + // check is necessary to prevent flushing from overwriting unmapped memory. + if (size_in_bytes > max_size) { + LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes, max_size); + size_in_bytes = max_size; } } @@ -289,34 +322,6 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType return format; } -static bool IsPixelFormatASTC(PixelFormat format) { - switch (format) { - case PixelFormat::ASTC_2D_4X4: - case PixelFormat::ASTC_2D_5X4: - case PixelFormat::ASTC_2D_8X8: - case PixelFormat::ASTC_2D_8X5: - return true; - default: - return false; - } -} - -static std::pair GetASTCBlockSize(PixelFormat format) { - switch (format) { - case PixelFormat::ASTC_2D_4X4: - return {4, 4}; - case PixelFormat::ASTC_2D_5X4: - return {5, 4}; - case PixelFormat::ASTC_2D_8X8: - return {8, 8}; - case PixelFormat::ASTC_2D_8X5: - return {8, 5}; - default: - LOG_CRITICAL(HW_GPU, "Unhandled format: {}", static_cast(format)); - UNREACHABLE(); - } -} - MathUtil::Rectangle SurfaceParams::GetRect() const { u32 actual_height{unaligned_height}; if (IsPixelFormatASTC(pixel_format)) { @@ -358,7 +363,7 @@ void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 d const std::size_t size_to_copy{std::min(gl_buffer_size, data.size())}; memcpy(gl_buffer, data.data(), size_to_copy); } else { - std::vector data(height * stride * bytes_per_pixel); + std::vector data(gl_buffer_size); Tegra::Texture::CopySwizzledData(stride / tile_size, height / tile_size, depth, bytes_per_pixel, bytes_per_pixel, data.data(), gl_buffer, false, block_height, block_depth); @@ -639,22 +644,21 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface, auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type); auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type); - std::size_t buffer_size = - std::max(src_params.size_in_bytes_total, dst_params.size_in_bytes_total); + std::size_t buffer_size = std::max(src_params.size_in_bytes, dst_params.size_in_bytes); glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle); glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB); if (source_format.compressed) { glGetCompressedTextureImage(src_surface->Texture().handle, src_attachment, - static_cast(src_params.size_in_bytes_total), nullptr); + static_cast(src_params.size_in_bytes), nullptr); } else { glGetTextureImage(src_surface->Texture().handle, src_attachment, source_format.format, - source_format.type, 
static_cast(src_params.size_in_bytes_total), + source_format.type, static_cast(src_params.size_in_bytes), nullptr); } // If the new texture is bigger than the previous one, we need to fill in the rest with data // from the CPU. - if (src_params.size_in_bytes_total < dst_params.size_in_bytes_total) { + if (src_params.size_in_bytes < dst_params.size_in_bytes) { // Upload the rest of the memory. if (dst_params.is_tiled) { // TODO(Subv): We might have to de-tile the subtexture and re-tile it with the rest @@ -664,14 +668,12 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface, LOG_DEBUG(HW_GPU, "Trying to upload extra texture data from the CPU during " "reinterpretation but the texture is tiled."); } - std::size_t remaining_size = - dst_params.size_in_bytes_total - src_params.size_in_bytes_total; + std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes; std::vector data(remaining_size); - std::memcpy(data.data(), - Memory::GetPointer(dst_params.addr + src_params.size_in_bytes_total), + std::memcpy(data.data(), Memory::GetPointer(dst_params.addr + src_params.size_in_bytes), data.size()); - glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes_total, remaining_size, + glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes, remaining_size, data.data()); } @@ -873,20 +875,10 @@ static void ConvertFormatAsNeeded_FlushGLBuffer(std::vector& data, PixelForm MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64, 192)); void CachedSurface::LoadGLBuffer() { - ASSERT(params.type != SurfaceType::Fill); - - const u8* const texture_src_data = Memory::GetPointer(params.addr); - - ASSERT(texture_src_data); - - const u32 bytes_per_pixel = SurfaceParams::GetBytesPerPixel(params.pixel_format); - const u32 copy_size = params.width * params.height * bytes_per_pixel; - const std::size_t total_size = copy_size * params.depth; - MICROPROFILE_SCOPE(OpenGL_SurfaceLoad); + gl_buffer.resize(params.size_in_bytes_gl); if (params.is_tiled) { - gl_buffer.resize(total_size); u32 depth = params.depth; u32 block_depth = params.block_depth; @@ -899,13 +891,12 @@ void CachedSurface::LoadGLBuffer() { block_depth = 1U; } - const std::size_t size = copy_size * depth; - morton_to_gl_fns[static_cast(params.pixel_format)]( params.width, params.block_height, params.height, block_depth, depth, gl_buffer.data(), - size, params.addr); + gl_buffer.size(), params.addr); } else { - const u8* const texture_src_data_end{texture_src_data + total_size}; + const auto texture_src_data{Memory::GetPointer(params.addr)}; + const auto texture_src_data_end{texture_src_data + params.size_in_bytes_gl}; gl_buffer.assign(texture_src_data, texture_src_data_end); } @@ -918,10 +909,11 @@ MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, void CachedSurface::FlushGLBuffer() { MICROPROFILE_SCOPE(OpenGL_SurfaceFlush); - // Load data from memory to the surface - const u32 bytes_per_pixel = SurfaceParams::GetBytesPerPixel(params.pixel_format); - const u32 copy_size = params.width * params.height * bytes_per_pixel; - gl_buffer.resize(static_cast(params.depth) * copy_size); + ASSERT_MSG(!IsPixelFormatASTC(params.pixel_format), "Unimplemented"); + + // OpenGL temporary buffer needs to be big enough to store raw texture size + gl_buffer.resize(params.size_in_bytes); + const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type); // Ensure no bad interactions with GL_UNPACK_ALIGNMENT ASSERT(params.width * 
SurfaceParams::GetBytesPerPixel(params.pixel_format) % 4 == 0); @@ -950,7 +942,7 @@ void CachedSurface::FlushGLBuffer() { } gl_to_morton_fns[static_cast(params.pixel_format)]( params.width, params.block_height, params.height, block_depth, depth, gl_buffer.data(), - copy_size, GetAddr()); + gl_buffer.size(), GetAddr()); } else { std::memcpy(Memory::GetPointer(GetAddr()), gl_buffer.data(), GetSizeInBytes()); } @@ -963,10 +955,6 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle MICROPROFILE_SCOPE(OpenGL_TextureUL); - ASSERT(gl_buffer.size() == static_cast(params.width) * params.height * - SurfaceParams::GetBytesPerPixel(params.pixel_format) * - params.depth); - const auto& rect{params.GetRect()}; // Load data from memory to the surface @@ -1001,7 +989,7 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle glCompressedTexImage2D( SurfaceTargetToGL(params.target), 0, tuple.internal_format, static_cast(params.width), static_cast(params.height), 0, - static_cast(params.size_in_bytes_2d), &gl_buffer[buffer_offset]); + static_cast(params.size_in_bytes_gl), &gl_buffer[buffer_offset]); break; case SurfaceParams::SurfaceTarget::Texture3D: case SurfaceParams::SurfaceTarget::Texture2DArray: @@ -1009,16 +997,16 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle SurfaceTargetToGL(params.target), 0, tuple.internal_format, static_cast(params.width), static_cast(params.height), static_cast(params.depth), 0, - static_cast(params.size_in_bytes_total), &gl_buffer[buffer_offset]); + static_cast(params.size_in_bytes_gl), &gl_buffer[buffer_offset]); break; case SurfaceParams::SurfaceTarget::TextureCubemap: for (std::size_t face = 0; face < params.depth; ++face) { glCompressedTexImage2D(static_cast(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face), 0, tuple.internal_format, static_cast(params.width), static_cast(params.height), 0, - static_cast(params.size_in_bytes_2d), + static_cast(params.SizeInBytesCubeFaceGL()), &gl_buffer[buffer_offset]); - buffer_offset += params.size_in_bytes_2d; + buffer_offset += params.SizeInBytesCubeFace(); } break; default: @@ -1028,7 +1016,7 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle glCompressedTexImage2D( GL_TEXTURE_2D, 0, tuple.internal_format, static_cast(params.width), static_cast(params.height), 0, - static_cast(params.size_in_bytes_2d), &gl_buffer[buffer_offset]); + static_cast(params.size_in_bytes_gl), &gl_buffer[buffer_offset]); } } else { @@ -1057,7 +1045,7 @@ void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle y0, static_cast(rect.GetWidth()), static_cast(rect.GetHeight()), tuple.format, tuple.type, &gl_buffer[buffer_offset]); - buffer_offset += params.size_in_bytes_2d; + buffer_offset += params.SizeInBytesCubeFace(); } break; default: diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 5dbef0c89..843f18cea 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -18,6 +18,7 @@ #include "video_core/rasterizer_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_shader_gen.h" +#include "video_core/textures/decoders.h" #include "video_core/textures/texture.h" namespace OpenGL { @@ -712,18 +713,31 @@ struct SurfaceParams { /// Returns the rectangle corresponding to this surface MathUtil::Rectangle GetRect() const; - /// 
Returns the size of this surface as a 2D texture in bytes, adjusted for compression - std::size_t SizeInBytes2D() const { + /// Returns the total size of this surface in bytes, adjusted for compression + std::size_t SizeInBytesRaw(bool ignore_tiled = false) const { const u32 compression_factor{GetCompressionFactor(pixel_format)}; - ASSERT(width % compression_factor == 0); - ASSERT(height % compression_factor == 0); - return (width / compression_factor) * (height / compression_factor) * - GetFormatBpp(pixel_format) / CHAR_BIT; + const u32 bytes_per_pixel{GetBytesPerPixel(pixel_format)}; + const size_t uncompressed_size{ + Tegra::Texture::CalculateSize((ignore_tiled ? false : is_tiled), bytes_per_pixel, width, + height, depth, block_height, block_depth)}; + + // Divide by compression_factor^2, as height and width are factored by this + return uncompressed_size / (compression_factor * compression_factor); } - /// Returns the total size of this surface in bytes, adjusted for compression - std::size_t SizeInBytesTotal() const { - return SizeInBytes2D() * depth; + /// Returns the size of this surface as an OpenGL texture in bytes + std::size_t SizeInBytesGL() const { + return SizeInBytesRaw(true); + } + + /// Returns the size of this surface as a cube face in bytes + std::size_t SizeInBytesCubeFace() const { + return size_in_bytes / 6; + } + + /// Returns the size of this surface as an OpenGL cube face in bytes + std::size_t SizeInBytesCubeFaceGL() const { + return size_in_bytes_gl / 6; } /// Creates SurfaceParams from a texture configuration @@ -769,8 +783,8 @@ struct SurfaceParams { // Parameters used for caching VAddr addr; - std::size_t size_in_bytes_total; - std::size_t size_in_bytes_2d; + std::size_t size_in_bytes; + std::size_t size_in_bytes_gl; // Render target specific parameters, not used in caching struct { @@ -812,7 +826,7 @@ public: } std::size_t GetSizeInBytes() const { - return params.size_in_bytes_total; + return params.size_in_bytes; } void Flush() { From 949d7832fafa33f7f553e2ca1139c1a16150984f Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 14 Oct 2018 16:09:01 -0400 Subject: [PATCH 10/16] gl_rasterizer_cache: Only flush when use_accurate_framebuffers is enabled. 
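Reading surface data back from the GL texture into guest memory is comparatively expensive, so it is now gated behind the accuracy setting. The guard pattern applied in both the rasterizer and the cache looks like this (condensed from the diff below):

    if (Settings::values.use_accurate_framebuffers) {
        // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit
        res_cache.FlushRegion(addr, size);
    }

With the setting disabled, cached surfaces are still invalidated on CPU writes, but their contents are no longer written back to guest memory when a flush or unregister occurs.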
--- src/video_core/rasterizer_cache.h | 8 +++++++- src/video_core/renderer_opengl/gl_rasterizer.cpp | 7 ++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index 4a34491a9..76743a85b 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h @@ -11,6 +11,7 @@ #include "common/common_types.h" #include "core/core.h" +#include "core/settings.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" @@ -87,7 +88,12 @@ protected: void Unregister(const T& object) { auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer(); rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1); - object->Flush(); + + if (Settings::values.use_accurate_framebuffers) { + // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit + object->Flush(); + } + object_cache.subtract({GetInterval(object), ObjectSet{object}}); } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 18db07217..0485dfb7a 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -637,7 +637,12 @@ void RasterizerOpenGL::FlushAll() {} void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); - res_cache.FlushRegion(addr, size); + + if (Settings::values.use_accurate_framebuffers) { + // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit + res_cache.FlushRegion(addr, size); + } + shader_cache.FlushRegion(addr, size); buffer_cache.FlushRegion(addr, size); } From 0e59291310b324264c311e7b7e7e309f080bd232 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 15 Oct 2018 21:24:34 -0400 Subject: [PATCH 11/16] gl_rasterizer_cache: Refactor to only call GetRegionEnd on surface creation. --- .../renderer_opengl/gl_rasterizer_cache.cpp | 32 +++++++++++-------- .../renderer_opengl/gl_rasterizer_cache.h | 7 ++-- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 4ba34ebc4..a1f541e75 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -62,12 +62,12 @@ static std::pair GetASTCBlockSize(PixelFormat format) { } } -void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr) { +void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) { auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; - const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)}; - const auto max_size{memory_manager.GetRegionEnd(gpu_addr) - gpu_addr}; + const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr_)}; addr = cpu_addr ? *cpu_addr : 0; + gpu_addr = gpu_addr_; size_in_bytes = SizeInBytesRaw(); if (IsPixelFormatASTC(pixel_format)) { @@ -76,15 +76,6 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr) { } else { size_in_bytes_gl = SizeInBytesGL(); } - - // Clamp size to mapped GPU memory region - // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000 - // R32F render buffer. We do not yet know if this is a game bug or something else, but this - // check is necessary to prevent flushing from overwriting unmapped memory. 
- if (size_in_bytes > max_size) { - LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", size_in_bytes, max_size); - size_in_bytes = max_size; - } } /*static*/ SurfaceParams SurfaceParams::CreateForTexture( @@ -719,7 +710,8 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface, } CachedSurface::CachedSurface(const SurfaceParams& params) - : params(params), gl_target(SurfaceTargetToGL(params.target)) { + : params(params), gl_target(SurfaceTargetToGL(params.target)), + cached_size_in_bytes(params.size_in_bytes) { texture.Create(); const auto& rect{params.GetRect()}; @@ -769,6 +761,18 @@ CachedSurface::CachedSurface(const SurfaceParams& params) VideoCore::LabelGLObject(GL_TEXTURE, texture.handle, params.addr, SurfaceParams::SurfaceTargetName(params.target)); + + // Clamp size to mapped GPU memory region + // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000 + // R32F render buffer. We do not yet know if this is a game bug or something else, but this + // check is necessary to prevent flushing from overwriting unmapped memory. + + auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; + const u64 max_size{memory_manager.GetRegionEnd(params.gpu_addr) - params.gpu_addr}; + if (cached_size_in_bytes > max_size) { + LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", params.size_in_bytes, max_size); + cached_size_in_bytes = max_size; + } } static void ConvertS8Z24ToZ24S8(std::vector& data, u32 width, u32 height, bool reverse) { @@ -912,7 +916,7 @@ void CachedSurface::FlushGLBuffer() { ASSERT_MSG(!IsPixelFormatASTC(params.pixel_format), "Unimplemented"); // OpenGL temporary buffer needs to be big enough to store raw texture size - gl_buffer.resize(params.size_in_bytes); + gl_buffer.resize(GetSizeInBytes()); const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type); // Ensure no bad interactions with GL_UNPACK_ALIGNMENT diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 843f18cea..39fd7cd75 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -783,6 +783,7 @@ struct SurfaceParams { // Parameters used for caching VAddr addr; + Tegra::GPUVAddr gpu_addr; std::size_t size_in_bytes; std::size_t size_in_bytes_gl; @@ -802,7 +803,8 @@ struct SurfaceReserveKey : Common::HashableStruct { static SurfaceReserveKey Create(const OpenGL::SurfaceParams& params) { SurfaceReserveKey res; res.state = params; - res.state.rt = {}; // Ignore rt config in caching + res.state.gpu_addr = {}; // Ignore GPU vaddr in caching + res.state.rt = {}; // Ignore rt config in caching return res; } }; @@ -826,7 +828,7 @@ public: } std::size_t GetSizeInBytes() const { - return params.size_in_bytes; + return cached_size_in_bytes; } void Flush() { @@ -865,6 +867,7 @@ private: std::vector gl_buffer; SurfaceParams params; GLenum gl_target; + std::size_t cached_size_in_bytes; bool dirty = false; }; From 91602de7f27e391c8e322a2670ef9d50a64f7517 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 16 Oct 2018 16:51:53 -0400 Subject: [PATCH 12/16] rasterizer_cache: Refactor to support in-order flushing. 
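Every cached object now records a monotonically increasing tick when it is marked as modified, and a region flush replays the overlapping objects in ascending tick order, so older writes reach guest memory before newer ones. A condensed sketch of the mechanism (simplified from the code below; registration and interval bookkeeping omitted):

    // Marking an object dirty stamps it with the cache's current tick.
    void MarkAsModified(bool dirty, Cache& cache) {
        is_dirty = dirty;
        last_modified_ticks = cache.GetModifiedTicks(); // returns ++modified_ticks
    }

    // Flushing a region sorts the overlapping objects by that tick...
    std::sort(objects.begin(), objects.end(), [](const auto& a, const auto& b) {
        return a->GetLastModifiedTicks() < b->GetLastModifiedTicks();
    });
    // ...and writes back only the dirty ones, oldest modification first.
    for (const auto& object : objects) {
        if (object->IsDirty()) {
            object->Flush();
            object->MarkAsModified(false, *this);
        }
    }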
--- src/video_core/rasterizer_cache.h | 132 +++++++++++++----- .../renderer_opengl/gl_buffer_cache.h | 8 +- .../renderer_opengl/gl_rasterizer.cpp | 9 +- .../renderer_opengl/gl_rasterizer_cache.cpp | 3 +- .../renderer_opengl/gl_rasterizer_cache.h | 17 +-- .../renderer_opengl/gl_shader_cache.h | 10 +- 6 files changed, 116 insertions(+), 63 deletions(-) diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index 76743a85b..3ec01b967 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h @@ -15,45 +15,73 @@ #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" +class RasterizerCacheObject { +public: + /// Gets the address of the shader in guest memory, required for cache management + virtual VAddr GetAddr() const = 0; + + /// Gets the size of the shader in guest memory, required for cache management + virtual std::size_t GetSizeInBytes() const = 0; + + /// Wriets any cached resources back to memory + virtual void Flush() = 0; + + /// Sets whether the cached object should be considered registered + void SetIsRegistered(bool registered) { + is_registered = registered; + } + + /// Returns true if the cached object is registered + bool IsRegistered() const { + return is_registered; + } + + /// Returns true if the cached object is dirty + bool IsDirty() const { + return is_dirty; + } + + /// Returns ticks from when this cached object was last modified + u64 GetLastModifiedTicks() const { + return last_modified_ticks; + } + + /// Marks an object as recently modified, used to specify whether it is clean or dirty + template + void MarkAsModified(bool dirty, T& cache) { + is_dirty = dirty; + last_modified_ticks = cache.GetModifiedTicks(); + } + +private: + bool is_registered{}; ///< Whether the object is currently registered with the cache + bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) + u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing +}; + template class RasterizerCache : NonCopyable { + friend class RasterizerCacheObject; + public: - /// Write any cached resources overlapping the region back to memory (if dirty) + /// Write any cached resources overlapping the specified region back to memory void FlushRegion(Tegra::GPUVAddr addr, size_t size) { - if (size == 0) - return; - - const ObjectInterval interval{addr, addr + size}; - for (auto& pair : boost::make_iterator_range(object_cache.equal_range(interval))) { - for (auto& cached_object : pair.second) { - if (!cached_object) - continue; - - cached_object->Flush(); - } + const auto& objects{GetSortedObjectsFromRegion(addr, size)}; + for (auto& object : objects) { + FlushObject(object); } } /// Mark the specified region as being invalidated void InvalidateRegion(VAddr addr, u64 size) { - if (size == 0) - return; - - const ObjectInterval interval{addr, addr + size}; - for (auto& pair : boost::make_iterator_range(object_cache.equal_range(interval))) { - for (auto& cached_object : pair.second) { - if (!cached_object) - continue; - - remove_objects.emplace(cached_object); + const auto& objects{GetSortedObjectsFromRegion(addr, size)}; + for (auto& object : objects) { + if (!object->IsRegistered()) { + // Skip duplicates + continue; } + Unregister(object); } - - for (auto& remove_object : remove_objects) { - Unregister(remove_object); - } - - remove_objects.clear(); } /// Invalidates everything in the cache @@ -79,6 +107,7 @@ protected: /// Register an object into the cache void 
Register(const T& object) { + object->SetIsRegistered(true); object_cache.add({GetInterval(object), ObjectSet{object}}); auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer(); rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), 1); @@ -86,18 +115,57 @@ protected: /// Unregisters an object from the cache void Unregister(const T& object) { + object->SetIsRegistered(false); auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer(); rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1); + // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit if (Settings::values.use_accurate_framebuffers) { - // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit - object->Flush(); + FlushObject(object); } object_cache.subtract({GetInterval(object), ObjectSet{object}}); } + /// Returns a ticks counter used for tracking when cached objects were last modified + u64 GetModifiedTicks() { + return ++modified_ticks; + } + private: + /// Returns a list of cached objects from the specified memory region, ordered by access time + std::vector GetSortedObjectsFromRegion(VAddr addr, u64 size) { + if (size == 0) { + return {}; + } + + std::vector objects; + const ObjectInterval interval{addr, addr + size}; + for (auto& pair : boost::make_iterator_range(object_cache.equal_range(interval))) { + for (auto& cached_object : pair.second) { + if (!cached_object) { + continue; + } + objects.push_back(cached_object); + } + } + + std::sort(objects.begin(), objects.end(), [](const T& a, const T& b) -> bool { + return a->GetLastModifiedTicks() < b->GetLastModifiedTicks(); + }); + + return objects; + } + + /// Flushes the specified object, updating appropriate cache state as needed + void FlushObject(const T& object) { + if (!object->IsDirty()) { + return; + } + object->Flush(); + object->MarkAsModified(false, *this); + } + using ObjectSet = std::set; using ObjectCache = boost::icl::interval_map; using ObjectInterval = typename ObjectCache::interval_type; @@ -107,6 +175,6 @@ private: object->GetAddr() + object->GetSizeInBytes()); } - ObjectCache object_cache; - ObjectSet remove_objects; + ObjectCache object_cache; ///< Cache of objects + u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing }; diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index b389ca684..be29dc8be 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -15,17 +15,17 @@ namespace OpenGL { -struct CachedBufferEntry final { - VAddr GetAddr() const { +struct CachedBufferEntry final : public RasterizerCacheObject { + VAddr GetAddr() const override { return addr; } - std::size_t GetSizeInBytes() const { + std::size_t GetSizeInBytes() const override { return size; } // We do not have to flush this cache as things in it are never modified by us. 
- void Flush() {} + void Flush() override {} VAddr addr; std::size_t size; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 0485dfb7a..6ce183c25 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -428,7 +428,7 @@ void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_dep if (color_surface) { // Assume that a surface will be written to if it is used as a framebuffer, even if // the shader doesn't actually write to it. - color_surface->MarkAsDirty(); + color_surface->MarkAsModified(true, res_cache); } glFramebufferTexture2D( @@ -445,7 +445,7 @@ void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_dep if (color_surface) { // Assume that a surface will be written to if it is used as a framebuffer, even // if the shader doesn't actually write to it. - color_surface->MarkAsDirty(); + color_surface->MarkAsModified(true, res_cache); } buffers[index] = GL_COLOR_ATTACHMENT0 + regs.rt_control.GetMap(index); @@ -469,7 +469,7 @@ void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_dep if (depth_surface) { // Assume that a surface will be written to if it is used as a framebuffer, even if // the shader doesn't actually write to it. - depth_surface->MarkAsDirty(); + depth_surface->MarkAsModified(true, res_cache); if (regs.stencil_enable) { // Attach both depth and stencil @@ -642,9 +642,6 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit res_cache.FlushRegion(addr, size); } - - shader_cache.FlushRegion(addr, size); - buffer_cache.FlushRegion(addr, size); } void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index a1f541e75..f79b4f221 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -905,8 +905,6 @@ void CachedSurface::LoadGLBuffer() { } ConvertFormatAsNeeded_LoadGLBuffer(gl_buffer, params.pixel_format, params.width, params.height); - - dirty = false; } MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64)); @@ -1111,6 +1109,7 @@ Surface RasterizerCacheOpenGL::GetColorBufferSurface(std::size_t index, bool pre void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) { surface->LoadGLBuffer(); surface->UploadGLTexture(read_framebuffer.handle, draw_framebuffer.handle); + surface->MarkAsModified(false, *this); } Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 39fd7cd75..77d925250 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -819,28 +819,20 @@ struct hash { namespace OpenGL { -class CachedSurface final { +class CachedSurface final : public RasterizerCacheObject { public: CachedSurface(const SurfaceParams& params); - VAddr GetAddr() const { + VAddr GetAddr() const override { return params.addr; } - std::size_t GetSizeInBytes() const { + std::size_t GetSizeInBytes() const override { return cached_size_in_bytes; } - void Flush() { - // There is no need to flush the surface if it hasn't been modified 
by us. - if (!dirty) - return; + void Flush() override { FlushGLBuffer(); - dirty = false; - } - - void MarkAsDirty() { - dirty = true; } const OGLTexture& Texture() const { @@ -868,7 +860,6 @@ private: SurfaceParams params; GLenum gl_target; std::size_t cached_size_in_bytes; - bool dirty = false; }; class RasterizerCacheOpenGL final : public RasterizerCache { diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index d9157ec3c..a210f1731 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -19,22 +19,20 @@ class CachedShader; using Shader = std::shared_ptr; using Maxwell = Tegra::Engines::Maxwell3D::Regs; -class CachedShader final { +class CachedShader final : public RasterizerCacheObject { public: CachedShader(VAddr addr, Maxwell::ShaderProgram program_type); - /// Gets the address of the shader in guest memory, required for cache management - VAddr GetAddr() const { + VAddr GetAddr() const override { return addr; } - /// Gets the size of the shader in guest memory, required for cache management - std::size_t GetSizeInBytes() const { + std::size_t GetSizeInBytes() const override { return GLShader::MAX_PROGRAM_CODE_LENGTH * sizeof(u64); } // We do not have to flush this cache as things in it are never modified by us. - void Flush() {} + void Flush() override {} /// Gets the shader entries for the shader const GLShader::ShaderEntries& GetShaderEntries() const { From ee7c2dbf5a9086a006e713fcc9eff43f18cafcc9 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 16 Oct 2018 17:02:29 -0400 Subject: [PATCH 13/16] config: Rename use_accurate_framebuffers -> use_accurate_gpu_emulation. - This will be used as a catch-all for slow-but-accurate GPU emulation paths. 
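
Only the name changes; the default stays off. Everything that previously keyed off
use_accurate_framebuffers (the settings struct, the telemetry field, the Qt and SDL
front ends, and the default ini) now reads use_accurate_gpu_emulation instead. There is
no migration of old config files, so an existing use_accurate_framebuffers entry is
simply ignored and the setting falls back to its default (disabled). Purely as an
illustrative, standalone example of reading the renamed key the way yuzu_cmd's
Config::ReadValues() now does (INIReader is inih's C++ wrapper; the helper name here is
made up):

    #include <INIReader.h>

    bool ReadUseAccurateGpuEmulation(const INIReader& ini) {
        // Stale configs that still set "use_accurate_framebuffers" fall back to the
        // default of false, since that key is no longer consulted anywhere.
        return ini.GetBoolean("Renderer", "use_accurate_gpu_emulation", false);
    }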
--- src/core/settings.h | 2 +- src/core/telemetry_session.cpp | 4 ++-- src/video_core/rasterizer_cache.h | 4 ++-- src/video_core/renderer_opengl/gl_rasterizer.cpp | 4 ++-- src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 4 ++-- src/yuzu/configuration/config.cpp | 6 +++--- src/yuzu/configuration/configure_graphics.cpp | 4 ++-- src/yuzu/configuration/configure_graphics.ui | 4 ++-- src/yuzu_cmd/config.cpp | 4 ++-- src/yuzu_cmd/default_ini.h | 4 ++-- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/core/settings.h b/src/core/settings.h index 83b9a04c8..8f2da01c8 100644 --- a/src/core/settings.h +++ b/src/core/settings.h @@ -136,7 +136,7 @@ struct Values { float resolution_factor; bool use_frame_limit; u16 frame_limit; - bool use_accurate_framebuffers; + bool use_accurate_gpu_emulation; float bg_red; float bg_green; diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index 7b04792b5..0de13edd3 100644 --- a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp @@ -163,8 +163,8 @@ TelemetrySession::TelemetrySession() { AddField(Telemetry::FieldType::UserConfig, "Renderer_UseFrameLimit", Settings::values.use_frame_limit); AddField(Telemetry::FieldType::UserConfig, "Renderer_FrameLimit", Settings::values.frame_limit); - AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAccurateFramebuffers", - Settings::values.use_accurate_framebuffers); + AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAccurateGpuEmulation", + Settings::values.use_accurate_gpu_emulation); AddField(Telemetry::FieldType::UserConfig, "System_UseDockedMode", Settings::values.use_docked_mode); } diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index 3ec01b967..0a3b3951e 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h @@ -119,8 +119,8 @@ protected: auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer(); rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1); - // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit - if (Settings::values.use_accurate_framebuffers) { + // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit + if (Settings::values.use_accurate_gpu_emulation) { FlushObject(object); } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 6ce183c25..468253033 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -638,8 +638,8 @@ void RasterizerOpenGL::FlushAll() {} void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); - if (Settings::values.use_accurate_framebuffers) { - // Only flush if use_accurate_framebuffers is enabled, as it incurs a performance hit + if (Settings::values.use_accurate_gpu_emulation) { + // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit res_cache.FlushRegion(addr, size); } } diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index f79b4f221..1fddc1c26 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -1197,11 +1197,11 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, // If the format is the same, just do a framebuffer blit. 
This is significantly faster than // using PBOs. The is also likely less accurate, as textures will be converted rather than - // reinterpreted. When use_accurate_framebuffers setting is enabled, perform a more accurate + // reinterpreted. When use_accurate_gpu_emulation setting is enabled, perform a more accurate // surface copy, where pixels are reinterpreted as a new format (without conversion). This // code path uses OpenGL PBOs and is quite slow. const bool is_blit{old_params.pixel_format == new_params.pixel_format || - !Settings::values.use_accurate_framebuffers}; + !Settings::values.use_accurate_gpu_emulation}; switch (new_params.target) { case SurfaceParams::SurfaceTarget::Texture2D: diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index 7fec15991..71c6ebb41 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp @@ -85,8 +85,8 @@ void Config::ReadValues() { Settings::values.resolution_factor = qt_config->value("resolution_factor", 1.0).toFloat(); Settings::values.use_frame_limit = qt_config->value("use_frame_limit", true).toBool(); Settings::values.frame_limit = qt_config->value("frame_limit", 100).toInt(); - Settings::values.use_accurate_framebuffers = - qt_config->value("use_accurate_framebuffers", false).toBool(); + Settings::values.use_accurate_gpu_emulation = + qt_config->value("use_accurate_gpu_emulation", false).toBool(); Settings::values.bg_red = qt_config->value("bg_red", 0.0).toFloat(); Settings::values.bg_green = qt_config->value("bg_green", 0.0).toFloat(); @@ -233,7 +233,7 @@ void Config::SaveValues() { qt_config->setValue("resolution_factor", (double)Settings::values.resolution_factor); qt_config->setValue("use_frame_limit", Settings::values.use_frame_limit); qt_config->setValue("frame_limit", Settings::values.frame_limit); - qt_config->setValue("use_accurate_framebuffers", Settings::values.use_accurate_framebuffers); + qt_config->setValue("use_accurate_gpu_emulation", Settings::values.use_accurate_gpu_emulation); // Cast to double because Qt's written float values are not human-readable qt_config->setValue("bg_red", (double)Settings::values.bg_red); diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp index cd1549462..8290b4384 100644 --- a/src/yuzu/configuration/configure_graphics.cpp +++ b/src/yuzu/configuration/configure_graphics.cpp @@ -75,7 +75,7 @@ void ConfigureGraphics::setConfiguration() { static_cast(FromResolutionFactor(Settings::values.resolution_factor))); ui->toggle_frame_limit->setChecked(Settings::values.use_frame_limit); ui->frame_limit->setValue(Settings::values.frame_limit); - ui->use_accurate_framebuffers->setChecked(Settings::values.use_accurate_framebuffers); + ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation); bg_color = QColor::fromRgbF(Settings::values.bg_red, Settings::values.bg_green, Settings::values.bg_blue); ui->bg_button->setStyleSheet( @@ -87,7 +87,7 @@ void ConfigureGraphics::applyConfiguration() { ToResolutionFactor(static_cast(ui->resolution_factor_combobox->currentIndex())); Settings::values.use_frame_limit = ui->toggle_frame_limit->isChecked(); Settings::values.frame_limit = ui->frame_limit->value(); - Settings::values.use_accurate_framebuffers = ui->use_accurate_framebuffers->isChecked(); + Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked(); Settings::values.bg_red = static_cast(bg_color.redF()); Settings::values.bg_green = 
static_cast(bg_color.greenF()); Settings::values.bg_blue = static_cast(bg_color.blueF()); diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui index 8fc00af1b..91fcad994 100644 --- a/src/yuzu/configuration/configure_graphics.ui +++ b/src/yuzu/configuration/configure_graphics.ui @@ -50,9 +50,9 @@ - + - Use accurate framebuffers (slow) + Use accurate GPU emulation (slow) diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp index 2470f4640..5e42e48b2 100644 --- a/src/yuzu_cmd/config.cpp +++ b/src/yuzu_cmd/config.cpp @@ -99,8 +99,8 @@ void Config::ReadValues() { Settings::values.use_frame_limit = sdl2_config->GetBoolean("Renderer", "use_frame_limit", true); Settings::values.frame_limit = static_cast(sdl2_config->GetInteger("Renderer", "frame_limit", 100)); - Settings::values.use_accurate_framebuffers = - sdl2_config->GetBoolean("Renderer", "use_accurate_framebuffers", false); + Settings::values.use_accurate_gpu_emulation = + sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false); Settings::values.bg_red = (float)sdl2_config->GetReal("Renderer", "bg_red", 0.0); Settings::values.bg_green = (float)sdl2_config->GetReal("Renderer", "bg_green", 0.0); diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h index 762396e3b..a97b75f7b 100644 --- a/src/yuzu_cmd/default_ini.h +++ b/src/yuzu_cmd/default_ini.h @@ -110,9 +110,9 @@ use_frame_limit = # 1 - 9999: Speed limit as a percentage of target game speed. 100 (default) frame_limit = -# Whether to use accurate framebuffers +# Whether to use accurate GPU emulation # 0 (default): Off (fast), 1 : On (slow) -use_accurate_framebuffers = +use_accurate_gpu_emulation = # The clear color for the renderer. What shows up on the sides of the bottom screen. # Must be in range of 0.0-1.0. Defaults to 1.0 for all. From 43b9494a0fc0abb688f97a636fc80ede5f76ae08 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 16 Oct 2018 17:20:49 -0400 Subject: [PATCH 14/16] gl_rasterizer_cache: Use AccurateCopySurface for use_accurate_gpu_emulation. 
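
When use_accurate_gpu_emulation is enabled, RecreateSurface() now returns early through
AccurateCopySurface(): the source surface is flushed back to guest RAM and the
destination surface is then reloaded from that RAM, so the bytes are reinterpreted under
the destination format instead of being converted by a blit or a PBO copy. Because the
accurate path is taken first, the old is_blit fallback no longer needs to consult the
setting. A rough sketch of the round trip with placeholder types (not the real
CachedSurface or RasterizerCacheOpenGL interfaces):

    // Placeholder types purely to show the flow of the accurate path.
    struct SurfaceDesc {
        unsigned long long addr = 0;          // guest address of the surface
        unsigned long long size_in_bytes = 0; // size of the surface data in guest memory
    };

    struct CacheSketch {
        void FlushRegion(unsigned long long addr, unsigned long long size) {} // write texels to RAM
        void LoadSurface(const SurfaceDesc& surface) {}                       // re-read RAM into the texture

        void AccurateCopy(const SurfaceDesc& src, const SurfaceDesc& dst) {
            FlushRegion(src.addr, dst.size_in_bytes); // flush source bytes to guest RAM
            LoadSurface(dst);                         // reinterpret them under the destination format
        }
    };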
--- .../renderer_opengl/gl_rasterizer_cache.cpp | 17 +++++++++++++++-- .../renderer_opengl/gl_rasterizer_cache.h | 3 +++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 1fddc1c26..0456472fd 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -1178,6 +1178,14 @@ void RasterizerCacheOpenGL::FermiCopySurface( FastCopySurface(GetSurface(src_params, true), GetSurface(dst_params, false)); } +void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface, + const Surface& dst_surface) { + const auto& src_params{src_surface->GetSurfaceParams()}; + const auto& dst_params{dst_surface->GetSurfaceParams()}; + FlushRegion(src_params.addr, dst_params.size_in_bytes); + LoadSurface(dst_surface); +} + Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, const SurfaceParams& new_params) { // Verify surface is compatible for blitting @@ -1186,6 +1194,12 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, // Get a new surface with the new parameters, and blit the previous surface to it Surface new_surface{GetUncachedSurface(new_params)}; + // With use_accurate_gpu_emulation enabled, do an accurate surface copy + if (Settings::values.use_accurate_gpu_emulation) { + AccurateCopySurface(old_surface, new_surface); + return new_surface; + } + // For compatible surfaces, we can just do fast glCopyImageSubData based copy if (old_params.target == new_params.target && old_params.type == new_params.type && old_params.depth == new_params.depth && old_params.depth == 1 && @@ -1200,8 +1214,7 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, // reinterpreted. When use_accurate_gpu_emulation setting is enabled, perform a more accurate // surface copy, where pixels are reinterpreted as a new format (without conversion). This // code path uses OpenGL PBOs and is quite slow. - const bool is_blit{old_params.pixel_format == new_params.pixel_format || - !Settings::values.use_accurate_gpu_emulation}; + const bool is_blit{old_params.pixel_format == new_params.pixel_format}; switch (new_params.target) { case SurfaceParams::SurfaceTarget::Texture2D: diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 77d925250..7c1cb72d0 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -899,6 +899,9 @@ private: /// Tries to get a reserved surface for the specified parameters Surface TryGetReservedSurface(const SurfaceParams& params); + /// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data + void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface); + /// The surface reserve is a "backup" cache, this is where we put unique surfaces that have /// previously been used. This is to prevent surfaces from being constantly created and /// destroyed when used with different surface parameters. From 2a035a1f6f6e834adc639d51884ea36dde5a3445 Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 17 Oct 2018 18:19:35 -0400 Subject: [PATCH 15/16] gl_rasterizer_cache: Remove unnecessary temporary buffer with unswizzle. 
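
The flush path was swizzling into a heap-allocated std::vector and then memcpy-ing that
vector into guest memory; CopySwizzledData() can write straight into
Memory::GetPointer(addr), which saves one full-surface allocation and one copy per
flush. Toy before/after illustration (stand-in names, std::memcpy stands in for the
swizzle routine, none of this is yuzu's API):

    #include <cstddef>
    #include <cstring>

    void WriteBack(const unsigned char* gl_buffer, unsigned char* guest_ptr, std::size_t size) {
        // Before: stage in a temporary buffer, then copy the temporary into guest RAM.
        //   std::vector<unsigned char> tmp(size);
        //   SwizzleInto(gl_buffer, tmp.data(), size);
        //   std::memcpy(guest_ptr, tmp.data(), size);
        // After: swizzle directly into guest RAM in a single pass.
        std::memcpy(guest_ptr, gl_buffer, size);
    }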
--- src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 0456472fd..8a0ed964a 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -354,12 +354,9 @@ void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 d const std::size_t size_to_copy{std::min(gl_buffer_size, data.size())}; memcpy(gl_buffer, data.data(), size_to_copy); } else { - std::vector data(gl_buffer_size); Tegra::Texture::CopySwizzledData(stride / tile_size, height / tile_size, depth, - bytes_per_pixel, bytes_per_pixel, data.data(), gl_buffer, - false, block_height, block_depth); - const std::size_t size_to_copy{std::min(gl_buffer_size, data.size())}; - memcpy(Memory::GetPointer(addr), data.data(), size_to_copy); + bytes_per_pixel, bytes_per_pixel, Memory::GetPointer(addr), + gl_buffer, false, block_height, block_depth); } } From 648b55c6b90347c0bf823c095abd8de5ba42fdc6 Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 17 Oct 2018 18:20:15 -0400 Subject: [PATCH 16/16] gl_rasterizer_cache: Remove unnecessary block_depth=1 on Flush. --- src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 8a0ed964a..1cb77aaf2 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -937,7 +937,6 @@ void CachedSurface::FlushGLBuffer() { if (params.target == SurfaceParams::SurfaceTarget::Texture2D) { // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented. depth = 1U; - block_depth = 1U; } gl_to_morton_fns[static_cast(params.pixel_format)]( params.width, params.block_height, params.height, block_depth, depth, gl_buffer.data(),