diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 27bf9ece7..817fa07a8 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -798,12 +798,58 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
     // Verify surface is compatible for blitting
     const auto& params{surface->GetSurfaceParams()};
     ASSERT(params.type == new_params.type);
+    ASSERT_MSG(params.GetCompressionFactor(params.pixel_format) == 1,
+               "Compressed texture reinterpretation is not supported");
 
     // Create a new surface with the new parameters, and blit the previous surface to it
     Surface new_surface{std::make_shared<CachedSurface>(new_params)};
-    BlitTextures(surface->Texture().handle, params.GetRect(), new_surface->Texture().handle,
-                 new_surface->GetSurfaceParams().GetRect(), params.type, read_framebuffer.handle,
-                 draw_framebuffer.handle);
+
+    auto source_format = GetFormatTuple(params.pixel_format, params.component_type);
+    auto dest_format = GetFormatTuple(new_params.pixel_format, new_params.component_type);
+
+    size_t buffer_size = std::max(params.SizeInBytes(), new_params.SizeInBytes());
+
+    // Use a Pixel Buffer Object to download the previous texture and then upload it to the new
+    // one using the new format.
+    OGLBuffer pbo;
+    pbo.Create();
+
+    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo.handle);
+    glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB);
+    glGetTextureImage(surface->Texture().handle, 0, source_format.format, source_format.type,
+                      params.SizeInBytes(), nullptr);
+
+    // If the new texture is bigger than the previous one, we need to fill in the rest with data
+    // from the CPU.
+    if (params.SizeInBytes() < new_params.SizeInBytes()) {
+        // Upload the rest of the memory.
+        if (new_params.is_tiled) {
+            // TODO(Subv): We might have to de-tile the subtexture and re-tile it with the rest
+            // of the data in this case. Games like Super Mario Odyssey seem to hit this case
+            // when drawing: they re-use the memory of a previous texture as a bigger framebuffer
+            // but don't clear it beforehand, so the texture is already full of zeros.
+            LOG_CRITICAL(HW_GPU, "Trying to upload extra texture data from the CPU during "
+                                 "reinterpretation but the texture is tiled.");
+        }
+        size_t remaining_size = new_params.SizeInBytes() - params.SizeInBytes();
+        auto address = Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(
+            new_params.addr + params.SizeInBytes());
+        std::vector<u8> data(remaining_size);
+        Memory::ReadBlock(*address, data.data(), data.size());
+        glBufferSubData(GL_PIXEL_PACK_BUFFER, params.SizeInBytes(), remaining_size, data.data());
+    }
+
+    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+    const auto& dest_rect{new_params.GetRect()};
+
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo.handle);
+    glTextureSubImage2D(
+        new_surface->Texture().handle, 0, 0, 0, static_cast<GLsizei>(dest_rect.GetWidth()),
+        static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format, dest_format.type, nullptr);
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+
+    pbo.Release();
 
     // Update cache accordingly
     UnregisterSurface(surface);
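
For reference, the hunk above boils down to the classic PBO reinterpretation round trip: pack the old texture's bytes into a buffer object, then unpack those same bytes into the new texture under the new format/type, so the pixel data never has to take a detour through client memory (except for the tail that must be filled from guest memory). Below is a minimal standalone sketch of that pattern, assuming a GL 4.5 context for the DSA entry points; ReinterpretViaPbo, src_tex, dst_tex, and the RGBA format/type pair are illustrative placeholders, not names from this codebase.

    #include <glad/glad.h> // or any other GL 4.5 loader

    // Hypothetical sketch of the pack/unpack round trip performed in the patch.
    void ReinterpretViaPbo(GLuint src_tex, GLuint dst_tex, GLsizei width, GLsizei height,
                           GLsizeiptr byte_size) {
        GLuint pbo;
        glGenBuffers(1, &pbo);

        // Pack phase: read the source texture's texels into the buffer object.
        // With a PACK buffer bound, the final pointer argument of
        // glGetTextureImage is a byte offset into that buffer.
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
        glBufferData(GL_PIXEL_PACK_BUFFER, byte_size, nullptr, GL_STREAM_DRAW);
        glGetTextureImage(src_tex, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                          static_cast<GLsizei>(byte_size), nullptr);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Unpack phase: feed the same bytes to the destination texture; the
        // destination's format/type pair decides how they are reinterpreted.
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
        glTextureSubImage2D(dst_tex, 0, 0, 0, width, height, GL_RGBA,
                            GL_UNSIGNED_INT_8_8_8_8_REV, nullptr);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

        glDeleteBuffers(1, &pbo);
    }

The trick is entirely in the bind points: binding the buffer to GL_PIXEL_PACK_BUFFER makes glGetTextureImage write into it, and rebinding it to GL_PIXEL_UNPACK_BUFFER makes glTextureSubImage2D read from it, with the driver free to keep the intermediate copy in GPU-visible memory throughout.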