Merge pull request #11806 from liamwhite/needs-more-locking

renderer_vulkan: add locks to avoid scheduler flushes from CPU
liamwhite 2023-10-20 10:26:03 -04:00 committed by GitHub
commit b56c7397ad
4 changed files with 23 additions and 10 deletions
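For context, every CPU-side path that can flush the scheduler now takes both cache mutexes before it runs, either directly or through the new LockCaches() helper. Below is a minimal, self-contained sketch of that pattern; the Cache and Rasterizer types are hypothetical stand-ins for yuzu's buffer cache, texture cache, and RasterizerVulkan, not the real classes.

#include <mutex>

// Hypothetical stand-in: each cache guards its state with a recursive mutex,
// matching the std::recursive_mutex members locked in the diff below.
struct Cache {
    std::recursive_mutex mutex;
};

// Hypothetical stand-in for the rasterizer that owns both caches.
struct Rasterizer {
    Cache buffer_cache;
    Cache texture_cache;

    // Same shape as the LockCaches() helper added in this PR: acquire both
    // cache mutexes together so every caller uses one consistent lock order.
    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
        return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
    }

    // A flush path guarded the same way FlushCommands()/FlushWork() are in
    // the diff. C++17 guaranteed copy elision lets the returned scoped_lock
    // initialize `lock` directly; scoped_lock itself is not copyable.
    void FlushCommands() {
        std::scoped_lock lock{LockCaches()};
        // ... record and dispatch pending GPU work here ...
    }
};

int main() {
    Rasterizer rasterizer;
    rasterizer.FlushCommands();

    // External callers (e.g. the present path) can take the same locks:
    std::scoped_lock lock{rasterizer.LockCaches()};
    return 0;
}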


@@ -86,7 +86,10 @@ public:
             uncommitted_operations.emplace_back(std::move(func));
         }
         pending_operations.emplace_back(std::move(uncommitted_operations));
-        QueueFence(new_fence);
+        {
+            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+            QueueFence(new_fence);
+        }
         if (!delay_fence) {
             func();
         }


@@ -132,12 +132,16 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     const bool use_accelerated =
         rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
     const bool is_srgb = use_accelerated && screen_info.is_srgb;
-    RenderScreenshot(*framebuffer, use_accelerated);
-    Frame* frame = present_manager.GetRenderFrame();
-    blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
-    scheduler.Flush(*frame->render_ready);
-    present_manager.Present(frame);
+    {
+        std::scoped_lock lock{rasterizer.LockCaches()};
+        RenderScreenshot(*framebuffer, use_accelerated);
+        Frame* frame = present_manager.GetRenderFrame();
+        blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
+        scheduler.Flush(*frame->render_ready);
+        present_manager.Present(frame);
+    }
     gpu.RendererFrameEndNotify();
     rasterizer.TickFrame();


@@ -198,7 +198,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
     if (!pipeline) {
         return;
     }
-    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    std::scoped_lock lock{LockCaches()};
     // update engine as channel may be different.
     pipeline->SetEngine(maxwell3d, gpu_memory);
     pipeline->Configure(is_indexed);
@@ -708,6 +708,7 @@ void RasterizerVulkan::TiledCacheBarrier() {
 }
 
 void RasterizerVulkan::FlushCommands() {
+    std::scoped_lock lock{LockCaches()};
     if (draw_counter == 0) {
         return;
     }
@@ -805,6 +806,7 @@ void RasterizerVulkan::FlushWork() {
     if ((++draw_counter & 7) != 7) {
         return;
     }
+    std::scoped_lock lock{LockCaches()};
     if (draw_counter < DRAWS_TO_DISPATCH) {
         // Send recorded tasks to the worker thread
         scheduler.DispatchWork();
@@ -1499,7 +1501,7 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
 void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
     CreateChannel(channel);
     {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        std::scoped_lock lock{LockCaches()};
         texture_cache.CreateChannel(channel);
         buffer_cache.CreateChannel(channel);
     }
@@ -1512,7 +1514,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
     const s32 channel_id = channel.bind_id;
     BindToChannel(channel_id);
     {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        std::scoped_lock lock{LockCaches()};
         texture_cache.BindToChannel(channel_id);
         buffer_cache.BindToChannel(channel_id);
     }
@@ -1525,7 +1527,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
 void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
     EraseChannel(channel_id);
     {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        std::scoped_lock lock{LockCaches()};
         texture_cache.EraseChannel(channel_id);
         buffer_cache.EraseChannel(channel_id);
     }


@@ -133,6 +133,10 @@ public:
     void ReleaseChannel(s32 channel_id) override;
 
+    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
+        return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
+    }
+
 private:
     static constexpr size_t MAX_TEXTURES = 192;
     static constexpr size_t MAX_IMAGES = 48;
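
One property worth noting about the new helper: it locks two std::recursive_mutex objects, so a thread that already holds the cache locks (for example inside PrepareDraw) can pass through another call site that takes them again, such as FlushCommands or FlushWork, without deadlocking against itself. A minimal sketch of that behavior, using hypothetical outer_draw()/nested_flush() functions rather than the real call graph:

#include <mutex>

std::recursive_mutex buffer_mutex;
std::recursive_mutex texture_mutex;

// Hypothetical inner path that takes both locks, like FlushCommands() above.
void nested_flush() {
    std::scoped_lock lock{buffer_mutex, texture_mutex};
    // ... flush pending work ...
}

// Hypothetical outer path that already holds both locks, like PrepareDraw().
void outer_draw() {
    std::scoped_lock lock{buffer_mutex, texture_mutex};
    nested_flush(); // same-thread re-locking is allowed for recursive_mutex
}

int main() {
    outer_draw();
    return 0;
}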