// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <algorithm>
#include <array>
#include <cstring>
#include <functional>
#include <iterator>
#include <list>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "common/assert.h"
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/texture_cache/slot_vector.h"
namespace VideoCommon {
using AsyncJobId = SlotId;
static constexpr AsyncJobId NULL_ASYNC_JOB_ID{0};
template <class QueryCache, class HostCounter>
class CounterStreamBase {
public:
explicit CounterStreamBase(QueryCache& cache_, VideoCore::QueryType type_)
: cache{cache_}, type{type_} {}
/// Updates the state of the stream, enabling or disabling as needed.
void Update(bool enabled) {
if (enabled) {
Enable();
} else {
Disable();
}
}
/// Resets the stream to zero. It doesn't disable the query after resetting.
void Reset() {
if (current) {
current->EndQuery();
// Immediately start a new query to avoid disabling its state.
current = cache.Counter(nullptr, type);
}
last = nullptr;
}
/// Returns the current counter, slicing it as needed.
std::shared_ptr<HostCounter> Current() {
if (!current) {
return nullptr;
}
current->EndQuery();
last = std::move(current);
current = cache.Counter(last, type);
return last;
}
/// Returns true when the counter stream is enabled.
bool IsEnabled() const {
return current != nullptr;
}
private:
/// Enables the stream.
void Enable() {
if (current) {
return;
}
current = cache.Counter(last, type);
}
/// Disables the stream.
void Disable() {
if (current) {
current->EndQuery();
}
last = std::exchange(current, nullptr);
}
QueryCache& cache;
const VideoCore::QueryType type;
std::shared_ptr<HostCounter> current;
std::shared_ptr<HostCounter> last;
};
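// Illustrative sketch, not part of the original header: the intended lifecycle of a counter
// stream. It assumes a backend QueryCache that provides Counter() and a HostCounter that
// provides EndQuery(), as used above; `ExampleQueryCache` and `ExampleHostCounter` are
// hypothetical placeholder types.
//
//     CounterStreamBase<ExampleQueryCache, ExampleHostCounter> stream{cache, type};
//     stream.Update(true);              // enable: starts a host query via cache.Counter()
//     auto counter = stream.Current();  // slice: ends the running query, chains a new one
//     stream.Reset();                   // restart counting from zero without disabling
//     stream.Update(false);             // disable at command buffer boundaries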
template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
Core::Memory::Memory& cpu_memory_)
: rasterizer{rasterizer_},
cpu_memory{cpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
VideoCore::QueryType::SamplesPassed}}} {
(void)slot_async_jobs.insert(); // Null value
}
void InvalidateRegion(VAddr addr, std::size_t size) {
std::unique_lock lock{mutex};
FlushAndRemoveRegion(addr, size);
}
void FlushRegion(VAddr addr, std::size_t size) {
std::unique_lock lock{mutex};
FlushAndRemoveRegion(addr, size);
}
/**
* Records a query in GPU mapped memory, potentially marked with a timestamp.
* @param gpu_addr GPU address to flush to when the mapped memory is read.
* @param type Query type, e.g. SamplesPassed.
* @param timestamp Optional timestamp; when empty, the flushed query is assumed to be short.
*/
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
std::unique_lock lock{mutex};
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
CachedQuery* query = TryGet(*cpu_addr);
if (!query) {
ASSERT_OR_EXECUTE(cpu_addr, return;);
u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);
query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
}
auto result = query->BindCounter(Stream(type).Current(), timestamp);
if (result) {
auto async_job_id = query->GetAsyncJob();
auto& async_job = slot_async_jobs[async_job_id];
async_job.collected = true;
async_job.value = *result;
query->SetAsyncJob(NULL_ASYNC_JOB_ID);
}
AsyncFlushQuery(query, timestamp, lock);
}
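// Illustrative call, an assumption about the caller (which lives outside this header): the
// rasterizer's report/semaphore handling is expected to invoke Query() with the GPU address of
// the report and, for timestamped reports, the GPU tick to store next to the result.
// `report_gpu_addr` and `gpu_tick` are hypothetical names.
//
//     query_cache.Query(report_gpu_addr, VideoCore::QueryType::SamplesPassed, gpu_tick);
//     query_cache.Query(report_gpu_addr, VideoCore::QueryType::SamplesPassed, std::nullopt);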
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
void UpdateCounters() {
std::unique_lock lock{mutex};
if (maxwell3d) {
const auto& regs = maxwell3d->regs;
Stream(VideoCore::QueryType::SamplesPassed).Update(regs.zpass_pixel_count_enable);
}
}
/// Resets a counter to zero. It doesn't disable the query after resetting.
void ResetCounter(VideoCore::QueryType type) {
std::unique_lock lock{mutex};
Stream(type).Reset();
}
/// Disables all active streams. Expected to be called at the end of a command buffer.
void DisableStreams() {
std::unique_lock lock{mutex};
for (auto& stream : streams) {
stream.Update(false);
}
}
/// Returns a new host counter.
std::shared_ptr<HostCounter> Counter(std::shared_ptr<HostCounter> dependency,
VideoCore::QueryType type) {
return std::make_shared<HostCounter>(static_cast<QueryCache&>(*this), std::move(dependency),
type);
}
/// Returns the counter stream of the specified type.
CounterStream& Stream(VideoCore::QueryType type) {
return streams[static_cast<std::size_t>(type)];
}
/// Returns the counter stream of the specified type.
const CounterStream& Stream(VideoCore::QueryType type) const {
return streams[static_cast<std::size_t>(type)];
}
void CommitAsyncFlushes() {
std::unique_lock lock{mutex};
committed_flushes.push_back(uncommitted_flushes);
uncommitted_flushes.reset();
}
bool HasUncommittedFlushes() const {
std::unique_lock lock{mutex};
return uncommitted_flushes != nullptr;
}
bool ShouldWaitAsyncFlushes() const {
std::unique_lock lock{mutex};
if (committed_flushes.empty()) {
return false;
}
return committed_flushes.front() != nullptr;
}
void PopAsyncFlushes() {
std::unique_lock lock{mutex};
if (committed_flushes.empty()) {
return;
}
auto& flush_list = committed_flushes.front();
if (!flush_list) {
committed_flushes.pop_front();
return;
}
for (AsyncJobId async_job_id : *flush_list) {
AsyncJob& async_job = slot_async_jobs[async_job_id];
if (!async_job.collected) {
FlushAndRemoveRegion(async_job.query_location, 2, true);
}
}
committed_flushes.pop_front();
}
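// Illustrative sketch, assuming the usual fence-driven flow in the GPU core (not shown in this
// header): queries recorded since the last fence are committed as a batch when the fence is
// queued, and the batch is resolved once that fence is popped.
//
//     query_cache.CommitAsyncFlushes();           // when a fence is queued
//     if (query_cache.ShouldWaitAsyncFlushes()) {
//         // wait on the host GPU before resolving
//     }
//     query_cache.PopAsyncFlushes();              // flush any jobs not collected yet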
private:
struct AsyncJob {
bool collected = false;
u64 value = 0;
VAddr query_location = 0;
std::optional<u64> timestamp{};
};
/// Flushes a memory range to guest memory and removes it from the cache.
void FlushAndRemoveRegion(VAddr addr, std::size_t size, bool async = false) {
const u64 addr_begin = addr;
const u64 addr_end = addr_begin + size;
const auto in_range = [addr_begin, addr_end](const CachedQuery& query) {
const u64 cache_begin = query.GetCpuAddr();
const u64 cache_end = cache_begin + query.SizeInBytes();
return cache_begin < addr_end && addr_begin < cache_end;
};
const u64 page_end = addr_end >> YUZU_PAGEBITS;
for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) {
const auto& it = cached_queries.find(page);
if (it == std::end(cached_queries)) {
continue;
}
auto& contents = it->second;
for (auto& query : contents) {
if (!in_range(query)) {
continue;
}
AsyncJobId async_job_id = query.GetAsyncJob();
auto flush_result = query.Flush(async);
if (async_job_id == NULL_ASYNC_JOB_ID) {
ASSERT_MSG(false, "This should not be reachable at all");
continue;
}
AsyncJob& async_job = slot_async_jobs[async_job_id];
async_job.collected = true;
async_job.value = flush_result;
query.SetAsyncJob(NULL_ASYNC_JOB_ID);
}
std::erase_if(contents, in_range);
}
}
/// Registers the passed parameters as cached and returns a pointer to the stored cached query.
CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
host_ptr);
}
/// Tries to get a cached query. Returns nullptr on failure.
CachedQuery* TryGet(VAddr addr) {
const u64 page = static_cast<u64>(addr) >> YUZU_PAGEBITS;
const auto it = cached_queries.find(page);
if (it == std::end(cached_queries)) {
return nullptr;
}
auto& contents = it->second;
const auto found = std::find_if(std::begin(contents), std::end(contents),
[addr](auto& query) { return query.GetCpuAddr() == addr; });
return found != std::end(contents) ? &*found : nullptr;
}
void AsyncFlushQuery(CachedQuery* query, std::optional<u64> timestamp,
std::unique_lock<std::recursive_mutex>& lock) {
const AsyncJobId new_async_job_id = slot_async_jobs.insert();
{
AsyncJob& async_job = slot_async_jobs[new_async_job_id];
query->SetAsyncJob(new_async_job_id);
async_job.query_location = query->GetCpuAddr();
async_job.collected = false;
if (!uncommitted_flushes) {
uncommitted_flushes = std::make_shared<std::vector<AsyncJobId>>();
}
uncommitted_flushes->push_back(new_async_job_id);
}
lock.unlock();
std::function<void()> operation([this, new_async_job_id, timestamp] {
std::unique_lock local_lock{mutex};
AsyncJob& async_job = slot_async_jobs[new_async_job_id];
u64 value = async_job.value;
VAddr address = async_job.query_location;
slot_async_jobs.erase(new_async_job_id);
local_lock.unlock();
if (timestamp) {
u64 timestamp_value = *timestamp;
cpu_memory.WriteBlockUnsafe(address + sizeof(u64), &timestamp_value, sizeof(u64));
cpu_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
rasterizer.InvalidateRegion(address, sizeof(u64) * 2,
VideoCommon::CacheType::NoQueryCache);
} else {
u32 small_value = static_cast<u32>(value);
cpu_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
rasterizer.InvalidateRegion(address, sizeof(u32),
VideoCommon::CacheType::NoQueryCache);
}
});
rasterizer.SyncOperation(std::move(operation));
}
static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
static constexpr unsigned YUZU_PAGEBITS = 12;
SlotVector<AsyncJob> slot_async_jobs;
VideoCore::RasterizerInterface& rasterizer;
Core::Memory::Memory& cpu_memory;
mutable std::recursive_mutex mutex;
std::unordered_map<u64, std::vector<CachedQuery>> cached_queries;
std::array<CounterStream, VideoCore::NumQueryTypes> streams;
std::shared_ptr<std::vector<AsyncJobId>> uncommitted_flushes{};
std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed_flushes;
};
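// Illustrative sketch only: a backend instantiates the cache through CRTP, passing its own
// CachedQuery, CounterStream and HostCounter types. The `Example*` names below are hypothetical
// placeholders rather than existing yuzu classes.
//
//     class ExampleQueryCache final
//         : public QueryCacheBase<ExampleQueryCache, ExampleCachedQuery,
//                                 CounterStreamBase<ExampleQueryCache, ExampleHostCounter>,
//                                 ExampleHostCounter> {
//         // backend-specific query handling goes here
//     };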
template <class QueryCache, class HostCounter>
class HostCounterBase {
public:
explicit HostCounterBase(std::shared_ptr<HostCounter> dependency_)
: dependency{std::move(dependency_)}, depth{dependency ? (dependency->Depth() + 1) : 0} {
// Avoid nesting too many dependencies, which could otherwise cause a stack overflow when they
// are deleted.
constexpr u64 depth_threshold = 96;
if (depth > depth_threshold) {
depth = 0;
base_result = dependency->Query();
dependency = nullptr;
}
}
virtual ~HostCounterBase() = default;
/// Returns the current value of the query.
u64 Query(bool async = false) {
if (result) {
return *result;
}
u64 value = BlockingQuery(async) + base_result;
if (dependency) {
value += dependency->Query();
dependency = nullptr;
}
result = value;
return *result;
}
/// Returns true when flushing this query will potentially wait.
bool WaitPending() const noexcept {
return result.has_value();
}
u64 Depth() const noexcept {
return depth;
}
protected:
/// Returns the value of the query from the backend API, blocking as needed.
virtual u64 BlockingQuery(bool async = false) const = 0;
private:
std::shared_ptr<HostCounter> dependency; ///< Counter to add to this value.
std::optional<u64> result; ///< Filled with the already returned value.
u64 depth; ///< Number of nested dependencies.
u64 base_result = 0; ///< Equivalent to nested dependencies value.
};
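// Illustrative sketch only: a concrete counter derives from HostCounterBase and wraps the
// backend query object. Besides overriding BlockingQuery(), it is expected to expose EndQuery(),
// which CounterStreamBase calls when slicing or disabling a stream. `ExampleHostCounter` and
// `ExampleQueryCache` are hypothetical placeholders.
//
//     class ExampleHostCounter final
//         : public HostCounterBase<ExampleQueryCache, ExampleHostCounter> {
//     public:
//         void EndQuery();                                      // finish the backend query
//     private:
//         u64 BlockingQuery(bool async = false) const override; // read back its result
//     };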
template <class HostCounter>
class CachedQueryBase {
public:
explicit CachedQueryBase(VAddr cpu_addr_, u8* host_ptr_)
: cpu_addr{cpu_addr_}, host_ptr{host_ptr_} {}
virtual ~CachedQueryBase() = default;
CachedQueryBase(CachedQueryBase&&) noexcept = default;
CachedQueryBase(const CachedQueryBase&) = delete;
CachedQueryBase& operator=(CachedQueryBase&&) noexcept = default;
CachedQueryBase& operator=(const CachedQueryBase&) = delete;
/// Flushes the query to guest memory.
virtual u64 Flush(bool async = false) {
// When the counter is nullptr it means the query has just been reset, so a zero must be
// written in these cases.
const u64 value = counter ? counter->Query(async) : 0;
if (async) {
return value;
}
std::memcpy(host_ptr, &value, sizeof(u64));
if (timestamp) {
std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
}
return value;
}
/// Binds a counter to this query.
std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_,
std::optional<u64> timestamp_) {
std::optional<u64> result{};
if (counter) {
// If there's an old counter set it means the query is being rewritten by the game.
// To avoid losing the data forever, flush here.
result = std::make_optional(Flush());
}
counter = std::move(counter_);
timestamp = timestamp_;
return result;
}
VAddr GetCpuAddr() const noexcept {
return cpu_addr;
}
u64 SizeInBytes() const noexcept {
return SizeInBytes(timestamp.has_value());
}
static constexpr u64 SizeInBytes(bool with_timestamp) noexcept {
return with_timestamp ? LARGE_QUERY_SIZE : SMALL_QUERY_SIZE;
}
void SetAsyncJob(AsyncJobId assigned_async_job_) {
assigned_async_job = assigned_async_job_;
}
AsyncJobId GetAsyncJob() const {
return assigned_async_job;
}
protected:
/// Returns true when querying the counter may potentially block.
bool WaitPending() const noexcept {
return counter && counter->WaitPending();
}
private:
static constexpr std::size_t SMALL_QUERY_SIZE = 8; // Query size without timestamp.
static constexpr std::size_t LARGE_QUERY_SIZE = 16; // Query size with timestamp.
static constexpr std::intptr_t TIMESTAMP_OFFSET = 8; // Timestamp offset in a large query.
VAddr cpu_addr; ///< Guest CPU address.
u8* host_ptr; ///< Writable host pointer.
std::shared_ptr<HostCounter> counter; ///< Host counter to query, owns the dependency tree.
std::optional<u64> timestamp; ///< Timestamp to flush to guest memory.
AsyncJobId assigned_async_job;
};
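// Guest-visible layout flushed by CachedQueryBase and by the asynchronous path above, derived
// from SMALL_QUERY_SIZE, LARGE_QUERY_SIZE and TIMESTAMP_OFFSET:
//
//     offset 0x0: query result (64-bit; the asynchronous short path writes only 32 bits)
//     offset 0x8: timestamp, present only in large (timestamped) queries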
} // namespace VideoCommon