2022-04-23 09:59:50 +01:00
|
|
|
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2020-02-11 19:02:41 +00:00
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
#include <array>
|
|
|
|
#include <cstring>
|
2023-04-23 22:47:05 +01:00
|
|
|
#include <functional>
|
2020-02-11 19:02:41 +00:00
|
|
|
#include <iterator>
|
2021-10-01 05:57:02 +01:00
|
|
|
#include <list>
|
2020-02-11 19:02:41 +00:00
|
|
|
#include <memory>
|
2020-02-13 17:28:22 +00:00
|
|
|
#include <mutex>
|
2020-02-11 19:02:41 +00:00
|
|
|
#include <optional>
|
|
|
|
#include <unordered_map>
|
2020-04-15 21:36:14 +01:00
|
|
|
#include <unordered_set>
|
2020-02-11 19:02:41 +00:00
|
|
|
#include <vector>
|
|
|
|
|
|
|
|
#include "common/assert.h"
|
2021-04-15 00:07:40 +01:00
|
|
|
#include "common/settings.h"
|
2023-04-23 20:55:16 +01:00
|
|
|
#include "core/memory.h"
|
2021-11-05 14:52:31 +00:00
|
|
|
#include "video_core/control/channel_state_cache.h"
|
2020-02-11 19:02:41 +00:00
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
|
|
|
#include "video_core/memory_manager.h"
|
|
|
|
#include "video_core/rasterizer_interface.h"
|
2023-04-23 20:55:16 +01:00
|
|
|
#include "video_core/texture_cache/slot_vector.h"
|
2020-02-11 19:02:41 +00:00
|
|
|
|
|
|
|
namespace VideoCommon {
|
|
|
|
|
2023-04-23 20:55:16 +01:00
|
|
|
// Identifier of a pending asynchronous query-flush job, indexing into
// QueryCacheBase::slot_async_jobs.
using AsyncJobId = SlotId;

// Sentinel meaning "no async job attached"; slot 0 is pre-inserted by the
// QueryCacheBase constructor so this id is always valid but never a real job.
static constexpr AsyncJobId NULL_ASYNC_JOB_ID{0};
|
|
|
|
|
2020-02-11 19:02:41 +00:00
|
|
|
template <class QueryCache, class HostCounter>
|
|
|
|
class CounterStreamBase {
|
|
|
|
public:
|
2020-12-05 16:40:14 +00:00
|
|
|
explicit CounterStreamBase(QueryCache& cache_, VideoCore::QueryType type_)
|
|
|
|
: cache{cache_}, type{type_} {}
|
2020-02-11 19:02:41 +00:00
|
|
|
|
|
|
|
/// Updates the state of the stream, enabling or disabling as needed.
|
|
|
|
void Update(bool enabled) {
|
|
|
|
if (enabled) {
|
|
|
|
Enable();
|
|
|
|
} else {
|
|
|
|
Disable();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Resets the stream to zero. It doesn't disable the query after resetting.
|
|
|
|
void Reset() {
|
|
|
|
if (current) {
|
|
|
|
current->EndQuery();
|
|
|
|
|
|
|
|
// Immediately start a new query to avoid disabling its state.
|
|
|
|
current = cache.Counter(nullptr, type);
|
|
|
|
}
|
|
|
|
last = nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns the current counter slicing as needed.
|
|
|
|
std::shared_ptr<HostCounter> Current() {
|
|
|
|
if (!current) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
current->EndQuery();
|
|
|
|
last = std::move(current);
|
|
|
|
current = cache.Counter(last, type);
|
|
|
|
return last;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns true when the counter stream is enabled.
|
|
|
|
bool IsEnabled() const {
|
2020-02-14 00:11:21 +00:00
|
|
|
return current != nullptr;
|
2020-02-11 19:02:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
/// Enables the stream.
|
|
|
|
void Enable() {
|
|
|
|
if (current) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
current = cache.Counter(last, type);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Disables the stream.
|
|
|
|
void Disable() {
|
|
|
|
if (current) {
|
|
|
|
current->EndQuery();
|
|
|
|
}
|
|
|
|
last = std::exchange(current, nullptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
QueryCache& cache;
|
|
|
|
const VideoCore::QueryType type;
|
|
|
|
|
|
|
|
std::shared_ptr<HostCounter> current;
|
|
|
|
std::shared_ptr<HostCounter> last;
|
|
|
|
};
|
|
|
|
|
2020-09-10 07:43:30 +01:00
|
|
|
template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    /// Builds the cache and reserves slot 0 of the async-job pool as the null job,
    /// so NULL_ASYNC_JOB_ID always refers to a live (but unused) slot.
    explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
                            Core::Memory::Memory& cpu_memory_)
        : rasterizer{rasterizer_},
          cpu_memory{cpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
                                                          VideoCore::QueryType::SamplesPassed}}} {
        (void)slot_async_jobs.insert(); // Null value
    }

    /// Flushes and evicts all cached queries overlapping [addr, addr + size).
    void InvalidateRegion(VAddr addr, std::size_t size) {
        std::unique_lock lock{mutex};
        FlushAndRemoveRegion(addr, size);
    }

    /// Flushes cached queries overlapping [addr, addr + size) to guest memory.
    /// NOTE(review): identical to InvalidateRegion — flushed queries are also removed.
    void FlushRegion(VAddr addr, std::size_t size) {
        std::unique_lock lock{mutex};
        FlushAndRemoveRegion(addr, size);
    }

    /**
     * Records a query in GPU mapped memory, potentially marked with a timestamp.
     * @param gpu_addr GPU address to flush to when the mapped memory is read.
     * @param type Query type, e.g. SamplesPassed.
     * @param timestamp Timestamp, when empty the flushed query is assumed to be short.
     */
    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
        std::unique_lock lock{mutex};
        // gpu_memory is inherited channel state (presumably set up by the channel
        // base class before queries arrive — TODO confirm).
        const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
        ASSERT(cpu_addr);

        // NOTE(review): *cpu_addr is dereferenced here before the
        // ASSERT_OR_EXECUTE guard below; an unmapped gpu_addr would hit UB first.
        CachedQuery* query = TryGet(*cpu_addr);
        if (!query) {
            ASSERT_OR_EXECUTE(cpu_addr, return;);
            u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);

            query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
        }

        // Rebinding may flush the previous counter; if so, record its value into
        // the async job that was tracking it and detach the job from the query.
        auto result = query->BindCounter(Stream(type).Current(), timestamp);
        if (result) {
            auto async_job_id = query->GetAsyncJob();
            auto& async_job = slot_async_jobs[async_job_id];
            async_job.collected = true;
            async_job.value = *result;
            query->SetAsyncJob(NULL_ASYNC_JOB_ID);
        }
        // Schedules the deferred guest-memory write; unlocks `lock` internally.
        AsyncFlushQuery(query, timestamp, lock);
    }

    /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
    void UpdateCounters() {
        std::unique_lock lock{mutex};
        if (maxwell3d) {
            const auto& regs = maxwell3d->regs;
            Stream(VideoCore::QueryType::SamplesPassed).Update(regs.zpass_pixel_count_enable);
        }
    }

    /// Resets a counter to zero. It doesn't disable the query after resetting.
    void ResetCounter(VideoCore::QueryType type) {
        std::unique_lock lock{mutex};
        Stream(type).Reset();
    }

    /// Disable all active streams. Expected to be called at the end of a command buffer.
    void DisableStreams() {
        std::unique_lock lock{mutex};
        for (auto& stream : streams) {
            stream.Update(false);
        }
    }

    /// Returns a new host counter chained onto `dependency` (may be null).
    std::shared_ptr<HostCounter> Counter(std::shared_ptr<HostCounter> dependency,
                                         VideoCore::QueryType type) {
        return std::make_shared<HostCounter>(static_cast<QueryCache&>(*this), std::move(dependency),
                                             type);
    }

    /// Returns the counter stream of the specified type.
    CounterStream& Stream(VideoCore::QueryType type) {
        return streams[static_cast<std::size_t>(type)];
    }

    /// Returns the counter stream of the specified type.
    const CounterStream& Stream(VideoCore::QueryType type) const {
        return streams[static_cast<std::size_t>(type)];
    }

    /// Seals the current batch of pending async jobs into the committed queue.
    /// A null entry is pushed when there were no uncommitted jobs.
    void CommitAsyncFlushes() {
        std::unique_lock lock{mutex};
        committed_flushes.push_back(uncommitted_flushes);
        uncommitted_flushes.reset();
    }

    /// Returns true when there are async jobs not yet committed.
    bool HasUncommittedFlushes() const {
        std::unique_lock lock{mutex};
        return uncommitted_flushes != nullptr;
    }

    /// Returns true when the oldest committed batch contains jobs to wait on.
    bool ShouldWaitAsyncFlushes() const {
        std::unique_lock lock{mutex};
        if (committed_flushes.empty()) {
            return false;
        }
        return committed_flushes.front() != nullptr;
    }

    /// Collects every job of the oldest committed batch, flushing any query whose
    /// value has not been gathered yet, then drops the batch.
    void PopAsyncFlushes() {
        std::unique_lock lock{mutex};
        if (committed_flushes.empty()) {
            return;
        }
        auto& flush_list = committed_flushes.front();
        if (!flush_list) {
            // Empty batch (committed with nothing pending); just discard it.
            committed_flushes.pop_front();
            return;
        }
        for (AsyncJobId async_job_id : *flush_list) {
            AsyncJob& async_job = slot_async_jobs[async_job_id];
            if (!async_job.collected) {
                // Size 2 covers the query's pages; flushing marks the job collected.
                FlushAndRemoveRegion(async_job.query_location, 2, true);
            }
        }
        committed_flushes.pop_front();
    }

private:
    /// Bookkeeping for one deferred query write-back.
    struct AsyncJob {
        bool collected = false;        // True once the counter value has been gathered.
        u64 value = 0;                 // Gathered counter value to write to guest memory.
        VAddr query_location = 0;      // Guest CPU address of the query record.
        std::optional<u64> timestamp{};
    };

    /// Flushes a memory range to guest memory and removes it from the cache.
    /// When `async` is true the value is captured into the query's async job
    /// instead of being written to the host pointer immediately.
    void FlushAndRemoveRegion(VAddr addr, std::size_t size, bool async = false) {
        const u64 addr_begin = addr;
        const u64 addr_end = addr_begin + size;
        const auto in_range = [addr_begin, addr_end](const CachedQuery& query) {
            const u64 cache_begin = query.GetCpuAddr();
            const u64 cache_end = cache_begin + query.SizeInBytes();
            return cache_begin < addr_end && addr_begin < cache_end;
        };

        // Visit every page the range touches; queries are bucketed per page.
        const u64 page_end = addr_end >> YUZU_PAGEBITS;
        for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) {
            const auto& it = cached_queries.find(page);
            if (it == std::end(cached_queries)) {
                continue;
            }
            auto& contents = it->second;
            for (auto& query : contents) {
                if (!in_range(query)) {
                    continue;
                }
                AsyncJobId async_job_id = query.GetAsyncJob();
                auto flush_result = query.Flush(async);
                if (async_job_id == NULL_ASYNC_JOB_ID) {
                    // Every live cached query is expected to carry an async job.
                    ASSERT_MSG(false, "This should not be reachable at all");
                    continue;
                }
                AsyncJob& async_job = slot_async_jobs[async_job_id];
                async_job.collected = true;
                async_job.value = flush_result;
                query.SetAsyncJob(NULL_ASYNC_JOB_ID);
            }
            std::erase_if(contents, in_range);
        }
    }

    /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
    CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
        const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
        return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                  host_ptr);
    }

    /// Tries to a get a cached query. Returns nullptr on failure.
    CachedQuery* TryGet(VAddr addr) {
        const u64 page = static_cast<u64>(addr) >> YUZU_PAGEBITS;
        const auto it = cached_queries.find(page);
        if (it == std::end(cached_queries)) {
            return nullptr;
        }
        auto& contents = it->second;
        const auto found = std::find_if(std::begin(contents), std::end(contents),
                                        [addr](auto& query) { return query.GetCpuAddr() == addr; });
        return found != std::end(contents) ? &*found : nullptr;
    }

    /// Creates an async job for `query`, queues it as uncommitted, then (after
    /// releasing `lock`) schedules the guest-memory write-back on the rasterizer.
    void AsyncFlushQuery(CachedQuery* query, std::optional<u64> timestamp,
                         std::unique_lock<std::recursive_mutex>& lock) {
        const AsyncJobId new_async_job_id = slot_async_jobs.insert();
        {
            AsyncJob& async_job = slot_async_jobs[new_async_job_id];
            query->SetAsyncJob(new_async_job_id);
            async_job.query_location = query->GetCpuAddr();
            async_job.collected = false;

            if (!uncommitted_flushes) {
                uncommitted_flushes = std::make_shared<std::vector<AsyncJobId>>();
            }
            uncommitted_flushes->push_back(new_async_job_id);
        }
        // Drop the caller's lock before handing the operation to the rasterizer;
        // the operation re-locks on its own when it eventually runs.
        lock.unlock();
        std::function<void()> operation([this, new_async_job_id, timestamp] {
            std::unique_lock local_lock{mutex};
            AsyncJob& async_job = slot_async_jobs[new_async_job_id];
            u64 value = async_job.value;
            VAddr address = async_job.query_location;
            slot_async_jobs.erase(new_async_job_id);
            // Copy out what we need, then unlock before touching guest memory.
            local_lock.unlock();
            if (timestamp) {
                // Large query: 8-byte value followed by an 8-byte timestamp.
                u64 timestamp_value = *timestamp;
                cpu_memory.WriteBlockUnsafe(address + sizeof(u64), &timestamp_value, sizeof(u64));
                cpu_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
                rasterizer.InvalidateRegion(address, sizeof(u64) * 2,
                                            VideoCommon::CacheType::NoQueryCache);
            } else {
                // Short query: single 32-bit value.
                u32 small_value = static_cast<u32>(value);
                cpu_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
                rasterizer.InvalidateRegion(address, sizeof(u32),
                                            VideoCommon::CacheType::NoQueryCache);
            }
        });
        rasterizer.SyncOperation(std::move(operation));
    }

    static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
    static constexpr unsigned YUZU_PAGEBITS = 12;

    SlotVector<AsyncJob> slot_async_jobs; ///< Pool of pending write-back jobs; slot 0 is the null job.

    VideoCore::RasterizerInterface& rasterizer;
    Core::Memory::Memory& cpu_memory;

    // Recursive because FlushAndRemoveRegion can be reached while already holding it.
    mutable std::recursive_mutex mutex;

    std::unordered_map<u64, std::vector<CachedQuery>> cached_queries; ///< Queries bucketed by page.

    std::array<CounterStream, VideoCore::NumQueryTypes> streams;

    std::shared_ptr<std::vector<AsyncJobId>> uncommitted_flushes{}; ///< Jobs of the current batch.
    std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed_flushes; ///< FIFO of sealed batches.
};
|
|
|
|
|
|
|
|
template <class QueryCache, class HostCounter>
|
|
|
|
class HostCounterBase {
|
|
|
|
public:
|
2020-02-11 21:59:44 +00:00
|
|
|
explicit HostCounterBase(std::shared_ptr<HostCounter> dependency_)
|
|
|
|
: dependency{std::move(dependency_)}, depth{dependency ? (dependency->Depth() + 1) : 0} {
|
|
|
|
// Avoid nesting too many dependencies to avoid a stack overflow when these are deleted.
|
2020-02-14 00:11:21 +00:00
|
|
|
constexpr u64 depth_threshold = 96;
|
2020-02-11 21:59:44 +00:00
|
|
|
if (depth > depth_threshold) {
|
|
|
|
depth = 0;
|
|
|
|
base_result = dependency->Query();
|
|
|
|
dependency = nullptr;
|
|
|
|
}
|
|
|
|
}
|
2020-02-14 00:11:21 +00:00
|
|
|
virtual ~HostCounterBase() = default;
|
2020-02-11 19:02:41 +00:00
|
|
|
|
|
|
|
/// Returns the current value of the query.
|
2023-04-23 20:55:16 +01:00
|
|
|
u64 Query(bool async = false) {
|
2020-02-11 19:02:41 +00:00
|
|
|
if (result) {
|
|
|
|
return *result;
|
|
|
|
}
|
|
|
|
|
2023-04-23 20:55:16 +01:00
|
|
|
u64 value = BlockingQuery(async) + base_result;
|
2020-02-11 19:02:41 +00:00
|
|
|
if (dependency) {
|
|
|
|
value += dependency->Query();
|
2020-02-11 21:59:44 +00:00
|
|
|
dependency = nullptr;
|
2020-02-11 19:02:41 +00:00
|
|
|
}
|
|
|
|
|
2020-02-14 00:11:21 +00:00
|
|
|
result = value;
|
|
|
|
return *result;
|
2020-02-11 19:02:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns true when flushing this query will potentially wait.
|
|
|
|
bool WaitPending() const noexcept {
|
|
|
|
return result.has_value();
|
|
|
|
}
|
|
|
|
|
2020-02-11 21:59:44 +00:00
|
|
|
u64 Depth() const noexcept {
|
|
|
|
return depth;
|
|
|
|
}
|
|
|
|
|
2020-02-11 19:02:41 +00:00
|
|
|
protected:
|
|
|
|
/// Returns the value of query from the backend API blocking as needed.
|
2023-04-23 20:55:16 +01:00
|
|
|
virtual u64 BlockingQuery(bool async = false) const = 0;
|
2020-02-11 19:02:41 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
std::shared_ptr<HostCounter> dependency; ///< Counter to add to this value.
|
|
|
|
std::optional<u64> result; ///< Filled with the already returned value.
|
2020-02-11 21:59:44 +00:00
|
|
|
u64 depth; ///< Number of nested dependencies.
|
|
|
|
u64 base_result = 0; ///< Equivalent to nested dependencies value.
|
2020-02-11 19:02:41 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
template <class HostCounter>
|
|
|
|
class CachedQueryBase {
|
|
|
|
public:
|
2020-12-05 16:40:14 +00:00
|
|
|
explicit CachedQueryBase(VAddr cpu_addr_, u8* host_ptr_)
|
|
|
|
: cpu_addr{cpu_addr_}, host_ptr{host_ptr_} {}
|
2020-02-14 00:11:21 +00:00
|
|
|
virtual ~CachedQueryBase() = default;
|
2020-02-11 19:02:41 +00:00
|
|
|
|
2020-02-14 00:11:21 +00:00
|
|
|
CachedQueryBase(CachedQueryBase&&) noexcept = default;
|
2020-02-11 19:02:41 +00:00
|
|
|
CachedQueryBase(const CachedQueryBase&) = delete;
|
|
|
|
|
2020-02-14 00:11:21 +00:00
|
|
|
CachedQueryBase& operator=(CachedQueryBase&&) noexcept = default;
|
|
|
|
CachedQueryBase& operator=(const CachedQueryBase&) = delete;
|
2020-02-11 19:02:41 +00:00
|
|
|
|
|
|
|
/// Flushes the query to guest memory.
|
2023-04-23 20:55:16 +01:00
|
|
|
virtual u64 Flush(bool async = false) {
|
2023-03-12 03:10:38 +00:00
|
|
|
// When counter is nullptr it means that it's just been reset. We are supposed to write a
|
2020-02-11 19:02:41 +00:00
|
|
|
// zero in these cases.
|
2023-04-23 20:55:16 +01:00
|
|
|
const u64 value = counter ? counter->Query(async) : 0;
|
2023-04-23 22:47:05 +01:00
|
|
|
if (async) {
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
std::memcpy(host_ptr, &value, sizeof(u64));
|
|
|
|
|
|
|
|
if (timestamp) {
|
|
|
|
std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
|
|
|
|
}
|
2023-04-23 20:55:16 +01:00
|
|
|
return value;
|
2020-02-11 19:02:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Binds a counter to this query.
|
2023-04-23 22:47:05 +01:00
|
|
|
std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_,
|
|
|
|
std::optional<u64> timestamp_) {
|
2023-04-23 20:55:16 +01:00
|
|
|
std::optional<u64> result{};
|
2020-02-11 19:02:41 +00:00
|
|
|
if (counter) {
|
|
|
|
// If there's an old counter set it means the query is being rewritten by the game.
|
|
|
|
// To avoid losing the data forever, flush here.
|
2023-04-23 20:55:16 +01:00
|
|
|
result = std::make_optional(Flush());
|
2020-02-11 19:02:41 +00:00
|
|
|
}
|
|
|
|
counter = std::move(counter_);
|
2023-04-23 22:47:05 +01:00
|
|
|
timestamp = timestamp_;
|
2023-04-23 20:55:16 +01:00
|
|
|
return result;
|
2020-02-11 19:02:41 +00:00
|
|
|
}
|
|
|
|
|
2020-04-05 23:39:24 +01:00
|
|
|
VAddr GetCpuAddr() const noexcept {
|
2020-02-11 19:02:41 +00:00
|
|
|
return cpu_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
u64 SizeInBytes() const noexcept {
|
|
|
|
return SizeInBytes(timestamp.has_value());
|
|
|
|
}
|
|
|
|
|
2020-02-14 00:11:21 +00:00
|
|
|
static constexpr u64 SizeInBytes(bool with_timestamp) noexcept {
|
2020-02-11 19:02:41 +00:00
|
|
|
return with_timestamp ? LARGE_QUERY_SIZE : SMALL_QUERY_SIZE;
|
|
|
|
}
|
|
|
|
|
2023-04-23 20:55:16 +01:00
|
|
|
void SetAsyncJob(AsyncJobId assigned_async_job_) {
|
|
|
|
assigned_async_job = assigned_async_job_;
|
|
|
|
}
|
|
|
|
|
|
|
|
AsyncJobId GetAsyncJob() const {
|
|
|
|
return assigned_async_job;
|
|
|
|
}
|
|
|
|
|
2020-02-11 19:02:41 +00:00
|
|
|
protected:
|
|
|
|
/// Returns true when querying the counter may potentially block.
|
|
|
|
bool WaitPending() const noexcept {
|
|
|
|
return counter && counter->WaitPending();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
static constexpr std::size_t SMALL_QUERY_SIZE = 8; // Query size without timestamp.
|
|
|
|
static constexpr std::size_t LARGE_QUERY_SIZE = 16; // Query size with timestamp.
|
|
|
|
static constexpr std::intptr_t TIMESTAMP_OFFSET = 8; // Timestamp offset in a large query.
|
|
|
|
|
|
|
|
VAddr cpu_addr; ///< Guest CPU address.
|
|
|
|
u8* host_ptr; ///< Writable host pointer.
|
|
|
|
std::shared_ptr<HostCounter> counter; ///< Host counter to query, owns the dependency tree.
|
|
|
|
std::optional<u64> timestamp; ///< Timestamp to flush to guest memory.
|
2023-04-23 20:55:16 +01:00
|
|
|
AsyncJobId assigned_async_job;
|
2020-02-11 19:02:41 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace VideoCommon
|