Merge pull request #10086 from Morph1984/coretiming-ng-1
core_timing: Use CNTPCT as the guest CPU tick
Commit: e3122c5b46

31 changed files with 283 additions and 432 deletions
@@ -7,7 +7,6 @@
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"
 #include "core/memory.h"

 namespace AudioCore::AudioRenderer::ADSP {
@@ -13,7 +13,6 @@
 #include "common/thread.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"

 MICROPROFILE_DEFINE(Audio_Renderer, "Audio", "DSP", MP_RGB(60, 19, 97));

@@ -144,6 +143,7 @@ void AudioRenderer::ThreadFunc(std::stop_token stop_token) {

    mailbox->ADSPSendMessage(RenderMessage::AudioRenderer_InitializeOK);

+   // 0.12 seconds (2304000 / 19200000)
    constexpr u64 max_process_time{2'304'000ULL};

    while (!stop_token.stop_requested()) {
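The budget constant is now expressed directly in guest CNTPCT ticks rather than nanoseconds. A minimal sketch (not part of the patch) of the arithmetic behind the added comment, assuming the 19.2 MHz CNTPCT frequency used throughout this change:

#include <cstdint>

// 2'304'000 CNTPCT ticks at 19.2 MHz is a 0.12 s rendering budget.
constexpr std::uint64_t cntfrq_hz = 19'200'000;
constexpr std::uint64_t max_process_time_ticks = 2'304'000;
static_assert(max_process_time_ticks * 1000 / cntfrq_hz == 120); // 120 ms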
@@ -184,8 +184,7 @@ void AudioRenderer::ThreadFunc(std::stop_token stop_token) {
        u64 max_time{max_process_time};
        if (index == 1 && command_buffer.applet_resource_user_id ==
                              mailbox->GetCommandBuffer(0).applet_resource_user_id) {
-            max_time = max_process_time -
-                       Core::Timing::CyclesToNs(render_times_taken[0]).count();
+            max_time = max_process_time - render_times_taken[0];
            if (render_times_taken[0] > max_process_time) {
                max_time = 0;
            }
@@ -9,7 +9,6 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"
 #include "core/memory.h"

 namespace AudioCore::AudioRenderer::ADSP {
@@ -5,7 +5,6 @@
 #include "audio_core/renderer/command/performance/performance.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"

 namespace AudioCore::AudioRenderer {

@@ -18,20 +17,18 @@ void PerformanceCommand::Process(const ADSP::CommandListProcessor& processor) {
    auto base{entry_address.translated_address};
    if (state == PerformanceState::Start) {
        auto start_time_ptr{reinterpret_cast<u32*>(base + entry_address.entry_start_time_offset)};
-        *start_time_ptr = static_cast<u32>(
-            Core::Timing::CyclesToUs(processor.system->CoreTiming().GetClockTicks() -
-                                     processor.start_time - processor.current_processing_time)
-                .count());
+        *start_time_ptr =
+            static_cast<u32>(processor.system->CoreTiming().GetClockTicks() - processor.start_time -
+                             processor.current_processing_time);
    } else if (state == PerformanceState::Stop) {
        auto processed_time_ptr{
            reinterpret_cast<u32*>(base + entry_address.entry_processed_time_offset)};
        auto entry_count_ptr{
            reinterpret_cast<u32*>(base + entry_address.header_entry_count_offset)};

-        *processed_time_ptr = static_cast<u32>(
-            Core::Timing::CyclesToUs(processor.system->CoreTiming().GetClockTicks() -
-                                     processor.start_time - processor.current_processing_time)
-                .count());
+        *processed_time_ptr =
+            static_cast<u32>(processor.system->CoreTiming().GetClockTicks() - processor.start_time -
+                             processor.current_processing_time);
        (*entry_count_ptr)++;
    }
 }
@@ -15,7 +15,6 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"

 namespace AudioCore::Sink {

@@ -172,6 +172,8 @@ if(ARCHITECTURE_x86_64)
        x64/cpu_wait.h
        x64/native_clock.cpp
        x64/native_clock.h
+        x64/rdtsc.cpp
+        x64/rdtsc.h
        x64/xbyak_abi.h
        x64/xbyak_util.h
    )
@@ -28,13 +28,12 @@ static s64 GetSystemTimeNS() {
    // GetSystemTimePreciseAsFileTime returns the file time in 100ns units.
    static constexpr s64 Multiplier = 100;
    // Convert Windows epoch to Unix epoch.
-    static constexpr s64 WindowsEpochToUnixEpochNS = 0x19DB1DED53E8000LL;
+    static constexpr s64 WindowsEpochToUnixEpoch = 0x19DB1DED53E8000LL;

    FILETIME filetime;
    GetSystemTimePreciseAsFileTime(&filetime);
-    return Multiplier * ((static_cast<s64>(filetime.dwHighDateTime) << 32) +
-                         static_cast<s64>(filetime.dwLowDateTime)) -
-           WindowsEpochToUnixEpochNS;
+    return Multiplier * ((static_cast<s64>(filetime.dwHighDateTime) << 32) +
+                         static_cast<s64>(filetime.dwLowDateTime) - WindowsEpochToUnixEpoch);
 }
 #endif

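The renamed constant makes the units explicit: the Windows-to-Unix epoch offset is in 100 ns FILETIME units, so it must be subtracted before the result is scaled to nanoseconds. A small sanity check (not part of the patch):

#include <cstdint>

// 0x19DB1DED53E8000 FILETIME units (100 ns each) between 1601-01-01 and 1970-01-01 UTC.
constexpr std::int64_t WindowsEpochToUnixEpoch = 0x19DB1DED53E8000LL;
static_assert(WindowsEpochToUnixEpoch / 10'000'000 == 11'644'473'600LL); // seconds between the epochs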
@@ -2,88 +2,75 @@
 // SPDX-License-Identifier: GPL-2.0-or-later

 #include "common/steady_clock.h"
-#include "common/uint128.h"
 #include "common/wall_clock.h"

 #ifdef ARCHITECTURE_x86_64
 #include "common/x64/cpu_detect.h"
 #include "common/x64/native_clock.h"
+#include "common/x64/rdtsc.h"
 #endif

 namespace Common {

 class StandardWallClock final : public WallClock {
 public:
-    explicit StandardWallClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequency_)
-        : WallClock{emulated_cpu_frequency_, emulated_clock_frequency_, false},
-          start_time{SteadyClock::Now()} {}
+    explicit StandardWallClock() : start_time{SteadyClock::Now()} {}

-    std::chrono::nanoseconds GetTimeNS() override {
+    std::chrono::nanoseconds GetTimeNS() const override {
        return SteadyClock::Now() - start_time;
    }

-    std::chrono::microseconds GetTimeUS() override {
-        return std::chrono::duration_cast<std::chrono::microseconds>(GetTimeNS());
+    std::chrono::microseconds GetTimeUS() const override {
+        return static_cast<std::chrono::microseconds>(GetHostTicksElapsed() / NsToUsRatio::den);
    }

-    std::chrono::milliseconds GetTimeMS() override {
-        return std::chrono::duration_cast<std::chrono::milliseconds>(GetTimeNS());
+    std::chrono::milliseconds GetTimeMS() const override {
+        return static_cast<std::chrono::milliseconds>(GetHostTicksElapsed() / NsToMsRatio::den);
    }

-    u64 GetClockCycles() override {
-        const u128 temp = Common::Multiply64Into128(GetTimeNS().count(), emulated_clock_frequency);
-        return Common::Divide128On32(temp, NS_RATIO).first;
+    u64 GetCNTPCT() const override {
+        return GetHostTicksElapsed() * NsToCNTPCTRatio::num / NsToCNTPCTRatio::den;
    }

-    u64 GetCPUCycles() override {
-        const u128 temp = Common::Multiply64Into128(GetTimeNS().count(), emulated_cpu_frequency);
-        return Common::Divide128On32(temp, NS_RATIO).first;
+    u64 GetGPUTick() const override {
+        return GetHostTicksElapsed() * NsToGPUTickRatio::num / NsToGPUTickRatio::den;
    }

-    void Pause([[maybe_unused]] bool is_paused) override {
-        // Do nothing in this clock type.
+    u64 GetHostTicksNow() const override {
+        return static_cast<u64>(SteadyClock::Now().time_since_epoch().count());
+    }
+
+    u64 GetHostTicksElapsed() const override {
+        return static_cast<u64>(GetTimeNS().count());
+    }
+
+    bool IsNative() const override {
+        return false;
    }

 private:
    SteadyClock::time_point start_time;
 };

+std::unique_ptr<WallClock> CreateOptimalClock() {
 #ifdef ARCHITECTURE_x86_64
-
-std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
-                                                   u64 emulated_clock_frequency) {
    const auto& caps = GetCPUCaps();
-    u64 rtsc_frequency = 0;
-    if (caps.invariant_tsc) {
-        rtsc_frequency = caps.tsc_frequency ? caps.tsc_frequency : EstimateRDTSCFrequency();
-    }

-    // Fallback to StandardWallClock if the hardware TSC does not have the precision greater than:
-    // - A nanosecond
-    // - The emulated CPU frequency
-    // - The emulated clock counter frequency (CNTFRQ)
-    if (rtsc_frequency <= WallClock::NS_RATIO || rtsc_frequency <= emulated_cpu_frequency ||
-        rtsc_frequency <= emulated_clock_frequency) {
-        return std::make_unique<StandardWallClock>(emulated_cpu_frequency,
-                                                   emulated_clock_frequency);
+    if (caps.invariant_tsc && caps.tsc_frequency >= WallClock::GPUTickFreq) {
+        return std::make_unique<X64::NativeClock>(caps.tsc_frequency);
    } else {
-        return std::make_unique<X64::NativeClock>(emulated_cpu_frequency, emulated_clock_frequency,
-                                                  rtsc_frequency);
+        // Fallback to StandardWallClock if the hardware TSC
+        // - Is not invariant
+        // - Is not more precise than GPUTickFreq
+        return std::make_unique<StandardWallClock>();
    }
-}
-
 #else
-
-std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
-                                                   u64 emulated_clock_frequency) {
-    return std::make_unique<StandardWallClock>(emulated_cpu_frequency, emulated_clock_frequency);
-}
-
+    return std::make_unique<StandardWallClock>();
 #endif
+}

-std::unique_ptr<WallClock> CreateStandardWallClock(u64 emulated_cpu_frequency,
-                                                   u64 emulated_clock_frequency) {
-    return std::make_unique<StandardWallClock>(emulated_cpu_frequency, emulated_clock_frequency);
+std::unique_ptr<WallClock> CreateStandardWallClock() {
+    return std::make_unique<StandardWallClock>();
 }

 } // namespace Common
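CoreTiming (further down in this diff) now holds a single clock produced by the new factory. A minimal usage sketch under that assumption (not part of the patch; it relies on yuzu's wall_clock.h and u64 typedef):

#include <memory>
#include "common/wall_clock.h"

void ReadGuestTicks() {
    // Picks NativeClock when the TSC is invariant and at least 614.4 MHz,
    // otherwise falls back to StandardWallClock.
    const std::unique_ptr<Common::WallClock> clock = Common::CreateOptimalClock();
    const u64 cntpct = clock->GetCNTPCT();    // guest CNTPCT_EL0 ticks, 19.2 MHz
    const u64 gpu_tick = clock->GetGPUTick(); // guest GPU ticks, 614.4 MHz
    (void)cntpct;
    (void)gpu_tick;
}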
@@ -5,6 +5,7 @@

 #include <chrono>
 #include <memory>
+#include <ratio>

 #include "common/common_types.h"

@@ -12,50 +13,82 @@ namespace Common {

 class WallClock {
 public:
-    static constexpr u64 NS_RATIO = 1'000'000'000;
-    static constexpr u64 US_RATIO = 1'000'000;
-    static constexpr u64 MS_RATIO = 1'000;
+    static constexpr u64 CNTFRQ = 19'200'000; // CNTPCT_EL0 Frequency = 19.2 MHz
+    static constexpr u64 GPUTickFreq = 614'400'000; // GM20B GPU Tick Frequency = 614.4 MHz
+    static constexpr u64 CPUTickFreq = 1'020'000'000; // T210/4 A57 CPU Tick Frequency = 1020.0 MHz

    virtual ~WallClock() = default;

-    /// Returns current wall time in nanoseconds
-    [[nodiscard]] virtual std::chrono::nanoseconds GetTimeNS() = 0;
+    /// @returns The time in nanoseconds since the construction of this clock.
+    virtual std::chrono::nanoseconds GetTimeNS() const = 0;

-    /// Returns current wall time in microseconds
-    [[nodiscard]] virtual std::chrono::microseconds GetTimeUS() = 0;
+    /// @returns The time in microseconds since the construction of this clock.
+    virtual std::chrono::microseconds GetTimeUS() const = 0;

-    /// Returns current wall time in milliseconds
-    [[nodiscard]] virtual std::chrono::milliseconds GetTimeMS() = 0;
+    /// @returns The time in milliseconds since the construction of this clock.
+    virtual std::chrono::milliseconds GetTimeMS() const = 0;

-    /// Returns current wall time in emulated clock cycles
-    [[nodiscard]] virtual u64 GetClockCycles() = 0;
+    /// @returns The guest CNTPCT ticks since the construction of this clock.
+    virtual u64 GetCNTPCT() const = 0;

-    /// Returns current wall time in emulated cpu cycles
-    [[nodiscard]] virtual u64 GetCPUCycles() = 0;
+    /// @returns The guest GPU ticks since the construction of this clock.
+    virtual u64 GetGPUTick() const = 0;

-    virtual void Pause(bool is_paused) = 0;
+    /// @returns The raw host timer ticks since an indeterminate epoch.
+    virtual u64 GetHostTicksNow() const = 0;

-    /// Tells if the wall clock, uses the host CPU's hardware clock
-    [[nodiscard]] bool IsNative() const {
-        return is_native;
+    /// @returns The raw host timer ticks since the construction of this clock.
+    virtual u64 GetHostTicksElapsed() const = 0;
+
+    /// @returns Whether the clock directly uses the host's hardware clock.
+    virtual bool IsNative() const = 0;
+
+    static inline u64 NSToCNTPCT(u64 ns) {
+        return ns * NsToCNTPCTRatio::num / NsToCNTPCTRatio::den;
+    }
+
+    static inline u64 NSToGPUTick(u64 ns) {
+        return ns * NsToGPUTickRatio::num / NsToGPUTickRatio::den;
+    }
+
+    // Cycle Timing
+
+    static inline u64 CPUTickToNS(u64 cpu_tick) {
+        return cpu_tick * CPUTickToNsRatio::num / CPUTickToNsRatio::den;
+    }
+
+    static inline u64 CPUTickToUS(u64 cpu_tick) {
+        return cpu_tick * CPUTickToUsRatio::num / CPUTickToUsRatio::den;
+    }
+
+    static inline u64 CPUTickToCNTPCT(u64 cpu_tick) {
+        return cpu_tick * CPUTickToCNTPCTRatio::num / CPUTickToCNTPCTRatio::den;
+    }
+
+    static inline u64 CPUTickToGPUTick(u64 cpu_tick) {
+        return cpu_tick * CPUTickToGPUTickRatio::num / CPUTickToGPUTickRatio::den;
    }

 protected:
-    explicit WallClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequency_, bool is_native_)
-        : emulated_cpu_frequency{emulated_cpu_frequency_},
-          emulated_clock_frequency{emulated_clock_frequency_}, is_native{is_native_} {}
+    using NsRatio = std::nano;
+    using UsRatio = std::micro;
+    using MsRatio = std::milli;

-    u64 emulated_cpu_frequency;
-    u64 emulated_clock_frequency;
+    using NsToUsRatio = std::ratio_divide<std::nano, std::micro>;
+    using NsToMsRatio = std::ratio_divide<std::nano, std::milli>;
+    using NsToCNTPCTRatio = std::ratio<CNTFRQ, std::nano::den>;
+    using NsToGPUTickRatio = std::ratio<GPUTickFreq, std::nano::den>;

-private:
-    bool is_native;
+    // Cycle Timing
+
+    using CPUTickToNsRatio = std::ratio<std::nano::den, CPUTickFreq>;
+    using CPUTickToUsRatio = std::ratio<std::micro::den, CPUTickFreq>;
+    using CPUTickToCNTPCTRatio = std::ratio<CNTFRQ, CPUTickFreq>;
+    using CPUTickToGPUTickRatio = std::ratio<GPUTickFreq, CPUTickFreq>;
 };

-[[nodiscard]] std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
-                                                                 u64 emulated_clock_frequency);
+std::unique_ptr<WallClock> CreateOptimalClock();

-[[nodiscard]] std::unique_ptr<WallClock> CreateStandardWallClock(u64 emulated_cpu_frequency,
-                                                                 u64 emulated_clock_frequency);
+std::unique_ptr<WallClock> CreateStandardWallClock();

 } // namespace Common
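All of these guest-tick conversions are driven by std::ratio, which reduces each fraction at compile time, so every conversion is a single exact integer multiply and divide. A short sketch of the reduced ratios (not part of the patch):

#include <cstdint>
#include <ratio>

using NsToCNTPCTRatio = std::ratio<19'200'000, 1'000'000'000>;   // reduces to 12/625
using NsToGPUTickRatio = std::ratio<614'400'000, 1'000'000'000>; // reduces to 384/625

static_assert(NsToCNTPCTRatio::num == 12 && NsToCNTPCTRatio::den == 625);
static_assert(NsToGPUTickRatio::num == 384 && NsToGPUTickRatio::den == 625);

constexpr std::uint64_t NSToCNTPCT(std::uint64_t ns) {
    return ns * NsToCNTPCTRatio::num / NsToCNTPCTRatio::den;
}

static_assert(NSToCNTPCT(1'000'000'000) == 19'200'000); // 1 s of wall time -> 19.2M guest ticks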
@@ -14,6 +14,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/x64/cpu_detect.h"
+#include "common/x64/rdtsc.h"

 #ifdef _WIN32
 #include <windows.h>
@@ -187,6 +188,8 @@ static CPUCaps Detect() {
            caps.tsc_frequency = static_cast<u64>(caps.crystal_frequency) *
                                 caps.tsc_crystal_ratio_numerator /
                                 caps.tsc_crystal_ratio_denominator;
+        } else {
+            caps.tsc_frequency = X64::EstimateRDTSCFrequency();
        }
    }

@@ -9,19 +9,11 @@

 #include "common/x64/cpu_detect.h"
 #include "common/x64/cpu_wait.h"
+#include "common/x64/rdtsc.h"

 namespace Common::X64 {

 #ifdef _MSC_VER
-__forceinline static u64 FencedRDTSC() {
-    _mm_lfence();
-    _ReadWriteBarrier();
-    const u64 result = __rdtsc();
-    _mm_lfence();
-    _ReadWriteBarrier();
-    return result;
-}
-
 __forceinline static void TPAUSE() {
    // 100,000 cycles is a reasonable amount of time to wait to save on CPU resources.
    // For reference:
@@ -32,16 +24,6 @@ __forceinline static void TPAUSE() {
    _tpause(0, FencedRDTSC() + PauseCycles);
 }
 #else
-static u64 FencedRDTSC() {
-    u64 eax;
-    u64 edx;
-    asm volatile("lfence\n\t"
-                 "rdtsc\n\t"
-                 "lfence\n\t"
-                 : "=a"(eax), "=d"(edx));
-    return (edx << 32) | eax;
-}
-
 static void TPAUSE() {
    // 100,000 cycles is a reasonable amount of time to wait to save on CPU resources.
    // For reference:
@@ -1,164 +1,50 @@
 // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

-#include <array>
-#include <chrono>
-#include <thread>
-
-#include "common/atomic_ops.h"
-#include "common/steady_clock.h"
 #include "common/uint128.h"
 #include "common/x64/native_clock.h"
+#include "common/x64/rdtsc.h"

-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
-namespace Common {
-
-#ifdef _MSC_VER
-__forceinline static u64 FencedRDTSC() {
-    _mm_lfence();
-    _ReadWriteBarrier();
-    const u64 result = __rdtsc();
-    _mm_lfence();
-    _ReadWriteBarrier();
-    return result;
-}
-#else
-static u64 FencedRDTSC() {
-    u64 eax;
-    u64 edx;
-    asm volatile("lfence\n\t"
-                 "rdtsc\n\t"
-                 "lfence\n\t"
-                 : "=a"(eax), "=d"(edx));
-    return (edx << 32) | eax;
-}
-#endif
-
-template <u64 Nearest>
-static u64 RoundToNearest(u64 value) {
-    const auto mod = value % Nearest;
-    return mod >= (Nearest / 2) ? (value - mod + Nearest) : (value - mod);
-}
-
-u64 EstimateRDTSCFrequency() {
-    // Discard the first result measuring the rdtsc.
-    FencedRDTSC();
-    std::this_thread::sleep_for(std::chrono::milliseconds{1});
-    FencedRDTSC();
-
-    // Get the current time.
-    const auto start_time = Common::RealTimeClock::Now();
-    const u64 tsc_start = FencedRDTSC();
-    // Wait for 250 milliseconds.
-    std::this_thread::sleep_for(std::chrono::milliseconds{250});
-    const auto end_time = Common::RealTimeClock::Now();
-    const u64 tsc_end = FencedRDTSC();
-    // Calculate differences.
-    const u64 timer_diff = static_cast<u64>(
-        std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time).count());
-    const u64 tsc_diff = tsc_end - tsc_start;
-    const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
-    return RoundToNearest<1000>(tsc_freq);
-}
-
-namespace X64 {
-NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequency_,
-                         u64 rtsc_frequency_)
-    : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
-                                                                               rtsc_frequency_} {
-    // Thread to re-adjust the RDTSC frequency after 10 seconds has elapsed.
-    time_sync_thread = std::jthread{[this](std::stop_token token) {
-        // Get the current time.
-        const auto start_time = Common::RealTimeClock::Now();
-        const u64 tsc_start = FencedRDTSC();
-        // Wait for 10 seconds.
-        if (!Common::StoppableTimedWait(token, std::chrono::seconds{10})) {
-            return;
-        }
-        const auto end_time = Common::RealTimeClock::Now();
-        const u64 tsc_end = FencedRDTSC();
-        // Calculate differences.
-        const u64 timer_diff = static_cast<u64>(
-            std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time).count());
-        const u64 tsc_diff = tsc_end - tsc_start;
-        const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
-        rtsc_frequency = tsc_freq;
-        CalculateAndSetFactors();
-    }};
-
-    time_point.inner.last_measure = FencedRDTSC();
-    time_point.inner.accumulated_ticks = 0U;
-    CalculateAndSetFactors();
-}
-
-u64 NativeClock::GetRTSC() {
-    TimePoint new_time_point{};
-    TimePoint current_time_point{};
-
-    current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
-    do {
-        const u64 current_measure = FencedRDTSC();
-        u64 diff = current_measure - current_time_point.inner.last_measure;
-        diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
-        new_time_point.inner.last_measure = current_measure > current_time_point.inner.last_measure
-                                                ? current_measure
-                                                : current_time_point.inner.last_measure;
-        new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
-    } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
-                                           current_time_point.pack, current_time_point.pack));
-    return new_time_point.inner.accumulated_ticks;
-}
-
-void NativeClock::Pause(bool is_paused) {
-    if (!is_paused) {
-        TimePoint current_time_point{};
-        TimePoint new_time_point{};
-
-        current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
-        do {
-            new_time_point.pack = current_time_point.pack;
-            new_time_point.inner.last_measure = FencedRDTSC();
-        } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
-                                               current_time_point.pack, current_time_point.pack));
-    }
-}
-
-std::chrono::nanoseconds NativeClock::GetTimeNS() {
-    const u64 rtsc_value = GetRTSC();
-    return std::chrono::nanoseconds{MultiplyHigh(rtsc_value, ns_rtsc_factor)};
-}
-
-std::chrono::microseconds NativeClock::GetTimeUS() {
-    const u64 rtsc_value = GetRTSC();
-    return std::chrono::microseconds{MultiplyHigh(rtsc_value, us_rtsc_factor)};
-}
-
-std::chrono::milliseconds NativeClock::GetTimeMS() {
-    const u64 rtsc_value = GetRTSC();
-    return std::chrono::milliseconds{MultiplyHigh(rtsc_value, ms_rtsc_factor)};
-}
-
-u64 NativeClock::GetClockCycles() {
-    const u64 rtsc_value = GetRTSC();
-    return MultiplyHigh(rtsc_value, clock_rtsc_factor);
-}
-
-u64 NativeClock::GetCPUCycles() {
-    const u64 rtsc_value = GetRTSC();
-    return MultiplyHigh(rtsc_value, cpu_rtsc_factor);
-}
-
-void NativeClock::CalculateAndSetFactors() {
-    ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency);
-    us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
-    ms_rtsc_factor = GetFixedPoint64Factor(MS_RATIO, rtsc_frequency);
-    clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
-    cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
-}
-
-} // namespace X64
-
-} // namespace Common
+namespace Common::X64 {
+
+NativeClock::NativeClock(u64 rdtsc_frequency_)
+    : start_ticks{FencedRDTSC()}, rdtsc_frequency{rdtsc_frequency_},
+      ns_rdtsc_factor{GetFixedPoint64Factor(NsRatio::den, rdtsc_frequency)},
+      us_rdtsc_factor{GetFixedPoint64Factor(UsRatio::den, rdtsc_frequency)},
+      ms_rdtsc_factor{GetFixedPoint64Factor(MsRatio::den, rdtsc_frequency)},
+      cntpct_rdtsc_factor{GetFixedPoint64Factor(CNTFRQ, rdtsc_frequency)},
+      gputick_rdtsc_factor{GetFixedPoint64Factor(GPUTickFreq, rdtsc_frequency)} {}
+
+std::chrono::nanoseconds NativeClock::GetTimeNS() const {
+    return std::chrono::nanoseconds{MultiplyHigh(GetHostTicksElapsed(), ns_rdtsc_factor)};
+}
+
+std::chrono::microseconds NativeClock::GetTimeUS() const {
+    return std::chrono::microseconds{MultiplyHigh(GetHostTicksElapsed(), us_rdtsc_factor)};
+}
+
+std::chrono::milliseconds NativeClock::GetTimeMS() const {
+    return std::chrono::milliseconds{MultiplyHigh(GetHostTicksElapsed(), ms_rdtsc_factor)};
+}
+
+u64 NativeClock::GetCNTPCT() const {
+    return MultiplyHigh(GetHostTicksElapsed(), cntpct_rdtsc_factor);
+}
+
+u64 NativeClock::GetGPUTick() const {
+    return MultiplyHigh(GetHostTicksElapsed(), gputick_rdtsc_factor);
+}
+
+u64 NativeClock::GetHostTicksNow() const {
+    return FencedRDTSC();
+}
+
+u64 NativeClock::GetHostTicksElapsed() const {
+    return FencedRDTSC() - start_ticks;
+}
+
+bool NativeClock::IsNative() const {
+    return true;
+}
+
+} // namespace Common::X64
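NativeClock now scales raw TSC deltas with precomputed 0.64 fixed-point factors, so each conversion on the hot path is a single widening multiply instead of a 128-bit division. A rough sketch of the idea (GetFixedPoint64Factor and MultiplyHigh are assumed to behave like yuzu's uint128 helpers; the 128-bit integer here is the GCC/Clang extension, used only for brevity):

#include <cstdint>

// factor = 2^64 * to_hz / from_hz, valid while to_hz < from_hz
// (e.g. 19.2 MHz CNTPCT derived from a >= 614.4 MHz invariant TSC).
constexpr std::uint64_t GetFixedPoint64Factor(std::uint64_t to_hz, std::uint64_t from_hz) {
    return static_cast<std::uint64_t>((static_cast<unsigned __int128>(to_hz) << 64) / from_hz);
}

// Upper 64 bits of ticks * factor, i.e. ticks scaled by to_hz / from_hz.
constexpr std::uint64_t MultiplyHigh(std::uint64_t ticks, std::uint64_t factor) {
    return static_cast<std::uint64_t>((static_cast<unsigned __int128>(ticks) * factor) >> 64);
}

// Example: ~1 s of a 3 GHz TSC maps to ~19.2 million CNTPCT ticks (off by at most one
// tick because the factor is truncated).
constexpr std::uint64_t example =
    MultiplyHigh(3'000'000'000ULL, GetFixedPoint64Factor(19'200'000ULL, 3'000'000'000ULL));
static_assert(example >= 19'199'999ULL && example <= 19'200'000ULL);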
@@ -3,58 +3,39 @@

 #pragma once

-#include "common/polyfill_thread.h"
 #include "common/wall_clock.h"

-namespace Common {
+namespace Common::X64 {

-namespace X64 {
 class NativeClock final : public WallClock {
 public:
-    explicit NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequency_,
-                         u64 rtsc_frequency_);
+    explicit NativeClock(u64 rdtsc_frequency_);

-    std::chrono::nanoseconds GetTimeNS() override;
+    std::chrono::nanoseconds GetTimeNS() const override;

-    std::chrono::microseconds GetTimeUS() override;
+    std::chrono::microseconds GetTimeUS() const override;

-    std::chrono::milliseconds GetTimeMS() override;
+    std::chrono::milliseconds GetTimeMS() const override;

-    u64 GetClockCycles() override;
+    u64 GetCNTPCT() const override;

-    u64 GetCPUCycles() override;
+    u64 GetGPUTick() const override;

-    void Pause(bool is_paused) override;
+    u64 GetHostTicksNow() const override;
+
+    u64 GetHostTicksElapsed() const override;
+
+    bool IsNative() const override;

 private:
-    u64 GetRTSC();
-
-    void CalculateAndSetFactors();
-
-    union alignas(16) TimePoint {
-        TimePoint() : pack{} {}
-        u128 pack{};
-        struct Inner {
-            u64 last_measure{};
-            u64 accumulated_ticks{};
-        } inner;
-    };
-
-    TimePoint time_point;
-
-    // factors
-    u64 clock_rtsc_factor{};
-    u64 cpu_rtsc_factor{};
-    u64 ns_rtsc_factor{};
-    u64 us_rtsc_factor{};
-    u64 ms_rtsc_factor{};
-
-    u64 rtsc_frequency;
-
-    std::jthread time_sync_thread;
+    u64 start_ticks;
+    u64 rdtsc_frequency;
+
+    u64 ns_rdtsc_factor;
+    u64 us_rdtsc_factor;
+    u64 ms_rdtsc_factor;
+    u64 cntpct_rdtsc_factor;
+    u64 gputick_rdtsc_factor;
 };

-} // namespace X64
-
-u64 EstimateRDTSCFrequency();
-
-} // namespace Common
+} // namespace Common::X64
src/common/x64/rdtsc.cpp (new file, 39 lines)
@@ -0,0 +1,39 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <thread>
+
+#include "common/steady_clock.h"
+#include "common/uint128.h"
+#include "common/x64/rdtsc.h"
+
+namespace Common::X64 {
+
+template <u64 Nearest>
+static u64 RoundToNearest(u64 value) {
+    const auto mod = value % Nearest;
+    return mod >= (Nearest / 2) ? (value - mod + Nearest) : (value - mod);
+}
+
+u64 EstimateRDTSCFrequency() {
+    // Discard the first result measuring the rdtsc.
+    FencedRDTSC();
+    std::this_thread::sleep_for(std::chrono::milliseconds{1});
+    FencedRDTSC();
+
+    // Get the current time.
+    const auto start_time = RealTimeClock::Now();
+    const u64 tsc_start = FencedRDTSC();
+    // Wait for 100 milliseconds.
+    std::this_thread::sleep_for(std::chrono::milliseconds{100});
+    const auto end_time = RealTimeClock::Now();
+    const u64 tsc_end = FencedRDTSC();
+    // Calculate differences.
+    const u64 timer_diff = static_cast<u64>(
+        std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time).count());
+    const u64 tsc_diff = tsc_end - tsc_start;
+    const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
+    return RoundToNearest<100'000>(tsc_freq);
+}
+
+} // namespace Common::X64
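EstimateRDTSCFrequency is simply elapsed TSC ticks divided by elapsed wall time, computed through a 128-bit intermediate so the multiplication cannot overflow. A sketch of the arithmetic (MultiplyAndDivide64 is approximated here; the real helper is yuzu's 128-bit math utility):

#include <cstdint>

constexpr std::uint64_t MultiplyAndDivide64(std::uint64_t a, std::uint64_t b, std::uint64_t d) {
    // Keep the intermediate product in 128 bits so a * b cannot overflow.
    return static_cast<std::uint64_t>(static_cast<unsigned __int128>(a) * b / d);
}

// 300 million TSC ticks observed over 100 ms (1e8 ns) estimates a 3 GHz TSC.
static_assert(MultiplyAndDivide64(300'000'000ULL, 1'000'000'000ULL, 100'000'000ULL) ==
              3'000'000'000ULL);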
src/common/x64/rdtsc.h (new file, 37 lines)
@@ -0,0 +1,37 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#include "common/common_types.h"
+
+namespace Common::X64 {
+
+#ifdef _MSC_VER
+__forceinline static u64 FencedRDTSC() {
+    _mm_lfence();
+    _ReadWriteBarrier();
+    const u64 result = __rdtsc();
+    _mm_lfence();
+    _ReadWriteBarrier();
+    return result;
+}
+#else
+static inline u64 FencedRDTSC() {
+    u64 eax;
+    u64 edx;
+    asm volatile("lfence\n\t"
+                 "rdtsc\n\t"
+                 "lfence\n\t"
+                 : "=a"(eax), "=d"(edx));
+    return (edx << 32) | eax;
+}
+#endif
+
+u64 EstimateRDTSCFrequency();
+
+} // namespace Common::X64
@@ -14,7 +14,6 @@ add_library(core STATIC
    core.h
    core_timing.cpp
    core_timing.h
-    core_timing_util.h
    cpu_manager.cpp
    cpu_manager.h
    crypto/aes_util.cpp
@@ -16,12 +16,11 @@

 #include "common/microprofile.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"
 #include "core/hardware_properties.h"

 namespace Core::Timing {

-constexpr s64 MAX_SLICE_LENGTH = 4000;
+constexpr s64 MAX_SLICE_LENGTH = 10000;

 std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
    return std::make_shared<EventType>(std::move(callback), std::move(name));
@@ -45,9 +44,7 @@ struct CoreTiming::Event {
    }
 };

-CoreTiming::CoreTiming()
-    : cpu_clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)},
-      event_clock{Common::CreateStandardWallClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
+CoreTiming::CoreTiming() : clock{Common::CreateOptimalClock()} {}

 CoreTiming::~CoreTiming() {
    Reset();
@@ -68,7 +65,7 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
    on_thread_init = std::move(on_thread_init_);
    event_fifo_id = 0;
    shutting_down = false;
-    ticks = 0;
+    cpu_ticks = 0;
    const auto empty_timed_callback = [](std::uintptr_t, u64, std::chrono::nanoseconds)
        -> std::optional<std::chrono::nanoseconds> { return std::nullopt; };
    ev_lost = CreateEvent("_lost_event", empty_timed_callback);
@@ -173,38 +170,30 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
 }

 void CoreTiming::AddTicks(u64 ticks_to_add) {
-    ticks += ticks_to_add;
-    downcount -= static_cast<s64>(ticks);
+    cpu_ticks += ticks_to_add;
+    downcount -= static_cast<s64>(cpu_ticks);
 }

 void CoreTiming::Idle() {
-    if (!event_queue.empty()) {
-        const u64 next_event_time = event_queue.front().time;
-        const u64 next_ticks = nsToCycles(std::chrono::nanoseconds(next_event_time)) + 10U;
-        if (next_ticks > ticks) {
-            ticks = next_ticks;
-        }
-        return;
-    }
-    ticks += 1000U;
+    cpu_ticks += 1000U;
 }

 void CoreTiming::ResetTicks() {
    downcount = MAX_SLICE_LENGTH;
 }

-u64 CoreTiming::GetCPUTicks() const {
-    if (is_multicore) [[likely]] {
-        return cpu_clock->GetCPUCycles();
-    }
-    return ticks;
-}
-
 u64 CoreTiming::GetClockTicks() const {
    if (is_multicore) [[likely]] {
-        return cpu_clock->GetClockCycles();
+        return clock->GetCNTPCT();
    }
-    return CpuCyclesToClockCycles(ticks);
+    return Common::WallClock::CPUTickToCNTPCT(cpu_ticks);
+}
+
+u64 CoreTiming::GetGPUTicks() const {
+    if (is_multicore) [[likely]] {
+        return clock->GetGPUTick();
+    }
+    return Common::WallClock::CPUTickToGPUTick(cpu_ticks);
 }

 std::optional<s64> CoreTiming::Advance() {
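In the single-core fallback path the accumulated emulated CPU ticks (1020 MHz) are converted straight into CNTPCT ticks (19.2 MHz); the conversion is an exact reduced ratio rather than the old divide-by-1000 approximations. A small sketch (not part of the patch):

#include <cstdint>
#include <ratio>

using CPUTickToCNTPCTRatio = std::ratio<19'200'000, 1'020'000'000>; // reduces to 8/425
static_assert(CPUTickToCNTPCTRatio::num == 8 && CPUTickToCNTPCTRatio::den == 425);

constexpr std::uint64_t CPUTickToCNTPCT(std::uint64_t cpu_tick) {
    return cpu_tick * CPUTickToCNTPCTRatio::num / CPUTickToCNTPCTRatio::den;
}

static_assert(CPUTickToCNTPCT(1'020'000'000) == 19'200'000); // 1 s of guest CPU time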
@@ -297,9 +286,7 @@ void CoreTiming::ThreadLoop() {
            }

            paused_set = true;
-            event_clock->Pause(true);
            pause_event.Wait();
-            event_clock->Pause(false);
        }
    }

@@ -315,25 +302,18 @@ void CoreTiming::Reset() {
    has_started = false;
 }

-std::chrono::nanoseconds CoreTiming::GetCPUTimeNs() const {
-    if (is_multicore) [[likely]] {
-        return cpu_clock->GetTimeNS();
-    }
-    return CyclesToNs(ticks);
-}
-
 std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
    if (is_multicore) [[likely]] {
-        return event_clock->GetTimeNS();
+        return clock->GetTimeNS();
    }
-    return CyclesToNs(ticks);
+    return std::chrono::nanoseconds{Common::WallClock::CPUTickToNS(cpu_ticks)};
 }

 std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
    if (is_multicore) [[likely]] {
-        return event_clock->GetTimeUS();
+        return clock->GetTimeUS();
    }
-    return CyclesToUs(ticks);
+    return std::chrono::microseconds{Common::WallClock::CPUTickToUS(cpu_ticks)};
 }

 } // namespace Core::Timing
@@ -116,14 +116,11 @@
        return downcount;
    }

-    /// Returns current time in emulated CPU cycles
-    u64 GetCPUTicks() const;
-
-    /// Returns current time in emulated in Clock cycles
+    /// Returns the current CNTPCT tick value.
    u64 GetClockTicks() const;

-    /// Returns current time in nanoseconds.
-    std::chrono::nanoseconds GetCPUTimeNs() const;
+    /// Returns the current GPU tick value.
+    u64 GetGPUTicks() const;

    /// Returns current time in microseconds.
    std::chrono::microseconds GetGlobalTimeUs() const;
@@ -142,8 +139,7 @@

    void Reset();

-    std::unique_ptr<Common::WallClock> cpu_clock;
-    std::unique_ptr<Common::WallClock> event_clock;
+    std::unique_ptr<Common::WallClock> clock;

    s64 global_timer = 0;

@@ -171,7 +167,7 @@
    s64 pause_end_time{};

    /// Cycle timing
-    u64 ticks{};
+    u64 cpu_ticks{};
    s64 downcount{};
 };

@@ -1,58 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <chrono>
-
-#include "common/common_types.h"
-#include "core/hardware_properties.h"
-
-namespace Core::Timing {
-
-namespace detail {
-constexpr u64 CNTFREQ_ADJUSTED = Hardware::CNTFREQ / 1000;
-constexpr u64 BASE_CLOCK_RATE_ADJUSTED = Hardware::BASE_CLOCK_RATE / 1000;
-} // namespace detail
-
-[[nodiscard]] constexpr s64 msToCycles(std::chrono::milliseconds ms) {
-    return ms.count() * detail::BASE_CLOCK_RATE_ADJUSTED;
-}
-
-[[nodiscard]] constexpr s64 usToCycles(std::chrono::microseconds us) {
-    return us.count() * detail::BASE_CLOCK_RATE_ADJUSTED / 1000;
-}
-
-[[nodiscard]] constexpr s64 nsToCycles(std::chrono::nanoseconds ns) {
-    return ns.count() * detail::BASE_CLOCK_RATE_ADJUSTED / 1000000;
-}
-
-[[nodiscard]] constexpr u64 msToClockCycles(std::chrono::milliseconds ms) {
-    return static_cast<u64>(ms.count()) * detail::CNTFREQ_ADJUSTED;
-}
-
-[[nodiscard]] constexpr u64 usToClockCycles(std::chrono::microseconds us) {
-    return us.count() * detail::CNTFREQ_ADJUSTED / 1000;
-}
-
-[[nodiscard]] constexpr u64 nsToClockCycles(std::chrono::nanoseconds ns) {
-    return ns.count() * detail::CNTFREQ_ADJUSTED / 1000000;
-}
-
-[[nodiscard]] constexpr u64 CpuCyclesToClockCycles(u64 ticks) {
-    return ticks * detail::CNTFREQ_ADJUSTED / detail::BASE_CLOCK_RATE_ADJUSTED;
-}
-
-[[nodiscard]] constexpr std::chrono::milliseconds CyclesToMs(s64 cycles) {
-    return std::chrono::milliseconds(cycles / detail::BASE_CLOCK_RATE_ADJUSTED);
-}
-
-[[nodiscard]] constexpr std::chrono::nanoseconds CyclesToNs(s64 cycles) {
-    return std::chrono::nanoseconds(cycles * 1000000 / detail::BASE_CLOCK_RATE_ADJUSTED);
-}
-
-[[nodiscard]] constexpr std::chrono::microseconds CyclesToUs(s64 cycles) {
-    return std::chrono::microseconds(cycles * 1000 / detail::BASE_CLOCK_RATE_ADJUSTED);
-}
-
-} // namespace Core::Timing
@@ -184,7 +184,8 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
        prev_highest_thread != highest_thread) [[likely]] {
        if (prev_highest_thread != nullptr) [[likely]] {
            IncrementScheduledCount(prev_highest_thread);
-            prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks());
+            prev_highest_thread->SetLastScheduledTick(
+                m_kernel.System().CoreTiming().GetClockTicks());
        }
        if (m_state.should_count_idle) {
            if (highest_thread != nullptr) [[likely]] {
|
||||||
|
|
||||||
// Update the CPU time tracking variables.
|
// Update the CPU time tracking variables.
|
||||||
const s64 prev_tick = m_last_context_switch_time;
|
const s64 prev_tick = m_last_context_switch_time;
|
||||||
const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks();
|
const s64 cur_tick = m_kernel.System().CoreTiming().GetClockTicks();
|
||||||
const s64 tick_diff = cur_tick - prev_tick;
|
const s64 tick_diff = cur_tick - prev_tick;
|
||||||
cur_thread->AddCpuTime(m_core_id, tick_diff);
|
cur_thread->AddCpuTime(m_core_id, tick_diff);
|
||||||
if (cur_process != nullptr) {
|
if (cur_process != nullptr) {
|
||||||
|
|
|
@@ -199,9 +199,9 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
            if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
                const u64 thread_ticks = current_thread->GetCpuTime();

-                out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
+                out_ticks = thread_ticks + (core_timing.GetClockTicks() - prev_ctx_ticks);
            } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
-                out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
+                out_ticks = core_timing.GetClockTicks() - prev_ctx_ticks;
            }

            *result = out_ticks;
@@ -12,16 +12,8 @@ namespace Kernel::Svc {
 int64_t GetSystemTick(Core::System& system) {
    LOG_TRACE(Kernel_SVC, "called");

-    auto& core_timing = system.CoreTiming();
-
    // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
-    const u64 result{core_timing.GetClockTicks()};
-
-    if (!system.Kernel().IsMulticore()) {
-        core_timing.AddTicks(400U);
-    }
-
-    return static_cast<int64_t>(result);
+    return static_cast<int64_t>(system.CoreTiming().GetClockTicks());
 }

 int64_t GetSystemTick64(Core::System& system) {
@@ -5,7 +5,6 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"
 #include "core/hid/hid_types.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
@@ -51,8 +51,8 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
                                          stride, format, transform, crop_rect};

    system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences);
-    system.GetPerfStats().EndSystemFrame();
    system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs());
+    system.GetPerfStats().EndSystemFrame();
    system.GetPerfStats().BeginSystemFrame();
 }

@@ -70,7 +70,8 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
        [this](std::uintptr_t, s64 time,
               std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
            vsync_signal.store(true);
-            vsync_signal.notify_all();
+            { const auto lock_guard = Lock(); }
+            vsync_signal.notify_one();
            return std::chrono::nanoseconds(GetNextTicks());
        });

@@ -3,6 +3,8 @@

 #pragma once

+#include <ratio>
+
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/uuid.h"
@@ -74,18 +76,19 @@ static_assert(std::is_trivially_copyable_v<ContinuousAdjustmentTimePoint>,
 /// https://switchbrew.org/wiki/Glue_services#TimeSpanType
 struct TimeSpanType {
    s64 nanoseconds{};
-    static constexpr s64 ns_per_second{1000000000ULL};

    s64 ToSeconds() const {
-        return nanoseconds / ns_per_second;
+        return nanoseconds / std::nano::den;
    }

    static TimeSpanType FromSeconds(s64 seconds) {
-        return {seconds * ns_per_second};
+        return {seconds * std::nano::den};
    }

-    static TimeSpanType FromTicks(u64 ticks, u64 frequency) {
-        return FromSeconds(static_cast<s64>(ticks) / static_cast<s64>(frequency));
+    template <u64 Frequency>
+    static TimeSpanType FromTicks(u64 ticks) {
+        using TicksToNSRatio = std::ratio<std::nano::den, Frequency>;
+        return {static_cast<s64>(ticks * TicksToNSRatio::num / TicksToNSRatio::den)};
    }
 };
 static_assert(sizeof(TimeSpanType) == 8, "TimeSpanType is incorrect size");
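The templated FromTicks keeps sub-second precision: the old helper truncated the tick count to whole seconds before converting, while the new one scales ticks by an exact ns-per-tick ratio. A small sketch of the difference (not part of the patch; CNTFREQ = 19'200'000 is assumed as elsewhere in this change):

#include <cstdint>
#include <ratio>

constexpr std::intmax_t CNTFREQ = 19'200'000;

// Old behaviour: ticks were truncated to whole seconds first.
constexpr std::int64_t OldFromTicksNs(std::uint64_t ticks, std::uint64_t frequency) {
    return static_cast<std::int64_t>(ticks / frequency) * 1'000'000'000;
}

// New behaviour: exact ns-per-tick ratio keeps the sub-second remainder.
template <std::intmax_t Frequency>
constexpr std::int64_t NewFromTicksNs(std::uint64_t ticks) {
    using TicksToNSRatio = std::ratio<std::nano::den, Frequency>;
    return static_cast<std::int64_t>(ticks * TicksToNSRatio::num / TicksToNSRatio::den);
}

static_assert(OldFromTicksNs(9'600'000, CNTFREQ) == 0);           // half a second lost
static_assert(NewFromTicksNs<CNTFREQ>(9'600'000) == 500'000'000); // 0.5 s preserved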
@@ -10,7 +10,7 @@ namespace Service::Time::Clock {

 TimeSpanType StandardSteadyClockCore::GetCurrentRawTimePoint(Core::System& system) {
    const TimeSpanType ticks_time_span{
-        TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
+        TimeSpanType::FromTicks<Core::Hardware::CNTFREQ>(system.CoreTiming().GetClockTicks())};
    TimeSpanType raw_time_point{setup_value.nanoseconds + ticks_time_span.nanoseconds};

    if (raw_time_point.nanoseconds < cached_raw_time_point.nanoseconds) {
@@ -10,7 +10,7 @@ namespace Service::Time::Clock {

 SteadyClockTimePoint TickBasedSteadyClockCore::GetTimePoint(Core::System& system) {
    const TimeSpanType ticks_time_span{
-        TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
+        TimeSpanType::FromTicks<Core::Hardware::CNTFREQ>(system.CoreTiming().GetClockTicks())};

    return {ticks_time_span.ToSeconds(), GetClockSourceId()};
 }
@@ -240,8 +240,8 @@ void Module::Interface::CalculateMonotonicSystemClockBaseTimePoint(HLERequestCon
    const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)};

    if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) {
-        const auto ticks{Clock::TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(),
-                                                        Core::Hardware::CNTFREQ)};
+        const auto ticks{Clock::TimeSpanType::FromTicks<Core::Hardware::CNTFREQ>(
+            system.CoreTiming().GetClockTicks())};
        const s64 base_time_point{context.offset + current_time_point.time_point -
                                  ticks.ToSeconds()};
        IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2};
@@ -21,8 +21,9 @@ SharedMemory::~SharedMemory() = default;

 void SharedMemory::SetupStandardSteadyClock(const Common::UUID& clock_source_id,
                                            Clock::TimeSpanType current_time_point) {
-    const Clock::TimeSpanType ticks_time_span{Clock::TimeSpanType::FromTicks(
-        system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
+    const Clock::TimeSpanType ticks_time_span{
+        Clock::TimeSpanType::FromTicks<Core::Hardware::CNTFREQ>(
+            system.CoreTiming().GetClockTicks())};
    const Clock::SteadyClockContext context{
        static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds),
        clock_source_id};
@@ -193,18 +193,13 @@ struct GPU::Impl {
    }

    [[nodiscard]] u64 GetTicks() const {
-        // This values were reversed engineered by fincs from NVN
-        // The gpu clock is reported in units of 385/625 nanoseconds
-        constexpr u64 gpu_ticks_num = 384;
-        constexpr u64 gpu_ticks_den = 625;
-
-        u64 nanoseconds = system.CoreTiming().GetCPUTimeNs().count();
+        u64 gpu_tick = system.CoreTiming().GetGPUTicks();
+
        if (Settings::values.use_fast_gpu_time.GetValue()) {
-            nanoseconds /= 256;
+            gpu_tick /= 256;
        }
-        const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
-        const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
-        return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
+
+        return gpu_tick;
    }

    [[nodiscard]] bool IsAsync() const {
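The removed constants encoded the same conversion the new clock now performs centrally: 614.4 MHz GPU ticks relative to a 1 GHz nanosecond count is exactly 384/625 ticks per nanosecond. A small equivalence check (not part of the patch):

#include <cstdint>

// Old hand-rolled scaling, as in the removed code above.
constexpr std::uint64_t OldGpuTicks(std::uint64_t ns) {
    constexpr std::uint64_t num = 384;
    constexpr std::uint64_t den = 625;
    const std::uint64_t ns_num = ns / den;
    const std::uint64_t ns_rem = ns % den;
    return ns_num * num + (ns_rem * num) / den;
}

// New scaling: the ns -> GPU-tick ratio 614'400'000 / 1'000'000'000 reduces to 384/625.
constexpr std::uint64_t NewGpuTicks(std::uint64_t ns) {
    return ns * 384ULL / 625ULL;
}

static_assert(OldGpuTicks(1'000'000'000) == NewGpuTicks(1'000'000'000)); // both 614'400'000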