diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 124149697..0c6b20db3 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -171,7 +171,7 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
         R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
 
         // Update the lock.
-        cur_thread->SetAddressKey(addr, value);
+        cur_thread->SetUserAddressKey(addr, value);
         owner_thread->AddWaiter(cur_thread);
 
         // Begin waiting.
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 43185320d..d791acbe3 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -68,7 +68,7 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
 
         // Add the current thread as a waiter on the owner.
         KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
-        cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+        cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
         owner_thread->AddWaiter(cur_thread);
 
         // Begin waiting to hold the lock.
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index fd6e1d3e6..17fa1a6ed 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -67,9 +67,9 @@ constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
 constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
                                            KernelSlabHeapSize + KernelPageBufferHeapSize;
 
-constexpr bool IsKernelAddressKey(VAddr key) {
-    return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
-}
+//! NB: Use KThread::GetAddressKeyIsKernel().
+//! See explanation for deviation of GetAddressKey.
+bool IsKernelAddressKey(VAddr key) = delete;
 
 constexpr bool IsKernelAddress(VAddr address) {
     return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 21207fe99..84ff3c64b 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -330,7 +330,7 @@ void KThread::Finalize() {
             KThread* const waiter = std::addressof(*it);
 
             // The thread shouldn't be a kernel waiter.
-            ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
+            ASSERT(!waiter->GetAddressKeyIsKernel());
 
             // Clear the lock owner.
             waiter->SetLockOwner(nullptr);
@@ -763,19 +763,6 @@ void KThread::Continue() {
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void KThread::WaitUntilSuspended() {
-    // Make sure we have a suspend requested.
-    ASSERT(IsSuspendRequested());
-
-    // Loop until the thread is not executing on any core.
-    for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
-        KThread* core_thread{};
-        do {
-            core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
-        } while (core_thread == this);
-    }
-}
-
 Result KThread::SetActivity(Svc::ThreadActivity activity) {
     // Lock ourselves.
     KScopedLightLock lk(activity_pause_lock);
@@ -897,7 +884,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
     }
 
     // Keep track of how many kernel waiters we have.
-    if (IsKernelAddressKey(thread->GetAddressKey())) {
+    if (thread->GetAddressKeyIsKernel()) {
         ASSERT((num_kernel_waiters++) >= 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
@@ -911,7 +898,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Keep track of how many kernel waiters we have.
-    if (IsKernelAddressKey(thread->GetAddressKey())) {
+    if (thread->GetAddressKeyIsKernel()) {
         ASSERT((num_kernel_waiters--) > 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
@@ -987,7 +974,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
             KThread* thread = std::addressof(*it);
 
             // Keep track of how many kernel waiters we have.
-            if (IsKernelAddressKey(thread->GetAddressKey())) {
+            if (thread->GetAddressKeyIsKernel()) {
                 ASSERT((num_kernel_waiters--) > 0);
                 KScheduler::SetSchedulerUpdateNeeded(kernel);
             }
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 7cd94a340..9d771de0e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -214,8 +214,6 @@ public:
 
     void Continue();
 
-    void WaitUntilSuspended();
-
     constexpr void SetSyncedIndex(s32 index) {
         synced_index = index;
     }
@@ -607,13 +605,30 @@ public:
         return address_key_value;
     }
 
-    void SetAddressKey(VAddr key) {
-        address_key = key;
+    [[nodiscard]] bool GetAddressKeyIsKernel() const {
+        return address_key_is_kernel;
     }
 
-    void SetAddressKey(VAddr key, u32 val) {
+    //! NB: intentional deviation from official kernel.
+    //
+    // Separate SetAddressKey into user and kernel versions
+    // to cope with arbitrary host pointers making their way
+    // into things.
+
+    void SetUserAddressKey(VAddr key) {
+        address_key = key;
+        address_key_is_kernel = false;
+    }
+
+    void SetUserAddressKey(VAddr key, u32 val) {
         address_key = key;
         address_key_value = val;
+        address_key_is_kernel = false;
+    }
+
+    void SetKernelAddressKey(VAddr key) {
+        address_key = key;
+        address_key_is_kernel = true;
     }
 
     void ClearWaitQueue() {
@@ -772,6 +787,7 @@ private:
     bool debug_attached{};
     s8 priority_inheritance_count{};
    bool resource_limit_release_hint{};
+    bool address_key_is_kernel{};
     StackParameters stack_parameters{};
     Common::SpinLock context_guard{};
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 1fb25f221..d9eafe261 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1198,27 +1198,34 @@ void KernelCore::Suspend(bool suspended) {
     const bool should_suspend{exception_exited || suspended};
     const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
 
-    std::vector<KScopedAutoObject<KThread>> process_threads;
-    {
-        KScopedSchedulerLock sl{*this};
-
-        if (auto* process = CurrentProcess(); process != nullptr) {
-            process->SetActivity(activity);
-
-            if (!should_suspend) {
-                // Runnable now; no need to wait.
-                return;
-            }
-
-            for (auto* thread : process->GetThreadList()) {
-                process_threads.emplace_back(thread);
-            }
-        }
+    //! This refers to the application process, not the current process.
+    KScopedAutoObject<KProcess> process = CurrentProcess();
+    if (process.IsNull()) {
+        return;
     }
 
-    // Wait for execution to stop.
-    for (auto& thread : process_threads) {
-        thread->WaitUntilSuspended();
+    // Set the new activity.
+    process->SetActivity(activity);
+
+    // Wait for process execution to stop.
+    bool must_wait{should_suspend};
+
+    // KernelCore::Suspend must be called from locked context, or we
+    // could race another call to SetActivity, interfering with waiting.
+    while (must_wait) {
+        KScopedSchedulerLock sl{*this};
+
+        // Assume that all threads have finished running.
+        must_wait = false;
+
+        for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+            if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+                process.GetPointerUnsafe()) {
+                // A thread has not finished running yet.
+                // Continue waiting.
+                must_wait = true;
+            }
+        }
     }
 }
 
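
Note for reviewers: the snippet below is a minimal, standalone sketch of the address-key split applied above. The Thread struct and main() here are simplified stand-ins for illustration only, not the actual yuzu types. The motivation is that kernel-internal waits (KLightLock) key on host pointers, which the old IsKernelAddressKey() guest-address-range check cannot classify reliably, while guest-facing waits (KConditionVariable) still key on guest virtual addresses; an explicit flag records which case applies.

#include <cstdint>
#include <iostream>

using VAddr = std::uint64_t;

// Simplified stand-in for KThread's address-key state; not the actual yuzu type.
struct Thread {
    VAddr address_key{};
    std::uint32_t address_key_value{};
    bool address_key_is_kernel{};

    // Guest-facing waits (condition variables, address arbitration) pass guest
    // virtual addresses, so the waiter is not a kernel waiter.
    void SetUserAddressKey(VAddr key, std::uint32_t val) {
        address_key = key;
        address_key_value = val;
        address_key_is_kernel = false;
    }

    // Kernel-internal waits (light locks) pass host pointers cast to an integer,
    // which can land anywhere in the host address space.
    void SetKernelAddressKey(VAddr key) {
        address_key = key;
        address_key_is_kernel = true;
    }

    bool GetAddressKeyIsKernel() const {
        return address_key_is_kernel;
    }
};

int main() {
    Thread waiter;

    // Condition-variable style wait: the key is a guest virtual address.
    waiter.SetUserAddressKey(0x12345000ULL, 1);
    std::cout << "kernel waiter? " << waiter.GetAddressKeyIsKernel() << '\n'; // 0

    // Light-lock style wait: the key is the host address of the lock tag, which
    // a guest-address-range check could misclassify on the host.
    std::uint64_t tag{};
    waiter.SetKernelAddressKey(reinterpret_cast<std::uintptr_t>(&tag));
    std::cout << "kernel waiter? " << waiter.GetAddressKeyIsKernel() << '\n'; // 1
}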
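Likewise, a rough sketch of the check driving the new wait loop in KernelCore::Suspend, again with hypothetical stand-in types (Process, Thread, Scheduler below are illustrations, not the real KProcess/KThread/KScheduler). The real loop takes KScopedSchedulerLock, runs this check across all cores, and repeats until no core is still executing a thread owned by the paused process; this replaces the removed KThread::WaitUntilSuspended, which busy-waited on each suspended thread individually.

#include <array>
#include <cstddef>
#include <iostream>

// Simplified stand-ins for the pieces the wait loop relies on.
struct Process {};

struct Thread {
    const Process* owner{};
    const Process* GetOwnerProcess() const { return owner; }
};

struct Scheduler {
    const Thread* current{};
    const Thread* GetSchedulerCurrentThread() const { return current; }
};

constexpr std::size_t NumCores = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

// One pass of the check: is any core still running a thread owned by `process`?
// Under the scheduler lock, this is repeated until it returns false.
bool AnyCoreStillRunning(const std::array<Scheduler, NumCores>& schedulers,
                         const Process* process) {
    for (const auto& sched : schedulers) {
        const Thread* t = sched.GetSchedulerCurrentThread();
        if (t != nullptr && t->GetOwnerProcess() == process) {
            return true; // a thread of the paused process is still on a core
        }
    }
    return false;
}

int main() {
    Process app;
    Thread app_thread{&app};
    Thread idle_thread{nullptr};

    std::array<Scheduler, NumCores> schedulers{
        Scheduler{&app_thread}, Scheduler{&idle_thread},
        Scheduler{&idle_thread}, Scheduler{&idle_thread}};

    std::cout << AnyCoreStillRunning(schedulers, &app) << '\n'; // 1: core 0 still busy

    schedulers[0].current = &idle_thread; // core 0 switched away from the app
    std::cout << AnyCoreStillRunning(schedulers, &app) << '\n'; // 0: safe to proceed
}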