Merge pull request #1042 from Subv/races

Fixed a bunch of race conditions when running in multicore mode.
Authored by bunnei on 2018-08-12 22:05:48 -04:00, committed by GitHub
commit e4ed5bc836
4 changed files with 13 additions and 5 deletions

View file

@@ -14,6 +14,7 @@
 #include "core/core_timing.h"
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
+#include "core/hle/lock.h"
 #include "core/settings.h"

 namespace Core {
@@ -125,6 +126,8 @@ void Cpu::Reschedule() {
     }

     reschedule_pending = false;
+    // Lock the global kernel mutex when we manipulate the HLE state
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
     scheduler->Reschedule();
 }
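Note: HLE::g_hle_lock is a std::recursive_mutex, and the guard added above serializes every manipulation of shared HLE/kernel state behind that one global lock. A minimal standalone sketch of the idiom, with hypothetical names (g_hle_lock here is a local stand-in and the "scheduler state" is just a vector), not the emulator's actual code:

    #include <mutex>
    #include <thread>
    #include <vector>

    // Stand-in for HLE::g_hle_lock: a single global recursive mutex that guards
    // all emulated-kernel (HLE) state. A recursive_mutex lets code that already
    // holds the lock call helpers that lock it again on the same thread.
    static std::recursive_mutex g_hle_lock;
    static std::vector<int> ready_queue; // hypothetical shared scheduler state

    void Reschedule(int core_id) {
        // Take the global HLE lock before touching shared scheduler state,
        // mirroring the lock_guard added to Cpu::Reschedule() in this commit.
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
        ready_queue.push_back(core_id); // any manipulation of HLE state goes here
    }

    int main() {
        // Several "cores" rescheduling concurrently no longer race on the queue.
        std::vector<std::thread> cores;
        for (int i = 0; i < 4; ++i)
            cores.emplace_back(Reschedule, i);
        for (auto& t : cores)
            t.join();
    }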

View file

@@ -79,7 +79,7 @@ private:
     std::shared_ptr<CpuBarrier> cpu_barrier;
     std::shared_ptr<Kernel::Scheduler> scheduler;

-    bool reschedule_pending{};
+    std::atomic<bool> reschedule_pending = false;
     size_t core_index;
 };
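Note: the switch from a plain bool to std::atomic<bool> matters because reschedule_pending can be set from a different host thread than the one that reads and clears it in Cpu::Reschedule(); unsynchronized concurrent access to a non-atomic bool is a data race. A small sketch of the idea (the function names are illustrative, not from the codebase):

    #include <atomic>
    #include <thread>

    // A plain `bool` here would be a data race: one host thread sets the flag
    // while the core's own thread reads and clears it. std::atomic<bool> makes
    // both accesses well-defined without needing a mutex.
    std::atomic<bool> reschedule_pending{false};

    void RequestReschedule() { // may run on any host thread
        reschedule_pending = true;
    }

    void CoreLoopIteration() { // runs on the core's own host thread
        if (reschedule_pending.exchange(false)) {
            // ... perform the actual reschedule here ...
        }
    }

    int main() {
        std::thread requester(RequestReschedule);
        CoreLoopIteration();
        requester.join();
    }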

View file

@@ -135,11 +135,9 @@ void ClearPendingEvents() {
 void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
     ASSERT(event_type != nullptr);
     s64 timeout = GetTicks() + cycles_into_future;
     // If this event needs to be scheduled before the next advance(), force one early
     if (!is_global_timer_sane)
         ForceExceptionCheck(cycles_into_future);
     event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
     std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
 }
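Note: ScheduleEvent itself stays unsynchronized; it keeps event_queue as a binary min-heap ordered by timeout via std::push_heap with std::greater<>, and cross-thread callers are expected to go through the thread-safe variant used later in this commit. A self-contained sketch of that heap discipline; the Event fields mirror the ones visible in the diff, everything else is illustrative:

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <tuple>
    #include <vector>

    struct Event {
        int64_t time;        // absolute timeout in ticks
        uint64_t fifo_order; // tie-breaker so equal timeouts stay FIFO
        // std::greater<> compares with operator>, so order by time, then insertion order.
        friend bool operator>(const Event& a, const Event& b) {
            return std::tie(a.time, a.fifo_order) > std::tie(b.time, b.fifo_order);
        }
    };

    int main() {
        std::vector<Event> event_queue;
        uint64_t event_fifo_id = 0;

        // Insert: append, then restore the min-heap property (earliest event at the front).
        auto schedule = [&](int64_t timeout) {
            event_queue.push_back(Event{timeout, event_fifo_id++});
            std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        };
        schedule(300);
        schedule(100);
        schedule(200);

        // Pop the soonest event: move the front element to the back, then remove it.
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        Event next = event_queue.back(); // next.time == 100
        event_queue.pop_back();
        (void)next;
    }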

View file

@@ -23,6 +23,7 @@
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/thread.h"
+#include "core/hle/lock.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
@@ -104,6 +105,10 @@ void ExitCurrentThread() {
  */
 static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
     const auto proper_handle = static_cast<Handle>(thread_handle);
+
+    // Lock the global kernel mutex when we enter the kernel HLE.
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+
     SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>(proper_handle);
     if (thread == nullptr) {
         LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
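Note: the same HLE::g_hle_lock guard is taken here at the top of the timer callback before it touches the wakeup handle table and thread state. Using a recursive_mutex (rather than a plain std::mutex) keeps this safe even if the callback ends up running on a thread that already holds the lock; a tiny sketch of that property, with hypothetical function names:

    #include <iostream>
    #include <mutex>

    std::recursive_mutex g_hle_lock; // stand-in for HLE::g_hle_lock

    void WakeupCallback() {
        // Safe even when the caller already holds g_hle_lock on this thread:
        // a recursive_mutex may be re-locked by its current owner.
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
        std::cout << "woke a thread\n";
    }

    void RunKernelHle() {
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock); // outer lock
        WakeupCallback();                                        // inner lock, no deadlock
    }

    int main() {
        RunKernelHle();
    }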
@@ -155,7 +160,9 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
     if (nanoseconds == -1)
         return;

-    CoreTiming::ScheduleEvent(CoreTiming::nsToCycles(nanoseconds), ThreadWakeupEventType,
+    // This function might be called from any thread so we have to be cautious and use the
+    // thread-safe version of ScheduleEvent.
+    CoreTiming::ScheduleEventThreadsafe(CoreTiming::nsToCycles(nanoseconds), ThreadWakeupEventType,
                                         callback_handle);
 }
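Note: Thread::WakeAfterDelay can be reached from any host thread, so it now posts through CoreTiming::ScheduleEventThreadsafe instead of the plain ScheduleEvent. The diff does not show how the thread-safe variant is implemented; one plausible shape, sketched under the assumption that cross-thread requests are staged in a mutex-protected side queue and later drained by the timing thread, is below (every name except the ScheduleEventThreadsafe role is illustrative):

    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct PendingEvent {
        int64_t cycles_into_future;
        uint64_t userdata;
    };

    static std::mutex ts_queue_mutex;          // guards the cross-thread queue
    static std::vector<PendingEvent> ts_queue; // events posted by other threads

    // Illustrative stand-in for a thread-safe ScheduleEvent: callers on any thread
    // only touch the side queue under a lock, never the timing thread's heap.
    void ScheduleEventThreadsafe(int64_t cycles_into_future, uint64_t userdata) {
        std::lock_guard<std::mutex> lock(ts_queue_mutex);
        ts_queue.push_back({cycles_into_future, userdata});
    }

    // Run on the timing thread, which owns the real (unsynchronized) event heap:
    // drain the side queue and merge its entries into the local queue.
    void MoveEvents(std::vector<PendingEvent>& event_queue) {
        std::lock_guard<std::mutex> lock(ts_queue_mutex);
        for (const auto& ev : ts_queue)
            event_queue.push_back(ev);
        ts_queue.clear();
    }

    int main() {
        std::vector<PendingEvent> event_queue; // the timing thread's own queue
        ScheduleEventThreadsafe(1000, 42);     // could be called from any host thread
        MoveEvents(event_queue);               // the timing thread picks it up later
    }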