hle: kernel: Use C++ style comments in KScheduler, etc.

This commit is contained in:
bunnei 2020-12-04 23:37:35 -08:00
parent bc59ca92b6
commit b1326d9230
4 changed files with 136 additions and 152 deletions

View file

@ -87,15 +87,15 @@ public:
}
constexpr bool PushBack(s32 core, Member* member) {
/* Get the entry associated with the member. */
// Get the entry associated with the member.
Entry& member_entry = member->GetPriorityQueueEntry(core);
/* Get the entry associated with the end of the queue. */
// Get the entry associated with the end of the queue.
Member* tail = this->root[core].GetPrev();
Entry& tail_entry =
(tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
/* Link the entries. */
// Link the entries.
member_entry.SetPrev(tail);
member_entry.SetNext(nullptr);
tail_entry.SetNext(member);
@ -105,15 +105,15 @@ public:
}
constexpr bool PushFront(s32 core, Member* member) {
/* Get the entry associated with the member. */
// Get the entry associated with the member.
Entry& member_entry = member->GetPriorityQueueEntry(core);
/* Get the entry associated with the front of the queue. */
// Get the entry associated with the front of the queue.
Member* head = this->root[core].GetNext();
Entry& head_entry =
(head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
/* Link the entries. */
// Link the entries.
member_entry.SetPrev(nullptr);
member_entry.SetNext(head);
head_entry.SetPrev(member);
@ -123,10 +123,10 @@ public:
}
constexpr bool Remove(s32 core, Member* member) {
/* Get the entry associated with the member. */
// Get the entry associated with the member.
Entry& member_entry = member->GetPriorityQueueEntry(core);
/* Get the entries associated with next and prev. */
// Get the entries associated with next and prev.
Member* prev = member_entry.GetPrev();
Member* next = member_entry.GetNext();
Entry& prev_entry =
@ -134,7 +134,7 @@ public:
Entry& next_entry =
(next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
/* Unlink. */
// Unlink.
prev_entry.SetNext(next);
next_entry.SetPrev(prev);
@ -152,8 +152,7 @@ public:
Common::BitSet64<NumPriority> available_priorities[NumCores];
public:
constexpr KPriorityQueueImpl() : queues(), available_priorities() { /* ... */
}
constexpr KPriorityQueueImpl() : queues(), available_priorities() {}
constexpr void PushBack(s32 priority, s32 core, Member* member) {
ASSERT(IsValidCore(core));
@ -267,14 +266,14 @@ private:
constexpr void PushBack(s32 priority, Member* member) {
ASSERT(IsValidPriority(priority));
/* Push onto the scheduled queue for its core, if we can. */
// Push onto the scheduled queue for its core, if we can.
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
this->scheduled_queue.PushBack(priority, core, member);
ClearAffinityBit(affinity, core);
}
/* And suggest the thread for all other cores. */
// And suggest the thread for all other cores.
while (affinity) {
this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
}
@ -283,15 +282,15 @@ private:
constexpr void PushFront(s32 priority, Member* member) {
ASSERT(IsValidPriority(priority));
/* Push onto the scheduled queue for its core, if we can. */
// Push onto the scheduled queue for its core, if we can.
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
this->scheduled_queue.PushFront(priority, core, member);
ClearAffinityBit(affinity, core);
}
/* And suggest the thread for all other cores. */
/* Note: Nintendo pushes onto the back of the suggested queue, not the front. */
// And suggest the thread for all other cores.
// Note: Nintendo pushes onto the back of the suggested queue, not the front.
while (affinity) {
this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
}
@ -300,24 +299,24 @@ private:
constexpr void Remove(s32 priority, Member* member) {
ASSERT(IsValidPriority(priority));
/* Remove from the scheduled queue for its core. */
// Remove from the scheduled queue for its core.
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
this->scheduled_queue.Remove(priority, core, member);
ClearAffinityBit(affinity, core);
}
/* Remove from the suggested queue for all other cores. */
// Remove from the suggested queue for all other cores.
while (affinity) {
this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
}
}
public:
constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */
constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { // ...
}
/* Getters. */
// Getters.
constexpr Member* GetScheduledFront(s32 core) const {
return this->scheduled_queue.GetFront(core);
}
@ -346,7 +345,7 @@ public:
return member->GetPriorityQueueEntry(core).GetNext();
}
/* Mutators. */
// Mutators.
constexpr void PushBack(Member* member) {
this->PushBack(member->GetPriority(), member);
}
@ -364,15 +363,15 @@ public:
member);
}
/* First class fancy operations. */
// First class fancy operations.
constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
ASSERT(IsValidPriority(prev_priority));
/* Remove the member from the queues. */
// Remove the member from the queues.
const s32 new_priority = member->GetPriority();
this->Remove(prev_priority, member);
/* And enqueue. If the member is running, we want to keep it running. */
// And enqueue. If the member is running, we want to keep it running.
if (is_running) {
this->PushFront(new_priority, member);
} else {
@ -382,12 +381,12 @@ public:
constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
Member* member) {
/* Get the new information. */
// Get the new information.
const s32 priority = member->GetPriority();
const AffinityMaskType& new_affinity = member->GetAffinityMask();
const s32 new_core = member->GetActiveCore();
/* Remove the member from all queues it was in before. */
// Remove the member from all queues it was in before.
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (prev_affinity.GetAffinity(core)) {
if (core == prev_core) {
@ -398,7 +397,7 @@ public:
}
}
/* And add the member to all queues it should be in now. */
// And add the member to all queues it should be in now.
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (new_affinity.GetAffinity(core)) {
if (core == new_core) {
@ -411,18 +410,18 @@ public:
}
constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
/* Get the new information. */
// Get the new information.
const s32 new_core = member->GetActiveCore();
const s32 priority = member->GetPriority();
/* We don't need to do anything if the core is the same. */
// We don't need to do anything if the core is the same.
if (prev_core != new_core) {
/* Remove from the scheduled queue for the previous core. */
// Remove from the scheduled queue for the previous core.
if (prev_core >= 0) {
this->scheduled_queue.Remove(priority, prev_core, member);
}
/* Remove from the suggested queue and add to the scheduled queue for the new core. */
// Remove from the suggested queue and add to the scheduled queue for the new core.
if (new_core >= 0) {
this->suggested_queue.Remove(priority, new_core, member);
if (to_front) {
@ -432,7 +431,7 @@ public:
}
}
/* Add to the suggested queue for the previous core. */
// Add to the suggested queue for the previous core.
if (prev_core >= 0) {
this->suggested_queue.PushBack(priority, prev_core, member);
}

View file

@ -84,35 +84,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
/* Clear that we need to update. */
// Clear that we need to update.
ClearSchedulerUpdateNeeded(kernel);
u64 cores_needing_scheduling = 0, idle_cores = 0;
Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);
/* We want to go over all cores, finding the highest priority thread and determining if
* scheduling is needed for that core. */
// We want to go over all cores, finding the highest priority thread and determining if
// scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id);
if (top_thread != nullptr) {
///* If the thread has no waiters, we need to check if the process has a thread pinned.
///*/
// if (top_thread->GetNumKernelWaiters() == 0) {
// if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
// if (Thread* pinned = parent->GetPinnedThread(core_id);
// pinned != nullptr && pinned != top_thread) {
// /* We prefer our parent's pinned thread if possible. However, we also
// don't
// * want to schedule un-runnable threads. */
// if (pinned->GetRawState() == Thread::ThreadState_Runnable) {
// top_thread = pinned;
// } else {
// top_thread = nullptr;
// }
// }
// }
//}
// If the thread has no waiters, we need to check if the process has a thread pinned.
// TODO(bunnei): Implement thread pinning
} else {
idle_cores |= (1ULL << core_id);
}
@ -122,27 +107,27 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
}
/* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
u32 core_id = Common::CountTrailingZeroes64(idle_cores);
if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;
/* While we have a suggested thread, try to migrate it! */
// While we have a suggested thread, try to migrate it!
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_thread =
(suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
top_thread != suggested) {
/* Make sure we're not dealing with threads too high priority for migration. */
// Make sure we're not dealing with threads too high priority for migration.
if (top_thread != nullptr &&
top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
break;
}
/* The suggested thread isn't bound to its core, so we can migrate it! */
// The suggested thread isn't bound to its core, so we can migrate it!
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
@ -152,30 +137,30 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
break;
}
/* Note this core as a candidate for migration. */
// Note this core as a candidate for migration.
ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
migration_candidates[num_candidates++] = suggested_core;
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
* candidate cores' top threads. */
// If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
// candidate cores' top threads.
if (suggested == nullptr) {
for (size_t i = 0; i < num_candidates; i++) {
/* Check if there's some other thread that can run on the candidate core. */
// Check if there's some other thread that can run on the candidate core.
const s32 candidate_core = migration_candidates[i];
suggested = top_threads[candidate_core];
if (Thread* next_on_candidate_core =
priority_queue.GetScheduledNext(candidate_core, suggested);
next_on_candidate_core != nullptr) {
/* The candidate core can run some other thread! We'll migrate its current
* top thread to us. */
// The candidate core can run some other thread! We'll migrate its current
// top thread to us.
top_threads[candidate_core] = next_on_candidate_core;
cores_needing_scheduling |=
kernel.Scheduler(candidate_core)
.UpdateHighestPriorityThread(top_threads[candidate_core]);
/* Perform the migration. */
// Perform the migration.
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(candidate_core, suggested);
@ -199,20 +184,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
u32 old_state) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
/* Check if the state has changed, because if it hasn't there's nothing to do. */
// Check if the state has changed, because if it hasn't there's nothing to do.
const auto cur_state = thread->scheduling_state;
if (cur_state == old_state) {
return;
}
/* Update the priority queues. */
// Update the priority queues.
if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
/* If we were previously runnable, then we're not runnable now, and we should remove. */
// If we were previously runnable, then we're not runnable now, and we should remove.
GetPriorityQueue(kernel).Remove(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
} else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
/* If we're now runnable, then we weren't previously, and we should add. */
// If we're now runnable, then we weren't previously, and we should add.
GetPriorityQueue(kernel).PushBack(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
@ -224,7 +209,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
/* If the thread is runnable, we want to change its priority in the queue. */
// If the thread is runnable, we want to change its priority in the queue.
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
GetPriorityQueue(kernel).ChangePriority(
old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
@ -238,7 +223,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
s32 old_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
/* If the thread is runnable, we want to change its affinity in the queue. */
// If the thread is runnable, we want to change its affinity in the queue.
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
IncrementScheduledCount(thread);
@ -249,11 +234,11 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
ASSERT(system.GlobalSchedulerContext().IsLocked());
/* Get a reference to the priority queue. */
// Get a reference to the priority queue.
auto& kernel = system.Kernel();
auto& priority_queue = GetPriorityQueue(kernel);
/* Rotate the front of the queue to the end. */
// Rotate the front of the queue to the end.
Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
Thread* next_thread = nullptr;
if (top_thread != nullptr) {
@ -264,27 +249,27 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
}
}
/* While we have a suggested thread, try to migrate it! */
// While we have a suggested thread, try to migrate it!
{
Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
/* If the next thread is a new thread that has been waiting longer than our
* suggestion, we prefer it to our suggestion. */
// If the next thread is a new thread that has been waiting longer than our
// suggestion, we prefer it to our suggestion.
if (top_thread != next_thread && next_thread != nullptr &&
next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
suggested = nullptr;
break;
}
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
* to the front of the queue. */
// If we're allowed to do a migration, do one.
// NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
// to the front of the queue.
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
@ -294,38 +279,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
}
}
/* Get the next suggestion. */
// Get the next suggestion.
suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
}
}
/* Now that we might have migrated a thread with the same priority, check if we can do better.
*/
// Now that we might have migrated a thread with the same priority, check if we can do better.
{
Thread* best_thread = priority_queue.GetScheduledFront(core_id);
if (best_thread == GetCurrentThread()) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
/* If the best thread we can choose has a priority the same or worse than ours, try to
* migrate a higher priority thread. */
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* If the suggestion's priority is the same as ours, don't bother. */
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
break;
}
/* Check if the suggested thread is the top thread on its core. */
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
* suggestion to the front of the queue. */
// If we're allowed to do a migration, do one.
// NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
// suggestion to the front of the queue.
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >=
HighestCoreMigrationAllowedPriority) {
@ -336,13 +321,13 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
}
}
/* Get the next suggestion. */
// Get the next suggestion.
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
}
}
/* After a rotation, we need a scheduler update. */
// After a rotation, we need a scheduler update.
SetSchedulerUpdateNeeded(kernel);
}
@ -392,38 +377,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
void KScheduler::YieldWithoutCoreMigration() {
auto& kernel = system.Kernel();
/* Validate preconditions. */
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
/* Get the current thread and process. */
// Get the current thread and process.
Thread& cur_thread = *GetCurrentThread();
Process& cur_process = *kernel.CurrentProcess();
/* If the thread's yield count matches, there's nothing for us to do. */
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
return;
}
/* Get a reference to the priority queue. */
// Get a reference to the priority queue.
auto& priority_queue = GetPriorityQueue(kernel);
/* Perform the yield. */
// Perform the yield.
{
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.scheduling_state;
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
/* Put the current thread at the back of the queue. */
// Put the current thread at the back of the queue.
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
/* If the next thread is different, we have an update to perform. */
// If the next thread is different, we have an update to perform.
if (next_thread != std::addressof(cur_thread)) {
SetSchedulerUpdateNeeded(kernel);
} else {
/* Otherwise, set the thread's yield count so that we won't waste work until the
* process is scheduled again. */
// Otherwise, set the thread's yield count so that we won't waste work until the
// process is scheduled again.
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
}
}
@ -433,40 +418,40 @@ void KScheduler::YieldWithoutCoreMigration() {
void KScheduler::YieldWithCoreMigration() {
auto& kernel = system.Kernel();
/* Validate preconditions. */
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
/* Get the current thread and process. */
// Get the current thread and process.
Thread& cur_thread = *GetCurrentThread();
Process& cur_process = *kernel.CurrentProcess();
/* If the thread's yield count matches, there's nothing for us to do. */
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
return;
}
/* Get a reference to the priority queue. */
// Get a reference to the priority queue.
auto& priority_queue = GetPriorityQueue(kernel);
/* Perform the yield. */
// Perform the yield.
{
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.scheduling_state;
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
/* Get the current active core. */
// Get the current active core.
const s32 core_id = cur_thread.GetActiveCore();
/* Put the current thread at the back of the queue. */
// Put the current thread at the back of the queue.
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
/* While we have a suggested thread, try to migrate it! */
// While we have a suggested thread, try to migrate it!
bool recheck = false;
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* Check if the suggested thread is the thread running on its core. */
// Check if the suggested thread is the thread running on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* running_on_suggested_core =
@ -474,10 +459,10 @@ void KScheduler::YieldWithCoreMigration() {
? kernel.Scheduler(suggested_core).state.highest_priority_thread
: nullptr;
running_on_suggested_core != suggested) {
/* If the current thread's priority is higher than our suggestion's we prefer
* the next thread to the suggestion. */
/* We also prefer the next thread when the current thread's priority is equal to
* the suggestions, but the next thread has been waiting longer. */
// If the current thread's priority is higher than our suggestion's we prefer
// the next thread to the suggestion. We also prefer the next thread when the
// current thread's priority is equal to the suggestions, but the next thread
// has been waiting longer.
if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
(suggested->GetPriority() == cur_thread.GetPriority() &&
next_thread != std::addressof(cur_thread) &&
@ -486,9 +471,9 @@ void KScheduler::YieldWithCoreMigration() {
break;
}
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
* suggestion to the front of the queue. */
// If we're allowed to do a migration, do one.
// NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
// suggestion to the front of the queue.
if (running_on_suggested_core == nullptr ||
running_on_suggested_core->GetPriority() >=
HighestCoreMigrationAllowedPriority) {
@ -497,23 +482,23 @@ void KScheduler::YieldWithCoreMigration() {
IncrementScheduledCount(suggested);
break;
} else {
/* We couldn't perform a migration, but we should check again on a future
* yield. */
// We couldn't perform a migration, but we should check again on a future
// yield.
recheck = true;
}
}
/* Get the next suggestion. */
// Get the next suggestion.
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If we still have a suggestion or the next thread is different, we have an update to
* perform. */
// If we still have a suggestion or the next thread is different, we have an update to
// perform.
if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
SetSchedulerUpdateNeeded(kernel);
} else if (!recheck) {
/* Otherwise if we don't need to re-check, set the thread's yield count so that we
* won't waste work until the process is scheduled again. */
// Otherwise if we don't need to re-check, set the thread's yield count so that we
// won't waste work until the process is scheduled again.
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
}
}
@ -523,48 +508,48 @@ void KScheduler::YieldWithCoreMigration() {
void KScheduler::YieldToAnyThread() {
auto& kernel = system.Kernel();
/* Validate preconditions. */
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
/* Get the current thread and process. */
// Get the current thread and process.
Thread& cur_thread = *GetCurrentThread();
Process& cur_process = *kernel.CurrentProcess();
/* If the thread's yield count matches, there's nothing for us to do. */
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
return;
}
/* Get a reference to the priority queue. */
// Get a reference to the priority queue.
auto& priority_queue = GetPriorityQueue(kernel);
/* Perform the yield. */
// Perform the yield.
{
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.scheduling_state;
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
/* Get the current active core. */
// Get the current active core.
const s32 core_id = cur_thread.GetActiveCore();
/* Migrate the current thread to core -1. */
// Migrate the current thread to core -1.
cur_thread.SetActiveCore(-1);
priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
/* If there's nothing scheduled, we can try to perform a migration. */
// If there's nothing scheduled, we can try to perform a migration.
if (priority_queue.GetScheduledFront(core_id) == nullptr) {
/* While we have a suggested thread, try to migrate it! */
// While we have a suggested thread, try to migrate it!
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
/* If we're allowed to do a migration, do one. */
// If we're allowed to do a migration, do one.
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >=
HighestCoreMigrationAllowedPriority) {
@ -573,25 +558,25 @@ void KScheduler::YieldToAnyThread() {
IncrementScheduledCount(suggested);
}
/* Regardless of whether we migrated, we had a candidate, so we're done. */
// Regardless of whether we migrated, we had a candidate, so we're done.
break;
}
/* Get the next suggestion. */
// Get the next suggestion.
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If the suggestion is different from the current thread, we need to perform an
* update. */
// If the suggestion is different from the current thread, we need to perform an
// update.
if (suggested != std::addressof(cur_thread)) {
SetSchedulerUpdateNeeded(kernel);
} else {
/* Otherwise, set the thread's yield count so that we won't waste work until the
* process is scheduled again. */
// Otherwise, set the thread's yield count so that we won't waste work until the
// process is scheduled again.
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
}
} else {
/* Otherwise, we have an update to perform. */
// Otherwise, we have an update to perform.
SetSchedulerUpdateNeeded(kernel);
}
}

View file

@ -34,19 +34,19 @@ public:
void Lock() {
if (this->IsLockedByCurrentThread()) {
/* If we already own the lock, we can just increment the count. */
// If we already own the lock, we can just increment the count.
ASSERT(this->lock_count > 0);
this->lock_count++;
} else {
/* Otherwise, we want to disable scheduling and acquire the spinlock. */
// Otherwise, we want to disable scheduling and acquire the spinlock.
SchedulerType::DisableScheduling(kernel);
this->spin_lock.lock();
/* For debug, ensure that our state is valid. */
// For debug, ensure that our state is valid.
ASSERT(this->lock_count == 0);
ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
/* Increment count, take ownership. */
// Increment count, take ownership.
this->lock_count = 1;
this->owner_thread = kernel.GetCurrentEmuThreadID();
}
@ -56,18 +56,18 @@ public:
ASSERT(this->IsLockedByCurrentThread());
ASSERT(this->lock_count > 0);
/* Release an instance of the lock. */
// Release an instance of the lock.
if ((--this->lock_count) == 0) {
/* We're no longer going to hold the lock. Take note of what cores need scheduling. */
// We're no longer going to hold the lock. Take note of what cores need scheduling.
const u64 cores_needing_scheduling =
SchedulerType::UpdateHighestPriorityThreads(kernel);
Core::EmuThreadHandle leaving_thread = owner_thread;
/* Note that we no longer hold the lock, and unlock the spinlock. */
// Note that we no longer hold the lock, and unlock the spinlock.
this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
this->spin_lock.unlock();
/* Enable scheduling, and perform a rescheduling operation. */
// Enable scheduling, and perform a rescheduling operation.
SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
}
}

View file

@ -28,17 +28,17 @@ public:
: kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
event_handle = InvalidHandle;
/* Lock the scheduler. */
// Lock the scheduler.
kernel.GlobalSchedulerContext().scheduler_lock.Lock();
}
~KScopedSchedulerLockAndSleep() {
/* Register the sleep. */
// Register the sleep.
if (this->timeout_tick > 0) {
kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
}
/* Unlock the scheduler. */
// Unlock the scheduler.
kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
}