hle: kernel: svc: Migrate WaitSynchronization.

bunnei 2021-04-17 23:38:20 -07:00
parent 126aaeb6d3
commit 674122038a
2 changed files with 80 additions and 49 deletions

View file

@@ -10,6 +10,7 @@
 #include "common/common_types.h"
 #include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/result.h"
@@ -110,6 +111,16 @@ public:
         return DynamicObjectCast<T>(GetGeneric(handle));
     }
 
+    template <typename T = KAutoObject>
+    KAutoObject* GetObjectImpl(Handle handle) const {
+        if (!IsValid(handle)) {
+            return nullptr;
+        }
+
+        auto* obj = objects_new[static_cast<u16>(handle >> 15)];
+        return obj->DynamicCast<T*>();
+    }
+
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObject(Handle handle) const {
         if (handle == CurrentThread) {
@@ -148,6 +159,48 @@ public:
 
     ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type);
 
+    template <typename T>
+    bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
+        // Try to convert and open all the handles.
+        size_t num_opened;
+        {
+            // Lock the table.
+            KScopedSpinLock lk(lock);
+            for (num_opened = 0; num_opened < num_handles; num_opened++) {
+                // Get the current handle.
+                const auto cur_handle = handles[num_opened];
+
+                // Get the object for the current handle.
+                KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
+                if (cur_object == nullptr) {
+                    break;
+                }
+
+                // Cast the current object to the desired type.
+                T* cur_t = cur_object->DynamicCast<T*>();
+                if (cur_t == nullptr) {
+                    break;
+                }
+
+                // Open a reference to the current object.
+                cur_t->Open();
+                out[num_opened] = cur_t;
+            }
+        }
+
+        // If we converted every object, succeed.
+        if (num_opened == num_handles) {
+            return true;
+        }
+
+        // If we didn't convert every object, close the ones we opened.
+        for (size_t i = 0; i < num_opened; i++) {
+            out[i]->Close();
+        }
+
+        return false;
+    }
+
 private:
     /// Stores the Object referenced by the handle or null if the slot is empty.
     std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
@@ -175,6 +228,8 @@ private:
     /// Head of the free slots linked list.
     u16 next_free_slot = 0;
 
+    mutable KSpinLock lock;
+
     /// Underlying kernel instance that this handle table operates under.
     KernelCore& kernel;
 };
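The helper added above implements an all-or-nothing conversion: each handle is resolved and Open()ed while the table's spin lock is held, and if any handle fails to resolve or to cast to the requested type, every reference opened so far is Close()d again, so the caller never receives a partially opened set. A minimal standalone sketch of that rollback pattern follows; the RefCounted and Table types, std::mutex, and all other names here are illustrative stand-ins, not the real KAutoObject or handle-table implementation.

    #include <cstddef>
    #include <mutex>
    #include <unordered_map>

    // Illustrative stand-in for KAutoObject-style intrusive reference counting.
    struct RefCounted {
        int refs = 1;
        void Open() { ++refs; }
        void Close() { --refs; }
    };

    // Illustrative stand-in for the handle table.
    class Table {
    public:
        // All-or-nothing: open a reference to every handle, or roll back.
        bool GetMultiple(RefCounted** out, const unsigned* handles, std::size_t count) const {
            std::size_t opened = 0;
            {
                // Resolve and open under the table lock, mirroring KScopedSpinLock.
                std::scoped_lock lk{lock};
                for (; opened < count; ++opened) {
                    const auto it = slots.find(handles[opened]);
                    if (it == slots.end()) {
                        break; // invalid handle: stop early
                    }
                    it->second->Open();
                    out[opened] = it->second;
                }
            }
            // Converted everything: the caller now owns one reference per object.
            if (opened == count) {
                return true;
            }
            // Partial failure: undo the references we already opened.
            for (std::size_t i = 0; i < opened; ++i) {
                out[i]->Close();
            }
            return false;
        }

    private:
        std::unordered_map<unsigned, RefCounted*> slots;
        mutable std::mutex lock;
    };

On success the caller owns one reference per returned object and must Close() each of them when done, which is exactly what the SCOPE_EXIT block in the svc.cpp hunk below takes care of.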

View file

@@ -430,65 +430,41 @@ static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
 /// Wait for the given handles to synchronize, timeout after the specified nanoseconds
 static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
-                                      u64 handle_count, s64 nano_seconds) {
-    LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
-              handles_address, handle_count, nano_seconds);
+                                      u64 num_handles, s64 nano_seconds) {
+    LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
+              handles_address, num_handles, nano_seconds);
 
-    auto& memory = system.Memory();
-    if (!memory.IsValidVirtualAddress(handles_address)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Handle address is not a valid virtual address, handle_address=0x{:016X}",
-                  handles_address);
-        return ResultInvalidPointer;
-    }
-
-    static constexpr u64 MaxHandles = 0x40;
-
-    if (handle_count > MaxHandles) {
-        LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}",
-                  MaxHandles, handle_count);
-        return ResultOutOfRange;
-    }
+    // Ensure number of handles is valid.
+    R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
 
     auto& kernel = system.Kernel();
-    std::vector<KSynchronizationObject*> objects(handle_count);
+    std::vector<KSynchronizationObject*> objs(num_handles);
     const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+    Handle* handles = system.Memory().GetPointer<Handle>(handles_address);
 
-    for (u64 i = 0; i < handle_count; ++i) {
-        const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
-
-        bool succeeded{};
-        {
-            auto object = handle_table.Get<KSynchronizationObject>(handle);
-            if (object) {
-                objects[i] = object;
-                succeeded = true;
-            }
-        }
-
-        // TODO(bunnei): WORKAROUND WHILE WE HAVE TWO HANDLE TABLES
-        if (!succeeded) {
-            {
-                auto object = handle_table.GetObject<KSynchronizationObject>(handle);
-                if (object.IsNull()) {
-                    LOG_ERROR(Kernel_SVC, "Object is a nullptr");
-                    return ResultInvalidHandle;
-                }
-                objects[i] = object.GetPointerUnsafe();
-                succeeded = true;
-            }
-        }
+    // Copy user handles.
+    if (num_handles > 0) {
+        // Convert the handles to objects.
+        R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
+                                                                         num_handles),
+                 ResultInvalidHandle);
     }
 
-    return KSynchronizationObject::Wait(kernel, index, objects.data(),
-                                        static_cast<s32>(objects.size()), nano_seconds);
+    // Ensure handles are closed when we're done.
+    SCOPE_EXIT({
+        for (u64 i = 0; i < num_handles; ++i) {
+            objs[i]->Close();
+        }
+    });
+
+    return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()),
+                                        nano_seconds);
 }
 
 static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
-                                        s32 handle_count, u32 timeout_high, s32* index) {
+                                        s32 num_handles, u32 timeout_high, s32* index) {
     const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
-    return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
+    return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
 }
 
 /// Resumes a thread waiting on WaitSynchronization
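The rewritten WaitSynchronization relies on the SCOPE_EXIT scope-guard macro used in the diff so that the references opened by GetMultipleObjects are closed on every exit path, including the early returns produced when an R_UNLESS condition fails. A compact standalone sketch of the same deferred-cleanup idea follows; the ScopeExit class, Object type, and WaitOnAll function are illustrative only and are not the macro's actual implementation.

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Minimal scope guard: runs the stored callable when it leaves scope.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& func) : func_{std::forward<F>(func)} {}
        ~ScopeExit() { func_(); }
        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        F func_;
    };

    struct Object {
        void Close() { std::puts("closed"); }
    };

    int WaitOnAll(std::vector<Object*>& objs) {
        // Close every object no matter which return below is taken, mirroring
        // the SCOPE_EXIT({ ... objs[i]->Close(); ... }) block in the diff above.
        ScopeExit cleanup{[&] {
            for (Object* obj : objs) {
                obj->Close();
            }
        }};

        if (objs.empty()) {
            return -1; // early error return: cleanup still runs
        }
        // ... wait on the objects here ...
        return 0; // normal return: cleanup still runs
    }

Whether the cleanup runs via a guard object or the macro, the effect is the same: the Close() calls are tied to scope exit instead of being repeated before each return statement.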