From 345b9e6a08f7ce99bb71f7184157ed0fe22bf27d Mon Sep 17 00:00:00 2001
From: bunnei
Date: Mon, 5 Sep 2022 17:51:50 -0700
Subject: [PATCH] core: hle: kernel: Add KDynamicPageManager.

---
 src/core/CMakeLists.txt                      |   1 +
 src/core/hle/kernel/k_dynamic_page_manager.h | 136 +++++++++++++++++++
 2 files changed, 137 insertions(+)
 create mode 100644 src/core/hle/kernel/k_dynamic_page_manager.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index abeb5859b..2bb4dea6a 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,6 +190,7 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_dynamic_page_manager.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
new file mode 100644
index 000000000..88d53776a
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_page_bitmap.h"
+#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+class KDynamicPageManager {
+public:
+    class PageBuffer {
+    private:
+        u8 m_buffer[PageSize];
+    };
+    static_assert(sizeof(PageBuffer) == PageSize);
+
+public:
+    KDynamicPageManager() = default;
+
+    template <typename T>
+    T* GetPointer(VAddr addr) {
+        return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
+    }
+
+    template <typename T>
+    const T* GetPointer(VAddr addr) const {
+        return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
+    }
+
+    Result Initialize(VAddr addr, size_t sz) {
+        // We need to have positive size.
+        R_UNLESS(sz > 0, ResultOutOfMemory);
+        m_backing_memory.resize(sz);
+
+        // Calculate management overhead.
+        const size_t management_size =
+            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
+        const size_t allocatable_size = sz - management_size;
+
+        // Set tracking fields.
+        m_address = addr;
+        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
+        m_count = allocatable_size / sizeof(PageBuffer);
+        R_UNLESS(m_count > 0, ResultOutOfMemory);
+
+        // Clear the management region.
+        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
+        std::memset(management_ptr, 0, management_size);
+
+        // Initialize the bitmap.
+        m_page_bitmap.Initialize(management_ptr, m_count);
+
+        // Free the pages to the bitmap.
+        for (size_t i = 0; i < m_count; i++) {
+            // Ensure the freed page is all-zero.
+            std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
+
+            // Set the bit for the free page.
+            m_page_bitmap.SetBit(i);
+        }
+
+        return ResultSuccess;
+    }
+
+    VAddr GetAddress() const {
+        return m_address;
+    }
+    size_t GetSize() const {
+        return m_size;
+    }
+    size_t GetUsed() const {
+        return m_used;
+    }
+    size_t GetPeak() const {
+        return m_peak;
+    }
+    size_t GetCount() const {
+        return m_count;
+    }
+
+    PageBuffer* Allocate() {
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Find a random free block.
+        s64 soffset = m_page_bitmap.FindFreeBlock(true);
+        if (soffset < 0) [[unlikely]] {
+            return nullptr;
+        }
+
+        const size_t offset = static_cast<size_t>(soffset);
+
+        // Update our tracking.
+        m_page_bitmap.ClearBit(offset);
+        m_peak = std::max(m_peak, (++m_used));
+
+        return GetPointer<PageBuffer>(m_address) + offset;
+    }
+
+    void Free(PageBuffer* pb) {
+        // Ensure all pages in the heap are zero.
+        std::memset(pb, 0, PageSize);
+
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Set the bit for the free page.
+        size_t offset = (reinterpret_cast<VAddr>(pb) - m_address) / sizeof(PageBuffer);
+        m_page_bitmap.SetBit(offset);
+
+        // Decrement our used count.
+        --m_used;
+    }
+
+private:
+    KSpinLock m_lock;
+    KPageBitmap m_page_bitmap;
+    size_t m_used{};
+    size_t m_peak{};
+    size_t m_count{};
+    VAddr m_address{};
+    size_t m_size{};
+
+    // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
+    std::vector<u8> m_backing_memory;
+};
+
+} // namespace Kernel
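
Reviewer note: a minimal usage sketch for the allocator added above, assuming a caller that
owns a KDynamicPageManager directly. The base address, region size, and the surrounding
function name are illustrative placeholders, not code taken from yuzu; only the
Initialize/Allocate/Free API comes from the patch itself.

    // Hypothetical caller sketch; the values and the function name are assumptions.
    #include "core/hle/kernel/k_dynamic_page_manager.h"

    void ExampleUse() {
        Kernel::KDynamicPageManager manager;

        // Assumed base address and size; part of the region is consumed by the
        // bitmap's management words, the rest becomes page-sized PageBuffer slots.
        const VAddr base = 0x10000000;
        const size_t size = 64 * Kernel::PageSize;

        if (manager.Initialize(base, size).IsSuccess()) {
            // Allocate() returns a zeroed page tracked by the bitmap, or nullptr if none is free.
            if (auto* page = manager.Allocate(); page != nullptr) {
                // ... use the page ...
                manager.Free(page); // re-zeroes the page and marks its bit free again
            }
        }
    }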