vk_memory_manager: Misc changes
* Allocate memory in discrete, exponentially increasing chunks until the 128 MiB threshold. Allocations larger than that grow linearly in 256 MiB steps (depending on the required size). This allows small resources to use small allocations.
* Move memory maps to a RAII abstraction. To optimize for debugging tools (like RenderDoc), users map/unmap on usage. If this ever becomes a noticeable overhead (from my profiling it doesn't), we can transparently move to persistent memory maps without harming the API, getting optimal performance for both gameplay and debugging.
* Improve messages on exceptional situations.
* Fix typos: "requeriments" -> "requirements".
* Small style changes.
parent 85bb6a6f08
commit ceb851b590
2 changed files with 143 additions and 89 deletions
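The sizing policy from the first bullet corresponds to the GetAllocationChunkSize() helper added in the diff below: requested sizes up to 128 MiB snap to the next bucket of 16/32/64/128 MiB, and anything larger is rounded up to a multiple of 256 MiB. A minimal standalone sketch of that policy (the main() driver and the plain align-up arithmetic, which stands in for Common::AlignUp, are illustrative only):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iterator>

// Sketch of the allocation policy described in the commit message:
// discrete buckets up to 128 MiB, then 256 MiB-aligned sizes beyond that.
std::uint64_t GetAllocationChunkSize(std::uint64_t required_size) {
    static constexpr std::uint64_t sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20,
                                              128ULL << 20};
    const auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
    if (it != std::end(sizes)) {
        return *it; // Smallest bucket that fits the request.
    }
    constexpr std::uint64_t step = 256ULL << 20; // Same rounding as Common::AlignUp(size, 256 MiB).
    return (required_size + step - 1) / step * step;
}

int main() {
    // A 20 MiB request gets a 32 MiB chunk; a 300 MiB request gets a 512 MiB chunk.
    std::printf("%llu MiB\n",
                static_cast<unsigned long long>(GetAllocationChunkSize(20ULL << 20) >> 20));
    std::printf("%llu MiB\n",
                static_cast<unsigned long long>(GetAllocationChunkSize(300ULL << 20) >> 20));
    return 0;
}

The version in the diff uses Common::AlignUp for the final rounding; the arithmetic above is the same.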
@@ -6,6 +6,7 @@
 #include <optional>
 #include <tuple>
 #include <vector>
 
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
@@ -16,34 +17,32 @@
 
 namespace Vulkan {
 
-// TODO(Rodrigo): Fine tune this number
-constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024;
+namespace {
+
+u64 GetAllocationChunkSize(u64 required_size) {
+    static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
+    auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
+    return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
+}
+
+} // Anonymous namespace
 
 class VKMemoryAllocation final {
 public:
     explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
-                                vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type)
-        : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size},
-          shifted_type{ShiftType(type)}, is_mappable{properties &
-                                                     vk::MemoryPropertyFlagBits::eHostVisible} {
-        if (is_mappable) {
-            const auto dev = device.GetLogical();
-            const auto& dld = device.GetDispatchLoader();
-            base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld));
-        }
-    }
+                                vk::MemoryPropertyFlags properties, u64 allocation_size, u32 type)
+        : device{device}, memory{memory}, properties{properties}, allocation_size{allocation_size},
+          shifted_type{ShiftType(type)} {}
 
     ~VKMemoryAllocation() {
         const auto dev = device.GetLogical();
         const auto& dld = device.GetDispatchLoader();
-        if (is_mappable)
-            dev.unmapMemory(memory, dld);
         dev.free(memory, nullptr, dld);
     }
 
     VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
-        auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size),
-                                        static_cast<u64>(alignment));
+        auto found = TryFindFreeSection(free_iterator, allocation_size,
+                                        static_cast<u64>(commit_size), static_cast<u64>(alignment));
         if (!found) {
             found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
                                        static_cast<u64>(alignment));
@@ -52,8 +51,7 @@ public:
                 return nullptr;
             }
         }
-        u8* address = is_mappable ? base_address + *found : nullptr;
-        auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found,
+        auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
                                                            *found + commit_size);
         commits.push_back(commit.get());
 
@@ -65,12 +63,10 @@ public:
 
     void Free(const VKMemoryCommitImpl* commit) {
         ASSERT(commit);
-        const auto it =
-            std::find_if(commits.begin(), commits.end(),
-                         [&](const auto& stored_commit) { return stored_commit == commit; });
+
+        const auto it = std::find(std::begin(commits), std::end(commits), commit);
         if (it == commits.end()) {
-            LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!");
-            UNREACHABLE();
+            UNREACHABLE_MSG("Freeing unallocated commit!");
             return;
         }
         commits.erase(it);
@@ -88,11 +84,11 @@ private:
     }
 
     /// A memory allocator, it may return a free region between "start" and "end" with the solicited
-    /// requeriments.
+    /// requirements.
     std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
-        u64 iterator = start;
-        while (iterator + size < end) {
-            const u64 try_left = Common::AlignUp(iterator, alignment);
+        u64 iterator = Common::AlignUp(start, alignment);
+        while (iterator + size <= end) {
+            const u64 try_left = iterator;
             const u64 try_right = try_left + size;
 
             bool overlap = false;
@@ -100,7 +96,7 @@ private:
                 const auto [commit_left, commit_right] = commit->interval;
                 if (try_left < commit_right && commit_left < try_right) {
                     // There's an overlap, continue the search where the overlapping commit ends.
-                    iterator = commit_right;
+                    iterator = Common::AlignUp(commit_right, alignment);
                     overlap = true;
                     break;
                 }
@@ -110,6 +106,7 @@ private:
                 return try_left;
             }
         }
+
         // No free regions where found, return an empty optional.
        return std::nullopt;
     }
@@ -117,12 +114,8 @@ private:
     const VKDevice& device;                   ///< Vulkan device.
     const vk::DeviceMemory memory;            ///< Vulkan memory allocation handler.
     const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
-    const u64 alloc_size;                     ///< Size of this allocation.
+    const u64 allocation_size;                ///< Size of this allocation.
     const u32 shifted_type;                   ///< Stored Vulkan type of this allocation, shifted.
-    const bool is_mappable;                   ///< Whether the allocation is mappable.
-
-    /// Base address of the mapped pointer.
-    u8* base_address{};
 
     /// Hints where the next free region is likely going to be.
     u64 free_iterator{};
@@ -132,13 +125,15 @@
 };
 
 VKMemoryManager::VKMemoryManager(const VKDevice& device)
-    : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())},
-      is_memory_unified{GetMemoryUnified(props)} {}
+    : device{device}, properties{device.GetPhysical().getMemoryProperties(
+                          device.GetDispatchLoader())},
+      is_memory_unified{GetMemoryUnified(properties)} {}
 
 VKMemoryManager::~VKMemoryManager() = default;
 
-VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) {
-    ASSERT(reqs.size < ALLOC_CHUNK_SIZE);
+VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirements,
+                                       bool host_visible) {
+    const u64 chunk_size = GetAllocationChunkSize(requirements.size);
 
     // When a host visible commit is asked, search for host visible and coherent, otherwise search
     // for a fast device local type.
@@ -147,32 +142,21 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool
             ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
             : vk::MemoryPropertyFlagBits::eDeviceLocal;
 
-    const auto TryCommit = [&]() -> VKMemoryCommit {
-        for (auto& alloc : allocs) {
-            if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits))
-                continue;
-
-            if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) {
-                return commit;
-            }
-        }
-        return {};
-    };
-
-    if (auto commit = TryCommit(); commit) {
+    if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
         return commit;
     }
 
     // Commit has failed, allocate more memory.
-    if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) {
-        // TODO(Rodrigo): Try to use host memory.
-        LOG_CRITICAL(Render_Vulkan, "Ran out of memory!");
-        UNREACHABLE();
+    if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
+        // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
+        // Allocation has failed, panic.
+        UNREACHABLE_MSG("Ran out of VRAM!");
+        return {};
     }
 
     // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
     // there's a bug.
-    auto commit = TryCommit();
+    auto commit = TryAllocCommit(requirements, wanted_properties);
     ASSERT(commit);
     return commit;
 }
@@ -180,8 +164,7 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool
 VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
     const auto dev = device.GetLogical();
     const auto& dld = device.GetDispatchLoader();
-    const auto requeriments = dev.getBufferMemoryRequirements(buffer, dld);
-    auto commit = Commit(requeriments, host_visible);
+    auto commit = Commit(dev.getBufferMemoryRequirements(buffer, dld), host_visible);
     dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
     return commit;
 }
@@ -189,25 +172,23 @@ VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
 VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
     const auto dev = device.GetLogical();
     const auto& dld = device.GetDispatchLoader();
-    const auto requeriments = dev.getImageMemoryRequirements(image, dld);
-    auto commit = Commit(requeriments, host_visible);
+    auto commit = Commit(dev.getImageMemoryRequirements(image, dld), host_visible);
     dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
     return commit;
 }
 
 bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
                                   u64 size) {
-    const u32 type = [&]() {
-        for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
-            const auto flags = props.memoryTypes[type_index].propertyFlags;
+    const u32 type = [&] {
+        for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
+            const auto flags = properties.memoryTypes[type_index].propertyFlags;
             if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
                 // The type matches in type and in the wanted properties.
                 return type_index;
             }
         }
-        LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!");
-        UNREACHABLE();
-        return 0u;
+        UNREACHABLE_MSG("Couldn't find a compatible memory type!");
+        return 0U;
     }();
 
     const auto dev = device.GetLogical();
@@ -216,19 +197,33 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
     // Try to allocate found type.
     const vk::MemoryAllocateInfo memory_ai(size, type);
     vk::DeviceMemory memory;
-    if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
+    if (const auto res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
         res != vk::Result::eSuccess) {
         LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
         return false;
     }
-    allocs.push_back(
+    allocations.push_back(
        std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
     return true;
 }
 
-/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) {
-    for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) {
-        if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& requirements,
+                                               vk::MemoryPropertyFlags wanted_properties) {
+    for (auto& allocation : allocations) {
+        if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
+            continue;
+        }
+        if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
+            return commit;
+        }
+    }
+    return {};
+}
+
+/*static*/ bool VKMemoryManager::GetMemoryUnified(
+    const vk::PhysicalDeviceMemoryProperties& properties) {
+    for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
+        if (!(properties.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
             // Memory is considered unified when heaps are device local only.
             return false;
         }
@@ -236,17 +231,28 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
     return true;
 }
 
-VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory,
-                                       u8* data, u64 begin, u64 end)
-    : interval(std::make_pair(begin, end)), memory{memory}, allocation{allocation}, data{data} {}
+VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
+                                       vk::DeviceMemory memory, u64 begin, u64 end)
+    : device{device}, interval{begin, end}, memory{memory}, allocation{allocation} {}
 
 VKMemoryCommitImpl::~VKMemoryCommitImpl() {
     allocation->Free(this);
 }
 
-u8* VKMemoryCommitImpl::GetData() const {
-    ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit.");
-    return data;
+MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
+    const auto dev = device.GetLogical();
+    const auto address = reinterpret_cast<u8*>(
+        dev.mapMemory(memory, interval.first + offset_, size, {}, device.GetDispatchLoader()));
+    return MemoryMap{this, address};
+}
+
+void VKMemoryCommitImpl::Unmap() const {
+    const auto dev = device.GetLogical();
+    dev.unmapMemory(memory, device.GetDispatchLoader());
+}
+
+MemoryMap VKMemoryCommitImpl::Map() const {
+    return Map(interval.second - interval.first);
 }
 
 } // namespace Vulkan
@@ -12,6 +12,7 @@
 
 namespace Vulkan {
 
+class MemoryMap;
 class VKDevice;
 class VKMemoryAllocation;
 class VKMemoryCommitImpl;
@@ -21,11 +22,12 @@ using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
 class VKMemoryManager final {
 public:
     explicit VKMemoryManager(const VKDevice& device);
+    VKMemoryManager(const VKMemoryManager&) = delete;
     ~VKMemoryManager();
 
     /**
      * Commits a memory with the specified requeriments.
-     * @param reqs Requeriments returned from a Vulkan call.
+     * @param requirements Requirements returned from a Vulkan call.
      * @param host_visible Signals the allocator that it *must* use host visible and coherent
      *        memory. When passing false, it will try to allocate device local memory.
      * @returns A memory commit.
@@ -47,25 +49,35 @@ private:
     /// Allocates a chunk of memory.
     bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
 
+    /// Tries to allocate a memory commit.
+    VKMemoryCommit TryAllocCommit(const vk::MemoryRequirements& requirements,
+                                  vk::MemoryPropertyFlags wanted_properties);
+
     /// Returns true if the device uses an unified memory model.
-    static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props);
+    static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& properties);
 
     const VKDevice& device;                               ///< Device handler.
-    const vk::PhysicalDeviceMemoryProperties props;       ///< Physical device properties.
+    const vk::PhysicalDeviceMemoryProperties properties;  ///< Physical device properties.
     const bool is_memory_unified;                         ///< True if memory model is unified.
-    std::vector<std::unique_ptr<VKMemoryAllocation>> allocs;      ///< Current allocations.
+    std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
 };
 
 class VKMemoryCommitImpl final {
     friend VKMemoryAllocation;
+    friend MemoryMap;
 
 public:
-    explicit VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, u8* data,
-                                u64 begin, u64 end);
+    explicit VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
+                                vk::DeviceMemory memory, u64 begin, u64 end);
     ~VKMemoryCommitImpl();
 
-    /// Returns the writeable memory map. The commit has to be mappable.
-    u8* GetData() const;
+    /// Maps a memory region and returns a pointer to it.
+    /// It's illegal to have more than one memory map at the same time.
+    MemoryMap Map(u64 size, u64 offset = 0) const;
+
+    /// Maps the whole commit and returns a pointer to it.
+    /// It's illegal to have more than one memory map at the same time.
+    MemoryMap Map() const;
 
     /// Returns the Vulkan memory handler.
     vk::DeviceMemory GetMemory() const {
@@ -78,10 +90,46 @@ public:
     }
 
 private:
+    /// Unmaps memory.
+    void Unmap() const;
+
+    const VKDevice& device;           ///< Vulkan device.
     std::pair<u64, u64> interval{};   ///< Interval where the commit exists.
     vk::DeviceMemory memory;          ///< Vulkan device memory handler.
     VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
-    u8* data{}; ///< Pointer to the host mapped memory, it has the commit offset included.
+};
+
+/// Holds ownership of a memory map.
+class MemoryMap final {
+public:
+    explicit MemoryMap(const VKMemoryCommitImpl* commit, u8* address)
+        : commit{commit}, address{address} {}
+
+    ~MemoryMap() {
+        if (commit) {
+            commit->Unmap();
+        }
+    }
+
+    /// Prematurely releases the memory map.
+    void Release() {
+        commit->Unmap();
+        commit = nullptr;
+    }
+
+    /// Returns the address of the memory map.
+    u8* GetAddress() const {
+        return address;
+    }
+
+    /// Returns the address of the memory map;
+    operator u8*() const {
+        return address;
+    }
+
+private:
+    const VKMemoryCommitImpl* commit{}; ///< Mapped memory commit.
+    u8* address{};                      ///< Address to the mapped memory.
 };
 
 } // namespace Vulkan
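The RAII map/unmap flow from the commit message is what the new MemoryMap class above provides: VKMemoryCommitImpl::Map() returns an object whose destructor calls Unmap(). A hedged caller-side sketch of how it might be used (the UploadToBuffer helper, its parameters, and the include path are assumptions for illustration, not part of this commit):

#include <cstring>  // std::memcpy

#include "video_core/renderer_vulkan/vk_memory_manager.h"  // assumed include path

namespace Vulkan {

// Hypothetical caller: commit host-visible memory for a buffer, then map it
// only for the duration of the copy.
void UploadToBuffer(VKMemoryManager& memory_manager, vk::Buffer buffer, const u8* src,
                    std::size_t size) {
    // Commit(vk::Buffer, bool) allocates (or reuses) device memory and binds it.
    VKMemoryCommit commit = memory_manager.Commit(buffer, /*host_visible=*/true);

    // Map only the region that is needed; when `map` goes out of scope its
    // destructor calls Unmap(), so debugging tools like RenderDoc never see a
    // persistent mapping.
    const MemoryMap map = commit->Map(size);
    std::memcpy(map.GetAddress(), src, size);
}

} // namespace Vulkan

If profiling ever shows the per-use map call to be a cost, the commit message notes that the implementation could switch back to persistent maps behind this same Map()/MemoryMap interface.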