diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp
index 3c214ec00..2a1ae1bb3 100644
--- a/src/audio_core/device/device_session.cpp
+++ b/src/audio_core/device/device_session.cpp
@@ -8,6 +8,7 @@
 #include "audio_core/sink/sink_stream.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/guest_memory.h"
 #include "core/memory.h"
 
 #include "core/hle/kernel/k_process.h"
diff --git a/src/audio_core/renderer/command/data_source/decode.cpp b/src/audio_core/renderer/command/data_source/decode.cpp
index 911dae3c1..905613a5a 100644
--- a/src/audio_core/renderer/command/data_source/decode.cpp
+++ b/src/audio_core/renderer/command/data_source/decode.cpp
@@ -9,6 +9,7 @@
 #include "common/fixed_point.h"
 #include "common/logging/log.h"
 #include "common/scratch_buffer.h"
+#include "core/guest_memory.h"
 #include "core/memory.h"
 
 namespace AudioCore::Renderer {
diff --git a/src/common/common_types.h b/src/common/common_types.h
index 0fc225aff..ae04c4d60 100644
--- a/src/common/common_types.h
+++ b/src/common/common_types.h
@@ -45,6 +45,7 @@ using f32 = float;  ///< 32-bit floating point
 using f64 = double; ///< 64-bit floating point
 
 using VAddr = u64;    ///< Represents a pointer in the userspace virtual address space.
+using DAddr = u64;    ///< Represents a pointer in the device-specific virtual address space.
 using PAddr = u64;    ///< Represents a pointer in the ARM11 physical address space.
 using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space.
 
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 16ddb5e90..4ff2c1bb7 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -37,6 +37,8 @@ add_library(core STATIC
     debugger/gdbstub_arch.h
     debugger/gdbstub.cpp
     debugger/gdbstub.h
+    device_memory_manager.h
+    device_memory_manager.inc
     device_memory.cpp
     device_memory.h
     file_sys/fssystem/fs_i_storage.h
@@ -609,6 +611,8 @@ add_library(core STATIC
     hle/service/ns/pdm_qry.h
     hle/service/nvdrv/core/container.cpp
     hle/service/nvdrv/core/container.h
+    hle/service/nvdrv/core/heap_mapper.cpp
+    hle/service/nvdrv/core/heap_mapper.h
     hle/service/nvdrv/core/nvmap.cpp
     hle/service/nvdrv/core/nvmap.h
     hle/service/nvdrv/core/syncpoint_manager.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 461eea9c8..2392fe136 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -28,6 +28,7 @@
 #include "core/file_sys/savedata_factory.h"
 #include "core/file_sys/vfs_concat.h"
 #include "core/file_sys/vfs_real.h"
+#include "core/gpu_dirty_memory_manager.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_resource_limit.h"
@@ -565,6 +566,9 @@ struct System::Impl {
     std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
     std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_cpu{};
 
+    std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES>
+        gpu_dirty_memory_managers;
+
     std::deque<std::vector<u8>> user_channel;
 };
 
@@ -651,8 +655,14 @@ size_t System::GetCurrentHostThreadID() const {
     return impl->kernel.GetCurrentHostThreadID();
 }
 
-void System::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
-    return this->ApplicationProcess()->GatherGPUDirtyMemory(callback);
+std::span<GPUDirtyMemoryManager> System::GetGPUDirtyMemoryManager() {
+    return impl->gpu_dirty_memory_managers;
+}
+
+void System::GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback) {
+    for (auto& manager : impl->gpu_dirty_memory_managers) {
+        manager.Gather(callback);
+    }
 }
 
 PerfStatsResults System::GetAndResetPerfStats() {
diff --git a/src/core/core.h b/src/core/core.h
index ba5add0dc..80446f385 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -8,6 +8,7 @@
 #include <functional>
 #include <memory>
 #include <mutex>
+#include <span>
 #include <string>
 #include <vector>
 
@@ -116,6 +117,7 @@ class CpuManager;
 class Debugger;
 class DeviceMemory;
 class ExclusiveMonitor;
+class GPUDirtyMemoryManager;
 class PerfStats;
 class Reporter;
 class SpeedLimiter;
@@ -224,7 +226,9 @@ public:
     /// Prepare the core emulation for a reschedule
     void PrepareReschedule(u32 core_index);
 
-    void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
+    std::span<GPUDirtyMemoryManager> GetGPUDirtyMemoryManager();
+
+    void GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback);
 
     [[nodiscard]] size_t GetCurrentHostThreadID() const;
 
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index 13388b73e..11bf0e326 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -31,6 +31,12 @@ public:
                DramMemoryMap::Base;
     }
 
+    template <typename T>
+    PAddr GetRawPhysicalAddr(const T* ptr) const {
+        return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) -
+                                  reinterpret_cast<uintptr_t>(buffer.BackingBasePointer()));
+    }
+
     template <typename T>
     T* GetPointer(Common::PhysicalAddress addr) {
         return reinterpret_cast<T*>(buffer.BackingBasePointer() +
@@ -43,6 +49,16 @@ public:
                                     (GetInteger(addr) - DramMemoryMap::Base));
     }
 
+    template <typename T>
+    T* GetPointerFromRaw(PAddr addr) {
+        return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr);
+    }
+
+    template <typename T>
+    const T* GetPointerFromRaw(PAddr addr) const {
+        return reinterpret_cast<const T*>(buffer.BackingBasePointer() + addr);
+    }
+
     Common::HostMemory buffer;
 };
 
diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h
new file mode 100644
index 000000000..ffeed46cc
--- /dev/null
+++ b/src/core/device_memory_manager.h
@@ -0,0 +1,211 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <deque>
+#include <memory>
+#include <mutex>
+
+#include "common/common_types.h"
+#include "common/scratch_buffer.h"
+#include "common/virtual_buffer.h"
+
+namespace Core {
+
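+// Device (SMMU) addresses are managed at a fixed 4 KiB page granularity.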
+constexpr size_t DEVICE_PAGEBITS = 12ULL;
+constexpr size_t DEVICE_PAGESIZE = 1ULL << DEVICE_PAGEBITS;
+constexpr size_t DEVICE_PAGEMASK = DEVICE_PAGESIZE - 1ULL;
+
+class DeviceMemory;
+
+namespace Memory {
+class Memory;
+}
+
+template <typename DTraits>
+struct DeviceMemoryManagerAllocator;
+
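+// Address space ID of a guest process registered with the device memory manager.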
+struct Asid {
+    size_t id;
+};
+
+template <typename Traits>
+class DeviceMemoryManager {
+    using DeviceInterface = typename Traits::DeviceInterface;
+    using DeviceMethods = typename Traits::DeviceMethods;
+
+public:
+    DeviceMemoryManager(const DeviceMemory& device_memory);
+    ~DeviceMemoryManager();
+
+    void BindInterface(DeviceInterface* device_inter);
+
+    DAddr Allocate(size_t size);
+    void AllocateFixed(DAddr start, size_t size);
+    void Free(DAddr start, size_t size);
+
+    void Map(DAddr address, VAddr virtual_address, size_t size, Asid asid, bool track = false);
+
+    void Unmap(DAddr address, size_t size);
+
+    void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, Asid asid);
+    void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, Asid asid) {
+        std::scoped_lock lk(mapping_guard);
+        TrackContinuityImpl(address, virtual_address, size, asid);
+    }
+
+    // Write / Read
+    template <typename T>
+    T* GetPointer(DAddr address);
+
+    template <typename T>
+    const T* GetPointer(DAddr address) const;
+
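+    // Applies `operation` to every device address that maps the given physical address:
+    // the common single-mapping case is handled inline, while aliased physical pages fall
+    // back to gathering the full mapping list into `buffer`.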
+    template <typename Func>
+    void ApplyOpOnPAddr(PAddr address, Common::ScratchBuffer<u32>& buffer, Func&& operation) {
+        DAddr subbits = static_cast<DAddr>(address & page_mask);
+        const u32 base = compressed_device_addr[(address >> page_bits)];
+        if ((base >> MULTI_FLAG_BITS) == 0) [[likely]] {
+            const DAddr d_address = (static_cast<DAddr>(base) << page_bits) + subbits;
+            operation(d_address);
+            return;
+        }
+        InnerGatherDeviceAddresses(buffer, address);
+        for (u32 value : buffer) {
+            operation((static_cast<DAddr>(value) << page_bits) + subbits);
+        }
+    }
+
+    template <typename Func>
+    void ApplyOpOnPointer(const u8* p, Common::ScratchBuffer<u32>& buffer, Func&& operation) {
+        PAddr address = GetRawPhysicalAddr<u8>(p);
+        ApplyOpOnPAddr(address, buffer, operation);
+    }
+
+    PAddr GetPhysicalRawAddressFromDAddr(DAddr address) const {
+        PAddr subbits = static_cast<PAddr>(address & page_mask);
+        auto paddr = compressed_physical_ptr[(address >> page_bits)];
+        if (paddr == 0) {
+            return 0;
+        }
+        return (static_cast<PAddr>(paddr - 1) << page_bits) + subbits;
+    }
+
+    template <typename T>
+    void Write(DAddr address, T value);
+
+    template <typename T>
+    T Read(DAddr address) const;
+
+    u8* GetSpan(const DAddr src_addr, const std::size_t size);
+    const u8* GetSpan(const DAddr src_addr, const std::size_t size) const;
+
+    void ReadBlock(DAddr address, void* dest_pointer, size_t size);
+    void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size);
+    void WriteBlock(DAddr address, const void* src_pointer, size_t size);
+    void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size);
+
+    Asid RegisterProcess(Memory::Memory* memory);
+    void UnregisterProcess(Asid id);
+
+    void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta);
+
+    static constexpr size_t AS_BITS = Traits::device_virtual_bits;
+
+private:
+    static constexpr size_t device_virtual_bits = Traits::device_virtual_bits;
+    static constexpr size_t device_as_size = 1ULL << device_virtual_bits;
+    static constexpr size_t physical_min_bits = 32;
+    static constexpr size_t physical_max_bits = 33;
+    static constexpr size_t page_bits = 12;
+    static constexpr size_t page_size = 1ULL << page_bits;
+    static constexpr size_t page_mask = page_size - 1ULL;
+    static constexpr u32 physical_address_base = 1U << page_bits;
+    static constexpr u32 MULTI_FLAG_BITS = 31;
+    static constexpr u32 MULTI_FLAG = 1U << MULTI_FLAG_BITS;
+    static constexpr u32 MULTI_MASK = ~MULTI_FLAG;
+
+    template <typename T>
+    T* GetPointerFromRaw(PAddr addr) {
+        return reinterpret_cast<T*>(physical_base + addr);
+    }
+
+    template <typename T>
+    const T* GetPointerFromRaw(PAddr addr) const {
+        return reinterpret_cast<const T*>(physical_base + addr);
+    }
+
+    template <typename T>
+    PAddr GetRawPhysicalAddr(const T* ptr) const {
+        return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) - physical_base);
+    }
+
+    void WalkBlock(const DAddr addr, const std::size_t size, auto on_unmapped, auto on_memory,
+                   auto increment);
+
+    void InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer, PAddr address);
+
+    std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl;
+
+    const uintptr_t physical_base;
+    DeviceInterface* device_inter;
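+    // compressed_physical_ptr[device_page] holds (physical page index + 1); 0 means unmapped.
+    // compressed_device_addr[physical_page] holds the backing device page or, if several
+    // device pages alias one physical page, MULTI_FLAG | <head of the multi-mapping list>.
+    // continuity_tracker[device_page] counts how many pages from there on are host-contiguous.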
+    Common::VirtualBuffer<u32> compressed_physical_ptr;
+    Common::VirtualBuffer<u32> compressed_device_addr;
+    Common::VirtualBuffer<u32> continuity_tracker;
+
+    // Process memory interfaces
+
+    std::deque<size_t> id_pool;
+    std::deque<Memory::Memory*> registered_processes;
+
+    // Memory protection management
+
+    static constexpr size_t guest_max_as_bits = 39;
+    static constexpr size_t guest_as_size = 1ULL << guest_max_as_bits;
+    static constexpr size_t guest_mask = guest_as_size - 1ULL;
+    static constexpr size_t asid_start_bit = guest_max_as_bits;
+
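+    // cpu_backing_address packs, per device page, the backing guest VAddr in the low 39 bits
+    // and the owning ASID in the remaining upper bits.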
+    std::pair<Asid, VAddr> ExtractCPUBacking(size_t page_index) {
+        auto content = cpu_backing_address[page_index];
+        const VAddr address = content & guest_mask;
+        const Asid asid{static_cast<size_t>(content >> asid_start_bit)};
+        return std::make_pair(asid, address);
+    }
+
+    void InsertCPUBacking(size_t page_index, VAddr address, Asid asid) {
+        cpu_backing_address[page_index] = address | (asid.id << asid_start_bit);
+    }
+
+    Common::VirtualBuffer<VAddr> cpu_backing_address;
+    static constexpr size_t subentries = 8 / sizeof(u8);
+    static constexpr size_t subentries_mask = subentries - 1;
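+    // Packs eight adjacent per-page cache counters into a single 8-byte entry.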
+    class CounterEntry final {
+    public:
+        CounterEntry() = default;
+
+        std::atomic_uint8_t& Count(std::size_t page) {
+            return values[page & subentries_mask];
+        }
+
+        const std::atomic_uint8_t& Count(std::size_t page) const {
+            return values[page & subentries_mask];
+        }
+
+    private:
+        std::array<std::atomic_uint8_t, subentries> values{};
+    };
+    static_assert(sizeof(CounterEntry) == subentries * sizeof(u8),
+                  "CounterEntry should be 8 bytes!");
+
+    static constexpr size_t num_counter_entries =
+        (1ULL << (device_virtual_bits - page_bits)) / subentries;
+    using CachedPages = std::array<CounterEntry, num_counter_entries>;
+    std::unique_ptr<CachedPages> cached_pages;
+    std::mutex counter_guard;
+    std::mutex mapping_guard;
+};
+
+} // namespace Core
diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc
new file mode 100644
index 000000000..8ce122872
--- /dev/null
+++ b/src/core/device_memory_manager.inc
@@ -0,0 +1,582 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <atomic>
+#include <limits>
+#include <memory>
+#include <type_traits>
+
+#include "common/address_space.h"
+#include "common/address_space.inc"
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/div_ceil.h"
+#include "common/scope_exit.h"
+#include "common/settings.h"
+#include "core/device_memory.h"
+#include "core/device_memory_manager.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
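+// Holds chains of device page numbers for physical pages that have more than one device
+// mapping. Entries are addressed by 1-based index, next_entry == 0 terminates a chain, and
+// freed slots are recycled through free_entries.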
+class MultiAddressContainer {
+public:
+    MultiAddressContainer() = default;
+    ~MultiAddressContainer() = default;
+
+    void GatherValues(u32 start_entry, Common::ScratchBuffer<u32>& buffer) {
+        buffer.resize(8);
+        buffer.resize(0);
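+        // ScratchBuffer::resize preserves contents and never shrinks the backing storage,
+        // so the buffer can be grown one element at a time without reallocating.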
+        size_t index = 0;
+        const auto add_value = [&](u32 value) {
+            buffer[index] = value;
+            index++;
+            buffer.resize(index);
+        };
+
+        u32 iter_entry = start_entry;
+        Entry* current = &storage[iter_entry - 1];
+        add_value(current->value);
+        while (current->next_entry != 0) {
+            iter_entry = current->next_entry;
+            current = &storage[iter_entry - 1];
+            add_value(current->value);
+        }
+    }
+
+    u32 Register(u32 value) {
+        return RegisterImplementation(value);
+    }
+
+    void Register(u32 value, u32 start_entry) {
+        auto entry_id = RegisterImplementation(value);
+        u32 iter_entry = start_entry;
+        Entry* current = &storage[iter_entry - 1];
+        while (current->next_entry != 0) {
+            iter_entry = current->next_entry;
+            current = &storage[iter_entry - 1];
+        }
+        current->next_entry = entry_id;
+    }
+
+    std::pair<bool, u32> Unregister(u32 value, u32 start_entry) {
+        u32 iter_entry = start_entry;
+        Entry* previous{};
+        Entry* current = &storage[iter_entry - 1];
+        Entry* next{};
+        bool more_than_one_remaining = false;
+        u32 result_start{start_entry};
+        size_t count = 0;
+        while (current->value != value) {
+            count++;
+            previous = current;
+            iter_entry = current->next_entry;
+            current = &storage[iter_entry - 1];
+        }
+        // Find next
+        u32 next_entry = current->next_entry;
+        if (next_entry != 0) {
+            next = &storage[next_entry - 1];
+            more_than_one_remaining = next->next_entry != 0 || previous != nullptr;
+        }
+        if (previous) {
+            previous->next_entry = next_entry;
+        } else {
+            result_start = next_entry;
+        }
+        free_entries.emplace_back(iter_entry);
+        return std::make_pair(more_than_one_remaining || count > 1, result_start);
+    }
+
+    u32 ReleaseEntry(u32 start_entry) {
+        Entry* current = &storage[start_entry - 1];
+        free_entries.emplace_back(start_entry);
+        return current->value;
+    }
+
+private:
+    u32 RegisterImplementation(u32 value) {
+        auto entry_id = GetNewEntry();
+        auto& entry = storage[entry_id - 1];
+        entry.next_entry = 0;
+        entry.value = value;
+        return entry_id;
+    }
+    u32 GetNewEntry() {
+        if (!free_entries.empty()) {
+            u32 result = free_entries.front();
+            free_entries.pop_front();
+            return result;
+        }
+        storage.emplace_back();
+        u32 new_entry = static_cast<u32>(storage.size());
+        return new_entry;
+    }
+
+    struct Entry {
+        u32 next_entry{};
+        u32 value{};
+    };
+
+    std::deque<Entry> storage;
+    std::deque<u32> free_entries;
+};
+
+struct EmptyAllocator {
+    EmptyAllocator([[maybe_unused]] DAddr address) {}
+};
+
+} // namespace
+
+template <typename DTraits>
+struct DeviceMemoryManagerAllocator {
+    static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits;
+    static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS;
+    static constexpr DAddr max_device_area = 1ULL << device_virtual_bits;
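+    // first_address keeps page 0 unallocated so that DAddr 0 can serve as a null marker.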
+
+    DeviceMemoryManagerAllocator() : main_allocator(first_address) {}
+
+    Common::FlatAllocator<DAddr, 0, device_virtual_bits> main_allocator;
+    MultiAddressContainer multi_dev_address;
+
+    /// Returns true when addr -> addr+size is fully contained in the device address space
+    template <bool pin_area>
+    [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
+        return addr + size <= max_device_area;
+    }
+
+    DAddr Allocate(size_t size) {
+        return main_allocator.Allocate(size);
+    }
+
+    void AllocateFixed(DAddr b_address, size_t b_size) {
+        main_allocator.AllocateFixed(b_address, b_size);
+    }
+
+    void Free(DAddr b_address, size_t b_size) {
+        main_allocator.Free(b_address, b_size);
+    }
+};
+
+template <typename Traits>
+DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
+    : physical_base{reinterpret_cast<uintptr_t>(device_memory_.buffer.BackingBasePointer())},
+      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
+      compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
+                                               Settings::MemoryLayout::Memory_4Gb
+                                           ? physical_min_bits
+                                           : physical_max_bits) -
+                                      Memory::YUZU_PAGEBITS)),
+      continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
+      cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
+    impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
+    cached_pages = std::make_unique<CachedPages>();
+
+    const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS;
+    for (size_t i = 0; i < total_virtual; i++) {
+        compressed_physical_ptr[i] = 0;
+        continuity_tracker[i] = 1;
+        cpu_backing_address[i] = 0;
+    }
+    const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
+                                                Settings::MemoryLayout::Memory_4Gb
+                                            ? physical_min_bits
+                                            : physical_max_bits) -
+                                       Memory::YUZU_PAGEBITS);
+    for (size_t i = 0; i < total_phys; i++) {
+        compressed_device_addr[i] = 0;
+    }
+}
+
+template <typename Traits>
+DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default;
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* device_inter_) {
+    device_inter = device_inter_;
+}
+
+template <typename Traits>
+DAddr DeviceMemoryManager<Traits>::Allocate(size_t size) {
+    return impl->Allocate(size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::AllocateFixed(DAddr start, size_t size) {
+    return impl->AllocateFixed(start, size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::Free(DAddr start, size_t size) {
+    impl->Free(start, size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size,
+                                      Asid asid, bool track) {
+    Core::Memory::Memory* process_memory = registered_processes[asid.id];
+    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    std::scoped_lock lk(mapping_guard);
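+    // Translate each guest page through the owning process' page table; pages that fail to
+    // translate are recorded as unmapped (entry 0).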
+    for (size_t i = 0; i < num_pages; i++) {
+        const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE;
+        auto* ptr = process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress));
+        if (ptr == nullptr) [[unlikely]] {
+            compressed_physical_ptr[start_page_d + i] = 0;
+            continue;
+        }
+        auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U;
+        compressed_physical_ptr[start_page_d + i] = phys_addr;
+        InsertCPUBacking(start_page_d + i, new_vaddress, asid);
+        const u32 base_dev = compressed_device_addr[phys_addr - 1U];
+        const u32 new_dev = static_cast<u32>(start_page_d + i);
+        if (base_dev == 0) [[likely]] {
+            compressed_device_addr[phys_addr - 1U] = new_dev;
+            continue;
+        }
+        u32 start_id = base_dev & MULTI_MASK;
+        if ((base_dev >> MULTI_FLAG_BITS) == 0) {
+            start_id = impl->multi_dev_address.Register(base_dev);
+            compressed_device_addr[phys_addr - 1U] = MULTI_FLAG | start_id;
+        }
+        impl->multi_dev_address.Register(new_dev, start_id);
+    }
+    if (track) {
+        TrackContinuityImpl(address, virtual_address, size, asid);
+    }
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
+    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    device_inter->InvalidateRegion(address, size);
+    std::scoped_lock lk(mapping_guard);
+    for (size_t i = 0; i < num_pages; i++) {
+        auto phys_addr = compressed_physical_ptr[start_page_d + i];
+        compressed_physical_ptr[start_page_d + i] = 0;
+        cpu_backing_address[start_page_d + i] = 0;
+        if (phys_addr != 0) [[likely]] {
+            const u32 base_dev = compressed_device_addr[phys_addr - 1U];
+            if ((base_dev >> MULTI_FLAG_BITS) == 0) [[likely]] {
+                compressed_device_addr[phys_addr - 1] = 0;
+                continue;
+            }
+            const auto [more_entries, new_start] = impl->multi_dev_address.Unregister(
+                static_cast<u32>(start_page_d + i), base_dev & MULTI_MASK);
+            if (!more_entries) {
+                compressed_device_addr[phys_addr - 1] =
+                    impl->multi_dev_address.ReleaseEntry(new_start);
+                continue;
+            }
+            compressed_device_addr[phys_addr - 1] = new_start | MULTI_FLAG;
+        }
+    }
+}
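+
+// Records, for each device page, how many pages from it onward are contiguous in host memory,
+// scanning backwards so each entry extends the run already computed for its successor.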
+template <typename Traits>
+void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address,
+                                                      size_t size, Asid asid) {
+    Core::Memory::Memory* process_memory = registered_processes[asid.id];
+    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    uintptr_t last_ptr = 0;
+    size_t page_count = 1;
+    for (size_t i = num_pages; i > 0; i--) {
+        size_t index = i - 1;
+        const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE;
+        const uintptr_t new_ptr = reinterpret_cast<uintptr_t>(
+            process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)));
+        if (new_ptr + page_size == last_ptr) {
+            page_count++;
+        } else {
+            page_count = 1;
+        }
+        last_ptr = new_ptr;
+        continuity_tracker[start_page_d + index] = static_cast<u32>(page_count);
+    }
+}
+template <typename Traits>
+u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) {
+    size_t page_index = src_addr >> page_bits;
+    size_t subbits = src_addr & page_mask;
+    if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) {
+        return GetPointer<u8>(src_addr);
+    }
+    return nullptr;
+}
+
+template <typename Traits>
+const u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) const {
+    size_t page_index = src_addr >> page_bits;
+    size_t subbits = src_addr & page_mask;
+    if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) {
+        return GetPointer<u8>(src_addr);
+    }
+    return nullptr;
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer,
+                                                             PAddr address) {
+    size_t phys_addr = address >> page_bits;
+    std::scoped_lock lk(mapping_guard);
+    u32 backing = compressed_device_addr[phys_addr];
+    if ((backing >> MULTI_FLAG_BITS) != 0) {
+        impl->multi_dev_address.GatherValues(backing & MULTI_MASK, buffer);
+        return;
+    }
+    buffer.resize(1);
+    buffer[0] = backing;
+}
+
+template <typename Traits>
+template <typename T>
+T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) {
+    const size_t index = address >> Memory::YUZU_PAGEBITS;
+    const size_t offset = address & Memory::YUZU_PAGEMASK;
+    auto phys_addr = compressed_physical_ptr[index];
+    if (phys_addr == 0) [[unlikely]] {
+        return nullptr;
+    }
+    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
+                                offset);
+}
+
+template <typename Traits>
+template <typename T>
+const T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) const {
+    const size_t index = address >> Memory::YUZU_PAGEBITS;
+    const size_t offset = address & Memory::YUZU_PAGEMASK;
+    auto phys_addr = compressed_physical_ptr[index];
+    if (phys_addr == 0) [[unlikely]] {
+        return nullptr;
+    }
+    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
+                                offset);
+}
+
+template <typename Traits>
+template <typename T>
+void DeviceMemoryManager<Traits>::Write(DAddr address, T value) {
+    T* ptr = GetPointer<T>(address);
+    if (!ptr) [[unlikely]] {
+        return;
+    }
+    std::memcpy(ptr, &value, sizeof(T));
+}
+
+template <typename Traits>
+template <typename T>
+T DeviceMemoryManager<Traits>::Read(DAddr address) const {
+    const T* ptr = GetPointer<T>(address);
+    T result{};
+    if (!ptr) [[unlikely]] {
+        return result;
+    }
+    std::memcpy(&result, ptr, sizeof(T));
+    return result;
+}
+
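+// Walks a device range in host-contiguous chunks: on_memory receives mapped runs (coalesced
+// through the continuity tracker), on_unmapped receives holes, and increment advances the
+// caller's cursor after each chunk.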
+template <typename Traits>
+void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto on_unmapped,
+                                            auto on_memory, auto increment) {
+    std::size_t remaining_size = size;
+    std::size_t page_index = addr >> Memory::YUZU_PAGEBITS;
+    std::size_t page_offset = addr & Memory::YUZU_PAGEMASK;
+
+    while (remaining_size) {
+        const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]);
+        const std::size_t copy_amount =
+            std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
+        const auto current_vaddr =
+            static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
+        SCOPE_EXIT({
+            page_index += next_pages;
+            page_offset = 0;
+            increment(copy_amount);
+            remaining_size -= copy_amount;
+        });
+
+        auto phys_addr = compressed_physical_ptr[page_index];
+        if (phys_addr == 0) {
+            on_unmapped(copy_amount, current_vaddr);
+            continue;
+        }
+        auto* mem_ptr = GetPointerFromRaw<u8>(
+            (static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset);
+        on_memory(copy_amount, mem_ptr);
+    }
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
+    device_inter->FlushRegion(address, size);
+    WalkBlock(
+        address, size,
+        [&](size_t copy_amount, DAddr current_vaddr) {
+            LOG_ERROR(
+                HW_Memory,
+                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                current_vaddr, address, size);
+            std::memset(dest_pointer, 0, copy_amount);
+        },
+        [&](size_t copy_amount, const u8* const src_ptr) {
+            std::memcpy(dest_pointer, src_ptr, copy_amount);
+        },
+        [&](const std::size_t copy_amount) {
+            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
+        });
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_pointer, size_t size) {
+    WalkBlock(
+        address, size,
+        [&](size_t copy_amount, DAddr current_vaddr) {
+            LOG_ERROR(
+                HW_Memory,
+                "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                current_vaddr, address, size);
+        },
+        [&](size_t copy_amount, u8* const dst_ptr) {
+            std::memcpy(dst_ptr, src_pointer, copy_amount);
+        },
+        [&](const std::size_t copy_amount) {
+            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
+        });
+    device_inter->InvalidateRegion(address, size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size) {
+    WalkBlock(
+        address, size,
+        [&](size_t copy_amount, DAddr current_vaddr) {
+            LOG_ERROR(
+                HW_Memory,
+                "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                current_vaddr, address, size);
+            std::memset(dest_pointer, 0, copy_amount);
+        },
+        [&](size_t copy_amount, const u8* const src_ptr) {
+            std::memcpy(dest_pointer, src_ptr, copy_amount);
+        },
+        [&](const std::size_t copy_amount) {
+            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
+        });
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* src_pointer,
+                                                   size_t size) {
+    WalkBlock(
+        address, size,
+        [&](size_t copy_amount, DAddr current_vaddr) {
+            LOG_ERROR(
+                HW_Memory,
+                "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                current_vaddr, address, size);
+        },
+        [&](size_t copy_amount, u8* const dst_ptr) {
+            std::memcpy(dst_ptr, src_pointer, copy_amount);
+        },
+        [&](const std::size_t copy_amount) {
+            src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
+        });
+}
+
+template <typename Traits>
+Asid DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_device_inter) {
+    size_t new_id{};
+    if (!id_pool.empty()) {
+        new_id = id_pool.front();
+        id_pool.pop_front();
+        registered_processes[new_id] = memory_device_inter;
+    } else {
+        registered_processes.emplace_back(memory_device_inter);
+        new_id = registered_processes.size() - 1U;
+    }
+    return Asid{new_id};
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::UnregisterProcess(Asid asid) {
+    registered_processes[asid.id] = nullptr;
+    id_pool.push_front(asid.id);
+}
+
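+// Adjusts per-page cache counters by delta and notifies the owning process when pages
+// transition between cached and uncached, batching contiguous runs into single calls.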
+template <typename Traits>
+void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) {
+    std::unique_lock<std::mutex> lk(counter_guard, std::defer_lock);
+    const auto Lock = [&] {
+        if (!lk) {
+            lk.lock();
+        }
+    };
+    u64 uncache_begin = 0;
+    u64 cache_begin = 0;
+    u64 uncache_bytes = 0;
+    u64 cache_bytes = 0;
+    const auto MarkRegionCaching = &DeviceMemoryManager<Traits>::DeviceMethods::MarkRegionCaching;
+
+    std::atomic_thread_fence(std::memory_order_acquire);
+    const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE);
+    size_t page = addr >> Memory::YUZU_PAGEBITS;
+    auto [asid, base_vaddress] = ExtractCPUBacking(page);
+    size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS;
+    auto* memory_device_inter = registered_processes[asid.id];
+    for (; page != page_end; ++page) {
+        std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page);
+
+        if (delta > 0) {
+            ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits<u8>::max(),
+                       "Count may overflow!");
+        } else if (delta < 0) {
+            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
+        } else {
+            ASSERT_MSG(false, "Delta must be non-zero!");
+        }
+
+        // Adds or subtracts 1, as count is an unsigned 8-bit value
+        count.fetch_add(static_cast<u8>(delta), std::memory_order_release);
+
+        // Assume delta is either -1 or 1
+        if (count.load(std::memory_order::relaxed) == 0) {
+            if (uncache_bytes == 0) {
+                uncache_begin = vpage;
+            }
+            uncache_bytes += Memory::YUZU_PAGESIZE;
+        } else if (uncache_bytes > 0) {
+            Lock();
+            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
+                              uncache_bytes, false);
+            uncache_bytes = 0;
+        }
+        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
+            if (cache_bytes == 0) {
+                cache_begin = vpage;
+            }
+            cache_bytes += Memory::YUZU_PAGESIZE;
+        } else if (cache_bytes > 0) {
+            Lock();
+            MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
+                              true);
+            cache_bytes = 0;
+        }
+        vpage++;
+    }
+    if (uncache_bytes > 0) {
+        Lock();
+        MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
+                          false);
+    }
+    if (cache_bytes > 0) {
+        Lock();
+        MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
+                          true);
+    }
+}
+
+} // namespace Core
diff --git a/src/core/gpu_dirty_memory_manager.h b/src/core/gpu_dirty_memory_manager.h
index 9687531e8..cc8fc176f 100644
--- a/src/core/gpu_dirty_memory_manager.h
+++ b/src/core/gpu_dirty_memory_manager.h
@@ -10,7 +10,7 @@
 #include <utility>
 #include <vector>
 
-#include "core/memory.h"
+#include "core/device_memory_manager.h"
 
 namespace Core {
 
@@ -23,7 +23,7 @@ public:
 
     ~GPUDirtyMemoryManager() = default;
 
-    void Collect(VAddr address, size_t size) {
+    void Collect(PAddr address, size_t size) {
         TransformAddress t = BuildTransform(address, size);
         TransformAddress tmp, original;
         do {
@@ -47,7 +47,7 @@ public:
                                                 std::memory_order_relaxed));
     }
 
-    void Gather(std::function<void(VAddr, size_t)>& callback) {
+    void Gather(std::function<void(PAddr, size_t)>& callback) {
         {
             std::scoped_lock lk(guard);
             TransformAddress t = current.exchange(default_transform, std::memory_order_relaxed);
@@ -65,7 +65,7 @@ public:
                 mask = mask >> empty_bits;
 
                 const size_t continuous_bits = std::countr_one(mask);
-                callback((static_cast<VAddr>(transform.address) << page_bits) + offset,
+                callback((static_cast<PAddr>(transform.address) << page_bits) + offset,
                          continuous_bits << align_bits);
                 mask = continuous_bits < align_size ? (mask >> continuous_bits) : 0;
                 offset += continuous_bits << align_bits;
@@ -80,7 +80,7 @@ private:
         u32 mask;
     };
 
-    constexpr static size_t page_bits = Memory::YUZU_PAGEBITS - 1;
+    constexpr static size_t page_bits = DEVICE_PAGEBITS - 1;
     constexpr static size_t page_size = 1ULL << page_bits;
     constexpr static size_t page_mask = page_size - 1;
 
@@ -89,7 +89,7 @@ private:
     constexpr static size_t align_mask = align_size - 1;
     constexpr static TransformAddress default_transform = {.address = ~0U, .mask = 0U};
 
-    bool IsValid(VAddr address) {
+    bool IsValid(PAddr address) {
         return address < (1ULL << 39);
     }
 
@@ -103,7 +103,7 @@ private:
         return mask;
     }
 
-    TransformAddress BuildTransform(VAddr address, size_t size) {
+    TransformAddress BuildTransform(PAddr address, size_t size) {
         const size_t minor_address = address & page_mask;
         const size_t minor_bit = minor_address >> align_bits;
         const size_t top_bit = (minor_address + size + align_mask) >> align_bits;
diff --git a/src/core/guest_memory.h b/src/core/guest_memory.h
new file mode 100644
index 000000000..7ee18c126
--- /dev/null
+++ b/src/core/guest_memory.h
@@ -0,0 +1,214 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <iterator>
+#include <memory>
+#include <optional>
+#include <span>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/scratch_buffer.h"
+
+namespace Core::Memory {
+
+enum GuestMemoryFlags : u32 {
+    Read = 1 << 0,
+    Write = 1 << 1,
+    Safe = 1 << 2,
+    Cached = 1 << 3,
+
+    SafeRead = Read | Safe,
+    SafeWrite = Write | Safe,
+    SafeReadWrite = SafeRead | SafeWrite,
+    SafeReadCachedWrite = SafeReadWrite | Cached,
+
+    UnsafeRead = Read,
+    UnsafeWrite = Write,
+    UnsafeReadWrite = UnsafeRead | UnsafeWrite,
+    UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
+};
+
+namespace {
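+// Exposes a guest memory range as a contiguous host span: zero-copy when the range is
+// host-contiguous, otherwise staged through the backup scratch buffer or a local copy.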
+template <typename M, typename T, GuestMemoryFlags FLAGS>
+class GuestMemory {
+    using iterator = T*;
+    using const_iterator = const T*;
+    using value_type = T;
+    using element_type = T;
+    using iterator_category = std::contiguous_iterator_tag;
+
+public:
+    GuestMemory() = delete;
+    explicit GuestMemory(M& memory, u64 addr, std::size_t size,
+                         Common::ScratchBuffer<T>* backup = nullptr)
+        : m_memory{memory}, m_addr{addr}, m_size{size} {
+        static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
+        if constexpr (FLAGS & GuestMemoryFlags::Read) {
+            Read(addr, size, backup);
+        }
+    }
+
+    ~GuestMemory() = default;
+
+    T* data() noexcept {
+        return m_data_span.data();
+    }
+
+    const T* data() const noexcept {
+        return m_data_span.data();
+    }
+
+    size_t size() const noexcept {
+        return m_size;
+    }
+
+    size_t size_bytes() const noexcept {
+        return this->size() * sizeof(T);
+    }
+
+    [[nodiscard]] T* begin() noexcept {
+        return this->data();
+    }
+
+    [[nodiscard]] const T* begin() const noexcept {
+        return this->data();
+    }
+
+    [[nodiscard]] T* end() noexcept {
+        return this->data() + this->size();
+    }
+
+    [[nodiscard]] const T* end() const noexcept {
+        return this->data() + this->size();
+    }
+
+    T& operator[](size_t index) noexcept {
+        return m_data_span[index];
+    }
+
+    const T& operator[](size_t index) const noexcept {
+        return m_data_span[index];
+    }
+
+    void SetAddressAndSize(u64 addr, std::size_t size) noexcept {
+        m_addr = addr;
+        m_size = size;
+        m_addr_changed = true;
+    }
+
+    std::span<T> Read(u64 addr, std::size_t size,
+                      Common::ScratchBuffer<T>* backup = nullptr) noexcept {
+        m_addr = addr;
+        m_size = size;
+        if (m_size == 0) {
+            m_is_data_copy = true;
+            return {};
+        }
+
+        if (this->TrySetSpan()) {
+            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
+                m_memory.FlushRegion(m_addr, this->size_bytes());
+            }
+        } else {
+            if (backup) {
+                backup->resize_destructive(this->size());
+                m_data_span = *backup;
+            } else {
+                m_data_copy.resize(this->size());
+                m_data_span = std::span(m_data_copy);
+            }
+            m_is_data_copy = true;
+            m_span_valid = true;
+            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
+                m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
+            } else {
+                m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
+            }
+        }
+        return m_data_span;
+    }
+
+    void Write(std::span<T> write_data) noexcept {
+        if constexpr (FLAGS & GuestMemoryFlags::Cached) {
+            m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
+        } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
+            m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
+        } else {
+            m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
+        }
+    }
+
+    bool TrySetSpan() noexcept {
+        if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
+            m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
+            m_span_valid = true;
+            return true;
+        }
+        return false;
+    }
+
+protected:
+    bool IsDataCopy() const noexcept {
+        return m_is_data_copy;
+    }
+
+    bool AddressChanged() const noexcept {
+        return m_addr_changed;
+    }
+
+    M& m_memory;
+    u64 m_addr{};
+    size_t m_size{};
+    std::span<T> m_data_span{};
+    std::vector<T> m_data_copy{};
+    bool m_span_valid{false};
+    bool m_is_data_copy{false};
+    bool m_addr_changed{false};
+};
+
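+// RAII variant that, on destruction, writes the span back to guest memory when it was a
+// local copy (or the address changed) and otherwise just invalidates the written range.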
+template <typename M, typename T, GuestMemoryFlags FLAGS>
+class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
+public:
+    GuestMemoryScoped() = delete;
+    explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
+                               Common::ScratchBuffer<T>* backup = nullptr)
+        : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
+        if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
+            if (!this->TrySetSpan()) {
+                if (backup) {
+                    this->m_data_span = *backup;
+                    this->m_span_valid = true;
+                    this->m_is_data_copy = true;
+                }
+            }
+        }
+    }
+
+    ~GuestMemoryScoped() {
+        if constexpr (FLAGS & GuestMemoryFlags::Write) {
+            if (this->size() == 0) [[unlikely]] {
+                return;
+            }
+
+            if (this->AddressChanged() || this->IsDataCopy()) {
+                ASSERT(this->m_span_valid);
+                if constexpr (FLAGS & GuestMemoryFlags::Cached) {
+                    this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
+                } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
+                    this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
+                } else {
+                    this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
+                }
+            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
+                                 (FLAGS & GuestMemoryFlags::Cached)) {
+                this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
+            }
+        }
+    }
+};
+} // namespace
+
+} // namespace Core::Memory
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 53735a225..0b08e877e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -5,6 +5,7 @@
 #include "common/scope_exit.h"
 #include "common/settings.h"
 #include "core/core.h"
+#include "core/gpu_dirty_memory_manager.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/k_shared_memory.h"
@@ -320,7 +321,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
 
     // Ensure our memory is initialized.
     m_memory.SetCurrentPageTable(*this);
-    m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
+    m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());
 
     // Ensure we can insert the code region.
     R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
@@ -417,7 +418,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
 
     // Ensure our memory is initialized.
     m_memory.SetCurrentPageTable(*this);
-    m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
+    m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());
 
     // Ensure we can insert the code region.
     R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
@@ -1141,8 +1142,7 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
 KProcess::KProcess(KernelCore& kernel)
     : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
       m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
-      m_handle_table{kernel}, m_dirty_memory_managers{},
-      m_exclusive_monitor{}, m_memory{kernel.System()} {}
+      m_handle_table{kernel}, m_exclusive_monitor{}, m_memory{kernel.System()} {}
 KProcess::~KProcess() = default;
 
 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
@@ -1324,10 +1324,4 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT
     return true;
 }
 
-void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
-    for (auto& manager : m_dirty_memory_managers) {
-        manager.Gather(callback);
-    }
-}
-
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 53c0e3316..ab1358a12 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -7,7 +7,6 @@
 
 #include "core/arm/arm_interface.h"
 #include "core/file_sys/program_metadata.h"
-#include "core/gpu_dirty_memory_manager.h"
 #include "core/hle/kernel/code_set.h"
 #include "core/hle/kernel/k_address_arbiter.h"
 #include "core/hle/kernel/k_capabilities.h"
@@ -128,7 +127,6 @@ private:
 #ifdef HAS_NCE
     std::unordered_map<u64, u64> m_post_handlers{};
 #endif
-    std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers;
     std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor;
     Core::Memory::Memory m_memory;
 
@@ -511,8 +509,6 @@ public:
         return m_memory;
     }
 
-    void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
-
     Core::ExclusiveMonitor& GetExclusiveMonitor() const {
         return *m_exclusive_monitor;
     }
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index 3f38ceb03..e491dd260 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/scratch_buffer.h"
+#include "core/guest_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_process.h"
@@ -23,19 +24,6 @@
 #include "core/hle/service/ipc_helpers.h"
 #include "core/memory.h"
 
-namespace {
-static thread_local std::array read_buffer_data_a{
-    Common::ScratchBuffer<u8>(),
-    Common::ScratchBuffer<u8>(),
-    Common::ScratchBuffer<u8>(),
-};
-static thread_local std::array read_buffer_data_x{
-    Common::ScratchBuffer<u8>(),
-    Common::ScratchBuffer<u8>(),
-    Common::ScratchBuffer<u8>(),
-};
-} // Anonymous namespace
-
 namespace Service {
 
 SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -343,48 +331,27 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
 }
 
 std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
-    static thread_local std::array read_buffer_a{
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-    };
+    Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);
 
     ASSERT_OR_EXECUTE_MSG(
         BufferDescriptorA().size() > buffer_index, { return {}; },
         "BufferDescriptorA invalid buffer_index {}", buffer_index);
-    auto& read_buffer = read_buffer_a[buffer_index];
-    return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
-                            BufferDescriptorA()[buffer_index].Size(),
-                            &read_buffer_data_a[buffer_index]);
+    return gm.Read(BufferDescriptorA()[buffer_index].Address(),
+                   BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]);
 }
 
 std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
-    static thread_local std::array read_buffer_x{
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-    };
+    Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);
 
     ASSERT_OR_EXECUTE_MSG(
         BufferDescriptorX().size() > buffer_index, { return {}; },
         "BufferDescriptorX invalid buffer_index {}", buffer_index);
-    auto& read_buffer = read_buffer_x[buffer_index];
-    return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
-                            BufferDescriptorX()[buffer_index].Size(),
-                            &read_buffer_data_x[buffer_index]);
+    return gm.Read(BufferDescriptorX()[buffer_index].Address(),
+                   BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]);
 }
 
 std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
-    static thread_local std::array read_buffer_a{
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-    };
-    static thread_local std::array read_buffer_x{
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
-    };
+    Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);
 
     const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                            BufferDescriptorA()[buffer_index].Size()};
@@ -401,18 +368,14 @@ std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) cons
         ASSERT_OR_EXECUTE_MSG(
             BufferDescriptorA().size() > buffer_index, { return {}; },
             "BufferDescriptorA invalid buffer_index {}", buffer_index);
-        auto& read_buffer = read_buffer_a[buffer_index];
-        return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
-                                BufferDescriptorA()[buffer_index].Size(),
-                                &read_buffer_data_a[buffer_index]);
+        return gm.Read(BufferDescriptorA()[buffer_index].Address(),
+                       BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]);
     } else {
         ASSERT_OR_EXECUTE_MSG(
             BufferDescriptorX().size() > buffer_index, { return {}; },
             "BufferDescriptorX invalid buffer_index {}", buffer_index);
-        auto& read_buffer = read_buffer_x[buffer_index];
-        return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
-                                BufferDescriptorX()[buffer_index].Size(),
-                                &read_buffer_data_x[buffer_index]);
+        return gm.Read(BufferDescriptorX()[buffer_index].Address(),
+                       BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]);
     }
 }
 
diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h
index 440737db5..8329d7265 100644
--- a/src/core/hle/service/hle_ipc.h
+++ b/src/core/hle/service/hle_ipc.h
@@ -41,6 +41,8 @@ class KernelCore;
 class KHandleTable;
 class KProcess;
 class KServerSession;
+template <typename T>
+class KScopedAutoObject;
 class KThread;
 } // namespace Kernel
 
@@ -424,6 +426,9 @@ private:
 
     Kernel::KernelCore& kernel;
     Core::Memory::Memory& memory;
+
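+    // Scratch storage backing the spans returned by ReadBufferA/X, replacing the previous
+    // thread_local arrays.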
+    mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_a{};
+    mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_x{};
 };
 
 } // namespace Service
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
index 37ca24f5d..21ef57d27 100644
--- a/src/core/hle/service/nvdrv/core/container.cpp
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -2,27 +2,135 @@
 // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
 // SPDX-License-Identifier: GPL-3.0-or-later
 
+#include <atomic>
+#include <deque>
+#include <mutex>
+
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/heap_mapper.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/memory.h"
 #include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::NvCore {
 
+Session::Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_)
+    : id{id_}, process{process_}, asid{asid_}, has_preallocated_area{}, mapper{}, is_active{} {}
+
+Session::~Session() = default;
+
 struct ContainerImpl {
-    explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
-        : file{host1x_}, manager{host1x_}, device_file_data{} {}
+    explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
+        : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
+    Tegra::Host1x::Host1x& host1x;
     NvMap file;
     SyncpointManager manager;
     Container::Host1xDeviceFileData device_file_data;
+    std::deque<Session> sessions;
+    size_t new_ids{};
+    std::deque<size_t> id_pool;
+    std::mutex session_guard;
 };
 
 Container::Container(Tegra::Host1x::Host1x& host1x_) {
-    impl = std::make_unique<ContainerImpl>(host1x_);
+    impl = std::make_unique<ContainerImpl>(*this, host1x_);
 }
 
 Container::~Container() = default;
 
+SessionId Container::OpenSession(Kernel::KProcess* process) {
+    using namespace Common::Literals;
+
+    std::scoped_lock lk(impl->session_guard);
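+    // Reuse the active session if this process already opened one.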
+    for (auto& session : impl->sessions) {
+        if (!session.is_active) {
+            continue;
+        }
+        if (session.process == process) {
+            return session.id;
+        }
+    }
+    size_t new_id{};
+    auto* memory_interface = &process->GetMemory();
+    auto& smmu = impl->host1x.MemoryManager();
+    auto asid = smmu.RegisterProcess(memory_interface);
+    if (!impl->id_pool.empty()) {
+        new_id = impl->id_pool.front();
+        impl->id_pool.pop_front();
+        impl->sessions[new_id] = Session{SessionId{new_id}, process, asid};
+    } else {
+        new_id = impl->new_ids++;
+        impl->sessions.emplace_back(SessionId{new_id}, process, asid);
+    }
+    auto& session = impl->sessions[new_id];
+    session.is_active = true;
+    // Optimization: back the application's heap with one preallocated SMMU
+    // region so nvmap pins inside it avoid per-handle SMMU allocations.
+    if (process->IsApplication()) {
+        auto& page_table = process->GetPageTable().GetBasePageTable();
+        auto heap_start = page_table.GetHeapRegionStart();
+
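+        // Walk the process page table and find the largest contiguous heap
+        // (MemoryState::Normal) block; that block is the candidate region to
+        // back with a preallocated SMMU area below.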
+        Kernel::KProcessAddress cur_addr = heap_start;
+        size_t region_size = 0;
+        VAddr region_start = 0;
+        while (true) {
+            Kernel::KMemoryInfo mem_info{};
+            Kernel::Svc::PageInfo page_info{};
+            R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+                                          cur_addr));
+            auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+            // Check if this memory block is heap.
+            if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) {
+                if (svc_mem_info.size > region_size) {
+                    region_size = svc_mem_info.size;
+                    region_start = svc_mem_info.base_address;
+                }
+            }
+
+            // Check if we're done.
+            const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
+            if (next_address <= GetInteger(cur_addr)) {
+                break;
+            }
+
+            cur_addr = next_address;
+        }
+        session.has_preallocated_area = false;
+        auto start_region = region_size >= 32_MiB ? smmu.Allocate(region_size) : 0;
+        if (start_region != 0) {
+            session.mapper = std::make_unique<HeapMapper>(region_start, start_region, region_size,
+                                                          asid, impl->host1x);
+            smmu.TrackContinuity(start_region, region_start, region_size, asid);
+            session.has_preallocated_area = true;
+            LOG_DEBUG(Debug, "Preallocation created!");
+        }
+    }
+    return SessionId{new_id};
+}
+
+void Container::CloseSession(SessionId session_id) {
+    std::scoped_lock lk(impl->session_guard);
+    auto& session = impl->sessions[session_id.id];
+    auto& smmu = impl->host1x.MemoryManager();
+    if (session.has_preallocated_area) {
+        const DAddr region_start = session.mapper->GetRegionStart();
+        const size_t region_size = session.mapper->GetRegionSize();
+        session.mapper.reset();
+        smmu.Free(region_start, region_size);
+        session.has_preallocated_area = false;
+    }
+    session.is_active = false;
+    smmu.UnregisterProcess(session.asid);
+    impl->id_pool.emplace_front(session_id.id);
+}
+
+Session* Container::GetSession(SessionId session_id) {
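+    // The fence is intended to make session writes done under session_guard
+    // visible to lock-free readers of the session deque.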
+    std::atomic_thread_fence(std::memory_order_acquire);
+    return &impl->sessions[session_id.id];
+}
+
 NvMap& Container::GetNvMapFile() {
     return impl->file;
 }
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
index b4b63ac90..b4d3938a8 100644
--- a/src/core/hle/service/nvdrv/core/container.h
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -8,24 +8,56 @@
 #include <memory>
 #include <unordered_map>
 
+#include "core/device_memory_manager.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 
+namespace Kernel {
+class KProcess;
+}
+
 namespace Tegra::Host1x {
 class Host1x;
 } // namespace Tegra::Host1x
 
 namespace Service::Nvidia::NvCore {
 
+class HeapMapper;
 class NvMap;
 class SyncpointManager;
 
 struct ContainerImpl;
 
+struct SessionId {
+    size_t id;
+};
+
+struct Session {
+    Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_);
+    ~Session();
+
+    Session(const Session&) = delete;
+    Session& operator=(const Session&) = delete;
+    Session(Session&&) = default;
+    Session& operator=(Session&&) = default;
+
+    SessionId id;
+    Kernel::KProcess* process;
+    Core::Asid asid;
+    bool has_preallocated_area{};
+    std::unique_ptr<HeapMapper> mapper{};
+    bool is_active{};
+};
+
 class Container {
 public:
     explicit Container(Tegra::Host1x::Host1x& host1x);
     ~Container();
 
+    SessionId OpenSession(Kernel::KProcess* process);
+    void CloseSession(SessionId id);
+
+    Session* GetSession(SessionId id);
+
     NvMap& GetNvMapFile();
 
     const NvMap& GetNvMapFile() const;
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
new file mode 100644
index 000000000..096dc5deb
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
@@ -0,0 +1,175 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <mutex>
+
+#include <boost/container/small_vector.hpp>
+#define BOOST_NO_MT
+#include <boost/pool/detail/mutex.hpp>
+#undef BOOST_NO_MT
+#include <boost/icl/interval.hpp>
+#include <boost/icl/interval_base_set.hpp>
+#include <boost/icl/interval_set.hpp>
+#include <boost/icl/split_interval_map.hpp>
+#include <boost/pool/pool.hpp>
+#include <boost/pool/pool_alloc.hpp>
+#include <boost/pool/poolfwd.hpp>
+
+#include "core/hle/service/nvdrv/core/heap_mapper.h"
+#include "video_core/host1x/host1x.h"
+
+namespace boost {
+template <typename T>
+class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
+}
+
+namespace Service::Nvidia::NvCore {
+
+using IntervalCompare = std::less<DAddr>;
+using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
+using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
+using IntervalSet = boost::icl::interval_set<DAddr>;
+using IntervalType = typename IntervalSet::interval_type;
+
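+// Interval-map combiner that adds counts in place but clamps at the identity
+// element (0), so a range's pin count can never go negative.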
+template <typename Type>
+struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
+    // types
+    typedef counter_add_functor<Type> type;
+    typedef boost::icl::identity_based_inplace_combine<Type> base_type;
+
+    // public member functions
+    void operator()(Type& current, const Type& added) const {
+        current += added;
+        if (current < base_type::identity_element()) {
+            current = base_type::identity_element();
+        }
+    }
+
+    // public static functions
+    static void version(Type&){};
+};
+
+using OverlapCombine = counter_add_functor<int>;
+using OverlapSection = boost::icl::inter_section<int>;
+using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
+
+struct HeapMapper::HeapMapperInternal {
+    HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {}
+    ~HeapMapperInternal() = default;
+
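+    // Invokes func(start, end, count) for every interval in current_range
+    // overlapping [cpu_addr, cpu_addr + size), clamping each interval to the
+    // queried range.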
+    template <typename Func>
+    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
+                                 Func&& func) {
+        const DAddr start_address = cpu_addr;
+        const DAddr end_address = start_address + size;
+        const IntervalType search_interval{start_address, end_address};
+        auto it = current_range.lower_bound(search_interval);
+        if (it == current_range.end()) {
+            return;
+        }
+        auto end_it = current_range.upper_bound(search_interval);
+        for (; it != end_it; it++) {
+            auto& inter = it->first;
+            DAddr inter_addr_end = inter.upper();
+            DAddr inter_addr = inter.lower();
+            if (inter_addr_end > end_address) {
+                inter_addr_end = end_address;
+            }
+            if (inter_addr < start_address) {
+                inter_addr = start_address;
+            }
+            func(inter_addr, inter_addr_end, it->second);
+        }
+    }
+
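+    // Applies subtract_value (negative) over search_interval, then erases any
+    // split interval whose count dropped to zero or below. boost::icl may
+    // split nodes on add, so the scan restarts after every erase.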
+    void RemoveEachInOverlapCounter(OverlapCounter& current_range,
+                                    const IntervalType search_interval, int subtract_value) {
+        bool any_removals = false;
+        current_range.add(std::make_pair(search_interval, subtract_value));
+        do {
+            any_removals = false;
+            auto it = current_range.lower_bound(search_interval);
+            if (it == current_range.end()) {
+                return;
+            }
+            auto end_it = current_range.upper_bound(search_interval);
+            for (; it != end_it; it++) {
+                if (it->second <= 0) {
+                    any_removals = true;
+                    current_range.erase(it);
+                    break;
+                }
+            }
+        } while (any_removals);
+    }
+
+    IntervalSet base_set;
+    OverlapCounter mapping_overlaps;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
+    std::mutex guard;
+};
+
+HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
+                       Tegra::Host1x::Host1x& host1x)
+    : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_asid{asid} {
+    m_internal = std::make_unique<HeapMapperInternal>(host1x);
+}
+
+HeapMapper::~HeapMapper() {
+    m_internal->device_memory.Unmap(m_daddress, m_size);
+}
+
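+// Reference-counted map: base_set is scratch holding the requested range
+// minus every sub-range that is already mapped (already counted in
+// mapping_overlaps); only the remainder is mapped into device memory, then
+// the whole request gains +1 in the overlap counter.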
+DAddr HeapMapper::Map(VAddr start, size_t size) {
+    std::scoped_lock lk(m_internal->guard);
+    m_internal->base_set.clear();
+    const IntervalType interval{start, start + size};
+    m_internal->base_set.insert(interval);
+    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+                                        [this](VAddr start_addr, VAddr end_addr, int) {
+                                            const IntervalType other{start_addr, end_addr};
+                                            m_internal->base_set.subtract(other);
+                                        });
+    if (!m_internal->base_set.empty()) {
+        auto it = m_internal->base_set.begin();
+        auto end_it = m_internal->base_set.end();
+        for (; it != end_it; it++) {
+            const VAddr inter_addr_end = it->upper();
+            const VAddr inter_addr = it->lower();
+            const size_t offset = inter_addr - m_vaddress;
+            const size_t sub_size = inter_addr_end - inter_addr;
+            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
+                                          m_asid);
+        }
+    }
+    m_internal->mapping_overlaps += std::make_pair(interval, 1);
+    m_internal->base_set.clear();
+    return m_daddress + (start - m_vaddress);
+}
+
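+// Reverse of Map: only sub-ranges whose count is about to reach zero
+// (value <= 1) are actually unmapped from device memory, then the whole
+// range gets -1 in the overlap counter.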
+void HeapMapper::Unmap(VAddr start, size_t size) {
+    std::scoped_lock lk(m_internal->guard);
+    m_internal->base_set.clear();
+    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+                                        [this](VAddr start_addr, VAddr end_addr, int value) {
+                                            if (value <= 1) {
+                                                const IntervalType other{start_addr, end_addr};
+                                                m_internal->base_set.insert(other);
+                                            }
+                                        });
+    if (!m_internal->base_set.empty()) {
+        auto it = m_internal->base_set.begin();
+        auto end_it = m_internal->base_set.end();
+        for (; it != end_it; it++) {
+            const VAddr inter_addr_end = it->upper();
+            const VAddr inter_addr = it->lower();
+            const size_t offset = inter_addr - m_vaddress;
+            const size_t sub_size = inter_addr_end - inter_addr;
+            m_internal->device_memory.Unmap(m_daddress + offset, sub_size);
+        }
+    }
+    const IntervalType to_remove{start, start + size};
+    m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1);
+    m_internal->base_set.clear();
+}
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.h b/src/core/hle/service/nvdrv/core/heap_mapper.h
new file mode 100644
index 000000000..491a12e4f
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/heap_mapper.h
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_types.h"
+#include "core/device_memory_manager.h"
+
+namespace Tegra::Host1x {
+class Host1x;
+} // namespace Tegra::Host1x
+
+namespace Service::Nvidia::NvCore {
+
+class HeapMapper {
+public:
+    HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
+               Tegra::Host1x::Host1x& host1x);
+    ~HeapMapper();
+
+    bool IsInBounds(VAddr start, size_t size) const {
+        VAddr end = start + size;
+        return start >= m_vaddress && end <= (m_vaddress + m_size);
+    }
+
+    DAddr Map(VAddr start, size_t size);
+
+    void Unmap(VAddr start, size_t size);
+
+    DAddr GetRegionStart() const {
+        return m_daddress;
+    }
+
+    size_t GetRegionSize() const {
+        return m_size;
+    }
+
+private:
+    struct HeapMapperInternal;
+    VAddr m_vaddress;
+    DAddr m_daddress;
+    size_t m_size;
+    Core::Asid m_asid;
+    std::unique_ptr<HeapMapperInternal> m_internal;
+};
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 0ca05257e..1b59c6b15 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -2,14 +2,19 @@
 // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
 // SPDX-License-Identifier: GPL-3.0-or-later
 
+#include <functional>
+
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/heap_mapper.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/memory.h"
 #include "video_core/host1x/host1x.h"
 
 using Core::Memory::YUZU_PAGESIZE;
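+// SMMU "big page" granularity: pinned allocations are rounded up to 16 guest
+// pages (64 KiB with the usual 4 KiB page).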
+constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;
 
 namespace Service::Nvidia::NvCore {
 NvMap::Handle::Handle(u64 size_, Id id_)
@@ -17,9 +22,9 @@ NvMap::Handle::Handle(u64 size_, Id id_)
     flags.raw = 0;
 }
 
-NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
+NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
+                              NvCore::SessionId pSessionId) {
     std::scoped_lock lock(mutex);
-
     // Handles cannot be allocated twice
     if (allocated) {
         return NvResult::AccessDenied;
@@ -28,6 +33,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
     flags = pFlags;
     kind = pKind;
     align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
+    session_id = pSessionId;
 
     // This flag is only applicable for handles with an address passed
     if (pAddress) {
@@ -63,7 +69,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
     return NvResult::Success;
 }
 
-NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
+NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {}
 
 void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
     std::scoped_lock lock(handles_lock);
@@ -78,12 +84,30 @@ void NvMap::UnmapHandle(Handle& handle_description) {
         handle_description.unmap_queue_entry.reset();
     }
 
+    // Free and unmap the handle from Host1x GMMU
+    if (handle_description.pin_virt_address) {
+        host1x.GMMU().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+                            handle_description.aligned_size);
+        host1x.Allocator().Free(handle_description.pin_virt_address,
+                                static_cast<u32>(handle_description.aligned_size));
+        handle_description.pin_virt_address = 0;
+    }
+
     // Free and unmap the handle from the SMMU
-    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
-                                 handle_description.aligned_size);
-    host1x.Allocator().Free(handle_description.pin_virt_address,
-                            static_cast<u32>(handle_description.aligned_size));
-    handle_description.pin_virt_address = 0;
+    const size_t map_size = handle_description.aligned_size;
+    if (!handle_description.in_heap) {
+        auto& smmu = host1x.MemoryManager();
+        size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
+        smmu.Unmap(handle_description.d_address, map_size);
+        smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
+        handle_description.d_address = 0;
+        return;
+    }
+    const VAddr vaddress = handle_description.address;
+    auto* session = core.GetSession(handle_description.session_id);
+    session->mapper->Unmap(vaddress, map_size);
+    handle_description.d_address = 0;
+    handle_description.in_heap = false;
 }
 
 bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -124,22 +148,33 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
     }
 }
 
-VAddr NvMap::GetHandleAddress(Handle::Id handle) {
+DAddr NvMap::GetHandleAddress(Handle::Id handle) {
     std::scoped_lock lock(handles_lock);
     try {
-        return handles.at(handle)->address;
+        return handles.at(handle)->d_address;
     } catch (std::out_of_range&) {
         return 0;
     }
 }
 
-u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
+DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
     auto handle_description{GetHandle(handle)};
     if (!handle_description) [[unlikely]] {
         return 0;
     }
 
     std::scoped_lock lock(handle_description->mutex);
+    const auto map_low_area = [&] {
+        if (handle_description->pin_virt_address == 0) {
+            auto& gmmu_allocator = host1x.Allocator();
+            auto& gmmu = host1x.GMMU();
+            u32 address =
+                gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size));
+            gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address,
+                     handle_description->aligned_size);
+            handle_description->pin_virt_address = address;
+        }
+    };
     if (!handle_description->pins) {
         // If we're in the unmap queue we can just remove ourselves and return since we're already
         // mapped
@@ -151,37 +186,58 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
                 unmap_queue.erase(*handle_description->unmap_queue_entry);
                 handle_description->unmap_queue_entry.reset();
 
+                if (low_area_pin) {
+                    map_low_area();
+                    handle_description->pins++;
+                    return static_cast<DAddr>(handle_description->pin_virt_address);
+                }
+
                 handle_description->pins++;
-                return handle_description->pin_virt_address;
+                return handle_description->d_address;
             }
         }
 
         // If not then allocate some space and map it
-        u32 address{};
-        auto& smmu_allocator = host1x.Allocator();
-        auto& smmu_memory_manager = host1x.MemoryManager();
-        while ((address = smmu_allocator.Allocate(
-                    static_cast<u32>(handle_description->aligned_size))) == 0) {
-            // Free handles until the allocation succeeds
-            std::scoped_lock queueLock(unmap_queue_lock);
-            if (auto freeHandleDesc{unmap_queue.front()}) {
-                // Handles in the unmap queue are guaranteed not to be pinned so don't bother
-                // checking if they are before unmapping
-                std::scoped_lock freeLock(freeHandleDesc->mutex);
-                if (handle_description->pin_virt_address)
-                    UnmapHandle(*freeHandleDesc);
-            } else {
-                LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
+        DAddr address{};
+        auto& smmu = host1x.MemoryManager();
+        auto* session = core.GetSession(handle_description->session_id);
+        const VAddr vaddress = handle_description->address;
+        const size_t map_size = handle_description->aligned_size;
+        if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) {
+            handle_description->d_address = session->mapper->Map(vaddress, map_size);
+            handle_description->in_heap = true;
+        } else {
+            size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
+            while ((address = smmu.Allocate(aligned_up)) == 0) {
+                // Free handles until the allocation succeeds
+                std::scoped_lock queueLock(unmap_queue_lock);
+                if (auto freeHandleDesc{unmap_queue.front()}) {
+                    // Handles in the unmap queue are guaranteed not to be pinned so don't bother
+                    // checking if they are before unmapping
+                    std::scoped_lock freeLock(freeHandleDesc->mutex);
+                    if (handle_description->d_address)
+                        UnmapHandle(*freeHandleDesc);
+                } else {
+                    LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
+                }
             }
-        }
 
-        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
-                                handle_description->aligned_size);
-        handle_description->pin_virt_address = address;
+            handle_description->d_address = address;
+            smmu.Map(address, vaddress, map_size, session->asid, true);
+            handle_description->in_heap = false;
+        }
+    }
+
+    if (low_area_pin) {
+        map_low_area();
     }
 
     handle_description->pins++;
-    return handle_description->pin_virt_address;
+    if (low_area_pin) {
+        return static_cast<DAddr>(handle_description->pin_virt_address);
+    }
+    return handle_description->d_address;
 }
 
 void NvMap::UnpinHandle(Handle::Id handle) {
@@ -232,7 +288,7 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
                 LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
             } else if (handle_description->dupes == 0) {
                 // Force unmap the handle
-                if (handle_description->pin_virt_address) {
+                if (handle_description->d_address) {
                     std::scoped_lock queueLock(unmap_queue_lock);
                     UnmapHandle(*handle_description);
                 }
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index a8e573890..d7f695845 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -14,6 +14,7 @@
 
 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 
 namespace Tegra {
@@ -25,6 +26,8 @@ class Host1x;
 } // namespace Tegra
 
 namespace Service::Nvidia::NvCore {
+
+class Container;
 /**
  * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
  * handles
@@ -48,7 +51,7 @@ public:
         using Id = u32;
         Id id; //!< A globally unique identifier for this handle
 
-        s32 pins{};
+        s64 pins{};
         u32 pin_virt_address{};
         std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
 
@@ -61,15 +64,18 @@ public:
         } flags{};
         static_assert(sizeof(Flags) == sizeof(u32));
 
-        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
-                       //!< this can also be in the nvdrv tmem
+        VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
+                         //!< this can also be in the nvdrv tmem
         bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                      //!< call
 
         u8 kind{};        //!< Used for memory compression
         bool allocated{}; //!< If the handle has been allocated with `Alloc`
+        bool in_heap{}; //!< If the handle is currently mapped through its session's HeapMapper
+        NvCore::SessionId session_id{}; //!< The nvdrv session this handle was allocated under
 
-        u64 dma_map_addr{}; //! remove me after implementing pinning.
+        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
+                           //!< to, this can also be in the nvdrv tmem
 
         Handle(u64 size, Id id);
 
@@ -77,7 +83,8 @@ public:
          * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
          * if a 0 address is passed
          */
-        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
+        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
+                                     NvCore::SessionId pSessionId);
 
         /**
          * @brief Increases the dupe counter of the handle for the given session
@@ -108,7 +115,7 @@ public:
         bool can_unlock;   //!< If the address region is ready to be unlocked
     };
 
-    explicit NvMap(Tegra::Host1x::Host1x& host1x);
+    explicit NvMap(Container& core, Tegra::Host1x::Host1x& host1x);
 
     /**
      * @brief Creates an unallocated handle of the given size
@@ -117,7 +124,7 @@ public:
 
     std::shared_ptr<Handle> GetHandle(Handle::Id handle);
 
-    VAddr GetHandleAddress(Handle::Id handle);
+    DAddr GetHandleAddress(Handle::Id handle);
 
     /**
      * @brief Maps a handle into the SMMU address space
@@ -125,7 +132,7 @@ public:
      * number of calls to `UnpinHandle`
      * @return The SMMU virtual address that the handle has been mapped to
      */
-    u32 PinHandle(Handle::Id handle);
+    DAddr PinHandle(Handle::Id handle, bool low_area_pin);
 
     /**
      * @brief When this has been called an equal number of times to `PinHandle` for the supplied
@@ -172,5 +179,7 @@ private:
      * @return If the handle was removed from the map
      */
     bool TryRemoveHandle(const Handle& handle_description);
+
+    Container& core;
 };
 } // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index a04538d5d..8adaddc60 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -7,6 +7,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 
 namespace Core {
@@ -62,7 +63,7 @@ public:
      * Called once a device is opened
      * @param fd The device fd
      */
-    virtual void OnOpen(DeviceFD fd) = 0;
+    virtual void OnOpen(NvCore::SessionId session_id, DeviceFD fd) = 0;
 
     /**
      * Called once a device is closed
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 05a43d8dc..c1ebbd62d 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -35,14 +35,14 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
     return NvResult::NotImplemented;
 }
 
-void nvdisp_disp0::OnOpen(DeviceFD fd) {}
+void nvdisp_disp0::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
 void nvdisp_disp0::OnClose(DeviceFD fd) {}
 
 void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
                         u32 height, u32 stride, android::BufferTransformFlags transform,
                         const Common::Rectangle<int>& crop_rect,
                         std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
-    const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
+    const DAddr addr = nvmap.GetHandleAddress(buffer_handle);
     LOG_TRACE(Service,
               "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
               addr, offset, width, height, stride, format);
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index daee05fe8..5f13a50a2 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -32,7 +32,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
     /// Performs a screen flip, drawing the buffer pointed to by the handle.
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 6b3639008..e6646ba04 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
     return NvResult::NotImplemented;
 }
 
-void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_as_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
 void nvhost_as_gpu::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
@@ -206,6 +206,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
                        static_cast<u32>(aligned_size >> page_size_bits));
     }
 
+    nvmap.UnpinHandle(mapping->handle);
+
     // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
     // Only FreeSpace can unmap them fully
     if (mapping->sparse_alloc) {
@@ -293,12 +295,12 @@ NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
                 return NvResult::BadValue;
             }
 
-            VAddr cpu_address{static_cast<VAddr>(
-                handle->address +
-                (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
+            DAddr base = nvmap.PinHandle(entry.handle, false);
+            DAddr device_address{static_cast<DAddr>(
+                base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
 
-            gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
-                      use_big_pages);
+            gmmu->Map(virtual_address, device_address, size,
+                      static_cast<Tegra::PTEKind>(entry.kind), use_big_pages);
         }
     }
 
@@ -331,9 +333,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
             }
 
             u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
-            VAddr cpu_address{mapping->ptr + params.buffer_offset};
+            VAddr device_address{mapping->ptr + params.buffer_offset};
 
-            gmmu->Map(gpu_address, cpu_address, params.mapping_size,
+            gmmu->Map(gpu_address, device_address, params.mapping_size,
                       static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);
 
             return NvResult::Success;
@@ -349,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
         return NvResult::BadValue;
     }
 
-    VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
+    DAddr device_address{
+        static_cast<DAddr>(nvmap.PinHandle(params.handle, false) + params.buffer_offset)};
     u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
 
     bool big_page{[&]() {
@@ -373,15 +376,14 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
         }
 
         const bool use_big_pages = alloc->second.big_pages && big_page;
-        gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
+        gmmu->Map(params.offset, device_address, size, static_cast<Tegra::PTEKind>(params.kind),
                   use_big_pages);
 
-        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
-                                               use_big_pages, alloc->second.sparse)};
+        auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
+                                               true, use_big_pages, alloc->second.sparse)};
         alloc->second.mappings.push_back(mapping);
         mapping_map[params.offset] = mapping;
     } else {
-
         auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
         u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
         u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
@@ -394,11 +396,11 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
             return NvResult::InsufficientMemory;
         }
 
-        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
+        gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size),
                   static_cast<Tegra::PTEKind>(params.kind), big_page);
 
-        auto mapping{
-            std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
+        auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
+                                               false, big_page, false)};
         mapping_map[params.offset] = mapping;
     }
 
@@ -433,6 +435,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
             gmmu->Unmap(params.offset, mapping->size);
         }
 
+        nvmap.UnpinHandle(mapping->handle);
+
         mapping_map.erase(params.offset);
     } catch (const std::out_of_range&) {
         LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 79a21683d..7d0a99988 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -55,7 +55,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
     Kernel::KEvent* QueryEvent(u32 event_id) override;
@@ -159,16 +159,18 @@ private:
     NvCore::NvMap& nvmap;
 
     struct Mapping {
-        VAddr ptr;
+        NvCore::NvMap::Handle::Id handle;
+        DAddr ptr;
         u64 offset;
         u64 size;
         bool fixed;
         bool big_page; // Only valid if fixed == false
         bool sparse_alloc;
 
-        Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
-            : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
-              sparse_alloc(sparse_alloc_) {}
+        Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_,
+                bool big_page_, bool sparse_alloc_)
+            : handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_),
+              big_page(big_page_), sparse_alloc(sparse_alloc_) {}
     };
 
     struct Allocation {
@@ -212,9 +214,6 @@ private:
         bool initialised{};
     } vm;
     std::shared_ptr<Tegra::MemoryManager> gmmu;
-
-    // s32 channel{};
-    // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index b8dd34e24..250d01de3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inp
     return NvResult::NotImplemented;
 }
 
-void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+void nvhost_ctrl::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
 
 void nvhost_ctrl::OnClose(DeviceFD fd) {}
 
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 992124b60..403f1a746 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -32,7 +32,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
     Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 3e0c96456..ddd85678b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
     return NvResult::NotImplemented;
 }
 
-void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_ctrl_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
 void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index d170299bd..d2ab05b21 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -28,7 +28,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
     Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index b0395c2f0..bf12d69a5 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     return NvResult::NotImplemented;
 }
 
-void nvhost_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
 void nvhost_gpu::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 88fd228ff..e34a978db 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -47,7 +47,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
     Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index f43914e1b..2c0ac2a46 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
         case 0x7:
             return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
         case 0x9:
-            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
+            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd);
         case 0xa:
             return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
         default:
@@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
     return NvResult::NotImplemented;
 }
 
-void nvhost_nvdec::OnOpen(DeviceFD fd) {
+void nvhost_nvdec::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream started");
     system.SetNVDECActive(true);
+    sessions[fd] = session_id;
 }
 
 void nvhost_nvdec::OnClose(DeviceFD fd) {
@@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
     system.SetNVDECActive(false);
+    auto it = sessions.find(fd);
+    if (it != sessions.end()) {
+        sessions.erase(it);
+    }
 }
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index ad2233c49..627686757 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -20,7 +20,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 };
 
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 74c701b95..a0a7bfa40 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,6 +8,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
@@ -95,6 +96,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
     offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
 
     auto& gpu = system.GPU();
+    auto* session = core.GetSession(sessions[fd]);
+
     if (gpu.UseNvdec()) {
         for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
             const SyncptIncr& syncpt_incr = syncpt_increments[i];
@@ -106,8 +109,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
         const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
         ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
         Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
-        system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
-                                             cmdlist.size() * sizeof(u32));
+        session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
+                                                cmdlist.size() * sizeof(u32));
         gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
     }
     // Some games expect command_buffers to be written back
@@ -133,10 +136,12 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
     return NvResult::Success;
 }
 
-NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
+NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries,
+                                        DeviceFD fd) {
     const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
     for (size_t i = 0; i < num_entries; i++) {
-        entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
+        DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, true);
+        entries[i].map_address = static_cast<u32>(pin_address);
     }
 
     return NvResult::Success;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 7ce748e18..900db81d2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -4,7 +4,9 @@
 #pragma once
 
 #include <deque>
+#include <unordered_map>
 #include <vector>
+
 #include "common/common_types.h"
 #include "common/swap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
@@ -111,7 +113,7 @@ protected:
     NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
     NvResult GetSyncpoint(IoctlGetSyncpoint& params);
     NvResult GetWaitbase(IoctlGetWaitbase& params);
-    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd);
     NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
     NvResult SetSubmitTimeout(u32 timeout);
 
@@ -125,6 +127,7 @@ protected:
     NvCore::NvMap& nvmap;
     NvCore::ChannelType channel_type;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
+    std::unordered_map<DeviceFD, NvCore::SessionId> sessions;
 };
 }; // namespace Devices
 } // namespace Service::Nvidia
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
index 9e6b86458..f87d53f12 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
@@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
     return NvResult::NotImplemented;
 }
 
-void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
+void nvhost_nvjpg::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
 void nvhost_nvjpg::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
index 790c97f6a..def9c254d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
@@ -22,7 +22,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
 private:
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 87f8d7c22..bf090f5eb 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
         case 0x3:
             return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
         case 0x9:
-            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
+            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd);
         case 0xa:
             return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
         default:
@@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     return NvResult::NotImplemented;
 }
 
-void nvhost_vic::OnOpen(DeviceFD fd) {}
+void nvhost_vic::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
+    sessions[fd] = session_id;
+}
 
 void nvhost_vic::OnClose(DeviceFD fd) {
     auto& host1x_file = core.Host1xDeviceFile();
@@ -76,6 +78,7 @@ void nvhost_vic::OnClose(DeviceFD fd) {
     if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
+    sessions.erase(fd);
 }
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index cadbcb0a5..0cc04354a 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -19,7 +19,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 };
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 71b2e62ec..da61a3bfe 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
         case 0x3:
             return WrapFixed(this, &nvmap::IocFromId, input, output);
         case 0x4:
-            return WrapFixed(this, &nvmap::IocAlloc, input, output);
+            return WrapFixed(this, &nvmap::IocAlloc, input, output, fd);
         case 0x5:
-            return WrapFixed(this, &nvmap::IocFree, input, output);
+            return WrapFixed(this, &nvmap::IocFree, input, output, fd);
         case 0x9:
             return WrapFixed(this, &nvmap::IocParam, input, output);
         case 0xe:
@@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
     return NvResult::NotImplemented;
 }
 
-void nvmap::OnOpen(DeviceFD fd) {}
-void nvmap::OnClose(DeviceFD fd) {}
+void nvmap::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
+    sessions[fd] = session_id;
+}
+void nvmap::OnClose(DeviceFD fd) {
+    auto it = sessions.find(fd);
+    if (it != sessions.end()) {
+        sessions.erase(it);
+    }
+}
 
 NvResult nvmap::IocCreate(IocCreateParams& params) {
     LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
@@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
     return NvResult::Success;
 }
 
-NvResult nvmap::IocAlloc(IocAllocParams& params) {
+NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
     LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
 
     if (!params.handle) {
@@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) {
         return NvResult::InsufficientMemory;
     }
 
-    const auto result =
-        handle_description->Alloc(params.flags, params.align, params.kind, params.address);
+    const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
+                                                  params.address, sessions[fd]);
     if (result != NvResult::Success) {
         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
         return result;
     }
     bool is_out_io{};
-    ASSERT(system.ApplicationProcess()
-               ->GetPageTable()
+    auto process = container.GetSession(sessions[fd])->process;
+    ASSERT(process->GetPageTable()
                .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
                                              handle_description->size,
                                              Kernel::KMemoryPermission::None, true, false)
@@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) {
     return NvResult::Success;
 }
 
-NvResult nvmap::IocFree(IocFreeParams& params) {
+NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) {
     LOG_DEBUG(Service_NVDRV, "called");
 
     if (!params.handle) {
@@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params) {
     }
 
     if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+        auto process = container.GetSession(sessions[fd])->process;
         if (freeInfo->can_unlock) {
-            ASSERT(system.ApplicationProcess()
-                       ->GetPageTable()
+            ASSERT(process->GetPageTable()
                        .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                        .IsSuccess());
         }
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 049c11028..d07d85f88 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -33,7 +33,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;
 
-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
     enum class HandleParameterType : u32_le {
@@ -100,11 +100,11 @@ public:
     static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
 
     NvResult IocCreate(IocCreateParams& params);
-    NvResult IocAlloc(IocAllocParams& params);
+    NvResult IocAlloc(IocAllocParams& params, DeviceFD fd);
     NvResult IocGetId(IocGetIdParams& params);
     NvResult IocFromId(IocFromIdParams& params);
     NvResult IocParam(IocParamParams& params);
-    NvResult IocFree(IocFreeParams& params);
+    NvResult IocFree(IocFreeParams& params, DeviceFD fd);
 
 private:
     /// Id to use for the next handle that is created.
@@ -115,6 +115,7 @@ private:
 
     NvCore::Container& container;
     NvCore::NvMap& file;
+    std::unordered_map<DeviceFD, NvCore::SessionId> sessions;
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 9e46ee8dd..cb256e5b4 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) {
 void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) {
     auto server_manager = std::make_unique<ServerManager>(system);
     auto module = std::make_shared<Module>(system);
-    server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
-    server_manager->RegisterNamedService("nvdrv:a",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:a"));
-    server_manager->RegisterNamedService("nvdrv:s",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:s"));
-    server_manager->RegisterNamedService("nvdrv:t",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:t"));
+    const auto NvdrvInterfaceFactoryForApplication = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv");
+    };
+    const auto NvdrvInterfaceFactoryForApplets = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:a");
+    };
+    const auto NvdrvInterfaceFactoryForSysmodules = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:s");
+    };
+    const auto NvdrvInterfaceFactoryForTesting = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:t");
+    };
+    server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication);
+    server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets);
+    server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules);
+    server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactoryForTesting);
     server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system));
     nvnflinger.SetNVDrvInstance(module);
     ServerManager::RunServer(std::move(server_manager));
@@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
     return NvResult::Success;
 }
 
-DeviceFD Module::Open(const std::string& device_name) {
+DeviceFD Module::Open(const std::string& device_name, NvCore::SessionId session_id) {
     auto it = builders.find(device_name);
     if (it == builders.end()) {
         LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
@@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name) {
     auto& builder = it->second;
     auto device = builder(fd)->second;
 
-    device->OnOpen(fd);
+    device->OnOpen(session_id, fd);
 
     return fd;
 }
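
Two related notes on this hunk: the four nvdrv services are now registered through
factory lambdas rather than one eagerly constructed NVDRV instance, so each client
connection builds its own interface object (and, per nvdrv_interface.cpp below, its
own session); and Module::Open now threads the SessionId down to the device's
OnOpen hook. The `override` in nvmap.h above implies the base device class changed
accordingly; nvdevice.h is outside this excerpt, but its hook is presumably now:

    virtual void OnOpen(NvCore::SessionId session_id, DeviceFD fd) = 0;
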
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index d8622b3ca..c594f0e5e 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -77,7 +77,7 @@ public:
     NvResult VerifyFD(DeviceFD fd) const;
 
     /// Opens a device node and returns a file descriptor to it.
-    DeviceFD Open(const std::string& device_name);
+    DeviceFD Open(const std::string& device_name, NvCore::SessionId session_id);
 
     /// Sends an ioctl command to the specified file descriptor.
     NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output);
@@ -93,6 +93,10 @@ public:
 
     NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
 
+    NvCore::Container& GetContainer() {
+        return container;
+    }
+
 private:
     friend class EventInterface;
     friend class Service::Nvnflinger::Nvnflinger;
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index c8a880e84..6e4825313 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -3,8 +3,10 @@
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/service/ipc_helpers.h"
 #include "core/hle/service/nvdrv/nvdata.h"
@@ -37,7 +39,7 @@ void NVDRV::Open(HLERequestContext& ctx) {
         return;
     }
 
-    DeviceFD fd = nvdrv->Open(device_name);
+    DeviceFD fd = nvdrv->Open(device_name, session_id);
 
     rb.Push<DeviceFD>(fd);
     rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);
@@ -150,12 +152,29 @@ void NVDRV::Close(HLERequestContext& ctx) {
 
 void NVDRV::Initialize(HLERequestContext& ctx) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+    IPC::ResponseBuilder rb{ctx, 3};
+    SCOPE_EXIT({
+        rb.Push(ResultSuccess);
+        rb.PushEnum(NvResult::Success);
+    });
+
+    if (is_initialized) {
+        // No need to initialize again
+        return;
+    }
+
+    IPC::RequestParser rp{ctx};
+    const auto process_handle{ctx.GetCopyHandle(0)};
+    // The transfer memory is lent to nvdrv as a work buffer since nvdrv is
+    // unable to allocate as much memory on its own. For HLE it is unnecessary to handle it.
+    [[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)};
+    [[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>();
+
+    auto& container = nvdrv->GetContainer();
+    auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle);
+    session_id = container.OpenSession(process.GetPointerUnsafe());
 
     is_initialized = true;
-
-    IPC::ResponseBuilder rb{ctx, 3};
-    rb.Push(ResultSuccess);
-    rb.PushEnum(NvResult::Success);
 }
 
 void NVDRV::QueryEvent(HLERequestContext& ctx) {
@@ -242,6 +261,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char*
     RegisterHandlers(functions);
 }
 
-NVDRV::~NVDRV() = default;
+NVDRV::~NVDRV() {
+    auto& container = nvdrv->GetContainer();
+    container.CloseSession(session_id);
+}
 
 } // namespace Service::Nvidia
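
Initialize now reads the copied process handle, opens a container session bound to
that process, and the destructor closes it, so the session lives exactly as long as
the client's NVDRV interface; the SCOPE_EXIT also keeps the early "already
initialized" return replying with success. A hedged RAII sketch of the same
pairing, using only the Container calls visible in this patch (ScopedNvSession is
hypothetical):

    class ScopedNvSession {
    public:
        ScopedNvSession(NvCore::Container& container, Kernel::KProcess* process)
            : container_{container}, id_{container.OpenSession(process)} {}
        ~ScopedNvSession() {
            container_.CloseSession(id_);
        }
        NvCore::SessionId Id() const {
            return id_;
        }

    private:
        NvCore::Container& container_;
        NvCore::SessionId id_;
    };
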
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index 6e98115dc..f2195ae1e 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -35,6 +35,7 @@ private:
 
     u64 pid{};
     bool is_initialized{};
+    NvCore::SessionId session_id{};
     Common::ScratchBuffer<u8> output_buffer;
     Common::ScratchBuffer<u8> inline_output_buffer;
 };
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index 2fef6cc1a..86e272b41 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -87,19 +87,20 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
     R_SUCCEED();
 }
 
-Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
+Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) {
     // Free the handle.
     Nvidia::Devices::nvmap::IocFreeParams free_params{
         .handle = handle,
     };
-    R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
+             VI::ResultOperationFailed);
 
     // We succeeded.
     R_SUCCEED();
 }
 
 Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
-                        u32 size) {
+                        u32 size, Nvidia::DeviceFD nvmap_fd) {
     // Assign the allocated memory to the handle.
     Nvidia::Devices::nvmap::IocAllocParams alloc_params{
         .handle = handle,
@@ -109,16 +110,16 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
         .kind = 0,
         .address = GetInteger(buffer),
     };
-    R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
+             VI::ResultOperationFailed);
 
     // We succeeded.
     R_SUCCEED();
 }
 
-Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
+Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd,
                                Common::ProcessAddress buffer, u32 size) {
     // Get the nvmap device.
-    auto nvmap_fd = nvdrv.Open("/dev/nvmap");
     auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd);
     ASSERT(nvmap != nullptr);
 
@@ -127,11 +128,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
 
     // Ensure we maintain a clean state on failure.
     ON_RESULT_FAILURE {
-        ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle)));
+        ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd)));
     };
 
     // Assign the allocated memory to the handle.
-    R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size));
+    R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd));
 }
 
 constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888;
@@ -197,9 +198,13 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
                                            std::addressof(m_buffer_page_group), m_system,
                                            SharedBufferSize));
 
+    auto& container = m_nvdrv->GetContainer();
+    m_session_id = container.OpenSession(m_system.ApplicationProcess());
+    m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);
+
     // Create an nvmap handle for the buffer and assign the memory to it.
-    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address,
-                                  SharedBufferSize));
+    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
+                                  map_address, SharedBufferSize));
 
     // Record the display id.
     m_display_id = display_id;
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
index c809c01b4..033bf4bbe 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
@@ -4,6 +4,8 @@
 #pragma once
 
 #include "common/math_util.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvnflinger/nvnflinger.h"
 #include "core/hle/service/nvnflinger/ui/fence.h"
 
@@ -53,7 +55,8 @@ private:
     u64 m_layer_id = 0;
     u32 m_buffer_nvmap_handle = 0;
     SharedMemoryPoolLayout m_pool_layout = {};
-
+    Nvidia::DeviceFD m_nvmap_fd = {};
+    Nvidia::NvCore::SessionId m_session_id = {};
     std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group;
 
     std::mutex m_guard;
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp
index af6591370..71d6fdb0c 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.cpp
+++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp
@@ -124,7 +124,7 @@ void Nvnflinger::ShutdownLayers() {
 
 void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
     nvdrv = std::move(instance);
-    disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
+    disp_fd = nvdrv->Open("/dev/nvdisp_disp0", {});
 }
 
 std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) {
diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
index ce70946ec..ede2a1193 100644
--- a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
+++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
@@ -22,11 +22,13 @@ GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
     : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) {
     if (this->BufferId() > 0) {
         m_nvmap->DuplicateHandle(this->BufferId(), true);
+        m_nvmap->PinHandle(this->BufferId(), false);
     }
 }
 
 GraphicBuffer::~GraphicBuffer() {
     if (m_nvmap != nullptr && this->BufferId() > 0) {
+        m_nvmap->UnpinHandle(this->BufferId());
         m_nvmap->FreeHandle(this->BufferId(), true);
     }
 }
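
GraphicBuffer now pins its nvmap handle for the buffer's whole lifetime, mirroring
the existing duplicate/free pair so the handle's device mapping stays resident while
any consumer holds the buffer. A hypothetical RAII helper equivalent to what the
constructor and destructor now do by hand (calls taken from this hunk):

    struct PinnedHandle {
        Service::Nvidia::NvCore::NvMap& nvmap;
        u32 id;

        PinnedHandle(Service::Nvidia::NvCore::NvMap& nvmap_, u32 id_)
            : nvmap{nvmap_}, id{id_} {
            nvmap.DuplicateHandle(id, true); // +ref
            nvmap.PinHandle(id, false);      // keep the device mapping resident
        }
        ~PinnedHandle() {
            nvmap.UnpinHandle(id);           // release residency
            nvmap.FreeHandle(id, true);      // -ref
        }
    };
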
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 8176a41be..1c218566f 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -24,6 +24,8 @@
 #include "core/hle/kernel/k_process.h"
 #include "core/memory.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/rasterizer_download_area.h"
 
 namespace Core::Memory {
@@ -637,17 +639,6 @@ struct Memory::Impl {
         LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
                   base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);
 
-        // During boot, current_page_table might not be set yet, in which case we need not flush
-        if (system.IsPoweredOn()) {
-            auto& gpu = system.GPU();
-            for (u64 i = 0; i < size; i++) {
-                const auto page = base + i;
-                if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
-                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
-                }
-            }
-        }
-
         const auto end = base + size;
         ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                    base + page_table.pointers.size());
@@ -811,21 +802,33 @@ struct Memory::Impl {
         return true;
     }
 
-    void HandleRasterizerDownload(VAddr address, size_t size) {
+    void HandleRasterizerDownload(VAddr v_address, size_t size) {
+        const auto* p = GetPointerImpl(
+            v_address, []() {}, []() {});
+        if (!gpu_device_memory) [[unlikely]] {
+            gpu_device_memory = &system.Host1x().MemoryManager();
+        }
         const size_t core = system.GetCurrentHostThreadID();
         auto& current_area = rasterizer_read_areas[core];
-        const VAddr end_address = address + size;
-        if (current_area.start_address <= address && end_address <= current_area.end_address)
-            [[likely]] {
-            return;
-        }
-        current_area = system.GPU().OnCPURead(address, size);
+        gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
+            const DAddr end_address = address + size;
+            if (current_area.start_address <= address && end_address <= current_area.end_address)
+                [[likely]] {
+                return;
+            }
+            current_area = system.GPU().OnCPURead(address, size);
+        });
     }
 
-    void HandleRasterizerWrite(VAddr address, size_t size) {
+    void HandleRasterizerWrite(VAddr v_address, size_t size) {
+        const auto* p = GetPointerImpl(
+            v_address, []() {}, []() {});
         constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
         const size_t core = std::min(system.GetCurrentHostThreadID(),
                                      sys_core); // any other calls threads go to syscore.
+        if (!gpu_device_memory) [[unlikely]] {
+            gpu_device_memory = &system.Host1x().MemoryManager();
+        }
         // Guard on sys_core;
         if (core == sys_core) [[unlikely]] {
             sys_core_guard.lock();
@@ -835,36 +838,53 @@ struct Memory::Impl {
                 sys_core_guard.unlock();
             }
         });
-        auto& current_area = rasterizer_write_areas[core];
-        VAddr subaddress = address >> YUZU_PAGEBITS;
-        bool do_collection = current_area.last_address == subaddress;
-        if (!do_collection) [[unlikely]] {
-            do_collection = system.GPU().OnCPUWrite(address, size);
-            if (!do_collection) {
-                return;
+        gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
+            auto& current_area = rasterizer_write_areas[core];
+            PAddr subaddress = address >> YUZU_PAGEBITS;
+            bool do_collection = current_area.last_address == subaddress;
+            if (!do_collection) [[unlikely]] {
+                do_collection = system.GPU().OnCPUWrite(address, size);
+                if (!do_collection) {
+                    return;
+                }
+                current_area.last_address = subaddress;
             }
-            current_area.last_address = subaddress;
-        }
-        gpu_dirty_managers[core].Collect(address, size);
+            gpu_dirty_managers[core].Collect(address, size);
+        });
     }
 
     struct GPUDirtyState {
-        VAddr last_address;
+        PAddr last_address;
     };
 
-    void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
-        system.GPU().InvalidateRegion(GetInteger(dest_addr), size);
-    }
-
-    void FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
-        system.GPU().FlushRegion(GetInteger(dest_addr), size);
+    void InvalidateGPUMemory(u8* p, size_t size) {
+        constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
+        const size_t core = std::min(system.GetCurrentHostThreadID(),
+                                     sys_core); // calls from any other thread go to sys_core.
+        if (!gpu_device_memory) [[unlikely]] {
+            gpu_device_memory = &system.Host1x().MemoryManager();
+        }
+        // Guard on sys_core;
+        if (core == sys_core) [[unlikely]] {
+            sys_core_guard.lock();
+        }
+        SCOPE_EXIT({
+            if (core == sys_core) [[unlikely]] {
+                sys_core_guard.unlock();
+            }
+        });
+        auto& gpu = system.GPU();
+        gpu_device_memory->ApplyOpOnPointer(
+            p, scratch_buffers[core], [&](DAddr address) { gpu.InvalidateRegion(address, size); });
     }
 
     Core::System& system;
+    Tegra::MaxwellDeviceMemoryManager* gpu_device_memory{};
     Common::PageTable* current_page_table = nullptr;
     std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES>
         rasterizer_read_areas{};
     std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
+    std::array<Common::ScratchBuffer<u32>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
 
@@ -1059,14 +1079,6 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug)
     impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
 }
 
-void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
-    impl->InvalidateRegion(dest_addr, size);
-}
-
-void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
-    impl->FlushRegion(dest_addr, size);
-}
-
 bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
     [[maybe_unused]] bool mapped = true;
     [[maybe_unused]] bool rasterizer = false;
@@ -1078,10 +1090,10 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
                       GetInteger(vaddr));
             mapped = false;
         },
-        [&] {
-            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
-            rasterizer = true;
-        });
+        [&] { rasterizer = true; });
+    if (rasterizer) {
+        impl->InvalidateGPUMemory(ptr, size);
+    }
 
 #ifdef __linux__
     if (!rasterizer && mapped) {
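
The rasterizer fault paths no longer hand the guest VAddr straight to the GPU;
they resolve the backing host pointer through the Host1x device memory manager,
which maps it back into the device address space (where one host page can back
several DAddr aliases) and applies the operation per resolved address. The real
ApplyOpOnPointer lives outside this patch; below is a self-contained toy of the
reverse-mapping idea it names, with every name hypothetical:

    #include <cstddef>
    #include <cstdint>
    #include <map>

    using DAddr = std::uint64_t;

    // Toy reverse map from host pointers to device addresses. The real
    // Tegra::MaxwellDeviceMemoryManager walks its own page tables instead.
    class ToyDeviceMemory {
    public:
        void Map(const std::uint8_t* host, DAddr daddr, std::size_t size) {
            ranges.emplace(host, Range{daddr, size});
        }

        template <typename Op>
        void ApplyOpOnPointer(const std::uint8_t* p, Op&& op) const {
            auto it = ranges.upper_bound(p);
            if (it == ranges.begin()) {
                return; // pointer lies below every mapped range
            }
            --it; // last range starting at or before p
            const std::uint8_t* base = it->first;
            const Range& r = it->second;
            if (p < base + r.size) {
                op(r.daddr + static_cast<DAddr>(p - base)); // translate, then apply
            }
        }

    private:
        struct Range {
            DAddr daddr;
            std::size_t size;
        };
        std::map<const std::uint8_t*, Range> ranges;
    };
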
diff --git a/src/core/memory.h b/src/core/memory.h
index dddfaf4a4..f7e6b297f 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -12,6 +12,7 @@
 
 #include "common/scratch_buffer.h"
 #include "common/typed_address.h"
+#include "core/guest_memory.h"
 #include "core/hle/result.h"
 
 namespace Common {
@@ -486,10 +487,10 @@ public:
     void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);
 
     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
-    void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
+
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
+
     bool InvalidateSeparateHeap(void* fault_address);
-    void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
 
 private:
     Core::System& system;
@@ -498,209 +499,9 @@ private:
     std::unique_ptr<Impl> impl;
 };
 
-enum GuestMemoryFlags : u32 {
-    Read = 1 << 0,
-    Write = 1 << 1,
-    Safe = 1 << 2,
-    Cached = 1 << 3,
-
-    SafeRead = Read | Safe,
-    SafeWrite = Write | Safe,
-    SafeReadWrite = SafeRead | SafeWrite,
-    SafeReadCachedWrite = SafeReadWrite | Cached,
-
-    UnsafeRead = Read,
-    UnsafeWrite = Write,
-    UnsafeReadWrite = UnsafeRead | UnsafeWrite,
-    UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
-};
-
-namespace {
-template <typename M, typename T, GuestMemoryFlags FLAGS>
-class GuestMemory {
-    using iterator = T*;
-    using const_iterator = const T*;
-    using value_type = T;
-    using element_type = T;
-    using iterator_category = std::contiguous_iterator_tag;
-
-public:
-    GuestMemory() = delete;
-    explicit GuestMemory(M& memory, u64 addr, std::size_t size,
-                         Common::ScratchBuffer<T>* backup = nullptr)
-        : m_memory{memory}, m_addr{addr}, m_size{size} {
-        static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
-        if constexpr (FLAGS & GuestMemoryFlags::Read) {
-            Read(addr, size, backup);
-        }
-    }
-
-    ~GuestMemory() = default;
-
-    T* data() noexcept {
-        return m_data_span.data();
-    }
-
-    const T* data() const noexcept {
-        return m_data_span.data();
-    }
-
-    size_t size() const noexcept {
-        return m_size;
-    }
-
-    size_t size_bytes() const noexcept {
-        return this->size() * sizeof(T);
-    }
-
-    [[nodiscard]] T* begin() noexcept {
-        return this->data();
-    }
-
-    [[nodiscard]] const T* begin() const noexcept {
-        return this->data();
-    }
-
-    [[nodiscard]] T* end() noexcept {
-        return this->data() + this->size();
-    }
-
-    [[nodiscard]] const T* end() const noexcept {
-        return this->data() + this->size();
-    }
-
-    T& operator[](size_t index) noexcept {
-        return m_data_span[index];
-    }
-
-    const T& operator[](size_t index) const noexcept {
-        return m_data_span[index];
-    }
-
-    void SetAddressAndSize(u64 addr, std::size_t size) noexcept {
-        m_addr = addr;
-        m_size = size;
-        m_addr_changed = true;
-    }
-
-    std::span<T> Read(u64 addr, std::size_t size,
-                      Common::ScratchBuffer<T>* backup = nullptr) noexcept {
-        m_addr = addr;
-        m_size = size;
-        if (m_size == 0) {
-            m_is_data_copy = true;
-            return {};
-        }
-
-        if (this->TrySetSpan()) {
-            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
-                m_memory.FlushRegion(m_addr, this->size_bytes());
-            }
-        } else {
-            if (backup) {
-                backup->resize_destructive(this->size());
-                m_data_span = *backup;
-            } else {
-                m_data_copy.resize(this->size());
-                m_data_span = std::span(m_data_copy);
-            }
-            m_is_data_copy = true;
-            m_span_valid = true;
-            if constexpr (FLAGS & GuestMemoryFlags::Safe) {
-                m_memory.ReadBlock(m_addr, this->data(), this->size_bytes());
-            } else {
-                m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes());
-            }
-        }
-        return m_data_span;
-    }
-
-    void Write(std::span<T> write_data) noexcept {
-        if constexpr (FLAGS & GuestMemoryFlags::Cached) {
-            m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes());
-        } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
-            m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes());
-        } else {
-            m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes());
-        }
-    }
-
-    bool TrySetSpan() noexcept {
-        if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) {
-            m_data_span = {reinterpret_cast<T*>(ptr), this->size()};
-            m_span_valid = true;
-            return true;
-        }
-        return false;
-    }
-
-protected:
-    bool IsDataCopy() const noexcept {
-        return m_is_data_copy;
-    }
-
-    bool AddressChanged() const noexcept {
-        return m_addr_changed;
-    }
-
-    M& m_memory;
-    u64 m_addr{};
-    size_t m_size{};
-    std::span<T> m_data_span{};
-    std::vector<T> m_data_copy{};
-    bool m_span_valid{false};
-    bool m_is_data_copy{false};
-    bool m_addr_changed{false};
-};
-
-template <typename M, typename T, GuestMemoryFlags FLAGS>
-class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
-public:
-    GuestMemoryScoped() = delete;
-    explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size,
-                               Common::ScratchBuffer<T>* backup = nullptr)
-        : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) {
-        if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
-            if (!this->TrySetSpan()) {
-                if (backup) {
-                    this->m_data_span = *backup;
-                    this->m_span_valid = true;
-                    this->m_is_data_copy = true;
-                }
-            }
-        }
-    }
-
-    ~GuestMemoryScoped() {
-        if constexpr (FLAGS & GuestMemoryFlags::Write) {
-            if (this->size() == 0) [[unlikely]] {
-                return;
-            }
-
-            if (this->AddressChanged() || this->IsDataCopy()) {
-                ASSERT(this->m_span_valid);
-                if constexpr (FLAGS & GuestMemoryFlags::Cached) {
-                    this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes());
-                } else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
-                    this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes());
-                } else {
-                    this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
-                }
-            } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
-                                 (FLAGS & GuestMemoryFlags::Cached)) {
-                this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
-            }
-        }
-    }
-};
-} // namespace
-
 template <typename T, GuestMemoryFlags FLAGS>
-using CpuGuestMemory = GuestMemory<Memory, T, FLAGS>;
+using CpuGuestMemory = GuestMemory<Core::Memory::Memory, T, FLAGS>;
 template <typename T, GuestMemoryFlags FLAGS>
-using CpuGuestMemoryScoped = GuestMemoryScoped<Memory, T, FLAGS>;
-template <typename T, GuestMemoryFlags FLAGS>
-using GpuGuestMemory = GuestMemory<Tegra::MemoryManager, T, FLAGS>;
-template <typename T, GuestMemoryFlags FLAGS>
-using GpuGuestMemoryScoped = GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>;
+using CpuGuestMemoryScoped = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>;
+
 } // namespace Core::Memory
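
Only the include and the CPU-side aliases remain here; the GuestMemory and
GuestMemoryScoped templates move to core/guest_memory.h, and the GPU-side aliases
are superseded by Tegra::Memory::DeviceGuestMemory* over the device memory manager
(used in the buffer_cache.h hunk below). A usage sketch against the semantics of
the removed template, read on construction and written back on scope exit when a
scratch copy was used; ScaleSamples is hypothetical:

    void ScaleSamples(Core::Memory::Memory& memory, u64 addr, size_t count) {
        Core::Memory::CpuGuestMemoryScoped<u16, Core::Memory::GuestMemoryFlags::UnsafeReadWrite>
            samples(memory, addr, count);
        for (u16& sample : samples) {
            sample = static_cast<u16>(sample >> 1); // halve each sample in place
        }
    } // destructor flushes the copy back with WriteBlockUnsafe if one was made
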
diff --git a/src/tests/video_core/memory_tracker.cpp b/src/tests/video_core/memory_tracker.cpp
index 618793668..0e559a590 100644
--- a/src/tests/video_core/memory_tracker.cpp
+++ b/src/tests/video_core/memory_tracker.cpp
@@ -24,9 +24,8 @@ constexpr VAddr c = 16 * HIGH_PAGE_SIZE;
 class RasterizerInterface {
 public:
     void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-        const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
-        const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
-                           Core::Memory::YUZU_PAGEBITS};
+        const u64 page_start{addr >> Core::DEVICE_PAGEBITS};
+        const u64 page_end{(addr + size + Core::DEVICE_PAGESIZE - 1) >> Core::DEVICE_PAGEBITS};
         for (u64 page = page_start; page < page_end; ++page) {
             int& value = page_table[page];
             value += delta;
@@ -40,7 +39,7 @@ public:
     }
 
     [[nodiscard]] int Count(VAddr addr) const noexcept {
-        const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS);
+        const auto it = page_table.find(addr >> Core::DEVICE_PAGEBITS);
         return it == page_table.end() ? 0 : it->second;
     }
 
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index c22c7631c..5ed0ad0ed 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -71,6 +71,8 @@ add_library(video_core STATIC
     host1x/ffmpeg/ffmpeg.h
     host1x/control.cpp
     host1x/control.h
+    host1x/gpu_device_memory_manager.cpp
+    host1x/gpu_device_memory_manager.h
     host1x/host1x.cpp
     host1x/host1x.h
     host1x/nvdec.cpp
@@ -93,6 +95,7 @@ add_library(video_core STATIC
     gpu.h
     gpu_thread.cpp
     gpu_thread.h
+    guest_memory.h
     invalidation_accumulator.h
     memory_manager.cpp
     memory_manager.h
@@ -105,8 +108,6 @@ add_library(video_core STATIC
     query_cache/query_stream.h
     query_cache/types.h
     query_cache.h
-    rasterizer_accelerated.cpp
-    rasterizer_accelerated.h
     rasterizer_interface.h
     renderer_base.cpp
     renderer_base.h
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 0bb3bf8ae..40e98e395 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -33,13 +33,12 @@ struct NullBufferParams {};
  *
  * The buffer size and address is forcefully aligned to CPU page boundaries.
  */
-template <class RasterizerInterface>
 class BufferBase {
 public:
     static constexpr u64 BASE_PAGE_BITS = 16;
     static constexpr u64 BASE_PAGE_SIZE = 1ULL << BASE_PAGE_BITS;
 
-    explicit BufferBase(RasterizerInterface& rasterizer_, VAddr cpu_addr_, u64 size_bytes_)
+    explicit BufferBase(VAddr cpu_addr_, u64 size_bytes_)
         : cpu_addr{cpu_addr_}, size_bytes{size_bytes_} {}
 
     explicit BufferBase(NullBufferParams) {}
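
BufferBase drops both its template parameter and the rasterizer reference: cached
page accounting no longer happens per buffer but in the cache's memory tracker,
which the buffer_cache.h hunk below wires to the device memory manager.
Construction sketch (cpu_addr and size_bytes are hypothetical locals):

    BufferBase buffer{cpu_addr, size_bytes}; // no rasterizer participant anymore
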
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 6d1fc3887..b4bf369d1 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -8,16 +8,16 @@
 #include <numeric>
 
 #include "video_core/buffer_cache/buffer_cache_base.h"
+#include "video_core/guest_memory.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 
 namespace VideoCommon {
 
-using Core::Memory::YUZU_PAGESIZE;
+using Core::DEVICE_PAGESIZE;
 
 template <class P>
-BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
-                            Core::Memory::Memory& cpu_memory_, Runtime& runtime_)
-    : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, memory_tracker{
-                                                                               rasterizer} {
+BufferCache<P>::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_)
+    : runtime{runtime_}, device_memory{device_memory_}, memory_tracker{device_memory} {
     // Ensure the first slot is used for the null buffer
     void(slot_buffers.insert(runtime, NullBufferParams{}));
     common_ranges.clear();
@@ -29,17 +29,17 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
         return;
     }
 
-    const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
-    const s64 min_spacing_expected = device_memory - 1_GiB;
-    const s64 min_spacing_critical = device_memory - 512_MiB;
-    const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
+    const s64 device_local_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
+    const s64 min_spacing_expected = device_local_memory - 1_GiB;
+    const s64 min_spacing_critical = device_local_memory - 512_MiB;
+    const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD);
     const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
     const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
     minimum_memory = static_cast<u64>(
-        std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected),
+        std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected),
                  DEFAULT_EXPECTED_MEMORY));
     critical_memory = static_cast<u64>(
-        std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical),
+        std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
                  DEFAULT_CRITICAL_MEMORY));
 }
 
@@ -105,71 +105,71 @@ void BufferCache<P>::TickFrame() {
 }
 
 template <class P>
-void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
-    if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) {
-        const IntervalType subtract_interval{cpu_addr, cpu_addr + size};
+void BufferCache<P>::WriteMemory(DAddr device_addr, u64 size) {
+    if (memory_tracker.IsRegionGpuModified(device_addr, size)) {
+        const IntervalType subtract_interval{device_addr, device_addr + size};
         ClearDownload(subtract_interval);
         common_ranges.subtract(subtract_interval);
     }
-    memory_tracker.MarkRegionAsCpuModified(cpu_addr, size);
+    memory_tracker.MarkRegionAsCpuModified(device_addr, size);
 }
 
 template <class P>
-void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
-    const bool is_dirty = IsRegionRegistered(cpu_addr, size);
+void BufferCache<P>::CachedWriteMemory(DAddr device_addr, u64 size) {
+    const bool is_dirty = IsRegionRegistered(device_addr, size);
     if (!is_dirty) {
         return;
     }
-    VAddr aligned_start = Common::AlignDown(cpu_addr, YUZU_PAGESIZE);
-    VAddr aligned_end = Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE);
+    DAddr aligned_start = Common::AlignDown(device_addr, DEVICE_PAGESIZE);
+    DAddr aligned_end = Common::AlignUp(device_addr + size, DEVICE_PAGESIZE);
     if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) {
-        WriteMemory(cpu_addr, size);
+        WriteMemory(device_addr, size);
         return;
     }
 
     tmp_buffer.resize_destructive(size);
-    cpu_memory.ReadBlockUnsafe(cpu_addr, tmp_buffer.data(), size);
+    device_memory.ReadBlockUnsafe(device_addr, tmp_buffer.data(), size);
 
-    InlineMemoryImplementation(cpu_addr, size, tmp_buffer);
+    InlineMemoryImplementation(device_addr, size, tmp_buffer);
 }
 
 template <class P>
-bool BufferCache<P>::OnCPUWrite(VAddr cpu_addr, u64 size) {
-    const bool is_dirty = IsRegionRegistered(cpu_addr, size);
+bool BufferCache<P>::OnCPUWrite(DAddr device_addr, u64 size) {
+    const bool is_dirty = IsRegionRegistered(device_addr, size);
     if (!is_dirty) {
         return false;
     }
-    if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) {
+    if (memory_tracker.IsRegionGpuModified(device_addr, size)) {
         return true;
     }
-    WriteMemory(cpu_addr, size);
+    WriteMemory(device_addr, size);
     return false;
 }
 
 template <class P>
-std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(DAddr device_addr,
                                                                               u64 size) {
     std::optional<VideoCore::RasterizerDownloadArea> area{};
     area.emplace();
-    VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
-    VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
-    area->start_address = cpu_addr_start_aligned;
-    area->end_address = cpu_addr_end_aligned;
-    if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+    DAddr device_addr_start_aligned = Common::AlignDown(device_addr, Core::DEVICE_PAGESIZE);
+    DAddr device_addr_end_aligned = Common::AlignUp(device_addr + size, Core::DEVICE_PAGESIZE);
+    area->start_address = device_addr_start_aligned;
+    area->end_address = device_addr_end_aligned;
+    if (memory_tracker.IsRegionPreflushable(device_addr, size)) {
         area->preemtive = true;
         return area;
     };
-    area->preemtive =
-        !IsRegionGpuModified(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
-    memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
-                                            cpu_addr_end_aligned - cpu_addr_start_aligned);
+    area->preemtive = !IsRegionGpuModified(device_addr_start_aligned,
+                                           device_addr_end_aligned - device_addr_start_aligned);
+    memory_tracker.MarkRegionAsPreflushable(device_addr_start_aligned,
+                                            device_addr_end_aligned - device_addr_start_aligned);
     return area;
 }
 
 template <class P>
-void BufferCache<P>::DownloadMemory(VAddr cpu_addr, u64 size) {
-    ForEachBufferInRange(cpu_addr, size, [&](BufferId, Buffer& buffer) {
-        DownloadBufferMemory(buffer, cpu_addr, size);
+void BufferCache<P>::DownloadMemory(DAddr device_addr, u64 size) {
+    ForEachBufferInRange(device_addr, size, [&](BufferId, Buffer& buffer) {
+        DownloadBufferMemory(buffer, device_addr, size);
     });
 }
 
@@ -184,8 +184,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
 
 template <class P>
 bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
-    const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
-    const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
+    const std::optional<DAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
+    const std::optional<DAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
     if (!cpu_src_address || !cpu_dest_address) {
         return false;
     }
@@ -216,10 +216,10 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     }};
 
     boost::container::small_vector<IntervalType, 4> tmp_intervals;
-    auto mirror = [&](VAddr base_address, VAddr base_address_end) {
+    auto mirror = [&](DAddr base_address, DAddr base_address_end) {
         const u64 size = base_address_end - base_address;
-        const VAddr diff = base_address - *cpu_src_address;
-        const VAddr new_base_address = *cpu_dest_address + diff;
+        const DAddr diff = base_address - *cpu_src_address;
+        const DAddr new_base_address = *cpu_dest_address + diff;
         const IntervalType add_interval{new_base_address, new_base_address + size};
         tmp_intervals.push_back(add_interval);
         uncommitted_ranges.add(add_interval);
@@ -239,15 +239,15 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
         memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
     }
 
-    Core::Memory::CpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadWrite> tmp(
-        cpu_memory, *cpu_src_address, amount, &tmp_buffer);
+    Tegra::Memory::DeviceGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadWrite>
+        tmp(device_memory, *cpu_src_address, amount, &tmp_buffer);
     tmp.SetAddressAndSize(*cpu_dest_address, amount);
     return true;
 }
 
 template <class P>
 bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
-    const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
+    const std::optional<DAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
     if (!cpu_dst_address) {
         return false;
     }
@@ -273,23 +273,23 @@ template <class P>
 std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_addr, u32 size,
                                                                  ObtainBufferSynchronize sync_info,
                                                                  ObtainBufferOperation post_op) {
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
-    if (!cpu_addr) {
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    if (!device_addr) {
         return {&slot_buffers[NULL_BUFFER_ID], 0};
     }
-    return ObtainCPUBuffer(*cpu_addr, size, sync_info, post_op);
+    return ObtainCPUBuffer(*device_addr, size, sync_info, post_op);
 }
 
 template <class P>
 std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
-    VAddr cpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) {
-    const BufferId buffer_id = FindBuffer(cpu_addr, size);
+    DAddr device_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) {
+    const BufferId buffer_id = FindBuffer(device_addr, size);
     Buffer& buffer = slot_buffers[buffer_id];
 
     // synchronize op
     switch (sync_info) {
     case ObtainBufferSynchronize::FullSynchronize:
-        SynchronizeBuffer(buffer, cpu_addr, size);
+        SynchronizeBuffer(buffer, device_addr, size);
         break;
     default:
         break;
@@ -297,12 +297,12 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
 
     switch (post_op) {
     case ObtainBufferOperation::MarkAsWritten:
-        MarkWrittenBuffer(buffer_id, cpu_addr, size);
+        MarkWrittenBuffer(buffer_id, device_addr, size);
         break;
     case ObtainBufferOperation::DiscardWrite: {
-        VAddr cpu_addr_start = Common::AlignDown(cpu_addr, 64);
-        VAddr cpu_addr_end = Common::AlignUp(cpu_addr + size, 64);
-        IntervalType interval{cpu_addr_start, cpu_addr_end};
+        DAddr device_addr_start = Common::AlignDown(device_addr, 64);
+        DAddr device_addr_end = Common::AlignUp(device_addr + size, 64);
+        IntervalType interval{device_addr_start, device_addr_end};
         ClearDownload(interval);
         common_ranges.subtract(interval);
         break;
@@ -311,15 +311,15 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
         break;
     }
 
-    return {&buffer, buffer.Offset(cpu_addr)};
+    return {&buffer, buffer.Offset(device_addr)};
 }
 
 template <class P>
 void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
                                                u32 size) {
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     const Binding binding{
-        .cpu_addr = *cpu_addr,
+        .device_addr = *device_addr,
         .size = size,
         .buffer_id = BufferId{},
     };
@@ -555,16 +555,17 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
     for (const IntervalSet& intervals : committed_ranges) {
         for (auto& interval : intervals) {
             const std::size_t size = interval.upper() - interval.lower();
-            const VAddr cpu_addr = interval.lower();
-            ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
-                const VAddr buffer_start = buffer.CpuAddr();
-                const VAddr buffer_end = buffer_start + buffer.SizeBytes();
-                const VAddr new_start = std::max(buffer_start, cpu_addr);
-                const VAddr new_end = std::min(buffer_end, cpu_addr + size);
+            const DAddr device_addr = interval.lower();
+            ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
+                const DAddr buffer_start = buffer.CpuAddr();
+                const DAddr buffer_end = buffer_start + buffer.SizeBytes();
+                const DAddr new_start = std::max(buffer_start, device_addr);
+                const DAddr new_end = std::min(buffer_end, device_addr + size);
                 memory_tracker.ForEachDownloadRange(
-                    new_start, new_end - new_start, false, [&](u64 cpu_addr_out, u64 range_size) {
-                        const VAddr buffer_addr = buffer.CpuAddr();
-                        const auto add_download = [&](VAddr start, VAddr end) {
+                    new_start, new_end - new_start, false,
+                    [&](u64 device_addr_out, u64 range_size) {
+                        const DAddr buffer_addr = buffer.CpuAddr();
+                        const auto add_download = [&](DAddr start, DAddr end) {
                             const u64 new_offset = start - buffer_addr;
                             const u64 new_size = end - start;
                             downloads.push_back({
@@ -582,7 +583,7 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
                             largest_copy = std::max(largest_copy, new_size);
                         };
 
-                        ForEachInRangeSet(common_ranges, cpu_addr_out, range_size, add_download);
+                        ForEachInRangeSet(common_ranges, device_addr_out, range_size, add_download);
                     });
             });
         }
@@ -605,8 +606,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
             BufferCopy second_copy{copy};
             Buffer& buffer = slot_buffers[buffer_id];
             second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
-            VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
-            const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+            DAddr orig_device_addr = static_cast<DAddr>(second_copy.src_offset);
+            const IntervalType base_interval{orig_device_addr, orig_device_addr + copy.size};
             async_downloads += std::make_pair(base_interval, 1);
             buffer.MarkUsage(copy.src_offset, copy.size);
             runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
@@ -635,11 +636,11 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
                 runtime.Finish();
                 for (const auto& [copy, buffer_id] : downloads) {
                     const Buffer& buffer = slot_buffers[buffer_id];
-                    const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+                    const DAddr device_addr = buffer.CpuAddr() + copy.src_offset;
                     // Undo the modified offset
                     const u64 dst_offset = copy.dst_offset - download_staging.offset;
                     const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
-                    cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+                    device_memory.WriteBlockUnsafe(device_addr, read_mapped_memory, copy.size);
                 }
             } else {
                 const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
@@ -647,8 +648,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
                     Buffer& buffer = slot_buffers[buffer_id];
                     buffer.ImmediateDownload(copy.src_offset,
                                              immediate_buffer.subspan(0, copy.size));
-                    const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-                    cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+                    const DAddr device_addr = buffer.CpuAddr() + copy.src_offset;
+                    device_memory.WriteBlockUnsafe(device_addr, immediate_buffer.data(), copy.size);
                 }
             }
         }
@@ -681,19 +682,19 @@ void BufferCache<P>::PopAsyncBuffers() {
         u8* base = async_buffer->mapped_span.data();
         const size_t base_offset = async_buffer->offset;
         for (const auto& copy : downloads) {
-            const VAddr cpu_addr = static_cast<VAddr>(copy.src_offset);
+            const DAddr device_addr = static_cast<DAddr>(copy.src_offset);
             const u64 dst_offset = copy.dst_offset - base_offset;
             const u8* read_mapped_memory = base + dst_offset;
             ForEachInOverlapCounter(
-                async_downloads, cpu_addr, copy.size, [&](VAddr start, VAddr end, int count) {
-                    cpu_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - cpu_addr],
-                                                end - start);
+                async_downloads, device_addr, copy.size, [&](DAddr start, DAddr end, int count) {
+                    device_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - device_addr],
+                                                   end - start);
                     if (count == 1) {
                         const IntervalType base_interval{start, end};
                         common_ranges.subtract(base_interval);
                     }
                 });
-            const IntervalType subtract_interval{cpu_addr, cpu_addr + copy.size};
+            const IntervalType subtract_interval{device_addr, device_addr + copy.size};
             RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1);
         }
         async_buffers_death_ring.emplace_back(*async_buffer);
@@ -703,15 +704,15 @@ void BufferCache<P>::PopAsyncBuffers() {
 }
 
 template <class P>
-bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
+bool BufferCache<P>::IsRegionGpuModified(DAddr addr, size_t size) {
     bool is_dirty = false;
-    ForEachInRangeSet(common_ranges, addr, size, [&](VAddr, VAddr) { is_dirty = true; });
+    ForEachInRangeSet(common_ranges, addr, size, [&](DAddr, DAddr) { is_dirty = true; });
     return is_dirty;
 }
 
 template <class P>
-bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
-    const VAddr end_addr = addr + size;
+bool BufferCache<P>::IsRegionRegistered(DAddr addr, size_t size) {
+    const DAddr end_addr = addr + size;
     const u64 page_end = Common::DivCeil(end_addr, CACHING_PAGESIZE);
     for (u64 page = addr >> CACHING_PAGEBITS; page < page_end;) {
         const BufferId buffer_id = page_table[page];
@@ -720,8 +721,8 @@ bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
             continue;
         }
         Buffer& buffer = slot_buffers[buffer_id];
-        const VAddr buf_start_addr = buffer.CpuAddr();
-        const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes();
+        const DAddr buf_start_addr = buffer.CpuAddr();
+        const DAddr buf_end_addr = buf_start_addr + buffer.SizeBytes();
         if (buf_start_addr < end_addr && addr < buf_end_addr) {
             return true;
         }
@@ -731,7 +732,7 @@ bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
 }
 
 template <class P>
-bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
+bool BufferCache<P>::IsRegionCpuModified(DAddr addr, size_t size) {
     return memory_tracker.IsRegionCpuModified(addr, size);
 }
 
@@ -739,7 +740,7 @@ template <class P>
 void BufferCache<P>::BindHostIndexBuffer() {
     Buffer& buffer = slot_buffers[channel_state->index_buffer.buffer_id];
     TouchBuffer(buffer, channel_state->index_buffer.buffer_id);
-    const u32 offset = buffer.Offset(channel_state->index_buffer.cpu_addr);
+    const u32 offset = buffer.Offset(channel_state->index_buffer.device_addr);
     const u32 size = channel_state->index_buffer.size;
     const auto& draw_state = maxwell3d->draw_manager->GetDrawState();
     if (!draw_state.inline_index_draw_indexes.empty()) [[unlikely]] {
@@ -754,7 +755,7 @@ void BufferCache<P>::BindHostIndexBuffer() {
             buffer.ImmediateUpload(0, draw_state.inline_index_draw_indexes);
         }
     } else {
-        SynchronizeBuffer(buffer, channel_state->index_buffer.cpu_addr, size);
+        SynchronizeBuffer(buffer, channel_state->index_buffer.device_addr, size);
     }
     if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
         const u32 new_offset =
@@ -777,7 +778,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
         const Binding& binding = channel_state->vertex_buffers[index];
         Buffer& buffer = slot_buffers[binding.buffer_id];
         TouchBuffer(buffer, binding.buffer_id);
-        SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
+        SynchronizeBuffer(buffer, binding.device_addr, binding.size);
         if (!flags[Dirty::VertexBuffer0 + index]) {
             continue;
         }
@@ -797,7 +798,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
             Buffer& buffer = slot_buffers[binding.buffer_id];
 
             const u32 stride = maxwell3d->regs.vertex_streams[index].stride;
-            const u32 offset = buffer.Offset(binding.cpu_addr);
+            const u32 offset = buffer.Offset(binding.device_addr);
             buffer.MarkUsage(offset, binding.size);
 
             host_bindings.buffers.push_back(&buffer);
@@ -814,7 +815,7 @@ void BufferCache<P>::BindHostDrawIndirectBuffers() {
     const auto bind_buffer = [this](const Binding& binding) {
         Buffer& buffer = slot_buffers[binding.buffer_id];
         TouchBuffer(buffer, binding.buffer_id);
-        SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
+        SynchronizeBuffer(buffer, binding.device_addr, binding.size);
     };
     if (current_draw_indirect->include_count) {
         bind_buffer(channel_state->count_buffer_binding);
@@ -842,13 +843,13 @@ template <class P>
 void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index,
                                                    bool needs_bind) {
     const Binding& binding = channel_state->uniform_buffers[stage][index];
-    const VAddr cpu_addr = binding.cpu_addr;
+    const DAddr device_addr = binding.device_addr;
     const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
     Buffer& buffer = slot_buffers[binding.buffer_id];
     TouchBuffer(buffer, binding.buffer_id);
     const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
                                  size <= channel_state->uniform_buffer_skip_cache_size &&
-                                 !memory_tracker.IsRegionGpuModified(cpu_addr, size);
+                                 !memory_tracker.IsRegionGpuModified(device_addr, size);
     if (use_fast_buffer) {
         if constexpr (IS_OPENGL) {
             if (runtime.HasFastBufferSubData()) {
@@ -862,7 +863,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
                     channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
                     runtime.BindFastUniformBuffer(stage, binding_index, size);
                 }
-                const auto span = ImmediateBufferWithData(cpu_addr, size);
+                const auto span = ImmediateBufferWithData(device_addr, size);
                 runtime.PushFastUniformBuffer(stage, binding_index, span);
                 return;
             }
@@ -873,11 +874,11 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
         }
         // Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan
         const std::span<u8> span = runtime.BindMappedUniformBuffer(stage, binding_index, size);
-        cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size);
+        device_memory.ReadBlockUnsafe(device_addr, span.data(), size);
         return;
     }
     // Classic cached path
-    const bool sync_cached = SynchronizeBuffer(buffer, cpu_addr, size);
+    const bool sync_cached = SynchronizeBuffer(buffer, device_addr, size);
     if (sync_cached) {
         ++channel_state->uniform_cache_hits[0];
     }
@@ -892,7 +893,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
     if (!needs_bind) {
         return;
     }
-    const u32 offset = buffer.Offset(cpu_addr);
+    const u32 offset = buffer.Offset(device_addr);
     if constexpr (IS_OPENGL) {
         // Fast buffer will be unbound
         channel_state->fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
@@ -920,14 +921,14 @@ void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
         Buffer& buffer = slot_buffers[binding.buffer_id];
         TouchBuffer(buffer, binding.buffer_id);
         const u32 size = binding.size;
-        SynchronizeBuffer(buffer, binding.cpu_addr, size);
+        SynchronizeBuffer(buffer, binding.device_addr, size);
 
-        const u32 offset = buffer.Offset(binding.cpu_addr);
+        const u32 offset = buffer.Offset(binding.device_addr);
         buffer.MarkUsage(offset, size);
         const bool is_written = ((channel_state->written_storage_buffers[stage] >> index) & 1) != 0;
 
         if (is_written) {
-            MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size);
+            MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size);
         }
 
         if constexpr (NEEDS_BIND_STORAGE_INDEX) {
@@ -945,14 +946,14 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
         const TextureBufferBinding& binding = channel_state->texture_buffers[stage][index];
         Buffer& buffer = slot_buffers[binding.buffer_id];
         const u32 size = binding.size;
-        SynchronizeBuffer(buffer, binding.cpu_addr, size);
+        SynchronizeBuffer(buffer, binding.device_addr, size);
 
         const bool is_written = ((channel_state->written_texture_buffers[stage] >> index) & 1) != 0;
         if (is_written) {
-            MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size);
+            MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size);
         }
 
-        const u32 offset = buffer.Offset(binding.cpu_addr);
+        const u32 offset = buffer.Offset(binding.device_addr);
         const PixelFormat format = binding.format;
         buffer.MarkUsage(offset, size);
         if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
@@ -982,11 +983,11 @@ void BufferCache<P>::BindHostTransformFeedbackBuffers() {
         Buffer& buffer = slot_buffers[binding.buffer_id];
         TouchBuffer(buffer, binding.buffer_id);
         const u32 size = binding.size;
-        SynchronizeBuffer(buffer, binding.cpu_addr, size);
+        SynchronizeBuffer(buffer, binding.device_addr, size);
 
-        MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size);
+        MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size);
 
-        const u32 offset = buffer.Offset(binding.cpu_addr);
+        const u32 offset = buffer.Offset(binding.device_addr);
         buffer.MarkUsage(offset, size);
         host_bindings.buffers.push_back(&buffer);
         host_bindings.offsets.push_back(offset);
@@ -1011,9 +1012,9 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
         TouchBuffer(buffer, binding.buffer_id);
         const u32 size =
             std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
-        SynchronizeBuffer(buffer, binding.cpu_addr, size);
+        SynchronizeBuffer(buffer, binding.device_addr, size);
 
-        const u32 offset = buffer.Offset(binding.cpu_addr);
+        const u32 offset = buffer.Offset(binding.device_addr);
         buffer.MarkUsage(offset, size);
         if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
             runtime.BindComputeUniformBuffer(binding_index, buffer, offset, size);
@@ -1032,15 +1033,15 @@ void BufferCache<P>::BindHostComputeStorageBuffers() {
         Buffer& buffer = slot_buffers[binding.buffer_id];
         TouchBuffer(buffer, binding.buffer_id);
         const u32 size = binding.size;
-        SynchronizeBuffer(buffer, binding.cpu_addr, size);
+        SynchronizeBuffer(buffer, binding.device_addr, size);
 
-        const u32 offset = buffer.Offset(binding.cpu_addr);
+        const u32 offset = buffer.Offset(binding.device_addr);
         buffer.MarkUsage(offset, size);
         const bool is_written =
             ((channel_state->written_compute_storage_buffers >> index) & 1) != 0;
 
         if (is_written) {
-            MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size);
+            MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size);
         }
 
         if constexpr (NEEDS_BIND_STORAGE_INDEX) {
@@ -1058,15 +1059,15 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
         const TextureBufferBinding& binding = channel_state->compute_texture_buffers[index];
         Buffer& buffer = slot_buffers[binding.buffer_id];
         const u32 size = binding.size;
-        SynchronizeBuffer(buffer, binding.cpu_addr, size);
+        SynchronizeBuffer(buffer, binding.device_addr, size);
 
         const bool is_written =
             ((channel_state->written_compute_texture_buffers >> index) & 1) != 0;
         if (is_written) {
-            MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size);
+            MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size);
         }
 
-        const u32 offset = buffer.Offset(binding.cpu_addr);
+        const u32 offset = buffer.Offset(binding.device_addr);
         const PixelFormat format = binding.format;
         buffer.MarkUsage(offset, size);
         if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
@@ -1131,7 +1132,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
             inline_buffer_id = CreateBuffer(0, buffer_size);
         }
         channel_state->index_buffer = Binding{
-            .cpu_addr = 0,
+            .device_addr = 0,
             .size = inline_index_size,
             .buffer_id = inline_buffer_id,
         };
@@ -1140,19 +1141,19 @@ void BufferCache<P>::UpdateIndexBuffer() {
 
     const GPUVAddr gpu_addr_begin = index_buffer_ref.StartAddress();
     const GPUVAddr gpu_addr_end = index_buffer_ref.EndAddress();
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
     const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
     const u32 draw_size =
         (index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes();
     const u32 size = std::min(address_size, draw_size);
-    if (size == 0 || !cpu_addr) {
+    if (size == 0 || !device_addr) {
         channel_state->index_buffer = NULL_BINDING;
         return;
     }
     channel_state->index_buffer = Binding{
-        .cpu_addr = *cpu_addr,
+        .device_addr = *device_addr,
         .size = size,
-        .buffer_id = FindBuffer(*cpu_addr, size),
+        .buffer_id = FindBuffer(*device_addr, size),
     };
 }
 
@@ -1178,19 +1179,19 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
     const auto& limit = maxwell3d->regs.vertex_stream_limits[index];
     const GPUVAddr gpu_addr_begin = array.Address();
     const GPUVAddr gpu_addr_end = limit.Address() + 1;
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
     const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
     u32 size = address_size; // TODO: Analyze stride and number of vertices
-    if (array.enable == 0 || size == 0 || !cpu_addr) {
+    if (array.enable == 0 || size == 0 || !device_addr) {
         channel_state->vertex_buffers[index] = NULL_BINDING;
         return;
     }
     if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
         size = static_cast<u32>(gpu_memory->MaxContinuousRange(gpu_addr_begin, size));
     }
-    const BufferId buffer_id = FindBuffer(*cpu_addr, size);
+    const BufferId buffer_id = FindBuffer(*device_addr, size);
     channel_state->vertex_buffers[index] = Binding{
-        .cpu_addr = *cpu_addr,
+        .device_addr = *device_addr,
         .size = size,
         .buffer_id = buffer_id,
     };
@@ -1199,15 +1200,15 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
 template <class P>
 void BufferCache<P>::UpdateDrawIndirect() {
     const auto update = [this](GPUVAddr gpu_addr, size_t size, Binding& binding) {
-        const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
-        if (!cpu_addr) {
+        const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+        if (!device_addr) {
             binding = NULL_BINDING;
             return;
         }
         binding = Binding{
-            .cpu_addr = *cpu_addr,
+            .device_addr = *device_addr,
             .size = static_cast<u32>(size),
-            .buffer_id = FindBuffer(*cpu_addr, static_cast<u32>(size)),
+            .buffer_id = FindBuffer(*device_addr, static_cast<u32>(size)),
         };
     };
     if (current_draw_indirect->include_count) {
@@ -1231,7 +1232,7 @@ void BufferCache<P>::UpdateUniformBuffers(size_t stage) {
             channel_state->dirty_uniform_buffers[stage] |= 1U << index;
         }
         // Resolve buffer
-        binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
+        binding.buffer_id = FindBuffer(binding.device_addr, binding.size);
     });
 }
 
@@ -1240,7 +1241,7 @@ void BufferCache<P>::UpdateStorageBuffers(size_t stage) {
     ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) {
         // Resolve buffer
         Binding& binding = channel_state->storage_buffers[stage][index];
-        const BufferId buffer_id = FindBuffer(binding.cpu_addr, binding.size);
+        const BufferId buffer_id = FindBuffer(binding.device_addr, binding.size);
         binding.buffer_id = buffer_id;
     });
 }
@@ -1249,7 +1250,7 @@ template <class P>
 void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
     ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) {
         Binding& binding = channel_state->texture_buffers[stage][index];
-        binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
+        binding.buffer_id = FindBuffer(binding.device_addr, binding.size);
     });
 }
 
@@ -1268,14 +1269,14 @@ void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
     const auto& binding = maxwell3d->regs.transform_feedback.buffers[index];
     const GPUVAddr gpu_addr = binding.Address() + binding.start_offset;
     const u32 size = binding.size;
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
-    if (binding.enable == 0 || size == 0 || !cpu_addr) {
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    if (binding.enable == 0 || size == 0 || !device_addr) {
         channel_state->transform_feedback_buffers[index] = NULL_BINDING;
         return;
     }
-    const BufferId buffer_id = FindBuffer(*cpu_addr, size);
+    const BufferId buffer_id = FindBuffer(*device_addr, size);
     channel_state->transform_feedback_buffers[index] = Binding{
-        .cpu_addr = *cpu_addr,
+        .device_addr = *device_addr,
         .size = size,
         .buffer_id = buffer_id,
     };
@@ -1289,13 +1290,13 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
         const auto& launch_desc = kepler_compute->launch_description;
         if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
             const auto& cbuf = launch_desc.const_buffer_config[index];
-            const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
-            if (cpu_addr) {
-                binding.cpu_addr = *cpu_addr;
+            const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
+            if (device_addr) {
+                binding.device_addr = *device_addr;
                 binding.size = cbuf.size;
             }
         }
-        binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
+        binding.buffer_id = FindBuffer(binding.device_addr, binding.size);
     });
 }
 
@@ -1304,7 +1305,7 @@ void BufferCache<P>::UpdateComputeStorageBuffers() {
     ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) {
         // Resolve buffer
         Binding& binding = channel_state->compute_storage_buffers[index];
-        binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
+        binding.buffer_id = FindBuffer(binding.device_addr, binding.size);
     });
 }
 
@@ -1312,45 +1313,63 @@ template <class P>
 void BufferCache<P>::UpdateComputeTextureBuffers() {
     ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) {
         Binding& binding = channel_state->compute_texture_buffers[index];
-        binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
+        binding.buffer_id = FindBuffer(binding.device_addr, binding.size);
     });
 }
 
 template <class P>
-void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size) {
-    memory_tracker.MarkRegionAsGpuModified(cpu_addr, size);
+void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size) {
+    memory_tracker.MarkRegionAsGpuModified(device_addr, size);
 
-    const IntervalType base_interval{cpu_addr, cpu_addr + size};
+    const IntervalType base_interval{device_addr, device_addr + size};
     common_ranges.add(base_interval);
     uncommitted_ranges.add(base_interval);
 }
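
MarkWrittenBuffer is also where the download tracking gets fed: the GPU-modified span is added to both common_ranges and uncommitted_ranges, and boost::icl merges touching intervals on insertion, which keeps later walks over common_ranges proportional to the number of distinct dirty regions rather than the number of writes. A standalone sketch of that merge/split behavior (illustrative addresses; not part of the patch):

    #include <cassert>
    #include <boost/icl/interval_set.hpp>

    using DAddr = unsigned long long;
    using IntervalSet = boost::icl::interval_set<DAddr>;
    using Interval = boost::icl::interval<DAddr>;

    int main() {
        IntervalSet common_ranges;
        // Two writes to adjacent regions...
        common_ranges.add(Interval::right_open(0x1000, 0x1100));
        common_ranges.add(Interval::right_open(0x1100, 0x1200));
        // ...coalesce into a single [0x1000, 0x1200) interval.
        assert(common_ranges.iterative_size() == 1);
        // Subtracting the middle splits it in two, which is the shape of
        // ClearDownload's subtract_interval operation.
        common_ranges.subtract(Interval::right_open(0x1080, 0x1180));
        assert(common_ranges.iterative_size() == 2);
    }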
 
 template <class P>
-BufferId BufferCache<P>::FindBuffer(VAddr cpu_addr, u32 size) {
-    if (cpu_addr == 0) {
+BufferId BufferCache<P>::FindBuffer(DAddr device_addr, u32 size) {
+    if (device_addr == 0) {
         return NULL_BUFFER_ID;
     }
-    const u64 page = cpu_addr >> CACHING_PAGEBITS;
+    const u64 page = device_addr >> CACHING_PAGEBITS;
     const BufferId buffer_id = page_table[page];
     if (!buffer_id) {
-        return CreateBuffer(cpu_addr, size);
+        return CreateBuffer(device_addr, size);
     }
     const Buffer& buffer = slot_buffers[buffer_id];
-    if (buffer.IsInBounds(cpu_addr, size)) {
+    if (buffer.IsInBounds(device_addr, size)) {
         return buffer_id;
     }
-    return CreateBuffer(cpu_addr, size);
+    return CreateBuffer(device_addr, size);
 }
 
 template <class P>
-typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu_addr,
+typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(DAddr device_addr,
                                                                        u32 wanted_size) {
     static constexpr int STREAM_LEAP_THRESHOLD = 16;
     boost::container::small_vector<BufferId, 16> overlap_ids;
-    VAddr begin = cpu_addr;
-    VAddr end = cpu_addr + wanted_size;
+    DAddr begin = device_addr;
+    DAddr end = device_addr + wanted_size;
     int stream_score = 0;
     bool has_stream_leap = false;
+    auto expand_begin = [&](DAddr add_value) {
+        static constexpr DAddr min_page = CACHING_PAGESIZE + Core::DEVICE_PAGESIZE;
+        if (add_value > begin - min_page) {
+            begin = min_page;
+            device_addr = Core::DEVICE_PAGESIZE;
+            return;
+        }
+        begin -= add_value;
+        device_addr = begin - CACHING_PAGESIZE;
+    };
+    auto expand_end = [&](DAddr add_value) {
+        static constexpr DAddr max_page = 1ULL << Tegra::MaxwellDeviceMemoryManager::AS_BITS;
+        if (add_value > max_page - end) {
+            end = max_page;
+            return;
+        }
+        end += add_value;
+    };
     if (begin == 0) {
         return OverlapResult{
             .ids = std::move(overlap_ids),
@@ -1359,9 +1378,9 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             .has_stream_leap = has_stream_leap,
         };
     }
-    for (; cpu_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE);
-         cpu_addr += CACHING_PAGESIZE) {
-        const BufferId overlap_id = page_table[cpu_addr >> CACHING_PAGEBITS];
+    for (; device_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE);
+         device_addr += CACHING_PAGESIZE) {
+        const BufferId overlap_id = page_table[device_addr >> CACHING_PAGEBITS];
         if (!overlap_id) {
             continue;
         }
@@ -1371,12 +1390,12 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
         }
         overlap_ids.push_back(overlap_id);
         overlap.Pick();
-        const VAddr overlap_cpu_addr = overlap.CpuAddr();
-        const bool expands_left = overlap_cpu_addr < begin;
+        const DAddr overlap_device_addr = overlap.CpuAddr();
+        const bool expands_left = overlap_device_addr < begin;
         if (expands_left) {
-            begin = overlap_cpu_addr;
+            begin = overlap_device_addr;
         }
-        const VAddr overlap_end = overlap_cpu_addr + overlap.SizeBytes();
+        const DAddr overlap_end = overlap_device_addr + overlap.SizeBytes();
         const bool expands_right = overlap_end > end;
         if (overlap_end > end) {
             end = overlap_end;
@@ -1387,11 +1406,10 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             // as a stream buffer. Increase the size to skip constantly recreating buffers.
             has_stream_leap = true;
             if (expands_right) {
-                begin -= CACHING_PAGESIZE * 256;
-                cpu_addr = begin - CACHING_PAGESIZE;
+                expand_begin(CACHING_PAGESIZE * 128);
             }
             if (expands_left) {
-                end += CACHING_PAGESIZE * 256;
+                expand_end(CACHING_PAGESIZE * 128);
             }
         }
     }
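
The expand_begin/expand_end helpers replace the old unchecked `begin -= CACHING_PAGESIZE * 256` arithmetic: a DAddr now lives in a 34-bit device address space, so stream-leap expansion has to saturate at the first device page and at the top of the address space instead of wrapping. A reduced, standalone sketch of the saturating behavior (the loop-cursor update is omitted, and the constants are illustrative stand-ins for the real CACHING_PAGESIZE, DEVICE_PAGESIZE and Tegra::MaxwellDeviceMemoryManager::AS_BITS):

    #include <cassert>
    #include <cstdint>

    using DAddr = uint64_t;
    constexpr DAddr CACHING_PAGESIZE = 1ULL << 16; // assumed 64 KiB
    constexpr DAddr DEVICE_PAGESIZE = 1ULL << 12;  // assumed 4 KiB
    constexpr int AS_BITS = 34;                    // device address space bits

    DAddr ExpandBegin(DAddr begin, DAddr add_value) {
        // Never expand below the first caching page + device page.
        // (If begin < min_page, the unsigned subtraction wraps, which
        // also trips the clamp.)
        constexpr DAddr min_page = CACHING_PAGESIZE + DEVICE_PAGESIZE;
        if (add_value > begin - min_page) {
            return min_page;
        }
        return begin - add_value;
    }

    DAddr ExpandEnd(DAddr end, DAddr add_value) {
        // Never expand past the top of the device address space.
        constexpr DAddr max_page = 1ULL << AS_BITS;
        if (add_value > max_page - end) {
            return max_page;
        }
        return end + add_value;
    }

    int main() {
        // A begin address near zero saturates instead of wrapping around.
        assert(ExpandBegin(CACHING_PAGESIZE * 2, CACHING_PAGESIZE * 128) ==
               CACHING_PAGESIZE + DEVICE_PAGESIZE);
        // An end address near the 34-bit limit clamps to the limit.
        assert(ExpandEnd((1ULL << AS_BITS) - DEVICE_PAGESIZE,
                         CACHING_PAGESIZE * 128) == (1ULL << AS_BITS));
    }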
@@ -1424,13 +1442,13 @@ void BufferCache<P>::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id,
 }
 
 template <class P>
-BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
-    VAddr cpu_addr_end = Common::AlignUp(cpu_addr + wanted_size, CACHING_PAGESIZE);
-    cpu_addr = Common::AlignDown(cpu_addr, CACHING_PAGESIZE);
-    wanted_size = static_cast<u32>(cpu_addr_end - cpu_addr);
-    const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
+BufferId BufferCache<P>::CreateBuffer(DAddr device_addr, u32 wanted_size) {
+    DAddr device_addr_end = Common::AlignUp(device_addr + wanted_size, CACHING_PAGESIZE);
+    device_addr = Common::AlignDown(device_addr, CACHING_PAGESIZE);
+    wanted_size = static_cast<u32>(device_addr_end - device_addr);
+    const OverlapResult overlap = ResolveOverlaps(device_addr, wanted_size);
     const u32 size = static_cast<u32>(overlap.end - overlap.begin);
-    const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
+    const BufferId new_buffer_id = slot_buffers.insert(runtime, overlap.begin, size);
     auto& new_buffer = slot_buffers[new_buffer_id];
     const size_t size_bytes = new_buffer.SizeBytes();
     runtime.ClearBuffer(new_buffer, 0, size_bytes, 0);
@@ -1465,10 +1483,10 @@ void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
         total_used_memory -= Common::AlignUp(size, 1024);
         lru_cache.Free(buffer.getLRUID());
     }
-    const VAddr cpu_addr_begin = buffer.CpuAddr();
-    const VAddr cpu_addr_end = cpu_addr_begin + size;
-    const u64 page_begin = cpu_addr_begin / CACHING_PAGESIZE;
-    const u64 page_end = Common::DivCeil(cpu_addr_end, CACHING_PAGESIZE);
+    const DAddr device_addr_begin = buffer.CpuAddr();
+    const DAddr device_addr_end = device_addr_begin + size;
+    const u64 page_begin = device_addr_begin / CACHING_PAGESIZE;
+    const u64 page_end = Common::DivCeil(device_addr_end, CACHING_PAGESIZE);
     for (u64 page = page_begin; page != page_end; ++page) {
         if constexpr (insert) {
             page_table[page] = buffer_id;
@@ -1486,15 +1504,15 @@ void BufferCache<P>::TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept {
 }
 
 template <class P>
-bool BufferCache<P>::SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size) {
+bool BufferCache<P>::SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size) {
     boost::container::small_vector<BufferCopy, 4> copies;
     u64 total_size_bytes = 0;
     u64 largest_copy = 0;
-    VAddr buffer_start = buffer.CpuAddr();
-    memory_tracker.ForEachUploadRange(cpu_addr, size, [&](u64 cpu_addr_out, u64 range_size) {
+    DAddr buffer_start = buffer.CpuAddr();
+    memory_tracker.ForEachUploadRange(device_addr, size, [&](u64 device_addr_out, u64 range_size) {
         copies.push_back(BufferCopy{
             .src_offset = total_size_bytes,
-            .dst_offset = cpu_addr_out - buffer_start,
+            .dst_offset = device_addr_out - buffer_start,
             .size = range_size,
         });
         total_size_bytes += range_size;
@@ -1526,14 +1544,14 @@ void BufferCache<P>::ImmediateUploadMemory([[maybe_unused]] Buffer& buffer,
         std::span<u8> immediate_buffer;
         for (const BufferCopy& copy : copies) {
             std::span<const u8> upload_span;
-            const VAddr cpu_addr = buffer.CpuAddr() + copy.dst_offset;
-            if (IsRangeGranular(cpu_addr, copy.size)) {
-                upload_span = std::span(cpu_memory.GetPointer(cpu_addr), copy.size);
+            const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset;
+            if (IsRangeGranular(device_addr, copy.size)) {
+                upload_span = std::span(device_memory.GetPointer<u8>(device_addr), copy.size);
             } else {
                 if (immediate_buffer.empty()) {
                     immediate_buffer = ImmediateBuffer(largest_copy);
                 }
-                cpu_memory.ReadBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+                device_memory.ReadBlockUnsafe(device_addr, immediate_buffer.data(), copy.size);
                 upload_span = immediate_buffer.subspan(0, copy.size);
             }
             buffer.ImmediateUpload(copy.dst_offset, upload_span);
@@ -1550,8 +1568,8 @@ void BufferCache<P>::MappedUploadMemory([[maybe_unused]] Buffer& buffer,
         const std::span<u8> staging_pointer = upload_staging.mapped_span;
         for (BufferCopy& copy : copies) {
             u8* const src_pointer = staging_pointer.data() + copy.src_offset;
-            const VAddr cpu_addr = buffer.CpuAddr() + copy.dst_offset;
-            cpu_memory.ReadBlockUnsafe(cpu_addr, src_pointer, copy.size);
+            const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset;
+            device_memory.ReadBlockUnsafe(device_addr, src_pointer, copy.size);
 
             // Apply the staging offset
             copy.src_offset += upload_staging.offset;
@@ -1562,14 +1580,14 @@ void BufferCache<P>::MappedUploadMemory([[maybe_unused]] Buffer& buffer,
 }
 
 template <class P>
-bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
+bool BufferCache<P>::InlineMemory(DAddr dest_address, size_t copy_size,
                                   std::span<const u8> inlined_buffer) {
     const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
     if (!is_dirty) {
         return false;
     }
-    VAddr aligned_start = Common::AlignDown(dest_address, YUZU_PAGESIZE);
-    VAddr aligned_end = Common::AlignUp(dest_address + copy_size, YUZU_PAGESIZE);
+    DAddr aligned_start = Common::AlignDown(dest_address, DEVICE_PAGESIZE);
+    DAddr aligned_end = Common::AlignUp(dest_address + copy_size, DEVICE_PAGESIZE);
     if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) {
         return false;
     }
@@ -1580,7 +1598,7 @@ bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
 }
 
 template <class P>
-void BufferCache<P>::InlineMemoryImplementation(VAddr dest_address, size_t copy_size,
+void BufferCache<P>::InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
                                                 std::span<const u8> inlined_buffer) {
     const IntervalType subtract_interval{dest_address, dest_address + copy_size};
     ClearDownload(subtract_interval);
@@ -1612,14 +1630,14 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer) {
 }
 
 template <class P>
-void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 size) {
+void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, DAddr device_addr, u64 size) {
     boost::container::small_vector<BufferCopy, 1> copies;
     u64 total_size_bytes = 0;
     u64 largest_copy = 0;
     memory_tracker.ForEachDownloadRangeAndClear(
-        cpu_addr, size, [&](u64 cpu_addr_out, u64 range_size) {
-            const VAddr buffer_addr = buffer.CpuAddr();
-            const auto add_download = [&](VAddr start, VAddr end) {
+        device_addr, size, [&](u64 device_addr_out, u64 range_size) {
+            const DAddr buffer_addr = buffer.CpuAddr();
+            const auto add_download = [&](DAddr start, DAddr end) {
                 const u64 new_offset = start - buffer_addr;
                 const u64 new_size = end - start;
                 copies.push_back(BufferCopy{
@@ -1634,8 +1652,8 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si
                 largest_copy = std::max(largest_copy, new_size);
             };
 
-            const VAddr start_address = cpu_addr_out;
-            const VAddr end_address = start_address + range_size;
+            const DAddr start_address = device_addr_out;
+            const DAddr end_address = start_address + range_size;
             ForEachInRangeSet(common_ranges, start_address, range_size, add_download);
             const IntervalType subtract_interval{start_address, end_address};
             ClearDownload(subtract_interval);
@@ -1658,18 +1676,18 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si
         runtime.CopyBuffer(download_staging.buffer, buffer, copies_span, true);
         runtime.Finish();
         for (const BufferCopy& copy : copies) {
-            const VAddr copy_cpu_addr = buffer.CpuAddr() + copy.src_offset;
+            const DAddr copy_device_addr = buffer.CpuAddr() + copy.src_offset;
             // Undo the modified offset
             const u64 dst_offset = copy.dst_offset - download_staging.offset;
             const u8* copy_mapped_memory = mapped_memory + dst_offset;
-            cpu_memory.WriteBlockUnsafe(copy_cpu_addr, copy_mapped_memory, copy.size);
+            device_memory.WriteBlockUnsafe(copy_device_addr, copy_mapped_memory, copy.size);
         }
     } else {
         const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
         for (const BufferCopy& copy : copies) {
             buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
-            const VAddr copy_cpu_addr = buffer.CpuAddr() + copy.src_offset;
-            cpu_memory.WriteBlockUnsafe(copy_cpu_addr, immediate_buffer.data(), copy.size);
+            const DAddr copy_device_addr = buffer.CpuAddr() + copy.src_offset;
+            device_memory.WriteBlockUnsafe(copy_device_addr, immediate_buffer.data(), copy.size);
         }
     }
 }
@@ -1758,20 +1776,20 @@ Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
     const GPUVAddr aligned_gpu_addr = Common::AlignDown(gpu_addr, alignment);
     const u32 aligned_size = static_cast<u32>(gpu_addr - aligned_gpu_addr) + size;
 
-    const std::optional<VAddr> aligned_cpu_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr);
-    if (!aligned_cpu_addr || size == 0) {
+    const std::optional<DAddr> aligned_device_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr);
+    if (!aligned_device_addr || size == 0) {
         LOG_WARNING(HW_GPU, "Failed to find storage buffer for cbuf index {}", cbuf_index);
         return NULL_BINDING;
     }
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
-    ASSERT_MSG(cpu_addr, "Unaligned storage buffer address not found for cbuf index {}",
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    ASSERT_MSG(device_addr, "Unaligned storage buffer address not found for cbuf index {}",
                cbuf_index);
     // The end address used for size calculation does not need to be aligned
-    const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+    const DAddr device_addr_end = Common::AlignUp(*device_addr + size, Core::DEVICE_PAGESIZE);
 
     const Binding binding{
-        .cpu_addr = *aligned_cpu_addr,
-        .size = is_written ? aligned_size : static_cast<u32>(cpu_end - *aligned_cpu_addr),
+        .device_addr = *aligned_device_addr,
+        .size = is_written ? aligned_size : static_cast<u32>(device_addr_end - *aligned_device_addr),
         .buffer_id = BufferId{},
     };
     return binding;
@@ -1780,15 +1798,15 @@ Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
 template <class P>
 TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
                                                              PixelFormat format) {
-    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    const std::optional<DAddr> device_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     TextureBufferBinding binding;
-    if (!cpu_addr || size == 0) {
-        binding.cpu_addr = 0;
+    if (!device_addr || size == 0) {
+        binding.device_addr = 0;
         binding.size = 0;
         binding.buffer_id = NULL_BUFFER_ID;
         binding.format = PixelFormat::Invalid;
     } else {
-        binding.cpu_addr = *cpu_addr;
+        binding.device_addr = *device_addr;
         binding.size = size;
         binding.buffer_id = BufferId{};
         binding.format = format;
@@ -1797,14 +1815,14 @@ TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(GPUVAddr gpu_addr,
 }
 
 template <class P>
-std::span<const u8> BufferCache<P>::ImmediateBufferWithData(VAddr cpu_addr, size_t size) {
-    u8* const base_pointer = cpu_memory.GetPointer(cpu_addr);
-    if (IsRangeGranular(cpu_addr, size) ||
-        base_pointer + size == cpu_memory.GetPointer(cpu_addr + size)) {
+std::span<const u8> BufferCache<P>::ImmediateBufferWithData(DAddr device_addr, size_t size) {
+    u8* const base_pointer = device_memory.GetPointer<u8>(device_addr);
+    if (IsRangeGranular(device_addr, size) ||
+        base_pointer + size == device_memory.GetPointer<u8>(device_addr + size)) {
         return std::span(base_pointer, size);
     } else {
         const std::span<u8> span = ImmediateBuffer(size);
-        cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size);
+        device_memory.ReadBlockUnsafe(device_addr, span.data(), size);
         return span;
     }
 }
@@ -1828,13 +1846,14 @@ bool BufferCache<P>::HasFastUniformBufferBound(size_t stage, u32 binding_index)
 template <class P>
 std::pair<typename BufferCache<P>::Buffer*, u32> BufferCache<P>::GetDrawIndirectCount() {
     auto& buffer = slot_buffers[channel_state->count_buffer_binding.buffer_id];
-    return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.cpu_addr));
+    return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.device_addr));
 }
 
 template <class P>
 std::pair<typename BufferCache<P>::Buffer*, u32> BufferCache<P>::GetDrawIndirectBuffer() {
     auto& buffer = slot_buffers[channel_state->indirect_buffer_binding.buffer_id];
-    return std::make_pair(&buffer, buffer.Offset(channel_state->indirect_buffer_binding.cpu_addr));
+    return std::make_pair(&buffer,
+                          buffer.Offset(channel_state->indirect_buffer_binding.device_addr));
 }
 
 } // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index d6d696d8c..80dbb81e7 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -32,7 +32,6 @@
 #include "common/microprofile.h"
 #include "common/scope_exit.h"
 #include "common/settings.h"
-#include "core/memory.h"
 #include "video_core/buffer_cache/buffer_base.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/delayed_destruction_ring.h"
@@ -41,7 +40,6 @@
 #include "video_core/engines/kepler_compute.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/memory_manager.h"
-#include "video_core/rasterizer_interface.h"
 #include "video_core/surface.h"
 #include "video_core/texture_cache/slot_vector.h"
 #include "video_core/texture_cache/types.h"
@@ -94,7 +92,7 @@ static constexpr BufferId NULL_BUFFER_ID{0};
 static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
 
 struct Binding {
-    VAddr cpu_addr{};
+    DAddr device_addr{};
     u32 size{};
     BufferId buffer_id;
 };
@@ -104,7 +102,7 @@ struct TextureBufferBinding : Binding {
 };
 
 static constexpr Binding NULL_BINDING{
-    .cpu_addr = 0,
+    .device_addr = 0,
     .size = 0,
     .buffer_id = NULL_BUFFER_ID,
 };
@@ -204,10 +202,10 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf
     using Async_Buffer = typename P::Async_Buffer;
     using MemoryTracker = typename P::MemoryTracker;
 
-    using IntervalCompare = std::less<VAddr>;
-    using IntervalInstance = boost::icl::interval_type_default<VAddr, std::less>;
-    using IntervalAllocator = boost::fast_pool_allocator<VAddr>;
-    using IntervalSet = boost::icl::interval_set<VAddr>;
+    using IntervalCompare = std::less<DAddr>;
+    using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
+    using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
+    using IntervalSet = boost::icl::interval_set<DAddr>;
     using IntervalType = typename IntervalSet::interval_type;
 
     template <typename Type>
@@ -230,32 +228,31 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInf
 
     using OverlapCombine = counter_add_functor<int>;
     using OverlapSection = boost::icl::inter_section<int>;
-    using OverlapCounter = boost::icl::split_interval_map<VAddr, int>;
+    using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
 
     struct OverlapResult {
         boost::container::small_vector<BufferId, 16> ids;
-        VAddr begin;
-        VAddr end;
+        DAddr begin;
+        DAddr end;
         bool has_stream_leap = false;
     };
 
 public:
-    explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
-                         Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
+    explicit BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_);
 
     void TickFrame();
 
-    void WriteMemory(VAddr cpu_addr, u64 size);
+    void WriteMemory(DAddr device_addr, u64 size);
 
-    void CachedWriteMemory(VAddr cpu_addr, u64 size);
+    void CachedWriteMemory(DAddr device_addr, u64 size);
 
-    bool OnCPUWrite(VAddr cpu_addr, u64 size);
+    bool OnCPUWrite(DAddr device_addr, u64 size);
 
-    void DownloadMemory(VAddr cpu_addr, u64 size);
+    void DownloadMemory(DAddr device_addr, u64 size);
 
-    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(DAddr device_addr, u64 size);
 
-    bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
+    bool InlineMemory(DAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
 
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
 
@@ -300,7 +297,7 @@ public:
                                                        ObtainBufferSynchronize sync_info,
                                                        ObtainBufferOperation post_op);
 
-    [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(VAddr gpu_addr, u32 size,
+    [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(DAddr gpu_addr, u32 size,
                                                           ObtainBufferSynchronize sync_info,
                                                           ObtainBufferOperation post_op);
     void FlushCachedWrites();
@@ -326,13 +323,13 @@ public:
     bool DMAClear(GPUVAddr src_address, u64 amount, u32 value);
 
     /// Return true when a CPU region is modified from the GPU
-    [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionGpuModified(DAddr addr, size_t size);
 
     /// Return true when a region is registered on the cache
-    [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionRegistered(DAddr addr, size_t size);
 
     /// Return true when a CPU region is modified from the CPU
-    [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionCpuModified(DAddr addr, size_t size);
 
     void SetDrawIndirect(
         const Tegra::Engines::DrawManager::IndirectParams* current_draw_indirect_) {
@@ -366,9 +363,9 @@ private:
     }
 
     template <typename Func>
-    void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
-        const u64 page_end = Common::DivCeil(cpu_addr + size, CACHING_PAGESIZE);
-        for (u64 page = cpu_addr >> CACHING_PAGEBITS; page < page_end;) {
+    void ForEachBufferInRange(DAddr device_addr, u64 size, Func&& func) {
+        const u64 page_end = Common::DivCeil(device_addr + size, CACHING_PAGESIZE);
+        for (u64 page = device_addr >> CACHING_PAGEBITS; page < page_end;) {
             const BufferId buffer_id = page_table[page];
             if (!buffer_id) {
                 ++page;
@@ -377,15 +374,15 @@ private:
             Buffer& buffer = slot_buffers[buffer_id];
             func(buffer_id, buffer);
 
-            const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
+            const DAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
             page = Common::DivCeil(end_addr, CACHING_PAGESIZE);
         }
     }
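
ForEachBufferInRange walks the page table one caching page at a time until it lands on a registered buffer, then skips straight past that buffer's last page, so each buffer in the range is visited exactly once regardless of how many pages it spans. A toy model of the skip (a std::map stands in for the real page table array, and CACHING_PAGEBITS == 16 is an assumption carried over from this header):

    #include <cassert>
    #include <cstdint>
    #include <map>

    constexpr uint64_t CACHING_PAGEBITS = 16; // assumed, as in this header
    constexpr uint64_t CACHING_PAGESIZE = 1ULL << CACHING_PAGEBITS;

    uint64_t DivCeil(uint64_t a, uint64_t b) { return (a + b - 1) / b; }

    int main() {
        // One buffer spanning [0x20000, 0x50000), registered on pages 2..4.
        std::map<uint64_t, int> page_table{{2, 1}, {3, 1}, {4, 1}};
        const uint64_t buffer_end = 0x50000;
        const uint64_t page_end = DivCeil(0x60000, CACHING_PAGESIZE);
        int visits = 0;
        for (uint64_t page = 0x20000 >> CACHING_PAGEBITS; page < page_end;) {
            if (!page_table.count(page)) {
                ++page;
                continue;
            }
            ++visits;
            page = DivCeil(buffer_end, CACHING_PAGESIZE); // skip the buffer
        }
        assert(visits == 1); // visited once, not once per page
    }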
 
     template <typename Func>
-    void ForEachInRangeSet(IntervalSet& current_range, VAddr cpu_addr, u64 size, Func&& func) {
-        const VAddr start_address = cpu_addr;
-        const VAddr end_address = start_address + size;
+    void ForEachInRangeSet(IntervalSet& current_range, DAddr device_addr, u64 size, Func&& func) {
+        const DAddr start_address = device_addr;
+        const DAddr end_address = start_address + size;
         const IntervalType search_interval{start_address, end_address};
         auto it = current_range.lower_bound(search_interval);
         if (it == current_range.end()) {
@@ -393,8 +390,8 @@ private:
         }
         auto end_it = current_range.upper_bound(search_interval);
         for (; it != end_it; it++) {
-            VAddr inter_addr_end = it->upper();
-            VAddr inter_addr = it->lower();
+            DAddr inter_addr_end = it->upper();
+            DAddr inter_addr = it->lower();
             if (inter_addr_end > end_address) {
                 inter_addr_end = end_address;
             }
@@ -406,10 +403,10 @@ private:
     }
 
     template <typename Func>
-    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
+    void ForEachInOverlapCounter(OverlapCounter& current_range, DAddr device_addr, u64 size,
                                  Func&& func) {
-        const VAddr start_address = cpu_addr;
-        const VAddr end_address = start_address + size;
+        const DAddr start_address = device_addr;
+        const DAddr end_address = start_address + size;
         const IntervalType search_interval{start_address, end_address};
         auto it = current_range.lower_bound(search_interval);
         if (it == current_range.end()) {
@@ -418,8 +415,8 @@ private:
         auto end_it = current_range.upper_bound(search_interval);
         for (; it != end_it; it++) {
             auto& inter = it->first;
-            VAddr inter_addr_end = inter.upper();
-            VAddr inter_addr = inter.lower();
+            DAddr inter_addr_end = inter.upper();
+            DAddr inter_addr = inter.lower();
             if (inter_addr_end > end_address) {
                 inter_addr_end = end_address;
             }
@@ -451,9 +448,9 @@ private:
         } while (any_removals);
     }
 
-    static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
-        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
-               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
+    static bool IsRangeGranular(DAddr device_addr, size_t size) {
+        return (device_addr & ~Core::DEVICE_PAGEMASK) ==
+               ((device_addr + size) & ~Core::DEVICE_PAGEMASK);
     }
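
IsRangeGranular now asks whether a range stays within a single device page (DEVICE_PAGEMASK) rather than a single guest CPU page. A quick illustration, assuming 4 KiB device pages:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using DAddr = uint64_t;
    constexpr DAddr DEVICE_PAGEMASK = 0xFFFULL; // assumed 4 KiB device pages

    bool IsRangeGranular(DAddr device_addr, size_t size) {
        // Granular when start and (exclusive) end fall on the same page.
        return (device_addr & ~DEVICE_PAGEMASK) ==
               ((device_addr + size) & ~DEVICE_PAGEMASK);
    }

    int main() {
        assert(IsRangeGranular(0x1000, 0x800));  // stays within one page
        assert(!IsRangeGranular(0x1800, 0x900)); // crosses into the next page
    }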
 
     void RunGarbageCollector();
@@ -508,15 +505,15 @@ private:
 
     void UpdateComputeTextureBuffers();
 
-    void MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size);
+    void MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size);
 
-    [[nodiscard]] BufferId FindBuffer(VAddr cpu_addr, u32 size);
+    [[nodiscard]] BufferId FindBuffer(DAddr device_addr, u32 size);
 
-    [[nodiscard]] OverlapResult ResolveOverlaps(VAddr cpu_addr, u32 wanted_size);
+    [[nodiscard]] OverlapResult ResolveOverlaps(DAddr device_addr, u32 wanted_size);
 
     void JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, bool accumulate_stream_score);
 
-    [[nodiscard]] BufferId CreateBuffer(VAddr cpu_addr, u32 wanted_size);
+    [[nodiscard]] BufferId CreateBuffer(DAddr device_addr, u32 wanted_size);
 
     void Register(BufferId buffer_id);
 
@@ -527,7 +524,7 @@ private:
 
     void TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept;
 
-    bool SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size);
+    bool SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size);
 
     void UploadMemory(Buffer& buffer, u64 total_size_bytes, u64 largest_copy,
                       std::span<BufferCopy> copies);
@@ -539,7 +536,7 @@ private:
 
     void DownloadBufferMemory(Buffer& buffer_id);
 
-    void DownloadBufferMemory(Buffer& buffer_id, VAddr cpu_addr, u64 size);
+    void DownloadBufferMemory(Buffer& buffer_id, DAddr device_addr, u64 size);
 
     void DeleteBuffer(BufferId buffer_id, bool do_not_mark = false);
 
@@ -549,7 +546,7 @@ private:
     [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
                                                                PixelFormat format);
 
-    [[nodiscard]] std::span<const u8> ImmediateBufferWithData(VAddr cpu_addr, size_t size);
+    [[nodiscard]] std::span<const u8> ImmediateBufferWithData(DAddr device_addr, size_t size);
 
     [[nodiscard]] std::span<u8> ImmediateBuffer(size_t wanted_capacity);
 
@@ -557,11 +554,10 @@ private:
 
     void ClearDownload(IntervalType subtract_interval);
 
-    void InlineMemoryImplementation(VAddr dest_address, size_t copy_size,
+    void InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
                                     std::span<const u8> inlined_buffer);
 
-    VideoCore::RasterizerInterface& rasterizer;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     SlotVector<Buffer> slot_buffers;
     DelayedDestructionRing<Buffer, 8> delayed_destruction_ring;
@@ -598,7 +594,7 @@ private:
     u64 critical_memory = 0;
     BufferId inline_buffer_id;
 
-    std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+    std::array<BufferId, ((1ULL << 34) >> CACHING_PAGEBITS)> page_table;
     Common::ScratchBuffer<u8> tmp_buffer;
 };
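
The smaller page table is one of the concrete wins of moving to the 34-bit device address space. Assuming CACHING_PAGEBITS == 16 and a 4-byte BufferId (it wraps a u32 slot index), the table shrinks from 8 M entries (32 MiB) to 256 K entries (1 MiB); the arithmetic, as compile-time checks:

    #include <cstdint>

    constexpr uint64_t CACHING_PAGEBITS = 16; // assumed, as in this header
    constexpr uint64_t old_entries = (1ULL << 39) >> CACHING_PAGEBITS;
    constexpr uint64_t new_entries = (1ULL << 34) >> CACHING_PAGEBITS;
    constexpr uint64_t id_bytes = 4; // sizeof(BufferId), assumed u32-sized

    static_assert(old_entries == 8'388'608);          // 2^23 entries
    static_assert(new_entries == 262'144);            // 2^18 entries
    static_assert(old_entries * id_bytes == 32ULL << 20); // 32 MiB
    static_assert(new_entries * id_bytes == 1ULL << 20);  //  1 MiB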
 
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index 6c1c8287b..c95eed1f6 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -17,19 +17,19 @@
 
 namespace VideoCommon {
 
-template <class RasterizerInterface>
+template <typename DeviceTracker>
 class MemoryTrackerBase {
-    static constexpr size_t MAX_CPU_PAGE_BITS = 39;
+    static constexpr size_t MAX_CPU_PAGE_BITS = 34;
     static constexpr size_t HIGHER_PAGE_BITS = 22;
     static constexpr size_t HIGHER_PAGE_SIZE = 1ULL << HIGHER_PAGE_BITS;
     static constexpr size_t HIGHER_PAGE_MASK = HIGHER_PAGE_SIZE - 1ULL;
     static constexpr size_t NUM_HIGH_PAGES = 1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS);
     static constexpr size_t MANAGER_POOL_SIZE = 32;
     static constexpr size_t WORDS_STACK_NEEDED = HIGHER_PAGE_SIZE / BYTES_PER_WORD;
-    using Manager = WordManager<RasterizerInterface, WORDS_STACK_NEEDED>;
+    using Manager = WordManager<DeviceTracker, WORDS_STACK_NEEDED>;
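
The tracker keeps its two-level shape; only the top level shrinks with MAX_CPU_PAGE_BITS dropping from 39 to 34. Each Manager still covers one HIGHER_PAGE_SIZE window of 4 MiB, so the high-level array needs 2^(34-22) = 4096 slots instead of 131072. The constant relations, with BYTES_PER_WORD assumed to be 64 pages of 4 KiB as defined in word_manager.h:

    #include <cstddef>

    constexpr size_t MAX_CPU_PAGE_BITS = 34;  // was 39
    constexpr size_t HIGHER_PAGE_BITS = 22;   // 4 MiB per Manager window
    constexpr size_t HIGHER_PAGE_SIZE = 1ULL << HIGHER_PAGE_BITS;
    constexpr size_t NUM_HIGH_PAGES =
        1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS);
    constexpr size_t BYTES_PER_WORD = 64 * 4096; // assumed 64 x 4 KiB pages
    constexpr size_t WORDS_STACK_NEEDED = HIGHER_PAGE_SIZE / BYTES_PER_WORD;

    static_assert(NUM_HIGH_PAGES == 4096);   // down from 131072 at 39 bits
    static_assert(WORDS_STACK_NEEDED == 16); // 16 u64 words per window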
 
 public:
-    MemoryTrackerBase(RasterizerInterface& rasterizer_) : rasterizer{&rasterizer_} {}
+    MemoryTrackerBase(DeviceTracker& device_tracker_) : device_tracker{&device_tracker_} {}
     ~MemoryTrackerBase() = default;
 
     /// Returns the inclusive CPU modified range in a begin end pair
@@ -74,7 +74,7 @@ public:
             });
     }
 
-    /// Mark region as CPU modified, notifying the rasterizer about this change
+    /// Mark region as CPU modified, notifying the device_tracker about this change
     void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages<true>(dirty_cpu_addr, query_size,
                            [](Manager* manager, u64 offset, size_t size) {
@@ -83,7 +83,7 @@ public:
                            });
     }
 
-    /// Unmark region as CPU modified, notifying the rasterizer about this change
+    /// Unmark region as CPU modified, notifying the device_tracker about this change
     void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages<true>(dirty_cpu_addr, query_size,
                            [](Manager* manager, u64 offset, size_t size) {
@@ -139,7 +139,7 @@ public:
             });
     }
 
-    /// Flushes cached CPU writes, and notify the rasterizer about the deltas
+    /// Flushes cached CPU writes and notifies the device_tracker about the deltas
     void FlushCachedWrites(VAddr query_cpu_addr, u64 query_size) noexcept {
         IteratePages<false>(query_cpu_addr, query_size,
                             [](Manager* manager, [[maybe_unused]] u64 offset,
@@ -280,7 +280,7 @@ private:
         manager_pool.emplace_back();
         auto& last_pool = manager_pool.back();
         for (size_t i = 0; i < MANAGER_POOL_SIZE; i++) {
-            new (&last_pool[i]) Manager(0, *rasterizer, HIGHER_PAGE_SIZE);
+            new (&last_pool[i]) Manager(0, *device_tracker, HIGHER_PAGE_SIZE);
             free_managers.push_back(&last_pool[i]);
         }
         return on_return();
@@ -293,7 +293,7 @@ private:
 
     std::unordered_set<u32> cached_pages;
 
-    RasterizerInterface* rasterizer = nullptr;
+    DeviceTracker* device_tracker = nullptr;
 };
 
 } // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a336bde41..3db9d8b42 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -13,12 +13,12 @@
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/div_ceil.h"
-#include "core/memory.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 
 namespace VideoCommon {
 
 constexpr u64 PAGES_PER_WORD = 64;
-constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE;
+constexpr u64 BYTES_PER_PAGE = Core::DEVICE_PAGESIZE;
 constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;
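
Each tracking word is a u64 bitmask with one bit per device page, so a single word now covers 64 x 4 KiB = 256 KiB of device memory (assuming the 4 KiB device page). A toy lookup of the word and bit for a given offset:

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t PAGES_PER_WORD = 64;
    constexpr uint64_t BYTES_PER_PAGE = 4096; // assumed DEVICE_PAGESIZE
    constexpr uint64_t BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;

    int main() {
        const uint64_t offset = 0x41000; // some offset inside a buffer
        const uint64_t word_index = offset / BYTES_PER_WORD;
        const uint64_t bit = (offset / BYTES_PER_PAGE) % PAGES_PER_WORD;
        assert(word_index == 1); // second 256 KiB word
        assert(bit == 1);        // second 4 KiB page within that word
    }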
 
 enum class Type {
@@ -163,11 +163,11 @@ struct Words {
     WordsArray<stack_words> preflushable;
 };
 
-template <class RasterizerInterface, size_t stack_words = 1>
+template <class DeviceTracker, size_t stack_words = 1>
 class WordManager {
 public:
-    explicit WordManager(VAddr cpu_addr_, RasterizerInterface& rasterizer_, u64 size_bytes)
-        : cpu_addr{cpu_addr_}, rasterizer{&rasterizer_}, words{size_bytes} {}
+    explicit WordManager(VAddr cpu_addr_, DeviceTracker& tracker_, u64 size_bytes)
+        : cpu_addr{cpu_addr_}, tracker{&tracker_}, words{size_bytes} {}
 
     explicit WordManager() = default;
 
@@ -279,7 +279,7 @@ public:
     }
 
     /**
-     * Loop over each page in the given range, turn off those bits and notify the rasterizer if
+     * Loop over each page in the given range, turn off those bits and notify the tracker if
      * needed. Call the given function on each turned off range.
      *
      * @param query_cpu_range Base CPU address to loop over
@@ -459,26 +459,26 @@ private:
     }
 
     /**
-     * Notify rasterizer about changes in the CPU tracking state of a word in the buffer
+     * Notify the tracker about changes in the CPU tracking state of a word in the buffer
      *
-     * @param word_index   Index to the word to notify to the rasterizer
+     * @param word_index   Index of the word to report to the tracker
      * @param current_bits Current state of the word
      * @param new_bits     New state of the word
      *
-     * @tparam add_to_rasterizer True when the rasterizer should start tracking the new pages
+     * @tparam add_to_tracker True when the tracker should start tracking the new pages
      */
-    template <bool add_to_rasterizer>
+    template <bool add_to_tracker>
     void NotifyRasterizer(u64 word_index, u64 current_bits, u64 new_bits) const {
-        u64 changed_bits = (add_to_rasterizer ? current_bits : ~current_bits) & new_bits;
+        u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits;
         VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
         IteratePages(changed_bits, [&](size_t offset, size_t size) {
-            rasterizer->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
-                                               size * BYTES_PER_PAGE, add_to_rasterizer ? 1 : -1);
+            tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, size * BYTES_PER_PAGE,
+                                            add_to_tracker ? 1 : -1);
         });
     }
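
The template parameter rename is cosmetic; the delta itself is plain bit arithmetic over a 64-page word, and only the bits selected by changed_bits trigger UpdatePagesCachedCount calls. A standalone evaluation of both instantiations (the values are illustrative; what each mask means depends on how the caller encodes the word state):

    #include <cassert>
    #include <cstdint>

    template <bool add_to_tracker>
    uint64_t ChangedBits(uint64_t current_bits, uint64_t new_bits) {
        return (add_to_tracker ? current_bits : ~current_bits) & new_bits;
    }

    int main() {
        const uint64_t current_bits = 0b1100;
        const uint64_t new_bits = 0b1010;
        // Adding: bits set in both the current and the new state.
        assert(ChangedBits<true>(current_bits, new_bits) == 0b1000);
        // Removing: bits set in the new state but not the current one.
        assert(ChangedBits<false>(current_bits, new_bits) == 0b0010);
    }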
 
     VAddr cpu_addr = 0;
-    RasterizerInterface* rasterizer = nullptr;
+    DeviceTracker* tracker = nullptr;
     Words<stack_words> words;
 };
 
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 58ce0d8c2..fb2060ca4 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -5,10 +5,10 @@
 #include "common/microprofile.h"
 #include "common/settings.h"
 #include "core/core.h"
-#include "core/memory.h"
 #include "video_core/dma_pusher.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/gpu.h"
+#include "video_core/guest_memory.h"
 #include "video_core/memory_manager.h"
 
 namespace Tegra {
@@ -85,15 +85,15 @@ bool DmaPusher::Step() {
             }
         }
         const auto safe_process = [&] {
-            Core::Memory::GpuGuestMemory<Tegra::CommandHeader,
-                                         Core::Memory::GuestMemoryFlags::SafeRead>
+            Tegra::Memory::GpuGuestMemory<Tegra::CommandHeader,
+                                          Tegra::Memory::GuestMemoryFlags::SafeRead>
                 headers(memory_manager, dma_state.dma_get, command_list_header.size,
                         &command_headers);
             ProcessCommands(headers);
         };
         const auto unsafe_process = [&] {
-            Core::Memory::GpuGuestMemory<Tegra::CommandHeader,
-                                         Core::Memory::GuestMemoryFlags::UnsafeRead>
+            Tegra::Memory::GpuGuestMemory<Tegra::CommandHeader,
+                                          Tegra::Memory::GuestMemoryFlags::UnsafeRead>
                 headers(memory_manager, dma_state.dma_get, command_list_header.size,
                         &command_headers);
             ProcessCommands(headers);
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index bc64d4486..e5cc04ec4 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -5,8 +5,8 @@
 
 #include "common/algorithm.h"
 #include "common/assert.h"
-#include "core/memory.h"
 #include "video_core/engines/engine_upload.h"
+#include "video_core/guest_memory.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/textures/decoders.h"
@@ -68,7 +68,8 @@ void State::ProcessData(std::span<const u8> read_buffer) {
             true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
             regs.dest.BlockHeight(), regs.dest.BlockDepth());
 
-        Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
+        Tegra::Memory::GpuGuestMemoryScoped<u8,
+                                            Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
             tmp(memory_manager, address, dst_size, &tmp_buffer);
 
         Tegra::Texture::SwizzleSubrect(tmp, read_buffer, bytes_per_pixel, width, regs.dest.height,
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 95ba4f76c..a94e1f043 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -9,7 +9,6 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/memory.h"
 #include "video_core/dirty_flags.h"
 #include "video_core/engines/draw_manager.h"
 #include "video_core/engines/maxwell_3d.h"
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 56fbff306..2ebd21fc5 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -8,9 +8,9 @@
 #include "common/polyfill_ranges.h"
 #include "common/settings.h"
 #include "core/core.h"
-#include "core/memory.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/engines/maxwell_dma.h"
+#include "video_core/guest_memory.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
 #include "video_core/textures/decoders.h"
@@ -133,8 +133,8 @@ void MaxwellDMA::Launch() {
                 UNIMPLEMENTED_IF(regs.offset_out % 16 != 0);
                 read_buffer.resize_destructive(16);
                 for (u32 offset = 0; offset < regs.line_length_in; offset += 16) {
-                    Core::Memory::GpuGuestMemoryScoped<
-                        u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
+                    Tegra::Memory::GpuGuestMemoryScoped<
+                        u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                         tmp_write_buffer(memory_manager,
                                          convert_linear_2_blocklinear_addr(regs.offset_in + offset),
                                          16, &read_buffer);
@@ -146,16 +146,16 @@ void MaxwellDMA::Launch() {
                 UNIMPLEMENTED_IF(regs.offset_out % 16 != 0);
                 read_buffer.resize_destructive(16);
                 for (u32 offset = 0; offset < regs.line_length_in; offset += 16) {
-                    Core::Memory::GpuGuestMemoryScoped<
-                        u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
+                    Tegra::Memory::GpuGuestMemoryScoped<
+                        u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                         tmp_write_buffer(memory_manager, regs.offset_in + offset, 16, &read_buffer);
                     tmp_write_buffer.SetAddressAndSize(
                         convert_linear_2_blocklinear_addr(regs.offset_out + offset), 16);
                 }
             } else {
                 if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
-                    Core::Memory::GpuGuestMemoryScoped<
-                        u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
+                    Tegra::Memory::GpuGuestMemoryScoped<
+                        u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
                         tmp_write_buffer(memory_manager, regs.offset_in, regs.line_length_in,
                                          &read_buffer);
                     tmp_write_buffer.SetAddressAndSize(regs.offset_out, regs.line_length_in);
@@ -226,9 +226,9 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
 
     const size_t dst_size = dst_operand.pitch * regs.line_count;
 
-    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
+    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
         memory_manager, src_operand.address, src_size, &read_buffer);
-    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
+    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
         tmp_write_buffer(memory_manager, dst_operand.address, dst_size, &write_buffer);
 
     UnswizzleSubrect(tmp_write_buffer, tmp_read_buffer, bytes_per_pixel, width, height, depth,
@@ -290,9 +290,9 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
 
     GPUVAddr src_addr = regs.offset_in;
     GPUVAddr dst_addr = regs.offset_out;
-    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
+    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
         memory_manager, src_addr, src_size, &read_buffer);
-    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
+    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadCachedWrite>
         tmp_write_buffer(memory_manager, dst_addr, dst_size, &write_buffer);
 
     //  If the input is linear and the output is tiled, swizzle the input and copy it over.
@@ -344,9 +344,9 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {
 
     intermediate_buffer.resize_destructive(mid_buffer_size);
 
-    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
+    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
         memory_manager, regs.offset_in, src_size, &read_buffer);
-    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
+    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite>
         tmp_write_buffer(memory_manager, regs.offset_out, dst_size, &write_buffer);
 
     UnswizzleSubrect(intermediate_buffer, tmp_read_buffer, bytes_per_pixel, src_width, src.height,
diff --git a/src/video_core/engines/sw_blitter/blitter.cpp b/src/video_core/engines/sw_blitter/blitter.cpp
index 67ce9134b..4bc079024 100644
--- a/src/video_core/engines/sw_blitter/blitter.cpp
+++ b/src/video_core/engines/sw_blitter/blitter.cpp
@@ -8,6 +8,7 @@
 #include "common/scratch_buffer.h"
 #include "video_core/engines/sw_blitter/blitter.h"
 #include "video_core/engines/sw_blitter/converter.h"
+#include "video_core/guest_memory.h"
 #include "video_core/memory_manager.h"
 #include "video_core/surface.h"
 #include "video_core/textures/decoders.h"
@@ -160,7 +161,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
     const auto dst_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(dst.format));
     const size_t src_size = get_surface_size(src, src_bytes_per_pixel);
 
-    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_buffer(
+    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::SafeRead> tmp_buffer(
         memory_manager, src.Address(), src_size, &impl->tmp_buffer);
 
     const size_t src_copy_size = src_extent_x * src_extent_y * src_bytes_per_pixel;
@@ -220,7 +221,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
     }
 
     const size_t dst_size = get_surface_size(dst, dst_bytes_per_pixel);
-    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadWrite>
+    Tegra::Memory::GpuGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::SafeReadWrite>
         tmp_buffer2(memory_manager, dst.Address(), dst_size, &impl->tmp_buffer);
 
     if (dst.linear == Fermi2D::MemoryLayout::BlockLinear) {
diff --git a/src/video_core/framebuffer_config.h b/src/video_core/framebuffer_config.h
index 5f3bffcab..856f4bd52 100644
--- a/src/video_core/framebuffer_config.h
+++ b/src/video_core/framebuffer_config.h
@@ -14,7 +14,7 @@ namespace Tegra {
  * Struct describing framebuffer configuration
  */
 struct FramebufferConfig {
-    VAddr address{};
+    DAddr address{};
     u32 offset{};
     u32 width{};
     u32 height{};
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 11549d448..609704b33 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -85,7 +85,8 @@ struct GPU::Impl {
     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
         renderer = std::move(renderer_);
         rasterizer = renderer->ReadRasterizer();
-        host1x.MemoryManager().BindRasterizer(rasterizer);
+        host1x.MemoryManager().BindInterface(rasterizer);
+        host1x.GMMU().BindRasterizer(rasterizer);
     }
 
     /// Flush all current written commands into the host GPU for execution.
@@ -95,8 +96,8 @@ struct GPU::Impl {
 
     /// Synchronizes CPU writes with Host GPU memory.
     void InvalidateGPUCache() {
-        std::function<void(VAddr, size_t)> callback_writes(
-            [this](VAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); });
+        std::function<void(PAddr, size_t)> callback_writes(
+            [this](PAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); });
         system.GatherGPUDirtyMemory(callback_writes);
     }
 
@@ -279,11 +280,11 @@ struct GPU::Impl {
     }
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    void FlushRegion(VAddr addr, u64 size) {
+    void FlushRegion(DAddr addr, u64 size) {
         gpu_thread.FlushRegion(addr, size);
     }
 
-    VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size) {
+    VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size) {
         auto raster_area = rasterizer->GetFlushArea(addr, size);
         if (raster_area.preemtive) {
             return raster_area;
@@ -299,16 +300,16 @@ struct GPU::Impl {
     }
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    void InvalidateRegion(VAddr addr, u64 size) {
+    void InvalidateRegion(DAddr addr, u64 size) {
         gpu_thread.InvalidateRegion(addr, size);
     }
 
-    bool OnCPUWrite(VAddr addr, u64 size) {
+    bool OnCPUWrite(DAddr addr, u64 size) {
         return rasterizer->OnCPUWrite(addr, size);
     }
 
     /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
-    void FlushAndInvalidateRegion(VAddr addr, u64 size) {
+    void FlushAndInvalidateRegion(DAddr addr, u64 size) {
         gpu_thread.FlushAndInvalidateRegion(addr, size);
     }
 
@@ -437,7 +438,7 @@ void GPU::OnCommandListEnd() {
     impl->OnCommandListEnd();
 }
 
-u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
+u64 GPU::RequestFlush(DAddr addr, std::size_t size) {
     return impl->RequestSyncOperation(
         [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
 }
@@ -557,23 +558,23 @@ void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     impl->SwapBuffers(framebuffer);
 }
 
-VideoCore::RasterizerDownloadArea GPU::OnCPURead(VAddr addr, u64 size) {
+VideoCore::RasterizerDownloadArea GPU::OnCPURead(DAddr addr, u64 size) {
     return impl->OnCPURead(addr, size);
 }
 
-void GPU::FlushRegion(VAddr addr, u64 size) {
+void GPU::FlushRegion(DAddr addr, u64 size) {
     impl->FlushRegion(addr, size);
 }
 
-void GPU::InvalidateRegion(VAddr addr, u64 size) {
+void GPU::InvalidateRegion(DAddr addr, u64 size) {
     impl->InvalidateRegion(addr, size);
 }
 
-bool GPU::OnCPUWrite(VAddr addr, u64 size) {
+bool GPU::OnCPUWrite(DAddr addr, u64 size) {
     return impl->OnCPUWrite(addr, size);
 }
 
-void GPU::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+void GPU::FlushAndInvalidateRegion(DAddr addr, u64 size) {
     impl->FlushAndInvalidateRegion(addr, size);
 }
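
Because `VAddr`, `PAddr`, and `DAddr` are all plain `u64` aliases, the signature changes in this file are semantic documentation rather than type enforcement: mixing the aliases still compiles silently. A tagged-wrapper sketch (hypothetical, not part of this change) shows what compiler-checked separation of the address spaces would look like:

```cpp
// Hypothetical strong-typedef sketch: with tagged address types, passing a
// virtual address where a device address is expected becomes a compile error
// instead of a silent u64 conversion. Not part of this diff.
#include <cstdint>

template <typename Tag>
struct TaggedAddr {
    explicit constexpr TaggedAddr(uint64_t v) : value{v} {}
    uint64_t value;
};

struct VirtualTag {};
struct DeviceTag {};
using StrongVAddr = TaggedAddr<VirtualTag>;
using StrongDAddr = TaggedAddr<DeviceTag>;

void FlushRegion(StrongDAddr, uint64_t /*size*/) {}

// FlushRegion(StrongVAddr{0x1000}, 16); // would not compile
```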
 
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index ba2838b89..b3c1d15bd 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -158,7 +158,7 @@ public:
     void InitAddressSpace(Tegra::MemoryManager& memory_manager);
 
     /// Request a host GPU memory flush from the CPU.
-    [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
+    [[nodiscard]] u64 RequestFlush(DAddr addr, std::size_t size);
 
     /// Obtains current flush request fence id.
     [[nodiscard]] u64 CurrentSyncRequestFence() const;
@@ -242,20 +242,20 @@ public:
     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size);
+    [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size);
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    void FlushRegion(VAddr addr, u64 size);
+    void FlushRegion(DAddr addr, u64 size);
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    void InvalidateRegion(VAddr addr, u64 size);
+    void InvalidateRegion(DAddr addr, u64 size);
 
     /// Notify rasterizer that CPU is trying to write this area. It returns true if the area is
     /// sensitive, false otherwise
-    bool OnCPUWrite(VAddr addr, u64 size);
+    bool OnCPUWrite(DAddr addr, u64 size);
 
     /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
-    void FlushAndInvalidateRegion(VAddr addr, u64 size);
+    void FlushAndInvalidateRegion(DAddr addr, u64 size);
 
 private:
     struct Impl;
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 2f0f9f593..788d4f61e 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -82,7 +82,7 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     PushCommand(SwapBuffersCommand(framebuffer ? std::make_optional(*framebuffer) : std::nullopt));
 }
 
-void ThreadManager::FlushRegion(VAddr addr, u64 size) {
+void ThreadManager::FlushRegion(DAddr addr, u64 size) {
     if (!is_async) {
         // Always flush with synchronous GPU mode
         PushCommand(FlushRegionCommand(addr, size));
@@ -101,11 +101,11 @@ void ThreadManager::TickGPU() {
     PushCommand(GPUTickCommand());
 }
 
-void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
+void ThreadManager::InvalidateRegion(DAddr addr, u64 size) {
     rasterizer->OnCacheInvalidation(addr, size);
 }
 
-void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+void ThreadManager::FlushAndInvalidateRegion(DAddr addr, u64 size) {
     // Skip flush on async mode, as FlushAndInvalidateRegion is not used for anything too important
     rasterizer->OnCacheInvalidation(addr, size);
 }
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 43940bd6d..2de25e9ef 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -54,26 +54,26 @@ struct SwapBuffersCommand final {
 
 /// Command to signal to the GPU thread to flush a region
 struct FlushRegionCommand final {
-    explicit constexpr FlushRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
+    explicit constexpr FlushRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
 
-    VAddr addr;
+    DAddr addr;
     u64 size;
 };
 
 /// Command to signal to the GPU thread to invalidate a region
 struct InvalidateRegionCommand final {
-    explicit constexpr InvalidateRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
+    explicit constexpr InvalidateRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
 
-    VAddr addr;
+    DAddr addr;
     u64 size;
 };
 
 /// Command to signal to the GPU thread to flush and invalidate a region
 struct FlushAndInvalidateRegionCommand final {
-    explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr_, u64 size_)
+    explicit constexpr FlushAndInvalidateRegionCommand(DAddr addr_, u64 size_)
         : addr{addr_}, size{size_} {}
 
-    VAddr addr;
+    DAddr addr;
     u64 size;
 };
 
@@ -122,13 +122,13 @@ public:
     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    void FlushRegion(VAddr addr, u64 size);
+    void FlushRegion(DAddr addr, u64 size);
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    void InvalidateRegion(VAddr addr, u64 size);
+    void InvalidateRegion(DAddr addr, u64 size);
 
     /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
-    void FlushAndInvalidateRegion(VAddr addr, u64 size);
+    void FlushAndInvalidateRegion(DAddr addr, u64 size);
 
     void TickGPU();
 
diff --git a/src/video_core/guest_memory.h b/src/video_core/guest_memory.h
new file mode 100644
index 000000000..8b6213172
--- /dev/null
+++ b/src/video_core/guest_memory.h
@@ -0,0 +1,30 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <iterator>
+#include <memory>
+#include <optional>
+#include <span>
+#include <vector>
+
+#include "common/scratch_buffer.h"
+#include "core/guest_memory.h"
+#include "video_core/memory_manager.h"
+
+namespace Tegra::Memory {
+
+using GuestMemoryFlags = Core::Memory::GuestMemoryFlags;
+
+template <typename T, GuestMemoryFlags FLAGS>
+using DeviceGuestMemory = Core::Memory::GuestMemory<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
+template <typename T, GuestMemoryFlags FLAGS>
+using DeviceGuestMemoryScoped =
+    Core::Memory::GuestMemoryScoped<Tegra::MaxwellDeviceMemoryManager, T, FLAGS>;
+template <typename T, GuestMemoryFlags FLAGS>
+using GpuGuestMemory = Core::Memory::GuestMemory<Tegra::MemoryManager, T, FLAGS>;
+template <typename T, GuestMemoryFlags FLAGS>
+using GpuGuestMemoryScoped = Core::Memory::GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>;
+
+} // namespace Tegra::Memory
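
This new header is pure type plumbing: one generic wrapper parameterized on the memory-manager type, plus four aliases that pin it either to the device manager or to the GMMU. A toy rendition of the layering, using stand-in types rather than the real classes:

```cpp
// Toy version of the alias layering in guest_memory.h: the wrapper logic is
// written once against any manager exposing ReadBlock, and each alias merely
// fixes the manager type.
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyGmmu {          // stands in for Tegra::MemoryManager (GPU VA space)
    void ReadBlock(uint64_t, void*, size_t) {}
};
struct ToyDeviceManager { // stands in for Tegra::MaxwellDeviceMemoryManager
    void ReadBlock(uint64_t, void*, size_t) {}
};

template <typename Manager, typename T>
class ToyGuestMemory {
public:
    ToyGuestMemory(Manager& m, uint64_t addr, size_t count) : buffer(count) {
        m.ReadBlock(addr, buffer.data(), count * sizeof(T));
    }
    const T* data() const { return buffer.data(); }

private:
    std::vector<T> buffer;
};

template <typename T>
using ToyGpuGuestMemory = ToyGuestMemory<ToyGmmu, T>;
template <typename T>
using ToyDeviceGuestMemory = ToyGuestMemory<ToyDeviceManager, T>;
```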
diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index 309a7f1d5..994591c8d 100644
--- a/src/video_core/host1x/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -32,13 +32,12 @@ H264::~H264() = default;
 std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
                                        size_t* out_configuration_size, bool is_first_frame) {
     H264DecoderContext context;
-    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
-                                     sizeof(H264DecoderContext));
+    host1x.GMMU().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));
 
     const s64 frame_number = context.h264_parameter_set.frame_number.Value();
     if (!is_first_frame && frame_number != 0) {
         frame.resize_destructive(context.stream_len);
-        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
+        host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
         *out_configuration_size = 0;
         return frame;
     }
@@ -159,8 +158,8 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters
     std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
 
     *out_configuration_size = encoded_header.size();
-    host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
-                                     frame.data() + encoded_header.size(), context.stream_len);
+    host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data() + encoded_header.size(),
+                            context.stream_len);
 
     return frame;
 }
diff --git a/src/video_core/host1x/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp
index ee6392ff9..be97e3b00 100644
--- a/src/video_core/host1x/codecs/vp8.cpp
+++ b/src/video_core/host1x/codecs/vp8.cpp
@@ -14,7 +14,7 @@ VP8::~VP8() = default;
 
 std::span<const u8> VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
     VP8PictureInfo info;
-    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
+    host1x.GMMU().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
 
     const bool is_key_frame = info.key_frame == 1u;
     const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
@@ -45,7 +45,7 @@ std::span<const u8> VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters&
         frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
     }
     const u64 bitstream_offset = state.frame_bitstream_offset;
-    host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
+    host1x.GMMU().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
 
     return frame;
 }
diff --git a/src/video_core/host1x/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp
index 306c3d0e8..65d6fb2d5 100644
--- a/src/video_core/host1x/codecs/vp9.cpp
+++ b/src/video_core/host1x/codecs/vp9.cpp
@@ -358,7 +358,7 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_
 
 Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
     PictureInfo picture_info;
-    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
+    host1x.GMMU().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
     Vp9PictureInfo vp9_info = picture_info.Convert();
 
     InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
@@ -373,7 +373,7 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters&
 
 void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
     EntropyProbs entropy;
-    host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
+    host1x.GMMU().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
     entropy.Convert(dst);
 }
 
@@ -383,9 +383,8 @@ Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters
         // gpu.SyncGuestHost(); epic, why?
         current_frame.info = GetVp9PictureInfo(state);
         current_frame.bit_stream.resize(current_frame.info.bitstream_size);
-        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
-                                         current_frame.bit_stream.data(),
-                                         current_frame.info.bitstream_size);
+        host1x.GMMU().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
+                                current_frame.info.bitstream_size);
     }
     if (!next_frame.bit_stream.empty()) {
         Vp9FrameContainer temp{
diff --git a/src/video_core/host1x/gpu_device_memory_manager.cpp b/src/video_core/host1x/gpu_device_memory_manager.cpp
new file mode 100644
index 000000000..668c2f08b
--- /dev/null
+++ b/src/video_core/host1x/gpu_device_memory_manager.cpp
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/device_memory_manager.inc"
+#include "video_core/host1x/gpu_device_memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace Tegra {
+
+struct MaxwellDeviceMethods {
+    static inline void MarkRegionCaching(Core::Memory::Memory* interface, VAddr address,
+                                         size_t size, bool caching) {
+        interface->RasterizerMarkRegionCached(address, size, caching);
+    }
+};
+
+} // namespace Tegra
+
+template struct Core::DeviceMemoryManagerAllocator<Tegra::MaxwellDeviceTraits>;
+template class Core::DeviceMemoryManager<Tegra::MaxwellDeviceTraits>;
+
+template const u8* Tegra::MaxwellDeviceMemoryManager::GetPointer<u8>(DAddr addr) const;
+template u8* Tegra::MaxwellDeviceMemoryManager::GetPointer<u8>(DAddr addr);
+
+template u8 Tegra::MaxwellDeviceMemoryManager::Read<u8>(DAddr addr) const;
+template u16 Tegra::MaxwellDeviceMemoryManager::Read<u16>(DAddr addr) const;
+template u32 Tegra::MaxwellDeviceMemoryManager::Read<u32>(DAddr addr) const;
+template u64 Tegra::MaxwellDeviceMemoryManager::Read<u64>(DAddr addr) const;
+template void Tegra::MaxwellDeviceMemoryManager::Write<u8>(DAddr addr, u8 data);
+template void Tegra::MaxwellDeviceMemoryManager::Write<u16>(DAddr addr, u16 data);
+template void Tegra::MaxwellDeviceMemoryManager::Write<u32>(DAddr addr, u32 data);
+template void Tegra::MaxwellDeviceMemoryManager::Write<u64>(DAddr addr, u64 data);
diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h
new file mode 100644
index 000000000..a9f249991
--- /dev/null
+++ b/src/video_core/host1x/gpu_device_memory_manager.h
@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/device_memory_manager.h"
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra {
+
+struct MaxwellDeviceMethods;
+
+struct MaxwellDeviceTraits {
+    static constexpr size_t device_virtual_bits = 34;
+    using DeviceInterface = VideoCore::RasterizerInterface;
+    using DeviceMethods = MaxwellDeviceMethods;
+};
+
+using MaxwellDeviceMemoryManager = Core::DeviceMemoryManager<MaxwellDeviceTraits>;
+
+} // namespace Tegra
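
The header/.cpp pair above uses traits plus explicit template instantiation: `DeviceMemoryManager` is generic over a traits struct, the heavy implementation lives in `device_memory_manager.inc`, and the `.cpp` instantiates exactly the specializations video_core needs, so the `.inc` is compiled in a single translation unit. A self-contained miniature of the same arrangement:

```cpp
// Miniature of the traits + explicit-instantiation split used above. In the
// real code the class template lives in core/, the implementation in a .inc,
// and the explicit instantiation in video_core's .cpp.
#include <cstddef>
#include <cstdint>

struct ToyRasterizer {}; // stands in for VideoCore::RasterizerInterface

struct ToyTraits {
    static constexpr size_t device_virtual_bits = 34;
    using DeviceInterface = ToyRasterizer;
};

template <typename Traits>
class ToyDeviceMemoryManager {
public:
    static constexpr uint64_t AddressSpaceSize() {
        return uint64_t{1} << Traits::device_virtual_bits; // 16 GiB for 34 bits
    }
    void BindInterface(typename Traits::DeviceInterface* iface) {
        interface = iface;
    }

private:
    typename Traits::DeviceInterface* interface{};
};

// The one-and-only instantiation, compiled in a single translation unit:
template class ToyDeviceMemoryManager<ToyTraits>;
```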
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp
index 7c317a85d..c4c7a5883 100644
--- a/src/video_core/host1x/host1x.cpp
+++ b/src/video_core/host1x/host1x.cpp
@@ -9,9 +9,12 @@ namespace Tegra {
 namespace Host1x {
 
 Host1x::Host1x(Core::System& system_)
-    : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
+    : system{system_}, syncpoint_manager{},
+      memory_manager(system.DeviceMemory()), gmmu_manager{system, memory_manager, 32, 12},
       allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}
 
+Host1x::~Host1x() = default;
+
 } // namespace Host1x
 
 } // namespace Tegra
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h
index 57082ae54..d72d97b7b 100644
--- a/src/video_core/host1x/host1x.h
+++ b/src/video_core/host1x/host1x.h
@@ -6,6 +6,7 @@
 #include "common/common_types.h"
 
 #include "common/address_space.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/host1x/syncpoint_manager.h"
 #include "video_core/memory_manager.h"
 
@@ -20,6 +21,7 @@ namespace Host1x {
 class Host1x {
 public:
     explicit Host1x(Core::System& system);
+    ~Host1x();
 
     SyncpointManager& GetSyncpointManager() {
         return syncpoint_manager;
@@ -29,14 +31,22 @@ public:
         return syncpoint_manager;
     }
 
-    Tegra::MemoryManager& MemoryManager() {
+    Tegra::MaxwellDeviceMemoryManager& MemoryManager() {
         return memory_manager;
     }
 
-    const Tegra::MemoryManager& MemoryManager() const {
+    const Tegra::MaxwellDeviceMemoryManager& MemoryManager() const {
         return memory_manager;
     }
 
+    Tegra::MemoryManager& GMMU() {
+        return gmmu_manager;
+    }
+
+    const Tegra::MemoryManager& GMMU() const {
+        return gmmu_manager;
+    }
+
     Common::FlatAllocator<u32, 0, 32>& Allocator() {
         return *allocator;
     }
@@ -48,7 +58,8 @@ public:
 private:
     Core::System& system;
     SyncpointManager syncpoint_manager;
-    Tegra::MemoryManager memory_manager;
+    Tegra::MaxwellDeviceMemoryManager memory_manager;
+    Tegra::MemoryManager gmmu_manager;
     std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
 };
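
After this change Host1x owns two distinct translation layers: `MemoryManager()` now returns the DAddr-indexed device manager, while the old GPU-virtual-address manager moved behind `GMMU()`. That is why the codec and VIC call sites in this diff were repointed from `MemoryManager()` to `GMMU()`: they operate on GPU virtual addresses. A toy facade showing the shape of the split (stand-in types only):

```cpp
// Toy facade mirroring the Host1x accessor split above. Both address types
// are u64, so the accessor name, not the argument type, selects the space.
#include <cstddef>
#include <cstdint>

struct ToyDeviceMemory { // DAddr-indexed, shared with the CPU-side caches
    void ReadBlock(uint64_t /*dev_addr*/, void*, size_t) {}
};
struct ToyGmmu {         // GPU-virtual-address page table (the "GMMU")
    void ReadBlock(uint64_t /*gpu_va*/, void*, size_t) {}
};

class ToyHost1x {
public:
    ToyDeviceMemory& MemoryManager() { return device_memory; }
    ToyGmmu& GMMU() { return gmmu; }

private:
    ToyDeviceMemory device_memory;
    ToyGmmu gmmu;
};
```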
 
diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp
index 2a5eba415..d154746af 100644
--- a/src/video_core/host1x/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -81,7 +81,7 @@ void Vic::Execute() {
         LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
         return;
     }
-    const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
+    const VicConfig config{host1x.GMMU().Read<u64>(config_struct_address + 0x20)};
     auto frame = nvdec_processor->GetFrame();
     if (!frame) {
         return;
@@ -162,12 +162,12 @@ void Vic::WriteRGBFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
         Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                                 block_height, 0, width * 4);
 
-        host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+        host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
     } else {
         // send pitch linear frame
         const size_t linear_size = width * height * 4;
-        host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
-                                          linear_size);
+        host1x.GMMU().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+                                 linear_size);
     }
 }
 
@@ -193,8 +193,7 @@ void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
         const std::size_t dst = y * aligned_width;
         std::memcpy(luma_buffer.data() + dst, luma_src + src, frame_width);
     }
-    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
-                                      luma_buffer.size());
+    host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), luma_buffer.size());
 
     // Chroma
     const std::size_t half_height = frame_height / 2;
@@ -233,8 +232,8 @@ void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& c
         ASSERT(false);
         break;
     }
-    host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
-                                      chroma_buffer.size());
+    host1x.GMMU().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+                             chroma_buffer.size());
 }
 
 } // namespace Host1x
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index d16040613..a52f8e486 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -7,25 +7,26 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
-#include "core/device_memory.h"
 #include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_process.h"
+#include "video_core/guest_memory.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/invalidation_accumulator.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_base.h"
 
 namespace Tegra {
-using Core::Memory::GuestMemoryFlags;
+using Tegra::Memory::GuestMemoryFlags;
 
 std::atomic<size_t> MemoryManager::unique_identifier_generator{};
 
-MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
-                             u64 page_bits_)
-    : system{system_}, memory{system.ApplicationMemory()}, device_memory{system.DeviceMemory()},
-      address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
-      entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
-                                           page_bits != big_page_bits ? page_bits : 0},
+MemoryManager::MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_,
+                             u64 address_space_bits_, u64 big_page_bits_, u64 page_bits_)
+    : system{system_}, memory{memory_}, address_space_bits{address_space_bits_},
+      page_bits{page_bits_}, big_page_bits{big_page_bits_}, entries{}, big_entries{},
+      page_table{address_space_bits, address_space_bits + page_bits - 38,
+                 page_bits != big_page_bits ? page_bits : 0},
       kind_map{PTEKind::INVALID}, unique_identifier{unique_identifier_generator.fetch_add(
                                       1, std::memory_order_acq_rel)},
       accumulator{std::make_unique<VideoCommon::InvalidationAccumulator>()} {
@@ -42,11 +43,16 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
     big_page_table_mask = big_page_table_size - 1;
 
     big_entries.resize(big_page_table_size / 32, 0);
-    big_page_table_cpu.resize(big_page_table_size);
+    big_page_table_dev.resize(big_page_table_size);
     big_page_continuous.resize(big_page_table_size / continuous_bits, 0);
     entries.resize(page_table_size / 32, 0);
 }
 
+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
+                             u64 page_bits_)
+    : MemoryManager(system_, system_.Host1x().MemoryManager(), address_space_bits_, big_page_bits_,
+                    page_bits_) {}
+
 MemoryManager::~MemoryManager() = default;
 
 template <bool is_big_page>
@@ -100,7 +106,7 @@ inline void MemoryManager::SetBigPageContinuous(size_t big_page_index, bool valu
 }
 
 template <MemoryManager::EntryType entry_type>
-GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size,
                                     PTEKind kind) {
     [[maybe_unused]] u64 remaining_size{size};
     if constexpr (entry_type == EntryType::Mapped) {
@@ -114,9 +120,9 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
             rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, page_size);
         }
         if constexpr (entry_type == EntryType::Mapped) {
-            const VAddr current_cpu_addr = cpu_addr + offset;
+            const DAddr current_dev_addr = dev_addr + offset;
             const auto index = PageEntryIndex<false>(current_gpu_addr);
-            const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+            const u32 sub_value = static_cast<u32>(current_dev_addr >> cpu_page_bits);
             page_table[index] = sub_value;
         }
         remaining_size -= page_size;
@@ -126,7 +132,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp
 }
 
 template <MemoryManager::EntryType entry_type>
-GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr,
                                        size_t size, PTEKind kind) {
     [[maybe_unused]] u64 remaining_size{size};
     for (u64 offset{}; offset < size; offset += big_page_size) {
@@ -137,20 +143,20 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
             rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, big_page_size);
         }
         if constexpr (entry_type == EntryType::Mapped) {
-            const VAddr current_cpu_addr = cpu_addr + offset;
+            const DAddr current_dev_addr = dev_addr + offset;
             const auto index = PageEntryIndex<true>(current_gpu_addr);
-            const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
-            big_page_table_cpu[index] = sub_value;
+            const u32 sub_value = static_cast<u32>(current_dev_addr >> cpu_page_bits);
+            big_page_table_dev[index] = sub_value;
             const bool is_continuous = ([&] {
                 uintptr_t base_ptr{
-                    reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
+                    reinterpret_cast<uintptr_t>(memory.GetPointer<u8>(current_dev_addr))};
                 if (base_ptr == 0) {
                     return false;
                 }
-                for (VAddr start_cpu = current_cpu_addr + page_size;
-                     start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
+                for (DAddr start_dev = current_dev_addr + page_size;
+                     start_dev < current_dev_addr + big_page_size; start_dev += page_size) {
                     base_ptr += page_size;
+                    auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointer<u8>(start_dev));
+                    auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointer<u8>(start_cpu));
                     if (next_ptr == 0 || base_ptr != next_ptr) {
                         return false;
                     }
@@ -172,12 +178,12 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_)
     rasterizer = rasterizer_;
 }
 
-GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind,
+GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, DAddr dev_addr, std::size_t size, PTEKind kind,
                             bool is_big_pages) {
     if (is_big_pages) [[likely]] {
-        return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
+        return BigPageTableOp<EntryType::Mapped>(gpu_addr, dev_addr, size, kind);
     }
-    return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
+    return PageTableOp<EntryType::Mapped>(gpu_addr, dev_addr, size, kind);
 }
 
 GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
@@ -202,7 +208,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
 }
 
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
+std::optional<DAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
     if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
         return std::nullopt;
     }
@@ -211,17 +217,17 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
             return std::nullopt;
         }
 
-        const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
+        const DAddr dev_addr_base = static_cast<DAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
                                     << cpu_page_bits;
-        return cpu_addr_base + (gpu_addr & page_mask);
+        return dev_addr_base + (gpu_addr & page_mask);
     }
 
-    const VAddr cpu_addr_base =
-        static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
-    return cpu_addr_base + (gpu_addr & big_page_mask);
+    const DAddr dev_addr_base =
+        static_cast<DAddr>(big_page_table_dev[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
+    return dev_addr_base + (gpu_addr & big_page_mask);
 }
 
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
+std::optional<DAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
     size_t page_index{addr >> page_bits};
     const size_t page_last{(addr + size + page_size - 1) >> page_bits};
     while (page_index < page_last) {
@@ -274,7 +280,7 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
         return {};
     }
 
-    return memory.GetPointer(*address);
+    return memory.GetPointer<u8>(*address);
 }
 
 const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
@@ -283,7 +289,7 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
         return {};
     }
 
-    return memory.GetPointer(*address);
+    return memory.GetPointer<u8>(*address);
 }
 
 #ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
@@ -367,25 +373,25 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std:
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
         }
-        u8* physical = memory.GetPointer(cpu_addr_base);
+        u8* physical = memory.GetPointer<u8>(dev_addr_base);
         std::memcpy(dest_buffer, physical, copy_amount);
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
         }
         if (!IsBigPageContinuous(page_index)) [[unlikely]] {
-            memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+            memory.ReadBlockUnsafe(dev_addr_base, dest_buffer, copy_amount);
         } else {
-            u8* physical = memory.GetPointer(cpu_addr_base);
+            u8* physical = memory.GetPointer<u8>(dev_addr_base);
             std::memcpy(dest_buffer, physical, copy_amount);
         }
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
@@ -416,25 +422,25 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
         }
-        u8* physical = memory.GetPointer(cpu_addr_base);
+        u8* physical = memory.GetPointer<u8>(dev_addr_base);
         std::memcpy(physical, src_buffer, copy_amount);
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
         if constexpr (is_safe) {
-            rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+            rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
         }
         if (!IsBigPageContinuous(page_index)) [[unlikely]] {
-            memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+            memory.WriteBlockUnsafe(dev_addr_base, src_buffer, copy_amount);
         } else {
-            u8* physical = memory.GetPointer(cpu_addr_base);
+            u8* physical = memory.GetPointer<u8>(dev_addr_base);
             std::memcpy(physical, src_buffer, copy_amount);
         }
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
@@ -470,14 +476,14 @@ void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size,
                           [[maybe_unused]] std::size_t copy_amount) {};
 
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        rasterizer->FlushRegion(dev_addr_base, copy_amount, which);
     };
     auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
                                  std::size_t copy_amount) {
@@ -495,15 +501,15 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
                           [[maybe_unused]] std::size_t copy_amount) { return false; };
 
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        result |= rasterizer->MustFlushRegion(dev_addr_base, copy_amount, which);
         return result;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        result |= rasterizer->MustFlushRegion(dev_addr_base, copy_amount, which);
         return result;
     };
     auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -517,7 +523,7 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
 }
 
 size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const {
-    std::optional<VAddr> old_page_addr{};
+    std::optional<DAddr> old_page_addr{};
     size_t range_so_far = 0;
     bool result{false};
     auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
@@ -526,24 +532,24 @@ size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const {
         return true;
     };
     auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             result = true;
             return true;
         }
         range_so_far += copy_amount;
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             return true;
         }
         range_so_far += copy_amount;
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -568,14 +574,14 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,
                           [[maybe_unused]] std::size_t copy_amount) {};
 
     auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which);
     };
     auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset,
                                       std::size_t copy_amount) {
@@ -587,7 +593,7 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,
 
 void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size,
                               VideoCommon::CacheType which) {
-    Core::Memory::GpuGuestMemoryScoped<u8, GuestMemoryFlags::SafeReadWrite> data(
+    Tegra::Memory::GpuGuestMemoryScoped<u8, GuestMemoryFlags::SafeReadWrite> data(
         *this, gpu_src_addr, size);
     data.SetAddressAndSize(gpu_dest_addr, size);
     FlushRegion(gpu_dest_addr, size, which);
@@ -600,18 +606,18 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
             const std::size_t page{(page_index & big_page_mask) + size};
             return page <= big_page_size;
         }
-        const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
-        return page <= Core::Memory::YUZU_PAGESIZE;
+        const std::size_t page{(gpu_addr & Core::DEVICE_PAGEMASK) + size};
+        return page <= Core::DEVICE_PAGESIZE;
     }
     if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
         return false;
     }
-    const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
-    return page <= Core::Memory::YUZU_PAGESIZE;
+    const std::size_t page{(gpu_addr & Core::DEVICE_PAGEMASK) + size};
+    return page <= Core::DEVICE_PAGESIZE;
 }
 
 bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const {
-    std::optional<VAddr> old_page_addr{};
+    std::optional<DAddr> old_page_addr{};
     bool result{true};
     auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
                     std::size_t copy_amount) {
@@ -619,23 +625,23 @@ bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const
         return true;
     };
     auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             result = false;
             return true;
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
-        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != dev_addr_base) {
             result = false;
             return true;
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         return false;
     };
     auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -678,11 +684,11 @@ template <bool is_gpu_address>
 void MemoryManager::GetSubmappedRangeImpl(
     GPUVAddr gpu_addr, std::size_t size,
     boost::container::small_vector<
-        std::pair<std::conditional_t<is_gpu_address, GPUVAddr, VAddr>, std::size_t>, 32>& result)
+        std::pair<std::conditional_t<is_gpu_address, GPUVAddr, DAddr>, std::size_t>, 32>& result)
     const {
-    std::optional<std::pair<std::conditional_t<is_gpu_address, GPUVAddr, VAddr>, std::size_t>>
+    std::optional<std::pair<std::conditional_t<is_gpu_address, GPUVAddr, DAddr>, std::size_t>>
         last_segment{};
-    std::optional<VAddr> old_page_addr{};
+    std::optional<DAddr> old_page_addr{};
     const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index,
                                                 [[maybe_unused]] std::size_t offset,
                                                 [[maybe_unused]] std::size_t copy_amount) {
@@ -694,20 +700,20 @@ void MemoryManager::GetSubmappedRangeImpl(
     const auto extend_size_big = [this, &split, &old_page_addr,
                                   &last_segment](std::size_t page_index, std::size_t offset,
                                                  std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(big_page_table_dev[page_index]) << cpu_page_bits) + offset;
         if (old_page_addr) {
-            if (*old_page_addr != cpu_addr_base) {
+            if (*old_page_addr != dev_addr_base) {
                 split(0, 0, 0);
             }
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         if (!last_segment) {
             if constexpr (is_gpu_address) {
                 const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset;
                 last_segment = {new_base_addr, copy_amount};
             } else {
-                last_segment = {cpu_addr_base, copy_amount};
+                last_segment = {dev_addr_base, copy_amount};
             }
         } else {
             last_segment->second += copy_amount;
@@ -716,20 +722,20 @@ void MemoryManager::GetSubmappedRangeImpl(
     const auto extend_size_short = [this, &split, &old_page_addr,
                                     &last_segment](std::size_t page_index, std::size_t offset,
                                                    std::size_t copy_amount) {
-        const VAddr cpu_addr_base =
-            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        const DAddr dev_addr_base =
+            (static_cast<DAddr>(page_table[page_index]) << cpu_page_bits) + offset;
         if (old_page_addr) {
-            if (*old_page_addr != cpu_addr_base) {
+            if (*old_page_addr != dev_addr_base) {
                 split(0, 0, 0);
             }
         }
-        old_page_addr = {cpu_addr_base + copy_amount};
+        old_page_addr = {dev_addr_base + copy_amount};
         if (!last_segment) {
             if constexpr (is_gpu_address) {
                 const GPUVAddr new_base_addr = (page_index << page_bits) + offset;
                 last_segment = {new_base_addr, copy_amount};
             } else {
-                last_segment = {cpu_addr_base, copy_amount};
+                last_segment = {dev_addr_base, copy_amount};
             }
         } else {
             last_segment->second += copy_amount;
@@ -756,9 +762,12 @@ void MemoryManager::FlushCaching() {
 }
 
 const u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) const {
-    auto cpu_addr = GpuToCpuAddress(src_addr);
-    if (cpu_addr) {
-        return memory.GetSpan(*cpu_addr, size);
+    if (!IsContinuousRange(src_addr, size)) {
+        return nullptr;
+    }
+    auto dev_addr = GpuToCpuAddress(src_addr);
+    if (dev_addr) {
+        return memory.GetSpan(*dev_addr, size);
     }
     return nullptr;
 }
@@ -767,9 +776,9 @@ u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) {
     if (!IsContinuousRange(src_addr, size)) {
         return nullptr;
     }
-    auto cpu_addr = GpuToCpuAddress(src_addr);
-    if (cpu_addr) {
-        return memory.GetSpan(*cpu_addr, size);
+    auto dev_addr = GpuToCpuAddress(src_addr);
+    if (dev_addr) {
+        return memory.GetSpan(*dev_addr, size);
     }
     return nullptr;
 }
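
One detail worth spelling out in the page-table hunks above: entries store only `u32(dev_addr >> cpu_page_bits)`, i.e. the device page frame number, and lookups rebuild the address as `(entry << cpu_page_bits) + offset`. Assuming 4 KiB device pages (`cpu_page_bits == 12`), a 32-bit frame number spans a 44-bit device address space. A worked example:

```cpp
// Worked example of the page-table entry encoding used above, assuming 4 KiB
// device pages (cpu_page_bits == 12).
#include <cassert>
#include <cstdint>

int main() {
    constexpr uint64_t cpu_page_bits = 12;
    const uint64_t dev_addr = 0x0000'0042'5678'9000ULL; // page-aligned DAddr

    // What BigPageTableOp/PageTableOp store:
    const uint32_t sub_value = static_cast<uint32_t>(dev_addr >> cpu_page_bits);

    // What GpuToCpuAddress/ReadBlockImpl rebuild, for an in-page offset:
    const uint64_t offset = 0x123;
    const uint64_t rebuilt = (static_cast<uint64_t>(sub_value) << cpu_page_bits) + offset;

    assert(rebuilt == dev_addr + offset); // exact because dev_addr is page aligned
    return 0;
}
```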
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 9b311b9e5..c5255f36c 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -15,8 +15,8 @@
 #include "common/range_map.h"
 #include "common/scratch_buffer.h"
 #include "common/virtual_buffer.h"
-#include "core/memory.h"
 #include "video_core/cache_types.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/pte_kind.h"
 
 namespace VideoCore {
@@ -28,10 +28,6 @@ class InvalidationAccumulator;
 }
 
 namespace Core {
-class DeviceMemory;
-namespace Memory {
-class Memory;
-} // namespace Memory
 class System;
 } // namespace Core
 
@@ -41,6 +37,9 @@ class MemoryManager final {
 public:
     explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
                            u64 big_page_bits_ = 16, u64 page_bits_ = 12);
+    explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_,
+                           u64 address_space_bits_ = 40, u64 big_page_bits_ = 16,
+                           u64 page_bits_ = 12);
     ~MemoryManager();
 
     size_t GetID() const {
@@ -50,9 +49,9 @@ public:
     /// Binds a renderer to the memory manager.
     void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
 
-    [[nodiscard]] std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const;
+    [[nodiscard]] std::optional<DAddr> GpuToCpuAddress(GPUVAddr addr) const;
 
-    [[nodiscard]] std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr, std::size_t size) const;
+    [[nodiscard]] std::optional<DAddr> GpuToCpuAddress(GPUVAddr addr, std::size_t size) const;
 
     template <typename T>
     [[nodiscard]] T Read(GPUVAddr addr) const;
@@ -69,7 +68,7 @@ public:
         if (!address) {
             return {};
         }
-        return memory.GetPointer(*address);
+        return memory.GetPointer<T>(*address);
     }
 
     template <typename T>
@@ -110,7 +109,7 @@ public:
     [[nodiscard]] bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const;
 
     /**
-     * Checks if a gpu region is mapped by a single range of cpu addresses.
+     * Checks if a gpu region is mapped by a single range of device addresses.
      */
     [[nodiscard]] bool IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const;
 
@@ -120,14 +119,14 @@ public:
     [[nodiscard]] bool IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const;
 
     /**
-     * Returns a vector with all the subranges of cpu addresses mapped beneath.
+     * Returns a vector with all the subranges of device addresses mapped beneath.
      * If the region is continuous, a single pair will be returned. If it's unmapped, an empty
      * vector will be returned.
      */
     boost::container::small_vector<std::pair<GPUVAddr, std::size_t>, 32> GetSubmappedRange(
         GPUVAddr gpu_addr, std::size_t size) const;
 
-    GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+    GPUVAddr Map(GPUVAddr gpu_addr, DAddr dev_addr, std::size_t size,
                  PTEKind kind = PTEKind::INVALID, bool is_big_pages = true);
     GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
     void Unmap(GPUVAddr gpu_addr, std::size_t size);
@@ -186,12 +185,11 @@ private:
     void GetSubmappedRangeImpl(
         GPUVAddr gpu_addr, std::size_t size,
         boost::container::small_vector<
-            std::pair<std::conditional_t<is_gpu_address, GPUVAddr, VAddr>, std::size_t>, 32>&
+            std::pair<std::conditional_t<is_gpu_address, GPUVAddr, DAddr>, std::size_t>, 32>&
             result) const;
 
     Core::System& system;
-    Core::Memory::Memory& memory;
-    Core::DeviceMemory& device_memory;
+    MaxwellDeviceMemoryManager& memory;
 
     const u64 address_space_bits;
     const u64 page_bits;
@@ -218,11 +216,11 @@ private:
     std::vector<u64> big_entries;
 
     template <EntryType entry_type>
-    GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+    GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size,
                          PTEKind kind);
 
     template <EntryType entry_type>
-    GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+    GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size,
                             PTEKind kind);
 
     template <bool is_big_page>
@@ -233,11 +231,11 @@ private:
 
     Common::MultiLevelPageTable<u32> page_table;
     Common::RangeMap<GPUVAddr, PTEKind> kind_map;
-    Common::VirtualBuffer<u32> big_page_table_cpu;
+    Common::VirtualBuffer<u32> big_page_table_dev;
 
     std::vector<u64> big_page_continuous;
-    boost::container::small_vector<std::pair<VAddr, std::size_t>, 32> page_stash{};
-    boost::container::small_vector<std::pair<VAddr, std::size_t>, 32> page_stash2{};
+    boost::container::small_vector<std::pair<DAddr, std::size_t>, 32> page_stash{};
+    boost::container::small_vector<std::pair<DAddr, std::size_t>, 32> page_stash2{};
 
     mutable std::mutex guard;
 
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index a64404ce4..4861b123a 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -18,9 +18,9 @@
 
 #include "common/assert.h"
 #include "common/settings.h"
-#include "core/memory.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/texture_cache/slot_vector.h"
@@ -102,18 +102,19 @@ template <class QueryCache, class CachedQuery, class CounterStream, class HostCo
 class QueryCacheLegacy : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
     explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_,
-                              Core::Memory::Memory& cpu_memory_)
+                              Tegra::MaxwellDeviceMemoryManager& device_memory_)
         : rasterizer{rasterizer_},
          // Use reinterpret_cast instead of static_cast as a workaround for
           // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
-          cpu_memory{cpu_memory_}, streams{{
-                                       {CounterStream{reinterpret_cast<QueryCache&>(*this),
-                                                      VideoCore::QueryType::SamplesPassed}},
-                                       {CounterStream{reinterpret_cast<QueryCache&>(*this),
-                                                      VideoCore::QueryType::PrimitivesGenerated}},
-                                       {CounterStream{reinterpret_cast<QueryCache&>(*this),
-                                                      VideoCore::QueryType::TfbPrimitivesWritten}},
-                                   }} {
+          device_memory{device_memory_},
+          streams{{
+              {CounterStream{reinterpret_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::SamplesPassed}},
+              {CounterStream{reinterpret_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::PrimitivesGenerated}},
+              {CounterStream{reinterpret_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::TfbPrimitivesWritten}},
+          }} {
         (void)slot_async_jobs.insert(); // Null value
     }
 
@@ -322,13 +323,14 @@ private:
             local_lock.unlock();
             if (timestamp) {
                 u64 timestamp_value = *timestamp;
-                cpu_memory.WriteBlockUnsafe(address + sizeof(u64), &timestamp_value, sizeof(u64));
-                cpu_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
+                device_memory.WriteBlockUnsafe(address + sizeof(u64), &timestamp_value,
+                                               sizeof(u64));
+                device_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
                 rasterizer.InvalidateRegion(address, sizeof(u64) * 2,
                                             VideoCommon::CacheType::NoQueryCache);
             } else {
                 u32 small_value = static_cast<u32>(value);
-                cpu_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
+                device_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
                 rasterizer.InvalidateRegion(address, sizeof(u32),
                                             VideoCommon::CacheType::NoQueryCache);
             }
@@ -342,7 +344,7 @@ private:
     SlotVector<AsyncJob> slot_async_jobs;
 
     VideoCore::RasterizerInterface& rasterizer;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     mutable std::recursive_mutex mutex;
 
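The legacy query cache now writes results through the device memory manager rather than
Core::Memory::Memory. A sketch of the long-report write seen above (u64 result plus u64
timestamp, written timestamp-first), using a mock in place of
Tegra::MaxwellDeviceMemoryManager; only the WriteBlockUnsafe(address, pointer, size) shape is
taken from the diff:

```cpp
// Sketch of the long-query report write above. MockDeviceMemory is a stand-in
// for the real device memory manager.
#include <cstdint>
#include <cstring>
#include <vector>

using DAddr = std::uint64_t;

struct MockDeviceMemory {
    std::vector<std::uint8_t> backing = std::vector<std::uint8_t>(0x1000);
    void WriteBlockUnsafe(DAddr addr, const void* src, std::size_t size) {
        std::memcpy(backing.data() + addr, src, size);
    }
};

void WriteLongQueryReport(MockDeviceMemory& memory, DAddr address, std::uint64_t value,
                          std::uint64_t timestamp) {
    // Timestamp lives 8 bytes past the payload, mirroring the hunk above.
    memory.WriteBlockUnsafe(address + sizeof(std::uint64_t), &timestamp, sizeof(timestamp));
    memory.WriteBlockUnsafe(address, &value, sizeof(value));
}
```
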
diff --git a/src/video_core/query_cache/query_base.h b/src/video_core/query_cache/query_base.h
index 1d786b3a7..aca6a6447 100644
--- a/src/video_core/query_cache/query_base.h
+++ b/src/video_core/query_cache/query_base.h
@@ -23,7 +23,7 @@ DECLARE_ENUM_FLAG_OPERATORS(QueryFlagBits)
 
 class QueryBase {
 public:
-    VAddr guest_address{};
+    DAddr guest_address{};
     QueryFlagBits flags{};
     u64 value{};
 
@@ -32,7 +32,7 @@ protected:
     QueryBase() = default;
 
     // Parameterized constructor
-    QueryBase(VAddr address, QueryFlagBits flags_, u64 value_)
+    QueryBase(DAddr address, QueryFlagBits flags_, u64 value_)
         : guest_address(address), flags(flags_), value{value_} {}
 };
 
diff --git a/src/video_core/query_cache/query_cache.h b/src/video_core/query_cache/query_cache.h
index 94f0c4466..08b779055 100644
--- a/src/video_core/query_cache/query_cache.h
+++ b/src/video_core/query_cache/query_cache.h
@@ -15,9 +15,9 @@
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
 #include "common/settings.h"
-#include "core/memory.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/memory_manager.h"
 #include "video_core/query_cache/bank_base.h"
 #include "video_core/query_cache/query_base.h"
@@ -113,9 +113,10 @@ struct QueryCacheBase<Traits>::QueryCacheBaseImpl {
     using RuntimeType = typename Traits::RuntimeType;
 
     QueryCacheBaseImpl(QueryCacheBase<Traits>* owner_, VideoCore::RasterizerInterface& rasterizer_,
-                       Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_, Tegra::GPU& gpu_)
+                       Tegra::MaxwellDeviceMemoryManager& device_memory_, RuntimeType& runtime_,
+                       Tegra::GPU& gpu_)
         : owner{owner_}, rasterizer{rasterizer_},
-          cpu_memory{cpu_memory_}, runtime{runtime_}, gpu{gpu_} {
+          device_memory{device_memory_}, runtime{runtime_}, gpu{gpu_} {
         streamer_mask = 0;
         for (size_t i = 0; i < static_cast<size_t>(QueryType::MaxQueryTypes); i++) {
             streamers[i] = runtime.GetStreamerInterface(static_cast<QueryType>(i));
@@ -158,7 +159,7 @@ struct QueryCacheBase<Traits>::QueryCacheBaseImpl {
 
     QueryCacheBase<Traits>* owner;
     VideoCore::RasterizerInterface& rasterizer;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
     RuntimeType& runtime;
     Tegra::GPU& gpu;
     std::array<StreamerInterface*, static_cast<size_t>(QueryType::MaxQueryTypes)> streamers;
@@ -171,10 +172,11 @@ struct QueryCacheBase<Traits>::QueryCacheBaseImpl {
 template <typename Traits>
 QueryCacheBase<Traits>::QueryCacheBase(Tegra::GPU& gpu_,
                                        VideoCore::RasterizerInterface& rasterizer_,
-                                       Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_)
+                                       Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                                       RuntimeType& runtime_)
     : cached_queries{} {
     impl = std::make_unique<QueryCacheBase<Traits>::QueryCacheBaseImpl>(
-        this, rasterizer_, cpu_memory_, runtime_, gpu_);
+        this, rasterizer_, device_memory_, runtime_, gpu_);
 }
 
 template <typename Traits>
@@ -240,7 +242,7 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
     if (!cpu_addr_opt) [[unlikely]] {
         return;
     }
-    VAddr cpu_addr = *cpu_addr_opt;
+    DAddr cpu_addr = *cpu_addr_opt;
     const size_t new_query_id = streamer->WriteCounter(cpu_addr, has_timestamp, payload, subreport);
     auto* query = streamer->GetQuery(new_query_id);
     if (is_fence) {
@@ -250,13 +252,12 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
     query_location.stream_id.Assign(static_cast<u32>(streamer_id));
     query_location.query_id.Assign(static_cast<u32>(new_query_id));
     const auto gen_caching_indexing = [](VAddr cur_addr) {
-        return std::make_pair<u64, u32>(cur_addr >> Core::Memory::YUZU_PAGEBITS,
-                                        static_cast<u32>(cur_addr & Core::Memory::YUZU_PAGEMASK));
+        return std::make_pair<u64, u32>(cur_addr >> Core::DEVICE_PAGEBITS,
+                                        static_cast<u32>(cur_addr & Core::DEVICE_PAGEMASK));
     };
-    u8* pointer = impl->cpu_memory.GetPointer(cpu_addr);
-    u8* pointer_timestamp = impl->cpu_memory.GetPointer(cpu_addr + 8);
+    u8* pointer = impl->device_memory.template GetPointer<u8>(cpu_addr);
+    u8* pointer_timestamp = impl->device_memory.template GetPointer<u8>(cpu_addr + 8);
     bool is_synced = !Settings::IsGPULevelHigh() && is_fence;
-
     std::function<void()> operation([this, is_synced, streamer, query_base = query, query_location,
                                      pointer, pointer_timestamp] {
         if (True(query_base->flags & QueryFlagBits::IsInvalidated)) {
@@ -323,8 +324,8 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
 template <typename Traits>
 void QueryCacheBase<Traits>::UnregisterPending() {
     const auto gen_caching_indexing = [](VAddr cur_addr) {
-        return std::make_pair<u64, u32>(cur_addr >> Core::Memory::YUZU_PAGEBITS,
-                                        static_cast<u32>(cur_addr & Core::Memory::YUZU_PAGEMASK));
+        return std::make_pair<u64, u32>(cur_addr >> Core::DEVICE_PAGEBITS,
+                                        static_cast<u32>(cur_addr & Core::DEVICE_PAGEMASK));
     };
     std::scoped_lock lock(cache_mutex);
     for (QueryLocation loc : impl->pending_unregister) {
@@ -388,7 +389,7 @@ bool QueryCacheBase<Traits>::AccelerateHostConditionalRendering() {
         }
         VAddr cpu_addr = *cpu_addr_opt;
         std::scoped_lock lock(cache_mutex);
-        auto it1 = cached_queries.find(cpu_addr >> Core::Memory::YUZU_PAGEBITS);
+        auto it1 = cached_queries.find(cpu_addr >> Core::DEVICE_PAGEBITS);
         if (it1 == cached_queries.end()) {
             return VideoCommon::LookupData{
                 .address = cpu_addr,
@@ -396,10 +397,10 @@ bool QueryCacheBase<Traits>::AccelerateHostConditionalRendering() {
             };
         }
         auto& sub_container = it1->second;
-        auto it_current = sub_container.find(cpu_addr & Core::Memory::YUZU_PAGEMASK);
+        auto it_current = sub_container.find(cpu_addr & Core::DEVICE_PAGEMASK);
 
         if (it_current == sub_container.end()) {
-            auto it_current_2 = sub_container.find((cpu_addr & Core::Memory::YUZU_PAGEMASK) + 4);
+            auto it_current_2 = sub_container.find((cpu_addr & Core::DEVICE_PAGEMASK) + 4);
             if (it_current_2 == sub_container.end()) {
                 return VideoCommon::LookupData{
                     .address = cpu_addr,
@@ -559,7 +560,7 @@ bool QueryCacheBase<Traits>::SemiFlushQueryDirty(QueryCacheBase<Traits>::QueryLo
     }
     if (True(query_base->flags & QueryFlagBits::IsFinalValueSynced) &&
         False(query_base->flags & QueryFlagBits::IsGuestSynced)) {
-        auto* ptr = impl->cpu_memory.GetPointer(query_base->guest_address);
+        auto* ptr = impl->device_memory.template GetPointer<u8>(query_base->guest_address);
         if (True(query_base->flags & QueryFlagBits::HasTimestamp)) {
             std::memcpy(ptr, &query_base->value, sizeof(query_base->value));
             return false;
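
gen_caching_indexing now splits addresses on the device page size instead of the CPU page
size. A worked restatement of that split, with assumed 4 KiB values for the
DEVICE_PAGEBITS/DEVICE_PAGEMASK constants (the real definitions ship with the new device
memory manager and may differ):

```cpp
// Restatement of gen_caching_indexing above with assumed 4 KiB device pages.
#include <cstdint>
#include <utility>

constexpr std::uint64_t kDevicePageBits = 12;
constexpr std::uint64_t kDevicePageMask = (1ULL << kDevicePageBits) - 1;

std::pair<std::uint64_t, std::uint32_t> GenCachingIndexing(std::uint64_t cur_addr) {
    return {cur_addr >> kDevicePageBits,
            static_cast<std::uint32_t>(cur_addr & kDevicePageMask)};
}
// Example: 0x12345678 splits into page 0x12345 and offset 0x678. cached_queries
// is keyed by the page, with a per-page container keyed by the 32-bit offset,
// so both lookups must agree on the page-size constants.
```
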
diff --git a/src/video_core/query_cache/query_cache_base.h b/src/video_core/query_cache/query_cache_base.h
index 07be421c6..c12fb75ef 100644
--- a/src/video_core/query_cache/query_cache_base.h
+++ b/src/video_core/query_cache/query_cache_base.h
@@ -13,15 +13,11 @@
 #include "common/assert.h"
 #include "common/bit_field.h"
 #include "common/common_types.h"
-#include "core/memory.h"
 #include "video_core/control/channel_state_cache.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/query_cache/query_base.h"
 #include "video_core/query_cache/types.h"
 
-namespace Core::Memory {
-class Memory;
-}
-
 namespace VideoCore {
 class RasterizerInterface;
 }
@@ -53,7 +49,8 @@ public:
     };
 
     explicit QueryCacheBase(Tegra::GPU& gpu, VideoCore::RasterizerInterface& rasterizer_,
-                            Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_);
+                            Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                            RuntimeType& runtime_);
 
     ~QueryCacheBase();
 
@@ -125,10 +122,10 @@ protected:
         const u64 addr_begin = addr;
         const u64 addr_end = addr_begin + size;
 
-        const u64 page_end = addr_end >> Core::Memory::YUZU_PAGEBITS;
+        const u64 page_end = addr_end >> Core::DEVICE_PAGEBITS;
         std::scoped_lock lock(cache_mutex);
-        for (u64 page = addr_begin >> Core::Memory::YUZU_PAGEBITS; page <= page_end; ++page) {
-            const u64 page_start = page << Core::Memory::YUZU_PAGEBITS;
+        for (u64 page = addr_begin >> Core::DEVICE_PAGEBITS; page <= page_end; ++page) {
+            const u64 page_start = page << Core::DEVICE_PAGEBITS;
             const auto in_range = [page_start, addr_begin, addr_end](const u32 query_location) {
                 const u64 cache_begin = page_start + query_location;
                 const u64 cache_end = cache_begin + sizeof(u32);
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
deleted file mode 100644
index f200a650f..000000000
--- a/src/video_core/rasterizer_accelerated.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <atomic>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/div_ceil.h"
-#include "core/memory.h"
-#include "video_core/rasterizer_accelerated.h"
-
-namespace VideoCore {
-
-using namespace Core::Memory;
-
-RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
-    : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}
-
-RasterizerAccelerated::~RasterizerAccelerated() = default;
-
-void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-    u64 uncache_begin = 0;
-    u64 cache_begin = 0;
-    u64 uncache_bytes = 0;
-    u64 cache_bytes = 0;
-
-    std::atomic_thread_fence(std::memory_order_acquire);
-    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
-    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
-        std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);
-
-        if (delta > 0) {
-            ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
-        } else if (delta < 0) {
-            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
-        } else {
-            ASSERT_MSG(false, "Delta must be non-zero!");
-        }
-
-            // Adds or subtracts 1, as count is an unsigned 16-bit value
-        count.fetch_add(static_cast<u16>(delta), std::memory_order_release);
-
-        // Assume delta is either -1 or 1
-        if (count.load(std::memory_order::relaxed) == 0) {
-            if (uncache_bytes == 0) {
-                uncache_begin = page;
-            }
-            uncache_bytes += YUZU_PAGESIZE;
-        } else if (uncache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
-                                                  false);
-            uncache_bytes = 0;
-        }
-        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
-            if (cache_bytes == 0) {
-                cache_begin = page;
-            }
-            cache_bytes += YUZU_PAGESIZE;
-        } else if (cache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
-            cache_bytes = 0;
-        }
-    }
-    if (uncache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
-    }
-    if (cache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
-    }
-}
-
-} // namespace VideoCore
diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h
deleted file mode 100644
index e6c0ea87a..000000000
--- a/src/video_core/rasterizer_accelerated.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <array>
-#include <atomic>
-
-#include "common/common_types.h"
-#include "video_core/rasterizer_interface.h"
-
-namespace Core::Memory {
-class Memory;
-}
-
-namespace VideoCore {
-
-/// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface.
-class RasterizerAccelerated : public RasterizerInterface {
-public:
-    explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_);
-    ~RasterizerAccelerated() override;
-
-    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
-
-private:
-    class CacheEntry final {
-    public:
-        CacheEntry() = default;
-
-        std::atomic_uint16_t& Count(std::size_t page) {
-            return values[page & 3];
-        }
-
-        const std::atomic_uint16_t& Count(std::size_t page) const {
-            return values[page & 3];
-        }
-
-    private:
-        std::array<std::atomic_uint16_t, 4> values{};
-    };
-    static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");
-
-    using CachedPages = std::array<CacheEntry, 0x2000000>;
-    std::unique_ptr<CachedPages> cached_pages;
-    Core::Memory::Memory& cpu_memory;
-};
-
-} // namespace VideoCore
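
These two deletions remove RasterizerAccelerated entirely. The invariant it maintained, a
per-page u16 reference count with RasterizerMarkRegionCached fired on 0-to-1 and 1-to-0
transitions, still has to hold somewhere; later hunks re-point the memory trackers at the
device memory manager, which takes over this bookkeeping. A condensed restatement of the
deleted logic (PageRefCounter/MarkRegion are illustrative names; the original also coalesced
contiguous runs of pages into single mark calls, omitted here):

```cpp
// Condensed invariant from the deleted UpdatePagesCachedCount: a page stays
// marked "cached" exactly while its reference count is non-zero.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::uint64_t kPageBits = 12; // YUZU_PAGEBITS in the deleted code

class PageRefCounter {
    std::vector<std::atomic<std::uint16_t>> counts;

    void MarkRegion(std::uint64_t page, bool cached) {
        // Would forward to RasterizerMarkRegionCached(page << kPageBits, ...).
        (void)page;
        (void)cached;
    }

public:
    explicit PageRefCounter(std::size_t num_pages) : counts(num_pages) {}

    void Add(std::uint64_t addr, std::uint64_t size, int delta) { // delta is +1 or -1
        const std::uint64_t first = addr >> kPageBits;
        const std::uint64_t last = (addr + size - 1) >> kPageBits;
        for (std::uint64_t page = first; page <= last; ++page) {
            const std::uint16_t before = counts[page].fetch_add(
                static_cast<std::uint16_t>(delta), std::memory_order_acq_rel);
            if (delta > 0 && before == 0) {
                MarkRegion(page, true); // 0 -> 1: page gained its first user
            } else if (delta < 0 && before == 1) {
                MarkRegion(page, false); // 1 -> 0: page lost its last user
            }
        }
    }
};
```
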
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 49224ca85..8fa4e4d9a 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -86,35 +86,35 @@ public:
     virtual void FlushAll() = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    virtual void FlushRegion(VAddr addr, u64 size,
+    virtual void FlushRegion(DAddr addr, u64 size,
                              VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
 
     /// Check if the specified memory area requires flushing to CPU memory.
-    virtual bool MustFlushRegion(VAddr addr, u64 size,
+    virtual bool MustFlushRegion(DAddr addr, u64 size,
                                  VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
 
-    virtual RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) = 0;
+    virtual RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    virtual void InvalidateRegion(VAddr addr, u64 size,
+    virtual void InvalidateRegion(DAddr addr, u64 size,
                                   VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
 
-    virtual void InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) {
+    virtual void InnerInvalidation(std::span<const std::pair<DAddr, std::size_t>> sequences) {
         for (const auto& [cpu_addr, size] : sequences) {
             InvalidateRegion(cpu_addr, size);
         }
     }
 
     /// Notify rasterizer that any caches of the specified region are desynced from the guest
-    virtual void OnCacheInvalidation(VAddr addr, u64 size) = 0;
+    virtual void OnCacheInvalidation(PAddr addr, u64 size) = 0;
 
-    virtual bool OnCPUWrite(VAddr addr, u64 size) = 0;
+    virtual bool OnCPUWrite(PAddr addr, u64 size) = 0;
 
     /// Sync memory between guest and host.
     virtual void InvalidateGPUCache() = 0;
 
     /// Unmap memory range
-    virtual void UnmapMemory(VAddr addr, u64 size) = 0;
+    virtual void UnmapMemory(DAddr addr, u64 size) = 0;
 
     /// Remap GPU memory range. This means the underlying backing memory changed
     virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0;
@@ -122,7 +122,7 @@ public:
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     /// and invalidated
     virtual void FlushAndInvalidateRegion(
-        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
+        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
 
     /// Notify the host renderer to wait for previous primitive and compute operations.
     virtual void WaitForIdle() = 0;
@@ -157,13 +157,10 @@ public:
 
     /// Attempt to use a faster method to display the framebuffer to screen
     [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
-                                                 VAddr framebuffer_addr, u32 pixel_stride) {
+                                                 DAddr framebuffer_addr, u32 pixel_stride) {
         return false;
     }
 
-    /// Increase/decrease the number of objects in pages touching the specified region
-    virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {}
-
     /// Initialize disk cached resources for the game being emulated
     virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                                    const DiskResourceLoadCallback& callback) {}
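
RasterizerInterface now takes DAddr for flush/invalidate ranges and PAddr for the CPU-write
notifications, and UpdatePagesCachedCount leaves the interface. The InnerInvalidation default
above simply degrades to one InvalidateRegion call per range; a contract sketch
(MiniRasterizer is a stand-in, not the real interface):

```cpp
// Without an override, a span of (DAddr, size) ranges degrades to one
// InvalidateRegion call each, exactly as in the default body above.
#include <cstddef>
#include <cstdint>
#include <span>
#include <utility>

using DAddr = std::uint64_t;

struct MiniRasterizer {
    virtual ~MiniRasterizer() = default;
    virtual void InvalidateRegion(DAddr addr, std::uint64_t size) = 0;
    virtual void InnerInvalidation(std::span<const std::pair<DAddr, std::size_t>> sequences) {
        for (const auto& [addr, size] : sequences) {
            InvalidateRegion(addr, size);
        }
    }
};
```
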
diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp
index 4f1d5b548..abfabb65b 100644
--- a/src/video_core/renderer_null/null_rasterizer.cpp
+++ b/src/video_core/renderer_null/null_rasterizer.cpp
@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "common/alignment.h"
-#include "core/memory.h"
 #include "video_core/control/channel_state.h"
 #include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
@@ -19,8 +18,7 @@ bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
     return true;
 }
 
-RasterizerNull::RasterizerNull(Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu)
-    : RasterizerAccelerated(cpu_memory_), m_gpu{gpu} {}
+RasterizerNull::RasterizerNull(Tegra::GPU& gpu) : m_gpu{gpu} {}
 RasterizerNull::~RasterizerNull() = default;
 
 void RasterizerNull::Draw(bool is_indexed, u32 instance_count) {}
@@ -45,25 +43,25 @@ void RasterizerNull::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr
                                                u32 size) {}
 void RasterizerNull::DisableGraphicsUniformBuffer(size_t stage, u32 index) {}
 void RasterizerNull::FlushAll() {}
-void RasterizerNull::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
-bool RasterizerNull::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType) {
+void RasterizerNull::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType) {}
+bool RasterizerNull::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType) {
     return false;
 }
-void RasterizerNull::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
-bool RasterizerNull::OnCPUWrite(VAddr addr, u64 size) {
+void RasterizerNull::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType) {}
+bool RasterizerNull::OnCPUWrite(PAddr addr, u64 size) {
     return false;
 }
-void RasterizerNull::OnCacheInvalidation(VAddr addr, u64 size) {}
-VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 size) {
+void RasterizerNull::OnCacheInvalidation(PAddr addr, u64 size) {}
+VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(DAddr addr, u64 size) {
     VideoCore::RasterizerDownloadArea new_area{
-        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
-        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+        .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE),
+        .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE),
         .preemtive = true,
     };
     return new_area;
 }
 void RasterizerNull::InvalidateGPUCache() {}
-void RasterizerNull::UnmapMemory(VAddr addr, u64 size) {}
+void RasterizerNull::UnmapMemory(DAddr addr, u64 size) {}
 void RasterizerNull::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {}
 void RasterizerNull::SignalFence(std::function<void()>&& func) {
     func();
@@ -78,7 +76,7 @@ void RasterizerNull::SignalSyncPoint(u32 value) {
 }
 void RasterizerNull::SignalReference() {}
 void RasterizerNull::ReleaseFences(bool) {}
-void RasterizerNull::FlushAndInvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
+void RasterizerNull::FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType) {}
 void RasterizerNull::WaitForIdle() {}
 void RasterizerNull::FragmentBarrier() {}
 void RasterizerNull::TiledCacheBarrier() {}
@@ -95,7 +93,7 @@ bool RasterizerNull::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Surfac
 void RasterizerNull::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                               std::span<const u8> memory) {}
 bool RasterizerNull::AccelerateDisplay(const Tegra::FramebufferConfig& config,
-                                       VAddr framebuffer_addr, u32 pixel_stride) {
+                                       DAddr framebuffer_addr, u32 pixel_stride) {
     return true;
 }
 void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h
index 23001eeb8..a5789604f 100644
--- a/src/video_core/renderer_null/null_rasterizer.h
+++ b/src/video_core/renderer_null/null_rasterizer.h
@@ -6,7 +6,6 @@
 #include "common/common_types.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_dma.h"
-#include "video_core/rasterizer_accelerated.h"
 #include "video_core/rasterizer_interface.h"
 
 namespace Core {
@@ -32,10 +31,10 @@ public:
     }
 };
 
-class RasterizerNull final : public VideoCore::RasterizerAccelerated,
+class RasterizerNull final : public VideoCore::RasterizerInterface,
                              protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
-    explicit RasterizerNull(Core::Memory::Memory& cpu_memory, Tegra::GPU& gpu);
+    explicit RasterizerNull(Tegra::GPU& gpu);
     ~RasterizerNull() override;
 
     void Draw(bool is_indexed, u32 instance_count) override;
@@ -48,17 +47,17 @@ public:
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
-    void FlushRegion(VAddr addr, u64 size,
+    void FlushRegion(DAddr addr, u64 size,
                      VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    bool MustFlushRegion(VAddr addr, u64 size,
+    bool MustFlushRegion(DAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    void InvalidateRegion(VAddr addr, u64 size,
+    void InvalidateRegion(DAddr addr, u64 size,
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    void OnCacheInvalidation(VAddr addr, u64 size) override;
-    bool OnCPUWrite(VAddr addr, u64 size) override;
-    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
+    void OnCacheInvalidation(PAddr addr, u64 size) override;
+    bool OnCPUWrite(PAddr addr, u64 size) override;
+    VideoCore::RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override;
     void InvalidateGPUCache() override;
-    void UnmapMemory(VAddr addr, u64 size) override;
+    void UnmapMemory(DAddr addr, u64 size) override;
     void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
     void SignalFence(std::function<void()>&& func) override;
     void SyncOperation(std::function<void()>&& func) override;
@@ -66,7 +65,7 @@ public:
     void SignalReference() override;
     void ReleaseFences(bool force) override;
     void FlushAndInvalidateRegion(
-        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;
     void FragmentBarrier() override;
     void TiledCacheBarrier() override;
@@ -78,7 +77,7 @@ public:
     Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
     void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                   std::span<const u8> memory) override;
-    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
+    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr,
                            u32 pixel_stride) override;
     void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                            const VideoCore::DiskResourceLoadCallback& callback) override;
diff --git a/src/video_core/renderer_null/renderer_null.cpp b/src/video_core/renderer_null/renderer_null.cpp
index be92cc2f4..078feb925 100644
--- a/src/video_core/renderer_null/renderer_null.cpp
+++ b/src/video_core/renderer_null/renderer_null.cpp
@@ -7,10 +7,9 @@
 
 namespace Null {
 
-RendererNull::RendererNull(Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory,
-                           Tegra::GPU& gpu,
+RendererNull::RendererNull(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
                            std::unique_ptr<Core::Frontend::GraphicsContext> context_)
-    : RendererBase(emu_window, std::move(context_)), m_gpu(gpu), m_rasterizer(cpu_memory, gpu) {}
+    : RendererBase(emu_window, std::move(context_)), m_gpu(gpu), m_rasterizer(gpu) {}
 
 RendererNull::~RendererNull() = default;
 
diff --git a/src/video_core/renderer_null/renderer_null.h b/src/video_core/renderer_null/renderer_null.h
index 967ff5645..9531b43f6 100644
--- a/src/video_core/renderer_null/renderer_null.h
+++ b/src/video_core/renderer_null/renderer_null.h
@@ -13,8 +13,7 @@ namespace Null {
 
 class RendererNull final : public VideoCore::RendererBase {
 public:
-    explicit RendererNull(Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory,
-                          Tegra::GPU& gpu,
+    explicit RendererNull(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
                           std::unique_ptr<Core::Frontend::GraphicsContext> context);
     ~RendererNull() override;
 
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 517ac14dd..ade72e1f9 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -47,11 +47,10 @@ constexpr std::array PROGRAM_LUT{
 } // Anonymous namespace
 
 Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
-    : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(null_params) {}
+    : VideoCommon::BufferBase(null_params) {}
 
-Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
-               VAddr cpu_addr_, u64 size_bytes_)
-    : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_) {
+Buffer::Buffer(BufferCacheRuntime& runtime, DAddr cpu_addr_, u64 size_bytes_)
+    : VideoCommon::BufferBase(cpu_addr_, size_bytes_) {
     buffer.Create();
     if (runtime.device.HasDebuggingToolAttached()) {
         const std::string name = fmt::format("Buffer 0x{:x}", CpuAddr());
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 2c18de166..af34c272b 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -10,7 +10,6 @@
 #include "common/common_types.h"
 #include "video_core/buffer_cache/buffer_cache_base.h"
 #include "video_core/buffer_cache/memory_tracker_base.h"
-#include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_opengl/gl_device.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 #include "video_core/renderer_opengl/gl_staging_buffer_pool.h"
@@ -19,10 +18,9 @@ namespace OpenGL {
 
 class BufferCacheRuntime;
 
-class Buffer : public VideoCommon::BufferBase<VideoCore::RasterizerInterface> {
+class Buffer : public VideoCommon::BufferBase {
 public:
-    explicit Buffer(BufferCacheRuntime&, VideoCore::RasterizerInterface& rasterizer, VAddr cpu_addr,
-                    u64 size_bytes);
+    explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr, u64 size_bytes);
     explicit Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams);
 
     void ImmediateUpload(size_t offset, std::span<const u8> data) noexcept;
@@ -244,7 +242,7 @@ struct BufferCacheParams {
     using Runtime = OpenGL::BufferCacheRuntime;
     using Buffer = OpenGL::Buffer;
     using Async_Buffer = OpenGL::StagingBufferMap;
-    using MemoryTracker = VideoCommon::MemoryTrackerBase<VideoCore::RasterizerInterface>;
+    using MemoryTracker = VideoCommon::MemoryTrackerBase<Tegra::MaxwellDeviceMemoryManager>;
 
     static constexpr bool IS_OPENGL = true;
     static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS = true;
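
The buffer cache's MemoryTracker is now parameterized on the device memory manager instead of
the rasterizer, matching the removal of UpdatePagesCachedCount from RasterizerInterface
earlier in this diff. As an assumption about the shape involved (the binding constraint
actually lives in memory_tracker_base.h and may require more), the new parameter presumably
has to expose the page-count hook the rasterizer just lost:

```cpp
// Assumed minimal requirement on MemoryTrackerBase's template parameter,
// inferred from this diff; not the actual constraint in the codebase.
#include <concepts>
#include <cstdint>

template <typename DeviceMemory>
concept TracksCachedPages =
    requires(DeviceMemory& mem, std::uint64_t addr, std::uint64_t size) {
        mem.UpdatePagesCachedCount(addr, size, 1);
    };
```
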
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index fef7360ed..2147d587f 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -35,8 +35,9 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
 
 } // Anonymous namespace
 
-QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
-    : QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {
+QueryCache::QueryCache(RasterizerOpenGL& rasterizer_,
+                       Tegra::MaxwellDeviceMemoryManager& device_memory_)
+    : QueryCacheLegacy(rasterizer_, device_memory_), gl_rasterizer{rasterizer_} {
     EnableCounters();
 }
 
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 0721e0b3d..38118f355 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/query_cache.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -28,7 +29,8 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
 class QueryCache final
     : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream, HostCounter> {
 public:
-    explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_);
+    explicit QueryCache(RasterizerOpenGL& rasterizer_,
+                        Tegra::MaxwellDeviceMemoryManager& device_memory_);
     ~QueryCache();
 
     OGLQuery AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 7a5fad735..d5354ef2d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -70,18 +70,18 @@ std::optional<VideoCore::QueryType> MaxwellToVideoCoreQuery(VideoCommon::QueryTy
 } // Anonymous namespace
 
 RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
-                                   Core::Memory::Memory& cpu_memory_, const Device& device_,
-                                   ScreenInfo& screen_info_, ProgramManager& program_manager_,
-                                   StateTracker& state_tracker_)
-    : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_),
+                                   Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                                   const Device& device_, ScreenInfo& screen_info_,
+                                   ProgramManager& program_manager_, StateTracker& state_tracker_)
+    : gpu(gpu_), device_memory(device_memory_), device(device_), screen_info(screen_info_),
       program_manager(program_manager_), state_tracker(state_tracker_),
       texture_cache_runtime(device, program_manager, state_tracker, staging_buffer_pool),
-      texture_cache(texture_cache_runtime, *this),
+      texture_cache(texture_cache_runtime, device_memory_),
       buffer_cache_runtime(device, staging_buffer_pool),
-      buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
-      shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
-                   state_tracker, gpu.ShaderNotify()),
-      query_cache(*this, cpu_memory_), accelerate_dma(buffer_cache, texture_cache),
+      buffer_cache(device_memory_, buffer_cache_runtime),
+      shader_cache(device_memory_, emu_window_, device, texture_cache, buffer_cache,
+                   program_manager, state_tracker, gpu.ShaderNotify()),
+      query_cache(*this, device_memory_), accelerate_dma(buffer_cache, texture_cache),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
       blit_image(program_manager_) {}
 
@@ -475,7 +475,7 @@ void RasterizerOpenGL::DisableGraphicsUniformBuffer(size_t stage, u32 index) {
 
 void RasterizerOpenGL::FlushAll() {}
 
-void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
+void RasterizerOpenGL::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     if (addr == 0 || size == 0) {
         return;
@@ -493,7 +493,7 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType
     }
 }
 
-bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
+bool RasterizerOpenGL::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
     if ((True(which & VideoCommon::CacheType::BufferCache))) {
         std::scoped_lock lock{buffer_cache.mutex};
         if (buffer_cache.IsRegionGpuModified(addr, size)) {
@@ -510,7 +510,7 @@ bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
     return false;
 }
 
-VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 size) {
+VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(DAddr addr, u64 size) {
     {
         std::scoped_lock lock{texture_cache.mutex};
         auto area = texture_cache.GetFlushArea(addr, size);
@@ -526,14 +526,14 @@ VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64
         }
     }
     VideoCore::RasterizerDownloadArea new_area{
-        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
-        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+        .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE),
+        .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE),
         .preemtive = true,
     };
     return new_area;
 }
 
-void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
+void RasterizerOpenGL::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     if (addr == 0 || size == 0) {
         return;
@@ -554,7 +554,7 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::Cache
     }
 }
 
-bool RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
+bool RasterizerOpenGL::OnCPUWrite(PAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     if (addr == 0 || size == 0) {
         return false;
@@ -576,8 +576,9 @@ bool RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
     return false;
 }
 
-void RasterizerOpenGL::OnCacheInvalidation(VAddr addr, u64 size) {
+void RasterizerOpenGL::OnCacheInvalidation(PAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
+
     if (addr == 0 || size == 0) {
         return;
     }
@@ -596,7 +597,7 @@ void RasterizerOpenGL::InvalidateGPUCache() {
     gpu.InvalidateGPUCache();
 }
 
-void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) {
+void RasterizerOpenGL::UnmapMemory(DAddr addr, u64 size) {
     {
         std::scoped_lock lock{texture_cache.mutex};
         texture_cache.UnmapMemory(addr, size);
@@ -635,7 +636,7 @@ void RasterizerOpenGL::ReleaseFences(bool force) {
     fence_manager.WaitPendingFences(force);
 }
 
-void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size,
+void RasterizerOpenGL::FlushAndInvalidateRegion(DAddr addr, u64 size,
                                                 VideoCommon::CacheType which) {
     if (Settings::IsGPULevelExtreme()) {
         FlushRegion(addr, size, which);
@@ -739,7 +740,7 @@ void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_si
 }
 
 bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
-                                         VAddr framebuffer_addr, u32 pixel_stride) {
+                                         DAddr framebuffer_addr, u32 pixel_stride) {
     if (framebuffer_addr == 0) {
         return false;
     }
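
When no cache can answer, GetFlushArea falls back to expanding the request to whole device
pages. A worked example, assuming DEVICE_PAGESIZE is 4 KiB and with stand-ins that behave
like Common::AlignDown/Common::AlignUp for power-of-two alignments:

```cpp
// Worked example of the fallback flush area above.
#include <cstdint>

constexpr std::uint64_t kDevicePageSize = 0x1000; // assumed value

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1);
}
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return AlignDown(value + align - 1, align);
}

// A 0x20-byte request at 0x12345 expands to the whole containing page range:
static_assert(AlignDown(0x12345, kDevicePageSize) == 0x12000);
static_assert(AlignUp(0x12345 + 0x20, kDevicePageSize) == 0x13000);
```
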
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index ce3460938..34aa73526 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -14,7 +14,6 @@
 #include "common/common_types.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_dma.h"
-#include "video_core/rasterizer_accelerated.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_opengl/blit_image.h"
 #include "video_core/renderer_opengl/gl_buffer_cache.h"
@@ -72,13 +71,13 @@ private:
     TextureCache& texture_cache;
 };
 
-class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
+class RasterizerOpenGL : public VideoCore::RasterizerInterface,
                          protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
     explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
-                              Core::Memory::Memory& cpu_memory_, const Device& device_,
-                              ScreenInfo& screen_info_, ProgramManager& program_manager_,
-                              StateTracker& state_tracker_);
+                              Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                              const Device& device_, ScreenInfo& screen_info_,
+                              ProgramManager& program_manager_, StateTracker& state_tracker_);
     ~RasterizerOpenGL() override;
 
     void Draw(bool is_indexed, u32 instance_count) override;
@@ -92,17 +91,17 @@ public:
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
-    void FlushRegion(VAddr addr, u64 size,
+    void FlushRegion(DAddr addr, u64 size,
                      VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    bool MustFlushRegion(VAddr addr, u64 size,
+    bool MustFlushRegion(DAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
-    void InvalidateRegion(VAddr addr, u64 size,
+    VideoCore::RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override;
+    void InvalidateRegion(DAddr addr, u64 size,
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    void OnCacheInvalidation(VAddr addr, u64 size) override;
-    bool OnCPUWrite(VAddr addr, u64 size) override;
+    void OnCacheInvalidation(PAddr addr, u64 size) override;
+    bool OnCPUWrite(PAddr addr, u64 size) override;
     void InvalidateGPUCache() override;
-    void UnmapMemory(VAddr addr, u64 size) override;
+    void UnmapMemory(DAddr addr, u64 size) override;
     void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
     void SignalFence(std::function<void()>&& func) override;
     void SyncOperation(std::function<void()>&& func) override;
@@ -110,7 +109,7 @@ public:
     void SignalReference() override;
     void ReleaseFences(bool force = true) override;
     void FlushAndInvalidateRegion(
-        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;
     void FragmentBarrier() override;
     void TiledCacheBarrier() override;
@@ -123,7 +122,7 @@ public:
     Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
     void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                   std::span<const u8> memory) override;
-    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
+    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr,
                            u32 pixel_stride) override;
     void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                            const VideoCore::DiskResourceLoadCallback& callback) override;
@@ -235,6 +234,7 @@ private:
                        VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport);
 
     Tegra::GPU& gpu;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     const Device& device;
     ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 30df41b7d..50462cdde 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -168,11 +168,12 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs
 }
 } // Anonymous namespace
 
-ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
-                         const Device& device_, TextureCache& texture_cache_,
-                         BufferCache& buffer_cache_, ProgramManager& program_manager_,
-                         StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_)
-    : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_},
+ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                         Core::Frontend::EmuWindow& emu_window_, const Device& device_,
+                         TextureCache& texture_cache_, BufferCache& buffer_cache_,
+                         ProgramManager& program_manager_, StateTracker& state_tracker_,
+                         VideoCore::ShaderNotify& shader_notify_)
+    : VideoCommon::ShaderCache{device_memory_}, emu_window{emu_window_}, device{device_},
       texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
       state_tracker{state_tracker_}, shader_notify{shader_notify_},
       use_asynchronous_shaders{device.UseAsynchronousShaders()},
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 6b9732fca..5ac413529 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -17,7 +17,7 @@
 
 namespace Tegra {
 class MemoryManager;
-}
+} // namespace Tegra
 
 namespace OpenGL {
 
@@ -28,10 +28,11 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
 
 class ShaderCache : public VideoCommon::ShaderCache {
 public:
-    explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
-                         const Device& device_, TextureCache& texture_cache_,
-                         BufferCache& buffer_cache_, ProgramManager& program_manager_,
-                         StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_);
+    explicit ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                         Core::Frontend::EmuWindow& emu_window_, const Device& device_,
+                         TextureCache& texture_cache_, BufferCache& buffer_cache_,
+                         ProgramManager& program_manager_, StateTracker& state_tracker_,
+                         VideoCore::ShaderNotify& shader_notify_);
     ~ShaderCache();
 
     void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 2933718b6..b75376fdb 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -15,7 +15,6 @@
 #include "common/telemetry.h"
 #include "core/core_timing.h"
 #include "core/frontend/emu_window.h"
-#include "core/memory.h"
 #include "core/telemetry_session.h"
 #include "video_core/host_shaders/ffx_a_h.h"
 #include "video_core/host_shaders/ffx_fsr1_h.h"
@@ -144,12 +143,13 @@ void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severit
 
 RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
                                Core::Frontend::EmuWindow& emu_window_,
-                               Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
+                               Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_,
                                std::unique_ptr<Core::Frontend::GraphicsContext> context_)
     : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
-      emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, device{emu_window_},
+      emu_window{emu_window_}, device_memory{device_memory_}, gpu{gpu_}, device{emu_window_},
       state_tracker{}, program_manager{device},
-      rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
+      rasterizer(emu_window, gpu, device_memory, device, screen_info, program_manager,
+                 state_tracker) {
     if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
         glEnable(GL_DEBUG_OUTPUT);
         glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
@@ -242,7 +242,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf
     const u32 bytes_per_pixel{VideoCore::Surface::BytesPerBlock(pixel_format)};
     const u64 size_in_bytes{Tegra::Texture::CalculateSize(
         true, bytes_per_pixel, framebuffer.stride, framebuffer.height, 1, block_height_log2, 0)};
-    const u8* const host_ptr{cpu_memory.GetPointer(framebuffer_addr)};
+    const u8* const host_ptr{device_memory.GetPointer<u8>(framebuffer_addr)};
     const std::span<const u8> input_data(host_ptr, size_in_bytes);
     Tegra::Texture::UnswizzleTexture(gl_framebuffer_data, input_data, bytes_per_pixel,
                                      framebuffer.width, framebuffer.height, 1, block_height_log2,
diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h
index b70607635..18699610a 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.h
+++ b/src/video_core/renderer_opengl/renderer_opengl.h
@@ -61,7 +61,7 @@ class RendererOpenGL final : public VideoCore::RendererBase {
 public:
     explicit RendererOpenGL(Core::TelemetrySession& telemetry_session_,
                             Core::Frontend::EmuWindow& emu_window_,
-                            Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
+                            Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_,
                             std::unique_ptr<Core::Frontend::GraphicsContext> context_);
     ~RendererOpenGL() override;
 
@@ -101,7 +101,7 @@ private:
 
     Core::TelemetrySession& telemetry_session;
     Core::Frontend::EmuWindow& emu_window;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
     Tegra::GPU& gpu;
 
     Device device;
diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h
index 71c783709..850c34a3a 100644
--- a/src/video_core/renderer_vulkan/pipeline_helper.h
+++ b/src/video_core/renderer_vulkan/pipeline_helper.h
@@ -12,7 +12,6 @@
 #include "shader_recompiler/shader_info.h"
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
-#include "video_core/texture_cache/texture_cache.h"
 #include "video_core/texture_cache/types.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 100b70918..1631276c6 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -82,10 +82,10 @@ Device CreateDevice(const vk::Instance& instance, const vk::InstanceDispatch& dl
 
 RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
                                Core::Frontend::EmuWindow& emu_window,
-                               Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
+                               Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_,
                                std::unique_ptr<Core::Frontend::GraphicsContext> context_) try
     : RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_),
-      cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary(context.get())),
+      device_memory(device_memory_), gpu(gpu_), library(OpenLibrary(context.get())),
       instance(CreateInstance(*library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type,
                               Settings::values.renderer_debug.GetValue())),
       debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance)
@@ -97,9 +97,9 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
                 render_window.GetFramebufferLayout().height),
       present_manager(instance, render_window, device, memory_allocator, scheduler, swapchain,
                       surface),
-      blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, present_manager,
-                  scheduler, screen_info),
-      rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator,
+      blit_screen(device_memory, render_window, device, memory_allocator, swapchain,
+                  present_manager, scheduler, screen_info),
+      rasterizer(render_window, gpu, device_memory, screen_info, device, memory_allocator,
                  state_tracker, scheduler) {
     if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) {
         turbo_mode.emplace(instance, dld);
@@ -128,7 +128,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     screen_info.width = framebuffer->width;
     screen_info.height = framebuffer->height;
 
-    const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
+    const DAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
     const bool use_accelerated =
         rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
     RenderScreenshot(*framebuffer, use_accelerated);
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 14e257cf7..11c52287a 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -7,12 +7,12 @@
 #include <string>
 #include <variant>
 
-#include "video_core/renderer_vulkan/vk_rasterizer.h"
-
 #include "common/dynamic_library.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/renderer_base.h"
 #include "video_core/renderer_vulkan/vk_blit_screen.h"
 #include "video_core/renderer_vulkan/vk_present_manager.h"
+#include "video_core/renderer_vulkan/vk_rasterizer.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_state_tracker.h"
 #include "video_core/renderer_vulkan/vk_swapchain.h"
@@ -42,7 +42,7 @@ class RendererVulkan final : public VideoCore::RendererBase {
 public:
     explicit RendererVulkan(Core::TelemetrySession& telemtry_session,
                             Core::Frontend::EmuWindow& emu_window,
-                            Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
+                            Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_,
                             std::unique_ptr<Core::Frontend::GraphicsContext> context_);
     ~RendererVulkan() override;
 
@@ -62,7 +62,7 @@ private:
     void RenderScreenshot(const Tegra::FramebufferConfig& framebuffer, bool use_accelerated);
 
     Core::TelemetrySession& telemetry_session;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
     Tegra::GPU& gpu;
 
     std::shared_ptr<Common::DynamicLibrary> library;
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 60432f5ad..610f27c84 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -14,8 +14,8 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "core/frontend/emu_window.h"
-#include "core/memory.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/host_shaders/fxaa_frag_spv.h"
 #include "video_core/host_shaders/fxaa_vert_spv.h"
 #include "video_core/host_shaders/present_bicubic_frag_spv.h"
@@ -121,11 +121,12 @@ struct BlitScreen::BufferData {
     // Unaligned image data goes here
 };
 
-BlitScreen::BlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWindow& render_window_,
-                       const Device& device_, MemoryAllocator& memory_allocator_,
-                       Swapchain& swapchain_, PresentManager& present_manager_,
-                       Scheduler& scheduler_, const ScreenInfo& screen_info_)
-    : cpu_memory{cpu_memory_}, render_window{render_window_}, device{device_},
+BlitScreen::BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                       Core::Frontend::EmuWindow& render_window_, const Device& device_,
+                       MemoryAllocator& memory_allocator_, Swapchain& swapchain_,
+                       PresentManager& present_manager_, Scheduler& scheduler_,
+                       const ScreenInfo& screen_info_)
+    : device_memory{device_memory_}, render_window{render_window_}, device{device_},
       memory_allocator{memory_allocator_}, swapchain{swapchain_}, present_manager{present_manager_},
       scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
     resource_ticks.resize(image_count);
@@ -219,8 +220,8 @@ void BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
     if (!use_accelerated) {
         const u64 image_offset = GetRawImageOffset(framebuffer);
 
-        const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset;
-        const u8* const host_ptr = cpu_memory.GetPointer(framebuffer_addr);
+        const DAddr framebuffer_addr = framebuffer.address + framebuffer.offset;
+        const u8* const host_ptr = device_memory.GetPointer<u8>(framebuffer_addr);
 
         // TODO(Rodrigo): Read this from HLE
         constexpr u32 block_height_log2 = 4;
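
BlitScreen now resolves the guest framebuffer through the device memory manager's typed GetPointer<T>(DAddr) instead of cpu_memory.GetPointer(VAddr). A toy model of that lookup shape, under the assumption that the manager maps DAddr ranges onto host backing (the class below is illustrative, not the real Tegra::MaxwellDeviceMemoryManager):

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

using DAddr = std::uint64_t;

class DeviceMemoryModel {
public:
    void Map(DAddr base, std::size_t size) {
        backing[base] = std::vector<std::uint8_t>(size);
    }

    template <typename T>
    T* GetPointer(DAddr addr) {
        // Linear scan of mapped ranges for clarity; the real manager resolves
        // through page tables. Returns nullptr for unmapped addresses, which is
        // why later hunks assert the pointer before dereferencing it.
        for (auto& [base, bytes] : backing) {
            if (addr >= base && addr < base + bytes.size()) {
                return reinterpret_cast<T*>(bytes.data() + (addr - base));
            }
        }
        return nullptr;
    }

private:
    std::unordered_map<DAddr, std::vector<std::uint8_t>> backing;
};
```
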
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 78b32416d..3eff76009 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -6,6 +6,7 @@
 #include <memory>
 
 #include "core/frontend/framebuffer_layout.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/vulkan_common/vulkan_memory_allocator.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
@@ -13,10 +14,6 @@ namespace Core {
 class System;
 }
 
-namespace Core::Memory {
-class Memory;
-}
-
 namespace Core::Frontend {
 class EmuWindow;
 }
@@ -56,8 +53,9 @@ struct ScreenInfo {
 
 class BlitScreen {
 public:
-    explicit BlitScreen(Core::Memory::Memory& cpu_memory, Core::Frontend::EmuWindow& render_window,
-                        const Device& device, MemoryAllocator& memory_manager, Swapchain& swapchain,
+    explicit BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory,
+                        Core::Frontend::EmuWindow& render_window, const Device& device,
+                        MemoryAllocator& memory_manager, Swapchain& swapchain,
                         PresentManager& present_manager, Scheduler& scheduler,
                         const ScreenInfo& screen_info);
     ~BlitScreen();
@@ -109,7 +107,7 @@ private:
     u64 CalculateBufferSize(const Tegra::FramebufferConfig& framebuffer) const;
     u64 GetRawImageOffset(const Tegra::FramebufferConfig& framebuffer) const;
 
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
     Core::Frontend::EmuWindow& render_window;
     const Device& device;
     MemoryAllocator& memory_allocator;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 3c61799fa..31001d142 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -79,7 +79,7 @@ vk::Buffer CreateBuffer(const Device& device, const MemoryAllocator& memory_allo
 } // Anonymous namespace
 
 Buffer::Buffer(BufferCacheRuntime& runtime, VideoCommon::NullBufferParams null_params)
-    : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(null_params), tracker{4096} {
+    : VideoCommon::BufferBase(null_params), tracker{4096} {
     if (runtime.device.HasNullDescriptor()) {
         return;
     }
@@ -88,11 +88,9 @@ Buffer::Buffer(BufferCacheRuntime& runtime, VideoCommon::NullBufferParams null_p
     is_null = true;
 }
 
-Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
-               VAddr cpu_addr_, u64 size_bytes_)
-    : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_),
-      device{&runtime.device}, buffer{CreateBuffer(*device, runtime.memory_allocator, SizeBytes())},
-      tracker{SizeBytes()} {
+Buffer::Buffer(BufferCacheRuntime& runtime, DAddr cpu_addr_, u64 size_bytes_)
+    : VideoCommon::BufferBase(cpu_addr_, size_bytes_), device{&runtime.device},
+      buffer{CreateBuffer(*device, runtime.memory_allocator, SizeBytes())}, tracker{SizeBytes()} {
     if (runtime.device.HasDebuggingToolAttached()) {
         buffer.SetObjectNameEXT(fmt::format("Buffer 0x{:x}", CpuAddr()).c_str());
     }
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index dc300d7cb..e273f4988 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -23,11 +23,10 @@ struct HostVertexBinding;
 
 class BufferCacheRuntime;
 
-class Buffer : public VideoCommon::BufferBase<VideoCore::RasterizerInterface> {
+class Buffer : public VideoCommon::BufferBase {
 public:
     explicit Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params);
-    explicit Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
-                    VAddr cpu_addr_, u64 size_bytes_);
+    explicit Buffer(BufferCacheRuntime& runtime, DAddr cpu_addr_, u64 size_bytes_);
 
     [[nodiscard]] VkBufferView View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format);
 
@@ -173,7 +172,7 @@ struct BufferCacheParams {
     using Runtime = Vulkan::BufferCacheRuntime;
     using Buffer = Vulkan::Buffer;
     using Async_Buffer = Vulkan::StagingBufferRef;
-    using MemoryTracker = VideoCommon::MemoryTrackerBase<VideoCore::RasterizerInterface>;
+    using MemoryTracker = VideoCommon::MemoryTrackerBase<Tegra::MaxwellDeviceMemoryManager>;
 
     static constexpr bool IS_OPENGL = false;
     static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS = false;
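
The BufferCacheParams change retargets MemoryTrackerBase from the rasterizer interface to the device memory manager. The tracker needs only one operation from its template parameter, UpdatePagesCachedCount, so the swap is a template-argument change with no tracker logic touched. A reduced sketch of that dependency shape (names are illustrative):

```cpp
#include <cstdint>

// Generic over any page manager exposing UpdatePagesCachedCount; retargeting
// the tracker from the rasterizer to the device memory manager only changes
// which type is supplied here.
template <class PageManager>
class MemoryTrackerModel {
public:
    explicit MemoryTrackerModel(PageManager& mgr) : manager{mgr} {}

    void Track(std::uint64_t addr, std::uint64_t size) {
        manager.UpdatePagesCachedCount(addr, size, 1);
    }
    void Untrack(std::uint64_t addr, std::uint64_t size) {
        manager.UpdatePagesCachedCount(addr, size, -1);
    }

private:
    PageManager& manager;
};
```
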
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index f2fd2670f..ec6b3a4b0 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -19,6 +19,7 @@
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
 #include "video_core/shader_notify.h"
+#include "video_core/texture_cache/texture_cache.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 
 #if defined(_MSC_VER) && defined(NDEBUG)
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index d1841198d..1e1821b10 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -30,7 +30,6 @@
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
-#include "video_core/renderer_vulkan/vk_rasterizer.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_shader_util.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
@@ -299,12 +298,13 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
     return std::memcmp(&rhs, this, Size()) == 0;
 }
 
-PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_,
-                             Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
+PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                             const Device& device_, Scheduler& scheduler_,
+                             DescriptorPool& descriptor_pool_,
                              GuestDescriptorQueue& guest_descriptor_queue_,
                              RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
                              TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
-    : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_},
+    : VideoCommon::ShaderCache{device_memory_}, device{device_}, scheduler{scheduler_},
       descriptor_pool{descriptor_pool_}, guest_descriptor_queue{guest_descriptor_queue_},
       render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_},
       texture_cache{texture_cache_}, shader_notify{shader_notify_},
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index e323ea0fd..797700128 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -20,6 +20,7 @@
 #include "shader_recompiler/object_pool.h"
 #include "shader_recompiler/profile.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
@@ -79,7 +80,6 @@ class ComputePipeline;
 class DescriptorPool;
 class Device;
 class PipelineStatistics;
-class RasterizerVulkan;
 class RenderPassCache;
 class Scheduler;
 
@@ -99,8 +99,8 @@ struct ShaderPools {
 
 class PipelineCache : public VideoCommon::ShaderCache {
 public:
-    explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler,
-                           DescriptorPool& descriptor_pool,
+    explicit PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device,
+                           Scheduler& scheduler, DescriptorPool& descriptor_pool,
                            GuestDescriptorQueue& guest_descriptor_queue,
                            RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
                            TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index ad4caf688..7cbc9c73c 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -13,9 +13,10 @@
 
 #include "common/bit_util.h"
 #include "common/common_types.h"
-#include "core/memory.h"
 #include "video_core/engines/draw_manager.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/query_cache/query_cache.h"
+#include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_compute_pass.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -102,7 +103,7 @@ private:
 using BaseStreamer = VideoCommon::SimpleStreamer<VideoCommon::HostQueryBase>;
 
 struct HostSyncValues {
-    VAddr address;
+    DAddr address;
     size_t size;
     size_t offset;
 
@@ -317,7 +318,7 @@ public:
         pending_sync.clear();
     }
 
-    size_t WriteCounter(VAddr address, bool has_timestamp, u32 value,
+    size_t WriteCounter(DAddr address, bool has_timestamp, u32 value,
                         [[maybe_unused]] std::optional<u32> subreport) override {
         PauseCounter();
         auto index = BuildQuery();
@@ -738,7 +739,7 @@ public:
         pending_sync.clear();
     }
 
-    size_t WriteCounter(VAddr address, bool has_timestamp, u32 value,
+    size_t WriteCounter(DAddr address, bool has_timestamp, u32 value,
                         std::optional<u32> subreport_) override {
         auto index = BuildQuery();
         auto* new_query = GetQuery(index);
@@ -769,9 +770,9 @@ public:
         return index;
     }
 
-    std::optional<std::pair<VAddr, size_t>> GetLastQueryStream(size_t stream) {
+    std::optional<std::pair<DAddr, size_t>> GetLastQueryStream(size_t stream) {
         if (last_queries[stream] != 0) {
-            std::pair<VAddr, size_t> result(last_queries[stream], last_queries_stride[stream]);
+            std::pair<DAddr, size_t> result(last_queries[stream], last_queries_stride[stream]);
             return result;
         }
         return std::nullopt;
@@ -974,7 +975,7 @@ private:
     size_t buffers_count{};
     std::array<VkBuffer, NUM_STREAMS> counter_buffers{};
     std::array<VkDeviceSize, NUM_STREAMS> offsets{};
-    std::array<VAddr, NUM_STREAMS> last_queries;
+    std::array<DAddr, NUM_STREAMS> last_queries;
     std::array<size_t, NUM_STREAMS> last_queries_stride;
     Maxwell3D::Regs::PrimitiveTopology out_topology;
     u64 streams_mask;
@@ -987,7 +988,7 @@ public:
         : VideoCommon::QueryBase(0, VideoCommon::QueryFlagBits::IsHostManaged, 0) {}
 
     // Parameterized constructor
-    PrimitivesQueryBase(bool has_timestamp, VAddr address)
+    PrimitivesQueryBase(bool has_timestamp, DAddr address)
         : VideoCommon::QueryBase(address, VideoCommon::QueryFlagBits::IsHostManaged, 0) {
         if (has_timestamp) {
             flags |= VideoCommon::QueryFlagBits::HasTimestamp;
@@ -995,7 +996,7 @@ public:
     }
 
     u64 stride{};
-    VAddr dependant_address{};
+    DAddr dependant_address{};
     Maxwell3D::Regs::PrimitiveTopology topology{Maxwell3D::Regs::PrimitiveTopology::Points};
     size_t dependant_index{};
     bool dependant_manage{};
@@ -1005,15 +1006,15 @@ class PrimitivesSucceededStreamer : public VideoCommon::SimpleStreamer<Primitive
 public:
     explicit PrimitivesSucceededStreamer(size_t id_, QueryCacheRuntime& runtime_,
                                          TFBCounterStreamer& tfb_streamer_,
-                                         Core::Memory::Memory& cpu_memory_)
+                                         Tegra::MaxwellDeviceMemoryManager& device_memory_)
         : VideoCommon::SimpleStreamer<PrimitivesQueryBase>(id_), runtime{runtime_},
-          tfb_streamer{tfb_streamer_}, cpu_memory{cpu_memory_} {
+          tfb_streamer{tfb_streamer_}, device_memory{device_memory_} {
         MakeDependent(&tfb_streamer);
     }
 
     ~PrimitivesSucceededStreamer() = default;
 
-    size_t WriteCounter(VAddr address, bool has_timestamp, u32 value,
+    size_t WriteCounter(DAddr address, bool has_timestamp, u32 value,
                         std::optional<u32> subreport_) override {
         auto index = BuildQuery();
         auto* new_query = GetQuery(index);
@@ -1063,6 +1064,8 @@ public:
                 }
             });
         }
+        auto* ptr = device_memory.GetPointer<u8>(new_query->dependant_address);
+        ASSERT(ptr != nullptr);
 
         new_query->dependant_manage = must_manage_dependance;
         pending_flush_queries.push_back(index);
@@ -1100,7 +1103,7 @@ public:
                 num_vertices = dependant_query->value / query->stride;
                 tfb_streamer.Free(query->dependant_index);
             } else {
-                u8* pointer = cpu_memory.GetPointer(query->dependant_address);
+                u8* pointer = device_memory.GetPointer<u8>(query->dependant_address);
                 u32 result;
                 std::memcpy(&result, pointer, sizeof(u32));
                 num_vertices = static_cast<u64>(result) / query->stride;
@@ -1137,7 +1140,7 @@ public:
 private:
     QueryCacheRuntime& runtime;
     TFBCounterStreamer& tfb_streamer;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     // syncing queue
     std::vector<size_t> pending_sync;
@@ -1152,12 +1155,13 @@ private:
 
 struct QueryCacheRuntimeImpl {
     QueryCacheRuntimeImpl(QueryCacheRuntime& runtime, VideoCore::RasterizerInterface* rasterizer_,
-                          Core::Memory::Memory& cpu_memory_, Vulkan::BufferCache& buffer_cache_,
-                          const Device& device_, const MemoryAllocator& memory_allocator_,
-                          Scheduler& scheduler_, StagingBufferPool& staging_pool_,
+                          Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                          Vulkan::BufferCache& buffer_cache_, const Device& device_,
+                          const MemoryAllocator& memory_allocator_, Scheduler& scheduler_,
+                          StagingBufferPool& staging_pool_,
                           ComputePassDescriptorQueue& compute_pass_descriptor_queue,
                           DescriptorPool& descriptor_pool)
-        : rasterizer{rasterizer_}, cpu_memory{cpu_memory_},
+        : rasterizer{rasterizer_}, device_memory{device_memory_},
           buffer_cache{buffer_cache_}, device{device_},
           memory_allocator{memory_allocator_}, scheduler{scheduler_}, staging_pool{staging_pool_},
           guest_streamer(0, runtime),
@@ -1168,7 +1172,7 @@ struct QueryCacheRuntimeImpl {
                        scheduler, memory_allocator, staging_pool),
           primitives_succeeded_streamer(
               static_cast<size_t>(QueryType::StreamingPrimitivesSucceeded), runtime, tfb_streamer,
-              cpu_memory_),
+              device_memory_),
           primitives_needed_minus_succeeded_streamer(
               static_cast<size_t>(QueryType::StreamingPrimitivesNeededMinusSucceeded), runtime, 0u),
           hcr_setup{}, hcr_is_set{}, is_hcr_running{}, maxwell3d{} {
@@ -1195,7 +1199,7 @@ struct QueryCacheRuntimeImpl {
     }
 
     VideoCore::RasterizerInterface* rasterizer;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
     Vulkan::BufferCache& buffer_cache;
 
     const Device& device;
@@ -1210,7 +1214,7 @@ struct QueryCacheRuntimeImpl {
     PrimitivesSucceededStreamer primitives_succeeded_streamer;
     VideoCommon::StubStreamer<QueryCacheParams> primitives_needed_minus_succeeded_streamer;
 
-    std::vector<std::pair<VAddr, VAddr>> little_cache;
+    std::vector<std::pair<DAddr, DAddr>> little_cache;
     std::vector<std::pair<VkBuffer, VkDeviceSize>> buffers_to_upload_to;
     std::vector<size_t> redirect_cache;
     std::vector<std::vector<VkBufferCopy>> copies_setup;
@@ -1229,14 +1233,14 @@ struct QueryCacheRuntimeImpl {
 };
 
 QueryCacheRuntime::QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer,
-                                     Core::Memory::Memory& cpu_memory_,
+                                     Tegra::MaxwellDeviceMemoryManager& device_memory_,
                                      Vulkan::BufferCache& buffer_cache_, const Device& device_,
                                      const MemoryAllocator& memory_allocator_,
                                      Scheduler& scheduler_, StagingBufferPool& staging_pool_,
                                      ComputePassDescriptorQueue& compute_pass_descriptor_queue,
                                      DescriptorPool& descriptor_pool) {
     impl = std::make_unique<QueryCacheRuntimeImpl>(
-        *this, rasterizer, cpu_memory_, buffer_cache_, device_, memory_allocator_, scheduler_,
+        *this, rasterizer, device_memory_, buffer_cache_, device_, memory_allocator_, scheduler_,
         staging_pool_, compute_pass_descriptor_queue, descriptor_pool);
 }
 
@@ -1309,7 +1313,7 @@ void QueryCacheRuntime::HostConditionalRenderingCompareValueImpl(VideoCommon::Lo
     ResumeHostConditionalRendering();
 }
 
-void QueryCacheRuntime::HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal) {
+void QueryCacheRuntime::HostConditionalRenderingCompareBCImpl(DAddr address, bool is_equal) {
     VkBuffer to_resolve;
     u32 to_resolve_offset;
     {
@@ -1350,11 +1354,11 @@ bool QueryCacheRuntime::HostConditionalRenderingCompareValues(VideoCommon::Looku
         return false;
     }
 
-    const auto check_in_bc = [&](VAddr address) {
+    const auto check_in_bc = [&](DAddr address) {
         return impl->buffer_cache.IsRegionGpuModified(address, 8);
     };
-    const auto check_value = [&](VAddr address) {
-        u8* ptr = impl->cpu_memory.GetPointer(address);
+    const auto check_value = [&](DAddr address) {
+        u8* ptr = impl->device_memory.GetPointer<u8>(address);
         u64 value{};
         std::memcpy(&value, ptr, sizeof(value));
         return value == 0;
@@ -1477,8 +1481,8 @@ void QueryCacheRuntime::SyncValues(std::span<SyncValuesType> values, VkBuffer ba
     for (auto& sync_val : values) {
         total_size += sync_val.size;
         bool found = false;
-        VAddr base = Common::AlignDown(sync_val.address, Core::Memory::YUZU_PAGESIZE);
-        VAddr base_end = base + Core::Memory::YUZU_PAGESIZE;
+        DAddr base = Common::AlignDown(sync_val.address, Core::DEVICE_PAGESIZE);
+        DAddr base_end = base + Core::DEVICE_PAGESIZE;
         for (size_t i = 0; i < impl->little_cache.size(); i++) {
             const auto set_found = [&] {
                 impl->redirect_cache.push_back(i);
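
SyncValues coalesces sync targets into device-page-sized windows: each address is aligned down to a Core::DEVICE_PAGESIZE boundary and the window ends one page later. A self-contained sketch of that arithmetic, assuming a 4 KiB device page for illustration:

```cpp
#include <cstdint>

constexpr std::uint64_t kDevicePageSize = 0x1000; // assumed value for illustration

constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
    return v & ~(a - 1); // valid for power-of-two alignments
}

struct PageWindow {
    std::uint64_t base;
    std::uint64_t base_end;
};

// Mirrors the [base, base_end) window SyncValues builds per sync value.
constexpr PageWindow WindowFor(std::uint64_t address) {
    const std::uint64_t base = AlignDown(address, kDevicePageSize);
    return {base, base + kDevicePageSize};
}

static_assert(WindowFor(0x1234).base == 0x1000 && WindowFor(0x1234).base_end == 0x2000);
```
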
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index e9a1ea169..f6151123e 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -27,7 +27,7 @@ struct QueryCacheRuntimeImpl;
 class QueryCacheRuntime {
 public:
     explicit QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer,
-                               Core::Memory::Memory& cpu_memory_,
+                               Tegra::MaxwellDeviceMemoryManager& device_memory_,
                                Vulkan::BufferCache& buffer_cache_, const Device& device_,
                                const MemoryAllocator& memory_allocator_, Scheduler& scheduler_,
                                StagingBufferPool& staging_pool_,
@@ -61,7 +61,7 @@ public:
 
 private:
     void HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object, bool is_equal);
-    void HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal);
+    void HostConditionalRenderingCompareBCImpl(DAddr address, bool is_equal);
     friend struct QueryCacheRuntimeImpl;
     std::unique_ptr<QueryCacheRuntimeImpl> impl;
 };
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 241fc34be..5bf41b81f 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -18,6 +18,7 @@
 #include "video_core/engines/draw_manager.h"
 #include "video_core/engines/kepler_compute.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/renderer_vulkan/blit_image.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
@@ -163,10 +164,11 @@ DrawParams MakeDrawParams(const MaxwellDrawState& draw_state, u32 num_instances,
 } // Anonymous namespace
 
 RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
-                                   Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
-                                   const Device& device_, MemoryAllocator& memory_allocator_,
-                                   StateTracker& state_tracker_, Scheduler& scheduler_)
-    : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_},
+                                   Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                                   ScreenInfo& screen_info_, const Device& device_,
+                                   MemoryAllocator& memory_allocator_, StateTracker& state_tracker_,
+                                   Scheduler& scheduler_)
+    : gpu{gpu_}, device_memory{device_memory_}, screen_info{screen_info_}, device{device_},
       memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_},
       staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
       guest_descriptor_queue(device, scheduler), compute_pass_descriptor_queue(device, scheduler),
@@ -174,14 +176,14 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       texture_cache_runtime{
           device,     scheduler,         memory_allocator, staging_pool,
           blit_image, render_pass_cache, descriptor_pool,  compute_pass_descriptor_queue},
-      texture_cache(texture_cache_runtime, *this),
+      texture_cache(texture_cache_runtime, device_memory),
       buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
                            guest_descriptor_queue, compute_pass_descriptor_queue, descriptor_pool),
-      buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
-      query_cache_runtime(this, cpu_memory_, buffer_cache, device, memory_allocator, scheduler,
+      buffer_cache(device_memory, buffer_cache_runtime),
+      query_cache_runtime(this, device_memory, buffer_cache, device, memory_allocator, scheduler,
                           staging_pool, compute_pass_descriptor_queue, descriptor_pool),
-      query_cache(gpu, *this, cpu_memory_, query_cache_runtime),
-      pipeline_cache(*this, device, scheduler, descriptor_pool, guest_descriptor_queue,
+      query_cache(gpu, *this, device_memory, query_cache_runtime),
+      pipeline_cache(device_memory, device, scheduler, descriptor_pool, guest_descriptor_queue,
                      render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
       accelerate_dma(buffer_cache, texture_cache, scheduler),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
@@ -508,7 +510,7 @@ void Vulkan::RasterizerVulkan::DisableGraphicsUniformBuffer(size_t stage, u32 in
 
 void RasterizerVulkan::FlushAll() {}
 
-void RasterizerVulkan::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
+void RasterizerVulkan::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
     if (addr == 0 || size == 0) {
         return;
     }
@@ -525,7 +527,7 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType
     }
 }
 
-bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
+bool RasterizerVulkan::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
     if ((True(which & VideoCommon::CacheType::BufferCache))) {
         std::scoped_lock lock{buffer_cache.mutex};
         if (buffer_cache.IsRegionGpuModified(addr, size)) {
@@ -542,7 +544,7 @@ bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
     return false;
 }
 
-VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64 size) {
+VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(DAddr addr, u64 size) {
     {
         std::scoped_lock lock{texture_cache.mutex};
         auto area = texture_cache.GetFlushArea(addr, size);
@@ -551,14 +553,14 @@ VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64
         }
     }
     VideoCore::RasterizerDownloadArea new_area{
-        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
-        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+        .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE),
+        .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE),
         .preemtive = true,
     };
     return new_area;
 }
 
-void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
+void RasterizerVulkan::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) {
     if (addr == 0 || size == 0) {
         return;
     }
@@ -578,7 +580,7 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::Cache
     }
 }
 
-void RasterizerVulkan::InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) {
+void RasterizerVulkan::InnerInvalidation(std::span<const std::pair<DAddr, std::size_t>> sequences) {
     {
         std::scoped_lock lock{texture_cache.mutex};
         for (const auto& [addr, size] : sequences) {
@@ -599,7 +601,7 @@ void RasterizerVulkan::InnerInvalidation(std::span<const std::pair<VAddr, std::s
     }
 }
 
-bool RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
+bool RasterizerVulkan::OnCPUWrite(DAddr addr, u64 size) {
     if (addr == 0 || size == 0) {
         return false;
     }
@@ -620,7 +622,7 @@ bool RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
     return false;
 }
 
-void RasterizerVulkan::OnCacheInvalidation(VAddr addr, u64 size) {
+void RasterizerVulkan::OnCacheInvalidation(DAddr addr, u64 size) {
     if (addr == 0 || size == 0) {
         return;
     }
@@ -640,7 +642,7 @@ void RasterizerVulkan::InvalidateGPUCache() {
     gpu.InvalidateGPUCache();
 }
 
-void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) {
+void RasterizerVulkan::UnmapMemory(DAddr addr, u64 size) {
     {
         std::scoped_lock lock{texture_cache.mutex};
         texture_cache.UnmapMemory(addr, size);
@@ -679,7 +681,7 @@ void RasterizerVulkan::ReleaseFences(bool force) {
     fence_manager.WaitPendingFences(force);
 }
 
-void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size,
+void RasterizerVulkan::FlushAndInvalidateRegion(DAddr addr, u64 size,
                                                 VideoCommon::CacheType which) {
     if (Settings::IsGPULevelExtreme()) {
         FlushRegion(addr, size, which);
@@ -782,7 +784,7 @@ void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_si
 }
 
 bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
-                                         VAddr framebuffer_addr, u32 pixel_stride) {
+                                         DAddr framebuffer_addr, u32 pixel_stride) {
     if (!framebuffer_addr) {
         return false;
     }
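
FlushAndInvalidateRegion only pays for a flush at the highest GPU accuracy level; the invalidation half of the routine falls outside this hunk. A reduced sketch of that policy as visible here (the enum and callbacks are stand-ins, not the real Settings API):

```cpp
#include <cstdint>

enum class GpuAccuracy { Normal, High, Extreme }; // stand-in for the settings level

struct CachePolicySketch {
    GpuAccuracy level{GpuAccuracy::Normal};

    void FlushAndInvalidate(std::uint64_t addr, std::uint64_t size) {
        if (level == GpuAccuracy::Extreme) {
            Flush(addr, size); // write GPU-modified pages back to guest memory
        }
        Invalidate(addr, size); // drop stale host copies of the range
    }

    // Hypothetical hooks standing in for the rasterizer's real members.
    void Flush(std::uint64_t, std::uint64_t) {}
    void Invalidate(std::uint64_t, std::uint64_t) {}
};
```
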
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index ad069556c..881ee0993 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -7,14 +7,13 @@
 
 #include <boost/container/static_vector.hpp>
 
-#include "video_core/renderer_vulkan/vk_buffer_cache.h"
-
 #include "common/common_types.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_dma.h"
-#include "video_core/rasterizer_accelerated.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_vulkan/blit_image.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_fence_manager.h"
 #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
@@ -34,10 +33,14 @@ namespace Core::Frontend {
 class EmuWindow;
 }
 
-namespace Tegra::Engines {
+namespace Tegra {
+
+namespace Engines {
 class Maxwell3D;
 }
 
+} // namespace Tegra
+
 namespace Vulkan {
 
 struct ScreenInfo;
@@ -70,13 +73,14 @@ private:
     Scheduler& scheduler;
 };
 
-class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
+class RasterizerVulkan final : public VideoCore::RasterizerInterface,
                                protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
     explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
-                              Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
-                              const Device& device_, MemoryAllocator& memory_allocator_,
-                              StateTracker& state_tracker_, Scheduler& scheduler_);
+                              Tegra::MaxwellDeviceMemoryManager& device_memory_,
+                              ScreenInfo& screen_info_, const Device& device_,
+                              MemoryAllocator& memory_allocator_, StateTracker& state_tracker_,
+                              Scheduler& scheduler_);
     ~RasterizerVulkan() override;
 
     void Draw(bool is_indexed, u32 instance_count) override;
@@ -90,18 +94,18 @@ public:
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
-    void FlushRegion(VAddr addr, u64 size,
+    void FlushRegion(DAddr addr, u64 size,
                      VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    bool MustFlushRegion(VAddr addr, u64 size,
+    bool MustFlushRegion(DAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
-    void InvalidateRegion(VAddr addr, u64 size,
+    VideoCore::RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override;
+    void InvalidateRegion(DAddr addr, u64 size,
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
-    void InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) override;
-    void OnCacheInvalidation(VAddr addr, u64 size) override;
-    bool OnCPUWrite(VAddr addr, u64 size) override;
+    void InnerInvalidation(std::span<const std::pair<DAddr, std::size_t>> sequences) override;
+    void OnCacheInvalidation(DAddr addr, u64 size) override;
+    bool OnCPUWrite(DAddr addr, u64 size) override;
     void InvalidateGPUCache() override;
-    void UnmapMemory(VAddr addr, u64 size) override;
+    void UnmapMemory(DAddr addr, u64 size) override;
     void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
     void SignalFence(std::function<void()>&& func) override;
     void SyncOperation(std::function<void()>&& func) override;
@@ -109,7 +113,7 @@ public:
     void SignalReference() override;
     void ReleaseFences(bool force = true) override;
     void FlushAndInvalidateRegion(
-        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+        DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;
     void FragmentBarrier() override;
     void TiledCacheBarrier() override;
@@ -122,7 +126,7 @@ public:
     Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
     void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
                                   std::span<const u8> memory) override;
-    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
+    bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr,
                            u32 pixel_stride) override;
     void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
                            const VideoCore::DiskResourceLoadCallback& callback) override;
@@ -176,6 +180,7 @@ private:
     void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
 
     Tegra::GPU& gpu;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     ScreenInfo& screen_info;
     const Device& device;
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index e81cd031b..2af32c8f2 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -12,6 +12,7 @@
 #include "video_core/dirty_flags.h"
 #include "video_core/engines/kepler_compute.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/memory_manager.h"
 #include "video_core/shader_cache.h"
 #include "video_core/shader_environment.h"
@@ -34,7 +35,8 @@ void ShaderCache::SyncGuestHost() {
     RemovePendingShaders();
 }
 
-ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {}
+ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_)
+    : device_memory{device_memory_} {}
 
 bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
     auto& dirty{maxwell3d->dirty.flags};
@@ -132,7 +134,7 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
 
     storage.push_back(std::move(data));
 
-    rasterizer.UpdatePagesCachedCount(addr, size, 1);
+    device_memory.UpdatePagesCachedCount(addr, size, 1);
 }
 
 void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
@@ -209,7 +211,7 @@ void ShaderCache::UnmarkMemory(Entry* entry) {
 
     const VAddr addr = entry->addr_start;
     const size_t size = entry->addr_end - addr;
-    rasterizer.UpdatePagesCachedCount(addr, size, -1);
+    device_memory.UpdatePagesCachedCount(addr, size, -1);
 }
 
 void ShaderCache::RemoveShadersFromStorage(std::span<ShaderInfo*> removed_shaders) {
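
Register and UnmarkMemory form a strict +1/-1 pairing on the cached-page counts, now routed through the device memory manager instead of the rasterizer. A toy refcount model that makes the invariant explicit (illustrative only; the real manager tracks counts per device page):

```cpp
#include <cassert>
#include <cstdint>
#include <map>

class PageRefCounter {
public:
    void UpdatePagesCachedCount(std::uint64_t addr, std::uint64_t size, int delta) {
        const std::uint64_t first = addr >> 12;              // assumed 4 KiB pages
        const std::uint64_t last = (addr + size - 1) >> 12;
        for (std::uint64_t page = first; page <= last; ++page) {
            counts[page] += delta;
            assert(counts[page] >= 0); // every unmark must match a prior register
        }
    }

private:
    std::map<std::uint64_t, int> counts;
};
```
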
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index a76896620..fd9bf2562 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -14,6 +14,7 @@
 #include "common/common_types.h"
 #include "common/polyfill_ranges.h"
 #include "video_core/control/channel_state_cache.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/shader_environment.h"
 
@@ -77,7 +78,7 @@ protected:
         }
     };
 
-    explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_);
+    explicit ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory);
 
     /// @brief Update the hashes and information of shader stages
     /// @param unique_hashes Shader hashes to store into when a stage is enabled
@@ -145,7 +146,7 @@ private:
     /// @brief Create a new shader entry and register it
     const ShaderInfo* MakeShaderInfo(GenericEnvironment& env, VAddr cpu_addr);
 
-    VideoCore::RasterizerInterface& rasterizer;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     mutable std::mutex lookup_mutex;
     std::mutex invalidation_mutex;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 0d5a1709f..7398ed2ec 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -8,10 +8,11 @@
 
 #include "common/alignment.h"
 #include "common/settings.h"
-#include "core/memory.h"
 #include "video_core/control/channel_state.h"
 #include "video_core/dirty_flags.h"
 #include "video_core/engines/kepler_compute.h"
+#include "video_core/guest_memory.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/texture_cache/image_view_base.h"
 #include "video_core/texture_cache/samples_helper.h"
 #include "video_core/texture_cache/texture_cache_base.h"
@@ -27,8 +28,8 @@ using VideoCore::Surface::SurfaceType;
 using namespace Common::Literals;
 
 template <class P>
-TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_)
-    : runtime{runtime_}, rasterizer{rasterizer_} {
+TextureCache<P>::TextureCache(Runtime& runtime_, Tegra::MaxwellDeviceMemoryManager& device_memory_)
+    : runtime{runtime_}, device_memory{device_memory_} {
     // Configure null sampler
     TSCEntry sampler_descriptor{};
     sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear);
@@ -49,19 +50,19 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
     void(slot_samplers.insert(runtime, sampler_descriptor));
 
     if constexpr (HAS_DEVICE_MEMORY_INFO) {
-        const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
-        const s64 min_spacing_expected = device_memory - 1_GiB;
-        const s64 min_spacing_critical = device_memory - 512_MiB;
-        const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
+        const s64 device_local_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
+        const s64 min_spacing_expected = device_local_memory - 1_GiB;
+        const s64 min_spacing_critical = device_local_memory - 512_MiB;
+        const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD);
         const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
         const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
         expected_memory = static_cast<u64>(
-            std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected),
+            std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected),
                      DEFAULT_EXPECTED_MEMORY));
         critical_memory = static_cast<u64>(
-            std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical),
+            std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
                      DEFAULT_CRITICAL_MEMORY));
-        minimum_memory = static_cast<u64>((device_memory - mem_threshold) / 2);
+        minimum_memory = static_cast<u64>((device_local_memory - mem_threshold) / 2);
     } else {
         expected_memory = DEFAULT_EXPECTED_MEMORY + 512_MiB;
         critical_memory = DEFAULT_CRITICAL_MEMORY + 1_GiB;
@@ -513,7 +514,7 @@ FramebufferId TextureCache<P>::GetFramebufferId(const RenderTargets& key) {
 }
 
 template <class P>
-void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) {
+void TextureCache<P>::WriteMemory(DAddr cpu_addr, size_t size) {
     ForEachImageInRegion(cpu_addr, size, [this](ImageId image_id, Image& image) {
         if (True(image.flags & ImageFlagBits::CpuModified)) {
             return;
@@ -526,7 +527,7 @@ void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) {
 }
 
 template <class P>
-void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
+void TextureCache<P>::DownloadMemory(DAddr cpu_addr, size_t size) {
     boost::container::small_vector<ImageId, 16> images;
     ForEachImageInRegion(cpu_addr, size, [&images](ImageId image_id, ImageBase& image) {
         if (!image.IsSafeDownload()) {
@@ -553,7 +554,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
 }
 
 template <class P>
-std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(VAddr cpu_addr,
+std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(DAddr cpu_addr,
                                                                                u64 size) {
     std::optional<VideoCore::RasterizerDownloadArea> area{};
     ForEachImageInRegion(cpu_addr, size, [&](ImageId, ImageBase& image) {
@@ -579,7 +580,7 @@ std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(V
 }
 
 template <class P>
-void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
+void TextureCache<P>::UnmapMemory(DAddr cpu_addr, size_t size) {
     boost::container::small_vector<ImageId, 16> deleted_images;
     ForEachImageInRegion(cpu_addr, size, [&](ImageId id, Image&) { deleted_images.push_back(id); });
     for (const ImageId id : deleted_images) {
@@ -713,7 +714,7 @@ bool TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
 
 template <class P>
 typename P::ImageView* TextureCache<P>::TryFindFramebufferImageView(
-    const Tegra::FramebufferConfig& config, VAddr cpu_addr) {
+    const Tegra::FramebufferConfig& config, DAddr cpu_addr) {
     // TODO: Properly implement this
     const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS);
     if (it == page_table.end()) {
@@ -940,7 +941,7 @@ bool TextureCache<P>::IsRescaling(const ImageViewBase& image_view) const noexcep
 }
 
 template <class P>
-bool TextureCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
+bool TextureCache<P>::IsRegionGpuModified(DAddr addr, size_t size) {
     bool is_modified = false;
     ForEachImageInRegion(addr, size, [&is_modified](ImageId, ImageBase& image) {
         if (False(image.flags & ImageFlagBits::GpuModified)) {
@@ -1059,7 +1060,7 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
         return;
     }
 
-    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> swizzle_data(
+    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::UnsafeRead> swizzle_data(
         *gpu_memory, gpu_addr, image.guest_size_bytes, &swizzle_data_buffer);
 
     if (True(image.flags & ImageFlagBits::Converted)) {
@@ -1124,7 +1125,7 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a
 template <class P>
 ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
                                    RelaxedOptions options) {
-    std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    std::optional<DAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr) {
         cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
         if (!cpu_addr) {
@@ -1265,7 +1266,7 @@ void TextureCache<P>::QueueAsyncDecode(Image& image, ImageId image_id) {
 
     static Common::ScratchBuffer<u8> local_unswizzle_data_buffer;
     local_unswizzle_data_buffer.resize_destructive(image.unswizzled_size_bytes);
-    Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> swizzle_data(
+    Tegra::Memory::GpuGuestMemory<u8, Tegra::Memory::GuestMemoryFlags::UnsafeRead> swizzle_data(
         *gpu_memory, image.gpu_addr, image.guest_size_bytes, &swizzle_data_buffer);
 
     auto copies = UnswizzleImage(*gpu_memory, image.gpu_addr, image.info, swizzle_data,
@@ -1339,14 +1340,14 @@ bool TextureCache<P>::ScaleDown(Image& image) {
 template <class P>
 ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr,
                                      RelaxedOptions options) {
-    std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    std::optional<DAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr) {
         const auto size = CalculateGuestSizeInBytes(info);
         cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size);
         if (!cpu_addr) {
-            const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
+            const DAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
             virtual_invalid_space += Common::AlignUp(size, 32);
-            cpu_addr = std::optional<VAddr>(fake_addr);
+            cpu_addr = std::optional<DAddr>(fake_addr);
         }
     }
     ASSERT_MSG(cpu_addr, "Tried to insert an image to an invalid gpu_addr=0x{:x}", gpu_addr);
@@ -1362,7 +1363,7 @@ ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr,
 }
 
 template <class P>
-ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VAddr cpu_addr) {
+ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DAddr cpu_addr) {
     ImageInfo new_info = info;
     const size_t size_bytes = CalculateGuestSizeInBytes(new_info);
     const bool broken_views = runtime.HasBrokenTextureViewFormats();
@@ -1650,7 +1651,7 @@ std::optional<typename TextureCache<P>::BlitImages> TextureCache<P>::GetBlitImag
 
 template <class P>
 ImageId TextureCache<P>::FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr) {
-    std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    std::optional<DAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr) {
         cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
         if (!cpu_addr) {
@@ -1780,7 +1781,7 @@ ImageViewId TextureCache<P>::FindRenderTargetView(const ImageInfo& info, GPUVAdd
 
 template <class P>
 template <typename Func>
-void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func) {
+void TextureCache<P>::ForEachImageInRegion(DAddr cpu_addr, size_t size, Func&& func) {
     using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type;
     static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
     boost::container::small_vector<ImageId, 32> images;
@@ -1924,11 +1925,11 @@ void TextureCache<P>::ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size,
 template <class P>
 template <typename Func>
 void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
-    using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
+    using FuncReturn = typename std::invoke_result<Func, GPUVAddr, DAddr, size_t>::type;
     static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
     const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
     for (const auto& [gpu_addr, size] : segments) {
-        std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+        std::optional<DAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
         ASSERT(cpu_addr);
         if constexpr (RETURNS_BOOL) {
             if (func(gpu_addr, *cpu_addr, size)) {
@@ -1980,7 +1981,7 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
     }
     boost::container::small_vector<ImageViewId, 16> sparse_maps;
     ForEachSparseSegment(
-        image, [this, image_id, &sparse_maps](GPUVAddr gpu_addr, VAddr cpu_addr, size_t size) {
+        image, [this, image_id, &sparse_maps](GPUVAddr gpu_addr, DAddr cpu_addr, size_t size) {
             auto map_id = slot_map_views.insert(gpu_addr, cpu_addr, size, image_id);
             ForEachCPUPage(cpu_addr, size,
                            [this, map_id](u64 page) { page_table[page].push_back(map_id); });
@@ -2048,7 +2049,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     auto& sparse_maps = it->second;
     for (auto& map_view_id : sparse_maps) {
         const auto& map_range = slot_map_views[map_view_id];
-        const VAddr cpu_addr = map_range.cpu_addr;
+        const DAddr cpu_addr = map_range.cpu_addr;
         const std::size_t size = map_range.size;
         ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
             const auto page_it = page_table.find(page);
@@ -2080,7 +2081,7 @@ void TextureCache<P>::TrackImage(ImageBase& image, ImageId image_id) {
     ASSERT(False(image.flags & ImageFlagBits::Tracked));
     image.flags |= ImageFlagBits::Tracked;
     if (False(image.flags & ImageFlagBits::Sparse)) {
-        rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1);
+        device_memory.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1);
         return;
     }
     if (True(image.flags & ImageFlagBits::Registered)) {
@@ -2089,15 +2090,15 @@ void TextureCache<P>::TrackImage(ImageBase& image, ImageId image_id) {
         auto& sparse_maps = it->second;
         for (auto& map_view_id : sparse_maps) {
             const auto& map = slot_map_views[map_view_id];
-            const VAddr cpu_addr = map.cpu_addr;
+            const DAddr cpu_addr = map.cpu_addr;
             const std::size_t size = map.size;
-            rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
+            device_memory.UpdatePagesCachedCount(cpu_addr, size, 1);
         }
         return;
     }
     ForEachSparseSegment(image,
-                         [this]([[maybe_unused]] GPUVAddr gpu_addr, VAddr cpu_addr, size_t size) {
-                             rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
+                         [this]([[maybe_unused]] GPUVAddr gpu_addr, DAddr cpu_addr, size_t size) {
+                             device_memory.UpdatePagesCachedCount(cpu_addr, size, 1);
                          });
 }
 
@@ -2106,7 +2107,7 @@ void TextureCache<P>::UntrackImage(ImageBase& image, ImageId image_id) {
     ASSERT(True(image.flags & ImageFlagBits::Tracked));
     image.flags &= ~ImageFlagBits::Tracked;
     if (False(image.flags & ImageFlagBits::Sparse)) {
-        rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, -1);
+        device_memory.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, -1);
         return;
     }
     ASSERT(True(image.flags & ImageFlagBits::Registered));
@@ -2115,9 +2116,9 @@ void TextureCache<P>::UntrackImage(ImageBase& image, ImageId image_id) {
     auto& sparse_maps = it->second;
     for (auto& map_view_id : sparse_maps) {
         const auto& map = slot_map_views[map_view_id];
-        const VAddr cpu_addr = map.cpu_addr;
+        const DAddr cpu_addr = map.cpu_addr;
         const std::size_t size = map.size;
-        rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+        device_memory.UpdatePagesCachedCount(cpu_addr, size, -1);
     }
 }
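
When a GPU address has no device mapping, the InsertImage hunk above parks the image at a bump-allocated fake address placed past the 40-bit guest range, so it can never alias real memory. The same logic extracted into a standalone sketch:

```cpp
#include <cstdint>

using DAddr = std::uint64_t;

constexpr DAddr AlignUp(DAddr v, DAddr a) {
    return (v + a - 1) & ~(a - 1); // power-of-two alignment
}

class FakeAddressAllocator {
public:
    DAddr Allocate(std::uint64_t size) {
        // All-ones except bit 40: far outside the 40-bit guest address range.
        const DAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
        virtual_invalid_space += AlignUp(size, 32); // same bump step as the diff
        return fake_addr;
    }

private:
    DAddr virtual_invalid_space{};
};
```
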
 
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 6caf75b46..8699d40d4 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -36,9 +36,11 @@
 #include "video_core/texture_cache/types.h"
 #include "video_core/textures/texture.h"
 
-namespace Tegra::Control {
+namespace Tegra {
+namespace Control {
 struct ChannelState;
 }
+} // namespace Tegra
 
 namespace VideoCommon {
 
@@ -126,7 +128,7 @@ class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelI
     };
 
 public:
-    explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&);
+    explicit TextureCache(Runtime&, Tegra::MaxwellDeviceMemoryManager&);
 
     /// Notify the cache that a new frame has been queued
     void TickFrame();
@@ -190,15 +192,15 @@ public:
     Framebuffer* GetFramebuffer();
 
     /// Mark images in a range as modified from the CPU
-    void WriteMemory(VAddr cpu_addr, size_t size);
+    void WriteMemory(DAddr cpu_addr, size_t size);
 
     /// Download contents of host images to guest memory in a region
-    void DownloadMemory(VAddr cpu_addr, size_t size);
+    void DownloadMemory(DAddr cpu_addr, size_t size);
 
-    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(DAddr cpu_addr, u64 size);
 
     /// Remove images in a region
-    void UnmapMemory(VAddr cpu_addr, size_t size);
+    void UnmapMemory(DAddr cpu_addr, size_t size);
 
     /// Remove images in a region
     void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size);
@@ -210,7 +212,7 @@ public:
 
     /// Try to find a cached image view in the given CPU address
     [[nodiscard]] ImageView* TryFindFramebufferImageView(const Tegra::FramebufferConfig& config,
-                                                         VAddr cpu_addr);
+                                                         DAddr cpu_addr);
 
     /// Return true when there are uncommitted images to be downloaded
     [[nodiscard]] bool HasUncommittedFlushes() const noexcept;
@@ -235,7 +237,7 @@ public:
                                  GPUVAddr address = 0, size_t size = 0);
 
     /// Return true when a CPU region is modified from the GPU
-    [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionGpuModified(DAddr addr, size_t size);
 
     [[nodiscard]] bool IsRescaling() const noexcept;
 
@@ -252,7 +254,7 @@ public:
 private:
     /// Iterate over all page indices in a range
     template <typename Func>
-    static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) {
+    static void ForEachCPUPage(DAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
         const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
         for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
@@ -326,7 +328,7 @@ private:
 
     /// Create a new image and join perfectly matching existing images
     /// Remove joined images from the cache
-    [[nodiscard]] ImageId JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VAddr cpu_addr);
+    [[nodiscard]] ImageId JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DAddr cpu_addr);
 
     [[nodiscard]] ImageId FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr);
 
@@ -349,7 +351,7 @@ private:
 
     /// Iterates over all the images in a region calling func
     template <typename Func>
-    void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func);
+    void ForEachImageInRegion(DAddr cpu_addr, size_t size, Func&& func);
 
     template <typename Func>
     void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);
@@ -421,7 +423,7 @@ private:
 
     Runtime& runtime;
 
-    VideoCore::RasterizerInterface& rasterizer;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
     std::deque<TextureCacheGPUMap> gpu_page_table_storage;
 
     RenderTargets render_targets;
@@ -432,7 +434,7 @@ private:
     std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
     std::unordered_map<ImageId, boost::container::small_vector<ImageViewId, 16>> sparse_views;
 
-    VAddr virtual_invalid_space{};
+    DAddr virtual_invalid_space{};
 
     bool has_deleted_images = false;
     bool is_rescaling = false;
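Every public entry point above now takes a DAddr, so callers are expected to resolve GPU virtual addresses into the device address space before touching the cache. A minimal sketch of such a call site, assuming a resolver shaped like Tegra::MemoryManager::GpuToCpuAddress returning std::optional<DAddr> after this patch (treat the exact name and signature as assumptions):

    // Hypothetical call site: resolve a GPUVAddr to a DAddr, then notify the
    // texture cache of a CPU-side write. Templates keep the sketch self-contained.
    #include <cstddef>
    #include <optional>

    using u64 = unsigned long long;
    using DAddr = u64;
    using GPUVAddr = u64;

    template <typename MemoryManager, typename TextureCache>
    void NotifyCpuWrite(MemoryManager& gpu_memory, TextureCache& texture_cache,
                        GPUVAddr gpu_addr, std::size_t size) {
        const std::optional<DAddr> device_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
        if (!device_addr) {
            return; // Range is not backed; nothing cached to invalidate.
        }
        texture_cache.WriteMemory(*device_addr, size);
    }
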
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index fcf70068e..1a6f0d1ad 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -20,9 +20,9 @@
 #include "common/div_ceil.h"
 #include "common/scratch_buffer.h"
 #include "common/settings.h"
-#include "core/memory.h"
 #include "video_core/compatible_formats.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/guest_memory.h"
 #include "video_core/memory_manager.h"
 #include "video_core/surface.h"
 #include "video_core/texture_cache/decode_bc.h"
@@ -552,7 +552,8 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
     for (s32 layer = 0; layer < info.resources.layers; ++layer) {
         const std::span<const u8> src = input.subspan(host_offset);
         {
-            Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadWrite>
+            Tegra::Memory::GpuGuestMemoryScoped<u8,
+                                                Tegra::Memory::GuestMemoryFlags::UnsafeReadWrite>
                 dst(gpu_memory, gpu_addr + guest_offset, subresource_size, &tmp_buffer);
 
             SwizzleTexture(dst, src, bytes_per_block, num_tiles.width, num_tiles.height,
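The scoped type used here is worth unpacking: GpuGuestMemoryScoped hands out a writable span over guest memory and commits any staged changes when it leaves scope, which is why SwizzleTexture can write into dst inside the inner block. A rough model of that RAII write-back pattern, with a stand-in Memory type in place of the real Tegra::Memory API:

    // Rough RAII model of a scoped guest-memory write-back span, not yuzu's
    // implementation. ReadBlock/WriteBlock stand for whatever bulk accessors
    // the real backend provides.
    #include <cstddef>
    #include <span>
    #include <vector>

    template <typename T, typename Memory>
    class ScopedGuestSpan {
    public:
        ScopedGuestSpan(Memory& memory, unsigned long long addr, std::size_t count,
                        std::vector<T>* staging)
            : memory_{memory}, addr_{addr}, buffer_{*staging} {
            buffer_.resize(count);
            memory_.ReadBlock(addr_, buffer_.data(), count * sizeof(T)); // stage in
        }

        ~ScopedGuestSpan() {
            // Commit staged (possibly modified) data back to guest memory on scope exit.
            memory_.WriteBlock(addr_, buffer_.data(), buffer_.size() * sizeof(T));
        }

        operator std::span<T>() { return {buffer_.data(), buffer_.size()}; }

    private:
        Memory& memory_;
        unsigned long long addr_;
        std::vector<T>& buffer_;
    };
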
diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp
index b42d48416..0efb7b49d 100644
--- a/src/video_core/video_core.cpp
+++ b/src/video_core/video_core.cpp
@@ -6,6 +6,8 @@
 #include "common/logging/log.h"
 #include "common/settings.h"
 #include "core/core.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/renderer_base.h"
 #include "video_core/renderer_null/renderer_null.h"
 #include "video_core/renderer_opengl/renderer_opengl.h"
@@ -18,18 +20,17 @@ std::unique_ptr<VideoCore::RendererBase> CreateRenderer(
     Core::System& system, Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
     std::unique_ptr<Core::Frontend::GraphicsContext> context) {
     auto& telemetry_session = system.TelemetrySession();
-    auto& cpu_memory = system.ApplicationMemory();
+    auto& device_memory = system.Host1x().MemoryManager();
 
     switch (Settings::values.renderer_backend.GetValue()) {
     case Settings::RendererBackend::OpenGL:
-        return std::make_unique<OpenGL::RendererOpenGL>(telemetry_session, emu_window, cpu_memory,
-                                                        gpu, std::move(context));
+        return std::make_unique<OpenGL::RendererOpenGL>(telemetry_session, emu_window,
+                                                        device_memory, gpu, std::move(context));
     case Settings::RendererBackend::Vulkan:
-        return std::make_unique<Vulkan::RendererVulkan>(telemetry_session, emu_window, cpu_memory,
-                                                        gpu, std::move(context));
+        return std::make_unique<Vulkan::RendererVulkan>(telemetry_session, emu_window,
+                                                        device_memory, gpu, std::move(context));
     case Settings::RendererBackend::Null:
-        return std::make_unique<Null::RendererNull>(emu_window, cpu_memory, gpu,
-                                                    std::move(context));
+        return std::make_unique<Null::RendererNull>(emu_window, gpu, std::move(context));
     default:
         return nullptr;
     }
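
The net effect of this last hunk: renderers receive the device memory manager owned by Host1x, and the null renderer needs no memory reference at all. A toy model of the new ownership chain, with stand-in types throughout (none of these declarations are yuzu's actual classes):

    // Toy model of the factory change: backends share a device-memory reference
    // owned by Host1x instead of each taking per-process CPU memory.
    #include <memory>

    struct DeviceMemory {}; // stand-in for Tegra::MaxwellDeviceMemoryManager

    struct Host1x {
        DeviceMemory device_memory;
        DeviceMemory& MemoryManager() { return device_memory; }
    };

    struct RendererBase {
        virtual ~RendererBase() = default;
    };

    struct VulkanRenderer : RendererBase {
        explicit VulkanRenderer(DeviceMemory& dmem) : device_memory{dmem} {}
        DeviceMemory& device_memory;
    };

    struct NullRenderer : RendererBase {}; // no memory reference needed anymore

    enum class Backend { Vulkan, Null };

    std::unique_ptr<RendererBase> CreateRenderer(Host1x& host1x, Backend backend) {
        switch (backend) {
        case Backend::Vulkan:
            return std::make_unique<VulkanRenderer>(host1x.MemoryManager());
        case Backend::Null:
            return std::make_unique<NullRenderer>();
        }
        return nullptr;
    }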