diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 71e844959..8e7ca6123 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -27,6 +27,11 @@ u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vecto
     case IoctlCommand::IocGetVaRegionsCommand:
         return GetVARegions(input, output);
     }
+
+    if (static_cast<IoctlCommand>(command.cmd.Value()) == IoctlCommand::IocRemapCommand)
+        return Remap(input, output);
+
+    UNIMPLEMENTED_MSG("Unimplemented ioctl command");
     return 0;
 }
 
@@ -56,6 +61,36 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>&
     return 0;
 }
 
+u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
+    size_t num_entries = input.size() / sizeof(IoctlRemapEntry);
+
+    NGLOG_WARNING(Service_NVDRV, "(STUBBED) called, num_entries=0x{:X}", num_entries);
+
+    std::vector<IoctlRemapEntry> entries(num_entries);
+    std::memcpy(entries.data(), input.data(), input.size());
+
+    auto& gpu = Core::System::GetInstance().GPU();
+
+    for (const auto& entry : entries) {
+        NGLOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
+                      entry.offset, entry.nvmap_handle, entry.pages);
+        Tegra::GPUVAddr offset = static_cast<Tegra::GPUVAddr>(entry.offset) << 0x10;
+
+        auto object = nvmap_dev->GetObject(entry.nvmap_handle);
+        ASSERT(object);
+
+        ASSERT(object->status == nvmap::Object::Status::Allocated);
+
+        u64 size = static_cast<u64>(entry.pages) << 0x10;
+        ASSERT(size <= object->size);
+
+        Tegra::GPUVAddr returned = gpu.memory_manager->MapBufferEx(object->addr, offset, size);
+        ASSERT(returned == offset);
+    }
+    std::memcpy(output.data(), entries.data(), output.size());
+    return 0;
+}
+
 u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
     IoctlMapBufferEx params{};
     std::memcpy(&params, input.data(), input.size());
@@ -73,6 +108,16 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     auto object = nvmap_dev->GetObject(params.nvmap_handle);
     ASSERT(object);
 
+    // We can only map objects that have already been assigned a CPU address.
+    ASSERT(object->status == nvmap::Object::Status::Allocated);
+
+    ASSERT(params.buffer_offset == 0);
+
+    // The real nvservices doesn't make a distinction between handles and ids, and
+    // object can only have one handle and it will be the same as its id. Assert that this is the
+    // case to prevent unexpected behavior.
+    ASSERT(object->id == params.nvmap_handle);
+
     auto& gpu = Core::System::GetInstance().GPU();
 
     if (params.flags & 1) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index d86c3ebd9..f2dd0c3b3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -26,6 +26,7 @@ private:
     enum class IoctlCommand : u32_le {
         IocInitalizeExCommand = 0x40284109,
         IocAllocateSpaceCommand = 0xC0184102,
+        IocRemapCommand = 0x00000014,
         IocMapBufferExCommand = 0xC0284106,
         IocBindChannelCommand = 0x40044101,
         IocGetVaRegionsCommand = 0xC0404108,
@@ -54,6 +55,16 @@ private:
     };
     static_assert(sizeof(IoctlAllocSpace) == 24, "IoctlInitalizeEx is incorrect size");
 
+    struct IoctlRemapEntry {
+        u16_le flags;
+        u16_le kind;
+        u32_le nvmap_handle;
+        INSERT_PADDING_WORDS(1);
+        u32_le offset;
+        u32_le pages;
+    };
+    static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
+
     struct IoctlMapBufferEx {
         u32_le flags; // bit0: fixed_offset, bit2: cacheable
         u32_le kind;  // -1 is default
@@ -91,6 +102,7 @@ private:
 
     u32 InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output);
     u32 AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
+    u32 Remap(const std::vector<u8>& input, std::vector<u8>& output);
     u32 MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
     u32 BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
     u32 GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
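
Note (illustrative, not part of the patch): Remap() treats the ioctl input buffer as a packed array of IoctlRemapEntry and converts the 32-bit offset and pages fields into byte quantities by shifting left by 0x10, which suggests the entries are expressed in 64 KiB GPU pages. The standalone sketch below mirrors that decoding with plain fixed-width integers; RemapEntry is a hypothetical stand-in for IoctlRemapEntry (it uses uint*_t instead of yuzu's endian-aware u16_le/u32_le types) and the page-size interpretation is an assumption drawn from the shifts in the diff.

// Standalone sketch of the Remap() entry decoding, assuming 64 KiB (1 << 0x10)
// page units as the shifts in the patch suggest.
// RemapEntry is a hypothetical stand-in for IoctlRemapEntry, not yuzu code.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct RemapEntry {
    uint16_t flags;
    uint16_t kind;
    uint32_t nvmap_handle;
    uint32_t padding; // INSERT_PADDING_WORDS(1) in the real struct
    uint32_t offset;  // target GPU virtual address, in 64 KiB pages
    uint32_t pages;   // mapping length, in 64 KiB pages
};
static_assert(sizeof(RemapEntry) == 20, "RemapEntry is incorrect size");

int main() {
    // Fake ioctl input: one entry mapping 0x10 pages at page offset 0x100.
    RemapEntry entry{0, 0, 1, 0, 0x100, 0x10};
    std::vector<uint8_t> input(sizeof(entry));
    std::memcpy(input.data(), &entry, sizeof(entry));

    // Same decoding steps as the patch: count entries, copy them out, convert units.
    size_t num_entries = input.size() / sizeof(RemapEntry);
    std::vector<RemapEntry> entries(num_entries);
    std::memcpy(entries.data(), input.data(), input.size());

    for (const auto& e : entries) {
        uint64_t gpu_va = static_cast<uint64_t>(e.offset) << 0x10; // 0x1000000
        uint64_t size = static_cast<uint64_t>(e.pages) << 0x10;    // 0x100000 (1 MiB)
        std::printf("handle %u: map 0x%llx bytes at GPU VA 0x%llx\n",
                    static_cast<unsigned>(e.nvmap_handle),
                    static_cast<unsigned long long>(size),
                    static_cast<unsigned long long>(gpu_va));
    }
    return 0;
}

Since the patch asserts that the address returned by MapBufferEx equals the requested offset, Remap effectively places each nvmap object at a fixed GPU virtual address chosen by the guest rather than letting the memory manager pick one.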