Merge pull request #178 from Subv/command_buffers

GPU: Added a command processor to decode the GPU pushbuffers and forward the commands to their respective engines
bunnei 2018-02-12 13:51:52 -05:00 committed by GitHub
commit be5ba4d952
20 changed files with 364 additions and 23 deletions
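Before the file hunks, a minimal sketch of the kind of pushbuffer command processor this merge describes. The new command-processor and GPU sources are not among the hunks shown below, so every name in the sketch (CommandHeader, Engine, CallMethod, the bit positions) is an illustrative assumption, and the hardware's submission modes (incrementing vs. non-incrementing methods) are simplified to incrementing only; only ProcessCommandList, which appears in the nvhost_gpu hunk further down, is taken from the diff.

// Sketch only: decode 32-bit command headers from a pushbuffer and forward
// each (method, argument) pair to the engine bound on the header's subchannel.
#include <array>
#include <cstddef>
#include <vector>
#include "common/common_types.h"

struct CommandHeader {
    u32 raw;
    u32 Method() const { return raw & 0x1FFF; }            // assumed bits 0-12
    u32 Subchannel() const { return (raw >> 13) & 0x7; }   // assumed bits 13-15
    u32 ArgCount() const { return (raw >> 16) & 0x1FFF; }  // assumed bits 16-28
};

class Engine {
public:
    virtual ~Engine() = default;
    virtual void CallMethod(u32 method, u32 argument) = 0;
};

class CommandProcessorSketch {
public:
    void ProcessCommandList(const std::vector<u32>& commands) {
        for (std::size_t i = 0; i < commands.size();) {
            const CommandHeader header{commands[i++]};
            for (u32 arg = 0; arg < header.ArgCount() && i < commands.size(); ++arg) {
                // Forward to the engine bound on this subchannel (3D, DMA copy, ...).
                if (Engine* engine = engines[header.Subchannel()]) {
                    engine->CallMethod(header.Method() + arg, commands[i++]);
                } else {
                    ++i; // no engine bound yet; skip the argument
                }
            }
        }
    }

private:
    std::array<Engine*, 8> engines{}; // one slot per subchannel
};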

View file

@@ -4,6 +4,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
@@ -44,11 +45,12 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>&
LOG_DEBUG(Service_NVDRV, "called, pages=%x, page_size=%x, flags=%x", params.pages,
params.page_size, params.flags);
auto& gpu = Core::System::GetInstance().GPU();
const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
if (params.flags & 1) {
params.offset = memory_manager->AllocateSpace(params.offset, size, 1);
params.offset = gpu.memory_manager->AllocateSpace(params.offset, size, 1);
} else {
params.offset = memory_manager->AllocateSpace(size, params.align);
params.offset = gpu.memory_manager->AllocateSpace(size, params.align);
}
std::memcpy(output.data(), &params, output.size());
@@ -71,10 +73,12 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
auto object = nvmap_dev->GetObject(params.nvmap_handle);
ASSERT(object);
auto& gpu = Core::System::GetInstance().GPU();
if (params.flags & 1) {
params.offset = memory_manager->MapBufferEx(object->addr, params.offset, object->size);
params.offset = gpu.memory_manager->MapBufferEx(object->addr, params.offset, object->size);
} else {
params.offset = memory_manager->MapBufferEx(object->addr, object->size);
params.offset = gpu.memory_manager->MapBufferEx(object->addr, object->size);
}
std::memcpy(output.data(), &params, output.size());
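These hunks replace the per-device memory_manager member with the single instance owned by the GPU, reached through Core::System::GetInstance().GPU(). A sketch of the GPU-side ownership that implies follows; only gpu.memory_manager and ProcessCommandList(VAddr, u32) are taken from the diff, while the class name, namespace, and pointer type are assumptions.

// Sketch only: one MemoryManager shared by every nvdrv device and by the
// command processor, instead of one instance per device.
#include <memory>
#include "common/common_types.h"

namespace Tegra {

class MemoryManager; // the class removed from nvdrv below and now owned here

class GPU {
public:
    GPU();  // constructs the shared MemoryManager (definition omitted in this sketch)
    ~GPU();

    // Invoked from nvhost_gpu::SubmitGPFIFO for each GPFIFO entry (see below).
    void ProcessCommandList(VAddr address, u32 size);

    // Shared GPU address space used by the nvhost-as-gpu ioctls above.
    std::shared_ptr<MemoryManager> memory_manager;
};

} // namespace Tegra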

View file

@@ -10,7 +10,6 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/memory_manager.h"
namespace Service {
namespace Nvidia {
@@ -20,9 +19,7 @@ class nvmap;
class nvhost_as_gpu final : public nvdevice {
public:
nvhost_as_gpu(std::shared_ptr<nvmap> nvmap_dev) : nvdevice(), nvmap_dev(std::move(nvmap_dev)) {
memory_manager = std::make_shared<MemoryManager>();
}
nvhost_as_gpu(std::shared_ptr<nvmap> nvmap_dev) : nvmap_dev(std::move(nvmap_dev)) {}
~nvhost_as_gpu() override = default;
u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
@@ -101,7 +98,6 @@ private:
u32 GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
std::shared_ptr<nvmap> nvmap_dev;
std::shared_ptr<MemoryManager> memory_manager;
};
} // namespace Devices

View file

@@ -5,6 +5,7 @@
#include <map>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
namespace Service {
@@ -131,7 +132,7 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
params.num_entries * sizeof(IoctlGpfifoEntry));
for (auto entry : entries) {
VAddr va_addr = entry.Address();
// TODO(ogniK): Process these
Core::System::GetInstance().GPU().ProcessCommandList(va_addr, entry.sz);
}
params.fence_out.id = 0;
params.fence_out.value = 0;
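The loop above now hands each GPFIFO entry's GPU virtual address and word count to the GPU instead of leaving a TODO. A sketch of an entry layout consistent with the entry.Address() and entry.sz accessors used there follows; the bit positions are assumptions, since the IoctlGpfifoEntry definition is not part of this excerpt.

// Sketch only: a 64-bit GPFIFO entry carrying a GPU virtual address and a
// command-word count; exact field positions are assumed, not taken from the PR.
#include "common/common_types.h"

struct GpfifoEntrySketch {
    u32 entry0; // low 32 bits of the GPU virtual address
    u32 entry1; // high address bits, flags, and the command-word count

    VAddr Address() const {
        return (static_cast<VAddr>(entry1 & 0xFF) << 32) | entry0; // assumed 40-bit address
    }
    u32 Size() const {
        return (entry1 >> 10) & 0x1FFFFF; // assumed position of the sz field
    }
};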

View file

@@ -4,6 +4,7 @@
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
#include "common/swap.h"
@@ -12,12 +13,14 @@
namespace Service {
namespace Nvidia {
namespace Devices {
class nvmap;
constexpr u32 NVGPU_IOCTL_MAGIC('H');
constexpr u32 NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO(0x8);
class nvhost_gpu final : public nvdevice {
public:
nvhost_gpu() = default;
nvhost_gpu(std::shared_ptr<nvmap> nvmap_dev) : nvmap_dev(std::move(nvmap_dev)) {}
~nvhost_gpu() override = default;
u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override;
@@ -132,6 +135,8 @@ private:
u32 AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output);
u32 AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output);
u32 SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& output);
std::shared_ptr<nvmap> nvmap_dev;
};
} // namespace Devices

View file

@@ -1,112 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/service/nvdrv/memory_manager.h"
namespace Service {
namespace Nvidia {
PAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
boost::optional<PAddr> paddr = FindFreeBlock(size, align);
ASSERT(paddr);
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(*paddr + offset) = static_cast<u64>(PageStatus::Allocated);
}
return *paddr;
}
PAddr MemoryManager::AllocateSpace(PAddr paddr, u64 size, u64 align) {
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
if (IsPageMapped(paddr + offset)) {
return AllocateSpace(size, align);
}
}
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(paddr + offset) = static_cast<u64>(PageStatus::Allocated);
}
return paddr;
}
PAddr MemoryManager::MapBufferEx(VAddr vaddr, u64 size) {
vaddr &= ~Memory::PAGE_MASK;
boost::optional<PAddr> paddr = FindFreeBlock(size);
ASSERT(paddr);
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(*paddr + offset) = vaddr + offset;
}
return *paddr;
}
PAddr MemoryManager::MapBufferEx(VAddr vaddr, PAddr paddr, u64 size) {
vaddr &= ~Memory::PAGE_MASK;
paddr &= ~Memory::PAGE_MASK;
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
if (PageSlot(paddr + offset) != static_cast<u64>(PageStatus::Allocated)) {
return MapBufferEx(vaddr, size);
}
}
for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
PageSlot(paddr + offset) = vaddr + offset;
}
return paddr;
}
boost::optional<PAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
PAddr paddr{};
u64 free_space{};
align = (align + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
while (paddr + free_space < MAX_ADDRESS) {
if (!IsPageMapped(paddr + free_space)) {
free_space += Memory::PAGE_SIZE;
if (free_space >= size) {
return paddr;
}
} else {
paddr += free_space + Memory::PAGE_SIZE;
free_space = 0;
const u64 remainder{paddr % align};
if (remainder) {
paddr = (paddr - remainder) + align;
}
}
}
return {};
}
VAddr MemoryManager::PhysicalToVirtualAddress(PAddr paddr) {
VAddr base_addr = PageSlot(paddr);
ASSERT(base_addr != static_cast<u64>(PageStatus::Unmapped));
return base_addr + (paddr & Memory::PAGE_MASK);
}
bool MemoryManager::IsPageMapped(PAddr paddr) {
return PageSlot(paddr) != static_cast<u64>(PageStatus::Unmapped);
}
VAddr& MemoryManager::PageSlot(PAddr paddr) {
auto& block = page_table[(paddr >> (Memory::PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
if (!block) {
block = std::make_unique<PageBlock>();
for (unsigned index = 0; index < PAGE_BLOCK_SIZE; index++) {
(*block)[index] = static_cast<u64>(PageStatus::Unmapped);
}
}
return (*block)[(paddr >> Memory::PAGE_BITS) & PAGE_BLOCK_MASK];
}
} // namespace Nvidia
} // namespace Service
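This file is removed from the nvdrv service; the earlier hunks show the same MemoryManager now reached through gpu.memory_manager. For reference, a short usage sketch of the API it defines, with illustrative addresses and with the include for the manager's new location omitted since that path is not shown here.

// Usage sketch only. MapBufferEx records, page by page, the CPU virtual
// address backing each GPU page, so PhysicalToVirtualAddress can translate a
// GPU address back when the command processor needs to read guest memory.
#include "common/assert.h"
#include "core/memory.h"

void MemoryManagerUsageSketch(MemoryManager& mm) {
    // Reserve 64 KiB of GPU address space anywhere, page-aligned.
    const PAddr gpu_region = mm.AllocateSpace(0x10000, Memory::PAGE_SIZE);

    // Map a guest buffer into a fresh GPU range, then translate back.
    const VAddr cpu_buffer = 0x12345000; // illustrative, page-aligned guest address
    const PAddr gpu_addr = mm.MapBufferEx(cpu_buffer, 0x1000);
    ASSERT(mm.PhysicalToVirtualAddress(gpu_addr) == cpu_buffer);
    (void)gpu_region;
}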

View file

@@ -1,48 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include "common/common_types.h"
#include "core/memory.h"
namespace Service {
namespace Nvidia {
class MemoryManager final {
public:
MemoryManager() = default;
PAddr AllocateSpace(u64 size, u64 align);
PAddr AllocateSpace(PAddr paddr, u64 size, u64 align);
PAddr MapBufferEx(VAddr vaddr, u64 size);
PAddr MapBufferEx(VAddr vaddr, PAddr paddr, u64 size);
VAddr PhysicalToVirtualAddress(PAddr paddr);
private:
boost::optional<PAddr> FindFreeBlock(u64 size, u64 align = 1);
bool IsPageMapped(PAddr paddr);
VAddr& PageSlot(PAddr paddr);
enum class PageStatus : u64 {
Unmapped = 0xFFFFFFFFFFFFFFFFULL,
Allocated = 0xFFFFFFFFFFFFFFFEULL,
};
static constexpr u64 MAX_ADDRESS{0x10000000000ULL};
static constexpr u64 PAGE_TABLE_BITS{14};
static constexpr u64 PAGE_TABLE_SIZE{1 << PAGE_TABLE_BITS};
static constexpr u64 PAGE_TABLE_MASK{PAGE_TABLE_SIZE - 1};
static constexpr u64 PAGE_BLOCK_BITS{14};
static constexpr u64 PAGE_BLOCK_SIZE{1 << PAGE_BLOCK_BITS};
static constexpr u64 PAGE_BLOCK_MASK{PAGE_BLOCK_SIZE - 1};
using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{};
};
} // namespace Nvidia
} // namespace Service
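The constants above define a two-level page table: 14 bits select the outer table entry, 14 bits select a slot inside a lazily allocated PageBlock (16384 pages, i.e. 64 MiB of GPU address space per block assuming 4 KiB pages), and the page offset covers the rest. Assuming Memory::PAGE_BITS is 12 (a value taken from the usual core memory constants, not shown here), the table spans 12 + 14 + 14 = 40 bits, which is exactly MAX_ADDRESS = 0x10000000000:

// Worked check of the coverage implied by the constants above; the page size
// of 4 KiB (12 offset bits) is an assumption.
static_assert((1ULL << (12 /*page offset*/ + 14 /*PAGE_BLOCK_BITS*/ + 14 /*PAGE_TABLE_BITS*/)) ==
                  0x10000000000ULL /*MAX_ADDRESS*/,
              "the two-level page table spans the full 40-bit GPU address range");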

View file

@@ -32,11 +32,11 @@ void InstallInterfaces(SM::ServiceManager& service_manager) {
Module::Module() {
auto nvmap_dev = std::make_shared<Devices::nvmap>();
devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(nvmap_dev);
devices["/dev/nvhost-gpu"] = std::make_shared<Devices::nvhost_gpu>(nvmap_dev);
devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>();
devices["/dev/nvmap"] = nvmap_dev;
devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(nvmap_dev);
devices["/dev/nvhost-ctrl"] = std::make_shared<Devices::nvhost_ctrl>();
devices["/dev/nvhost-gpu"] = std::make_shared<Devices::nvhost_gpu>();
}
u32 Module::Open(std::string device_name) {