Mirror of https://github.com/shadps4-emu/shadPS4.git (synced 2025-05-30 07:13:18 +00:00)
video_core: Bringup some basic functionality (#145)
* video_core: Remove hack in rasterizer. The hack was to skip the first draw, as the display buffer had not been created yet and the texture cache could not create one itself. With this patch it now can, using the color buffer parameters from the registers.
* shader_recompiler: Implement attribute loads/stores.
* video_core: Add basic vertex and index buffer handling and pipeline caching (the general idea is sketched below).
* externals: Make xxhash lowercase.
Parent commit: e9f64bb76c
This commit: 3c90b8ac00
50 changed files with 1030 additions and 383 deletions
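The pipeline caching mentioned above boils down to building each Vulkan graphics pipeline once and reusing it whenever the same render state recurs. The cache introduced by this commit lives in the video_core Vulkan backend and is not part of the memory-manager diff excerpted below; the following is only a minimal sketch of the idea, keyed by an xxHash digest of a hypothetical state bundle (GraphicsPipelineKey, BuildPipeline, and the member layout are illustrative assumptions, not code from the commit).

// Minimal sketch of a hash-keyed pipeline cache (illustrative only).
#include <unordered_map>
#include <xxhash.h>
#include "common/types.h"
#include "video_core/renderer_vulkan/vk_common.h"

struct GraphicsPipelineKey { // hypothetical state bundle; keep it padding-free so byte hashing is stable
    vk::Format color_format;
    vk::PrimitiveTopology topology;
    u64 vertex_shader_hash;
    u64 pixel_shader_hash;
};

class PipelineCache {
public:
    vk::Pipeline GetPipeline(const GraphicsPipelineKey& key) {
        // Hash the raw bytes of the key with xxHash and look the digest up.
        const u64 hash = XXH3_64bits(&key, sizeof(key));
        const auto [it, is_new] = pipelines.try_emplace(hash);
        if (is_new) {
            // Only pay the pipeline creation cost on a cache miss.
            it->second = BuildPipeline(key);
        }
        return it->second;
    }

private:
    vk::Pipeline BuildPipeline(const GraphicsPipelineKey&) {
        return vk::Pipeline{}; // placeholder; a real cache would create and store the pipeline here
    }

    std::unordered_map<u64, vk::Pipeline> pipelines;
};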
@@ -7,6 +7,7 @@
#include "common/scope_exit.h"
#include "core/libraries/error_codes.h"
#include "core/memory.h"
#include "video_core/renderer_vulkan/vk_instance.h"

namespace Core {

@@ -61,6 +62,10 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
        new_vma.prot = prot;
        new_vma.name = name;
        new_vma.type = type;

        if (type == VMAType::Direct) {
            MapVulkanMemory(mapped_addr, size);
        }
    };

    // When virtual addr is zero let the address space manager pick the address.

@@ -103,6 +108,10 @@ void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
    ASSERT_MSG(it != vma_map.end() && it->first == virtual_addr,
               "Attempting to unmap partially mapped range");

    if (it->second.type == VMAType::Direct) {
        UnmapVulkanMemory(virtual_addr, size);
    }

    // Mark region as free and attempt to coalesce it with neighbours.
    auto& vma = it->second;
    vma.type = VMAType::Free;

@@ -114,6 +123,13 @@ void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
    impl.Unmap(virtual_addr, size);
}

std::pair<vk::Buffer, size_t> MemoryManager::GetVulkanBuffer(VAddr addr) {
    auto it = mapped_memories.upper_bound(addr);
    it = std::prev(it);
    ASSERT(it != mapped_memories.end() && it->first <= addr);
    return std::make_pair(*it->second.buffer, addr - it->first);
}

VirtualMemoryArea& MemoryManager::AddMapping(VAddr virtual_addr, size_t size) {
    auto vma_handle = FindVMA(virtual_addr);
    ASSERT_MSG(vma_handle != vma_map.end(), "Virtual address not in vm_map");

@@ -171,4 +187,81 @@ MemoryManager::VMAHandle MemoryManager::MergeAdjacent(VMAHandle iter) {
    return iter;
}

void MemoryManager::MapVulkanMemory(VAddr addr, size_t size) {
    const vk::Device device = instance->GetDevice();
    const auto memory_props = instance->GetPhysicalDevice().getMemoryProperties();
    void* host_pointer = reinterpret_cast<void*>(addr);
    const auto host_mem_props = device.getMemoryHostPointerPropertiesEXT(
        vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT, host_pointer);
    ASSERT(host_mem_props.memoryTypeBits != 0);

    int mapped_memory_type = -1;
    auto find_mem_type_with_flag = [&](const vk::MemoryPropertyFlags flags) {
        u32 host_mem_types = host_mem_props.memoryTypeBits;
        while (host_mem_types != 0) {
            // Try to find a cached memory type
            mapped_memory_type = std::countr_zero(host_mem_types);
            host_mem_types -= (1 << mapped_memory_type);

            if ((memory_props.memoryTypes[mapped_memory_type].propertyFlags & flags) == flags) {
                return;
            }
        }

        mapped_memory_type = -1;
    };

    // First try to find a memory that is both coherent and cached
    find_mem_type_with_flag(vk::MemoryPropertyFlagBits::eHostCoherent |
                            vk::MemoryPropertyFlagBits::eHostCached);
    if (mapped_memory_type == -1)
        // Then only coherent (lower performance)
        find_mem_type_with_flag(vk::MemoryPropertyFlagBits::eHostCoherent);

    if (mapped_memory_type == -1) {
        LOG_CRITICAL(Render_Vulkan, "No coherent memory available for memory mapping");
        mapped_memory_type = std::countr_zero(host_mem_props.memoryTypeBits);
    }

    const vk::StructureChain alloc_info = {
        vk::MemoryAllocateInfo{
            .allocationSize = size,
            .memoryTypeIndex = static_cast<uint32_t>(mapped_memory_type),
        },
        vk::ImportMemoryHostPointerInfoEXT{
            .handleType = vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT,
            .pHostPointer = host_pointer,
        },
    };

    const auto [it, new_memory] = mapped_memories.try_emplace(addr);
    ASSERT_MSG(new_memory, "Attempting to remap already mapped vulkan memory");

    auto& memory = it->second;
    memory.backing = device.allocateMemoryUnique(alloc_info.get());

    constexpr vk::BufferUsageFlags MapFlags =
        vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eVertexBuffer |
        vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
        vk::BufferUsageFlagBits::eUniformBuffer;

    const vk::StructureChain buffer_info = {
        vk::BufferCreateInfo{
            .size = size,
            .usage = MapFlags,
            .sharingMode = vk::SharingMode::eExclusive,
        },
        vk::ExternalMemoryBufferCreateInfoKHR{
            .handleTypes = vk::ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT,
        }};
    memory.buffer = device.createBufferUnique(buffer_info.get());
    device.bindBufferMemory(*memory.buffer, *memory.backing, 0);
}

void MemoryManager::UnmapVulkanMemory(VAddr addr, size_t size) {
    const auto it = mapped_memories.find(addr);
    ASSERT(it != mapped_memories.end() && it->second.buffer_size == size);
    mapped_memories.erase(it);
}

} // namespace Core
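The new GetVulkanBuffer above maps a guest virtual address back to the vk::Buffer that MapVulkanMemory created for the containing mapping, together with the byte offset of the address inside that mapping. On the renderer side a caller could use it roughly as follows when binding guest-resident vertex data for a draw; this is a hedged sketch, and the function name, command-buffer plumbing, and binding index are illustrative rather than taken from the commit.

// Illustrative only: bind a guest vertex buffer through the imported host memory.
#include "core/memory.h"
#include "video_core/renderer_vulkan/vk_common.h"

void BindGuestVertexBuffer(Core::MemoryManager& memory, vk::CommandBuffer cmdbuf,
                           VAddr guest_vertex_addr) {
    // buffer is the vk::Buffer created in MapVulkanMemory; offset is the position
    // of guest_vertex_addr relative to the start of that mapping.
    const auto [buffer, offset] = memory.GetVulkanBuffer(guest_vertex_addr);
    cmdbuf.bindVertexBuffers(0, buffer, static_cast<vk::DeviceSize>(offset));
}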
@@ -3,6 +3,7 @@

#pragma once

#include <functional>
#include <string_view>
#include <vector>
#include <boost/icl/split_interval_map.hpp>

@@ -10,6 +11,11 @@
#include "common/singleton.h"
#include "common/types.h"
#include "core/address_space.h"
#include "video_core/renderer_vulkan/vk_common.h"

namespace Vulkan {
class Instance;
}

namespace Core {

@@ -86,6 +92,10 @@
    explicit MemoryManager();
    ~MemoryManager();

    void SetInstance(const Vulkan::Instance* instance_) {
        instance = instance_;
    }

    PAddr Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
                   int memory_type);

@@ -97,11 +107,9 @@
    void UnmapMemory(VAddr virtual_addr, size_t size);

private:
    bool HasOverlap(VAddr addr, size_t size) const {
        return vma_map.find(addr) != vma_map.end();
    }
    std::pair<vk::Buffer, size_t> GetVulkanBuffer(VAddr addr);

private:
    VMAHandle FindVMA(VAddr target) {
        // Return first the VMA with base >= target.
        const auto it = vma_map.lower_bound(target);

@@ -117,10 +125,22 @@
    VMAHandle MergeAdjacent(VMAHandle iter);

    void MapVulkanMemory(VAddr addr, size_t size);

    void UnmapVulkanMemory(VAddr addr, size_t size);

private:
    AddressSpace impl;
    std::vector<DirectMemoryArea> allocations;
    VMAMap vma_map;

    struct MappedMemory {
        vk::UniqueBuffer buffer;
        vk::UniqueDeviceMemory backing;
        size_t buffer_size;
    };
    std::map<VAddr, MappedMemory> mapped_memories;
    const Vulkan::Instance* instance{};
};

using Memory = Common::Singleton<MemoryManager>;
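Two practical notes on the header changes, stated as assumptions rather than guarantees: SetInstance has to be called before the first VMAType::Direct mapping, because MapVulkanMemory dereferences instance unchecked; and the host-pointer import path depends on the VK_EXT_external_memory_host device extension (vk::ImportMemoryHostPointerInfoEXT also requires the pointer to respect minImportedHostPointerAlignment). A minimal wiring sketch follows; every name in it except SetInstance, GetPhysicalDevice, and the standard Vulkan identifiers is a placeholder.

// Illustrative startup wiring; SetUpGuestMemoryImport is a placeholder name.
#include <algorithm>
#include <cstring>
#include "core/memory.h"
#include "video_core/renderer_vulkan/vk_instance.h"

bool SetUpGuestMemoryImport(const Vulkan::Instance& instance, Core::MemoryManager& memory) {
    // MapVulkanMemory relies on VK_EXT_external_memory_host; without it the
    // host-pointer import cannot work, so report failure instead of crashing later.
    const auto extensions = instance.GetPhysicalDevice().enumerateDeviceExtensionProperties();
    const bool has_host_import =
        std::any_of(extensions.begin(), extensions.end(), [](const vk::ExtensionProperties& prop) {
            return std::strcmp(prop.extensionName.data(),
                               VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME) == 0;
        });
    if (!has_host_import) {
        return false;
    }
    memory.SetInstance(&instance);
    return true;
}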