Mirror of https://github.com/shadps4-emu/shadPS4.git (synced 2025-06-07 03:03:15 +00:00)
video_core: Implement guest buffer manager (#373)
* video_core: Introduce buffer cache
* video_core: Use multi level page table for caches
* renderer_vulkan: Remove unused stream buffer
* fix build
* oops forgot optimize off
This commit is contained in: parent 159be2c7f4, commit 381ba8c7a5.
55 changed files with 2697 additions and 1039 deletions
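For orientation, a minimal sketch of how a draw path might drive the new cache. The call site and function name below are hypothetical (not part of this commit); only ObtainBuffer and Handle come from the diff. ObtainBuffer returns a host buffer plus the offset of the guest address inside it, which the renderer then binds.

// Sketch (not part of the diff): binding a guest vertex range through the cache.
#include "video_core/buffer_cache/buffer_cache.h"

void BindGuestVertexRange(VideoCore::BufferCache& cache, vk::CommandBuffer cmdbuf,
                          VAddr guest_addr, u32 size_bytes) {
    // is_written = false: the GPU only reads this range, so small reads may be streamed.
    const auto [buffer, offset] = cache.ObtainBuffer(guest_addr, size_bytes, false);
    const vk::Buffer handle = buffer->Handle();
    const vk::DeviceSize binding_offset = offset;
    cmdbuf.bindVertexBuffers(0, 1, &handle, &binding_offset);
}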
src/video_core/buffer_cache/buffer.cpp (new file, 227 lines)
@@ -0,0 +1,227 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/alignment.h"
#include "common/assert.h"
#include "video_core/buffer_cache/buffer.h"
#include "video_core/renderer_vulkan/liverpool_to_vk.h"
#include "video_core/renderer_vulkan/vk_instance.h"
#include "video_core/renderer_vulkan/vk_platform.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"

#include <vk_mem_alloc.h>

namespace VideoCore {

constexpr vk::BufferUsageFlags AllFlags =
    vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
    vk::BufferUsageFlagBits::eUniformTexelBuffer | vk::BufferUsageFlagBits::eStorageTexelBuffer |
    vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer |
    vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eVertexBuffer;

std::string_view BufferTypeName(MemoryUsage type) {
    switch (type) {
    case MemoryUsage::Upload:
        return "Upload";
    case MemoryUsage::Download:
        return "Download";
    case MemoryUsage::Stream:
        return "Stream";
    case MemoryUsage::DeviceLocal:
        return "DeviceLocal";
    default:
        return "Invalid";
    }
}

[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePreferredVmaFlags(MemoryUsage usage) {
    return usage != MemoryUsage::DeviceLocal ? VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                                             : VkMemoryPropertyFlagBits{};
}

[[nodiscard]] VmaAllocationCreateFlags MemoryUsageVmaFlags(MemoryUsage usage) {
    switch (usage) {
    case MemoryUsage::Upload:
    case MemoryUsage::Stream:
        return VMA_ALLOCATION_CREATE_MAPPED_BIT |
               VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
    case MemoryUsage::Download:
        return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
    case MemoryUsage::DeviceLocal:
        return {};
    }
    return {};
}

[[nodiscard]] VmaMemoryUsage MemoryUsageVma(MemoryUsage usage) {
    switch (usage) {
    case MemoryUsage::DeviceLocal:
    case MemoryUsage::Stream:
        return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
    case MemoryUsage::Upload:
    case MemoryUsage::Download:
        return VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
    }
    return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
}

UniqueBuffer::UniqueBuffer(vk::Device device_, VmaAllocator allocator_)
    : device{device_}, allocator{allocator_} {}

UniqueBuffer::~UniqueBuffer() {
    if (buffer) {
        vmaDestroyBuffer(allocator, buffer, allocation);
    }
}

void UniqueBuffer::Create(const vk::BufferCreateInfo& buffer_ci, MemoryUsage usage,
                          VmaAllocationInfo* out_alloc_info) {
    const VmaAllocationCreateInfo alloc_ci = {
        .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | MemoryUsageVmaFlags(usage),
        .usage = MemoryUsageVma(usage),
        .requiredFlags = 0,
        .preferredFlags = MemoryUsagePreferredVmaFlags(usage),
        .pool = VK_NULL_HANDLE,
        .pUserData = nullptr,
    };

    const VkBufferCreateInfo buffer_ci_unsafe = static_cast<VkBufferCreateInfo>(buffer_ci);
    VkBuffer unsafe_buffer{};
    VkResult result = vmaCreateBuffer(allocator, &buffer_ci_unsafe, &alloc_ci, &unsafe_buffer,
                                      &allocation, out_alloc_info);
    ASSERT_MSG(result == VK_SUCCESS, "Failed allocating buffer with error {}",
               vk::to_string(vk::Result{result}));
    buffer = vk::Buffer{unsafe_buffer};
}

Buffer::Buffer(const Vulkan::Instance& instance_, MemoryUsage usage_, VAddr cpu_addr_,
               u64 size_bytes_)
    : cpu_addr{cpu_addr_}, size_bytes{size_bytes_}, instance{&instance_}, usage{usage_},
      buffer{instance->GetDevice(), instance->GetAllocator()} {
    // Create buffer object.
    const vk::BufferCreateInfo buffer_ci = {
        .size = size_bytes,
        .usage = AllFlags,
    };
    VmaAllocationInfo alloc_info{};
    buffer.Create(buffer_ci, usage, &alloc_info);

    if (instance->HasDebuggingToolAttached()) {
        const auto device = instance->GetDevice();
        Vulkan::SetObjectName(device, Handle(), "Buffer {:#x} {} KiB", cpu_addr, size_bytes / 1024);
    }

    // Map it if it is host visible.
    VkMemoryPropertyFlags property_flags{};
    vmaGetAllocationMemoryProperties(instance->GetAllocator(), buffer.allocation, &property_flags);
    if (alloc_info.pMappedData) {
        mapped_data = std::span<u8>{std::bit_cast<u8*>(alloc_info.pMappedData), size_bytes};
    }
    is_coherent = property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
}

vk::BufferView Buffer::View(u32 offset, u32 size, AmdGpu::DataFormat dfmt,
                            AmdGpu::NumberFormat nfmt) {
    const auto it{std::ranges::find_if(views, [offset, size, dfmt, nfmt](const BufferView& view) {
        return offset == view.offset && size == view.size && dfmt == view.dfmt && nfmt == view.nfmt;
    })};
    if (it != views.end()) {
        return it->handle;
    }
    views.push_back({
        .offset = offset,
        .size = size,
        .dfmt = dfmt,
        .nfmt = nfmt,
        .handle = instance->GetDevice().createBufferView({
            .buffer = buffer.buffer,
            .format = Vulkan::LiverpoolToVK::SurfaceFormat(dfmt, nfmt),
            .offset = offset,
            .range = size,
        }),
    });
    return views.back().handle;
}

constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;

StreamBuffer::StreamBuffer(const Vulkan::Instance& instance, Vulkan::Scheduler& scheduler_,
                           MemoryUsage usage, u64 size_bytes)
    : Buffer{instance, usage, 0, size_bytes}, scheduler{scheduler_} {
    ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
    ReserveWatches(previous_watches, WATCHES_INITIAL_RESERVE);
    const auto device = instance.GetDevice();
    if (instance.HasDebuggingToolAttached()) {
        Vulkan::SetObjectName(device, Handle(), "StreamBuffer({}): {} KiB", BufferTypeName(usage),
                              size_bytes / 1024);
    }
}

std::pair<u8*, u64> StreamBuffer::Map(u64 size, u64 alignment) {
    if (!is_coherent && usage == MemoryUsage::Stream) {
        size = Common::AlignUp(size, instance->NonCoherentAtomSize());
    }

    ASSERT(size <= this->size_bytes);
    mapped_size = size;

    if (alignment > 0) {
        offset = Common::AlignUp(offset, alignment);
    }

    if (offset + size > this->size_bytes) {
        // The buffer would overflow, save the amount of used watches and reset the state.
        invalidation_mark = current_watch_cursor;
        current_watch_cursor = 0;
        offset = 0;

        // Swap watches and reset waiting cursors.
        std::swap(previous_watches, current_watches);
        wait_cursor = 0;
        wait_bound = 0;
    }

    const u64 mapped_upper_bound = offset + size;
    WaitPendingOperations(mapped_upper_bound);
    return std::make_pair(mapped_data.data() + offset, offset);
}

void StreamBuffer::Commit() {
    if (!is_coherent) {
        if (usage == MemoryUsage::Download) {
            vmaInvalidateAllocation(instance->GetAllocator(), buffer.allocation, offset,
                                    mapped_size);
        } else {
            vmaFlushAllocation(instance->GetAllocator(), buffer.allocation, offset, mapped_size);
        }
    }

    offset += mapped_size;
    if (current_watch_cursor + 1 >= current_watches.size()) {
        // Ensure that there are enough watches.
        ReserveWatches(current_watches, WATCHES_RESERVE_CHUNK);
    }

    auto& watch = current_watches[current_watch_cursor++];
    watch.upper_bound = offset;
    watch.tick = scheduler.CurrentTick();
}

void StreamBuffer::ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size) {
    watches.resize(watches.size() + grow_size);
}

void StreamBuffer::WaitPendingOperations(u64 requested_upper_bound) {
    if (!invalidation_mark) {
        return;
    }
    while (requested_upper_bound > wait_bound && wait_cursor < *invalidation_mark) {
        auto& watch = previous_watches[wait_cursor];
        wait_bound = watch.upper_bound;
        scheduler.Wait(watch.tick);
        ++wait_cursor;
    }
}

} // namespace VideoCore
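The StreamBuffer above follows a Map/write/Commit protocol: Map reserves (and, for non-coherent memory, aligns) a region and returns a CPU pointer plus the offset inside the buffer, and Commit flushes the write and records a watch tick so a later wrap-around can wait on it. A minimal caller sketch; everything outside the diff (the helper name and its arguments) is hypothetical:

// Sketch: uploading indices through a StreamBuffer (stream_buffer is assumed to exist).
#include <cstring>
#include "video_core/buffer_cache/buffer.h"

u64 UploadIndices(VideoCore::StreamBuffer& stream_buffer, const u16* indices, u32 count) {
    const u64 size_bytes = count * sizeof(u16);
    const auto [cpu_ptr, offset] = stream_buffer.Map(size_bytes, sizeof(u16));
    std::memcpy(cpu_ptr, indices, size_bytes); // write into the mapped window
    stream_buffer.Commit();                    // flush (if needed) and record a watch
    return offset;                             // offset to use when binding the buffer
}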
src/video_core/buffer_cache/buffer.h (new file, 173 lines)
@@ -0,0 +1,173 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <cstddef>
#include <utility>
#include <vector>
#include "common/types.h"
#include "video_core/amdgpu/resource.h"
#include "video_core/renderer_vulkan/vk_common.h"

namespace Vulkan {
class Instance;
class Scheduler;
} // namespace Vulkan

VK_DEFINE_HANDLE(VmaAllocation)
VK_DEFINE_HANDLE(VmaAllocator)

struct VmaAllocationInfo;

namespace VideoCore {

/// Hints and requirements for the backing memory type of a commit
enum class MemoryUsage {
    DeviceLocal, ///< Requests device local buffer.
    Upload,      ///< Requires a host visible memory type optimized for CPU to GPU uploads
    Download,    ///< Requires a host visible memory type optimized for GPU to CPU readbacks
    Stream,      ///< Requests a device local, host visible buffer, falling back to host memory.
};

struct UniqueBuffer {
    explicit UniqueBuffer(vk::Device device, VmaAllocator allocator);
    ~UniqueBuffer();

    UniqueBuffer(const UniqueBuffer&) = delete;
    UniqueBuffer& operator=(const UniqueBuffer&) = delete;

    UniqueBuffer(UniqueBuffer&& other)
        : buffer{std::exchange(other.buffer, VK_NULL_HANDLE)},
          allocator{std::exchange(other.allocator, VK_NULL_HANDLE)},
          allocation{std::exchange(other.allocation, VK_NULL_HANDLE)} {}
    UniqueBuffer& operator=(UniqueBuffer&& other) {
        buffer = std::exchange(other.buffer, VK_NULL_HANDLE);
        allocator = std::exchange(other.allocator, VK_NULL_HANDLE);
        allocation = std::exchange(other.allocation, VK_NULL_HANDLE);
        return *this;
    }

    void Create(const vk::BufferCreateInfo& buffer_ci, MemoryUsage usage,
                VmaAllocationInfo* out_alloc_info);

    operator vk::Buffer() const {
        return buffer;
    }

    vk::Device device;
    VmaAllocator allocator;
    VmaAllocation allocation;
    vk::Buffer buffer{};
};

class Buffer {
public:
    explicit Buffer(const Vulkan::Instance& instance, MemoryUsage usage, VAddr cpu_addr_,
                    u64 size_bytes_);

    Buffer& operator=(const Buffer&) = delete;
    Buffer(const Buffer&) = delete;

    Buffer& operator=(Buffer&&) = default;
    Buffer(Buffer&&) = default;

    vk::BufferView View(u32 offset, u32 size, AmdGpu::DataFormat dfmt, AmdGpu::NumberFormat nfmt);

    /// Increases the likeliness of this being a stream buffer
    void IncreaseStreamScore(int score) noexcept {
        stream_score += score;
    }

    /// Returns the likeliness of this being a stream buffer
    [[nodiscard]] int StreamScore() const noexcept {
        return stream_score;
    }

    /// Returns true when vaddr -> vaddr+size is fully contained in the buffer
    [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
        return addr >= cpu_addr && addr + size <= cpu_addr + SizeBytes();
    }

    /// Returns the base CPU address of the buffer
    [[nodiscard]] VAddr CpuAddr() const noexcept {
        return cpu_addr;
    }

    /// Returns the offset relative to the given CPU address
    [[nodiscard]] u32 Offset(VAddr other_cpu_addr) const noexcept {
        return static_cast<u32>(other_cpu_addr - cpu_addr);
    }

    size_t SizeBytes() const {
        return size_bytes;
    }

    vk::Buffer Handle() const noexcept {
        return buffer;
    }

public:
    VAddr cpu_addr = 0;
    bool is_picked{};
    bool is_coherent{};
    int stream_score = 0;
    size_t size_bytes = 0;
    std::span<u8> mapped_data;
    const Vulkan::Instance* instance{};
    MemoryUsage usage;
    UniqueBuffer buffer;
    struct BufferView {
        u32 offset;
        u32 size;
        AmdGpu::DataFormat dfmt;
        AmdGpu::NumberFormat nfmt;
        vk::BufferView handle;
    };
    std::vector<BufferView> views;
};

class StreamBuffer : public Buffer {
public:
    explicit StreamBuffer(const Vulkan::Instance& instance, Vulkan::Scheduler& scheduler,
                          MemoryUsage usage, u64 size_bytes_);

    /// Reserves a region of memory from the stream buffer.
    std::pair<u8*, u64> Map(u64 size, u64 alignment = 0);

    /// Ensures that reserved bytes of memory are available to the GPU.
    void Commit();

    /// Maps and commits a memory region with user provided data
    u64 Copy(VAddr src, size_t size, size_t alignment = 0) {
        const auto [data, offset] = Map(size, alignment);
        std::memcpy(data, reinterpret_cast<const void*>(src), size);
        Commit();
        return offset;
    }

private:
    struct Watch {
        u64 tick{};
        u64 upper_bound{};
    };

    /// Increases the amount of watches available.
    void ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size);

    /// Waits pending watches until requested upper bound.
    void WaitPendingOperations(u64 requested_upper_bound);

private:
    Vulkan::Scheduler& scheduler;
    u64 offset{};
    u64 mapped_size{};
    std::vector<Watch> current_watches;
    std::size_t current_watch_cursor{};
    std::optional<size_t> invalidation_mark;
    std::vector<Watch> previous_watches;
    std::size_t wait_cursor{};
    u64 wait_bound{};
};

} // namespace VideoCore
src/video_core/buffer_cache/buffer_cache.cpp (new file, 497 lines)
@@ -0,0 +1,497 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include "common/alignment.h"
#include "common/scope_exit.h"
#include "shader_recompiler/runtime_info.h"
#include "video_core/amdgpu/liverpool.h"
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/renderer_vulkan/liverpool_to_vk.h"
#include "video_core/renderer_vulkan/vk_instance.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"

namespace VideoCore {

static constexpr size_t StagingBufferSize = 256_MB;
static constexpr size_t UboStreamBufferSize = 64_MB;

BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& scheduler_,
                         const AmdGpu::Liverpool* liverpool_, PageManager& tracker_)
    : instance{instance_}, scheduler{scheduler_}, liverpool{liverpool_}, tracker{tracker_},
      staging_buffer{instance, scheduler, MemoryUsage::Upload, StagingBufferSize},
      stream_buffer{instance, scheduler, MemoryUsage::Stream, UboStreamBufferSize},
      memory_tracker{&tracker} {
    // Ensure the first slot is used for the null buffer
    void(slot_buffers.insert(instance, MemoryUsage::DeviceLocal, 0, 1));
}

BufferCache::~BufferCache() = default;

void BufferCache::InvalidateMemory(VAddr device_addr, u64 size) {
    std::scoped_lock lk{mutex};
    const bool is_tracked = IsRegionRegistered(device_addr, size);
    if (!is_tracked) {
        return;
    }
    // Mark the page as CPU modified to stop tracking writes.
    SCOPE_EXIT {
        memory_tracker.MarkRegionAsCpuModified(device_addr, size);
    };
    if (!memory_tracker.IsRegionGpuModified(device_addr, size)) {
        // Page has not been modified by the GPU, nothing to do.
        return;
    }
}

void BufferCache::DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 size) {
    boost::container::small_vector<vk::BufferCopy, 1> copies;
    u64 total_size_bytes = 0;
    u64 largest_copy = 0;
    memory_tracker.ForEachDownloadRange<true>(
        device_addr, size, [&](u64 device_addr_out, u64 range_size) {
            const VAddr buffer_addr = buffer.CpuAddr();
            const auto add_download = [&](VAddr start, VAddr end, u64) {
                const u64 new_offset = start - buffer_addr;
                const u64 new_size = end - start;
                copies.push_back(vk::BufferCopy{
                    .srcOffset = new_offset,
                    .dstOffset = total_size_bytes,
                    .size = new_size,
                });
                // Align up to avoid cache conflicts
                constexpr u64 align = 64ULL;
                constexpr u64 mask = ~(align - 1ULL);
                total_size_bytes += (new_size + align - 1) & mask;
                largest_copy = std::max(largest_copy, new_size);
            };
        });
    if (total_size_bytes == 0) {
        return;
    }
    const auto [staging, offset] = staging_buffer.Map(total_size_bytes);
    for (auto& copy : copies) {
        // Modify copies to have the staging offset in mind
        copy.dstOffset += offset;
    }
    staging_buffer.Commit();
    scheduler.EndRendering();
    const auto cmdbuf = scheduler.CommandBuffer();
    cmdbuf.copyBuffer(buffer.buffer, staging_buffer.Handle(), copies);
    scheduler.Finish();
    for (const auto& copy : copies) {
        const VAddr copy_device_addr = buffer.CpuAddr() + copy.srcOffset;
        const u64 dst_offset = copy.dstOffset - offset;
        std::memcpy(std::bit_cast<u8*>(copy_device_addr), staging + dst_offset, copy.size);
    }
}

bool BufferCache::BindVertexBuffers(const Shader::Info& vs_info) {
    if (vs_info.vs_inputs.empty()) {
        return false;
    }

    std::array<vk::Buffer, NUM_VERTEX_BUFFERS> host_buffers;
    std::array<vk::DeviceSize, NUM_VERTEX_BUFFERS> host_offsets;
    boost::container::static_vector<AmdGpu::Buffer, NUM_VERTEX_BUFFERS> guest_buffers;

    struct BufferRange {
        VAddr base_address;
        VAddr end_address;
        vk::Buffer vk_buffer;
        u64 offset;

        size_t GetSize() const {
            return end_address - base_address;
        }
    };

    // Calculate buffers memory overlaps
    bool has_step_rate = false;
    boost::container::static_vector<BufferRange, NUM_VERTEX_BUFFERS> ranges{};
    for (const auto& input : vs_info.vs_inputs) {
        if (input.instance_step_rate == Shader::Info::VsInput::InstanceIdType::OverStepRate0 ||
            input.instance_step_rate == Shader::Info::VsInput::InstanceIdType::OverStepRate1) {
            has_step_rate = true;
            continue;
        }

        const auto& buffer = vs_info.ReadUd<AmdGpu::Buffer>(input.sgpr_base, input.dword_offset);
        if (buffer.GetSize() == 0) {
            continue;
        }
        guest_buffers.emplace_back(buffer);
        ranges.emplace_back(buffer.base_address, buffer.base_address + buffer.GetSize());
    }

    std::ranges::sort(ranges, [](const BufferRange& lhv, const BufferRange& rhv) {
        return lhv.base_address < rhv.base_address;
    });

    boost::container::static_vector<BufferRange, NUM_VERTEX_BUFFERS> ranges_merged{ranges[0]};
    for (auto range : ranges) {
        auto& prev_range = ranges_merged.back();
        if (prev_range.end_address < range.base_address) {
            ranges_merged.emplace_back(range);
        } else {
            prev_range.end_address = std::max(prev_range.end_address, range.end_address);
        }
    }

    // Map buffers
    for (auto& range : ranges_merged) {
        const auto [buffer, offset] = ObtainBuffer(range.base_address, range.GetSize(), false);
        range.vk_buffer = buffer->buffer;
        range.offset = offset;
    }

    // Bind vertex buffers
    const size_t num_buffers = guest_buffers.size();
    for (u32 i = 0; i < num_buffers; ++i) {
        const auto& buffer = guest_buffers[i];
        const auto host_buffer = std::ranges::find_if(ranges_merged, [&](const BufferRange& range) {
            return (buffer.base_address >= range.base_address &&
                    buffer.base_address < range.end_address);
        });
        ASSERT(host_buffer != ranges_merged.cend());

        host_buffers[i] = host_buffer->vk_buffer;
        host_offsets[i] = host_buffer->offset + buffer.base_address - host_buffer->base_address;
    }

    if (num_buffers > 0) {
        const auto cmdbuf = scheduler.CommandBuffer();
        cmdbuf.bindVertexBuffers(0, num_buffers, host_buffers.data(), host_offsets.data());
    }

    return has_step_rate;
}

u32 BufferCache::BindIndexBuffer(bool& is_indexed, u32 index_offset) {
    // Emulate QuadList primitive type with CPU made index buffer.
    const auto& regs = liverpool->regs;
    if (regs.primitive_type == AmdGpu::Liverpool::PrimitiveType::QuadList) {
        is_indexed = true;

        // Emit indices.
        const u32 index_size = 3 * regs.num_indices;
        const auto [data, offset] = stream_buffer.Map(index_size);
        Vulkan::LiverpoolToVK::EmitQuadToTriangleListIndices(data, regs.num_indices);
        stream_buffer.Commit();

        // Bind index buffer.
        const auto cmdbuf = scheduler.CommandBuffer();
        cmdbuf.bindIndexBuffer(stream_buffer.Handle(), offset, vk::IndexType::eUint16);
        return index_size / sizeof(u16);
    }
    if (!is_indexed) {
        return regs.num_indices;
    }

    // Figure out index type and size.
    const bool is_index16 =
        regs.index_buffer_type.index_type == AmdGpu::Liverpool::IndexType::Index16;
    const vk::IndexType index_type = is_index16 ? vk::IndexType::eUint16 : vk::IndexType::eUint32;
    const u32 index_size = is_index16 ? sizeof(u16) : sizeof(u32);
    VAddr index_address = regs.index_base_address.Address<VAddr>();
    index_address += index_offset * index_size;

    // Bind index buffer.
    const u32 index_buffer_size = regs.num_indices * index_size;
    const auto [vk_buffer, offset] = ObtainBuffer(index_address, index_buffer_size, false);
    const auto cmdbuf = scheduler.CommandBuffer();
    cmdbuf.bindIndexBuffer(vk_buffer->Handle(), offset, index_type);
    return regs.num_indices;
}

std::pair<Buffer*, u32> BufferCache::ObtainBuffer(VAddr device_addr, u32 size, bool is_written) {
    std::scoped_lock lk{mutex};
    static constexpr u64 StreamThreshold = CACHING_PAGESIZE;
    const bool is_gpu_dirty = memory_tracker.IsRegionGpuModified(device_addr, size);
    if (!is_written && size < StreamThreshold && !is_gpu_dirty) {
        // For small uniform buffers that have not been modified by gpu
        // use device local stream buffer to reduce renderpass breaks.
        const u64 offset = stream_buffer.Copy(device_addr, size, instance.UniformMinAlignment());
        return {&stream_buffer, offset};
    }

    const BufferId buffer_id = FindBuffer(device_addr, size);
    Buffer& buffer = slot_buffers[buffer_id];
    SynchronizeBuffer(buffer, device_addr, size);
    if (is_written) {
        memory_tracker.MarkRegionAsGpuModified(device_addr, size);
    }
    return {&buffer, buffer.Offset(device_addr)};
}

bool BufferCache::IsRegionRegistered(VAddr addr, size_t size) {
    const VAddr end_addr = addr + size;
    const u64 page_end = Common::DivCeil(end_addr, CACHING_PAGESIZE);
    for (u64 page = addr >> CACHING_PAGEBITS; page < page_end;) {
        const BufferId buffer_id = page_table[page];
        if (!buffer_id) {
            ++page;
            continue;
        }
        Buffer& buffer = slot_buffers[buffer_id];
        const VAddr buf_start_addr = buffer.CpuAddr();
        const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes();
        if (buf_start_addr < end_addr && addr < buf_end_addr) {
            return true;
        }
        page = Common::DivCeil(end_addr, CACHING_PAGESIZE);
    }
    return false;
}

bool BufferCache::IsRegionCpuModified(VAddr addr, size_t size) {
    return memory_tracker.IsRegionCpuModified(addr, size);
}

BufferId BufferCache::FindBuffer(VAddr device_addr, u32 size) {
    if (device_addr == 0) {
        return NULL_BUFFER_ID;
    }
    const u64 page = device_addr >> CACHING_PAGEBITS;
    const BufferId buffer_id = page_table[page];
    if (!buffer_id) {
        return CreateBuffer(device_addr, size);
    }
    const Buffer& buffer = slot_buffers[buffer_id];
    if (buffer.IsInBounds(device_addr, size)) {
        return buffer_id;
    }
    return CreateBuffer(device_addr, size);
}

BufferCache::OverlapResult BufferCache::ResolveOverlaps(VAddr device_addr, u32 wanted_size) {
    static constexpr int STREAM_LEAP_THRESHOLD = 16;
    boost::container::small_vector<BufferId, 16> overlap_ids;
    VAddr begin = device_addr;
    VAddr end = device_addr + wanted_size;
    int stream_score = 0;
    bool has_stream_leap = false;
    const auto expand_begin = [&](VAddr add_value) {
        static constexpr VAddr min_page = CACHING_PAGESIZE + DEVICE_PAGESIZE;
        if (add_value > begin - min_page) {
            begin = min_page;
            device_addr = DEVICE_PAGESIZE;
            return;
        }
        begin -= add_value;
        device_addr = begin - CACHING_PAGESIZE;
    };
    const auto expand_end = [&](VAddr add_value) {
        static constexpr VAddr max_page = 1ULL << MemoryTracker::MAX_CPU_PAGE_BITS;
        if (add_value > max_page - end) {
            end = max_page;
            return;
        }
        end += add_value;
    };
    if (begin == 0) {
        return OverlapResult{
            .ids = std::move(overlap_ids),
            .begin = begin,
            .end = end,
            .has_stream_leap = has_stream_leap,
        };
    }
    for (; device_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE);
         device_addr += CACHING_PAGESIZE) {
        const BufferId overlap_id = page_table[device_addr >> CACHING_PAGEBITS];
        if (!overlap_id) {
            continue;
        }
        Buffer& overlap = slot_buffers[overlap_id];
        if (overlap.is_picked) {
            continue;
        }
        overlap_ids.push_back(overlap_id);
        overlap.is_picked = true;
        const VAddr overlap_device_addr = overlap.CpuAddr();
        const bool expands_left = overlap_device_addr < begin;
        if (expands_left) {
            begin = overlap_device_addr;
        }
        const VAddr overlap_end = overlap_device_addr + overlap.SizeBytes();
        const bool expands_right = overlap_end > end;
        if (overlap_end > end) {
            end = overlap_end;
        }
        stream_score += overlap.StreamScore();
        if (stream_score > STREAM_LEAP_THRESHOLD && !has_stream_leap) {
            // When this memory region has been joined a bunch of times, we assume it's being used
            // as a stream buffer. Increase the size to skip constantly recreating buffers.
            has_stream_leap = true;
            if (expands_right) {
                expand_begin(CACHING_PAGESIZE * 128);
            }
            if (expands_left) {
                expand_end(CACHING_PAGESIZE * 128);
            }
        }
    }
    return OverlapResult{
        .ids = std::move(overlap_ids),
        .begin = begin,
        .end = end,
        .has_stream_leap = has_stream_leap,
    };
}

void BufferCache::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id,
                              bool accumulate_stream_score) {
    Buffer& new_buffer = slot_buffers[new_buffer_id];
    Buffer& overlap = slot_buffers[overlap_id];
    if (accumulate_stream_score) {
        new_buffer.IncreaseStreamScore(overlap.StreamScore() + 1);
    }
    const size_t dst_base_offset = overlap.CpuAddr() - new_buffer.CpuAddr();
    const vk::BufferCopy copy = {
        .srcOffset = 0,
        .dstOffset = dst_base_offset,
        .size = overlap.SizeBytes(),
    };
    scheduler.EndRendering();
    const auto cmdbuf = scheduler.CommandBuffer();
    static constexpr vk::MemoryBarrier READ_BARRIER{
        .srcAccessMask = vk::AccessFlagBits::eMemoryWrite,
        .dstAccessMask = vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite,
    };
    static constexpr vk::MemoryBarrier WRITE_BARRIER{
        .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
        .dstAccessMask = vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite,
    };
    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eAllCommands,
                           vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlagBits::eByRegion,
                           READ_BARRIER, {}, {});
    cmdbuf.copyBuffer(overlap.buffer, new_buffer.buffer, copy);
    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                           vk::PipelineStageFlagBits::eAllCommands,
                           vk::DependencyFlagBits::eByRegion, WRITE_BARRIER, {}, {});
    DeleteBuffer(overlap_id, true);
}

BufferId BufferCache::CreateBuffer(VAddr device_addr, u32 wanted_size) {
    const VAddr device_addr_end = Common::AlignUp(device_addr + wanted_size, CACHING_PAGESIZE);
    device_addr = Common::AlignDown(device_addr, CACHING_PAGESIZE);
    wanted_size = static_cast<u32>(device_addr_end - device_addr);
    const OverlapResult overlap = ResolveOverlaps(device_addr, wanted_size);
    const u32 size = static_cast<u32>(overlap.end - overlap.begin);
    const BufferId new_buffer_id =
        slot_buffers.insert(instance, MemoryUsage::DeviceLocal, overlap.begin, size);
    auto& new_buffer = slot_buffers[new_buffer_id];
    const size_t size_bytes = new_buffer.SizeBytes();
    const auto cmdbuf = scheduler.CommandBuffer();
    scheduler.EndRendering();
    cmdbuf.fillBuffer(new_buffer.buffer, 0, size_bytes, 0);
    for (const BufferId overlap_id : overlap.ids) {
        JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
    }
    Register(new_buffer_id);
    return new_buffer_id;
}

void BufferCache::Register(BufferId buffer_id) {
    ChangeRegister<true>(buffer_id);
}

void BufferCache::Unregister(BufferId buffer_id) {
    ChangeRegister<false>(buffer_id);
}

template <bool insert>
void BufferCache::ChangeRegister(BufferId buffer_id) {
    Buffer& buffer = slot_buffers[buffer_id];
    const auto size = buffer.SizeBytes();
    const VAddr device_addr_begin = buffer.CpuAddr();
    const VAddr device_addr_end = device_addr_begin + size;
    const u64 page_begin = device_addr_begin / CACHING_PAGESIZE;
    const u64 page_end = Common::DivCeil(device_addr_end, CACHING_PAGESIZE);
    for (u64 page = page_begin; page != page_end; ++page) {
        if constexpr (insert) {
            page_table[page] = buffer_id;
        } else {
            page_table[page] = BufferId{};
        }
    }
}

bool BufferCache::SynchronizeBuffer(Buffer& buffer, VAddr device_addr, u32 size) {
    boost::container::small_vector<vk::BufferCopy, 4> copies;
    u64 total_size_bytes = 0;
    u64 largest_copy = 0;
    VAddr buffer_start = buffer.CpuAddr();
    const auto add_copy = [&](VAddr device_addr_out, u64 range_size) {
        copies.push_back(vk::BufferCopy{
            .srcOffset = total_size_bytes,
            .dstOffset = device_addr_out - buffer_start,
            .size = range_size,
        });
        total_size_bytes += range_size;
        largest_copy = std::max(largest_copy, range_size);
    };
    memory_tracker.ForEachUploadRange(device_addr, size, [&](u64 device_addr_out, u64 range_size) {
        add_copy(device_addr_out, range_size);
        // Prevent uploading to gpu modified regions.
        // gpu_modified_ranges.ForEachNotInRange(device_addr_out, range_size, add_copy);
    });
    if (total_size_bytes == 0) {
        return true;
    }
    vk::Buffer src_buffer = staging_buffer.Handle();
    if (total_size_bytes < StagingBufferSize) {
        const auto [staging, offset] = staging_buffer.Map(total_size_bytes);
        for (auto& copy : copies) {
            u8* const src_pointer = staging + copy.srcOffset;
            const VAddr device_addr = buffer.CpuAddr() + copy.dstOffset;
            std::memcpy(src_pointer, std::bit_cast<const u8*>(device_addr), copy.size);
            // Apply the staging offset
            copy.srcOffset += offset;
        }
        staging_buffer.Commit();
    } else {
        // For large one time transfers use a temporary host buffer.
        // RenderDoc can lag quite a bit if the stream buffer is too large.
        Buffer temp_buffer{instance, MemoryUsage::Upload, 0, total_size_bytes};
        src_buffer = temp_buffer.Handle();
        u8* const staging = temp_buffer.mapped_data.data();
        for (auto& copy : copies) {
            u8* const src_pointer = staging + copy.srcOffset;
            const VAddr device_addr = buffer.CpuAddr() + copy.dstOffset;
            std::memcpy(src_pointer, std::bit_cast<const u8*>(device_addr), copy.size);
        }
        scheduler.DeferOperation([buffer = std::move(temp_buffer)]() mutable {});
    }
    scheduler.EndRendering();
    const auto cmdbuf = scheduler.CommandBuffer();
    static constexpr vk::MemoryBarrier READ_BARRIER{
        .srcAccessMask = vk::AccessFlagBits::eMemoryWrite,
        .dstAccessMask = vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite,
    };
    static constexpr vk::MemoryBarrier WRITE_BARRIER{
        .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
        .dstAccessMask = vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite,
    };
    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eAllCommands,
                           vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlagBits::eByRegion,
                           READ_BARRIER, {}, {});
    cmdbuf.copyBuffer(src_buffer, buffer.buffer, copies);
    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
                           vk::PipelineStageFlagBits::eAllCommands,
                           vk::DependencyFlagBits::eByRegion, WRITE_BARRIER, {}, {});
    return false;
}

void BufferCache::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
    // Mark the whole buffer as CPU written to stop tracking CPU writes
    if (!do_not_mark) {
        Buffer& buffer = slot_buffers[buffer_id];
        memory_tracker.MarkRegionAsCpuModified(buffer.CpuAddr(), buffer.SizeBytes());
    }
    Unregister(buffer_id);
    scheduler.DeferOperation([this, buffer_id] { slot_buffers.erase(buffer_id); });
}

} // namespace VideoCore
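CreateBuffer above snaps every request to whole caching pages before resolving overlaps: with CACHING_PAGESIZE = 4 KiB, a request such as [0x1040, 0x1040 + 0x30) grows to the page-aligned range [0x1000, 0x2000). A standalone sketch of that arithmetic (values are illustrative, not from the diff):

// Sketch of the page snapping done in BufferCache::CreateBuffer.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint64_t page_size = 1ULL << 12; // CACHING_PAGESIZE (4 KiB)
    std::uint64_t addr = 0x1040, size = 0x30;
    const std::uint64_t end = (addr + size + page_size - 1) & ~(page_size - 1); // AlignUp
    addr &= ~(page_size - 1);                                                   // AlignDown
    std::printf("aligned range: [%#llx, %#llx), %llu bytes\n", (unsigned long long)addr,
                (unsigned long long)end, (unsigned long long)(end - addr));
    return 0; // prints: aligned range: [0x1000, 0x2000), 4096 bytes
}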
src/video_core/buffer_cache/buffer_cache.h (new file, 129 lines)
@@ -0,0 +1,129 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <mutex>
#include <boost/container/small_vector.hpp>
#include <boost/icl/interval_map.hpp>
#include <tsl/robin_map.h>
#include "common/div_ceil.h"
#include "common/slot_vector.h"
#include "common/types.h"
#include "video_core/buffer_cache/buffer.h"
#include "video_core/buffer_cache/memory_tracker_base.h"
#include "video_core/multi_level_page_table.h"

namespace AmdGpu {
struct Liverpool;
}

namespace Shader {
struct Info;
}

namespace VideoCore {

using BufferId = Common::SlotId;

static constexpr BufferId NULL_BUFFER_ID{0};

static constexpr u32 NUM_VERTEX_BUFFERS = 32;

class BufferCache {
public:
    static constexpr u32 CACHING_PAGEBITS = 12;
    static constexpr u64 CACHING_PAGESIZE = u64{1} << CACHING_PAGEBITS;
    static constexpr u64 DEVICE_PAGESIZE = 4_KB;

    struct Traits {
        using Entry = BufferId;
        static constexpr size_t AddressSpaceBits = 39;
        static constexpr size_t FirstLevelBits = 14;
        static constexpr size_t PageBits = CACHING_PAGEBITS;
    };
    using PageTable = MultiLevelPageTable<Traits>;

    struct OverlapResult {
        boost::container::small_vector<BufferId, 16> ids;
        VAddr begin;
        VAddr end;
        bool has_stream_leap = false;
    };

public:
    explicit BufferCache(const Vulkan::Instance& instance, Vulkan::Scheduler& scheduler,
                         const AmdGpu::Liverpool* liverpool, PageManager& tracker);
    ~BufferCache();

    /// Invalidates any buffer in the logical page range.
    void InvalidateMemory(VAddr device_addr, u64 size);

    /// Binds host vertex buffers for the current draw.
    bool BindVertexBuffers(const Shader::Info& vs_info);

    /// Binds host index buffer for the current draw.
    u32 BindIndexBuffer(bool& is_indexed, u32 index_offset);

    /// Obtains a buffer for the specified region.
    [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(VAddr gpu_addr, u32 size, bool is_written);

    /// Returns true when a region is registered in the cache.
    [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);

    /// Returns true when a CPU region has been modified from the CPU.
    [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);

private:
    template <typename Func>
    void ForEachBufferInRange(VAddr device_addr, u64 size, Func&& func) {
        const u64 page_end = Common::DivCeil(device_addr + size, CACHING_PAGESIZE);
        for (u64 page = device_addr >> CACHING_PAGEBITS; page < page_end;) {
            const BufferId buffer_id = page_table[page];
            if (!buffer_id) {
                ++page;
                continue;
            }
            Buffer& buffer = slot_buffers[buffer_id];
            func(buffer_id, buffer);

            const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
            page = Common::DivCeil(end_addr, CACHING_PAGESIZE);
        }
    }

    void DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 size);

    [[nodiscard]] BufferId FindBuffer(VAddr device_addr, u32 size);

    [[nodiscard]] OverlapResult ResolveOverlaps(VAddr device_addr, u32 wanted_size);

    void JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, bool accumulate_stream_score);

    [[nodiscard]] BufferId CreateBuffer(VAddr device_addr, u32 wanted_size);

    void Register(BufferId buffer_id);

    void Unregister(BufferId buffer_id);

    template <bool insert>
    void ChangeRegister(BufferId buffer_id);

    bool SynchronizeBuffer(Buffer& buffer, VAddr device_addr, u32 size);

    void DeleteBuffer(BufferId buffer_id, bool do_not_mark = false);

    const Vulkan::Instance& instance;
    Vulkan::Scheduler& scheduler;
    const AmdGpu::Liverpool* liverpool;
    PageManager& tracker;
    StreamBuffer staging_buffer;
    StreamBuffer stream_buffer;
    std::recursive_mutex mutex;
    Common::SlotVector<Buffer> slot_buffers;
    MemoryTracker memory_tracker;
    PageTable page_table;
};

} // namespace VideoCore
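The page table Traits above (AddressSpaceBits = 39, FirstLevelBits = 14, PageBits = 12) imply a 27-bit page number split into a 14-bit first-level index and a 13-bit leaf index; the split itself lives in MultiLevelPageTable, which is not part of this excerpt. A sketch of how an address decomposes under those constants (the example address is arbitrary):

// Sketch: address split under Traits{AddressSpaceBits=39, FirstLevelBits=14, PageBits=12}.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr unsigned page_bits = 12, first_level_bits = 14, address_space_bits = 39;
    constexpr unsigned second_level_bits = address_space_bits - page_bits - first_level_bits; // 13
    const std::uint64_t addr = 0x12'3456'7000ULL;                            // example guest address
    const std::uint64_t page = addr >> page_bits;                            // 4 KiB page number
    const std::uint64_t first = page >> second_level_bits;                   // first-level index
    const std::uint64_t second = page & ((1ULL << second_level_bits) - 1);   // leaf index
    std::printf("page=%#llx first=%#llx second=%#llx\n", (unsigned long long)page,
                (unsigned long long)first, (unsigned long long)second);
    return 0;
}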
src/video_core/buffer_cache/memory_tracker_base.h (new file, 175 lines)
@@ -0,0 +1,175 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <algorithm>
#include <deque>
#include <type_traits>
#include <vector>
#include "common/types.h"
#include "video_core/buffer_cache/word_manager.h"

namespace VideoCore {

class MemoryTracker {
public:
    static constexpr size_t MAX_CPU_PAGE_BITS = 39;
    static constexpr size_t HIGHER_PAGE_BITS = 22;
    static constexpr size_t HIGHER_PAGE_SIZE = 1ULL << HIGHER_PAGE_BITS;
    static constexpr size_t HIGHER_PAGE_MASK = HIGHER_PAGE_SIZE - 1ULL;
    static constexpr size_t NUM_HIGH_PAGES = 1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS);
    static constexpr size_t MANAGER_POOL_SIZE = 32;
    static constexpr size_t WORDS_STACK_NEEDED = HIGHER_PAGE_SIZE / BYTES_PER_WORD;
    using Manager = WordManager<WORDS_STACK_NEEDED>;

public:
    explicit MemoryTracker(PageManager* tracker_) : tracker{tracker_} {}
    ~MemoryTracker() = default;

    /// Returns true if a region has been modified from the CPU
    [[nodiscard]] bool IsRegionCpuModified(VAddr query_cpu_addr, u64 query_size) noexcept {
        return IteratePages<true>(
            query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
                return manager->template IsRegionModified<Type::CPU>(offset, size);
            });
    }

    /// Returns true if a region has been modified from the GPU
    [[nodiscard]] bool IsRegionGpuModified(VAddr query_cpu_addr, u64 query_size) noexcept {
        return IteratePages<false>(
            query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
                return manager->template IsRegionModified<Type::GPU>(offset, size);
            });
    }

    /// Mark region as CPU modified, notifying the device_tracker about this change
    void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
        IteratePages<true>(dirty_cpu_addr, query_size,
                           [](Manager* manager, u64 offset, size_t size) {
                               manager->template ChangeRegionState<Type::CPU, true>(
                                   manager->GetCpuAddr() + offset, size);
                           });
    }

    /// Unmark region as CPU modified, notifying the device_tracker about this change
    void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
        IteratePages<true>(dirty_cpu_addr, query_size,
                           [](Manager* manager, u64 offset, size_t size) {
                               manager->template ChangeRegionState<Type::CPU, false>(
                                   manager->GetCpuAddr() + offset, size);
                           });
    }

    /// Mark region as modified from the host GPU
    void MarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
        IteratePages<true>(dirty_cpu_addr, query_size,
                           [](Manager* manager, u64 offset, size_t size) {
                               manager->template ChangeRegionState<Type::GPU, true>(
                                   manager->GetCpuAddr() + offset, size);
                           });
    }

    /// Unmark region as modified from the host GPU
    void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
        IteratePages<true>(dirty_cpu_addr, query_size,
                           [](Manager* manager, u64 offset, size_t size) {
                               manager->template ChangeRegionState<Type::GPU, false>(
                                   manager->GetCpuAddr() + offset, size);
                           });
    }

    /// Call 'func' for each CPU modified range and unmark those pages as CPU modified
    template <typename Func>
    void ForEachUploadRange(VAddr query_cpu_range, u64 query_size, Func&& func) {
        IteratePages<true>(query_cpu_range, query_size,
                           [&func](Manager* manager, u64 offset, size_t size) {
                               manager->template ForEachModifiedRange<Type::CPU, true>(
                                   manager->GetCpuAddr() + offset, size, func);
                           });
    }

    /// Call 'func' for each GPU modified range and unmark those pages as GPU modified
    template <bool clear, typename Func>
    void ForEachDownloadRange(VAddr query_cpu_range, u64 query_size, Func&& func) {
        IteratePages<false>(query_cpu_range, query_size,
                            [&func](Manager* manager, u64 offset, size_t size) {
                                if constexpr (clear) {
                                    manager->template ForEachModifiedRange<Type::GPU, true>(
                                        manager->GetCpuAddr() + offset, size, func);
                                } else {
                                    manager->template ForEachModifiedRange<Type::GPU, false>(
                                        manager->GetCpuAddr() + offset, size, func);
                                }
                            });
    }

private:
    /**
     * @brief IteratePages Iterates the L2 word manager page table.
     * @param cpu_address Start byte cpu address
     * @param size Size in bytes of the region to iterate.
     * @param func Callback for each word manager.
     * @return
     */
    template <bool create_region_on_fail, typename Func>
    bool IteratePages(VAddr cpu_address, size_t size, Func&& func) {
        using FuncReturn = typename std::invoke_result<Func, Manager*, u64, size_t>::type;
        static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
        std::size_t remaining_size{size};
        std::size_t page_index{cpu_address >> HIGHER_PAGE_BITS};
        u64 page_offset{cpu_address & HIGHER_PAGE_MASK};
        while (remaining_size > 0) {
            const std::size_t copy_amount{
                std::min<std::size_t>(HIGHER_PAGE_SIZE - page_offset, remaining_size)};
            auto* manager{top_tier[page_index]};
            if (manager) {
                if constexpr (BOOL_BREAK) {
                    if (func(manager, page_offset, copy_amount)) {
                        return true;
                    }
                } else {
                    func(manager, page_offset, copy_amount);
                }
            } else if constexpr (create_region_on_fail) {
                CreateRegion(page_index);
                manager = top_tier[page_index];
                if constexpr (BOOL_BREAK) {
                    if (func(manager, page_offset, copy_amount)) {
                        return true;
                    }
                } else {
                    func(manager, page_offset, copy_amount);
                }
            }
            page_index++;
            page_offset = 0;
            remaining_size -= copy_amount;
        }
        return false;
    }

    void CreateRegion(std::size_t page_index) {
        const VAddr base_cpu_addr = page_index << HIGHER_PAGE_BITS;
        if (free_managers.empty()) {
            manager_pool.emplace_back();
            auto& last_pool = manager_pool.back();
            for (size_t i = 0; i < MANAGER_POOL_SIZE; i++) {
                std::construct_at(&last_pool[i], tracker, 0, HIGHER_PAGE_SIZE);
                free_managers.push_back(&last_pool[i]);
            }
        }
        // Each manager tracks a 4_MB virtual address space.
        auto* new_manager = free_managers.back();
        new_manager->SetCpuAddress(base_cpu_addr);
        free_managers.pop_back();
        top_tier[page_index] = new_manager;
    }

    PageManager* tracker;
    std::deque<std::array<Manager, MANAGER_POOL_SIZE>> manager_pool;
    std::vector<Manager*> free_managers;
    std::array<Manager*, NUM_HIGH_PAGES> top_tier{};
};

} // namespace VideoCore
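MemoryTracker splits the 39-bit address space into 4 MiB regions (HIGHER_PAGE_BITS = 22), each owned by one WordManager; IteratePages walks those regions by computing a region index and an offset inside it. A sketch of that arithmetic with an example address (not taken from the diff):

// Sketch of the region split used by MemoryTracker::IteratePages.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint64_t higher_page_bits = 22;                     // 4 MiB regions
    constexpr std::uint64_t higher_page_size = 1ULL << higher_page_bits;
    constexpr std::uint64_t higher_page_mask = higher_page_size - 1;
    const std::uint64_t cpu_address = 0x0140'3000ULL;                  // example address
    const std::uint64_t page_index = cpu_address >> higher_page_bits;  // which WordManager
    const std::uint64_t page_offset = cpu_address & higher_page_mask;  // offset inside it
    std::printf("region %llu, offset %#llx of %#llx\n", (unsigned long long)page_index,
                (unsigned long long)page_offset, (unsigned long long)higher_page_size);
    return 0;
}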
src/video_core/buffer_cache/range_set.h (new file, 159 lines)
@@ -0,0 +1,159 @@
// SPDX-FileCopyrightText: 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <boost/icl/interval_map.hpp>
#include <boost/pool/pool.hpp>
#include <boost/pool/pool_alloc.hpp>
#include <boost/pool/poolfwd.hpp>
#include "common/types.h"

namespace VideoCore {

template <class T>
using RangeSetsAllocator =
    boost::fast_pool_allocator<T, boost::default_user_allocator_new_delete,
                               boost::details::pool::default_mutex, 1024, 2048>;

struct RangeSet {
    using IntervalSet =
        boost::icl::interval_set<VAddr, std::less,
                                 ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, VAddr, std::less),
                                 RangeSetsAllocator>;
    using IntervalType = typename IntervalSet::interval_type;

    explicit RangeSet() = default;
    ~RangeSet() = default;

    void Add(VAddr base_address, size_t size) {
        const VAddr end_address = base_address + size;
        IntervalType interval{base_address, end_address};
        m_ranges_set.add(interval);
    }

    void Subtract(VAddr base_address, size_t size) {
        const VAddr end_address = base_address + size;
        IntervalType interval{base_address, end_address};
        m_ranges_set.subtract(interval);
    }

    template <typename Func>
    void ForEach(Func&& func) const {
        if (m_ranges_set.empty()) {
            return;
        }
        auto it = m_ranges_set.begin();
        auto end_it = m_ranges_set.end();
        for (; it != end_it; it++) {
            const VAddr inter_addr_end = it->upper();
            const VAddr inter_addr = it->lower();
            func(inter_addr, inter_addr_end);
        }
    }

    template <typename Func>
    void ForEachInRange(VAddr base_addr, size_t size, Func&& func) const {
        if (m_ranges_set.empty()) {
            return;
        }
        const VAddr start_address = base_addr;
        const VAddr end_address = start_address + size;
        const IntervalType search_interval{start_address, end_address};
        auto it = m_ranges_set.lower_bound(search_interval);
        if (it == m_ranges_set.end()) {
            return;
        }
        auto end_it = m_ranges_set.upper_bound(search_interval);
        for (; it != end_it; it++) {
            VAddr inter_addr_end = it->upper();
            VAddr inter_addr = it->lower();
            if (inter_addr_end > end_address) {
                inter_addr_end = end_address;
            }
            if (inter_addr < start_address) {
                inter_addr = start_address;
            }
            func(inter_addr, inter_addr_end);
        }
    }

    IntervalSet m_ranges_set;
};

class RangeMap {
public:
    using IntervalMap =
        boost::icl::interval_map<VAddr, u64, boost::icl::partial_absorber, std::less,
                                 boost::icl::inplace_plus, boost::icl::inter_section,
                                 ICL_INTERVAL_INSTANCE(ICL_INTERVAL_DEFAULT, VAddr, std::less),
                                 RangeSetsAllocator>;
    using IntervalType = typename IntervalMap::interval_type;

public:
    RangeMap() = default;
    ~RangeMap() = default;

    RangeMap(RangeMap const&) = delete;
    RangeMap& operator=(RangeMap const&) = delete;

    RangeMap(RangeMap&& other);
    RangeMap& operator=(RangeMap&& other);

    void Add(VAddr base_address, size_t size, u64 value) {
        const VAddr end_address = base_address + size;
        IntervalType interval{base_address, end_address};
        m_ranges_map.add({interval, value});
    }

    void Subtract(VAddr base_address, size_t size) {
        const VAddr end_address = base_address + size;
        IntervalType interval{base_address, end_address};
        m_ranges_map -= interval;
    }

    template <typename Func>
    void ForEachInRange(VAddr base_addr, size_t size, Func&& func) const {
        if (m_ranges_map.empty()) {
            return;
        }
        const VAddr start_address = base_addr;
        const VAddr end_address = start_address + size;
        const IntervalType search_interval{start_address, end_address};
        auto it = m_ranges_map.lower_bound(search_interval);
        if (it == m_ranges_map.end()) {
            return;
        }
        auto end_it = m_ranges_map.upper_bound(search_interval);
        for (; it != end_it; it++) {
            VAddr inter_addr_end = it->first.upper();
            VAddr inter_addr = it->first.lower();
            if (inter_addr_end > end_address) {
                inter_addr_end = end_address;
            }
            if (inter_addr < start_address) {
                inter_addr = start_address;
            }
            func(inter_addr, inter_addr_end, it->second);
        }
    }

    template <typename Func>
    void ForEachNotInRange(VAddr base_addr, size_t size, Func&& func) const {
        const VAddr end_addr = base_addr + size;
        ForEachInRange(base_addr, size, [&](VAddr range_addr, VAddr range_end, u64) {
            if (size_t gap_size = range_addr - base_addr; gap_size != 0) {
                func(base_addr, gap_size);
            }
            base_addr = range_end;
        });
        if (base_addr != end_addr) {
            func(base_addr, end_addr - base_addr);
        }
    }

private:
    IntervalMap m_ranges_map;
};

} // namespace VideoCore
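ForEachNotInRange above visits the gaps of a queried window, i.e. the sub-ranges not covered by any tracked interval; the commented-out call in SynchronizeBuffer hints at its intended use for skipping GPU-modified regions. A small usage sketch with made-up addresses:

// Sketch: enumerating untracked gaps with RangeMap (addresses are examples).
#include <cstdio>
#include "video_core/buffer_cache/range_set.h"

int main() {
    VideoCore::RangeMap map;
    map.Add(0x1000, 0x1000, /*value=*/1); // tracked: [0x1000, 0x2000)
    map.Add(0x3000, 0x1000, /*value=*/2); // tracked: [0x3000, 0x4000)
    // Query [0x0000, 0x5000): expect gaps [0x0000,0x1000), [0x2000,0x3000), [0x4000,0x5000).
    map.ForEachNotInRange(0x0000, 0x5000, [](VAddr gap_addr, u64 gap_size) {
        std::printf("gap at %#llx, %llu bytes\n", (unsigned long long)gap_addr,
                    (unsigned long long)gap_size);
    });
    return 0;
}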
src/video_core/buffer_cache/word_manager.h (new file, 398 lines)
@@ -0,0 +1,398 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <span>
|
||||
#include <utility>
|
||||
#include "common/div_ceil.h"
|
||||
#include "common/types.h"
|
||||
#include "video_core/page_manager.h"
|
||||
|
||||
namespace VideoCore {
|
||||
|
||||
constexpr u64 PAGES_PER_WORD = 64;
|
||||
constexpr u64 BYTES_PER_PAGE = 4_KB;
|
||||
constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;
|
||||
|
||||
enum class Type {
|
||||
CPU,
|
||||
GPU,
|
||||
Untracked,
|
||||
};
|
||||
|
||||
/// Vector tracking modified pages tightly packed with small vector optimization
|
||||
template <size_t stack_words = 1>
|
||||
struct WordsArray {
|
||||
/// Returns the pointer to the words state
|
||||
[[nodiscard]] const u64* Pointer(bool is_short) const noexcept {
|
||||
return is_short ? stack.data() : heap;
|
||||
}
|
||||
|
||||
/// Returns the pointer to the words state
|
||||
[[nodiscard]] u64* Pointer(bool is_short) noexcept {
|
||||
return is_short ? stack.data() : heap;
|
||||
}
|
||||
|
||||
std::array<u64, stack_words> stack{}; ///< Small buffers storage
|
||||
u64* heap; ///< Not-small buffers pointer to the storage
|
||||
};
|
||||
|
||||
template <size_t stack_words = 1>
|
||||
struct Words {
|
||||
explicit Words() = default;
|
||||
explicit Words(u64 size_bytes_) : size_bytes{size_bytes_} {
|
||||
num_words = Common::DivCeil(size_bytes, BYTES_PER_WORD);
|
||||
if (IsShort()) {
|
||||
cpu.stack.fill(~u64{0});
|
||||
gpu.stack.fill(0);
|
||||
untracked.stack.fill(~u64{0});
|
||||
} else {
|
||||
// Share allocation between CPU and GPU pages and set their default values
|
||||
u64* const alloc = new u64[num_words * 3];
|
||||
cpu.heap = alloc;
|
||||
gpu.heap = alloc + num_words;
|
||||
untracked.heap = alloc + num_words * 2;
|
||||
std::fill_n(cpu.heap, num_words, ~u64{0});
|
||||
std::fill_n(gpu.heap, num_words, 0);
|
||||
std::fill_n(untracked.heap, num_words, ~u64{0});
|
||||
}
|
||||
// Clean up tailing bits
|
||||
const u64 last_word_size = size_bytes % BYTES_PER_WORD;
|
||||
const u64 last_local_page = Common::DivCeil(last_word_size, BYTES_PER_PAGE);
|
||||
const u64 shift = (PAGES_PER_WORD - last_local_page) % PAGES_PER_WORD;
|
||||
const u64 last_word = (~u64{0} << shift) >> shift;
|
||||
cpu.Pointer(IsShort())[NumWords() - 1] = last_word;
|
||||
untracked.Pointer(IsShort())[NumWords() - 1] = last_word;
|
||||
}

    ~Words() {
        Release();
    }

    Words& operator=(Words&& rhs) noexcept {
        Release();
        size_bytes = rhs.size_bytes;
        num_words = rhs.num_words;
        cpu = rhs.cpu;
        gpu = rhs.gpu;
        untracked = rhs.untracked;
        rhs.cpu.heap = nullptr;
        return *this;
    }

    Words(Words&& rhs) noexcept
        : size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
          untracked{rhs.untracked} {
        rhs.cpu.heap = nullptr;
    }

    Words& operator=(const Words&) = delete;
    Words(const Words&) = delete;

    /// Returns true when the buffer fits in the small vector optimization
    [[nodiscard]] bool IsShort() const noexcept {
        return num_words <= stack_words;
    }

    /// Returns the number of words of the buffer
    [[nodiscard]] size_t NumWords() const noexcept {
        return num_words;
    }

    /// Release buffer resources
    void Release() {
        if (!IsShort()) {
            // The CPU word array is the base of the shared heap allocation
            delete[] cpu.heap;
        }
    }

    template <Type type>
    std::span<u64> Span() noexcept {
        if constexpr (type == Type::CPU) {
            return std::span<u64>(cpu.Pointer(IsShort()), num_words);
        } else if constexpr (type == Type::GPU) {
            return std::span<u64>(gpu.Pointer(IsShort()), num_words);
        } else if constexpr (type == Type::Untracked) {
            return std::span<u64>(untracked.Pointer(IsShort()), num_words);
        }
    }

    template <Type type>
    std::span<const u64> Span() const noexcept {
        if constexpr (type == Type::CPU) {
            return std::span<const u64>(cpu.Pointer(IsShort()), num_words);
        } else if constexpr (type == Type::GPU) {
            return std::span<const u64>(gpu.Pointer(IsShort()), num_words);
        } else if constexpr (type == Type::Untracked) {
            return std::span<const u64>(untracked.Pointer(IsShort()), num_words);
        }
    }

    u64 size_bytes = 0;
    size_t num_words = 0;
    WordsArray<stack_words> cpu;
    WordsArray<stack_words> gpu;
    WordsArray<stack_words> untracked;
};

template <size_t stack_words = 1>
class WordManager {
public:
    explicit WordManager(PageManager* tracker_, VAddr cpu_addr_, u64 size_bytes)
        : tracker{tracker_}, cpu_addr{cpu_addr_}, words{size_bytes} {}

    explicit WordManager() = default;

    void SetCpuAddress(VAddr new_cpu_addr) {
        cpu_addr = new_cpu_addr;
    }

    VAddr GetCpuAddr() const {
        return cpu_addr;
    }

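    /// Masks out the bits of `word` that fall outside the page range [page_start, page_end).
    /// Illustrative example: ExtractBits(~u64{0}, 2, 5) == 0b1'1100 (pages 2, 3 and 4 set).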
    static u64 ExtractBits(u64 word, size_t page_start, size_t page_end) {
        constexpr size_t number_bits = sizeof(u64) * 8;
        const size_t limit_page_end = number_bits - std::min(page_end, number_bits);
        u64 bits = (word >> page_start) << page_start;
        bits = (bits << limit_page_end) >> limit_page_end;
        return bits;
    }

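    /// Splits a byte offset into its word index and page index within that word.
    /// Illustrative example: GetWordPage(BYTES_PER_WORD + BYTES_PER_PAGE) == {1, 1}.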
    static std::pair<size_t, size_t> GetWordPage(VAddr address) {
        const size_t converted_address = static_cast<size_t>(address);
        const size_t word_number = converted_address / BYTES_PER_WORD;
        const size_t amount_pages = converted_address % BYTES_PER_WORD;
        return std::make_pair(word_number, amount_pages / BYTES_PER_PAGE);
    }

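    /// Calls `func(word_index, mask)` for every tracking word overlapping the byte range
    /// [offset, offset + size); `mask` has a bit set for each page of that word inside the
    /// range. If `func` returns bool, returning true stops the iteration early.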
    template <typename Func>
    void IterateWords(size_t offset, size_t size, Func&& func) const {
        using FuncReturn = std::invoke_result_t<Func, std::size_t, u64>;
        static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
        const size_t start = static_cast<size_t>(std::max<s64>(static_cast<s64>(offset), 0LL));
        const size_t end = static_cast<size_t>(std::max<s64>(static_cast<s64>(offset + size), 0LL));
        if (start >= SizeBytes() || end <= start) {
            return;
        }
        auto [start_word, start_page] = GetWordPage(start);
        auto [end_word, end_page] = GetWordPage(end + BYTES_PER_PAGE - 1ULL);
        const size_t num_words = NumWords();
        start_word = std::min(start_word, num_words);
        end_word = std::min(end_word, num_words);
        const size_t diff = end_word - start_word;
        end_word += (end_page + PAGES_PER_WORD - 1ULL) / PAGES_PER_WORD;
        end_word = std::min(end_word, num_words);
        end_page += diff * PAGES_PER_WORD;
        constexpr u64 base_mask{~0ULL};
        for (size_t word_index = start_word; word_index < end_word; word_index++) {
            const u64 mask = ExtractBits(base_mask, start_page, end_page);
            start_page = 0;
            end_page -= PAGES_PER_WORD;
            if constexpr (BOOL_BREAK) {
                if (func(word_index, mask)) {
                    return;
                }
            } else {
                func(word_index, mask);
            }
        }
    }

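    /// Calls `func(page_offset, page_count)` for every run of consecutive set bits in `mask`.
    /// Illustrative example: a mask of 0b0110'1100 yields func(2, 2) followed by func(5, 2).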
    template <typename Func>
    void IteratePages(u64 mask, Func&& func) const {
        size_t offset = 0;
        while (mask != 0) {
            const size_t empty_bits = std::countr_zero(mask);
            offset += empty_bits;
            mask = mask >> empty_bits;

            const size_t continuous_bits = std::countr_one(mask);
            func(offset, continuous_bits);
            mask = continuous_bits < PAGES_PER_WORD ? (mask >> continuous_bits) : 0;
            offset += continuous_bits;
        }
    }

    /**
     * Change the state of a range of pages
     *
     * @param dirty_addr Base address to mark or unmark as modified
     * @param size Size in bytes to mark or unmark as modified
     */
    template <Type type, bool enable>
    void ChangeRegionState(u64 dirty_addr, u64 size) noexcept(type == Type::GPU) {
        std::span<u64> state_words = words.template Span<type>();
        [[maybe_unused]] std::span<u64> untracked_words = words.template Span<Type::Untracked>();
        IterateWords(dirty_addr - cpu_addr, size, [&](size_t index, u64 mask) {
            if constexpr (type == Type::CPU) {
                NotifyPageTracker<!enable>(index, untracked_words[index], mask);
            }
            if constexpr (enable) {
                state_words[index] |= mask;
                if constexpr (type == Type::CPU) {
                    untracked_words[index] |= mask;
                }
            } else {
                state_words[index] &= ~mask;
                if constexpr (type == Type::CPU) {
                    untracked_words[index] &= ~mask;
                }
            }
        });
    }
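
    // Illustrative usage: ChangeRegionState<Type::CPU, true>(addr, size) marks the pages
    // covering [addr, addr + size) as CPU-modified (and untracked), while
    // ChangeRegionState<Type::GPU, false>(addr, size) clears the GPU-modified bits.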

    /**
     * Loop over each page in the given range; when `clear` is set, turn off those bits and
     * notify the tracker if needed. Call the given function on each modified range found.
     *
     * @param query_cpu_range Base CPU address to loop over
     * @param size Size in bytes of the CPU range to loop over
     * @param func Function to call for each modified region
     */
    template <Type type, bool clear, typename Func>
    void ForEachModifiedRange(VAddr query_cpu_range, s64 size, Func&& func) {
        static_assert(type != Type::Untracked);

        std::span<u64> state_words = words.template Span<type>();
        [[maybe_unused]] std::span<u64> untracked_words = words.template Span<Type::Untracked>();
        const size_t offset = query_cpu_range - cpu_addr;
        bool pending = false;
        size_t pending_offset{};
        size_t pending_pointer{};
        const auto release = [&]() {
            func(cpu_addr + pending_offset * BYTES_PER_PAGE,
                 (pending_pointer - pending_offset) * BYTES_PER_PAGE);
        };
        IterateWords(offset, size, [&](size_t index, u64 mask) {
            if constexpr (type == Type::GPU) {
                mask &= ~untracked_words[index];
            }
            const u64 word = state_words[index] & mask;
            if constexpr (clear) {
                if constexpr (type == Type::CPU) {
                    NotifyPageTracker<true>(index, untracked_words[index], mask);
                }
                state_words[index] &= ~mask;
                if constexpr (type == Type::CPU) {
                    untracked_words[index] &= ~mask;
                }
            }
            const size_t base_offset = index * PAGES_PER_WORD;
            IteratePages(word, [&](size_t pages_offset, size_t pages_size) {
                const auto reset = [&]() {
                    pending_offset = base_offset + pages_offset;
                    pending_pointer = base_offset + pages_offset + pages_size;
                };
                if (!pending) {
                    reset();
                    pending = true;
                    return;
                }
                if (pending_pointer == base_offset + pages_offset) {
                    pending_pointer += pages_size;
                    return;
                }
                release();
                reset();
            });
        });
        if (pending) {
            release();
        }
    }
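
    // Illustrative usage: ForEachModifiedRange<Type::CPU, true>(addr, size, func) walks the
    // CPU-dirty pages in [addr, addr + size), clears their dirty bits, and invokes
    // func(range_addr, range_size) once per contiguous dirty range (e.g. to upload it).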

    /**
     * Returns true when a region has been modified
     *
     * @param offset Offset in bytes from the start of the buffer
     * @param size Size in bytes of the region to query for modifications
     */
    template <Type type>
    [[nodiscard]] bool IsRegionModified(u64 offset, u64 size) const noexcept {
        static_assert(type != Type::Untracked);

        const std::span<const u64> state_words = words.template Span<type>();
        [[maybe_unused]] const std::span<const u64> untracked_words =
            words.template Span<Type::Untracked>();
        bool result = false;
        IterateWords(offset, size, [&](size_t index, u64 mask) {
            if constexpr (type == Type::GPU) {
                mask &= ~untracked_words[index];
            }
            const u64 word = state_words[index] & mask;
            if (word != 0) {
                result = true;
                return true;
            }
            return false;
        });
        return result;
    }

    /// Returns the number of words of the manager
    [[nodiscard]] size_t NumWords() const noexcept {
        return words.NumWords();
    }

    /// Returns the size in bytes of the manager
    [[nodiscard]] u64 SizeBytes() const noexcept {
        return words.size_bytes;
    }

    /// Returns true when the buffer fits in the small vector optimization
    [[nodiscard]] bool IsShort() const noexcept {
        return words.IsShort();
    }

private:
    template <Type type>
    u64* Array() noexcept {
        if constexpr (type == Type::CPU) {
            return words.cpu.Pointer(IsShort());
        } else if constexpr (type == Type::GPU) {
            return words.gpu.Pointer(IsShort());
        } else if constexpr (type == Type::Untracked) {
            return words.untracked.Pointer(IsShort());
        }
    }

    template <Type type>
    const u64* Array() const noexcept {
        if constexpr (type == Type::CPU) {
            return words.cpu.Pointer(IsShort());
        } else if constexpr (type == Type::GPU) {
            return words.gpu.Pointer(IsShort());
        } else if constexpr (type == Type::Untracked) {
            return words.untracked.Pointer(IsShort());
        }
    }

    /**
     * Notify tracker about changes in the CPU tracking state of a word in the buffer
     *
     * @param word_index Index to the word to notify to the tracker
     * @param current_bits Current state of the word
     * @param new_bits New state of the word
     *
     * @tparam add_to_tracker True when the tracker should start tracking the new pages
     */
    template <bool add_to_tracker>
    void NotifyPageTracker(u64 word_index, u64 current_bits, u64 new_bits) const {
        u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits;
        VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
        IteratePages(changed_bits, [&](size_t offset, size_t size) {
            tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, size * BYTES_PER_PAGE,
                                            add_to_tracker ? 1 : -1);
        });
    }
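
    // Note: only pages whose untracked bit actually differs from the requested state are
    // forwarded to the PageManager, so each page's cached-page counter is adjusted at most
    // once per transition between the tracked and untracked states.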

    PageManager* tracker;
    VAddr cpu_addr = 0;
    Words<stack_words> words;
};

} // namespace VideoCore