Use RangeSet

This commit is contained in:
Lander Gallastegi 2025-07-05 18:07:22 +02:00
parent 31ac54258a
commit 47c43df544
6 changed files with 20 additions and 25 deletions

View file

@@ -921,7 +921,6 @@ set(VIDEO_CORE src/video_core/amdgpu/liverpool.cpp
src/video_core/buffer_cache/buffer_cache.cpp
src/video_core/buffer_cache/buffer_cache.h
src/video_core/buffer_cache/memory_tracker.h
src/video_core/buffer_cache/range_set.h
src/video_core/buffer_cache/region_definitions.h
src/video_core/buffer_cache/region_manager.h
src/video_core/renderer_vulkan/liverpool_to_vk.cpp
@@ -980,6 +979,7 @@ set(VIDEO_CORE src/video_core/amdgpu/liverpool.cpp
src/video_core/page_manager.cpp
src/video_core/page_manager.h
src/video_core/multi_level_page_table.h
src/video_core/range_set.h
src/video_core/renderdoc.cpp
src/video_core/renderdoc.h
)

View file

@@ -154,9 +154,8 @@ void BufferCache::DownloadBufferMemory(Buffer& buffer, VAddr device_addr, u64 si
memory_tracker->ForEachDownloadRange<false>(
device_addr, size, [&](u64 device_addr_out, u64 range_size) {
const VAddr buffer_addr = buffer.CpuAddr();
const auto add_download = [&](VAddr start, VAddr end) {
const auto add_download = [&](VAddr start, u64 new_size) {
const u64 new_offset = start - buffer_addr;
const u64 new_size = end - start;
copies.push_back(vk::BufferCopy{
.srcOffset = new_offset,
.dstOffset = total_size_bytes,

View file

@@ -8,8 +8,8 @@
#include "common/slot_vector.h"
#include "common/types.h"
#include "video_core/buffer_cache/buffer.h"
#include "video_core/buffer_cache/range_set.h"
#include "video_core/multi_level_page_table.h"
#include "video_core/range_set.h"
namespace AmdGpu {
struct Liverpool;

View file

@@ -66,7 +66,7 @@ struct RangeSet {
for (const auto& set : m_ranges_set) {
const VAddr inter_addr_end = set.upper();
const VAddr inter_addr = set.lower();
func(inter_addr, inter_addr_end);
func(inter_addr, inter_addr_end - inter_addr);
}
}
@@ -92,7 +92,7 @@ struct RangeSet {
if (inter_addr < start_address) {
inter_addr = start_address;
}
func(inter_addr, inter_addr_end);
func(inter_addr, inter_addr_end - inter_addr);
}
}
@@ -170,7 +170,7 @@ public:
for (const auto& [interval, value] : m_ranges_map) {
const VAddr inter_addr_end = interval.upper();
const VAddr inter_addr = interval.lower();
func(inter_addr, inter_addr_end, value);
func(inter_addr, inter_addr_end - inter_addr, value);
}
}
@@ -196,7 +196,7 @@ public:
if (inter_addr < start_address) {
inter_addr = start_address;
}
func(inter_addr, inter_addr_end, it->second);
func(inter_addr, inter_addr_end - inter_addr, it->second);
}
}
@@ -274,7 +274,7 @@ public:
for (const auto& [interval, value] : m_ranges_map) {
const VAddr inter_addr_end = interval.upper();
const VAddr inter_addr = interval.lower();
func(inter_addr, inter_addr_end, value);
func(inter_addr, inter_addr_end - inter_addr, value);
}
}
@@ -300,7 +300,7 @@ public:
if (inter_addr < start_address) {
inter_addr = start_address;
}
func(inter_addr, inter_addr_end, it->second);
func(inter_addr, inter_addr_end - inter_addr, it->second);
}
}

View file

@@ -475,10 +475,10 @@ bool Rasterizer::BindResources(const Pipeline* pipeline) {
// We only use fault buffer for DMA right now.
{
Common::RecursiveSharedLock lock{mapped_ranges_mutex};
for (auto& range : mapped_ranges) {
buffer_cache.SynchronizeBuffersInRange(range.lower(),
range.upper() - range.lower());
}
mapped_ranges.ForEach(
[&](const VAddr addr, u64 size) {
buffer_cache.SynchronizeBuffersInRange(addr, size);
});
}
buffer_cache.MemoryBarrier();
}
@@ -979,16 +979,14 @@ bool Rasterizer::IsMapped(VAddr addr, u64 size) {
// There is no memory, so not mapped.
return false;
}
const auto range = decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
Common::RecursiveSharedLock lock{mapped_ranges_mutex};
return boost::icl::contains(mapped_ranges, range);
return mapped_ranges.Contains(addr, size);
}
void Rasterizer::MapMemory(VAddr addr, u64 size) {
{
std::scoped_lock lock{mapped_ranges_mutex};
mapped_ranges += decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
mapped_ranges.Add(addr, size);
}
page_manager.OnGpuMap(addr, size);
}
@@ -999,7 +997,7 @@ void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
page_manager.OnGpuUnmap(addr, size);
{
std::scoped_lock lock{mapped_ranges_mutex};
mapped_ranges -= decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
mapped_ranges.Subtract(addr, size);
}
}

View file

@@ -9,6 +9,7 @@
#include "video_core/page_manager.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/range_set.h"
namespace AmdGpu {
struct Liverpool;
@@ -75,11 +76,8 @@ public:
template <typename Func>
void ForEachMappedRangeInRange(VAddr addr, u64 size, Func&& func) {
const auto range = decltype(mapped_ranges)::interval_type::right_open(addr, addr + size);
Common::RecursiveSharedLock lock{mapped_ranges_mutex};
for (const auto& mapped_range : (mapped_ranges & range)) {
func(mapped_range);
}
Common::RecursiveSharedLock lk(mapped_ranges_mutex);
mapped_ranges.ForEachInRange(addr, size, std::forward<Func>(func));
}
private:
@@ -121,7 +119,7 @@ private:
VideoCore::TextureCache texture_cache;
AmdGpu::Liverpool* liverpool;
Core::MemoryManager* memory;
boost::icl::interval_set<VAddr> mapped_ranges;
VideoCore::RangeSet mapped_ranges;
Common::SharedFirstMutex mapped_ranges_mutex;
PipelineCache pipeline_cache;