cache: Invalidate pages for file reads. (#1726)

* cache: Invalidate pages for file reads.

* texture_cache: Simplify invalidate intersection check.

* vk_rasterizer: Make aware of mapped memory ranges.

* buffer_cache: Remove redundant page calculations.

Called functions will convert to page numbers/addresses themselves.

* file_system: Simplify memory invalidation and add a few missed cases.
Commit 14f7dc3527 (parent e612e881ac)
Author: squidbus
Committed via GitHub on 2024-12-11 11:11:24 -08:00

10 changed files with 74 additions and 60 deletions
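The core of this change: the GPU-mapped interval set moves out of `PageManager::Impl` (`owned_ranges`) and into the rasterizer (`mapped_ranges`), so any caller can cheaply test whether an address range is GPU-mapped before doing invalidation work. A minimal sketch of that bookkeeping, using `boost::icl` the same way the diffs below do (the `MappedTracker` wrapper is illustrative, not a type from the codebase):

```cpp
#include <cstdint>
#include <boost/icl/interval_set.hpp>

using VAddr = std::uintptr_t; // stand-in for the emulator's VAddr alias

// Illustrative wrapper around the rasterizer's mapped-range bookkeeping.
struct MappedTracker {
    boost::icl::interval_set<VAddr> mapped_ranges;

    void Map(VAddr addr, std::uint64_t size) {
        mapped_ranges += boost::icl::interval<VAddr>::right_open(addr, addr + size);
    }
    void Unmap(VAddr addr, std::uint64_t size) {
        mapped_ranges -= boost::icl::interval<VAddr>::right_open(addr, addr + size);
    }
    bool IsMapped(VAddr addr, std::uint64_t size) {
        if (size == 0) {
            return false; // empty ranges are never considered mapped
        }
        // find() returns an iterator to the first stored interval that
        // intersects the query, so this is an overlap test.
        return mapped_ranges.find(boost::icl::interval<VAddr>::right_open(addr, addr + size)) !=
               mapped_ranges.end();
    }
};
```

Because `find` matches any overlap, a range that only partially intersects GPU memory still reports as mapped, which is the conservative behavior invalidation needs.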

src/video_core/page_manager.cpp

@@ -114,8 +114,7 @@ struct PageManager::Impl {
             // Notify rasterizer about the fault.
             const VAddr addr = msg.arg.pagefault.address;
-            const VAddr addr_page = GetPageAddr(addr);
-            rasterizer->InvalidateMemory(addr, addr_page, PAGESIZE);
+            rasterizer->InvalidateMemory(addr, 1);
         }
     }
@@ -135,17 +134,14 @@ struct PageManager::Impl {
     }
 
     void OnMap(VAddr address, size_t size) {
-        owned_ranges += boost::icl::interval<VAddr>::right_open(address, address + size);
+        // No-op
     }
 
     void OnUnmap(VAddr address, size_t size) {
-        owned_ranges -= boost::icl::interval<VAddr>::right_open(address, address + size);
+        // No-op
     }
 
     void Protect(VAddr address, size_t size, bool allow_write) {
-        ASSERT_MSG(owned_ranges.find(address) != owned_ranges.end(),
-                   "Attempted to track non-GPU memory at address {:#x}, size {:#x}.", address,
-                   size);
         auto* memory = Core::Memory::Instance();
         auto& impl = memory->GetAddressSpace();
         impl.Protect(address, size,
@@ -155,17 +151,13 @@ struct PageManager::Impl {
     static bool GuestFaultSignalHandler(void* context, void* fault_address) {
         const auto addr = reinterpret_cast<VAddr>(fault_address);
-        const bool is_write = Common::IsWriteError(context);
-        if (is_write && owned_ranges.find(addr) != owned_ranges.end()) {
-            const VAddr addr_aligned = GetPageAddr(addr);
-            rasterizer->InvalidateMemory(addr, addr_aligned, PAGESIZE);
-            return true;
+        if (Common::IsWriteError(context)) {
+            return rasterizer->InvalidateMemory(addr, 1);
         }
         return false;
     }
 
     inline static Vulkan::Rasterizer* rasterizer;
-    inline static boost::icl::interval_set<VAddr> owned_ranges;
 };
 
 #endif
@@ -210,6 +202,9 @@ void PageManager::UpdatePagesCachedCount(VAddr addr, u64 size, s32 delta) {
         const VAddr interval_start_addr = boost::icl::first(interval) << PageShift;
         const VAddr interval_end_addr = boost::icl::last_next(interval) << PageShift;
         const u32 interval_size = interval_end_addr - interval_start_addr;
+        ASSERT_MSG(rasterizer->IsMapped(interval_start_addr, interval_size),
+                   "Attempted to track non-GPU memory at address {:#x}, size {:#x}.",
+                   interval_start_addr, interval_size);
         if (delta > 0 && count == delta) {
             impl->Protect(interval_start_addr, interval_size, false);
         } else if (delta < 0 && count == -delta) {
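For context on `GuestFaultSignalHandler` above: on a write fault the handler now invalidates a single byte at the faulting address, and the boolean result doubles as "this fault belonged to GPU-tracked memory". A rough sketch of that contract as a POSIX `SIGSEGV` handler; the plumbing here is hypothetical (the real code detects write faults via `Common::IsWriteError` and installs handlers per platform):

```cpp
#include <csignal>
#include <cstdint>

using VAddr = std::uintptr_t;

// Hypothetical hook standing in for Rasterizer::InvalidateMemory: returns
// true when the range was GPU-mapped and the caches were invalidated
// (which also unprotects the page).
bool InvalidateMemory(VAddr addr, std::uint64_t size);

void FaultHandler(int sig, siginfo_t* info, void* /*ucontext*/) {
    const auto addr = reinterpret_cast<VAddr>(info->si_addr);
    // A real handler first inspects the CPU context to confirm this was a
    // *write* fault; omitted here for brevity.
    if (InvalidateMemory(addr, 1)) {
        return; // Page is writable again; the faulting store is retried.
    }
    std::signal(sig, SIG_DFL); // Not ours: restore default action and re-fault.
}
```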

src/video_core/renderer_vulkan/vk_rasterizer.cpp

@@ -841,12 +841,27 @@ u32 Rasterizer::ReadDataFromGds(u32 gds_offset) {
     return value;
 }
 
-void Rasterizer::InvalidateMemory(VAddr addr, VAddr addr_aligned, u64 size) {
-    buffer_cache.InvalidateMemory(addr_aligned, size);
-    texture_cache.InvalidateMemory(addr, addr_aligned, size);
+bool Rasterizer::InvalidateMemory(VAddr addr, u64 size) {
+    if (!IsMapped(addr, size)) {
+        // Not GPU mapped memory, can skip invalidation logic entirely.
+        return false;
+    }
+    buffer_cache.InvalidateMemory(addr, size);
+    texture_cache.InvalidateMemory(addr, size);
+    return true;
+}
+
+bool Rasterizer::IsMapped(VAddr addr, u64 size) {
+    if (size == 0) {
+        // There is no memory, so not mapped.
+        return false;
+    }
+    return mapped_ranges.find(boost::icl::interval<VAddr>::right_open(addr, addr + size)) !=
+           mapped_ranges.end();
 }
 
 void Rasterizer::MapMemory(VAddr addr, u64 size) {
+    mapped_ranges += boost::icl::interval<VAddr>::right_open(addr, addr + size);
     page_manager.OnGpuMap(addr, size);
 }
@@ -854,6 +869,7 @@ void Rasterizer::UnmapMemory(VAddr addr, u64 size) {
     buffer_cache.InvalidateMemory(addr, size);
     texture_cache.UnmapMemory(addr, size);
     page_manager.OnGpuUnmap(addr, size);
+    mapped_ranges -= boost::icl::interval<VAddr>::right_open(addr, addr + size);
 }
 
 void Rasterizer::UpdateDynamicState(const GraphicsPipeline& pipeline) {
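The up-front `IsMapped` check and the `bool` return are what make the file-read changes in this PR cheap: callers can invalidate unconditionally, and for memory the GPU never mapped the call is just an interval lookup plus an early return. A hypothetical caller, sketching the file-read pattern (everything here except the `InvalidateMemory` contract is illustrative):

```cpp
#include <cstdint>
#include <unistd.h>

using VAddr = std::uintptr_t; // stand-ins for the emulator's aliases
using u64 = std::uint64_t;

// Hook standing in for Rasterizer::InvalidateMemory.
bool InvalidateMemory(VAddr addr, u64 size);

// Before the host write lands in guest memory, flush any GPU caches that
// shadow the destination range.
ssize_t ReadFileIntoGuest(int fd, VAddr guest_dst, u64 length) {
    InvalidateMemory(guest_dst, length); // cheap no-op for unmapped memory
    return read(fd, reinterpret_cast<void*>(guest_dst), length);
}
```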

src/video_core/renderer_vulkan/vk_rasterizer.h

@@ -54,7 +54,8 @@ public:
     void InlineData(VAddr address, const void* value, u32 num_bytes, bool is_gds);
     u32 ReadDataFromGds(u32 gsd_offset);
-    void InvalidateMemory(VAddr addr, VAddr addr_aligned, u64 size);
+    bool InvalidateMemory(VAddr addr, u64 size);
+    bool IsMapped(VAddr addr, u64 size);
     void MapMemory(VAddr addr, u64 size);
     void UnmapMemory(VAddr addr, u64 size);
@@ -100,6 +101,7 @@ private:
     VideoCore::TextureCache texture_cache;
     AmdGpu::Liverpool* liverpool;
     Core::MemoryManager* memory;
+    boost::icl::interval_set<VAddr> mapped_ranges;
     PipelineCache pipeline_cache;
     boost::container::static_vector<

src/video_core/texture_cache/texture_cache.cpp

@@ -56,24 +56,27 @@ void TextureCache::MarkAsMaybeDirty(ImageId image_id, Image& image) {
     UntrackImage(image_id);
 }
 
-void TextureCache::InvalidateMemory(VAddr addr, VAddr page_addr, size_t size) {
+void TextureCache::InvalidateMemory(VAddr addr, size_t size) {
     std::scoped_lock lock{mutex};
-    ForEachImageInRegion(page_addr, size, [&](ImageId image_id, Image& image) {
+    const auto end = addr + size;
+    const auto pages_start = PageManager::GetPageAddr(addr);
+    const auto pages_end = PageManager::GetNextPageAddr(addr + size - 1);
+    ForEachImageInRegion(pages_start, pages_end - pages_start, [&](ImageId image_id, Image& image) {
         const auto image_begin = image.info.guest_address;
         const auto image_end = image.info.guest_address + image.info.guest_size_bytes;
-        const auto page_end = page_addr + size;
-        if (image_begin <= addr && addr < image_end) {
-            // This image was definitely accessed by this page fault.
-            // Untrack image, so the range is unprotected and the guest can write freely
+        if (image_begin < end && addr < image_end) {
+            // Start or end of the modified region is in the image, or the image is entirely within
+            // the modified region, so the image was definitely accessed by this page fault.
+            // Untrack the image, so that the range is unprotected and the guest can write freely.
             image.flags |= ImageFlagBits::CpuDirty;
             UntrackImage(image_id);
-        } else if (page_end < image_end) {
+        } else if (pages_end < image_end) {
             // This page access may or may not modify the image.
             // We should not mark it as dirty now. If it really was modified
             // it will receive more invalidations on its other pages.
             // Remove tracking from this page only.
             UntrackImageHead(image_id);
-        } else if (image_begin < page_addr) {
+        } else if (image_begin < pages_start) {
             // This page access does not modify the image but the page should be untracked.
             // We should not mark this image as dirty now. If it really was modified
             // it will receive more invalidations on its other pages.
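The rewritten check separates the bytes actually modified (`addr` up to `end`) from the page-aligned span that was unprotected (`pages_start` up to `pages_end`); only an overlap with the former proves an image dirty. A sketch of the page arithmetic, assuming the helpers round to 4 KiB pages (`PageShift == 12` is an assumption here, not taken from the diff):

```cpp
#include <cstddef>
#include <cstdint>

using VAddr = std::uintptr_t;

constexpr std::size_t PageShift = 12;                 // assumption: 4 KiB pages
constexpr std::size_t PageSize = std::size_t{1} << PageShift;

// Presumed shape of PageManager::GetPageAddr / GetNextPageAddr.
constexpr VAddr GetPageAddr(VAddr addr) {
    return addr & ~VAddr(PageSize - 1);               // round down to the containing page
}
constexpr VAddr GetNextPageAddr(VAddr addr) {
    return GetPageAddr(addr) + PageSize;              // start of the following page
}

// The span touched by a write of `size` bytes at `addr` runs from the page
// containing the first byte to the page just past the last byte.
static_assert(GetPageAddr(0x1234) == 0x1000);
static_assert(GetNextPageAddr(0x1234 + 0x10 - 1) == 0x2000);
static_assert(GetNextPageAddr(0x1000 + 0x1000 - 1) == 0x2000); // exactly one page
```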

src/video_core/texture_cache/texture_cache.h

@@ -95,7 +95,7 @@ public:
     ~TextureCache();
 
     /// Invalidates any image in the logical page range.
-    void InvalidateMemory(VAddr addr, VAddr page_addr, size_t size);
+    void InvalidateMemory(VAddr addr, size_t size);
 
     /// Marks an image as dirty if it exists at the provided address.
     void InvalidateMemoryFromGPU(VAddr address, size_t max_size);