Revert new GPU map logic (#3019)

Something's wrong somewhere, and there are just too many places that somewhere could be for me to debug it right now.
Authored by Stephen Miller on 2025-05-31 09:35:52 -05:00; committed by GitHub
parent 4019319d92
commit c09e463b8e


@@ -262,10 +262,7 @@ int MemoryManager::PoolCommit(VAddr virtual_addr, size_t size, MemoryProt prot)
     void* out_addr = impl.Map(mapped_addr, size, alignment, -1, false);
     TRACK_ALLOC(out_addr, size, "VMEM");
 
-    if (prot >= MemoryProt::GpuRead) {
-        // PS4s only map to GPU memory when the protection includes GPU access.
-        // If the address to map to is too high, PS4s throw a page fault and crash.
-        ASSERT_MSG(IsValidGpuMapping(mapped_addr, size), "Invalid address for GPU mapping");
+    if (IsValidGpuMapping(mapped_addr, size)) {
         rasterizer->MapMemory(mapped_addr, size);
     }
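
Before the revert, PoolCommit only touched the rasterizer when the requested protection included GPU access, and asserted that the address was GPU-addressable; the revert goes back to mapping every committed range that passes the validity check, regardless of protection. As a rough illustration of what such a validity check can look like (a hypothetical sketch, not the repository's actual IsValidGpuMapping; the 40-bit limit is an assumption):

    #include <cstdint>

    using VAddr = std::uint64_t;

    // Assumed limit for illustration only: a GPU that can address the low 40 bits.
    constexpr VAddr kGpuAddressableLimit = 1ULL << 40;

    // Hypothetical stand-in for IsValidGpuMapping: the whole range, not just its
    // start, must fall inside the GPU-addressable window.
    constexpr bool IsValidGpuMappingSketch(VAddr virtual_addr, std::uint64_t size) {
        return virtual_addr + size <= kGpuAddressableLimit;
    }
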
@@ -345,19 +342,15 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
         MergeAdjacent(vma_map, new_vma_handle);
     }
 
-    if (prot >= MemoryProt::GpuRead) {
-        // PS4s only map to GPU memory when the protection includes GPU access.
-        // If the address to map to is too high, PS4s throw a page fault and crash.
-        ASSERT_MSG(IsValidGpuMapping(mapped_addr, size), "Invalid address for GPU mapping");
-        rasterizer->MapMemory(mapped_addr, size);
-    }
-
     if (type == VMAType::Reserved || type == VMAType::PoolReserved) {
         // For Reserved/PoolReserved mappings, we don't perform any address space allocations.
         // Just set out_addr to mapped_addr instead.
         *out_addr = std::bit_cast<void*>(mapped_addr);
     } else {
-        // Type is either Direct, Flexible, or Code, these need to be mapped in our address space.
+        // If this is not a reservation, then map to GPU and address space
+        if (IsValidGpuMapping(mapped_addr, size)) {
+            rasterizer->MapMemory(mapped_addr, size);
+        }
         *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr, is_exec);
     }
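
The branch removed here keyed off the ordered comparison prot >= MemoryProt::GpuRead. That idiom only works if every GPU protection flag compares above every CPU-only flag. A minimal sketch of a flag layout with that property, assuming values along the lines of the Orbis SCE_KERNEL_PROT_* constants (the exact values are illustrative):

    #include <cstdint>

    // Illustrative flag layout: GPU bits sit above CPU bits, so a single ordered
    // comparison doubles as a "has any GPU access" test.
    enum class MemoryProtSketch : std::uint32_t {
        NoAccess = 0x0,
        CpuRead = 0x1,
        CpuReadWrite = 0x2,
        GpuRead = 0x10,
        GpuWrite = 0x20,
        GpuReadWrite = 0x30,
    };

    constexpr bool HasGpuAccess(MemoryProtSketch prot) {
        return static_cast<std::uint32_t>(prot) >=
               static_cast<std::uint32_t>(MemoryProtSketch::GpuRead);
    }

    static_assert(HasGpuAccess(MemoryProtSketch::GpuReadWrite));
    static_assert(!HasGpuAccess(MemoryProtSketch::CpuReadWrite));
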
@@ -429,7 +422,6 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, size_t size) {
     const bool is_exec = vma_base.is_exec;
     const auto start_in_vma = virtual_addr - vma_base_addr;
     const auto type = vma_base.type;
-    const auto prot = vma_base.prot;
 
     if (type != VMAType::PoolReserved && type != VMAType::Pooled) {
         LOG_ERROR(Kernel_Vmm, "Attempting to decommit non-pooled memory!");
@@ -437,15 +429,15 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, size_t size) {
     }
 
     if (type == VMAType::Pooled) {
+        // We always map PoolCommitted memory to GPU, so unmap when decommitting.
+        if (IsValidGpuMapping(virtual_addr, size)) {
+            rasterizer->UnmapMemory(virtual_addr, size);
+        }
+
         // Track how much pooled memory is decommitted
         pool_budget += size;
     }
 
-    if (prot >= MemoryProt::GpuRead) {
-        // If this mapping has GPU access, unmap from GPU.
-        rasterizer->UnmapMemory(virtual_addr, size);
-    }
-
     // Mark region as free and attempt to coalesce it with neighbours.
     const auto new_it = CarveVMA(virtual_addr, size);
     auto& vma = new_it->second;
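
The restored PoolDecommit mirrors PoolCommit: both sides gate the rasterizer call on the same address-validity predicate, so every GPU unmap corresponds to an earlier GPU map. A sketch of that symmetry, reusing the hypothetical helpers from the sketches above:

    // Hypothetical range tracker standing in for the rasterizer's map bookkeeping.
    struct GpuRangeTrackerSketch {
        void Map(VAddr addr, std::uint64_t size);   // records [addr, addr + size)
        void Unmap(VAddr addr, std::uint64_t size); // must match a prior Map
    };

    // Commit and decommit evaluate the same pure predicate, so an Unmap can
    // never be issued for a range that was never Mapped.
    void PoolCommitSketch(GpuRangeTrackerSketch& gpu, VAddr addr, std::uint64_t size) {
        if (IsValidGpuMappingSketch(addr, size)) {
            gpu.Map(addr, size);
        }
    }

    void PoolDecommitSketch(GpuRangeTrackerSketch& gpu, VAddr addr, std::uint64_t size) {
        if (IsValidGpuMappingSketch(addr, size)) {
            gpu.Unmap(addr, size); // mirrors the commit-side condition exactly
        }
    }
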
@@ -486,15 +478,11 @@ u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma
     if (type == VMAType::Free) {
         return adjusted_size;
     }
 
     if (type == VMAType::Flexible) {
         flexible_usage -= adjusted_size;
     }
 
-    if (prot >= MemoryProt::GpuRead) {
-        // If this mapping has GPU access, unmap from GPU.
-        rasterizer->UnmapMemory(virtual_addr, size);
-    }
     // Mark region as free and attempt to coalesce it with neighbours.
     const auto new_it = CarveVMA(virtual_addr, adjusted_size);
     auto& vma = new_it->second;
@@ -507,6 +495,11 @@ u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma
     auto& post_merge_vma = post_merge_it->second;
     bool readonly_file = post_merge_vma.prot == MemoryProt::CpuRead && type == VMAType::File;
     if (type != VMAType::Reserved && type != VMAType::PoolReserved) {
+        // If this mapping has GPU access, unmap from GPU.
+        if (IsValidGpuMapping(virtual_addr, size)) {
+            rasterizer->UnmapMemory(virtual_addr, size);
+        }
+
         // Unmap the memory region.
         impl.Unmap(vma_base_addr, vma_base_size, start_in_vma, start_in_vma + adjusted_size,
                    phys_base, is_exec, has_backing, readonly_file);
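
Note the two sizes in play here: the rasterizer unmap uses the caller's size, while impl.Unmap works with adjusted_size, the portion of the request clamped to the current VMA. A sketch of the usual clamping arithmetic behind an "adjusted" size (a hypothetical helper, shown only to make the distinction concrete; reuses the VAddr alias from the first sketch):

    #include <algorithm>
    #include <cstdint>

    // Clamp an unmap request to the bytes that fall inside one VMA entry; an
    // outer loop would then advance to the next entry with the remainder.
    std::uint64_t AdjustedSizeSketch(VAddr addr, VAddr entry_base,
                                     std::uint64_t entry_size, std::uint64_t requested) {
        const std::uint64_t start_in_vma = addr - entry_base;
        return std::min(requested, entry_size - start_in_vma);
    }
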
@@ -576,18 +569,6 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
 
-    if (vma_base.prot < MemoryProt::GpuRead && prot >= MemoryProt::GpuRead) {
-        // New protection will give the GPU access to this VMA, perform a rasterizer map
-        ASSERT_MSG(IsValidGpuMapping(addr, size), "Invalid address for GPU mapping");
-        rasterizer->MapMemory(addr, size);
-    }
-
-    if (vma_base.prot >= MemoryProt::GpuRead && prot < MemoryProt::GpuRead) {
-        // New protection will remove the GPU's access to this VMA, perform a rasterizer unmap
-        ASSERT_MSG(IsValidGpuMapping(addr, size), "Invalid address for GPU unmap");
-        rasterizer->UnmapMemory(addr, size);
-    }
-
     // Change protection
     vma_base.prot = prot;
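
The two blocks deleted here were edge-triggered: ProtectBytes mapped a VMA into the rasterizer only when it gained GPU access and unmapped it only when it lost that access, so repeated protection changes with the same GPU visibility were rasterizer no-ops. The same transition logic in isolation, built on the hypothetical helpers from the earlier sketches:

    // Act only on the edges of the "GPU-accessible" predicate.
    void OnProtectionChangeSketch(GpuRangeTrackerSketch& gpu, VAddr addr, std::uint64_t size,
                                  MemoryProtSketch old_prot, MemoryProtSketch new_prot) {
        const bool had_gpu = HasGpuAccess(old_prot);
        const bool has_gpu = HasGpuAccess(new_prot);
        if (!had_gpu && has_gpu) {
            gpu.Map(addr, size);   // VMA just became GPU-visible
        } else if (had_gpu && !has_gpu) {
            gpu.Unmap(addr, size); // VMA just lost GPU visibility
        }
    }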