Core: Protect fixes (#3029)

* Swap do-while to while

With a do-while loop, we waste time running the body once even when `aligned_size == 0`. A while loop is also still accurate to FreeBSD behavior, where mprotect returns success if `start == end`.
This also effectively prevents the memory assert seen in updated versions of RESIDENT EVIL 2 (CUSA09193).
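A minimal standalone sketch of the difference, assuming a hypothetical `protect_one_page` helper and a 16 KB page size (not the emulator's actual code):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for protecting one page; returns bytes handled.
static int64_t protect_one_page(uint64_t offset) {
    std::printf("protecting offset %#llx\n", static_cast<unsigned long long>(offset));
    return 0x4000; // 16 KB
}

int main() {
    const int64_t aligned_size = 0; // e.g. an mprotect call where start == end

    // do-while: the body runs once before the condition is ever checked,
    // so a zero-sized request still touches (and may assert on) a VMA.
    int64_t done = 0;
    do {
        done += protect_one_page(done);
    } while (done < aligned_size);

    // while: the condition is checked first, so a zero-sized request
    // does no work and succeeds immediately.
    done = 0;
    while (done < aligned_size) {
        done += protect_one_page(done);
    }
}
```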

* Move prot validation outside loop

The prot variable can't change during an mprotect call, so we can validate the flags once before protecting instead of on every loop iteration.
This also cleans up the prot validation code.
It should improve performance and is more accurate to the FreeBSD code.
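As a simplified sketch of the pattern, with hypothetical `kValidFlags`/`kEInval` constants standing in for `MemoryProt` and `ORBIS_KERNEL_ERROR_EINVAL`:

```cpp
#include <cstdint>

// Hypothetical stand-ins for the real flag mask and error code.
constexpr uint32_t kValidFlags = 0x33;
constexpr int32_t kEInval = -22;

int32_t protect_range(uint64_t addr, uint64_t size, uint32_t prot) {
    // prot is loop-invariant: it is passed in once and never changes
    // mid-call, so reject invalid flags once, before touching any VMA.
    if ((prot & ~kValidFlags) != 0) {
        return kEInval;
    }
    for (uint64_t done = 0; done < size; done += 0x4000) {
        // ... apply prot to the 16 KB page at addr + done,
        //     with no per-iteration re-validation ...
    }
    return 0;
}
```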

* Add logging for protect calls

This will help when debugging future problems.
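For instance, with the format string added in the diff below, a call like `sceKernelMprotect(addr, 0x4000, 0x3)` would log something along these lines (values hypothetical):

```
called addr = 0x200000000, size = 0x4000, prot = 0x3
```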
Stephen Miller, 2025-06-03 01:29:25 -05:00 (committed by GitHub)
parent eed99141b3
commit bb3f8af81a
2 changed files with 21 additions and 15 deletions


@@ -264,6 +264,8 @@ int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void**
 }
 
 s32 PS4_SYSV_ABI sceKernelMprotect(const void* addr, u64 size, s32 prot) {
+    LOG_INFO(Kernel_Vmm, "called addr = {}, size = {:#x}, prot = {:#x}", fmt::ptr(addr), size,
+             prot);
     Core::MemoryManager* memory_manager = Core::Memory::Instance();
     Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
     return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags);
@@ -279,6 +281,8 @@ s32 PS4_SYSV_ABI posix_mprotect(const void* addr, u64 size, s32 prot) {
 }
 
 s32 PS4_SYSV_ABI sceKernelMtypeprotect(const void* addr, u64 size, s32 mtype, s32 prot) {
+    LOG_INFO(Kernel_Vmm, "called addr = {}, size = {:#x}, prot = {:#x}", fmt::ptr(addr), size,
+             prot);
     Core::MemoryManager* memory_manager = Core::Memory::Instance();
     Core::MemoryProt protection_flags = static_cast<Core::MemoryProt>(prot);
     return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags);


@@ -557,18 +557,6 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
         return adjusted_size;
     }
 
-    // Validate protection flags
-    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
-                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
-                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
-
-    MemoryProt invalid_flags = prot & ~valid_flags;
-    if (u32(invalid_flags) != 0 && u32(invalid_flags) != u32(MemoryProt::NoAccess)) {
-        LOG_ERROR(Kernel_Vmm, "Invalid protection flags: prot = {:#x}, invalid flags = {:#x}",
-                  u32(prot), u32(invalid_flags));
-        return ORBIS_KERNEL_ERROR_EINVAL;
-    }
-
     // Change protection
     vma_base.prot = prot;
@@ -598,11 +586,25 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
 s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
     std::scoped_lock lk{mutex};
 
-    s64 protected_bytes = 0;
+    // Validate protection flags
+    constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
+                                              MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
+                                              MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
+
+    MemoryProt invalid_flags = prot & ~valid_flags;
+    if (invalid_flags != MemoryProt::NoAccess) {
+        LOG_ERROR(Kernel_Vmm, "Invalid protection flags");
+        return ORBIS_KERNEL_ERROR_EINVAL;
+    }
 
     // Align addr and size to the nearest page boundary.
     auto aligned_addr = Common::AlignDown(addr, 16_KB);
     auto aligned_size = Common::AlignUp(size + addr - aligned_addr, 16_KB);
 
-    do {
+    // Protect all VMAs between aligned_addr and aligned_addr + aligned_size.
+    s64 protected_bytes = 0;
+    while (protected_bytes < aligned_size) {
         auto it = FindVMA(aligned_addr + protected_bytes);
         auto& vma_base = it->second;
         ASSERT_MSG(vma_base.Contains(addr + protected_bytes, 0), "Address {:#x} is out of bounds",
@@ -615,7 +617,7 @@ s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
             return result;
         }
         protected_bytes += result;
-    } while (protected_bytes < aligned_size);
+    }
 
     return ORBIS_OK;
 }
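For reference, a self-contained sketch of the alignment math above, reimplementing the rounding helpers for illustration (the real code uses Common::AlignDown/Common::AlignUp with the 16_KB literal):

```cpp
#include <cassert>
#include <cstdint>

constexpr uint64_t kPageSize = 0x4000; // 16 KB

// Illustrative power-of-two rounding, mirroring what AlignDown/AlignUp do.
constexpr uint64_t align_down(uint64_t v, uint64_t a) {
    return v & ~(a - 1);
}
constexpr uint64_t align_up(uint64_t v, uint64_t a) {
    return (v + a - 1) & ~(a - 1);
}

int main() {
    // An unaligned request: 0x100 bytes starting 0x1234 into a page.
    const uint64_t addr = 0x200001234;
    const uint64_t size = 0x100;

    const uint64_t aligned_addr = align_down(addr, kPageSize);
    // Grow the size by however far addr was rounded down, then round up.
    const uint64_t aligned_size = align_up(size + addr - aligned_addr, kPageSize);

    assert(aligned_addr == 0x200000000);
    assert(aligned_size == kPageSize); // the single page containing the range

    // A zero-length request at a page-aligned address yields aligned_size == 0,
    // so the while loop above runs zero times and Protect returns ORBIS_OK.
    assert(align_up(0, kPageSize) == 0);
}
```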