Core: MapMemory fixes (#3142)

* Validate requested dmem range in MapMemory

Handles a rare edge case that only comes up when modding Driveclub

* Specify type

auto has failed us once again.

* Types cleanup

Just some basic tidying up.

* Clang
This commit is contained in:
Stephen Miller 2025-06-21 21:22:03 -05:00 committed by GitHub
parent 802124309d
commit d9dac05db2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 189 additions and 172 deletions

View file

@ -23,8 +23,8 @@ u64 PS4_SYSV_ABI sceKernelGetDirectMemorySize() {
return memory->GetTotalDirectSize(); return memory->GetTotalDirectSize();
} }
int PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u64 len, s32 PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u64 len,
u64 alignment, int memoryType, s64* physAddrOut) { u64 alignment, s32 memoryType, s64* physAddrOut) {
if (searchStart < 0 || searchEnd < 0) { if (searchStart < 0 || searchEnd < 0) {
LOG_ERROR(Kernel_Vmm, "Invalid parameters!"); LOG_ERROR(Kernel_Vmm, "Invalid parameters!");
return ORBIS_KERNEL_ERROR_EINVAL; return ORBIS_KERNEL_ERROR_EINVAL;
@ -71,13 +71,13 @@ int PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u
return ORBIS_OK; return ORBIS_OK;
} }
s32 PS4_SYSV_ABI sceKernelAllocateMainDirectMemory(size_t len, size_t alignment, int memoryType, s32 PS4_SYSV_ABI sceKernelAllocateMainDirectMemory(u64 len, u64 alignment, s32 memoryType,
s64* physAddrOut) { s64* physAddrOut) {
const auto searchEnd = static_cast<s64>(sceKernelGetDirectMemorySize()); const auto searchEnd = static_cast<s64>(sceKernelGetDirectMemorySize());
return sceKernelAllocateDirectMemory(0, searchEnd, len, alignment, memoryType, physAddrOut); return sceKernelAllocateDirectMemory(0, searchEnd, len, alignment, memoryType, physAddrOut);
} }
s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, size_t len) { s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, u64 len) {
if (len == 0) { if (len == 0) {
return ORBIS_OK; return ORBIS_OK;
} }
@ -87,7 +87,7 @@ s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, size_t len) {
return ORBIS_OK; return ORBIS_OK;
} }
s32 PS4_SYSV_ABI sceKernelReleaseDirectMemory(u64 start, size_t len) { s32 PS4_SYSV_ABI sceKernelReleaseDirectMemory(u64 start, u64 len) {
if (len == 0) { if (len == 0) {
return ORBIS_OK; return ORBIS_OK;
} }
@ -96,9 +96,8 @@ s32 PS4_SYSV_ABI sceKernelReleaseDirectMemory(u64 start, size_t len) {
return ORBIS_OK; return ORBIS_OK;
} }
s32 PS4_SYSV_ABI sceKernelAvailableDirectMemorySize(u64 searchStart, u64 searchEnd, s32 PS4_SYSV_ABI sceKernelAvailableDirectMemorySize(u64 searchStart, u64 searchEnd, u64 alignment,
size_t alignment, u64* physAddrOut, u64* physAddrOut, u64* sizeOut) {
size_t* sizeOut) {
LOG_INFO(Kernel_Vmm, "called searchStart = {:#x}, searchEnd = {:#x}, alignment = {:#x}", LOG_INFO(Kernel_Vmm, "called searchStart = {:#x}, searchEnd = {:#x}, alignment = {:#x}",
searchStart, searchEnd, alignment); searchStart, searchEnd, alignment);
@ -109,7 +108,7 @@ s32 PS4_SYSV_ABI sceKernelAvailableDirectMemorySize(u64 searchStart, u64 searchE
auto* memory = Core::Memory::Instance(); auto* memory = Core::Memory::Instance();
PAddr physAddr{}; PAddr physAddr{};
size_t size{}; u64 size{};
s32 result = memory->DirectQueryAvailable(searchStart, searchEnd, alignment, &physAddr, &size); s32 result = memory->DirectQueryAvailable(searchStart, searchEnd, alignment, &physAddr, &size);
if (size == 0) { if (size == 0) {
@ -122,14 +121,14 @@ s32 PS4_SYSV_ABI sceKernelAvailableDirectMemorySize(u64 searchStart, u64 searchE
return result; return result;
} }
s32 PS4_SYSV_ABI sceKernelVirtualQuery(const void* addr, int flags, OrbisVirtualQueryInfo* info, s32 PS4_SYSV_ABI sceKernelVirtualQuery(const void* addr, s32 flags, OrbisVirtualQueryInfo* info,
size_t infoSize) { u64 infoSize) {
LOG_INFO(Kernel_Vmm, "called addr = {}, flags = {:#x}", fmt::ptr(addr), flags); LOG_INFO(Kernel_Vmm, "called addr = {}, flags = {:#x}", fmt::ptr(addr), flags);
auto* memory = Core::Memory::Instance(); auto* memory = Core::Memory::Instance();
return memory->VirtualQuery(std::bit_cast<VAddr>(addr), flags, info); return memory->VirtualQuery(std::bit_cast<VAddr>(addr), flags, info);
} }
s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, int flags, u64 alignment) { s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, s32 flags, u64 alignment) {
LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}, flags = {:#x}, alignment = {:#x}", LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}, flags = {:#x}, alignment = {:#x}",
fmt::ptr(*addr), len, flags, alignment); fmt::ptr(*addr), len, flags, alignment);
if (addr == nullptr) { if (addr == nullptr) {
@ -159,7 +158,7 @@ s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, int flags, u
return result; return result;
} }
int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, int flags, s32 PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, s32 prot, s32 flags,
s64 directMemoryStart, u64 alignment, s64 directMemoryStart, u64 alignment,
const char* name) { const char* name) {
LOG_INFO(Kernel_Vmm, LOG_INFO(Kernel_Vmm,
@ -202,7 +201,7 @@ int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, i
return ret; return ret;
} }
int PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, int prot, int flags, s32 PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, s32 prot, s32 flags,
s64 directMemoryStart, u64 alignment) { s64 directMemoryStart, u64 alignment) {
LOG_INFO(Kernel_Vmm, "called, redirected to sceKernelMapNamedDirectMemory"); LOG_INFO(Kernel_Vmm, "called, redirected to sceKernelMapNamedDirectMemory");
return sceKernelMapNamedDirectMemory(addr, len, prot, flags, directMemoryStart, alignment, return sceKernelMapNamedDirectMemory(addr, len, prot, flags, directMemoryStart, alignment,
@ -255,7 +254,7 @@ s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, u64 len, s32 pro
return sceKernelMapNamedFlexibleMemory(addr_in_out, len, prot, flags, "anon"); return sceKernelMapNamedFlexibleMemory(addr_in_out, len, prot, flags, "anon");
} }
int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot) { s32 PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot) {
auto* memory = Core::Memory::Instance(); auto* memory = Core::Memory::Instance();
return memory->QueryProtection(std::bit_cast<VAddr>(addr), start, end, prot); return memory->QueryProtection(std::bit_cast<VAddr>(addr), start, end, prot);
} }
@ -285,14 +284,14 @@ s32 PS4_SYSV_ABI sceKernelMtypeprotect(const void* addr, u64 size, s32 mtype, s3
return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags); return memory_manager->Protect(std::bit_cast<VAddr>(addr), size, protection_flags);
} }
int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info, s32 PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, s32 flags, OrbisQueryInfo* query_info,
size_t infoSize) { u64 infoSize) {
LOG_INFO(Kernel_Vmm, "called offset = {:#x}, flags = {:#x}", offset, flags); LOG_INFO(Kernel_Vmm, "called offset = {:#x}, flags = {:#x}", offset, flags);
auto* memory = Core::Memory::Instance(); auto* memory = Core::Memory::Instance();
return memory->DirectMemoryQuery(offset, flags == 1, query_info); return memory->DirectMemoryQuery(offset, flags == 1, query_info);
} }
s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(size_t* out_size) { s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(u64* out_size) {
auto* memory = Core::Memory::Instance(); auto* memory = Core::Memory::Instance();
*out_size = memory->GetAvailableFlexibleSize(); *out_size = memory->GetAvailableFlexibleSize();
LOG_INFO(Kernel_Vmm, "called size = {:#x}", *out_size); LOG_INFO(Kernel_Vmm, "called size = {:#x}", *out_size);
@ -304,7 +303,7 @@ void PS4_SYSV_ABI _sceKernelRtldSetApplicationHeapAPI(void* func[]) {
linker->SetHeapAPI(func); linker->SetHeapAPI(func);
} }
int PS4_SYSV_ABI sceKernelGetDirectMemoryType(u64 addr, int* directMemoryTypeOut, s32 PS4_SYSV_ABI sceKernelGetDirectMemoryType(u64 addr, s32* directMemoryTypeOut,
void** directMemoryStartOut, void** directMemoryStartOut,
void** directMemoryEndOut) { void** directMemoryEndOut) {
LOG_WARNING(Kernel_Vmm, "called, direct memory addr = {:#x}", addr); LOG_WARNING(Kernel_Vmm, "called, direct memory addr = {:#x}", addr);
@ -313,23 +312,23 @@ int PS4_SYSV_ABI sceKernelGetDirectMemoryType(u64 addr, int* directMemoryTypeOut
directMemoryEndOut); directMemoryEndOut);
} }
int PS4_SYSV_ABI sceKernelIsStack(void* addr, void** start, void** end) { s32 PS4_SYSV_ABI sceKernelIsStack(void* addr, void** start, void** end) {
LOG_DEBUG(Kernel_Vmm, "called, addr = {}", fmt::ptr(addr)); LOG_DEBUG(Kernel_Vmm, "called, addr = {}", fmt::ptr(addr));
auto* memory = Core::Memory::Instance(); auto* memory = Core::Memory::Instance();
return memory->IsStack(std::bit_cast<VAddr>(addr), start, end); return memory->IsStack(std::bit_cast<VAddr>(addr), start, end);
} }
s32 PS4_SYSV_ABI sceKernelBatchMap(OrbisKernelBatchMapEntry* entries, int numEntries, s32 PS4_SYSV_ABI sceKernelBatchMap(OrbisKernelBatchMapEntry* entries, s32 numEntries,
int* numEntriesOut) { s32* numEntriesOut) {
return sceKernelBatchMap2(entries, numEntries, numEntriesOut, return sceKernelBatchMap2(entries, numEntries, numEntriesOut,
MemoryFlags::SCE_KERNEL_MAP_FIXED); // 0x10, 0x410? MemoryFlags::SCE_KERNEL_MAP_FIXED); // 0x10, 0x410?
} }
s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEntries, s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, s32 numEntries,
int* numEntriesOut, int flags) { s32* numEntriesOut, s32 flags) {
int result = ORBIS_OK; s32 result = ORBIS_OK;
int processed = 0; s32 processed = 0;
for (int i = 0; i < numEntries; i++, processed++) { for (s32 i = 0; i < numEntries; i++, processed++) {
if (entries == nullptr || entries[i].length == 0 || entries[i].operation > 4) { if (entries == nullptr || entries[i].length == 0 || entries[i].operation > 4) {
result = ORBIS_KERNEL_ERROR_EINVAL; result = ORBIS_KERNEL_ERROR_EINVAL;
break; // break and assign a value to numEntriesOut. break; // break and assign a value to numEntriesOut.
@ -619,7 +618,7 @@ s32 PS4_SYSV_ABI sceKernelConfiguredFlexibleMemorySize(u64* sizeOut) {
return ORBIS_OK; return ORBIS_OK;
} }
int PS4_SYSV_ABI sceKernelMunmap(void* addr, size_t len) { s32 PS4_SYSV_ABI sceKernelMunmap(void* addr, u64 len) {
LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}", fmt::ptr(addr), len); LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}", fmt::ptr(addr), len);
if (len == 0) { if (len == 0) {
return ORBIS_KERNEL_ERROR_EINVAL; return ORBIS_KERNEL_ERROR_EINVAL;
@ -628,8 +627,8 @@ int PS4_SYSV_ABI sceKernelMunmap(void* addr, size_t len) {
return memory->UnmapMemory(std::bit_cast<VAddr>(addr), len); return memory->UnmapMemory(std::bit_cast<VAddr>(addr), len);
} }
int PS4_SYSV_ABI posix_munmap(void* addr, size_t len) { s32 PS4_SYSV_ABI posix_munmap(void* addr, u64 len) {
int result = sceKernelMunmap(addr, len); s32 result = sceKernelMunmap(addr, len);
if (result < 0) { if (result < 0) {
LOG_ERROR(Kernel_Pthread, "posix_munmap: error = {}", result); LOG_ERROR(Kernel_Pthread, "posix_munmap: error = {}", result);
ErrSceToPosix(result); ErrSceToPosix(result);
@ -638,12 +637,12 @@ int PS4_SYSV_ABI posix_munmap(void* addr, size_t len) {
return result; return result;
} }
static constexpr int MAX_PRT_APERTURES = 3; static constexpr s32 MAX_PRT_APERTURES = 3;
static constexpr VAddr PRT_AREA_START_ADDR = 0x1000000000; static constexpr VAddr PRT_AREA_START_ADDR = 0x1000000000;
static constexpr size_t PRT_AREA_SIZE = 0xec00000000; static constexpr u64 PRT_AREA_SIZE = 0xec00000000;
static std::array<std::pair<VAddr, size_t>, MAX_PRT_APERTURES> PrtApertures{}; static std::array<std::pair<VAddr, u64>, MAX_PRT_APERTURES> PrtApertures{};
int PS4_SYSV_ABI sceKernelSetPrtAperture(int id, VAddr address, size_t size) { s32 PS4_SYSV_ABI sceKernelSetPrtAperture(s32 id, VAddr address, u64 size) {
if (id < 0 || id >= MAX_PRT_APERTURES) { if (id < 0 || id >= MAX_PRT_APERTURES) {
return ORBIS_KERNEL_ERROR_EINVAL; return ORBIS_KERNEL_ERROR_EINVAL;
} }
@ -667,7 +666,7 @@ int PS4_SYSV_ABI sceKernelSetPrtAperture(int id, VAddr address, size_t size) {
return ORBIS_OK; return ORBIS_OK;
} }
int PS4_SYSV_ABI sceKernelGetPrtAperture(int id, VAddr* address, size_t* size) { s32 PS4_SYSV_ABI sceKernelGetPrtAperture(s32 id, VAddr* address, u64* size) {
if (id < 0 || id >= MAX_PRT_APERTURES) { if (id < 0 || id >= MAX_PRT_APERTURES) {
return ORBIS_KERNEL_ERROR_EINVAL; return ORBIS_KERNEL_ERROR_EINVAL;
} }

View file

@ -52,13 +52,13 @@ constexpr u32 ORBIS_KERNEL_MAXIMUM_NAME_LENGTH = 32;
struct OrbisQueryInfo { struct OrbisQueryInfo {
uintptr_t start; uintptr_t start;
uintptr_t end; uintptr_t end;
int memoryType; s32 memoryType;
}; };
struct OrbisVirtualQueryInfo { struct OrbisVirtualQueryInfo {
uintptr_t start; uintptr_t start;
uintptr_t end; uintptr_t end;
size_t offset; u64 offset;
s32 protection; s32 protection;
s32 memory_type; s32 memory_type;
u8 is_flexible : 1; u8 is_flexible : 1;
@ -73,12 +73,12 @@ static_assert(sizeof(OrbisVirtualQueryInfo) == 72,
struct OrbisKernelBatchMapEntry { struct OrbisKernelBatchMapEntry {
void* start; void* start;
size_t offset; u64 offset;
size_t length; u64 length;
char protection; char protection;
char type; char type;
short reserved; s16 reserved;
int operation; s32 operation;
}; };
enum class OrbisKernelMemoryPoolOpcode : u32 { enum class OrbisKernelMemoryPoolOpcode : u32 {
@ -124,45 +124,44 @@ struct OrbisKernelMemoryPoolBatchEntry {
}; };
u64 PS4_SYSV_ABI sceKernelGetDirectMemorySize(); u64 PS4_SYSV_ABI sceKernelGetDirectMemorySize();
int PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u64 len, s32 PS4_SYSV_ABI sceKernelAllocateDirectMemory(s64 searchStart, s64 searchEnd, u64 len,
u64 alignment, int memoryType, s64* physAddrOut); u64 alignment, s32 memoryType, s64* physAddrOut);
int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, int flags, s32 PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, s32 prot, s32 flags,
s64 directMemoryStart, u64 alignment, s64 directMemoryStart, u64 alignment,
const char* name); const char* name);
int PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, int prot, int flags, s32 PS4_SYSV_ABI sceKernelMapDirectMemory(void** addr, u64 len, s32 prot, s32 flags,
s64 directMemoryStart, u64 alignment); s64 directMemoryStart, u64 alignment);
s32 PS4_SYSV_ABI sceKernelAllocateMainDirectMemory(size_t len, size_t alignment, int memoryType, s32 PS4_SYSV_ABI sceKernelAllocateMainDirectMemory(u64 len, u64 alignment, s32 memoryType,
s64* physAddrOut); s64* physAddrOut);
s32 PS4_SYSV_ABI sceKernelReleaseDirectMemory(u64 start, size_t len); s32 PS4_SYSV_ABI sceKernelReleaseDirectMemory(u64 start, u64 len);
s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, size_t len); s32 PS4_SYSV_ABI sceKernelCheckedReleaseDirectMemory(u64 start, u64 len);
s32 PS4_SYSV_ABI sceKernelAvailableDirectMemorySize(u64 searchStart, u64 searchEnd, s32 PS4_SYSV_ABI sceKernelAvailableDirectMemorySize(u64 searchStart, u64 searchEnd, u64 alignment,
size_t alignment, u64* physAddrOut, u64* physAddrOut, u64* sizeOut);
size_t* sizeOut); s32 PS4_SYSV_ABI sceKernelVirtualQuery(const void* addr, s32 flags, OrbisVirtualQueryInfo* info,
s32 PS4_SYSV_ABI sceKernelVirtualQuery(const void* addr, int flags, OrbisVirtualQueryInfo* info, u64 infoSize);
size_t infoSize); s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, s32 flags, u64 alignment);
s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, int flags, u64 alignment);
s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addr_in_out, u64 len, s32 prot, s32 flags, s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addr_in_out, u64 len, s32 prot, s32 flags,
const char* name); const char* name);
s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, u64 len, s32 prot, s32 flags); s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, u64 len, s32 prot, s32 flags);
int PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot); s32 PS4_SYSV_ABI sceKernelQueryMemoryProtection(void* addr, void** start, void** end, u32* prot);
s32 PS4_SYSV_ABI sceKernelMprotect(const void* addr, u64 size, s32 prot); s32 PS4_SYSV_ABI sceKernelMprotect(const void* addr, u64 size, s32 prot);
s32 PS4_SYSV_ABI sceKernelMtypeprotect(const void* addr, u64 size, s32 mtype, s32 prot); s32 PS4_SYSV_ABI sceKernelMtypeprotect(const void* addr, u64 size, s32 mtype, s32 prot);
int PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, int flags, OrbisQueryInfo* query_info, s32 PS4_SYSV_ABI sceKernelDirectMemoryQuery(u64 offset, s32 flags, OrbisQueryInfo* query_info,
size_t infoSize); u64 infoSize);
s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(size_t* sizeOut); s32 PS4_SYSV_ABI sceKernelAvailableFlexibleMemorySize(u64* sizeOut);
void PS4_SYSV_ABI _sceKernelRtldSetApplicationHeapAPI(void* func[]); void PS4_SYSV_ABI _sceKernelRtldSetApplicationHeapAPI(void* func[]);
int PS4_SYSV_ABI sceKernelGetDirectMemoryType(u64 addr, int* directMemoryTypeOut, s32 PS4_SYSV_ABI sceKernelGetDirectMemoryType(u64 addr, s32* directMemoryTypeOut,
void** directMemoryStartOut, void** directMemoryStartOut,
void** directMemoryEndOut); void** directMemoryEndOut);
int PS4_SYSV_ABI sceKernelIsStack(void* addr, void** start, void** end); s32 PS4_SYSV_ABI sceKernelIsStack(void* addr, void** start, void** end);
s32 PS4_SYSV_ABI sceKernelBatchMap(OrbisKernelBatchMapEntry* entries, int numEntries, s32 PS4_SYSV_ABI sceKernelBatchMap(OrbisKernelBatchMapEntry* entries, s32 numEntries,
int* numEntriesOut); s32* numEntriesOut);
s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, int numEntries, s32 PS4_SYSV_ABI sceKernelBatchMap2(OrbisKernelBatchMapEntry* entries, s32 numEntries,
int* numEntriesOut, int flags); s32* numEntriesOut, s32 flags);
s32 PS4_SYSV_ABI sceKernelSetVirtualRangeName(const void* addr, u64 len, const char* name); s32 PS4_SYSV_ABI sceKernelSetVirtualRangeName(const void* addr, u64 len, const char* name);
@ -175,7 +174,7 @@ s32 PS4_SYSV_ABI sceKernelMemoryPoolDecommit(void* addr, u64 len, s32 flags);
s32 PS4_SYSV_ABI sceKernelMemoryPoolBatch(const OrbisKernelMemoryPoolBatchEntry* entries, s32 count, s32 PS4_SYSV_ABI sceKernelMemoryPoolBatch(const OrbisKernelMemoryPoolBatchEntry* entries, s32 count,
s32* num_processed, s32 flags); s32* num_processed, s32 flags);
int PS4_SYSV_ABI sceKernelMunmap(void* addr, size_t len); s32 PS4_SYSV_ABI sceKernelMunmap(void* addr, u64 len);
void RegisterMemory(Core::Loader::SymbolsResolver* sym); void RegisterMemory(Core::Loader::SymbolsResolver* sym);

View file

@ -17,11 +17,11 @@ namespace Core {
MemoryManager::MemoryManager() { MemoryManager::MemoryManager() {
// Insert a virtual memory area that covers the entire area we manage. // Insert a virtual memory area that covers the entire area we manage.
const VAddr system_managed_base = impl.SystemManagedVirtualBase(); const VAddr system_managed_base = impl.SystemManagedVirtualBase();
const size_t system_managed_size = impl.SystemManagedVirtualSize(); const u64 system_managed_size = impl.SystemManagedVirtualSize();
const VAddr system_reserved_base = impl.SystemReservedVirtualBase(); const VAddr system_reserved_base = impl.SystemReservedVirtualBase();
const size_t system_reserved_size = impl.SystemReservedVirtualSize(); const u64 system_reserved_size = impl.SystemReservedVirtualSize();
const VAddr user_base = impl.UserVirtualBase(); const VAddr user_base = impl.UserVirtualBase();
const size_t user_size = impl.UserVirtualSize(); const u64 user_size = impl.UserVirtualSize();
vma_map.emplace(system_managed_base, vma_map.emplace(system_managed_base,
VirtualMemoryArea{system_managed_base, system_managed_size}); VirtualMemoryArea{system_managed_base, system_managed_size});
vma_map.emplace(system_reserved_base, vma_map.emplace(system_reserved_base,
@ -148,7 +148,7 @@ bool MemoryManager::TryWriteBacking(void* address, const void* data, u32 num_byt
return true; return true;
} }
PAddr MemoryManager::PoolExpand(PAddr search_start, PAddr search_end, size_t size, u64 alignment) { PAddr MemoryManager::PoolExpand(PAddr search_start, PAddr search_end, u64 size, u64 alignment) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
alignment = alignment > 0 ? alignment : 64_KB; alignment = alignment > 0 ? alignment : 64_KB;
@ -188,8 +188,8 @@ PAddr MemoryManager::PoolExpand(PAddr search_start, PAddr search_end, size_t siz
return mapping_start; return mapping_start;
} }
PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment, PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, u64 size, u64 alignment,
int memory_type) { s32 memory_type) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
alignment = alignment > 0 ? alignment : 16_KB; alignment = alignment > 0 ? alignment : 16_KB;
@ -226,7 +226,7 @@ PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size,
return mapping_start; return mapping_start;
} }
void MemoryManager::Free(PAddr phys_addr, size_t size) { void MemoryManager::Free(PAddr phys_addr, u64 size) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
auto dmem_area = CarveDmemArea(phys_addr, size); auto dmem_area = CarveDmemArea(phys_addr, size);
@ -256,7 +256,7 @@ void MemoryManager::Free(PAddr phys_addr, size_t size) {
MergeAdjacent(dmem_map, dmem_area); MergeAdjacent(dmem_map, dmem_area);
} }
int MemoryManager::PoolCommit(VAddr virtual_addr, size_t size, MemoryProt prot) { s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
const u64 alignment = 64_KB; const u64 alignment = 64_KB;
@ -320,6 +320,28 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
return ORBIS_KERNEL_ERROR_ENOMEM; return ORBIS_KERNEL_ERROR_ENOMEM;
} }
// Validate the requested physical address range
if (phys_addr != -1) {
u64 validated_size = 0;
do {
auto dmem_area = FindDmemArea(phys_addr + validated_size)->second;
// If any requested dmem area is not allocated, return an error.
if (dmem_area.is_free) {
LOG_ERROR(Kernel_Vmm, "Unable to map {:#x} bytes at physical address {:#x}", size,
phys_addr);
return ORBIS_KERNEL_ERROR_ENOMEM;
}
// Track how much we've validated.
validated_size += dmem_area.size - (phys_addr + validated_size - dmem_area.base);
} while (validated_size < size && phys_addr + validated_size < GetTotalDirectSize());
// If the requested range goes outside the dmem map, return an error.
if (validated_size < size) {
LOG_ERROR(Kernel_Vmm, "Unable to map {:#x} bytes at physical address {:#x}", size,
phys_addr);
return ORBIS_KERNEL_ERROR_ENOMEM;
}
}
// Limit the minumum address to SystemManagedVirtualBase to prevent hardware-specific issues. // Limit the minumum address to SystemManagedVirtualBase to prevent hardware-specific issues.
VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr; VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
@ -403,7 +425,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance(); auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance();
VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr; VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
const size_t size_aligned = Common::AlignUp(size, 16_KB); const u64 size_aligned = Common::AlignUp(size, 16_KB);
// Find first free area to map the file. // Find first free area to map the file.
if (False(flags & MemoryMapFlags::Fixed)) { if (False(flags & MemoryMapFlags::Fixed)) {
@ -416,7 +438,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
if (True(flags & MemoryMapFlags::Fixed)) { if (True(flags & MemoryMapFlags::Fixed)) {
const auto& vma = FindVMA(virtual_addr)->second; const auto& vma = FindVMA(virtual_addr)->second;
const size_t remaining_size = vma.base + vma.size - virtual_addr; const u64 remaining_size = vma.base + vma.size - virtual_addr;
ASSERT_MSG(!vma.IsMapped() && remaining_size >= size, ASSERT_MSG(!vma.IsMapped() && remaining_size >= size,
"Memory region {:#x} to {:#x} isn't free enough to map region {:#x} to {:#x}", "Memory region {:#x} to {:#x} isn't free enough to map region {:#x} to {:#x}",
vma.base, vma.base + vma.size, virtual_addr, virtual_addr + size); vma.base, vma.base + vma.size, virtual_addr, virtual_addr + size);
@ -448,7 +470,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
return ORBIS_OK; return ORBIS_OK;
} }
s32 MemoryManager::PoolDecommit(VAddr virtual_addr, size_t size) { s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
const auto it = FindVMA(virtual_addr); const auto it = FindVMA(virtual_addr);
@ -498,7 +520,7 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, size_t size) {
return ORBIS_OK; return ORBIS_OK;
} }
s32 MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) { s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
return UnmapMemoryImpl(virtual_addr, size); return UnmapMemoryImpl(virtual_addr, size);
} }
@ -564,7 +586,7 @@ s32 MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, u64 size) {
return ORBIS_OK; return ORBIS_OK;
} }
int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) { s32 MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
const auto it = FindVMA(addr); const auto it = FindVMA(addr);
@ -586,8 +608,7 @@ int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* pr
return ORBIS_OK; return ORBIS_OK;
} }
s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t size, s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, u64 size, MemoryProt prot) {
MemoryProt prot) {
const auto start_in_vma = addr - vma_base.base; const auto start_in_vma = addr - vma_base.base;
const auto adjusted_size = const auto adjusted_size =
vma_base.size - start_in_vma < size ? vma_base.size - start_in_vma : size; vma_base.size - start_in_vma < size ? vma_base.size - start_in_vma : size;
@ -624,7 +645,7 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t s
return adjusted_size; return adjusted_size;
} }
s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) { s32 MemoryManager::Protect(VAddr addr, u64 size, MemoryProt prot) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
// Validate protection flags // Validate protection flags
@ -649,8 +670,7 @@ s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
auto& vma_base = it->second; auto& vma_base = it->second;
ASSERT_MSG(vma_base.Contains(addr + protected_bytes, 0), "Address {:#x} is out of bounds", ASSERT_MSG(vma_base.Contains(addr + protected_bytes, 0), "Address {:#x} is out of bounds",
addr + protected_bytes); addr + protected_bytes);
auto result = 0; auto result = ProtectBytes(aligned_addr + protected_bytes, vma_base,
result = ProtectBytes(aligned_addr + protected_bytes, vma_base,
aligned_size - protected_bytes, prot); aligned_size - protected_bytes, prot);
if (result < 0) { if (result < 0) {
// ProtectBytes returned an error, return it // ProtectBytes returned an error, return it
@ -662,7 +682,7 @@ s32 MemoryManager::Protect(VAddr addr, size_t size, MemoryProt prot) {
return ORBIS_OK; return ORBIS_OK;
} }
int MemoryManager::VirtualQuery(VAddr addr, int flags, s32 MemoryManager::VirtualQuery(VAddr addr, s32 flags,
::Libraries::Kernel::OrbisVirtualQueryInfo* info) { ::Libraries::Kernel::OrbisVirtualQueryInfo* info) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
@ -707,7 +727,7 @@ int MemoryManager::VirtualQuery(VAddr addr, int flags,
return ORBIS_OK; return ORBIS_OK;
} }
int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next, s32 MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
::Libraries::Kernel::OrbisQueryInfo* out_info) { ::Libraries::Kernel::OrbisQueryInfo* out_info) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
@ -728,13 +748,13 @@ int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
return ORBIS_OK; return ORBIS_OK;
} }
int MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment, s32 MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, u64 alignment,
PAddr* phys_addr_out, size_t* size_out) { PAddr* phys_addr_out, u64* size_out) {
std::scoped_lock lk{mutex}; std::scoped_lock lk{mutex};
auto dmem_area = FindDmemArea(search_start); auto dmem_area = FindDmemArea(search_start);
PAddr paddr{}; PAddr paddr{};
size_t max_size{}; u64 max_size{};
while (dmem_area != dmem_map.end()) { while (dmem_area != dmem_map.end()) {
if (!dmem_area->second.is_free) { if (!dmem_area->second.is_free) {
@ -815,13 +835,60 @@ void MemoryManager::NameVirtualRange(VAddr virtual_addr, u64 size, std::string_v
} }
} }
s32 MemoryManager::GetDirectMemoryType(PAddr addr, s32* directMemoryTypeOut,
void** directMemoryStartOut, void** directMemoryEndOut) {
std::scoped_lock lk{mutex};
auto dmem_area = FindDmemArea(addr);
if (addr > dmem_area->second.GetEnd() || dmem_area->second.is_free) {
LOG_ERROR(Core, "Unable to find allocated direct memory region to check type!");
return ORBIS_KERNEL_ERROR_ENOENT;
}
const auto& area = dmem_area->second;
*directMemoryStartOut = reinterpret_cast<void*>(area.base);
*directMemoryEndOut = reinterpret_cast<void*>(area.GetEnd());
*directMemoryTypeOut = area.memory_type;
return ORBIS_OK;
}
s32 MemoryManager::IsStack(VAddr addr, void** start, void** end) {
auto vma_handle = FindVMA(addr);
if (vma_handle == vma_map.end()) {
return ORBIS_KERNEL_ERROR_EINVAL;
}
const VirtualMemoryArea& vma = vma_handle->second;
if (!vma.Contains(addr, 0) || vma.IsFree()) {
return ORBIS_KERNEL_ERROR_EACCES;
}
u64 stack_start = 0;
u64 stack_end = 0;
if (vma.type == VMAType::Stack) {
stack_start = vma.base;
stack_end = vma.base + vma.size;
}
if (start != nullptr) {
*start = reinterpret_cast<void*>(stack_start);
}
if (end != nullptr) {
*end = reinterpret_cast<void*>(stack_end);
}
return ORBIS_OK;
}
void MemoryManager::InvalidateMemory(const VAddr addr, const u64 size) const { void MemoryManager::InvalidateMemory(const VAddr addr, const u64 size) const {
if (rasterizer) { if (rasterizer) {
rasterizer->InvalidateMemory(addr, size); rasterizer->InvalidateMemory(addr, size);
} }
} }
VAddr MemoryManager::SearchFree(VAddr virtual_addr, size_t size, u32 alignment) { VAddr MemoryManager::SearchFree(VAddr virtual_addr, u64 size, u32 alignment) {
// If the requested address is below the mapped range, start search from the lowest address // If the requested address is below the mapped range, start search from the lowest address
auto min_search_address = impl.SystemManagedVirtualBase(); auto min_search_address = impl.SystemManagedVirtualBase();
if (virtual_addr < min_search_address) { if (virtual_addr < min_search_address) {
@ -864,7 +931,7 @@ VAddr MemoryManager::SearchFree(VAddr virtual_addr, size_t size, u32 alignment)
} }
// If there's enough space in the VMA, return the address. // If there's enough space in the VMA, return the address.
const size_t remaining_size = vma.base + vma.size - virtual_addr; const u64 remaining_size = vma.base + vma.size - virtual_addr;
if (remaining_size >= size) { if (remaining_size >= size) {
return virtual_addr; return virtual_addr;
} }
@ -877,7 +944,7 @@ VAddr MemoryManager::SearchFree(VAddr virtual_addr, size_t size, u32 alignment)
return -1; return -1;
} }
MemoryManager::VMAHandle MemoryManager::CarveVMA(VAddr virtual_addr, size_t size) { MemoryManager::VMAHandle MemoryManager::CarveVMA(VAddr virtual_addr, u64 size) {
auto vma_handle = FindVMA(virtual_addr); auto vma_handle = FindVMA(virtual_addr);
ASSERT_MSG(vma_handle->second.Contains(virtual_addr, 0), "Virtual address not in vm_map"); ASSERT_MSG(vma_handle->second.Contains(virtual_addr, 0), "Virtual address not in vm_map");
@ -906,7 +973,7 @@ MemoryManager::VMAHandle MemoryManager::CarveVMA(VAddr virtual_addr, size_t size
return vma_handle; return vma_handle;
} }
MemoryManager::DMemHandle MemoryManager::CarveDmemArea(PAddr addr, size_t size) { MemoryManager::DMemHandle MemoryManager::CarveDmemArea(PAddr addr, u64 size) {
auto dmem_handle = FindDmemArea(addr); auto dmem_handle = FindDmemArea(addr);
ASSERT_MSG(addr <= dmem_handle->second.GetEnd(), "Physical address not in dmem_map"); ASSERT_MSG(addr <= dmem_handle->second.GetEnd(), "Physical address not in dmem_map");
@ -930,7 +997,7 @@ MemoryManager::DMemHandle MemoryManager::CarveDmemArea(PAddr addr, size_t size)
return dmem_handle; return dmem_handle;
} }
MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) { MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, u64 offset_in_vma) {
auto& old_vma = vma_handle->second; auto& old_vma = vma_handle->second;
ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0); ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0);
@ -945,7 +1012,7 @@ MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offse
return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma); return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
} }
MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, size_t offset_in_area) { MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, u64 offset_in_area) {
auto& old_area = dmem_handle->second; auto& old_area = dmem_handle->second;
ASSERT(offset_in_area < old_area.size && offset_in_area > 0); ASSERT(offset_in_area < old_area.size && offset_in_area > 0);
@ -957,51 +1024,4 @@ MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, size_t of
return dmem_map.emplace_hint(std::next(dmem_handle), new_area.base, new_area); return dmem_map.emplace_hint(std::next(dmem_handle), new_area.base, new_area);
} }
int MemoryManager::GetDirectMemoryType(PAddr addr, int* directMemoryTypeOut,
                                       void** directMemoryStartOut, void** directMemoryEndOut) {
    // Looks up the allocated direct-memory area containing addr and reports
    // its base, end, and memory type through the out-parameters.
    std::scoped_lock lk{mutex};

    const auto it = FindDmemArea(addr);
    const auto& area = it->second;
    // Fail if addr lies past the located area or the area is not allocated.
    if (addr > area.GetEnd() || area.is_free) {
        LOG_ERROR(Core, "Unable to find allocated direct memory region to check type!");
        return ORBIS_KERNEL_ERROR_ENOENT;
    }
    *directMemoryStartOut = reinterpret_cast<void*>(area.base);
    *directMemoryEndOut = reinterpret_cast<void*>(area.GetEnd());
    *directMemoryTypeOut = area.memory_type;
    return ORBIS_OK;
}
int MemoryManager::IsStack(VAddr addr, void** start, void** end) {
    // Reports the bounds of the stack mapping containing addr through the
    // optional start/end out-parameters. A valid non-stack mapping reports
    // null bounds; an unmapped or free address returns an error code.
    auto vma_handle = FindVMA(addr);
    if (vma_handle == vma_map.end()) {
        return ORBIS_KERNEL_ERROR_EINVAL;
    }
    const VirtualMemoryArea& vma = vma_handle->second;
    if (!vma.Contains(addr, 0) || vma.IsFree()) {
        return ORBIS_KERNEL_ERROR_EACCES;
    }
    // Use explicit u64: `auto x = 0ul` deduces `unsigned long`, which is
    // 32 bits on LLP64 targets (MSVC/Windows) and would silently truncate
    // 64-bit virtual addresses assigned below.
    u64 stack_start = 0;
    u64 stack_end = 0;
    if (vma.type == VMAType::Stack) {
        stack_start = vma.base;
        stack_end = vma.base + vma.size;
    }
    if (start != nullptr) {
        *start = reinterpret_cast<void*>(stack_start);
    }
    if (end != nullptr) {
        *end = reinterpret_cast<void*>(stack_end);
    }
    return ORBIS_OK;
}
} // namespace Core } // namespace Core

View file

@ -63,8 +63,8 @@ enum class VMAType : u32 {
struct DirectMemoryArea { struct DirectMemoryArea {
PAddr base = 0; PAddr base = 0;
size_t size = 0; u64 size = 0;
int memory_type = 0; s32 memory_type = 0;
bool is_pooled = false; bool is_pooled = false;
bool is_free = true; bool is_free = true;
@ -88,7 +88,7 @@ struct DirectMemoryArea {
struct VirtualMemoryArea { struct VirtualMemoryArea {
VAddr base = 0; VAddr base = 0;
size_t size = 0; u64 size = 0;
PAddr phys_base = 0; PAddr phys_base = 0;
VMAType type = VMAType::Free; VMAType type = VMAType::Free;
MemoryProt prot = MemoryProt::NoAccess; MemoryProt prot = MemoryProt::NoAccess;
@ -97,7 +97,7 @@ struct VirtualMemoryArea {
uintptr_t fd = 0; uintptr_t fd = 0;
bool is_exec = false; bool is_exec = false;
bool Contains(VAddr addr, size_t size) const { bool Contains(VAddr addr, u64 size) const {
return addr >= base && (addr + size) <= (base + this->size); return addr >= base && (addr + size) <= (base + this->size);
} }
@ -184,14 +184,13 @@ public:
void SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1, bool use_extended_mem2); void SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1, bool use_extended_mem2);
PAddr PoolExpand(PAddr search_start, PAddr search_end, size_t size, u64 alignment); PAddr PoolExpand(PAddr search_start, PAddr search_end, u64 size, u64 alignment);
PAddr Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment, PAddr Allocate(PAddr search_start, PAddr search_end, u64 size, u64 alignment, s32 memory_type);
int memory_type);
void Free(PAddr phys_addr, size_t size); void Free(PAddr phys_addr, u64 size);
int PoolCommit(VAddr virtual_addr, size_t size, MemoryProt prot); s32 PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot);
s32 MapMemory(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot, s32 MapMemory(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
MemoryMapFlags flags, VMAType type, std::string_view name = "anon", MemoryMapFlags flags, VMAType type, std::string_view name = "anon",
@ -200,35 +199,35 @@ public:
s32 MapFile(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot, s32 MapFile(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
MemoryMapFlags flags, s32 fd, s64 phys_addr); MemoryMapFlags flags, s32 fd, s64 phys_addr);
s32 PoolDecommit(VAddr virtual_addr, size_t size); s32 PoolDecommit(VAddr virtual_addr, u64 size);
s32 UnmapMemory(VAddr virtual_addr, size_t size); s32 UnmapMemory(VAddr virtual_addr, u64 size);
int QueryProtection(VAddr addr, void** start, void** end, u32* prot); s32 QueryProtection(VAddr addr, void** start, void** end, u32* prot);
s32 Protect(VAddr addr, size_t size, MemoryProt prot); s32 Protect(VAddr addr, u64 size, MemoryProt prot);
s64 ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, size_t size, MemoryProt prot); s64 ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, u64 size, MemoryProt prot);
int VirtualQuery(VAddr addr, int flags, ::Libraries::Kernel::OrbisVirtualQueryInfo* info); s32 VirtualQuery(VAddr addr, s32 flags, ::Libraries::Kernel::OrbisVirtualQueryInfo* info);
int DirectMemoryQuery(PAddr addr, bool find_next, s32 DirectMemoryQuery(PAddr addr, bool find_next,
::Libraries::Kernel::OrbisQueryInfo* out_info); ::Libraries::Kernel::OrbisQueryInfo* out_info);
int DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment, s32 DirectQueryAvailable(PAddr search_start, PAddr search_end, u64 alignment,
PAddr* phys_addr_out, size_t* size_out); PAddr* phys_addr_out, u64* size_out);
int GetDirectMemoryType(PAddr addr, int* directMemoryTypeOut, void** directMemoryStartOut, s32 GetDirectMemoryType(PAddr addr, s32* directMemoryTypeOut, void** directMemoryStartOut,
void** directMemoryEndOut); void** directMemoryEndOut);
s32 IsStack(VAddr addr, void** start, void** end);
s32 SetDirectMemoryType(s64 phys_addr, s32 memory_type); s32 SetDirectMemoryType(s64 phys_addr, s32 memory_type);
void NameVirtualRange(VAddr virtual_addr, u64 size, std::string_view name); void NameVirtualRange(VAddr virtual_addr, u64 size, std::string_view name);
void InvalidateMemory(VAddr addr, u64 size) const; void InvalidateMemory(VAddr addr, u64 size) const;
int IsStack(VAddr addr, void** start, void** end);
private: private:
VMAHandle FindVMA(VAddr target) { VMAHandle FindVMA(VAddr target) {
return std::prev(vma_map.upper_bound(target)); return std::prev(vma_map.upper_bound(target));
@ -258,15 +257,15 @@ private:
return iter; return iter;
} }
VAddr SearchFree(VAddr virtual_addr, size_t size, u32 alignment = 0); VAddr SearchFree(VAddr virtual_addr, u64 size, u32 alignment = 0);
VMAHandle CarveVMA(VAddr virtual_addr, size_t size); VMAHandle CarveVMA(VAddr virtual_addr, u64 size);
DMemHandle CarveDmemArea(PAddr addr, size_t size); DMemHandle CarveDmemArea(PAddr addr, u64 size);
VMAHandle Split(VMAHandle vma_handle, size_t offset_in_vma); VMAHandle Split(VMAHandle vma_handle, u64 offset_in_vma);
DMemHandle Split(DMemHandle dmem_handle, size_t offset_in_area); DMemHandle Split(DMemHandle dmem_handle, u64 offset_in_area);
u64 UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size); u64 UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size);
@ -277,10 +276,10 @@ private:
DMemMap dmem_map; DMemMap dmem_map;
VMAMap vma_map; VMAMap vma_map;
std::mutex mutex; std::mutex mutex;
size_t total_direct_size{}; u64 total_direct_size{};
size_t total_flexible_size{}; u64 total_flexible_size{};
size_t flexible_usage{}; u64 flexible_usage{};
size_t pool_budget{}; u64 pool_budget{};
Vulkan::Rasterizer* rasterizer{}; Vulkan::Rasterizer* rasterizer{};
struct PrtArea { struct PrtArea {