Mirror of https://github.com/shadps4-emu/shadPS4.git
core: Rework memory manager
This commit is contained in:
parent 623b1d6837
commit fc887bf3f5
10 changed files with 339 additions and 179 deletions
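The excerpt below (shown here without its +/- gutter markers) appears to replace the flat `allocations` vector with a sorted map of direct-memory areas (`dmem_map`) kept alongside the existing virtual-memory-area map (`vma_map`), so physical allocations can be split, queried, and coalesced the same way virtual mappings are. The actual declaration of `DirectMemoryArea` lives in the accompanying header, which is not part of this excerpt; judging only from the fields the diff touches (`base`, `size`, `memory_type`, `is_free`, `GetEnd()`), it is presumably something close to the following sketch.

#include <cstddef>
#include <cstdint>
#include <map>

using PAddr = std::uintptr_t; // stand-in for the project's physical-address type

// Hypothetical reconstruction of DirectMemoryArea, inferred from the fields
// referenced in this diff; the real definition is in the header and may differ.
struct DirectMemoryArea {
    PAddr base = 0;        // start of the physical region
    std::size_t size = 0;  // length of the region in bytes
    int memory_type = 0;   // sceKernel memory type tag
    bool is_free = true;   // whether the region can still be handed out

    PAddr GetEnd() const {
        return base + size;
    }
};

// Regions are presumably kept sorted by base address so neighbours are cheap to find.
using DMemMap = std::map<PAddr, DirectMemoryArea>;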
@@ -1,7 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/scope_exit.h"
@@ -13,49 +12,82 @@

namespace Core {

MemoryManager::MemoryManager() {
    // Insert a virtual memory area that covers the user area.
    const size_t user_size = USER_MAX - USER_MIN - 1;
    vma_map.emplace(USER_MIN, VirtualMemoryArea{USER_MIN, user_size});
    // Insert an area that covers the direct memory physical block.
    dmem_map.emplace(0, DirectMemoryArea{0, SCE_KERNEL_MAIN_DMEM_SIZE});

    // Insert a virtual memory area that covers the system managed area.
    const size_t sys_size = SYSTEM_MANAGED_MAX - SYSTEM_MANAGED_MIN - 1;
    vma_map.emplace(SYSTEM_MANAGED_MIN, VirtualMemoryArea{SYSTEM_MANAGED_MIN, sys_size});
    // Insert a virtual memory area that covers the entire area we manage.
    const VAddr virtual_base = impl.VirtualBase();
    const size_t virtual_size = impl.VirtualSize();
    vma_map.emplace(virtual_base, VirtualMemoryArea{virtual_base, virtual_size});
}

MemoryManager::~MemoryManager() = default;

PAddr MemoryManager::Allocate(PAddr search_start, PAddr search_end, size_t size, u64 alignment,
                              int memory_type) {
    PAddr free_addr = search_start;
    std::scoped_lock lk{mutex};

    // Iterate through allocated blocks and find the next free position
    for (const auto& block : allocations) {
        const PAddr end = block.base + block.size;
        free_addr = std::max(end, free_addr);
    auto dmem_area = FindDmemArea(search_start);

    const auto is_suitable = [&] {
        return dmem_area->second.is_free && dmem_area->second.size >= size;
    };
    while (!is_suitable() && dmem_area->second.GetEnd() <= search_end) {
        dmem_area++;
    }
    ASSERT_MSG(is_suitable(), "Unable to find free direct memory area");

    // Align free position
    PAddr free_addr = dmem_area->second.base;
    free_addr = alignment > 0 ? Common::AlignUp(free_addr, alignment) : free_addr;
    ASSERT(free_addr >= search_start && free_addr + size <= search_end);

    // Add the allocated region to the list and commit its pages.
    allocations.emplace_back(free_addr, size, memory_type);
    auto& area = AddDmemAllocation(free_addr, size);
    area.memory_type = memory_type;
    area.is_free = false;
    return free_addr;
}
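Allocate now walks the region map for the first free area that is large enough, rather than appending past the end of a flat allocation list. A minimal standalone sketch of that first-fit search, reusing the hypothetical DMemMap/DirectMemoryArea types from above and a simplified power-of-two AlignUp, might look like this (the real code asserts on failure instead of returning a sentinel, and delegates the carve-out to AddDmemAllocation).

// First-fit search over a sorted region map: skip non-free or too-small areas,
// then align the winning base address. Simplified sketch, not the shipped code.
PAddr AllocateFirstFit(DMemMap& dmem, PAddr search_start, PAddr search_end,
                       std::size_t size, std::uint64_t alignment) {
    auto it = dmem.lower_bound(search_start);
    if (it != dmem.begin() && (it == dmem.end() || it->first > search_start)) {
        --it; // step back to the region containing search_start
    }
    for (; it != dmem.end() && it->second.GetEnd() <= search_end; ++it) {
        if (!it->second.is_free || it->second.size < size) {
            continue;
        }
        PAddr addr = it->second.base;
        if (alignment > 0) {
            addr = (addr + alignment - 1) & ~(alignment - 1); // assumes power-of-two alignment
        }
        if (addr + size <= it->second.GetEnd()) {
            return addr;
        }
    }
    return 0; // the real implementation asserts here rather than returning 0
}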

void MemoryManager::Free(PAddr phys_addr, size_t size) {
    const auto it = std::ranges::find_if(allocations, [&](const auto& alloc) {
        return alloc.base == phys_addr && alloc.size == size;
    });
    ASSERT(it != allocations.end());
    std::scoped_lock lk{mutex};

    // Free the ranges.
    allocations.erase(it);
    const auto dmem_area = FindDmemArea(phys_addr);
    ASSERT(dmem_area != dmem_map.end() && dmem_area->second.base == phys_addr &&
           dmem_area->second.size == size);

    // Release any dmem mappings that reference this physical block.
    std::vector<std::pair<VAddr, u64>> remove_list;
    for (const auto& [addr, mapping] : vma_map) {
        if (mapping.type != VMAType::Direct) {
            continue;
        }
        if (mapping.phys_base <= phys_addr && phys_addr < mapping.phys_base + mapping.size) {
            LOG_INFO(Kernel_Vmm, "Unmapping direct mapping {:#x} with size {:#x}", addr,
                     mapping.size);
            // Unmapping might erase from vma_map. We can't do it here.
            remove_list.emplace_back(addr, mapping.size);
        }
    }
    for (const auto& [addr, size] : remove_list) {
        UnmapMemory(addr, size);
    }

    // Mark region as free and attempt to coalesce it with neighbours.
    auto& area = dmem_area->second;
    area.is_free = true;
    area.memory_type = 0;
    MergeAdjacent(dmem_map, dmem_area);
}
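Free collects a `remove_list` before calling UnmapMemory because, as the in-source comment notes, UnmapMemory mutates `vma_map` while Free is still iterating it; erasing or merging the entry the range-for currently points at would invalidate its iterator. The same defer-then-apply pattern in isolation, with a hypothetical map and predicate, looks like this.

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Collect the matching keys (and sizes) first, then perform the mutating calls
// once iteration is finished, so map iterators are never invalidated mid-loop.
template <typename Mapping, typename Pred, typename UnmapFn>
void UnmapMatching(std::map<std::uintptr_t, Mapping>& map, Pred pred, UnmapFn unmap) {
    std::vector<std::pair<std::uintptr_t, std::uint64_t>> remove_list;
    for (const auto& [addr, mapping] : map) {
        if (pred(mapping)) {
            remove_list.emplace_back(addr, mapping.size);
        }
    }
    for (const auto& [addr, size] : remove_list) {
        unmap(addr, size); // may erase or merge entries of `map`
    }
}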

int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, MemoryProt prot,
                             MemoryMapFlags flags, VMAType type, std::string_view name,
                             PAddr phys_addr, u64 alignment) {
                             bool is_exec, PAddr phys_addr, u64 alignment) {
    std::scoped_lock lk{mutex};

    // When virtual addr is zero, force it to virtual_base. The guest cannot pass Fixed
    // flag so we will take the branch that searches for free (or reserved) mappings.
    virtual_addr = (virtual_addr == 0) ? impl.VirtualBase() : virtual_addr;

    VAddr mapped_addr = alignment > 0 ? Common::AlignUp(virtual_addr, alignment) : virtual_addr;
    SCOPE_EXIT {
        auto& new_vma = AddMapping(mapped_addr, size);
@@ -65,18 +97,11 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
        new_vma.type = type;

        if (type == VMAType::Direct) {
            new_vma.phys_base = phys_addr;
            MapVulkanMemory(mapped_addr, size);
        }
    };

    // When virtual addr is zero let the address space manager pick the address.
    // Alignment matters here as we let the OS pick the address.
    if (virtual_addr == 0) {
        *out_addr = impl.Map(virtual_addr, size, alignment);
        mapped_addr = std::bit_cast<VAddr>(*out_addr);
        return ORBIS_OK;
    }

    // Fixed mapping means the virtual address must exactly match the provided one.
    if (True(flags & MemoryMapFlags::Fixed) && True(flags & MemoryMapFlags::NoOverwrite)) {
        // This should return SCE_KERNEL_ERROR_ENOMEM but shouldn't normally happen.
@@ -92,21 +117,28 @@ int MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, size_t size, M
            it++;
        }
        ASSERT(it != vma_map.end());
        mapped_addr = alignment > 0 ? Common::AlignUp(it->second.base, alignment) : it->second.base;
        const VAddr base = it->second.base;
        mapped_addr = alignment > 0 ? Common::AlignUp(base, alignment) : base;
    }

    // Perform the mapping.
    *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr);
    *out_addr = impl.Map(mapped_addr, size, alignment, phys_addr, is_exec);
    return ORBIS_OK;
}

void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
    std::scoped_lock lk{mutex};

    // TODO: Partial unmaps are technically supported by the guest.
    const auto it = vma_map.find(virtual_addr);
    ASSERT_MSG(it != vma_map.end() && it->first == virtual_addr,
               "Attempting to unmap partially mapped range");

    if (it->second.type == VMAType::Direct) {
    const auto type = it->second.type;
    fmt::print("{}\n", u32(type));
    std::fflush(stdout);
    const PAddr phys_addr = type == VMAType::Direct ? it->second.phys_base : -1;
    if (type == VMAType::Direct) {
        UnmapVulkanMemory(virtual_addr, size);
    }

@@ -115,13 +147,15 @@ void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
    vma.type = VMAType::Free;
    vma.prot = MemoryProt::NoAccess;
    vma.phys_base = 0;
    MergeAdjacent(it);
    MergeAdjacent(vma_map, it);

    // Unmap the memory region.
    impl.Unmap(virtual_addr, size);
    impl.Unmap(virtual_addr, size, phys_addr);
}

int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* prot) {
    std::scoped_lock lk{mutex};

    const auto it = FindVMA(addr);
    const auto& vma = it->second;
    ASSERT_MSG(vma.type != VMAType::Free, "Provided address is not mapped");

@@ -132,18 +166,70 @@ int MemoryManager::QueryProtection(VAddr addr, void** start, void** end, u32* pr
    return ORBIS_OK;
}

int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
                                     Libraries::Kernel::OrbisQueryInfo* out_info) {
    const auto it = std::ranges::find_if(allocations, [&](const DirectMemoryArea& alloc) {
        return alloc.base <= addr && addr < alloc.base + alloc.size;
    });
    if (it == allocations.end()) {
        return SCE_KERNEL_ERROR_EACCES;
int MemoryManager::VirtualQuery(VAddr addr, int flags,
                                Libraries::Kernel::OrbisVirtualQueryInfo* info) {
    auto it = FindVMA(addr);
    if (it->second.type == VMAType::Free && flags == 1) {
        it++;
    }
    if (it->second.type == VMAType::Free) {
        LOG_WARNING(Kernel_Vmm, "VirtualQuery on free memory region");
        return ORBIS_KERNEL_ERROR_EACCES;
    }

    out_info->start = it->base;
    out_info->end = it->base + it->size;
    out_info->memoryType = it->memory_type;
    const auto& vma = it->second;
    info->start = vma.base;
    info->end = vma.base + vma.size;
    info->is_flexible.Assign(vma.type == VMAType::Flexible);
    info->is_direct.Assign(vma.type == VMAType::Direct);
    info->is_commited.Assign(vma.type != VMAType::Free);
    if (vma.type == VMAType::Direct) {
        const auto dmem_it = FindDmemArea(vma.phys_base);
        ASSERT(dmem_it != dmem_map.end());
        info->memory_type = dmem_it->second.memory_type;
    }

    return ORBIS_OK;
}
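Both VirtualQuery above and the direct-memory queries below lean on FindVMA/FindDmemArea to locate the region containing an address. Those helpers are defined outside this excerpt; with base-address-keyed std::maps, the usual implementation is an upper_bound followed by one step back, roughly as sketched here under that assumption.

#include <cstdint>
#include <map>

// Find the map entry whose [base, base + size) interval contains addr,
// assuming entries are keyed by base address and do not overlap.
// Sketch of the presumed FindVMA/FindDmemArea technique, not the shipped code.
template <typename Map>
typename Map::iterator FindContaining(Map& map, std::uintptr_t addr) {
    auto it = map.upper_bound(addr); // first entry with base > addr
    if (it == map.begin()) {
        return map.end(); // addr lies before the first region
    }
    --it; // candidate region starting at or before addr
    const auto& region = it->second;
    if (addr < region.base + region.size) {
        return it;
    }
    return map.end(); // addr falls in a gap between regions
}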

int MemoryManager::DirectMemoryQuery(PAddr addr, bool find_next,
                                     Libraries::Kernel::OrbisQueryInfo* out_info) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(addr);
    if (dmem_area->second.is_free && find_next) {
        dmem_area++;
    }

    if (dmem_area == dmem_map.end() || dmem_area->second.is_free) {
        LOG_ERROR(Core, "Unable to find allocated direct memory region to query!");
        return ORBIS_KERNEL_ERROR_EACCES;
    }

    const auto& area = dmem_area->second;
    out_info->start = area.base;
    out_info->end = area.GetEnd();
    out_info->memoryType = area.memory_type;
    return ORBIS_OK;
}

int MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, size_t alignment,
                                        PAddr* phys_addr_out, size_t* size_out) {
    std::scoped_lock lk{mutex};

    auto dmem_area = FindDmemArea(search_start);
    PAddr paddr{};
    size_t max_size{};
    while (dmem_area != dmem_map.end() && dmem_area->second.GetEnd() <= search_end) {
        if (dmem_area->second.size > max_size) {
            paddr = dmem_area->second.base;
            max_size = dmem_area->second.size;
        }
        dmem_area++;
    }

    *phys_addr_out = alignment > 0 ? Common::AlignUp(paddr, alignment) : paddr;
    *size_out = max_size;
    return ORBIS_OK;
}

@@ -178,6 +264,30 @@ VirtualMemoryArea& MemoryManager::AddMapping(VAddr virtual_addr, size_t size) {
    return vma_handle->second;
}

DirectMemoryArea& MemoryManager::AddDmemAllocation(PAddr addr, size_t size) {
    auto dmem_handle = FindDmemArea(addr);
    ASSERT_MSG(dmem_handle != dmem_map.end(), "Physical address not in dmem_map");

    const DirectMemoryArea& area = dmem_handle->second;
    ASSERT_MSG(area.is_free && area.base <= addr,
               "Adding an allocation to already allocated region");

    const PAddr start_in_area = addr - area.base;
    const PAddr end_in_vma = start_in_area + size;
    ASSERT_MSG(end_in_vma <= area.size, "Mapping cannot fit inside free region");

    if (end_in_vma != area.size) {
        // Split VMA at the end of the allocated region
        Split(dmem_handle, end_in_vma);
    }
    if (start_in_area != 0) {
        // Split VMA at the start of the allocated region
        dmem_handle = Split(dmem_handle, start_in_area);
    }

    return dmem_handle->second;
}
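AddDmemAllocation carves the requested range out of a free region with at most two splits: first split off the tail beyond the allocation, then split off the head before it, so the handle finally returned covers exactly [addr, addr + size). A worked trace, assuming a single free region of 0x10000 bytes starting at physical address 0 (the values are illustrative only):

// Free map before:   [0x0000, 0x10000) free
// AddDmemAllocation(0x4000, 0x2000):
//   start_in_area = 0x4000 - 0x0000 = 0x4000, end_in_vma = 0x4000 + 0x2000 = 0x6000
//   Split at offset 0x6000 (end of the allocation):
//     [0x0000, 0x6000) free | [0x6000, 0x10000) free
//   Split at offset 0x4000 (start of the allocation):
//     [0x0000, 0x4000) free | [0x4000, 0x6000) free | [0x6000, 0x10000) free
// The returned handle now points at [0x4000, 0x6000), which Allocate then
// marks !is_free and tags with the caller's memory_type.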

MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offset_in_vma) {
    auto& old_vma = vma_handle->second;
    ASSERT(offset_in_vma < old_vma.size && offset_in_vma > 0);

@@ -193,24 +303,17 @@ MemoryManager::VMAHandle MemoryManager::Split(VMAHandle vma_handle, size_t offse
    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

MemoryManager::VMAHandle MemoryManager::MergeAdjacent(VMAHandle iter) {
    const auto next_vma = std::next(iter);
    if (next_vma != vma_map.end() && iter->second.CanMergeWith(next_vma->second)) {
        iter->second.size += next_vma->second.size;
        vma_map.erase(next_vma);
    }
MemoryManager::DMemHandle MemoryManager::Split(DMemHandle dmem_handle, size_t offset_in_area) {
    auto& old_area = dmem_handle->second;
    ASSERT(offset_in_area < old_area.size && offset_in_area > 0);

    if (iter != vma_map.begin()) {
        auto prev_vma = std::prev(iter);
        if (prev_vma->second.CanMergeWith(iter->second)) {
            prev_vma->second.size += iter->second.size;
            vma_map.erase(iter);
            iter = prev_vma;
        }
    }
    auto new_area = old_area;
    old_area.size = offset_in_area;
    new_area.base += offset_in_area;
    new_area.size -= offset_in_area;

    return iter;
}
    return dmem_map.emplace_hint(std::next(dmem_handle), new_area.base, new_area);
};
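The old member MergeAdjacent(VMAHandle) removed above is replaced by a helper that the new call sites invoke as MergeAdjacent(vma_map, it) and MergeAdjacent(dmem_map, dmem_area). Its definition is outside this excerpt, but judging from those call sites it presumably generalizes the same merge-with-next-then-merge-with-prev logic over either map, along the lines of this hedged sketch (the value type is assumed to expose `size` and `CanMergeWith()`).

#include <iterator>

// Map-generic coalescing of a region with its neighbours, inferred from the
// call sites in this diff; not necessarily the shipped implementation.
template <typename Map>
typename Map::iterator MergeAdjacent(Map& map, typename Map::iterator iter) {
    // Absorb the next region if it is mergeable.
    const auto next = std::next(iter);
    if (next != map.end() && iter->second.CanMergeWith(next->second)) {
        iter->second.size += next->second.size;
        map.erase(next);
    }
    // Let the previous region absorb this one if it is mergeable.
    if (iter != map.begin()) {
        auto prev = std::prev(iter);
        if (prev->second.CanMergeWith(iter->second)) {
            prev->second.size += iter->second.size;
            map.erase(iter);
            iter = prev;
        }
    }
    return iter;
}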

void MemoryManager::MapVulkanMemory(VAddr addr, size_t size) {
    return;