windows: Address a bunch of address space problems

IndecisiveTurtle 2024-11-09 15:29:09 +02:00
parent ecf2dbbb37
commit f2b6d41ac6
9 changed files with 163 additions and 96 deletions

View file

@@ -40,6 +40,12 @@ static constexpr size_t BackingSize = SCE_KERNEL_MAIN_DMEM_SIZE_PRO;
 }
 }
 
+struct MemoryRegion {
+    VAddr base;
+    size_t size;
+    bool is_mapped;
+};
+
 struct AddressSpace::Impl {
     Impl() : process{GetCurrentProcess()} {
         // Allocate virtual address placeholder for our address space.
@@ -75,6 +81,7 @@ struct AddressSpace::Impl {
                    Common::GetLastErrorMsg());
         // Take the reduction off of the system managed area, and leave the others unchanged.
+        reduction = size_t(virtual_base - SYSTEM_MANAGED_MIN);
         system_managed_base = virtual_base;
         system_managed_size = SystemManagedSize - reduction;
         system_reserved_base = reinterpret_cast<u8*>(SYSTEM_RESERVED_MIN);
@@ -95,7 +102,8 @@ struct AddressSpace::Impl {
         const uintptr_t system_managed_addr = reinterpret_cast<uintptr_t>(system_managed_base);
         const uintptr_t system_reserved_addr = reinterpret_cast<uintptr_t>(system_reserved_base);
         const uintptr_t user_addr = reinterpret_cast<uintptr_t>(user_base);
-        placeholders.insert({system_managed_addr, virtual_size - reduction});
+        regions.emplace(system_managed_addr,
+                        MemoryRegion{system_managed_addr, virtual_size - reduction, false});
 
         // Allocate backing file that represents the total physical memory.
         backing_handle =
@@ -132,42 +140,15 @@ struct AddressSpace::Impl {
     }
 
     void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, ULONG prot, uintptr_t fd = 0) {
-        const size_t aligned_size = Common::AlignUp(size, 16_KB);
-        const auto it = placeholders.find(virtual_addr);
-        ASSERT_MSG(it != placeholders.end(), "Cannot map already mapped region");
-        ASSERT_MSG(virtual_addr >= it->lower() && virtual_addr + aligned_size <= it->upper(),
-                   "Map range must be fully contained in a placeholder");
-
-        // Windows only allows splitting a placeholder into two.
-        // This means that if the map range is fully
-        // contained the the placeholder we need to perform two split operations,
-        // one at the start and at the end.
-        const VAddr placeholder_start = it->lower();
-        const VAddr placeholder_end = it->upper();
-        const VAddr virtual_end = virtual_addr + aligned_size;
-
-        // If the placeholder doesn't exactly start at virtual_addr, split it at the start.
-        if (placeholder_start != virtual_addr) {
-            VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
-                          virtual_addr - placeholder_start, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
-        }
-
-        // If the placeholder doesn't exactly end at virtual_end, split it at the end.
-        if (placeholder_end != virtual_end) {
-            VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_end),
-                          placeholder_end - virtual_end, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
-        }
-
-        // Remove the placeholder.
-        placeholders.erase({virtual_addr, virtual_end});
-
-        // Perform the map.
+        // Before mapping we must carve a placeholder with the exact properties of our mapping.
+        auto* region = EnsureSplitRegionForMapping(virtual_addr, size);
+        region->is_mapped = true;
+
         void* ptr = nullptr;
         if (phys_addr != -1) {
             HANDLE backing = fd ? reinterpret_cast<HANDLE>(fd) : backing_handle;
             if (fd && prot == PAGE_READONLY) {
                 DWORD resultvar;
-                ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), aligned_size,
+                ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
                                     MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER,
                                     PAGE_READWRITE, nullptr, 0);
                 bool ret = ReadFile(backing, ptr, size, &resultvar, NULL);
@@ -176,12 +157,12 @@ struct AddressSpace::Impl {
                 ASSERT_MSG(ret, "VirtualProtect failed. {}", Common::GetLastErrorMsg());
             } else {
                 ptr = MapViewOfFile3(backing, process, reinterpret_cast<PVOID>(virtual_addr),
-                                     phys_addr, aligned_size, MEM_REPLACE_PLACEHOLDER, prot,
+                                     phys_addr, size, MEM_REPLACE_PLACEHOLDER, prot,
                                      nullptr, 0);
             }
         } else {
             ptr =
-                VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), aligned_size,
+                VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
                               MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
         }
         ASSERT_MSG(ptr, "{}", Common::GetLastErrorMsg());
@@ -202,33 +183,114 @@ struct AddressSpace::Impl {
         // The unmap call will create a new placeholder region. We need to see if we can coalesce it
         // with neighbors.
-        VAddr placeholder_start = virtual_addr;
-        VAddr placeholder_end = virtual_addr + size;
+        JoinRegionsAfterUnmap(virtual_addr, size);
+    }
+
+    // The following code is inspired from Dolphin's MemArena
+    // https://github.com/dolphin-emu/dolphin/blob/deee3ee4/Source/Core/Common/MemArenaWin.cpp#L212
+    MemoryRegion* EnsureSplitRegionForMapping(VAddr address, size_t size) {
+        // Find closest region that is <= the given address by using upper bound and decrementing
+        auto it = regions.upper_bound(address);
+        ASSERT_MSG(it != regions.begin(), "Invalid address {:#x}", address);
+        --it;
+        ASSERT_MSG(!it->second.is_mapped,
+                   "Attempt to map {:#x} with size {:#x} which overlaps with {:#x} mapping",
+                   address, size, it->second.base);
+        auto& [base, region] = *it;
+
+        const VAddr mapping_address = region.base;
+        const size_t region_size = region.size;
+        if (mapping_address == address) {
+            // If this region is already split up correctly we don't have to do anything
+            if (region_size == size) {
+                return &region;
+            }
+
+            ASSERT_MSG(region_size >= size,
+                       "Region with address {:#x} and size {:#x} can't fit {:#x}", mapping_address,
+                       region_size, size);
+
+            // Split the placeholder.
+            if (!VirtualFreeEx(process, LPVOID(address), size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
+                UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
+                return nullptr;
+            }
+
+            // Update tracked mappings and return the first of the two
+            region.size = size;
+            const VAddr new_mapping_start = address + size;
+            regions.emplace_hint(std::next(it), new_mapping_start,
+                                 MemoryRegion(new_mapping_start, region_size - size, false));
+            return &region;
+        }
+
+        ASSERT(mapping_address < address);
+
+        // Is there enough space to map this?
+        const size_t offset_in_region = address - mapping_address;
+        const size_t minimum_size = size + offset_in_region;
+        ASSERT(region_size >= minimum_size);
+
+        // Split the placeholder.
+        if (!VirtualFreeEx(process, LPVOID(address), size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
+            UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
+            return nullptr;
+        }
+
+        // Do we now have two regions or three regions?
+        if (region_size == minimum_size) {
+            // Split into two; update tracked mappings and return the second one
+            region.size = offset_in_region;
+            it = regions.emplace_hint(std::next(it), address,
+                                      MemoryRegion(address, size, false));
+            return &it->second;
+        } else {
+            // Split into three; update tracked mappings and return the middle one
+            region.size = offset_in_region;
+            const VAddr middle_mapping_start = address;
+            const size_t middle_mapping_size = size;
+            const VAddr after_mapping_start = address + size;
+            const size_t after_mapping_size = region_size - minimum_size;
+            it = regions.emplace_hint(std::next(it),
+                                      after_mapping_start, MemoryRegion(after_mapping_start, after_mapping_size, false));
+            it = regions.emplace_hint(it, middle_mapping_start,
+                                      MemoryRegion(middle_mapping_start, middle_mapping_size, false));
+            return &it->second;
+        }
+    }
+
+    void JoinRegionsAfterUnmap(VAddr address, size_t size) {
+        // There should be a mapping that matches the request exactly, find it
+        auto it = regions.find(address);
+        ASSERT_MSG(it != regions.end() && it->second.size == size,
+                   "Invalid address/size given to unmap.");
+        auto& [base, region] = *it;
+        region.is_mapped = false;
 
         // Check if a placeholder exists right before us.
-        const auto left_it = placeholders.find(virtual_addr - 1);
-        if (left_it != placeholders.end()) {
-            ASSERT_MSG(left_it->upper() == virtual_addr,
-                       "Left placeholder does not end at virtual_addr!");
-            placeholder_start = left_it->lower();
-            VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
-                          placeholder_end - placeholder_start,
-                          MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
+        auto it_prev = it != regions.begin() ? std::prev(it) : regions.end();
+        if (it_prev != regions.end() && !it_prev->second.is_mapped) {
+            const size_t total_size = it_prev->second.size + size;
+            if (!VirtualFreeEx(process, LPVOID(it_prev->first), total_size,
+                               MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
+                UNREACHABLE_MSG("Region coalescing failed: {}", Common::GetLastErrorMsg());
+            }
+
+            it_prev->second.size = total_size;
+            it = regions.erase(it);
         }
 
         // Check if a placeholder exists right after us.
-        const auto right_it = placeholders.find(placeholder_end + 1);
-        if (right_it != placeholders.end()) {
-            ASSERT_MSG(right_it->lower() == placeholder_end,
-                       "Right placeholder does not start at virtual_end!");
-            placeholder_end = right_it->upper();
-            VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
-                          placeholder_end - placeholder_start,
-                          MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
+        auto it_next = std::next(it);
+        if (it_next != regions.end() && !it_next->second.is_mapped) {
+            const size_t total_size = size + it_next->second.size;
+            if (!VirtualFreeEx(process, LPVOID(it->first), total_size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
+                UNREACHABLE_MSG("Region coalescing failed: {}", Common::GetLastErrorMsg());
+            }
+
+            it->second.size = total_size;
+            regions.erase(it_next);
         }
-
-        // Insert the new placeholder.
-        placeholders.insert({placeholder_start, placeholder_end});
     }
 
     void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
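A side note on the lookup idiom the new code uses twice (in EnsureSplitRegionForMapping above and in the Protect hunk below): std::map::upper_bound returns the first entry whose key is strictly greater than the address, so stepping back one entry yields the region that starts at or below it. A tiny, hypothetical helper to illustrate the idea; the VAddr alias and MemoryRegion struct are redeclared locally only so the sketch is self-contained, and FindRegionContaining is not part of the commit.

#include <cstddef>
#include <cstdint>
#include <map>

using VAddr = std::uintptr_t;
struct MemoryRegion {
    VAddr base;
    std::size_t size;
    bool is_mapped;
};

// Hypothetical helper (illustration only): find the tracked region containing addr, if any.
MemoryRegion* FindRegionContaining(std::map<VAddr, MemoryRegion>& regions, VAddr addr) {
    auto it = regions.upper_bound(addr); // first region whose base is > addr
    if (it == regions.begin()) {
        return nullptr;                  // addr lies before every tracked region
    }
    --it;                                // now it->second.base <= addr
    MemoryRegion& region = it->second;
    return addr < region.base + region.size ? &region : nullptr;
}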
@@ -251,18 +313,21 @@ struct AddressSpace::Impl {
             return;
         }
 
-        DWORD old_flags{};
-        bool success =
-            VirtualProtect(reinterpret_cast<void*>(virtual_addr), size, new_flags, &old_flags);
-
-        if (!success) {
-            LOG_ERROR(Common_Memory,
-                      "Failed to change virtual memory protection for address {:#x}, size {}",
-                      virtual_addr, size);
+        const VAddr virtual_end = virtual_addr + size;
+        auto it = --regions.upper_bound(virtual_addr);
+        for (; it->first < virtual_end; it++) {
+            if (!it->second.is_mapped) {
+                continue;
+            }
+            const auto& region = it->second;
+            const size_t range_addr = std::max(region.base, virtual_addr);
+            const size_t range_size = std::min(region.base + region.size, virtual_end) - range_addr;
+            DWORD old_flags{};
+            if (!VirtualProtectEx(process, LPVOID(range_addr), range_size, new_flags, &old_flags)) {
+                UNREACHABLE_MSG("Failed to change virtual memory protection for address {:#x}, size {}",
+                                range_addr, range_size);
+            }
         }
-
-        // Use assert to ensure success in debug builds
-        DEBUG_ASSERT(success && "Failed to change virtual memory protection");
     }
 
     HANDLE process{};
@@ -275,7 +340,7 @@ struct AddressSpace::Impl {
     size_t system_reserved_size{};
     u8* user_base{};
     size_t user_size{};
-    boost::icl::separate_interval_set<uintptr_t> placeholders;
+    std::map<VAddr, MemoryRegion> regions;
 };
 
 #else
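For context, a minimal standalone sketch (not part of the commit) of the Win32 placeholder lifecycle that EnsureSplitRegionForMapping and JoinRegionsAfterUnmap are built around: reserve a placeholder, split a piece off with VirtualFree(MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER), replace that piece with a real allocation via VirtualAlloc2(MEM_REPLACE_PLACEHOLDER), then hand it back and merge the neighbours with MEM_COALESCE_PLACEHOLDERS. It assumes Windows 10 1803+ and linking against onecore/mincore; error handling is omitted.

#include <windows.h>
#include <cstdio>

int main() {
    const SIZE_T kGranule = 64 * 1024; // Win32 allocation granularity
    const SIZE_T kTotal = 4 * kGranule;

    // Reserve one large placeholder covering the whole range (PAGE_NOACCESS is required here).
    char* base = static_cast<char*>(VirtualAlloc2(GetCurrentProcess(), nullptr, kTotal,
                                                  MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                  PAGE_NOACCESS, nullptr, 0));

    // Split the placeholder: carve the leading granule off into its own placeholder.
    VirtualFree(base, kGranule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

    // Replace the carved placeholder with a real committed allocation and touch it.
    char* mapped = static_cast<char*>(VirtualAlloc2(GetCurrentProcess(), base, kGranule,
                                                    MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER,
                                                    PAGE_READWRITE, nullptr, 0));
    mapped[0] = 42;

    // Give the allocation back as a placeholder, then merge the adjacent placeholders into one.
    VirtualFree(base, kGranule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
    VirtualFree(base, kTotal, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);

    std::printf("placeholder round trip at %p complete\n", static_cast<void*>(base));
    VirtualFree(base, 0, MEM_RELEASE); // finally release the coalesced placeholder
    return 0;
}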

View file

@@ -5,6 +5,7 @@
 #include "common/alignment.h"
 #include "common/assert.h"
+#include "common/scope_exit.h"
 #include "common/logging/log.h"
 #include "common/singleton.h"
 #include "core/file_sys/fs.h"
@@ -145,11 +146,6 @@ s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, int flags, u
 int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, int flags,
                                                s64 directMemoryStart, u64 alignment,
                                                const char* name) {
-    LOG_INFO(Kernel_Vmm,
-             "addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}, directMemoryStart = {:#x}, "
-             "alignment = {:#x}",
-             fmt::ptr(*addr), len, prot, flags, directMemoryStart, alignment);
-
     if (len == 0 || !Common::Is16KBAligned(len)) {
         LOG_ERROR(Kernel_Vmm, "Map size is either zero or not 16KB aligned!");
         return SCE_KERNEL_ERROR_EINVAL;
@@ -168,6 +164,13 @@ int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, i
     const VAddr in_addr = reinterpret_cast<VAddr>(*addr);
     const auto mem_prot = static_cast<Core::MemoryProt>(prot);
     const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
+    SCOPE_EXIT {
+        LOG_INFO(Kernel_Vmm,
+                 "in_addr = {:#x}, out_addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}, directMemoryStart = {:#x}, "
+                 "alignment = {:#x}",
+                 in_addr, fmt::ptr(*addr), len, prot, flags, directMemoryStart, alignment);
+    };
+
     auto* memory = Core::Memory::Instance();
     return memory->MapMemory(addr, in_addr, len, mem_prot, map_flags, Core::VMAType::Direct, "",
                              false, directMemoryStart, alignment);
@@ -201,13 +204,13 @@ s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addr_in_out, std::size_t
     const VAddr in_addr = reinterpret_cast<VAddr>(*addr_in_out);
     const auto mem_prot = static_cast<Core::MemoryProt>(prot);
     const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
+    SCOPE_EXIT {
+        LOG_INFO(Kernel_Vmm, "in_addr = {:#x}, out_addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}",
+                 in_addr, fmt::ptr(*addr_in_out), len, prot, flags);
+    };
     auto* memory = Core::Memory::Instance();
-    const int ret = memory->MapMemory(addr_in_out, in_addr, len, mem_prot, map_flags,
-                                      Core::VMAType::Flexible, name);
-    LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}",
-             fmt::ptr(*addr_in_out), len, prot, flags);
-    return ret;
+    return memory->MapMemory(addr_in_out, in_addr, len, mem_prot, map_flags,
+                             Core::VMAType::Flexible, name);
 }
 
 s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
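The LOG_INFO calls above are moved into SCOPE_EXIT blocks so the resolved output address is logged after MapMemory has written *addr, on every return path. The real macro lives in common/scope_exit.h; below is a minimal stand-in for illustration only (ScopeExit and MapSomething are hypothetical names, not shadPS4 code).

#include <cstdio>

// Minimal scope-guard stand-in (illustration only); shadPS4 uses the SCOPE_EXIT macro
// from common/scope_exit.h instead.
template <typename F>
struct ScopeExit {
    F func;
    ~ScopeExit() {
        func(); // runs when the enclosing scope is left, on any return path
    }
};
template <typename F>
ScopeExit(F) -> ScopeExit<F>;

int MapSomething(void** addr) {
    ScopeExit log_on_exit{[&] {
        // By the time this runs, *addr holds whatever the mapping call produced.
        std::printf("out_addr = %p\n", *addr);
    }};
    *addr = nullptr; // stand-in for the actual mapping call filling in the address
    return 0;
}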

View file

@@ -169,8 +169,8 @@ struct PthreadAttr {
 };
 using PthreadAttrT = PthreadAttr*;
 
-static constexpr u32 ThrStackDefault = 2_MB;
-static constexpr u32 ThrStackInitial = 2 * ThrStackDefault;
+static constexpr u32 ThrStackDefault = 1_MB;
+static constexpr u32 ThrStackInitial = 2_MB;
 static constexpr u32 ThrPageSize = 16_KB;
 static constexpr u32 ThrGuardDefault = ThrPageSize;

View file

@@ -22,8 +22,6 @@ int ThreadState::CreateStack(PthreadAttr* attr) {
         return 0;
     }
 
-    VAddr stackaddr;
-
     /*
      * Round up stack size to nearest multiple of _thr_page_size so
      * that mmap() * will work. If the stack size is not an even
@@ -83,7 +81,7 @@ int ThreadState::CreateStack(PthreadAttr* attr) {
     }
 
     /* Allocate a new stack. */
-    stackaddr = last_stack - stacksize - guardsize;
+    VAddr stackaddr = last_stack - stacksize - guardsize;
 
     /*
      * Even if stack allocation fails, we don't want to try to

View file

@@ -133,6 +133,10 @@ public:
         rasterizer = rasterizer_;
     }
 
+    AddressSpace& GetAddressSpace() {
+        return impl;
+    }
+
     u64 GetTotalDirectSize() const {
         return total_direct_size;
     }

View file

@@ -1,6 +1,6 @@
 // SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
-
+#pragma clang optimize off
 #include <cryptopp/sha.h>
 
 #include "common/alignment.h"

View file

@@ -1,6 +1,6 @@
 // SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
-
+#pragma clang optimize off
 #include "common/assert.h"
 #include "common/config.h"
 #include "common/debug.h"
@@ -570,7 +570,7 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<c
                                        sizeof(u32), false);
             } else if (dma_data->src_sel == DmaDataSrc::Gds &&
                        dma_data->dst_sel == DmaDataDst::Memory) {
-                LOG_WARNING(Render_Vulkan, "GDS memory read");
+                //LOG_WARNING(Render_Vulkan, "GDS memory read");
             } else if (dma_data->src_sel == DmaDataSrc::Memory &&
                        dma_data->dst_sel == DmaDataDst::Memory) {
                 rasterizer->InlineData(dma_data->DstAddress<VAddr>(),

View file

@@ -7,6 +7,7 @@
 #include "common/assert.h"
 #include "common/error.h"
 #include "common/signal_context.h"
+#include "core/memory.h"
 #include "core/signals.h"
 #include "video_core/page_manager.h"
 #include "video_core/renderer_vulkan/vk_rasterizer.h"
@@ -145,15 +146,11 @@ struct PageManager::Impl {
         ASSERT_MSG(owned_ranges.find(address) != owned_ranges.end(),
                    "Attempted to track non-GPU memory at address {:#x}, size {:#x}.", address,
                    size);
-#ifdef _WIN32
-        DWORD prot = allow_write ? PAGE_READWRITE : PAGE_READONLY;
-        DWORD old_prot{};
-        BOOL result = VirtualProtect(std::bit_cast<LPVOID>(address), size, prot, &old_prot);
-        ASSERT_MSG(result != 0, "Region protection failed");
-#else
-        mprotect(reinterpret_cast<void*>(address), size,
-                 PROT_READ | (allow_write ? PROT_WRITE : 0));
-#endif
+        auto* memory = Core::Memory::Instance();
+        auto& impl = memory->GetAddressSpace();
+        impl.Protect(address, size,
+                     allow_write ? Core::MemoryPermission::ReadWrite
+                                 : Core::MemoryPermission::Read);
     }
 
     static bool GuestFaultSignalHandler(void* context, void* fault_address) {

View file

@@ -1,6 +1,6 @@
 // SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
-
+#pragma clang optimize off
 // Include the vulkan platform specific header
 #if defined(ANDROID)
 #define VK_USE_PLATFORM_ANDROID_KHR