code: Cleanup and warning fixes from the Vulkan PR (#6163)
Co-authored-by: emufan4568 <geoster3d@gmail.com>
Co-authored-by: Kyle Kienapfel <Docteh@users.noreply.github.com>
parent aa84022704
commit 1ddea27ac8
72 changed files with 895 additions and 626 deletions
@@ -80,7 +80,7 @@ private:
     std::shared_ptr<Callback> timeout_callback;
 
     void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
-                std::shared_ptr<WaitObject> object);
+                std::shared_ptr<WaitObject> object) override;
 
 class DummyCallback : public WakeupCallback {
 public:
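The only functional change here is the added override specifier, which makes the compiler verify the signature against the base-class virtual (and silences Clang's -Winconsistent-missing-override). A minimal compiling sketch of the pattern, with hypothetical simplified types; the Thread parameter is dropped for brevity:

    #include <cstdio>
    #include <memory>

    // Hypothetical stand-ins for the kernel types in this hunk.
    struct WaitObject {};
    enum class ThreadWakeupReason { Signal, Timeout };

    struct WakeupCallback {
        virtual ~WakeupCallback() = default;
        virtual void WakeUp(ThreadWakeupReason reason, std::shared_ptr<WaitObject> object) = 0;
    };

    struct TimeoutCallback : WakeupCallback {
        // 'override' forces a signature check against the base class. If a later
        // refactor changed the base parameter list, this line would become a hard
        // error instead of silently declaring an unrelated virtual function.
        void WakeUp(ThreadWakeupReason reason, std::shared_ptr<WaitObject> object) override {
            std::printf("woke up: %d\n", static_cast<int>(reason));
        }
    };

    int main() {
        TimeoutCallback cb;
        cb.WakeUp(ThreadWakeupReason::Timeout, std::make_shared<WaitObject>());
    }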
@@ -138,10 +138,10 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
     u32 size = static_cast<u32>(descInfo.size);
     IPC::MappedBufferPermissions permissions = descInfo.perms;
 
-    VAddr page_start = Common::AlignDown(source_address, Memory::PAGE_SIZE);
+    VAddr page_start = Common::AlignDown(source_address, Memory::CITRA_PAGE_SIZE);
     u32 page_offset = source_address - page_start;
-    u32 num_pages =
-        Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
+    u32 num_pages = Common::AlignUp(page_offset + size, Memory::CITRA_PAGE_SIZE) >>
+                    Memory::CITRA_PAGE_BITS;
 
     // Skip when the size is zero and num_pages == 0
     if (size == 0) {
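Here and throughout the diff, Memory::PAGE_SIZE becomes Memory::CITRA_PAGE_SIZE (likewise PAGE_BITS and PAGE_MASK). The commit message only says "warning fixes", but a plausible motive is that PAGE_SIZE is a macro leaked by some platform headers (e.g. bionic's <limits.h>), so the prefixed name avoids collisions. The arithmetic itself is unchanged; a self-contained sketch with the 3DS's 4 KiB pages:

    #include <cstdint>
    #include <cstdio>

    using VAddr = std::uint32_t;
    using u32 = std::uint32_t;

    // These mirror Memory::CITRA_PAGE_SIZE / CITRA_PAGE_BITS.
    constexpr u32 CITRA_PAGE_SIZE = 0x1000;
    constexpr u32 CITRA_PAGE_BITS = 12;

    constexpr VAddr AlignDown(VAddr addr, u32 align) {
        return addr & ~(align - 1);
    }
    constexpr u32 AlignUp(u32 value, u32 align) {
        return (value + align - 1) & ~(align - 1);
    }

    int main() {
        // A buffer of 0x2345 bytes starting mid-page, as in the hunk above.
        VAddr source_address = 0x10003ABC;
        u32 size = 0x2345;

        VAddr page_start = AlignDown(source_address, CITRA_PAGE_SIZE); // 0x10003000
        u32 page_offset = source_address - page_start;                 // 0xABC
        u32 num_pages =
            AlignUp(page_offset + size, CITRA_PAGE_SIZE) >> CITRA_PAGE_BITS; // 3

        std::printf("page_start=%08X offset=%X pages=%u\n", page_start, page_offset, num_pages);
    }

Aligning page_offset + size up and shifting by the page bits counts every page the buffer touches, including the partial first and last pages.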
@@ -171,8 +171,8 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
                  found->target_address, size);
     }
 
-    VAddr prev_reserve = page_start - Memory::PAGE_SIZE;
-    VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
+    VAddr prev_reserve = page_start - Memory::CITRA_PAGE_SIZE;
+    VAddr next_reserve = page_start + num_pages * Memory::CITRA_PAGE_SIZE;
 
     auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second;
     auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second;
@@ -180,8 +180,9 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
            next_vma.meminfo_state == MemoryState::Reserved);
 
     // Unmap the buffer and guard pages from the source process
-    ResultCode result = src_process->vm_manager.UnmapRange(
-        page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
+    ResultCode result =
+        src_process->vm_manager.UnmapRange(page_start - Memory::CITRA_PAGE_SIZE,
+                                           (num_pages + 2) * Memory::CITRA_PAGE_SIZE);
     ASSERT(result == RESULT_SUCCESS);
 
     mapped_buffer_context.erase(found);
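The unmap covers num_pages + 2 pages because the buffer was mapped with one Reserved guard page on either side; prev_reserve and next_reserve in the previous hunk are exactly those pages. A toy version of the arithmetic, reusing the constants from the sketch above:

    #include <cstdint>
    #include <cstdio>

    using VAddr = std::uint32_t;
    using u32 = std::uint32_t;
    constexpr u32 CITRA_PAGE_SIZE = 0x1000;

    int main() {
        VAddr page_start = 0x10003000; // first page of the mapped buffer
        u32 num_pages = 3;             // pages occupied by the buffer itself

        // One guard page sits immediately below and one immediately above, so
        // the unmap starts one page early and covers num_pages + 2 pages.
        VAddr unmap_start = page_start - CITRA_PAGE_SIZE;
        u32 unmap_size = (num_pages + 2) * CITRA_PAGE_SIZE;

        std::printf("unmap [%08X, %08X)\n", unmap_start, unmap_start + unmap_size);
    }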
@@ -196,13 +197,13 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
 
     // Reserve a page of memory before the mapped buffer
     std::shared_ptr<BackingMem> reserve_buffer =
-        std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
     dst_process->vm_manager.MapBackingMemoryToBase(
         Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
-        Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);
+        Memory::CITRA_PAGE_SIZE, Kernel::MemoryState::Reserved);
 
     std::shared_ptr<BackingMem> buffer =
-        std::make_shared<BufferMem>(num_pages * Memory::PAGE_SIZE);
+        std::make_shared<BufferMem>(num_pages * Memory::CITRA_PAGE_SIZE);
     memory.ReadBlock(*src_process, source_address, buffer->GetPtr() + page_offset, size);
 
     // Map the page(s) into the target process' address space.
@@ -127,7 +127,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
         // Mapped memory page
         AddressMapping mapping;
         mapping.address = descriptor << 12;
-        mapping.size = Memory::PAGE_SIZE;
+        mapping.size = Memory::CITRA_PAGE_SIZE;
         mapping.read_only = false;
         mapping.unk_flag = false;
 
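The descriptor holds a page index, and << 12 converts it to a byte address; 12 is exactly CITRA_PAGE_BITS for 4 KiB pages. A quick sanity check of that relationship (the descriptor value is made up):

    #include <cassert>
    #include <cstdint>

    using u32 = std::uint32_t;
    constexpr u32 CITRA_PAGE_SIZE = 0x1000;
    constexpr u32 CITRA_PAGE_BITS = 12;

    int main() {
        static_assert(CITRA_PAGE_SIZE == (1u << CITRA_PAGE_BITS));

        u32 descriptor = 0x1EC00;       // hypothetical page index from a kernel caps entry
        u32 address = descriptor << 12; // same as descriptor * CITRA_PAGE_SIZE
        assert(address == descriptor * CITRA_PAGE_SIZE);
    }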
@@ -265,7 +265,7 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
 
     // Free heaps block by block
     CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
-    for (const auto [backing_memory, block_size] : backing_blocks) {
+    for (const auto& [backing_memory, block_size] : backing_blocks) {
         memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory.GetPtr()), block_size);
     }
 
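Binding the structured binding by const auto& avoids copying the (backing memory, size) pair on every iteration, which is what Clang's -Wrange-loop-analysis warns about; the same one-character fix appears again in Process::Map below. A reduced sketch with stand-in types:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        // Stand-in for the (MemoryRef, size) pairs in backing_blocks.
        std::vector<std::pair<std::string, std::uint32_t>> backing_blocks = {
            {"block_a", 0x1000}, {"block_b", 0x3000}};

        // 'const auto [..]' would copy the pair (and the string inside it) each
        // iteration; binding by reference only reads through it.
        for (const auto& [backing_memory, block_size] : backing_blocks) {
            std::printf("%s: 0x%X bytes\n", backing_memory.c_str(), block_size);
        }
    }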
@@ -396,7 +396,7 @@ ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perm
 
     CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(source, size));
     VAddr interval_target = target;
-    for (const auto [backing_memory, block_size] : backing_blocks) {
+    for (const auto& [backing_memory, block_size] : backing_blocks) {
         auto target_vma =
             vm_manager.MapBackingMemory(interval_target, backing_memory, block_size, target_state);
         ASSERT(target_vma.Succeeded());
@@ -10,7 +10,6 @@
 #include "common/logging/log.h"
 #include "common/microprofile.h"
 #include "common/scm_rev.h"
-#include "common/scope_exit.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/core_timing.h"
@@ -39,7 +38,6 @@
 #include "core/hle/kernel/wait_object.h"
 #include "core/hle/lock.h"
 #include "core/hle/result.h"
-#include "core/hle/service/service.h"
 
 namespace Kernel {
 
@@ -219,10 +217,10 @@ ResultCode SVC::ControlMemory(u32* out_addr, u32 addr0, u32 addr1, u32 size, u32
               "size=0x{:X}, permissions=0x{:08X}",
               operation, addr0, addr1, size, permissions);
 
-    if ((addr0 & Memory::PAGE_MASK) != 0 || (addr1 & Memory::PAGE_MASK) != 0) {
+    if ((addr0 & Memory::CITRA_PAGE_MASK) != 0 || (addr1 & Memory::CITRA_PAGE_MASK) != 0) {
         return ERR_MISALIGNED_ADDRESS;
     }
-    if ((size & Memory::PAGE_MASK) != 0) {
+    if ((size & Memory::CITRA_PAGE_MASK) != 0) {
         return ERR_MISALIGNED_SIZE;
     }
 
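CITRA_PAGE_MASK is presumably CITRA_PAGE_SIZE - 1, i.e. 0xFFF, so and-ing with it extracts the offset within a page; a nonzero result means the value is not page-aligned. Compactly:

    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;
    constexpr u32 CITRA_PAGE_SIZE = 0x1000;
    constexpr u32 CITRA_PAGE_MASK = CITRA_PAGE_SIZE - 1; // 0xFFF

    bool IsPageAligned(u32 value) {
        // The low 12 bits are the offset within a page; aligned values have none.
        return (value & CITRA_PAGE_MASK) == 0;
    }

    int main() {
        std::printf("%d %d\n", IsPageAligned(0x10003000), IsPageAligned(0x10003ABC)); // 1 0
    }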
@@ -374,7 +372,7 @@ ResultCode SVC::UnmapMemoryBlock(Handle handle, u32 addr) {
 
 /// Connect to an OS service given the port name, returns the handle to the port to out
 ResultCode SVC::ConnectToPort(Handle* out_handle, VAddr port_name_address) {
-    if (!Memory::IsValidVirtualAddress(*kernel.GetCurrentProcess(), port_name_address))
+    if (!memory.IsValidVirtualAddress(*kernel.GetCurrentProcess(), port_name_address))
        return ERR_NOT_FOUND;
 
     static constexpr std::size_t PortNameMaxLength = 11;
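The static Memory::IsValidVirtualAddress(...) call is replaced by a call through the MemorySystem reference the SVC object already holds; the same substitution recurs in WaitSynchronizationN and ReplyAndReceive below, and in KernelSystem::CreateThread further down. A sketch of the shape of the change, with heavily simplified stand-in types:

    #include <cstdint>

    using VAddr = std::uint32_t;
    struct Process {};

    namespace Memory {
    class MemorySystem {
    public:
        // Member form: operates on this instance's state directly, instead of a
        // namespace-scope helper that had to locate the memory system itself.
        bool IsValidVirtualAddress(const Process& process, VAddr addr) const {
            return addr != 0; // placeholder check for the sketch
        }
    };
    } // namespace Memory

    struct SVC {
        Memory::MemorySystem& memory;

        bool CheckHandles(const Process& proc, VAddr handles_address) const {
            // The handler uses its own reference, keeping the lookup tied to the
            // right MemorySystem instance.
            return memory.IsValidVirtualAddress(proc, handles_address);
        }
    };

    int main() {
        Memory::MemorySystem mem;
        Process proc;
        SVC svc{mem};
        return svc.CheckHandles(proc, 0x1000) ? 0 : 1;
    }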
@@ -541,7 +539,7 @@ ResultCode SVC::WaitSynchronizationN(s32* out, VAddr handles_address, s32 handle
                                      bool wait_all, s64 nano_seconds) {
     Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();
 
-    if (!Memory::IsValidVirtualAddress(*kernel.GetCurrentProcess(), handles_address))
+    if (!memory.IsValidVirtualAddress(*kernel.GetCurrentProcess(), handles_address))
         return ERR_INVALID_POINTER;
 
     // NOTE: on real hardware, there is no nullptr check for 'out' (tested with firmware 4.4). If
@@ -687,7 +685,7 @@ static ResultCode ReceiveIPCRequest(Kernel::KernelSystem& kernel, Memory::Memory
 /// In a single operation, sends a IPC reply and waits for a new request.
 ResultCode SVC::ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_count,
                                 Handle reply_target) {
-    if (!Memory::IsValidVirtualAddress(*kernel.GetCurrentProcess(), handles_address))
+    if (!memory.IsValidVirtualAddress(*kernel.GetCurrentProcess(), handles_address))
         return ERR_INVALID_POINTER;
 
     // Check if 'handle_count' is invalid
@@ -1288,7 +1286,7 @@ s64 SVC::GetSystemTick() {
 /// Creates a memory block at the specified address with the specified permissions and size
 ResultCode SVC::CreateMemoryBlock(Handle* out_handle, u32 addr, u32 size, u32 my_permission,
                                   u32 other_permission) {
-    if (size % Memory::PAGE_SIZE != 0)
+    if (size % Memory::CITRA_PAGE_SIZE != 0)
         return ERR_MISALIGNED_SIZE;
 
     std::shared_ptr<SharedMemory> shared_memory = nullptr;
@@ -1393,7 +1391,7 @@ ResultCode SVC::AcceptSession(Handle* out_server_session, Handle server_port_han
     return RESULT_SUCCESS;
 }
 
-static void CopyStringPart(char* out, const char* in, int offset, int max_length) {
+static void CopyStringPart(char* out, const char* in, size_t offset, size_t max_length) {
     size_t str_size = strlen(in);
     if (offset < str_size) {
         strncpy(out, in + offset, max_length - 1);
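strlen returns size_t, so the old int parameters invited -Wsign-compare in the offset < str_size test; widening both parameters keeps the comparison unsigned throughout. A self-contained version of the fixed helper; only the lines up to strncpy are visible in the hunk, so the terminator line is an assumption:

    #include <cstring>

    // Reconstructed from the hunk above; the line after strncpy is assumed.
    static void CopyStringPart(char* out, const char* in, size_t offset, size_t max_length) {
        size_t str_size = std::strlen(in);
        if (offset < str_size) { // size_t vs size_t: no sign-compare warning
            std::strncpy(out, in + offset, max_length - 1);
            out[max_length - 1] = '\0'; // assumed: guarantee termination
        }
    }

    int main() {
        char buf[8];
        CopyStringPart(buf, "citra-kernel", 6, sizeof(buf)); // copies "kernel"
    }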
@@ -1509,7 +1507,7 @@ ResultCode SVC::GetProcessInfo(s64* out, Handle process_handle, u32 type) {
         // TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure
         // what's the difference between them.
         *out = process->memory_used;
-        if (*out % Memory::PAGE_SIZE != 0) {
+        if (*out % Memory::CITRA_PAGE_SIZE != 0) {
             LOG_ERROR(Kernel_SVC, "called, memory size not page-aligned");
             return ERR_MISALIGNED_SIZE;
         }
@@ -105,9 +105,9 @@ void Thread::Stop() {
     ReleaseThreadMutexes(this);
 
     // Mark the TLS slot in the thread's page as free.
-    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
+    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::CITRA_PAGE_SIZE;
     u32 tls_slot =
-        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::CITRA_PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
     ASSERT(owner_process.lock());
     owner_process.lock()->tls_slots[tls_page].reset(tls_slot);
 }
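A 4 KiB TLS page holds eight 0x200-byte entries, so dividing the offset from TLS_AREA_VADDR by the page size gives the page, and the remainder divided by TLS_ENTRY_SIZE gives the slot within it. Worked numbers (the TLS_AREA_VADDR value is assumed):

    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;
    using VAddr = std::uint32_t;

    constexpr u32 CITRA_PAGE_SIZE = 0x1000;
    constexpr u32 TLS_ENTRY_SIZE = 0x200;        // 8 slots of 0x200 bytes per page
    constexpr VAddr TLS_AREA_VADDR = 0x1FF82000; // assumed value of Memory::TLS_AREA_VADDR

    int main() {
        // A thread whose TLS entry is slot 5 on the third TLS page.
        VAddr tls_address = TLS_AREA_VADDR + 2 * CITRA_PAGE_SIZE + 5 * TLS_ENTRY_SIZE;

        u32 tls_page = (tls_address - TLS_AREA_VADDR) / CITRA_PAGE_SIZE;                    // 2
        u32 tls_slot = ((tls_address - TLS_AREA_VADDR) % CITRA_PAGE_SIZE) / TLS_ENTRY_SIZE; // 5

        std::printf("page=%u slot=%u\n", tls_page, tls_slot);
    }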
@@ -337,8 +337,7 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
     }
 
     // TODO(yuriks): Other checks, returning 0xD9001BEA
-
-    if (!Memory::IsValidVirtualAddress(*owner_process, entry_point)) {
+    if (!memory.IsValidVirtualAddress(*owner_process, entry_point)) {
         LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:08x}", name, entry_point);
         // TODO: Verify error
         return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::Kernel,
@@ -374,13 +373,13 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
         auto memory_region = GetMemoryRegion(MemoryRegion::BASE);
 
         // Allocate some memory from the end of the linear heap for this region.
-        auto offset = memory_region->LinearAllocate(Memory::PAGE_SIZE);
+        auto offset = memory_region->LinearAllocate(Memory::CITRA_PAGE_SIZE);
         if (!offset) {
             LOG_ERROR(Kernel_SVC,
                       "Not enough space in region to allocate a new TLS page for thread");
             return ERR_OUT_OF_MEMORY;
         }
-        owner_process->memory_used += Memory::PAGE_SIZE;
+        owner_process->memory_used += Memory::CITRA_PAGE_SIZE;
 
         tls_slots.emplace_back(0); // The page is completely available at the start
         available_page = tls_slots.size() - 1;
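tls_slots acts as a vector of per-page occupancy bitsets: emplace_back(0) registers a fresh page with all slots free, set() claims a slot, and the reset() in Thread::Stop above releases it. A miniature model of that bookkeeping; the 8-slot width matches 0x1000 / 0x200:

    #include <bitset>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        constexpr std::size_t SLOTS_PER_PAGE = 0x1000 / 0x200; // 8 TLS entries per page
        std::vector<std::bitset<SLOTS_PER_PAGE>> tls_slots;

        tls_slots.emplace_back(0); // new page, every slot free
        std::size_t available_page = tls_slots.size() - 1;

        // Claim the first free slot on that page, as CreateThread does.
        std::size_t available_slot = 0;
        while (tls_slots[available_page].test(available_slot)) {
            ++available_slot;
        }
        tls_slots[available_page].set(available_slot);

        // Thread::Stop's counterpart: mark the slot free again.
        tls_slots[available_page].reset(available_slot);

        std::printf("page %zu, slot %zu, now free\n", available_page, available_slot);
    }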
@@ -390,14 +389,14 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
 
         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(
-            Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::PAGE_SIZE,
-            memory.GetFCRAMRef(*offset), Memory::PAGE_SIZE, MemoryState::Locked);
+            Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE,
+            memory.GetFCRAMRef(*offset), Memory::CITRA_PAGE_SIZE, MemoryState::Locked);
     }
 
     // Mark the slot as used
     tls_slots[available_page].set(available_slot);
     thread->tls_address = Memory::TLS_AREA_VADDR +
-                          static_cast<VAddr>(available_page) * Memory::PAGE_SIZE +
+                          static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE +
                           static_cast<VAddr>(available_slot) * Memory::TLS_ENTRY_SIZE;
 
     memory.ZeroBlock(*owner_process, thread->tls_address, Memory::TLS_ENTRY_SIZE);
@@ -260,8 +260,8 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
 }
 
 ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
-    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);
+    ASSERT_MSG((size & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
+    ASSERT_MSG((base & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);
 
     VMAIter vma_handle = StripIterConstness(FindVMA(base));
     if (vma_handle == vma_map.end()) {
@@ -296,8 +296,8 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
 }
 
 ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
-    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);
+    ASSERT_MSG((size & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
+    ASSERT_MSG((target & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);
 
     const VAddr target_end = target + size;
     ASSERT(target_end >= target);