Sources: Run clang-format on everything.

Emmanuel Gil Peyrot 2016-09-18 09:38:01 +09:00
parent fe948af095
commit dc8479928c
386 changed files with 19560 additions and 18080 deletions
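The .clang-format configuration itself is not shown on this page, so the exact options are an assumption; the changes below are consistent with a style that forbids short functions on a single line (for example AllowShortFunctionsOnASingleLine: None) and wraps long lines at roughly 100 columns. A minimal before/after sketch of that effect, using names taken from the first file in the diff:

// Pre-format shape (as on the first hunk's original lines):
//   AddressArbiter::AddressArbiter() {}
//   std::string GetTypeName() const override { return "Arbiter"; }
// Post-format shape produced throughout this commit:
#include <string>

class AddressArbiter {
public:
    AddressArbiter();
    ~AddressArbiter();
    std::string GetTypeName() const {
        return "Arbiter";
    }
};

AddressArbiter::AddressArbiter() {
}
AddressArbiter::~AddressArbiter() {
}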

@ -16,8 +16,10 @@
namespace Kernel {
AddressArbiter::AddressArbiter() {}
AddressArbiter::~AddressArbiter() {}
AddressArbiter::AddressArbiter() {
}
AddressArbiter::~AddressArbiter() {
}
SharedPtr<AddressArbiter> AddressArbiter::Create(std::string name) {
SharedPtr<AddressArbiter> address_arbiter(new AddressArbiter);
@ -28,7 +30,7 @@ SharedPtr<AddressArbiter> AddressArbiter::Create(std::string name) {
}
ResultCode AddressArbiter::ArbitrateAddress(ArbitrationType type, VAddr address, s32 value,
u64 nanoseconds) {
u64 nanoseconds) {
switch (type) {
// Signal thread(s) waiting for arbitrate address...
@ -38,7 +40,7 @@ ResultCode AddressArbiter::ArbitrateAddress(ArbitrationType type, VAddr address,
ArbitrateAllThreads(address);
} else {
// Resume first N threads
for(int i = 0; i < value; i++)
for (int i = 0; i < value; i++)
ArbitrateHighestPriorityThread(address);
}
break;
@ -55,8 +57,7 @@ ResultCode AddressArbiter::ArbitrateAddress(ArbitrationType type, VAddr address,
GetCurrentThread()->WakeAfterDelay(nanoseconds);
}
break;
case ArbitrationType::DecrementAndWaitIfLessThan:
{
case ArbitrationType::DecrementAndWaitIfLessThan: {
s32 memory_value = Memory::Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
@ -65,8 +66,7 @@ ResultCode AddressArbiter::ArbitrateAddress(ArbitrationType type, VAddr address,
}
break;
}
case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout:
{
case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
s32 memory_value = Memory::Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
@ -79,17 +79,19 @@ ResultCode AddressArbiter::ArbitrateAddress(ArbitrationType type, VAddr address,
default:
LOG_ERROR(Kernel, "unknown type=%d", type);
return ResultCode(ErrorDescription::InvalidEnumValue, ErrorModule::Kernel, ErrorSummary::WrongArgument, ErrorLevel::Usage);
return ResultCode(ErrorDescription::InvalidEnumValue, ErrorModule::Kernel,
ErrorSummary::WrongArgument, ErrorLevel::Usage);
}
HLE::Reschedule(__func__);
// The calls that use a timeout seem to always return a Timeout error even if they did not put the thread to sleep
// The calls that use a timeout seem to always return a Timeout error even if they did not put
// the thread to sleep
if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
return ResultCode(ErrorDescription::Timeout, ErrorModule::OS,
ErrorSummary::StatusChanged, ErrorLevel::Info);
return ResultCode(ErrorDescription::Timeout, ErrorModule::OS, ErrorSummary::StatusChanged,
ErrorLevel::Info);
}
return RESULT_SUCCESS;
}

@ -36,13 +36,19 @@ public:
*/
static SharedPtr<AddressArbiter> Create(std::string name = "Unknown");
std::string GetTypeName() const override { return "Arbiter"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "Arbiter";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::AddressArbiter;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::string name; ///< Name of address arbiter object (optional)
std::string name; ///< Name of address arbiter object (optional)
ResultCode ArbitrateAddress(ArbitrationType type, VAddr address, s32 value, u64 nanoseconds);

@ -10,7 +10,9 @@
namespace Kernel {
ClientPort::ClientPort() {}
ClientPort::~ClientPort() {}
ClientPort::ClientPort() {
}
ClientPort::~ClientPort() {
}
} // namespace

@ -17,16 +17,22 @@ class ServerPort;
class ClientPort : public Object {
public:
friend class ServerPort;
std::string GetTypeName() const override { return "ClientPort"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "ClientPort";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::ClientPort;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
SharedPtr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions; ///< Maximum number of simultaneous sessions the port can have
u32 active_sessions; ///< Number of currently open sessions to this port
std::string name; ///< Name of client port (optional)
SharedPtr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions; ///< Maximum number of simultaneous sessions the port can have
u32 active_sessions; ///< Number of currently open sessions to this port
std::string name; ///< Name of client port (optional)
protected:
ClientPort();

@ -2,20 +2,22 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <map>
#include <algorithm>
#include <map>
#include <vector>
#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
Event::Event() {}
Event::~Event() {}
Event::Event() {
}
Event::~Event() {
}
SharedPtr<Event> Event::Create(ResetType reset_type, std::string name) {
SharedPtr<Event> evt(new Event);

@ -16,7 +16,6 @@ enum class ResetType {
Pulse,
};
class Event final : public WaitObject {
public:
/**
@ -26,16 +25,22 @@ public:
*/
static SharedPtr<Event> Create(ResetType reset_type, std::string name = "Unknown");
std::string GetTypeName() const override { return "Event"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "Event";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::Event;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
ResetType reset_type; ///< Current ResetType
ResetType reset_type; ///< Current ResetType
bool signaled; ///< Whether the event has already been signaled
std::string name; ///< Name of event (optional)
bool signaled; ///< Whether the event has already been signaled
std::string name; ///< Name of event (optional)
bool ShouldWait() override;
void Acquire() override;

@ -61,7 +61,8 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
// Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
// CTR-OS doesn't use generation 0, so skip straight to 1.
if (next_generation >= (1 << 15)) next_generation = 1;
if (next_generation >= (1 << 15))
next_generation = 1;
generations[slot] = generation;
objects[slot] = std::move(obj);

@ -23,48 +23,55 @@ class Thread;
// TODO: Verify code
const ResultCode ERR_OUT_OF_HANDLES(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
ErrorSummary::OutOfResource, ErrorLevel::Temporary);
ErrorSummary::OutOfResource, ErrorLevel::Temporary);
// TODO: Verify code
const ResultCode ERR_INVALID_HANDLE(ErrorDescription::InvalidHandle, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
enum class HandleType : u32 {
Unknown = 0,
Unknown = 0,
Session = 2,
Event = 3,
Mutex = 4,
SharedMemory = 5,
Redirection = 6,
Thread = 7,
Process = 8,
AddressArbiter = 9,
Semaphore = 10,
Timer = 11,
ResourceLimit = 12,
CodeSet = 13,
ClientPort = 14,
ServerPort = 15,
Session = 2,
Event = 3,
Mutex = 4,
SharedMemory = 5,
Redirection = 6,
Thread = 7,
Process = 8,
AddressArbiter = 9,
Semaphore = 10,
Timer = 11,
ResourceLimit = 12,
CodeSet = 13,
ClientPort = 14,
ServerPort = 15,
};
enum {
DEFAULT_STACK_SIZE = 0x4000,
DEFAULT_STACK_SIZE = 0x4000,
};
class Object : NonCopyable {
public:
virtual ~Object() {}
virtual ~Object() {
}
/// Returns a unique identifier for the object. For debugging purposes only.
unsigned int GetObjectId() const { return object_id; }
unsigned int GetObjectId() const {
return object_id;
}
virtual std::string GetTypeName() const { return "[BAD KERNEL OBJECT TYPE]"; }
virtual std::string GetName() const { return "[UNKNOWN KERNEL OBJECT]"; }
virtual std::string GetTypeName() const {
return "[BAD KERNEL OBJECT TYPE]";
}
virtual std::string GetName() const {
return "[UNKNOWN KERNEL OBJECT]";
}
virtual Kernel::HandleType GetHandleType() const = 0;
/**
@ -122,7 +129,6 @@ using SharedPtr = boost::intrusive_ptr<T>;
/// Class that represents a Kernel object that a thread can be waiting on
class WaitObject : public Object {
public:
/**
* Check if the current thread should wait until the object is available
* @return True if the current thread should wait due to this object being unavailable
@ -247,8 +253,12 @@ private:
*/
static const size_t MAX_COUNT = 4096;
static u16 GetSlot(Handle handle) { return handle >> 15; }
static u16 GetGeneration(Handle handle) { return handle & 0x7FFF; }
static u16 GetSlot(Handle handle) {
return handle >> 15;
}
static u16 GetGeneration(Handle handle) {
return handle & 0x7FFF;
}
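// For illustration: these accessors imply a handle layout of
// handle = (slot << 15) | generation, with the generation kept in the low 15 bits
// (which is also why Create() wraps next_generation before it exceeds 1 << 15).
// Example: slot 3 with generation 1 packs to (3 << 15) | 1 == 0x18001, from which
// GetSlot() recovers 3 and GetGeneration() recovers 1.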
/// Stores the Object referenced by the handle or null if the slot is empty.
std::array<SharedPtr<Object>, MAX_COUNT> objects;

@ -31,7 +31,7 @@ static MemoryRegionInfo memory_regions[3];
static const u32 memory_region_sizes[8][3] = {
// Old 3DS layouts
{0x04000000, 0x02C00000, 0x01400000}, // 0
{ /* This appears to be unused. */ }, // 1
{/* This appears to be unused. */}, // 1
{0x06000000, 0x00C00000, 0x01400000}, // 2
{0x05000000, 0x01C00000, 0x01400000}, // 3
{0x04800000, 0x02400000, 0x01400000}, // 4
@ -95,7 +95,6 @@ MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) {
UNREACHABLE();
}
}
}
namespace Memory {
@ -110,9 +109,8 @@ struct MemoryArea {
// We don't declare the IO regions in here since they're handled by other means.
static MemoryArea memory_areas[] = {
{VRAM_VADDR, VRAM_SIZE, "VRAM"}, // Video memory (VRAM)
{VRAM_VADDR, VRAM_SIZE, "VRAM"}, // Video memory (VRAM)
};
}
void Init() {
@ -125,15 +123,21 @@ void InitLegacyAddressSpace(Kernel::VMManager& address_space) {
for (MemoryArea& area : memory_areas) {
auto block = std::make_shared<std::vector<u8>>(area.size);
address_space.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private).Unwrap();
address_space
.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private)
.Unwrap();
}
auto cfg_mem_vma = address_space.MapBackingMemory(CONFIG_MEMORY_VADDR,
(u8*)&ConfigMem::config_mem, CONFIG_MEMORY_SIZE, MemoryState::Shared).MoveFrom();
auto cfg_mem_vma = address_space
.MapBackingMemory(CONFIG_MEMORY_VADDR, (u8*)&ConfigMem::config_mem,
CONFIG_MEMORY_SIZE, MemoryState::Shared)
.MoveFrom();
address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR,
(u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom();
auto shared_page_vma = address_space
.MapBackingMemory(SHARED_PAGE_VADDR, (u8*)&SharedPage::shared_page,
SHARED_PAGE_SIZE, MemoryState::Shared)
.MoveFrom();
address_space.Reprotect(shared_page_vma, VMAPermission::Read);
AudioCore::AddAddressSpace(address_space);

@ -25,7 +25,6 @@ struct MemoryRegionInfo {
void MemoryInit(u32 mem_type);
void MemoryShutdown();
MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);
}
namespace Memory {

@ -33,8 +33,10 @@ void ReleaseThreadMutexes(Thread* thread) {
thread->held_mutexes.clear();
}
Mutex::Mutex() {}
Mutex::~Mutex() {}
Mutex::Mutex() {
}
Mutex::~Mutex() {
}
SharedPtr<Mutex> Mutex::Create(bool initial_locked, std::string name) {
SharedPtr<Mutex> mutex(new Mutex);

@ -24,15 +24,21 @@ public:
*/
static SharedPtr<Mutex> Create(bool initial_locked, std::string name = "Unknown");
std::string GetTypeName() const override { return "Mutex"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "Mutex";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::Mutex;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
int lock_count; ///< Number of times the mutex has been acquired
std::string name; ///< Name of mutex (optional)
SharedPtr<Thread> holding_thread; ///< Thread that has acquired the mutex
int lock_count; ///< Number of times the mutex has been acquired
std::string name; ///< Name of mutex (optional)
SharedPtr<Thread> holding_thread; ///< Thread that has acquired the mutex
bool ShouldWait() override;
void Acquire() override;

@ -26,8 +26,10 @@ SharedPtr<CodeSet> CodeSet::Create(std::string name, u64 program_id) {
return codeset;
}
CodeSet::CodeSet() {}
CodeSet::~CodeSet() {}
CodeSet::CodeSet() {
}
CodeSet::~CodeSet() {
}
u32 Process::next_process_id;
@ -60,7 +62,8 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
while (bits && index < svc_access_mask.size()) {
svc_access_mask.set(index, bits & 1);
++index; bits >>= 1;
++index;
bits >>= 1;
}
} else if ((type & 0xFF0) == 0xFE0) { // 0x00FF
// Handle table size
@ -70,11 +73,11 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
flags.raw = descriptor & 0xFFFF;
} else if ((type & 0xFFE) == 0xFF8) { // 0x001F
// Mapped memory range
if (i+1 >= len || ((kernel_caps[i+1] >> 20) & 0xFFE) != 0xFF8) {
if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
continue;
}
u32 end_desc = kernel_caps[i+1];
u32 end_desc = kernel_caps[i + 1];
++i; // Skip over the second descriptor on the next iteration
AddressMapping mapping;
@ -107,23 +110,28 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
void Process::Run(s32 main_thread_priority, u32 stack_size) {
memory_region = GetMemoryRegion(flags.memory_region);
auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) {
auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory,
segment.offset, segment.size, memory_state).Unwrap();
auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
auto vma = vm_manager
.MapMemoryBlock(segment.addr, codeset->memory, segment.offset, segment.size,
memory_state)
.Unwrap();
vm_manager.Reprotect(vma, permissions);
misc_memory_used += segment.size;
memory_region->used += segment.size;
};
// Map CodeSet segments
MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code);
MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private);
MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code);
MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private);
// Allocate and map stack
vm_manager.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, MemoryState::Locked
).Unwrap();
vm_manager
.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
MemoryState::Locked)
.Unwrap();
misc_memory_used += stack_size;
memory_region->used += stack_size;
@ -143,7 +151,8 @@ VAddr Process::GetLinearHeapLimit() const {
}
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
target + size < target) {
return ERR_INVALID_ADDRESS;
}
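// For illustration: the `target + size < target` clause guards against unsigned
// wrap-around; a hypothetical target = 0xFFFFF000 with size = 0x2000 wraps to 0x1000
// and would otherwise slip past the two range checks before it.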
@ -166,7 +175,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
}
ASSERT(heap_end - heap_start == heap_memory->size());
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, size, MemoryState::Private));
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
size, MemoryState::Private));
vm_manager.Reprotect(vma, perms);
heap_used += size;
@ -176,7 +186,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
}
ResultCode Process::HeapFree(VAddr target, u32 size) {
if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
target + size < target) {
return ERR_INVALID_ADDRESS;
}
@ -185,7 +196,8 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
}
ResultCode result = vm_manager.UnmapRange(target, size);
if (result.IsError()) return result;
if (result.IsError())
return result;
heap_used -= size;
memory_region->used -= size;
@ -203,8 +215,8 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
target = heap_end;
}
if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
target > heap_end || target + size < target) {
if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target > heap_end ||
target + size < target) {
return ERR_INVALID_ADDRESS;
}
@ -220,7 +232,8 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
// TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
// same region. It is unknown if or how the 3DS kernel checks against this.
size_t offset = target - GetLinearHeapBase();
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, MemoryState::Continuous));
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size,
MemoryState::Continuous));
vm_manager.Reprotect(vma, perms);
linear_heap_used += size;
@ -248,7 +261,8 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
}
ResultCode result = vm_manager.UnmapRange(target, size);
if (result.IsError()) return result;
if (result.IsError())
return result;
linear_heap_used -= size;
memory_region->used -= size;
@ -268,9 +282,10 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
return RESULT_SUCCESS;
}
Kernel::Process::Process() {}
Kernel::Process::~Process() {}
Kernel::Process::Process() {
}
Kernel::Process::~Process() {
}
SharedPtr<Process> g_current_process;
}

@ -36,15 +36,18 @@ enum class MemoryRegion : u16 {
union ProcessFlags {
u16 raw;
BitField< 0, 1, u16> allow_debug; ///< Allows other processes to attach to and debug this process.
BitField< 1, 1, u16> force_debug; ///< Allows this process to attach to processes even if they don't have allow_debug set.
BitField< 2, 1, u16> allow_nonalphanum;
BitField< 3, 1, u16> shared_page_writable; ///< Shared page is mapped with write permissions.
BitField< 4, 1, u16> privileged_priority; ///< Can use priority levels higher than 24.
BitField< 5, 1, u16> allow_main_args;
BitField< 6, 1, u16> shared_device_mem;
BitField< 7, 1, u16> runnable_on_sleep;
BitField< 8, 4, MemoryRegion> memory_region; ///< Default region for memory allocations for this process
BitField<0, 1, u16>
allow_debug; ///< Allows other processes to attach to and debug this process.
BitField<1, 1, u16> force_debug; ///< Allows this process to attach to processes even if they
/// don't have allow_debug set.
BitField<2, 1, u16> allow_nonalphanum;
BitField<3, 1, u16> shared_page_writable; ///< Shared page is mapped with write permissions.
BitField<4, 1, u16> privileged_priority; ///< Can use priority levels higher than 24.
BitField<5, 1, u16> allow_main_args;
BitField<6, 1, u16> shared_device_mem;
BitField<7, 1, u16> runnable_on_sleep;
BitField<8, 4, MemoryRegion>
memory_region; ///< Default region for memory allocations for this process
BitField<12, 1, u16> loaded_high; ///< Application loaded high (not at 0x00100000).
};
@ -54,11 +57,17 @@ struct MemoryRegionInfo;
struct CodeSet final : public Object {
static SharedPtr<CodeSet> Create(std::string name, u64 program_id);
std::string GetTypeName() const override { return "CodeSet"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "CodeSet";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::CodeSet;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/// Name of the process
std::string name;
@ -85,11 +94,17 @@ class Process final : public Object {
public:
static SharedPtr<Process> Create(SharedPtr<CodeSet> code_set);
std::string GetTypeName() const override { return "Process"; }
std::string GetName() const override { return codeset->name; }
std::string GetTypeName() const override {
return "Process";
}
std::string GetName() const override {
return codeset->name;
}
static const HandleType HANDLE_TYPE = HandleType::Process;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
static u32 next_process_id;
@ -124,7 +139,6 @@ public:
*/
void Run(s32 main_thread_priority, u32 stack_size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Memory Management
@ -144,7 +158,8 @@ public:
/// The Thread Local Storage area is allocated as processes create threads;
/// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
/// holds the TLS for a specific thread. This vector contains which parts are in use for each page as a bitmask.
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
/// page as a bitmask.
/// This vector will grow as more pages are allocated for new threads.
std::vector<std::bitset<8>> tls_slots;
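// For illustration, using the numbers in the comment above: with 0x200-byte TLS entries
// in a 0x1000-byte page, slot 3 of page 0 corresponds to
// TLS_AREA_VADDR + 0 * 0x1000 + 3 * 0x200 (the same formula Thread::Create applies later
// in this commit), and tls_slots[0].test(3) reports whether that slot is in use.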
@ -164,5 +179,4 @@ private:
};
extern SharedPtr<Process> g_current_process;
}

@ -12,8 +12,10 @@ namespace Kernel {
static SharedPtr<ResourceLimit> resource_limits[4];
ResourceLimit::ResourceLimit() {}
ResourceLimit::~ResourceLimit() {}
ResourceLimit::ResourceLimit() {
}
ResourceLimit::~ResourceLimit() {
}
SharedPtr<ResourceLimit> ResourceLimit::Create(std::string name) {
SharedPtr<ResourceLimit> resource_limit(new ResourceLimit);
@ -23,70 +25,69 @@ SharedPtr<ResourceLimit> ResourceLimit::Create(std::string name) {
}
SharedPtr<ResourceLimit> ResourceLimit::GetForCategory(ResourceLimitCategory category) {
switch (category)
{
case ResourceLimitCategory::APPLICATION:
case ResourceLimitCategory::SYS_APPLET:
case ResourceLimitCategory::LIB_APPLET:
case ResourceLimitCategory::OTHER:
return resource_limits[static_cast<u8>(category)];
default:
LOG_CRITICAL(Kernel, "Unknown resource limit category");
UNREACHABLE();
switch (category) {
case ResourceLimitCategory::APPLICATION:
case ResourceLimitCategory::SYS_APPLET:
case ResourceLimitCategory::LIB_APPLET:
case ResourceLimitCategory::OTHER:
return resource_limits[static_cast<u8>(category)];
default:
LOG_CRITICAL(Kernel, "Unknown resource limit category");
UNREACHABLE();
}
}
s32 ResourceLimit::GetCurrentResourceValue(u32 resource) const {
switch (resource) {
case COMMIT:
return current_commit;
case THREAD:
return current_threads;
case EVENT:
return current_events;
case MUTEX:
return current_mutexes;
case SEMAPHORE:
return current_semaphores;
case TIMER:
return current_timers;
case SHARED_MEMORY:
return current_shared_mems;
case ADDRESS_ARBITER:
return current_address_arbiters;
case CPU_TIME:
return current_cpu_time;
default:
LOG_ERROR(Kernel, "Unknown resource type=%08X", resource);
UNIMPLEMENTED();
return 0;
case COMMIT:
return current_commit;
case THREAD:
return current_threads;
case EVENT:
return current_events;
case MUTEX:
return current_mutexes;
case SEMAPHORE:
return current_semaphores;
case TIMER:
return current_timers;
case SHARED_MEMORY:
return current_shared_mems;
case ADDRESS_ARBITER:
return current_address_arbiters;
case CPU_TIME:
return current_cpu_time;
default:
LOG_ERROR(Kernel, "Unknown resource type=%08X", resource);
UNIMPLEMENTED();
return 0;
}
}
s32 ResourceLimit::GetMaxResourceValue(u32 resource) const {
switch (resource) {
case COMMIT:
return max_commit;
case THREAD:
return max_threads;
case EVENT:
return max_events;
case MUTEX:
return max_mutexes;
case SEMAPHORE:
return max_semaphores;
case TIMER:
return max_timers;
case SHARED_MEMORY:
return max_shared_mems;
case ADDRESS_ARBITER:
return max_address_arbiters;
case CPU_TIME:
return max_cpu_time;
default:
LOG_ERROR(Kernel, "Unknown resource type=%08X", resource);
UNIMPLEMENTED();
return 0;
case COMMIT:
return max_commit;
case THREAD:
return max_threads;
case EVENT:
return max_events;
case MUTEX:
return max_mutexes;
case SEMAPHORE:
return max_semaphores;
case TIMER:
return max_timers;
case SHARED_MEMORY:
return max_shared_mems;
case ADDRESS_ARBITER:
return max_address_arbiters;
case CPU_TIME:
return max_cpu_time;
default:
LOG_ERROR(Kernel, "Unknown resource type=%08X", resource);
UNIMPLEMENTED();
return 0;
}
}
@ -150,7 +151,6 @@ void ResourceLimitsInit() {
}
void ResourceLimitsShutdown() {
}
} // namespace

@ -12,22 +12,22 @@ namespace Kernel {
enum class ResourceLimitCategory : u8 {
APPLICATION = 0,
SYS_APPLET = 1,
LIB_APPLET = 2,
OTHER = 3
SYS_APPLET = 1,
LIB_APPLET = 2,
OTHER = 3
};
enum ResourceTypes {
PRIORITY = 0,
COMMIT = 1,
THREAD = 2,
EVENT = 3,
MUTEX = 4,
SEMAPHORE = 5,
TIMER = 6,
SHARED_MEMORY = 7,
ADDRESS_ARBITER = 8,
CPU_TIME = 9,
PRIORITY = 0,
COMMIT = 1,
THREAD = 2,
EVENT = 3,
MUTEX = 4,
SEMAPHORE = 5,
TIMER = 6,
SHARED_MEMORY = 7,
ADDRESS_ARBITER = 8,
CPU_TIME = 9,
};
class ResourceLimit final : public Object {
@ -44,11 +44,17 @@ public:
*/
static SharedPtr<ResourceLimit> GetForCategory(ResourceLimitCategory category);
std::string GetTypeName() const override { return "ResourceLimit"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "ResourceLimit";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::ResourceLimit;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Gets the current value for the specified resource.
@ -85,10 +91,12 @@ public:
/// Max CPU time that the processes in this category can utilize
s32 max_cpu_time = 0;
// TODO(Subv): Increment these in their respective Kernel::T::Create functions, keeping in mind that
// TODO(Subv): Increment these in their respective Kernel::T::Create functions, keeping in mind
// that
// APPLICATION resource limits should not be affected by the objects created by service modules.
// Currently we have no way of distinguishing if a Create was called by the running application,
// or by a service module. Approach this once we have separated the service modules into their own processes
// or by a service module. Approach this once we have separated the service modules into their
// own processes
/// Current memory that the processes in this category are using
s32 current_commit = 0;

@ -10,11 +10,13 @@
namespace Kernel {
Semaphore::Semaphore() {}
Semaphore::~Semaphore() {}
Semaphore::Semaphore() {
}
Semaphore::~Semaphore() {
}
ResultVal<SharedPtr<Semaphore>> Semaphore::Create(s32 initial_count, s32 max_count,
std::string name) {
std::string name) {
if (initial_count > max_count)
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::Kernel,

@ -23,17 +23,23 @@ public:
* @return The created semaphore
*/
static ResultVal<SharedPtr<Semaphore>> Create(s32 initial_count, s32 max_count,
std::string name = "Unknown");
std::string name = "Unknown");
std::string GetTypeName() const override { return "Semaphore"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "Semaphore";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::Semaphore;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
s32 max_count; ///< Maximum number of simultaneous holders the semaphore can have
s32 available_count; ///< Number of free slots left in the semaphore
std::string name; ///< Name of semaphore (optional)
s32 max_count; ///< Maximum number of simultaneous holders the semaphore can have
s32 available_count; ///< Number of free slots left in the semaphore
std::string name; ///< Name of semaphore (optional)
bool ShouldWait() override;
void Acquire() override;

@ -13,8 +13,10 @@
namespace Kernel {
ServerPort::ServerPort() {}
ServerPort::~ServerPort() {}
ServerPort::ServerPort() {
}
ServerPort::~ServerPort() {
}
bool ServerPort::ShouldWait() {
// If there are no pending sessions, we wait until a new one is added.
@ -25,7 +27,8 @@ void ServerPort::Acquire() {
ASSERT_MSG(!ShouldWait(), "object unavailable!");
}
std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> ServerPort::CreatePortPair(u32 max_sessions, std::string name) {
std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>>
ServerPort::CreatePortPair(u32 max_sessions, std::string name) {
SharedPtr<ServerPort> server_port(new ServerPort);
SharedPtr<ClientPort> client_port(new ClientPort);

@ -23,17 +23,25 @@ public:
* @param name Optional name of the ports
* @return The created port tuple
*/
static std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> CreatePortPair(u32 max_sessions, std::string name = "UnknownPort");
static std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>>
CreatePortPair(u32 max_sessions, std::string name = "UnknownPort");
std::string GetTypeName() const override { return "ServerPort"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "ServerPort";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::ServerPort;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::string name; ///< Name of port (optional)
std::string name; ///< Name of port (optional)
std::vector<SharedPtr<WaitObject>> pending_sessions; ///< ServerSessions waiting to be accepted by the port
std::vector<SharedPtr<WaitObject>>
pending_sessions; ///< ServerSessions waiting to be accepted by the port
bool ShouldWait() override;
void Acquire() override;

@ -7,7 +7,8 @@
namespace Kernel {
Session::Session() {}
Session::~Session() {}
Session::Session() {
}
Session::~Session() {
}
}

@ -19,12 +19,13 @@ namespace IPC {
enum DescriptorType : u32 {
// Buffer related descriptor types (mask : 0x0F)
StaticBuffer = 0x02,
PXIBuffer = 0x04,
PXIBuffer = 0x04,
MappedBuffer = 0x08,
// Handle related descriptor types (mask : 0x30, but need to check for buffer related descriptors first)
CopyHandle = 0x00,
MoveHandle = 0x10,
CallingPid = 0x20,
// Handle related descriptor types (mask : 0x30, but need to check for buffer related
// descriptors first)
CopyHandle = 0x00,
MoveHandle = 0x10,
CallingPid = 0x20,
};
/**
@ -34,24 +35,28 @@ enum DescriptorType : u32 {
* @param translate_params_size Size of the translate parameters in words. Up to 63.
* @return The created IPC header.
*
* Normal parameters are sent directly to the process while the translate parameters might go through modifications and checks by the kernel.
* Normal parameters are sent directly to the process while the translate parameters might go
* through modifications and checks by the kernel.
* The translate parameters are described by headers generated with the IPC::*Desc functions.
*
* @note While #normal_params is equivalent to the number of normal parameters, #translate_params_size includes the size occupied by the translate parameters headers.
* @note While #normal_params is equivalent to the number of normal parameters,
* #translate_params_size includes the size occupied by the translate parameters headers.
*/
constexpr u32 MakeHeader(u16 command_id, unsigned int normal_params, unsigned int translate_params_size) {
return (u32(command_id) << 16) | ((u32(normal_params) & 0x3F) << 6) | (u32(translate_params_size) & 0x3F);
constexpr u32 MakeHeader(u16 command_id, unsigned int normal_params,
unsigned int translate_params_size) {
return (u32(command_id) << 16) | ((u32(normal_params) & 0x3F) << 6) |
(u32(translate_params_size) & 0x3F);
}
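// Worked example: a hypothetical command with id 0x0008, one normal parameter and two
// translate words packs to (0x0008 << 16) | (1 << 6) | 2 == 0x00080042.
static_assert(MakeHeader(0x0008, 1, 2) == 0x00080042,
              "illustration of the IPC header bit layout");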
union Header {
u32 raw;
BitField< 0, 6, u32> translate_params_size;
BitField< 6, 6, u32> normal_params;
BitField<0, 6, u32> translate_params_size;
BitField<6, 6, u32> normal_params;
BitField<16, 16, u32> command_id;
};
inline Header ParseHeader(u32 header) {
return{ header };
return {header};
}
constexpr u32 MoveHandleDesc(u32 num_handles = 1) {
@ -80,27 +85,29 @@ constexpr u32 StaticBufferDesc(u32 size, u8 buffer_id) {
union StaticBufferDescInfo {
u32 raw;
BitField< 10, 4, u32> buffer_id;
BitField< 14, 18, u32> size;
BitField<10, 4, u32> buffer_id;
BitField<14, 18, u32> size;
};
inline StaticBufferDescInfo ParseStaticBufferDesc(const u32 desc) {
return{ desc };
return {desc};
}
/**
* @brief Creates a header describing a buffer to be sent over PXI.
* @param size Size of the buffer. Max 0x00FFFFFF.
* @param buffer_id The Id of the buffer. Max 0xF.
* @param is_read_only true if the buffer is read-only. If false, the buffer is considered to have read-write access.
* @param is_read_only true if the buffer is read-only. If false, the buffer is considered to have
* read-write access.
* @return The created PXI buffer header.
*
* The next value is a phys-address of a table located in the BASE memregion.
*/
inline u32 PXIBufferDesc(u32 size, unsigned buffer_id, bool is_read_only) {
u32 type = PXIBuffer;
if (is_read_only) type |= 0x2;
return type | (size << 8) | ((buffer_id & 0xF) << 4);
if (is_read_only)
type |= 0x2;
return type | (size << 8) | ((buffer_id & 0xF) << 4);
}
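// For illustration: PXIBufferDesc(0x100, 1, true) evaluates to
// (PXIBuffer | 0x2) | (0x100 << 8) | (1 << 4) == 0x10016, i.e. the read-only flag in
// bit 1, the buffer id in bits 4-7 and the size in the bits above (example values only).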
enum MappedBufferPermissions {
@ -115,12 +122,12 @@ constexpr u32 MappedBufferDesc(u32 size, MappedBufferPermissions perms) {
union MappedBufferDescInfo {
u32 raw;
BitField< 4, 28, u32> size;
BitField< 1, 2, MappedBufferPermissions> perms;
BitField<4, 28, u32> size;
BitField<1, 2, MappedBufferPermissions> perms;
};
inline MappedBufferDescInfo ParseMappedBufferDesc(const u32 desc) {
return{ desc };
return {desc};
}
inline DescriptorType GetDescriptorType(u32 descriptor) {
@ -153,7 +160,8 @@ static const int kCommandHeaderOffset = 0x80; ///< Offset into command buffer of
* @return Pointer to command buffer
*/
inline u32* GetCommandBuffer(const int offset = 0) {
return (u32*)Memory::GetPointer(GetCurrentThread()->GetTLSAddress() + kCommandHeaderOffset + offset);
return (u32*)Memory::GetPointer(GetCurrentThread()->GetTLSAddress() + kCommandHeaderOffset +
offset);
}
/**
@ -183,10 +191,14 @@ public:
Session();
~Session() override;
std::string GetTypeName() const override { return "Session"; }
std::string GetTypeName() const override {
return "Session";
}
static const HandleType HANDLE_TYPE = HandleType::Session;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Handles a synchronous call to this session using HLE emulation. Emulated <-> emulated calls
@ -205,5 +217,4 @@ public:
ASSERT_MSG(!ShouldWait(), "object unavailable!");
}
};
}

@ -6,17 +6,21 @@
#include "common/logging/log.h"
#include "core/memory.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/memory.h"
namespace Kernel {
SharedMemory::SharedMemory() {}
SharedMemory::~SharedMemory() {}
SharedMemory::SharedMemory() {
}
SharedMemory::~SharedMemory() {
}
SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address, MemoryRegion region, std::string name) {
SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address,
MemoryRegion region, std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory);
shared_memory->owner_process = owner_process;
@ -31,7 +35,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u
MemoryRegionInfo* memory_region = GetMemoryRegion(region);
auto& linheap_memory = memory_region->linear_heap_memory;
ASSERT_MSG(linheap_memory->size() + size <= memory_region->size, "Not enough space in region to allocate shared memory!");
ASSERT_MSG(linheap_memory->size() + size <= memory_region->size,
"Not enough space in region to allocate shared memory!");
shared_memory->backing_block = linheap_memory;
shared_memory->backing_block_offset = linheap_memory->size();
@ -39,7 +44,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u
linheap_memory->insert(linheap_memory->end(), size, 0);
memory_region->used += size;
shared_memory->linear_heap_phys_address = Memory::FCRAM_PADDR + memory_region->base + shared_memory->backing_block_offset;
shared_memory->linear_heap_phys_address =
Memory::FCRAM_PADDR + memory_region->base + shared_memory->backing_block_offset;
// Increase the amount of used linear heap memory for the owner process.
if (shared_memory->owner_process != nullptr) {
@ -51,18 +57,20 @@ SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u
Kernel::g_current_process->vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
}
} else {
// TODO(Subv): What happens if an application tries to create multiple memory blocks pointing to the same address?
// TODO(Subv): What happens if an application tries to create multiple memory blocks
// pointing to the same address?
auto& vm_manager = shared_memory->owner_process->vm_manager;
// The memory is already available and mapped in the owner process.
auto vma = vm_manager.FindVMA(address)->second;
// Copy it over to our own storage
shared_memory->backing_block = std::make_shared<std::vector<u8>>(vma.backing_block->data() + vma.offset,
vma.backing_block->data() + vma.offset + size);
shared_memory->backing_block = std::make_shared<std::vector<u8>>(
vma.backing_block->data() + vma.offset, vma.backing_block->data() + vma.offset + size);
shared_memory->backing_block_offset = 0;
// Unmap the existing pages
vm_manager.UnmapRange(address, size);
// Map our own block into the address space
vm_manager.MapMemoryBlock(address, shared_memory->backing_block, 0, size, MemoryState::Shared);
vm_manager.MapMemoryBlock(address, shared_memory->backing_block, 0, size,
MemoryState::Shared);
// Reprotect the block with the new permissions
vm_manager.ReprotectRange(address, size, ConvertPermissions(permissions));
}
@ -71,8 +79,11 @@ SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u
return shared_memory;
}
SharedPtr<SharedMemory> SharedMemory::CreateForApplet(std::shared_ptr<std::vector<u8>> heap_block, u32 offset, u32 size,
MemoryPermission permissions, MemoryPermission other_permissions, std::string name) {
SharedPtr<SharedMemory> SharedMemory::CreateForApplet(std::shared_ptr<std::vector<u8>> heap_block,
u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory);
shared_memory->owner_process = nullptr;
@ -88,27 +99,31 @@ SharedPtr<SharedMemory> SharedMemory::CreateForApplet(std::shared_ptr<std::vecto
}
ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions) {
MemoryPermission other_permissions) {
MemoryPermission own_other_permissions = target_process == owner_process ? this->permissions : this->other_permissions;
MemoryPermission own_other_permissions =
target_process == owner_process ? this->permissions : this->other_permissions;
// Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
if (base_address == 0 && other_permissions != MemoryPermission::DontCare) {
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS, ErrorSummary::InvalidArgument, ErrorLevel::Usage);
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%08X name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS, ErrorSummary::InvalidArgument, ErrorLevel::Usage);
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}
// Heap-backed memory blocks can not be mapped with other_permissions = DontCare
if (base_address != 0 && other_permissions == MemoryPermission::DontCare) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%08X name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS, ErrorSummary::InvalidArgument, ErrorLevel::Usage);
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}
// Error out if the provided permissions are not compatible with what the creator process needs.
@ -116,12 +131,14 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map id=%u, address=0x%08X name=%s, permissions don't match",
GetObjectId(), address, name.c_str());
return ResultCode(ErrorDescription::WrongPermission, ErrorModule::OS, ErrorSummary::WrongArgument, ErrorLevel::Permanent);
return ResultCode(ErrorDescription::WrongPermission, ErrorModule::OS,
ErrorSummary::WrongArgument, ErrorLevel::Permanent);
}
// TODO(Subv): Check for the Shared Device Mem flag in the creator process.
/*if (was_created_with_shared_device_mem && address != 0) {
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS, ErrorSummary::InvalidArgument, ErrorLevel::Usage);
return ResultCode(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}*/
// TODO(Subv): The same process that created a SharedMemory object
@ -144,23 +161,29 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
}
// Map the memory block into the target process
auto result = target_process->vm_manager.MapMemoryBlock(target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
auto result = target_process->vm_manager.MapMemoryBlock(
target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
if (result.Failed()) {
LOG_ERROR(Kernel, "cannot map id=%u, target_address=0x%08X name=%s, error mapping to virtual memory",
GetObjectId(), target_address, name.c_str());
LOG_ERROR(
Kernel,
"cannot map id=%u, target_address=0x%08X name=%s, error mapping to virtual memory",
GetObjectId(), target_address, name.c_str());
return result.Code();
}
return target_process->vm_manager.ReprotectRange(target_address, size, ConvertPermissions(permissions));
return target_process->vm_manager.ReprotectRange(target_address, size,
ConvertPermissions(permissions));
}
ResultCode SharedMemory::Unmap(Process* target_process, VAddr address) {
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not mapped to a SharedMemory.
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process->vm_manager.UnmapRange(address, size);
}
VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) {
u32 masked_permissions = static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
u32 masked_permissions =
static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
return static_cast<VMAPermission>(masked_permissions);
};
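// For illustration: MemoryPermission::ReadWrite is Read | Write == 0b011; masking with
// ReadWriteExecute (0b111) leaves it unchanged, while a bit such as DontCare (1u << 28)
// is stripped. The final static_cast assumes VMAPermission encodes read/write/execute
// with the same low bits.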

@ -16,15 +16,15 @@ namespace Kernel {
/// Permissions for mapped shared memory blocks
enum class MemoryPermission : u32 {
None = 0,
Read = (1u << 0),
Write = (1u << 1),
ReadWrite = (Read | Write),
Execute = (1u << 2),
ReadExecute = (Read | Execute),
WriteExecute = (Write | Execute),
None = 0,
Read = (1u << 0),
Write = (1u << 1),
ReadWrite = (Read | Write),
Execute = (1u << 2),
ReadExecute = (Read | Execute),
WriteExecute = (Write | Execute),
ReadWriteExecute = (Read | Write | Execute),
DontCare = (1u << 28)
DontCare = (1u << 28)
};
class SharedMemory final : public Object {
@ -34,13 +34,18 @@ public:
* @param owner_process Process that created this shared memory object.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param address The address from which to map the Shared Memory.
* @param region If the address is 0, the shared memory will be allocated in this region of the linear heap.
* @param region If the address is 0, the shared memory will be allocated in this region of the
* linear heap.
* @param name Optional object name, used for debugging purposes.
*/
static SharedPtr<SharedMemory> Create(SharedPtr<Process> owner_process, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address = 0, MemoryRegion region = MemoryRegion::BASE, std::string name = "Unknown");
static SharedPtr<SharedMemory> Create(SharedPtr<Process> owner_process, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address = 0,
MemoryRegion region = MemoryRegion::BASE,
std::string name = "Unknown");
/**
* Creates a shared memory object from a block of memory managed by an HLE applet.
@ -48,17 +53,27 @@ public:
* @param offset The offset into the heap block that the SharedMemory will map.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param name Optional object name, used for debugging purposes.
*/
static SharedPtr<SharedMemory> CreateForApplet(std::shared_ptr<std::vector<u8>> heap_block, u32 offset, u32 size,
MemoryPermission permissions, MemoryPermission other_permissions, std::string name = "Unknown Applet");
static SharedPtr<SharedMemory> CreateForApplet(std::shared_ptr<std::vector<u8>> heap_block,
u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
std::string GetTypeName() const override { return "SharedMemory"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "SharedMemory";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::SharedMemory;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Converts the specified MemoryPermission into the equivalent VMAPermission.
@ -73,7 +88,8 @@ public:
* @param permissions Memory block map permissions (specified by SVC field)
* @param other_permissions Memory block map other permissions (specified by SVC field)
*/
ResultCode Map(Process* target_process, VAddr address, MemoryPermission permissions, MemoryPermission other_permissions);
ResultCode Map(Process* target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions);
/**
* Unmaps a shared memory block from the specified address in system memory
@ -94,7 +110,8 @@ public:
SharedPtr<Process> owner_process;
/// Address of shared memory block in the owner process if specified.
VAddr base_address;
/// Physical address of the shared memory block in the linear heap if no address was specified during creation.
/// Physical address of the shared memory block in the linear heap if no address was specified
/// during creation.
PAddr linear_heap_phys_address;
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;

@ -18,10 +18,10 @@
#include "core/core_timing.h"
#include "core/hle/hle.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"
@ -46,7 +46,7 @@ static Kernel::HandleTable wakeup_callback_handle_table;
static std::vector<SharedPtr<Thread>> thread_list;
// Lists only ready thread ids.
static Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST+1> ready_queue;
static Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
static Thread* current_thread;
@ -61,8 +61,10 @@ inline static u32 const NewThreadId() {
return next_thread_id++;
}
Thread::Thread() {}
Thread::~Thread() {}
Thread::Thread() {
}
Thread::~Thread() {
}
Thread* GetCurrentThread() {
return current_thread;
@ -103,7 +105,7 @@ void Thread::Stop() {
// Clean up thread from ready queue
// This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
if (status == THREADSTATUS_READY){
if (status == THREADSTATUS_READY) {
ready_queue.remove(current_priority, this);
}
@ -119,7 +121,8 @@ void Thread::Stop() {
// Mark the TLS slot in the thread's page as free.
u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
u32 tls_slot = ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
u32 tls_slot =
((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot);
HLE::Reschedule(__func__);
@ -137,7 +140,7 @@ Thread* ArbitrateHighestPriorityThread(u32 address) {
if (thread == nullptr)
continue;
if(thread->current_priority <= priority) {
if (thread->current_priority <= priority) {
highest_priority_thread = thread.get();
priority = thread->current_priority;
}
@ -170,7 +173,7 @@ static void PriorityBoostStarvedThreads() {
// on hardware. However, this is almost certainly not perfect, and the real CTR OS scheduler
// should probably be reversed to verify this.
const u64 boost_timeout = 2000000; // Boost threads that have been ready for > this long
const u64 boost_timeout = 2000000; // Boost threads that have been ready for > this long
u64 delta = current_ticks - thread->last_running_ticks;
@ -193,10 +196,12 @@ static std::tuple<u32*, u32*> GetWaitSynchTimeoutParameterRegister(Thread* threa
if ((thumb_mode && thumb_inst == 0xDF24) || (!thumb_mode && inst == 0x0F000024)) {
// svc #0x24 (WaitSynchronization1)
return std::make_tuple(&thread->context.cpu_registers[2], &thread->context.cpu_registers[3]);
return std::make_tuple(&thread->context.cpu_registers[2],
&thread->context.cpu_registers[3]);
} else if ((thumb_mode && thumb_inst == 0xDF25) || (!thumb_mode && inst == 0x0F000025)) {
// svc #0x25 (WaitSynchronizationN)
return std::make_tuple(&thread->context.cpu_registers[0], &thread->context.cpu_registers[4]);
return std::make_tuple(&thread->context.cpu_registers[0],
&thread->context.cpu_registers[4]);
}
UNREACHABLE();
@ -245,7 +250,8 @@ static void SwitchContext(Thread* new_thread) {
// Load context of new thread
if (new_thread) {
DEBUG_ASSERT_MSG(new_thread->status == THREADSTATUS_READY, "Thread must be ready to become running.");
DEBUG_ASSERT_MSG(new_thread->status == THREADSTATUS_READY,
"Thread must be ready to become running.");
// Cancel any outstanding wakeup events for this thread
CoreTiming::UnscheduleEvent(ThreadWakeupEventType, new_thread->callback_handle);
@ -263,7 +269,7 @@ static void SwitchContext(Thread* new_thread) {
new_thread->context.pc -= thumb_mode ? 2 : 4;
// Get the register for timeout parameter
u32* timeout_low, *timeout_high;
u32 *timeout_low, *timeout_high;
std::tie(timeout_low, timeout_high) = GetWaitSynchTimeoutParameterRegister(new_thread);
// Update the timeout parameter
@ -307,7 +313,7 @@ static Thread* PopNextReadyThread() {
// Otherwise just keep going with the current thread
next = thread;
}
} else {
} else {
next = ready_queue.pop_first();
}
@ -321,7 +327,8 @@ void WaitCurrentThread_Sleep() {
HLE::Reschedule(__func__);
}
void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, bool wait_set_output, bool wait_all) {
void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects,
bool wait_set_output, bool wait_all) {
Thread* thread = GetCurrentThread();
thread->wait_set_output = wait_set_output;
thread->wait_all = wait_all;
@ -352,7 +359,8 @@ static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
if (thread->status == THREADSTATUS_WAIT_SYNCH || thread->status == THREADSTATUS_WAIT_ARB) {
thread->SetWaitSynchronizationResult(ResultCode(ErrorDescription::Timeout, ErrorModule::OS,
ErrorSummary::StatusChanged, ErrorLevel::Info));
ErrorSummary::StatusChanged,
ErrorLevel::Info));
if (thread->wait_set_output)
thread->SetWaitSynchronizationOutput(-1);
@ -372,25 +380,25 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
void Thread::ResumeFromWait() {
switch (status) {
case THREADSTATUS_WAIT_SYNCH:
case THREADSTATUS_WAIT_ARB:
case THREADSTATUS_WAIT_SLEEP:
break;
case THREADSTATUS_WAIT_SYNCH:
case THREADSTATUS_WAIT_ARB:
case THREADSTATUS_WAIT_SLEEP:
break;
case THREADSTATUS_READY:
// If the thread is waiting on multiple wait objects, it might be awoken more than once
// before actually resuming. We can ignore subsequent wakeups if the thread status has
// already been set to THREADSTATUS_READY.
return;
case THREADSTATUS_READY:
// If the thread is waiting on multiple wait objects, it might be awoken more than once
// before actually resuming. We can ignore subsequent wakeups if the thread status has
// already been set to THREADSTATUS_READY.
return;
case THREADSTATUS_RUNNING:
DEBUG_ASSERT_MSG(false, "Thread with object id %u has already resumed.", GetObjectId());
return;
case THREADSTATUS_DEAD:
// This should never happen, as threads must complete before being stopped.
DEBUG_ASSERT_MSG(false, "Thread with object id %u cannot be resumed because it's DEAD.",
GetObjectId());
return;
case THREADSTATUS_RUNNING:
DEBUG_ASSERT_MSG(false, "Thread with object id %u has already resumed.", GetObjectId());
return;
case THREADSTATUS_DEAD:
// This should never happen, as threads must complete before being stopped.
DEBUG_ASSERT_MSG(false, "Thread with object id %u cannot be resumed because it's DEAD.",
GetObjectId());
return;
}
ready_queue.push_back(current_priority, this);
@ -405,7 +413,8 @@ static void DebugThreadQueue() {
if (!thread) {
LOG_DEBUG(Kernel, "Current: NO CURRENT THREAD");
} else {
LOG_DEBUG(Kernel, "0x%02X %u (current)", thread->current_priority, GetCurrentThread()->GetObjectId());
LOG_DEBUG(Kernel, "0x%02X %u (current)", thread->current_priority,
GetCurrentThread()->GetObjectId());
}
for (auto& t : thread_list) {
@ -448,7 +457,8 @@ std::tuple<u32, u32, bool> GetFreeThreadLocalSlot(std::vector<std::bitset<8>>& t
* @param entry_point Address of entry point for execution
* @param arg User argument for thread
*/
static void ResetThreadContext(Core::ThreadContext& context, u32 stack_top, u32 entry_point, u32 arg) {
static void ResetThreadContext(Core::ThreadContext& context, u32 stack_top, u32 entry_point,
u32 arg) {
memset(&context, 0, sizeof(Core::ThreadContext));
context.cpu_registers[0] = arg;
@ -458,11 +468,11 @@ static void ResetThreadContext(Core::ThreadContext& context, u32 stack_top, u32
}
ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, s32 priority,
u32 arg, s32 processor_id, VAddr stack_top) {
u32 arg, s32 processor_id, VAddr stack_top) {
if (priority < THREADPRIO_HIGHEST || priority > THREADPRIO_LOWEST) {
s32 new_priority = MathUtil::Clamp<s32>(priority, THREADPRIO_HIGHEST, THREADPRIO_LOWEST);
LOG_WARNING(Kernel_SVC, "(name=%s): invalid priority=%d, clamping to %d",
name.c_str(), priority, new_priority);
LOG_WARNING(Kernel_SVC, "(name=%s): invalid priority=%d, clamping to %d", name.c_str(),
priority, new_priority);
// TODO(bunnei): Clamping to a valid priority is not necessarily correct behavior... Confirm
// validity of this
priority = new_priority;
@@ -472,7 +482,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
LOG_ERROR(Kernel_SVC, "(name=%s): invalid entry %08x", name.c_str(), entry_point);
// TODO: Verify error
return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
}
SharedPtr<Thread> thread(new Thread);
@@ -511,8 +521,10 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
auto& linheap_memory = memory_region->linear_heap_memory;
if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
LOG_ERROR(Kernel_SVC, "Not enough space in region to allocate a new TLS page for thread");
return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel, ErrorSummary::OutOfResource, ErrorLevel::Permanent);
LOG_ERROR(Kernel_SVC,
"Not enough space in region to allocate a new TLS page for thread");
return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
ErrorSummary::OutOfResource, ErrorLevel::Permanent);
}
u32 offset = linheap_memory->size();
@@ -537,7 +549,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
// Mark the slot as used
tls_slots[available_page].set(available_slot);
thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
available_slot * Memory::TLS_ENTRY_SIZE;
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
// to initialize the context
@@ -551,10 +564,12 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
return MakeResult<SharedPtr<Thread>>(std::move(thread));
}
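
Aside (not part of the commit): the TLS slot address computed a few lines above is plain arithmetic over a page index and a slot index. The stand-alone sketch below restates that formula; the base address and both sizes are illustrative stand-ins rather than the emulator's actual constants, so read it as a model of the computation, not as the project's code.

#include <cstdint>
#include <cstdio>

int main() {
    // Stand-in constants for the sketch; the real values live in the memory headers.
    constexpr std::uint32_t TLS_AREA_VADDR = 0x1FF82000; // placeholder base address
    constexpr std::uint32_t PAGE_SIZE = 0x1000;          // assumed 4 KiB pages
    constexpr std::uint32_t TLS_ENTRY_SIZE = 0x200;      // assumed 512-byte TLS slots

    const std::uint32_t available_page = 2; // example indices for one thread
    const std::uint32_t available_slot = 5;

    // Same shape as the tls_address assignment in Thread::Create above.
    const std::uint32_t tls_address =
        TLS_AREA_VADDR + available_page * PAGE_SIZE + available_slot * TLS_ENTRY_SIZE;

    std::printf("TLS entry at 0x%08X\n", static_cast<unsigned>(tls_address)); // 0x1FF84A00 here
    return 0;
}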
// TODO(peachum): Remove this. Range checking should be done, and an appropriate error should be returned.
// TODO(peachum): Remove this. Range checking should be done, and an appropriate error should be
// returned.
static void ClampPriority(const Thread* thread, s32* priority) {
if (*priority < THREADPRIO_HIGHEST || *priority > THREADPRIO_LOWEST) {
DEBUG_ASSERT_MSG(false, "Application passed an out of range priority. An error should be returned.");
DEBUG_ASSERT_MSG(
false, "Application passed an out of range priority. An error should be returned.");
s32 new_priority = MathUtil::Clamp<s32>(*priority, THREADPRIO_HIGHEST, THREADPRIO_LOWEST);
LOG_WARNING(Kernel_SVC, "(name=%s): invalid priority=%d, clamping to %d",
@@ -586,12 +601,13 @@ SharedPtr<Thread> SetupMainThread(u32 entry_point, s32 priority) {
DEBUG_ASSERT(!GetCurrentThread());
// Initialize new "main" thread
auto thread_res = Thread::Create("main", entry_point, priority, 0,
THREADPROCESSORID_0, Memory::HEAP_VADDR_END);
auto thread_res = Thread::Create("main", entry_point, priority, 0, THREADPROCESSORID_0,
Memory::HEAP_VADDR_END);
SharedPtr<Thread> thread = thread_res.MoveFrom();
thread->context.fpscr = FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO | FPSCR_IXC; // 0x03C00010
thread->context.fpscr =
FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO | FPSCR_IXC; // 0x03C00010
// Run new "main" thread
SwitchContext(thread.get());

View file

@@ -17,29 +17,29 @@
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
enum ThreadPriority : s32{
THREADPRIO_HIGHEST = 0, ///< Highest thread priority
THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
THREADPRIO_DEFAULT = 48, ///< Default thread priority for userland apps
THREADPRIO_LOWEST = 63, ///< Lowest thread priority
enum ThreadPriority : s32 {
THREADPRIO_HIGHEST = 0, ///< Highest thread priority
THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
THREADPRIO_DEFAULT = 48, ///< Default thread priority for userland apps
THREADPRIO_LOWEST = 63, ///< Lowest thread priority
};
enum ThreadProcessorId : s32 {
THREADPROCESSORID_DEFAULT = -2, ///< Run thread on default core specified by exheader
THREADPROCESSORID_ALL = -1, ///< Run thread on either core
THREADPROCESSORID_0 = 0, ///< Run thread on core 0 (AppCore)
THREADPROCESSORID_1 = 1, ///< Run thread on core 1 (SysCore)
THREADPROCESSORID_MAX = 2, ///< Processor ID must be less than this
THREADPROCESSORID_DEFAULT = -2, ///< Run thread on default core specified by exheader
THREADPROCESSORID_ALL = -1, ///< Run thread on either core
THREADPROCESSORID_0 = 0, ///< Run thread on core 0 (AppCore)
THREADPROCESSORID_1 = 1, ///< Run thread on core 1 (SysCore)
THREADPROCESSORID_MAX = 2, ///< Processor ID must be less than this
};
enum ThreadStatus {
THREADSTATUS_RUNNING, ///< Currently running
THREADSTATUS_READY, ///< Ready to run
THREADSTATUS_WAIT_ARB, ///< Waiting on an address arbiter
THREADSTATUS_WAIT_SLEEP, ///< Waiting due to a SleepThread SVC
THREADSTATUS_WAIT_SYNCH, ///< Waiting due to a WaitSynchronization SVC
THREADSTATUS_DORMANT, ///< Created but not yet made ready
THREADSTATUS_DEAD ///< Run to completion, or forcefully terminated
THREADSTATUS_RUNNING, ///< Currently running
THREADSTATUS_READY, ///< Ready to run
THREADSTATUS_WAIT_ARB, ///< Waiting on an address arbiter
THREADSTATUS_WAIT_SLEEP, ///< Waiting due to a SleepThread SVC
THREADSTATUS_WAIT_SYNCH, ///< Waiting due to a WaitSynchronization SVC
THREADSTATUS_DORMANT, ///< Created but not yet made ready
THREADSTATUS_DEAD ///< Run to completion, or forcefully terminated
};
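
Aside (illustration only): these are the bounds that Thread::Create and ClampPriority check against elsewhere in this commit. A minimal, self-contained sketch of that clamp follows, using std::clamp in place of the project's MathUtil helper and assuming only the enum values shown above.

#include <algorithm>
#include <cstdio>

enum ThreadPriority : int {
    THREADPRIO_HIGHEST = 0,       // highest thread priority
    THREADPRIO_USERLAND_MAX = 24, // highest priority for userland apps
    THREADPRIO_DEFAULT = 48,      // default priority for userland apps
    THREADPRIO_LOWEST = 63,       // lowest thread priority
};

// Clamp an application-supplied priority into the valid range, mirroring the
// range check done before thread creation in the kernel code above.
static int ClampThreadPriority(int priority) {
    return std::clamp(priority, static_cast<int>(THREADPRIO_HIGHEST),
                      static_cast<int>(THREADPRIO_LOWEST));
}

int main() {
    std::printf("%d %d %d\n", ClampThreadPriority(-5), ClampThreadPriority(48),
                ClampThreadPriority(80)); // prints: 0 48 63
    return 0;
}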
namespace Kernel {
@@ -60,13 +60,19 @@ public:
* @return A shared pointer to the newly created thread
*/
static ResultVal<SharedPtr<Thread>> Create(std::string name, VAddr entry_point, s32 priority,
u32 arg, s32 processor_id, VAddr stack_top);
u32 arg, s32 processor_id, VAddr stack_top);
std::string GetName() const override { return name; }
std::string GetTypeName() const override { return "Thread"; }
std::string GetName() const override {
return name;
}
std::string GetTypeName() const override {
return "Thread";
}
static const HandleType HANDLE_TYPE = HandleType::Thread;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
bool ShouldWait() override;
void Acquire() override;
@@ -75,7 +81,9 @@ public:
* Gets the thread's current priority
* @return The current thread's priority
*/
s32 GetPriority() const { return current_priority; }
s32 GetPriority() const {
return current_priority;
}
/**
* Sets the thread's current priority
@@ -93,7 +101,9 @@ public:
* Gets the thread's thread ID
* @return The thread's ID
*/
u32 GetThreadId() const { return thread_id; }
u32 GetThreadId() const {
return thread_id;
}
/**
* Resumes a thread from waiting
@@ -127,7 +137,9 @@ public:
* Returns the Thread Local Storage address of the current thread
* @returns VAddr of the thread's TLS
*/
VAddr GetTLSAddress() const { return tls_address; }
VAddr GetTLSAddress() const {
return tls_address;
}
Core::ThreadContext context;
@@ -137,8 +149,8 @@ public:
u32 entry_point;
u32 stack_top;
s32 nominal_priority; ///< Nominal thread priority, as set by the emulated application
s32 current_priority; ///< Current thread priority, can be temporarily changed
s32 nominal_priority; ///< Nominal thread priority, as set by the emulated application
s32 current_priority; ///< Current thread priority, can be temporarily changed
u64 last_running_ticks; ///< CPU tick when thread was last running
@@ -151,11 +163,11 @@ public:
/// Mutexes currently held by this thread, which will be released when it exits.
boost::container::flat_set<SharedPtr<Mutex>> held_mutexes;
SharedPtr<Process> owner_process; ///< Process that owns this thread
SharedPtr<Process> owner_process; ///< Process that owns this thread
std::vector<SharedPtr<WaitObject>> wait_objects; ///< Objects that the thread is waiting on
VAddr wait_address; ///< If waiting on an AddressArbiter, this is the arbitration address
bool wait_all; ///< True if the thread is waiting on all objects before resuming
bool wait_set_output; ///< True if the output parameter should be set on thread wakeup
VAddr wait_address; ///< If waiting on an AddressArbiter, this is the arbitration address
bool wait_all; ///< True if the thread is waiting on all objects before resuming
bool wait_set_output; ///< True if the output parameter should be set on thread wakeup
std::string name;
@@ -205,10 +217,12 @@ void WaitCurrentThread_Sleep();
/**
* Waits the current thread from a WaitSynchronization call
* @param wait_objects Kernel objects that we are waiting on
* @param wait_set_output If true, set the output parameter on thread wakeup (for WaitSynchronizationN only)
* @param wait_set_output If true, set the output parameter on thread wakeup (for
* WaitSynchronizationN only)
* @param wait_all If true, wait on all objects before resuming (for WaitSynchronizationN only)
*/
void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, bool wait_set_output, bool wait_all);
void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects,
bool wait_set_output, bool wait_all);
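
Aside (not from the source): the wait_all flag documented above distinguishes the "wait for all" and "wait for any" modes of WaitSynchronizationN. The helper below is only a self-contained restatement of that rule over plain booleans, written to make the semantics concrete; it is not the emulator's implementation and none of its names come from the codebase. For example, ShouldWake({true, false}, false) is true, while ShouldWake({true, false}, true) is false.

#include <algorithm>
#include <vector>

// Decide whether a thread waiting on several objects should wake up,
// given which of those objects are currently signaled.
static bool ShouldWake(const std::vector<bool>& signaled, bool wait_all) {
    if (wait_all) {
        // Wait on all objects: every one must be signaled before resuming.
        return std::all_of(signaled.begin(), signaled.end(), [](bool s) { return s; });
    }
    // Wait on any object: a single signaled object is enough.
    return std::any_of(signaled.begin(), signaled.end(), [](bool s) { return s; });
}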
/**
* Waits the current thread from an ArbitrateAddress call

View file

@@ -9,8 +9,8 @@
#include "core/core_timing.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
namespace Kernel {
@@ -20,8 +20,10 @@ static int timer_callback_event_type;
// us to simply use a pool index or similar.
static Kernel::HandleTable timer_callback_handle_table;
Timer::Timer() {}
Timer::~Timer() {}
Timer::Timer() {
}
Timer::~Timer() {
}
SharedPtr<Timer> Timer::Create(ResetType reset_type, std::string name) {
SharedPtr<Timer> timer(new Timer);
@@ -41,7 +43,7 @@ bool Timer::ShouldWait() {
}
void Timer::Acquire() {
ASSERT_MSG( !ShouldWait(), "object unavailable!");
ASSERT_MSG(!ShouldWait(), "object unavailable!");
if (reset_type == ResetType::OneShot)
signaled = false;
@@ -55,8 +57,8 @@ void Timer::Set(s64 initial, s64 interval) {
interval_delay = interval;
u64 initial_microseconds = initial / 1000;
CoreTiming::ScheduleEvent(usToCycles(initial_microseconds),
timer_callback_event_type, callback_handle);
CoreTiming::ScheduleEvent(usToCycles(initial_microseconds), timer_callback_event_type,
callback_handle);
HLE::Reschedule(__func__);
}
@@ -73,7 +75,8 @@ void Timer::Clear() {
/// The timer callback event, called when a timer is fired
static void TimerCallback(u64 timer_handle, int cycles_late) {
SharedPtr<Timer> timer = timer_callback_handle_table.Get<Timer>(static_cast<Handle>(timer_handle));
SharedPtr<Timer> timer =
timer_callback_handle_table.Get<Timer>(static_cast<Handle>(timer_handle));
if (timer == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid timer %08" PRIx64, timer_handle);
@@ -91,7 +94,7 @@ static void TimerCallback(u64 timer_handle, int cycles_late) {
// Reschedule the timer with the interval delay
u64 interval_microseconds = timer->interval_delay / 1000;
CoreTiming::ScheduleEvent(usToCycles(interval_microseconds) - cycles_late,
timer_callback_event_type, timer_handle);
timer_callback_event_type, timer_handle);
}
}
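
Aside (sketch, not part of the diff): TimerCallback above re-arms the timer with usToCycles(interval_microseconds) - cycles_late, so a callback that fires late does not make the schedule drift. The snippet below models that compensation in isolation; UsToCycles here is a local stand-in for the core timing helper and the ~268 MHz clock rate is only an assumption for the example.

#include <cstdint>
#include <cstdio>

// Stand-in for the core timing helper: convert microseconds to CPU cycles.
// The clock rate is an assumed placeholder, not necessarily the emulator's value.
static std::int64_t UsToCycles(std::int64_t us) {
    constexpr std::int64_t kClockRateHz = 268000000; // ~268 MHz, illustrative
    return us * kClockRateHz / 1000000;
}

int main() {
    const std::int64_t interval_ns = 16'000'000;          // 16 ms interval, in nanoseconds
    const std::int64_t interval_us = interval_ns / 1000;  // same ns -> us division as Timer::Set
    const int cycles_late = 5000;                         // how late the callback fired

    // Same shape as the reschedule in TimerCallback: subtract the lateness so the
    // next firing stays on the original period instead of accumulating error.
    const std::int64_t reschedule_cycles = UsToCycles(interval_us) - cycles_late;
    std::printf("re-arm in %lld cycles\n", static_cast<long long>(reschedule_cycles));
    return 0;
}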

View file

@@ -21,19 +21,25 @@ public:
*/
static SharedPtr<Timer> Create(ResetType reset_type, std::string name = "Unknown");
std::string GetTypeName() const override { return "Timer"; }
std::string GetName() const override { return name; }
std::string GetTypeName() const override {
return "Timer";
}
std::string GetName() const override {
return name;
}
static const HandleType HANDLE_TYPE = HandleType::Timer;
HandleType GetHandleType() const override { return HANDLE_TYPE; }
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
ResetType reset_type; ///< The ResetType of this timer
ResetType reset_type; ///< The ResetType of this timer
bool signaled; ///< Whether the timer has been signaled or not
std::string name; ///< Name of timer (optional)
bool signaled; ///< Whether the timer has been signaled or not
std::string name; ///< Name of timer (optional)
u64 initial_delay; ///< The delay until the timer fires for the first time
u64 interval_delay; ///< The delay until the timer fires after the first time
u64 initial_delay; ///< The delay until the timer fires for the first time
u64 interval_delay; ///< The delay until the timer fires after the first time
bool ShouldWait() override;
void Acquire() override;

View file

@@ -15,8 +15,8 @@ namespace Kernel {
static const char* GetMemoryStateName(MemoryState state) {
static const char* names[] = {
"Free", "Reserved", "IO", "Static", "Code", "Private", "Shared", "Continuous", "Aliased",
"Alias", "AliasCode", "Locked",
"Free", "Reserved", "IO", "Static", "Code", "Private",
"Shared", "Continuous", "Aliased", "Alias", "AliasCode", "Locked",
};
return names[(int)state];
@@ -24,13 +24,12 @@ static const char* GetMemoryStateName(MemoryState state) {
bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
ASSERT(base + size == next.base);
if (permissions != next.permissions ||
meminfo_state != next.meminfo_state ||
type != next.type) {
if (permissions != next.permissions || meminfo_state != next.meminfo_state ||
type != next.type) {
return false;
}
if (type == VMAType::AllocatedMemoryBlock &&
(backing_block != next.backing_block || offset + size != next.offset)) {
(backing_block != next.backing_block || offset + size != next.offset)) {
return false;
}
if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
@@ -70,7 +69,9 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
}
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block, size_t offset, u32 size, MemoryState state) {
std::shared_ptr<std::vector<u8>> block,
size_t offset, u32 size,
MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
@@ -89,7 +90,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}
ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8 * memory, u32 size, MemoryState state) {
ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size,
MemoryState state) {
ASSERT(memory != nullptr);
// This is the appropriately sized VMA that will turn into our allocation.
@@ -106,7 +108,9 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8 * m
return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state, Memory::MMIORegionPointer mmio_handler) {
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size,
MemoryState state,
Memory::MMIORegionPointer mmio_handler) {
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
@@ -191,15 +195,16 @@ void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
void VMManager::LogLayout(Log::Level log_level) const {
for (const auto& p : vma_map) {
const VirtualMemoryArea& vma = p.second;
LOG_GENERIC(Log::Class::Kernel, log_level, "%08X - %08X size: %8X %c%c%c %s",
vma.base, vma.base + vma.size, vma.size,
(u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
(u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
(u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-', GetMemoryStateName(vma.meminfo_state));
LOG_GENERIC(Log::Class::Kernel, log_level, "%08X - %08X size: %8X %c%c%c %s", vma.base,
vma.base + vma.size, vma.size,
(u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
(u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
(u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-',
GetMemoryStateName(vma.meminfo_state));
}
}
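
Aside (illustrative): LogLayout above renders the permission bits as an R/W/X triplet. The self-contained sketch below reproduces that decoding; the flag values (1, 2, 4) and the stand-in enum are assumptions chosen for the example and may not match the real VMAPermission definition.

#include <cstdio>
#include <string>

// Stand-in flag set; the values are illustrative only.
enum class VMAPermission : unsigned char {
    Read = 1,
    Write = 2,
    Execute = 4,
};

// Build the same "R/W/X or '-'" string that LogLayout prints.
static std::string PermissionString(VMAPermission perms) {
    const auto bits = static_cast<unsigned char>(perms);
    std::string out;
    out += (bits & static_cast<unsigned char>(VMAPermission::Read)) ? 'R' : '-';
    out += (bits & static_cast<unsigned char>(VMAPermission::Write)) ? 'W' : '-';
    out += (bits & static_cast<unsigned char>(VMAPermission::Execute)) ? 'X' : '-';
    return out;
}

int main() {
    std::printf("%s\n", PermissionString(static_cast<VMAPermission>(3)).c_str()); // RW-
    return 0;
}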
VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle & iter) {
VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
// This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
// non-const access to its container.
return vma_map.erase(iter, iter); // Erases an empty range of elements
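
Aside: the erase(iter, iter) trick used above is a property of the standard containers rather than anything specific to this codebase — erasing an empty range removes nothing but returns a mutable iterator at the same position. A tiny stand-alone demonstration with std::map:

#include <cassert>
#include <map>

int main() {
    std::map<int, char> m{{1, 'a'}, {2, 'b'}};

    // Start from a const_iterator, as a const member function would hand out.
    std::map<int, char>::const_iterator cit = m.find(2);

    // Erasing an empty range is a no-op that yields a non-const iterator.
    std::map<int, char>::iterator it = m.erase(cit, cit);
    assert(m.size() == 2);

    it->second = 'B'; // now the element can be mutated through it
    assert(m.at(2) == 'B');
    return 0;
}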
@@ -337,5 +342,4 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
break;
}
}
}

View file

@@ -15,13 +15,13 @@
namespace Kernel {
const ResultCode ERR_INVALID_ADDRESS{ // 0xE0E01BF5
ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage};
const ResultCode ERR_INVALID_ADDRESS{// 0xE0E01BF5
ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage};
const ResultCode ERR_INVALID_ADDRESS_STATE{ // 0xE0A01BF5
ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidState, ErrorLevel::Usage};
const ResultCode ERR_INVALID_ADDRESS_STATE{// 0xE0A01BF5
ErrorDescription::InvalidAddress, ErrorModule::OS,
ErrorSummary::InvalidState, ErrorLevel::Usage};
enum class VMAType : u8 {
/// VMA represents an unmapped region of the address space.
@@ -115,7 +118,8 @@ class VMManager final {
// TODO(yuriks): Make page tables switchable to support multiple VMManagers
public:
/**
* The maximum amount of address space managed by the kernel. Addresses above this are never used.
* The maximum amount of address space managed by the kernel. Addresses above this are never
* used.
* @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
*/
static const u32 MAX_ADDRESS = 0x40000000;
@@ -151,7 +152,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
size_t offset, u32 size, MemoryState state);
size_t offset, u32 size, MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
@@ -172,7 +173,8 @@ public:
* @param state MemoryState tag to attach to the VMA.
* @param mmio_handler The handler that will implement read and write for this MMIO region.
*/
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state, Memory::MMIORegionPointer mmio_handler);
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state,
Memory::MMIORegionPointer mmio_handler);
/// Unmaps a range of addresses, splitting VMAs as necessary.
ResultCode UnmapRange(VAddr target, u32 size);
@@ -228,5 +230,4 @@ private:
/// Updates the pages corresponding to this VMA so they match the VMA's attributes.
void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
};
}