Merge branch 'master' of github.com:citra-emu/citra into ips-patches

Zak Kurka 2018-11-15 07:11:18 -06:00
commit 1ded48f5a3
209 changed files with 17211 additions and 9498 deletions

View file

@ -58,6 +58,7 @@ add_library(core STATIC
file_sys/disk_archive.h
file_sys/errors.h
file_sys/file_backend.h
file_sys/delay_generator.cpp
file_sys/delay_generator.h
file_sys/ivfc_archive.cpp
file_sys/ivfc_archive.h
@ -102,8 +103,6 @@ add_library(core STATIC
hle/applets/mint.h
hle/applets/swkbd.cpp
hle/applets/swkbd.h
hle/config_mem.cpp
hle/config_mem.h
hle/function_wrappers.h
hle/ipc.h
hle/ipc_helpers.h
@ -113,6 +112,8 @@ add_library(core STATIC
hle/kernel/client_port.h
hle/kernel/client_session.cpp
hle/kernel/client_session.h
hle/kernel/config_mem.cpp
hle/kernel/config_mem.h
hle/kernel/errors.h
hle/kernel/event.cpp
hle/kernel/event.h
@ -143,6 +144,8 @@ add_library(core STATIC
hle/kernel/session.h
hle/kernel/shared_memory.cpp
hle/kernel/shared_memory.h
hle/kernel/shared_page.cpp
hle/kernel/shared_page.h
hle/kernel/svc.cpp
hle/kernel/svc.h
hle/kernel/thread.cpp
@ -385,8 +388,6 @@ add_library(core STATIC
hle/service/ssl_c.h
hle/service/y2r_u.cpp
hle/service/y2r_u.h
hle/shared_page.cpp
hle/shared_page.h
hw/aes/arithmetic128.cpp
hw/aes/arithmetic128.h
hw/aes/ccm.cpp
@ -446,7 +447,7 @@ target_link_libraries(core PUBLIC common PRIVATE audio_core network video_core)
target_link_libraries(core PUBLIC Boost::boost PRIVATE cryptopp fmt open_source_archives)
if (ENABLE_WEB_SERVICE)
target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE)
target_link_libraries(core PRIVATE json-headers web_service)
target_link_libraries(core PRIVATE web_service)
endif()
if (ARCHITECTURE_x86_64)

View file

@ -71,7 +71,8 @@ private:
class DynarmicUserCallbacks final : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicUserCallbacks(ARM_Dynarmic& parent) : parent(parent) {}
explicit DynarmicUserCallbacks(ARM_Dynarmic& parent)
: parent(parent), timing(parent.system.CoreTiming()) {}
~DynarmicUserCallbacks() = default;
std::uint8_t MemoryRead8(VAddr vaddr) override {
@ -134,7 +135,8 @@ public:
if (GDBStub::IsConnected()) {
parent.jit->HaltExecution();
parent.SetPC(pc);
Kernel::Thread* thread = Kernel::GetCurrentThread();
Kernel::Thread* thread =
Core::System::GetInstance().Kernel().GetThreadManager().GetCurrentThread();
parent.SaveContext(thread->context);
GDBStub::Break();
GDBStub::SendTrap(thread, 5);
@ -147,18 +149,19 @@ public:
}
void AddTicks(std::uint64_t ticks) override {
CoreTiming::AddTicks(ticks);
timing.AddTicks(ticks);
}
std::uint64_t GetTicksRemaining() override {
s64 ticks = CoreTiming::GetDowncount();
s64 ticks = timing.GetDowncount();
return static_cast<u64>(ticks <= 0 ? 0 : ticks);
}
ARM_Dynarmic& parent;
Core::Timing& timing;
};
ARM_Dynarmic::ARM_Dynarmic(PrivilegeMode initial_mode)
: cb(std::make_unique<DynarmicUserCallbacks>(*this)) {
ARM_Dynarmic::ARM_Dynarmic(Core::System& system, PrivilegeMode initial_mode)
: system(system), cb(std::make_unique<DynarmicUserCallbacks>(*this)) {
interpreter_state = std::make_shared<ARMul_State>(initial_mode);
PageTableChanged();
}

View file

@ -15,11 +15,15 @@ namespace Memory {
struct PageTable;
} // namespace Memory
namespace Core {
struct System;
}
class DynarmicUserCallbacks;
class ARM_Dynarmic final : public ARM_Interface {
public:
explicit ARM_Dynarmic(PrivilegeMode initial_mode);
ARM_Dynarmic(Core::System& system, PrivilegeMode initial_mode);
~ARM_Dynarmic();
void Run() override;
@ -50,6 +54,7 @@ public:
private:
friend class DynarmicUserCallbacks;
Core::System& system;
std::unique_ptr<DynarmicUserCallbacks> cb;
std::unique_ptr<Dynarmic::A32::Jit> MakeJit();

View file

@ -75,7 +75,7 @@ ARM_DynCom::ARM_DynCom(PrivilegeMode initial_mode) {
ARM_DynCom::~ARM_DynCom() {}
void ARM_DynCom::Run() {
ExecuteInstructions(std::max<s64>(CoreTiming::GetDowncount(), 0));
ExecuteInstructions(std::max<s64>(Core::System::GetInstance().CoreTiming().GetDowncount(), 0));
}
void ARM_DynCom::Step() {
@ -146,7 +146,7 @@ void ARM_DynCom::SetCP15Register(CP15Register reg, u32 value) {
void ARM_DynCom::ExecuteInstructions(u64 num_instructions) {
state->NumInstrsToExecute = num_instructions;
unsigned ticks_executed = InterpreterMainLoop(state.get());
CoreTiming::AddTicks(ticks_executed);
Core::System::GetInstance().CoreTiming().AddTicks(ticks_executed);
state->ServeBreak();
}

View file

@ -18,6 +18,7 @@
#include "core/arm/skyeye_common/armstate.h"
#include "core/arm/skyeye_common/armsupp.h"
#include "core/arm/skyeye_common/vfp/vfp.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/gdbstub/gdbstub.h"
#include "core/hle/kernel/svc.h"
@ -3859,7 +3860,7 @@ SUB_INST : {
SWI_INST : {
if (inst_base->cond == ConditionCode::AL || CondPassed(cpu, inst_base->cond)) {
swi_inst* const inst_cream = (swi_inst*)inst_base->component;
CoreTiming::AddTicks(num_instrs);
Core::System::GetInstance().CoreTiming().AddTicks(num_instrs);
cpu->NumInstrsToExecute =
num_instrs >= cpu->NumInstrsToExecute ? 0 : cpu->NumInstrsToExecute - num_instrs;
num_instrs = 0;

View file

@ -604,7 +604,8 @@ void ARMul_State::ServeBreak() {
if (last_bkpt_hit) {
Reg[15] = last_bkpt.address;
}
Kernel::Thread* thread = Kernel::GetCurrentThread();
Kernel::Thread* thread =
Core::System::GetInstance().Kernel().GetThreadManager().GetCurrentThread();
Core::CPU().SaveContext(thread->context);
if (last_bkpt_hit || GDBStub::GetCpuStepFlag()) {
last_bkpt_hit = false;

View file

@ -59,13 +59,13 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
// If we don't have a currently active thread then don't execute instructions,
// instead advance to the next event and try to yield to the next thread
if (Kernel::GetCurrentThread() == nullptr) {
if (kernel->GetThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Idling");
CoreTiming::Idle();
CoreTiming::Advance();
timing->Idle();
timing->Advance();
PrepareReschedule();
} else {
CoreTiming::Advance();
timing->Advance();
if (tight_loop) {
cpu_core->Run();
} else {
@ -126,7 +126,9 @@ System::ResultStatus System::Load(EmuWindow& emu_window, const std::string& file
return init_result;
}
const Loader::ResultStatus load_result{app_loader->Load(Kernel::g_current_process)};
Kernel::SharedPtr<Kernel::Process> process;
const Loader::ResultStatus load_result{app_loader->Load(process)};
kernel->SetCurrentProcess(process);
if (Loader::ResultStatus::Success != load_result) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", static_cast<u32>(load_result));
System::Shutdown();
@ -140,7 +142,7 @@ System::ResultStatus System::Load(EmuWindow& emu_window, const std::string& file
return ResultStatus::ErrorLoader;
}
}
Memory::SetCurrentPageTable(&Kernel::g_current_process->vm_manager.page_table);
Memory::SetCurrentPageTable(&kernel->GetCurrentProcess()->vm_manager.page_table);
status = ResultStatus::Success;
m_emu_window = &emu_window;
m_filepath = filepath;
@ -153,7 +155,7 @@ void System::PrepareReschedule() {
}
PerfStats::Results System::GetAndResetPerfStats() {
return perf_stats.GetAndResetStats(CoreTiming::GetGlobalTimeUs());
return perf_stats.GetAndResetStats(timing->GetGlobalTimeUs());
}
void System::Reschedule() {
@ -162,17 +164,17 @@ void System::Reschedule() {
}
reschedule_pending = false;
Kernel::Reschedule();
kernel->GetThreadManager().Reschedule();
}
System::ResultStatus System::Init(EmuWindow& emu_window, u32 system_mode) {
LOG_DEBUG(HW_Memory, "initialized OK");
CoreTiming::Init();
timing = std::make_unique<Timing>();
if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
cpu_core = std::make_unique<ARM_Dynarmic>(USER32MODE);
cpu_core = std::make_unique<ARM_Dynarmic>(*this, USER32MODE);
#else
cpu_core = std::make_unique<ARM_DynCom>(USER32MODE);
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
@ -191,13 +193,12 @@ System::ResultStatus System::Init(EmuWindow& emu_window, u32 system_mode) {
rpc_server = std::make_unique<RPC::RPCServer>();
#endif
service_manager = std::make_shared<Service::SM::ServiceManager>();
shared_page_handler = std::make_shared<SharedPage::Handler>();
archive_manager = std::make_unique<Service::FS::ArchiveManager>();
service_manager = std::make_shared<Service::SM::ServiceManager>(*this);
archive_manager = std::make_unique<Service::FS::ArchiveManager>(*this);
HW::Init();
Kernel::Init(system_mode);
Service::Init(*this, service_manager);
kernel = std::make_unique<Kernel::KernelSystem>(system_mode);
Service::Init(*this);
GDBStub::Init();
ResultStatus result = VideoCore::Init(emu_window);
@ -230,6 +231,22 @@ const Service::FS::ArchiveManager& System::ArchiveManager() const {
return *archive_manager;
}
Kernel::KernelSystem& System::Kernel() {
return *kernel;
}
const Kernel::KernelSystem& System::Kernel() const {
return *kernel;
}
Timing& System::CoreTiming() {
return *timing;
}
const Timing& System::CoreTiming() const {
return *timing;
}
void System::RegisterSoftwareKeyboard(std::shared_ptr<Frontend::SoftwareKeyboard> swkbd) {
registered_swkbd = std::move(swkbd);
}
@ -247,8 +264,7 @@ void System::Shutdown() {
// Shutdown emulation session
GDBStub::Shutdown();
VideoCore::Shutdown();
Service::Shutdown();
Kernel::Shutdown();
kernel.reset();
HW::Shutdown();
telemetry_session.reset();
#ifdef ENABLE_SCRIPTING
@ -257,7 +273,7 @@ void System::Shutdown() {
service_manager.reset();
dsp_core.reset();
cpu_core.reset();
CoreTiming::Shutdown();
timing.reset();
app_loader.reset();
if (auto room_member = Network::GetRoomMember().lock()) {

View file

@ -8,7 +8,6 @@
#include <string>
#include "common/common_types.h"
#include "core/frontend/applets/swkbd.h"
#include "core/hle/shared_page.h"
#include "core/loader/loader.h"
#include "core/memory.h"
#include "core/perf_stats.h"
@ -36,8 +35,14 @@ class ArchiveManager;
}
} // namespace Service
namespace Kernel {
class KernelSystem;
}
namespace Core {
class Timing;
class System {
public:
/**
@ -167,6 +172,18 @@ public:
/// Gets a const reference to the archive manager
const Service::FS::ArchiveManager& ArchiveManager() const;
/// Gets a reference to the kernel
Kernel::KernelSystem& Kernel();
/// Gets a const reference to the kernel
const Kernel::KernelSystem& Kernel() const;
/// Gets a reference to the timing system
Timing& CoreTiming();
/// Gets a const reference to the timing system
const Timing& CoreTiming() const;
PerfStats perf_stats;
FrameLimiter frame_limiter;
@ -193,10 +210,6 @@ public:
return registered_swkbd;
}
std::shared_ptr<SharedPage::Handler> GetSharedPageHandler() const {
return shared_page_handler;
}
private:
/**
* Initialize the emulated system.
@ -236,11 +249,14 @@ private:
std::unique_ptr<RPC::RPCServer> rpc_server;
#endif
/// Shared Page
std::shared_ptr<SharedPage::Handler> shared_page_handler;
std::unique_ptr<Service::FS::ArchiveManager> archive_manager;
public: // HACK: this is temporary exposed for tests,
// due to WIP kernel refactor causing desync state in memory
std::unique_ptr<Kernel::KernelSystem> kernel;
std::unique_ptr<Timing> timing;
private:
static System s_instance;
ResultStatus status = ResultStatus::Success;

View file

@ -2,75 +2,25 @@
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "core/core_timing.h"
#include <algorithm>
#include <cinttypes>
#include <mutex>
#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/thread.h"
#include "common/threadsafe_queue.h"
#include "core/core_timing.h"
namespace CoreTiming {
static s64 global_timer;
static s64 slice_length;
static s64 downcount;
struct EventType {
TimedCallback callback;
const std::string* name;
};
struct Event {
s64 time;
u64 fifo_order;
u64 userdata;
const EventType* type;
};
namespace Core {
// Sort by time, unless the times are the same, in which case sort by the order added to the queue
static bool operator>(const Event& left, const Event& right) {
return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
bool Timing::Event::operator>(const Event& right) const {
return std::tie(time, fifo_order) > std::tie(right.time, right.fifo_order);
}
static bool operator<(const Event& left, const Event& right) {
return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
bool Timing::Event::operator<(const Event& right) const {
return std::tie(time, fifo_order) < std::tie(right.time, right.fifo_order);
}
// unordered_map stores each element separately as a linked list node so pointers to elements
// remain stable regardless of rehashes/resizing.
static std::unordered_map<std::string, EventType> event_types;
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accommodated
// by the standard adaptor class.
static std::vector<Event> event_queue;
static u64 event_fifo_id;
// the queue for storing the events from other threads threadsafe until they will be added
// to the event_queue by the emu thread
static Common::MPSCQueue<Event, false> ts_queue;
static constexpr int MAX_SLICE_LENGTH = 20000;
static s64 idled_cycles;
// Are we in a function that has been called from Advance()
// If events are scheduled from a function that gets called from Advance(),
// don't change slice_length and downcount.
static bool is_global_timer_sane;
static EventType* ev_lost = nullptr;
static void EmptyTimedCallback(u64 userdata, s64 cyclesLate) {}
EventType* RegisterEvent(const std::string& name, TimedCallback callback) {
TimingEventType* Timing::RegisterEvent(const std::string& name, TimedCallback callback) {
// check for existing type with same name.
// we want event type names to remain unique so that we can use them for serialization.
ASSERT_MSG(event_types.find(name) == event_types.end(),
@ -78,42 +28,17 @@ EventType* RegisterEvent(const std::string& name, TimedCallback callback) {
"during Init to avoid breaking save states.",
name);
auto info = event_types.emplace(name, EventType{callback, nullptr});
EventType* event_type = &info.first->second;
auto info = event_types.emplace(name, TimingEventType{callback, nullptr});
TimingEventType* event_type = &info.first->second;
event_type->name = &info.first->first;
return event_type;
}
void UnregisterAllEvents() {
ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending");
event_types.clear();
}
void Init() {
downcount = MAX_SLICE_LENGTH;
slice_length = MAX_SLICE_LENGTH;
global_timer = 0;
idled_cycles = 0;
// The time between CoreTiming being initialized and the first call to Advance() is considered
// the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
// executing the first cycle of each slice to prepare the slice length and downcount for
// that slice.
is_global_timer_sane = true;
event_fifo_id = 0;
ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback);
}
void Shutdown() {
Timing::~Timing() {
MoveEvents();
ClearPendingEvents();
UnregisterAllEvents();
}
// This should only be called from the CPU thread. If you are calling
// it from any other thread, you are doing something evil
u64 GetTicks() {
u64 Timing::GetTicks() const {
u64 ticks = static_cast<u64>(global_timer);
if (!is_global_timer_sane) {
ticks += slice_length - downcount;
@ -121,19 +46,16 @@ u64 GetTicks() {
return ticks;
}
void AddTicks(u64 ticks) {
void Timing::AddTicks(u64 ticks) {
downcount -= ticks;
}
u64 GetIdleTicks() {
u64 Timing::GetIdleTicks() const {
return static_cast<u64>(idled_cycles);
}
void ClearPendingEvents() {
event_queue.clear();
}
void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
void Timing::ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type,
u64 userdata) {
ASSERT(event_type != nullptr);
s64 timeout = GetTicks() + cycles_into_future;
@ -145,11 +67,12 @@ void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 user
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
void Timing::ScheduleEventThreadsafe(s64 cycles_into_future, const TimingEventType* event_type,
u64 userdata) {
ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
}
void UnscheduleEvent(const EventType* event_type, u64 userdata) {
void Timing::UnscheduleEvent(const TimingEventType* event_type, u64 userdata) {
auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type == event_type && e.userdata == userdata;
});
@ -161,7 +84,7 @@ void UnscheduleEvent(const EventType* event_type, u64 userdata) {
}
}
void RemoveEvent(const EventType* event_type) {
void Timing::RemoveEvent(const TimingEventType* event_type) {
auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
[&](const Event& e) { return e.type == event_type; });
@ -172,12 +95,12 @@ void RemoveEvent(const EventType* event_type) {
}
}
void RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
void Timing::RemoveNormalAndThreadsafeEvent(const TimingEventType* event_type) {
MoveEvents();
RemoveEvent(event_type);
}
void ForceExceptionCheck(s64 cycles) {
void Timing::ForceExceptionCheck(s64 cycles) {
cycles = std::max<s64>(0, cycles);
if (downcount > cycles) {
slice_length -= downcount - cycles;
@ -185,7 +108,7 @@ void ForceExceptionCheck(s64 cycles) {
}
}
void MoveEvents() {
void Timing::MoveEvents() {
for (Event ev; ts_queue.Pop(ev);) {
ev.fifo_order = event_fifo_id++;
event_queue.emplace_back(std::move(ev));
@ -193,7 +116,7 @@ void MoveEvents() {
}
}
void Advance() {
void Timing::Advance() {
MoveEvents();
s64 cycles_executed = slice_length - downcount;
@ -220,17 +143,17 @@ void Advance() {
downcount = slice_length;
}
void Idle() {
void Timing::Idle() {
idled_cycles += downcount;
downcount = 0;
}
std::chrono::microseconds GetGlobalTimeUs() {
std::chrono::microseconds Timing::GetGlobalTimeUs() const {
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE_ARM11};
}
s64 GetDowncount() {
s64 Timing::GetDowncount() const {
return downcount;
}
} // namespace CoreTiming
} // namespace Core
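
The queue ordering above relies on the tuple comparison and std::push_heap with std::greater<>; a self-contained sketch of the same min-heap pattern, using a simplified Event with only the ordering fields:

#include <algorithm>
#include <functional>
#include <tuple>
#include <vector>

struct Event {
    long long time;
    unsigned long long fifo_order;
    // Earlier deadline first; insertion (FIFO) order breaks ties, mirroring Timing::Event.
    bool operator>(const Event& right) const {
        return std::tie(time, fifo_order) > std::tie(right.time, right.fifo_order);
    }
};

void Push(std::vector<Event>& queue, const Event& ev) {
    // With std::greater<>, push_heap keeps the soonest event at queue.front() (a min-heap)
    // while the plain std::vector still allows arbitrary erasure, as RemoveEvent() needs.
    queue.push_back(ev);
    std::push_heap(queue.begin(), queue.end(), std::greater<>());
}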

View file

@ -21,8 +21,11 @@
#include <functional>
#include <limits>
#include <string>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/threadsafe_queue.h"
// The timing we get from the assembly is 268,111,855.956 Hz
// It is possible that this number isn't just an integer because the compiler could have
@ -120,73 +123,112 @@ inline u64 cyclesToMs(s64 cycles) {
return cycles * 1000 / BASE_CLOCK_RATE_ARM11;
}
namespace CoreTiming {
struct EventType;
namespace Core {
using TimedCallback = std::function<void(u64 userdata, int cycles_late)>;
/**
* CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
* required to end slice -1 and start slice 0 before the first cycle of code is executed.
*/
void Init();
void Shutdown();
struct TimingEventType {
TimedCallback callback;
const std::string* name;
};
/**
* This should only be called from the emu thread; if you are calling it from any other thread, you are
* doing something evil
*/
u64 GetTicks();
u64 GetIdleTicks();
void AddTicks(u64 ticks);
class Timing {
public:
~Timing();
/**
* Returns the event_type identifier. If name is not unique, it will assert.
*/
EventType* RegisterEvent(const std::string& name, TimedCallback callback);
void UnregisterAllEvents();
/**
* This should only be called from the emu thread; if you are calling it from any other thread, you
* are doing something evil
*/
u64 GetTicks() const;
u64 GetIdleTicks() const;
void AddTicks(u64 ticks);
/**
* After the first Advance, the slice lengths and the downcount will be reduced whenever an event
* is scheduled earlier than the current values.
* Scheduling from a callback will not update the downcount until the Advance() completes.
*/
void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0);
/**
* Returns the event_type identifier. If name is not unique, it will assert.
*/
TimingEventType* RegisterEvent(const std::string& name, TimedCallback callback);
/**
* This is to be called when outside of hle threads, such as the graphics thread, wants to
* schedule things to be executed on the main thread.
* Note that this doesn't change slice_length and thus events scheduled by this might be called
* with a delay of up to MAX_SLICE_LENGTH
*/
void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, u64 userdata);
/**
* After the first Advance, the slice lengths and the downcount will be reduced whenever an
* event is scheduled earlier than the current values. Scheduling from a callback will not
* update the downcount until the Advance() completes.
*/
void ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata = 0);
void UnscheduleEvent(const EventType* event_type, u64 userdata);
/**
* This is to be called when outside of hle threads, such as the graphics thread, wants to
* schedule things to be executed on the main thread.
* Note that this doesn't change slice_length and thus events scheduled by this might be called
* with a delay of up to MAX_SLICE_LENGTH
*/
void ScheduleEventThreadsafe(s64 cycles_into_future, const TimingEventType* event_type,
u64 userdata);
/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const EventType* event_type);
void RemoveNormalAndThreadsafeEvent(const EventType* event_type);
void UnscheduleEvent(const TimingEventType* event_type, u64 userdata);
/** Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends
* the previous timing slice and begins the next one, you must Advance from the previous
* slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
* Advance() is required to initialize the slice length before the first cycle of emulated
* instructions is executed.
*/
void Advance();
void MoveEvents();
/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const TimingEventType* event_type);
void RemoveNormalAndThreadsafeEvent(const TimingEventType* event_type);
/// Pretend that the main CPU has executed enough cycles to reach the next event.
void Idle();
/** Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends
* the previous timing slice and begins the next one, you must Advance from the previous
* slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
* Advance() is required to initialize the slice length before the first cycle of emulated
* instructions is executed.
*/
void Advance();
void MoveEvents();
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
/// Pretend that the main CPU has executed enough cycles to reach the next event.
void Idle();
void ForceExceptionCheck(s64 cycles);
void ForceExceptionCheck(s64 cycles);
std::chrono::microseconds GetGlobalTimeUs();
std::chrono::microseconds GetGlobalTimeUs() const;
s64 GetDowncount();
s64 GetDowncount() const;
} // namespace CoreTiming
private:
struct Event {
s64 time;
u64 fifo_order;
u64 userdata;
const TimingEventType* type;
bool operator>(const Event& right) const;
bool operator<(const Event& right) const;
};
static constexpr int MAX_SLICE_LENGTH = 20000;
s64 global_timer = 0;
s64 slice_length = MAX_SLICE_LENGTH;
s64 downcount = MAX_SLICE_LENGTH;
// unordered_map stores each element separately as a linked list node so pointers to
// elements remain stable regardless of rehashes/resizing.
std::unordered_map<std::string, TimingEventType> event_types;
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
// accommodated by the standard adaptor class.
std::vector<Event> event_queue;
u64 event_fifo_id = 0;
// the queue for storing the events from other threads threadsafe until they will be added
// to the event_queue by the emu thread
Common::MPSCQueue<Event, false> ts_queue;
s64 idled_cycles = 0;
// Are we in a function that has been called from Advance()
// If events are scheduled from a function that gets called from Advance(),
// don't change slice_length and downcount.
// The time between CoreTiming being initialized and the first call to Advance() is considered
// the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
// executing the first cycle of each slice to prepare the slice length and downcount for
// that slice.
bool is_global_timer_sane = true;
};
} // namespace Core
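
A minimal usage sketch of the instance-based API declared above; the event name, callback body, and cycle count are illustrative:

#include "core/core_timing.h"

void Example() {
    Core::Timing timing;

    // Register a named event; the callback receives the userdata and how many cycles late it fired.
    Core::TimingEventType* tick_event = timing.RegisterEvent(
        "ExampleTick", [](u64 userdata, int cycles_late) { /* handle the tick */ });

    // Queue it 10000 cycles into the future. Dispatcher loops then call Advance() at each
    // slice boundary, which runs any events whose time has come due.
    timing.ScheduleEvent(10000, tick_event);
}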

View file

@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <utility>
#include "core/core.h"
#include "core/file_sys/archive_savedata.h"
#include "core/hle/kernel/process.h"
@ -16,16 +17,19 @@ ArchiveFactory_SaveData::ArchiveFactory_SaveData(
: sd_savedata_source(std::move(sd_savedata)) {}
ResultVal<std::unique_ptr<ArchiveBackend>> ArchiveFactory_SaveData::Open(const Path& path) {
return sd_savedata_source->Open(Kernel::g_current_process->codeset->program_id);
return sd_savedata_source->Open(
Core::System::GetInstance().Kernel().GetCurrentProcess()->codeset->program_id);
}
ResultCode ArchiveFactory_SaveData::Format(const Path& path,
const FileSys::ArchiveFormatInfo& format_info) {
return sd_savedata_source->Format(Kernel::g_current_process->codeset->program_id, format_info);
return sd_savedata_source->Format(
Core::System::GetInstance().Kernel().GetCurrentProcess()->codeset->program_id, format_info);
}
ResultVal<ArchiveFormatInfo> ArchiveFactory_SaveData::GetFormatInfo(const Path& path) const {
return sd_savedata_source->GetFormatInfo(Kernel::g_current_process->codeset->program_id);
return sd_savedata_source->GetFormatInfo(
Core::System::GetInstance().Kernel().GetCurrentProcess()->codeset->program_id);
}
} // namespace FileSys

View file

@ -7,6 +7,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/swap.h"
#include "core/core.h"
#include "core/file_sys/archive_selfncch.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/ivfc_archive.h"
@ -279,7 +280,7 @@ void ArchiveFactory_SelfNCCH::Register(Loader::AppLoader& app_loader) {
ResultVal<std::unique_ptr<ArchiveBackend>> ArchiveFactory_SelfNCCH::Open(const Path& path) {
auto archive = std::make_unique<SelfNCCHArchive>(
ncch_data[Kernel::g_current_process->codeset->program_id]);
ncch_data[Core::System::GetInstance().Kernel().GetCurrentProcess()->codeset->program_id]);
return MakeResult<std::unique_ptr<ArchiveBackend>>(std::move(archive));
}

View file

@ -0,0 +1,22 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "core/file_sys/delay_generator.h"
namespace FileSys {
DelayGenerator::~DelayGenerator() = default;
u64 DefaultDelayGenerator::GetReadDelayNs(std::size_t length) {
// This is the delay measured for a romfs read.
// For now we will take that as a default
static constexpr u64 slope(94);
static constexpr u64 offset(582778);
static constexpr u64 minimum(663124);
u64 IPCDelayNanoseconds = std::max<u64>(static_cast<u64>(length) * slope + offset, minimum);
return IPCDelayNanoseconds;
}
} // namespace FileSys
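
A worked check of the formula above, using the same slope/offset/minimum constants; the read lengths are arbitrary examples:

#include <algorithm>
#include <cstdint>

constexpr std::uint64_t slope = 94, offset = 582778, minimum = 663124;

constexpr std::uint64_t ReadDelayNs(std::uint64_t length) {
    return std::max<std::uint64_t>(length * slope + offset, minimum);
}

// A 4096-byte read: 4096 * 94 + 582778 = 967802 ns (~0.97 ms), above the minimum.
// A 64-byte read:   64 * 94 + 582778 = 588794 ns, so the 663124 ns floor applies instead.
static_assert(ReadDelayNs(4096) == 967802);
static_assert(ReadDelayNs(64) == 663124);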

View file

@ -4,10 +4,14 @@
#pragma once
#include <cstddef>
#include "common/common_types.h"
namespace FileSys {
class DelayGenerator {
public:
virtual ~DelayGenerator();
virtual u64 GetReadDelayNs(std::size_t length) = 0;
// TODO (B3N30): Add getter for all other file/directory io operations
@ -15,15 +19,7 @@ public:
class DefaultDelayGenerator : public DelayGenerator {
public:
u64 GetReadDelayNs(std::size_t length) override {
// This is the delay measured for a romfs read.
// For now we will take that as a default
static constexpr u64 slope(94);
static constexpr u64 offset(582778);
static constexpr u64 minimum(663124);
u64 IPCDelayNanoseconds = std::max<u64>(static_cast<u64>(length) * slope + offset, minimum);
return IPCDelayNanoseconds;
}
u64 GetReadDelayNs(std::size_t length) override;
};
} // namespace FileSys

View file

@ -226,7 +226,9 @@ Loader::ResultStatus NCCHContainer::Load() {
std::memcpy(input.data(), key_y_primary.data(), key_y_primary.size());
std::memcpy(input.data() + key_y_primary.size(), seed.data(), seed.size());
CryptoPP::SHA256 sha;
sha.CalculateDigest(key_y_secondary.data(), input.data(), input.size());
std::array<u8, CryptoPP::SHA256::DIGESTSIZE> hash;
sha.CalculateDigest(hash.data(), input.data(), input.size());
std::memcpy(key_y_secondary.data(), hash.data(), key_y_secondary.size());
}
}
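
The change above hashes into a buffer of the digest's full size before truncating into the key, so CalculateDigest never writes past the end of the shorter key array. A minimal sketch of that pattern, assuming a 16-byte KeyY as in the NCCH AES key slots:

#include <array>
#include <cstdint>
#include <cstring>
#include <cryptopp/sha.h>

std::array<std::uint8_t, 16> DeriveSecondaryKeyY(const std::uint8_t* input, std::size_t size) {
    // SHA-256 always writes DIGESTSIZE (32) bytes, so give it a full-size buffer first...
    std::array<std::uint8_t, CryptoPP::SHA256::DIGESTSIZE> hash;
    CryptoPP::SHA256().CalculateDigest(hash.data(), input, size);

    // ...then keep only as many bytes as the key actually holds.
    std::array<std::uint8_t, 16> key_y{};
    std::memcpy(key_y.data(), hash.data(), key_y.size());
    return key_y;
}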

View file

@ -34,7 +34,7 @@ bool SeedDB::Load() {
LOG_ERROR(Service_FS, "Failed to read seed database count fully");
return false;
}
if (!file.Seek(file.Tell() + SEEDDB_PADDING_BYTES, SEEK_SET)) {
if (!file.Seek(SEEDDB_PADDING_BYTES, SEEK_CUR)) {
LOG_ERROR(Service_FS, "Failed to skip seed database padding");
return false;
}
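
The Seek change above swaps an absolute seek computed from Tell() for a single relative seek. A minimal sketch of the equivalent pattern with C stdio; the padding size is illustrative:

#include <cstdio>

constexpr long kPaddingBytes = 12; // illustrative stand-in for SEEDDB_PADDING_BYTES

bool SkipPadding(std::FILE* file) {
    // Relative seek from the current position: no ftell() round-trip, and equivalent to
    // seeking to ftell(file) + kPaddingBytes with SEEK_SET.
    return std::fseek(file, kPaddingBytes, SEEK_CUR) == 0;
}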

View file

@ -160,7 +160,7 @@ BreakpointMap breakpoints_write;
} // Anonymous namespace
static Kernel::Thread* FindThreadById(int id) {
const auto& threads = Kernel::GetThreadList();
const auto& threads = Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread.get();
@ -535,7 +535,8 @@ static void HandleQuery() {
SendReply(target_xml);
} else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) {
std::string val = "m";
const auto& threads = Kernel::GetThreadList();
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
for (const auto& thread : threads) {
val += fmt::format("{:x},", thread->GetThreadId());
}
@ -547,7 +548,8 @@ static void HandleQuery() {
std::string buffer;
buffer += "l<?xml version=\"1.0\"?>";
buffer += "<threads>";
const auto& threads = Kernel::GetThreadList();
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
for (const auto& thread : threads) {
buffer += fmt::format(R"*(<thread id="{:x}" name="Thread {:x}"></thread>)*",
thread->GetThreadId(), thread->GetThreadId());

View file

@ -8,6 +8,7 @@
#include <unordered_map>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/applets/applet.h"
#include "core/hle/applets/erreula.h"
@ -38,7 +39,7 @@ namespace Applets {
static std::unordered_map<Service::APT::AppletId, std::shared_ptr<Applet>> applets;
/// The CoreTiming event identifier for the Applet update callback.
static CoreTiming::EventType* applet_update_event = nullptr;
static Core::TimingEventType* applet_update_event = nullptr;
/// The interval at which the Applet update callback will be called, 16.6ms
static const u64 applet_update_interval_us = 16666;
@ -88,8 +89,8 @@ static void AppletUpdateEvent(u64 applet_id, s64 cycles_late) {
// If the applet is still running after the last update, reschedule the event
if (applet->IsRunning()) {
CoreTiming::ScheduleEvent(usToCycles(applet_update_interval_us) - cycles_late,
applet_update_event, applet_id);
Core::System::GetInstance().CoreTiming().ScheduleEvent(
usToCycles(applet_update_interval_us) - cycles_late, applet_update_event, applet_id);
} else {
// Otherwise the applet has terminated, in which case we should clean it up
applets[id] = nullptr;
@ -101,8 +102,8 @@ ResultCode Applet::Start(const Service::APT::AppletStartupParameter& parameter)
if (result.IsError())
return result;
// Schedule the update event
CoreTiming::ScheduleEvent(usToCycles(applet_update_interval_us), applet_update_event,
static_cast<u64>(id));
Core::System::GetInstance().CoreTiming().ScheduleEvent(
usToCycles(applet_update_interval_us), applet_update_event, static_cast<u64>(id));
return result;
}
@ -128,11 +129,12 @@ bool IsLibraryAppletRunning() {
void Init() {
// Register the applet update callback
applet_update_event = CoreTiming::RegisterEvent("HLE Applet Update Event", AppletUpdateEvent);
applet_update_event = Core::System::GetInstance().CoreTiming().RegisterEvent(
"HLE Applet Update Event", AppletUpdateEvent);
}
void Shutdown() {
CoreTiming::RemoveEvent(applet_update_event);
Core::System::GetInstance().CoreTiming().RemoveEvent(applet_update_event);
}
} // namespace Applets
} // namespace HLE

View file

@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/erreula.h"
#include "core/hle/service/apt/apt.h"
@ -27,11 +28,9 @@ ResultCode ErrEula::ReceiveParameter(const Service::APT::MessageParameter& param
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Allocate a heap block of the required size for this applet.
heap_memory = std::make_shared<std::vector<u8>>(capture_info.size);
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = Kernel::SharedMemory::CreateForApplet(
heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"ErrEula Memory");
// Send the response message with the newly created SharedMemory

View file

@ -7,6 +7,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/mii_selector.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
@ -34,11 +35,9 @@ ResultCode MiiSelector::ReceiveParameter(const Service::APT::MessageParameter& p
memcpy(&capture_info, parameter.buffer.data(), sizeof(capture_info));
using Kernel::MemoryPermission;
// Allocate a heap block of the required size for this applet.
heap_memory = std::make_shared<std::vector<u8>>(capture_info.size);
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = Kernel::SharedMemory::CreateForApplet(
heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"MiiSelector Memory");
// Send the response message with the newly created SharedMemory

View file

@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/mint.h"
#include "core/hle/service/apt/apt.h"
@ -27,11 +28,9 @@ ResultCode Mint::ReceiveParameter(const Service::APT::MessageParameter& paramete
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Allocate a heap block of the required size for this applet.
heap_memory = std::make_shared<std::vector<u8>>(capture_info.size);
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = Kernel::SharedMemory::CreateForApplet(
heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"Mint Memory");
// Send the response message with the newly created SharedMemory

View file

@ -8,6 +8,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/swkbd.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
@ -38,11 +39,9 @@ ResultCode SoftwareKeyboard::ReceiveParameter(Service::APT::MessageParameter con
memcpy(&capture_info, parameter.buffer.data(), sizeof(capture_info));
using Kernel::MemoryPermission;
// Allocate a heap block of the required size for this applet.
heap_memory = std::make_shared<std::vector<u8>>(capture_info.size);
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = Kernel::SharedMemory::CreateForApplet(
heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"SoftwareKeyboard Memory");
// Send the response message with the newly created SharedMemory

View file

@ -9,27 +9,6 @@
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
namespace Kernel {
/// Offset into command buffer of header
static const int kCommandHeaderOffset = 0x80;
/**
* Returns a pointer to the command buffer in the current thread's TLS
* TODO(Subv): This is not entirely correct, the command buffer should be copied from
* the thread's TLS to an intermediate buffer in kernel memory, and then copied again to
* the service handler process' memory.
* @param offset Optional offset into command buffer
* @param offset Optional offset into command buffer (in bytes)
* @return Pointer to command buffer
*/
inline u32* GetCommandBuffer(const int offset = 0) {
return (u32*)Memory::GetPointer(GetCurrentThread()->GetTLSAddress() + kCommandHeaderOffset +
offset);
}
} // namespace Kernel
namespace IPC {
/// Size of the command buffer area, in 32-bit words.

View file

@ -7,6 +7,7 @@
#include "common/logging/log.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
@ -64,11 +65,11 @@ SharedPtr<Thread> AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
return thread;
}
AddressArbiter::AddressArbiter() {}
AddressArbiter::AddressArbiter(KernelSystem& kernel) : Object(kernel) {}
AddressArbiter::~AddressArbiter() {}
SharedPtr<AddressArbiter> AddressArbiter::Create(std::string name) {
SharedPtr<AddressArbiter> address_arbiter(new AddressArbiter);
SharedPtr<AddressArbiter> KernelSystem::CreateAddressArbiter(std::string name) {
SharedPtr<AddressArbiter> address_arbiter(new AddressArbiter(*this));
address_arbiter->name = std::move(name);

View file

@ -31,14 +31,6 @@ enum class ArbitrationType : u32 {
class AddressArbiter final : public Object {
public:
/**
* Creates an address arbiter.
*
* @param name Optional name used for debugging.
* @returns The created AddressArbiter.
*/
static SharedPtr<AddressArbiter> Create(std::string name = "Unknown");
std::string GetTypeName() const override {
return "Arbiter";
}
@ -57,7 +49,7 @@ public:
s32 value, u64 nanoseconds);
private:
AddressArbiter();
explicit AddressArbiter(KernelSystem& kernel);
~AddressArbiter() override;
/// Puts the thread to wait on the specified arbitration address under this address arbiter.
@ -72,6 +64,8 @@ private:
/// Threads waiting for the address arbiter to be signaled.
std::vector<SharedPtr<Thread>> waiting_threads;
friend class KernelSystem;
};
} // namespace Kernel

View file

@ -13,7 +13,7 @@
namespace Kernel {
ClientPort::ClientPort() = default;
ClientPort::ClientPort(KernelSystem& kernel) : kernel(kernel), Object(kernel) {}
ClientPort::~ClientPort() = default;
ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() {
@ -26,7 +26,7 @@ ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() {
active_sessions++;
// Create a new session pair, let the created sessions inherit the parent port's HLE handler.
auto sessions = ServerSession::CreateSessionPair(server_port->GetName(), this);
auto sessions = kernel.CreateSessionPair(server_port->GetName(), this);
if (server_port->hle_handler)
server_port->hle_handler->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions));

View file

@ -48,13 +48,16 @@ public:
void ConnectionClosed();
private:
ClientPort();
explicit ClientPort(KernelSystem& kernel);
~ClientPort() override;
KernelSystem& kernel;
SharedPtr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
u32 active_sessions = 0; ///< Number of currently open sessions to this port
std::string name; ///< Name of client port (optional)
friend class KernelSystem;
};
} // namespace Kernel

View file

@ -13,7 +13,7 @@
namespace Kernel {
ClientSession::ClientSession() = default;
ClientSession::ClientSession(KernelSystem& kernel) : Object(kernel) {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
// the emulated application.

View file

@ -12,13 +12,12 @@
namespace Kernel {
class ServerSession;
class Session;
class Thread;
class ClientSession final : public Object {
public:
friend class ServerSession;
friend class KernelSystem;
std::string GetTypeName() const override {
return "ClientSession";
@ -46,7 +45,7 @@ public:
std::shared_ptr<Session> parent;
private:
ClientSession();
explicit ClientSession(KernelSystem& kernel);
~ClientSession() override;
};

View file

@ -3,15 +3,13 @@
// Refer to the license.txt file included.
#include <cstring>
#include "core/hle/config_mem.h"
#include "core/hle/kernel/config_mem.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace ConfigMem {
ConfigMemDef config_mem;
void Init() {
Handler::Handler() {
std::memset(&config_mem, 0, sizeof(config_mem));
// Values extracted from firmware 11.2.0-35E
@ -28,4 +26,8 @@ void Init() {
config_mem.firm_ctr_sdk_ver = 0x0000F297;
}
ConfigMemDef& Handler::GetConfigMem() {
return config_mem;
}
} // namespace ConfigMem

View file

@ -49,8 +49,13 @@ struct ConfigMemDef {
static_assert(sizeof(ConfigMemDef) == Memory::CONFIG_MEMORY_SIZE,
"Config Memory structure size is wrong");
extern ConfigMemDef config_mem;
class Handler {
public:
Handler();
ConfigMemDef& GetConfigMem();
void Init();
private:
ConfigMemDef config_mem;
};
} // namespace ConfigMem

View file

@ -67,6 +67,10 @@ constexpr ResultCode ERR_MISALIGNED_SIZE(ErrorDescription::MisalignedSize, Error
constexpr ResultCode ERR_OUT_OF_MEMORY(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
ErrorSummary::OutOfResource,
ErrorLevel::Permanent); // 0xD86007F3
/// Returned when out of heap or linear heap memory when allocating
constexpr ResultCode ERR_OUT_OF_HEAP_MEMORY(ErrorDescription::OutOfMemory, ErrorModule::OS,
ErrorSummary::OutOfResource,
ErrorLevel::Status); // 0xC860180A
constexpr ResultCode ERR_NOT_IMPLEMENTED(ErrorDescription::NotImplemented, ErrorModule::OS,
ErrorSummary::InvalidArgument,
ErrorLevel::Usage); // 0xE0E01BF4

View file

@ -7,16 +7,16 @@
#include <vector>
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
Event::Event() {}
Event::Event(KernelSystem& kernel) : WaitObject(kernel) {}
Event::~Event() {}
SharedPtr<Event> Event::Create(ResetType reset_type, std::string name) {
SharedPtr<Event> evt(new Event);
SharedPtr<Event> KernelSystem::CreateEvent(ResetType reset_type, std::string name) {
SharedPtr<Event> evt(new Event(*this));
evt->signaled = false;
evt->reset_type = reset_type;

View file

@ -12,13 +12,6 @@ namespace Kernel {
class Event final : public WaitObject {
public:
/**
* Creates an event
* @param reset_type ResetType describing how to create event
* @param name Optional name of event
*/
static SharedPtr<Event> Create(ResetType reset_type, std::string name = "Unknown");
std::string GetTypeName() const override {
return "Event";
}
@ -47,13 +40,15 @@ public:
void Clear();
private:
Event();
explicit Event(KernelSystem& kernel);
~Event() override;
ResetType reset_type; ///< Current ResetType
bool signaled; ///< Whether the event has already been signaled
std::string name; ///< Name of event (optional)
friend class KernelSystem;
};
} // namespace Kernel

View file

@ -12,9 +12,7 @@
namespace Kernel {
HandleTable g_handle_table;
HandleTable::HandleTable() {
HandleTable::HandleTable(KernelSystem& kernel) : kernel(kernel) {
next_generation = 1;
Clear();
}
@ -74,9 +72,9 @@ bool HandleTable::IsValid(Handle handle) const {
SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
return GetCurrentThread();
return kernel.GetThreadManager().GetCurrentThread();
} else if (handle == CurrentProcess) {
return g_current_process;
return kernel.GetCurrentProcess();
}
if (!IsValid(handle)) {

View file

@ -42,7 +42,7 @@ enum KernelHandle : Handle {
*/
class HandleTable final : NonCopyable {
public:
HandleTable();
explicit HandleTable(KernelSystem& kernel);
/**
* Allocates a handle for the given object.
@ -119,8 +119,8 @@ private:
/// Head of the free slots linked list.
u16 next_free_slot;
KernelSystem& kernel;
};
extern HandleTable g_handle_table;
} // namespace Kernel

View file

@ -6,6 +6,7 @@
#include <vector>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
@ -49,13 +50,14 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buff;
Memory::ReadBlock(*process, thread->GetCommandBufferAddress(), cmd_buff.data(),
cmd_buff.size() * sizeof(u32));
context.WriteToOutgoingCommandBuffer(cmd_buff.data(), *process, Kernel::g_handle_table);
context.WriteToOutgoingCommandBuffer(cmd_buff.data(), *process);
// Copy the translated command buffer back into the thread's command buffer area.
Memory::WriteBlock(*process, thread->GetCommandBufferAddress(), cmd_buff.data(),
cmd_buff.size() * sizeof(u32));
};
auto event = Kernel::Event::Create(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
auto event = Core::System::GetInstance().Kernel().CreateEvent(Kernel::ResetType::OneShot,
"HLE Pause Event: " + reason);
thread->status = ThreadStatus::WaitHleEvent;
thread->wait_objects = {event};
event->AddWaitingThread(thread);
@ -96,8 +98,7 @@ void HLERequestContext::AddStaticBuffer(u8 buffer_id, std::vector<u8> data) {
}
ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
Process& src_process,
HandleTable& src_table) {
Process& src_process) {
IPC::Header header{src_cmdbuf[0]};
std::size_t untranslated_size = 1u + header.normal_params_size;
@ -120,10 +121,10 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* sr
Handle handle = src_cmdbuf[i];
SharedPtr<Object> object = nullptr;
if (handle != 0) {
object = src_table.GetGeneric(handle);
object = src_process.handle_table.GetGeneric(handle);
ASSERT(object != nullptr); // TODO(yuriks): Return error
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_table.Close(handle);
src_process.handle_table.Close(handle);
}
}
@ -161,8 +162,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* sr
return RESULT_SUCCESS;
}
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process,
HandleTable& dst_table) const {
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf,
Process& dst_process) const {
IPC::Header header{cmd_buf[0]};
std::size_t untranslated_size = 1u + header.normal_params_size;
@ -187,7 +188,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, P
Handle handle = 0;
if (object != nullptr) {
// TODO(yuriks): Figure out the proper error handling for if this fails
handle = dst_table.Create(object).Unwrap();
handle = dst_process.handle_table.Create(object).Unwrap();
}
dst_cmdbuf[i++] = handle;
}

View file

@ -124,8 +124,7 @@ private:
/**
* Class containing information about an in-flight IPC request being handled by an HLE service
* implementation. Services should avoid using old global APIs (e.g. Kernel::GetCommandBuffer()) and
* when possible use the APIs in this class to service the request.
* implementation.
*
* HLE handle protocol
* ===================
@ -226,11 +225,9 @@ public:
MappedBuffer& GetMappedBuffer(u32 id_from_cmdbuf);
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, Process& src_process,
HandleTable& src_table);
ResultCode PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, Process& src_process);
/// Writes data from this context back to the requesting process/thread.
ResultCode WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process,
HandleTable& dst_table) const;
ResultCode WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process) const;
private:
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;

View file

@ -14,6 +14,68 @@
namespace Kernel {
void ScanForAndUnmapBuffer(std::array<u32, IPC::COMMAND_BUFFER_LENGTH>& dst_cmd_buf,
const std::size_t dst_command_size, std::size_t& target_index,
SharedPtr<Process> src_process, SharedPtr<Process> dst_process,
const VAddr source_address, const VAddr page_start, const u32 num_pages,
const u32 size, const IPC::MappedBufferPermissions permissions) {
while (target_index < dst_command_size) {
u32 desc = dst_cmd_buf[target_index++];
if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::CopyHandle ||
IPC::GetDescriptorType(desc) == IPC::DescriptorType::MoveHandle) {
u32 num_handles = IPC::HandleNumberFromDesc(desc);
for (u32 j = 0; j < num_handles; ++j) {
target_index += 1;
}
continue;
}
if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::CallingPid ||
IPC::GetDescriptorType(desc) == IPC::DescriptorType::StaticBuffer) {
target_index += 1;
continue;
}
if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) {
VAddr dest_address = dst_cmd_buf[target_index];
IPC::MappedBufferDescInfo dest_descInfo{desc};
u32 dest_size = static_cast<u32>(dest_descInfo.size);
IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms;
if (dest_size == 0) {
target_index += 1;
continue;
}
ASSERT(permissions == dest_permissions && size == dest_size);
// Readonly buffers do not need to be copied over to the target
// process again because they were (presumably) not modified. This
// behavior is consistent with the real kernel.
if (permissions != IPC::MappedBufferPermissions::R) {
// Copy the modified buffer back into the target process
Memory::CopyBlock(*src_process, *dst_process, source_address, dest_address, size);
}
VAddr prev_reserve = page_start - Memory::PAGE_SIZE;
VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second;
auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second;
ASSERT(prev_vma.meminfo_state == MemoryState::Reserved &&
next_vma.meminfo_state == MemoryState::Reserved);
// Unmap the buffer and guard pages from the source process
ResultCode result = src_process->vm_manager.UnmapRange(
page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
ASSERT(result == RESULT_SUCCESS);
target_index += 1;
break;
}
}
}
ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread> dst_thread,
VAddr src_address, VAddr dst_address, bool reply) {
@ -33,6 +95,18 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
Memory::ReadBlock(*src_process, src_address, cmd_buf.data(), command_size * sizeof(u32));
// Create a copy of the target's command buffer
IPC::Header dst_header;
Memory::ReadBlock(*dst_process, dst_address, &dst_header.raw, sizeof(dst_header.raw));
std::size_t dst_untranslated_size = 1u + dst_header.normal_params_size;
std::size_t dst_command_size = dst_untranslated_size + dst_header.translate_params_size;
std::size_t target_index = dst_untranslated_size;
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmd_buf;
Memory::ReadBlock(*dst_process, dst_address, dst_cmd_buf.data(),
dst_command_size * sizeof(u32));
std::size_t i = untranslated_size;
while (i < command_size) {
u32 descriptor = cmd_buf[i];
@ -60,9 +134,9 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
} else if (handle == CurrentProcess) {
object = src_process;
} else if (handle != 0) {
object = g_handle_table.GetGeneric(handle);
object = src_process->handle_table.GetGeneric(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
g_handle_table.Close(handle);
src_process->handle_table.Close(handle);
}
}
@ -73,7 +147,7 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
continue;
}
auto result = g_handle_table.Create(std::move(object));
auto result = dst_process->handle_table.Create(std::move(object));
cmd_buf[i++] = result.ValueOr(0);
}
break;
@ -128,76 +202,50 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
u32 num_pages =
Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
// Skip when the size is zero and num_pages == 0
if (size == 0) {
cmd_buf[i++] = 0;
break;
}
ASSERT(num_pages >= 1);
if (reply) {
// TODO(Subv): Scan the target's command buffer to make sure that there was a
// MappedBuffer descriptor in the original request. The real kernel panics if you
// try to reply with an unsolicited MappedBuffer.
// Scan the target's command buffer for the matching mapped buffer.
// The real kernel panics if you try to reply with an unsolicited MappedBuffer.
ScanForAndUnmapBuffer(dst_cmd_buf, dst_command_size, target_index, src_process,
dst_process, source_address, page_start, num_pages, size,
permissions);
// Unmap the buffers. Readonly buffers do not need to be copied over to the target
// process again because they were (presumably) not modified. This behavior is
// consistent with the real kernel.
if (permissions == IPC::MappedBufferPermissions::R) {
ResultCode result = src_process->vm_manager.UnmapRange(
page_start, num_pages * Memory::PAGE_SIZE);
ASSERT(result == RESULT_SUCCESS);
}
ASSERT_MSG(permissions == IPC::MappedBufferPermissions::R,
"Unmapping Write MappedBuffers is unimplemented");
i += 1;
break;
}
VAddr target_address = 0;
auto IsPageAligned = [](VAddr address) -> bool {
return (address & Memory::PAGE_MASK) == 0;
};
// TODO(Subv): Support more than 1 page and aligned page mappings
ASSERT_MSG(
num_pages == 1 &&
(!IsPageAligned(source_address) || !IsPageAligned(source_address + size)),
"MappedBuffers of more than one page or aligned transfers are not implemented");
// TODO(Subv): Perform permission checks.
// TODO(Subv): Leave a page of Reserved memory before the first page and after the last
// page.
// Reserve a page of memory before the mapped buffer
auto reserve_buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
dst_process->vm_manager.MapMemoryBlockToBase(
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
if (!IsPageAligned(source_address) ||
(num_pages == 1 && !IsPageAligned(source_address + size))) {
// If the address of the source buffer is not page-aligned or if the buffer doesn't
// fill an entire page, then we have to allocate a page of memory in the target
// process and copy over the data from the input buffer. This allocated buffer will
// be copied back to the source process and deallocated when the server replies to
// the request via ReplyAndReceive.
auto buffer = std::make_shared<std::vector<u8>>(num_pages * Memory::PAGE_SIZE);
Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size);
auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
// Number of bytes until the next page.
std::size_t difference_to_page =
Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address;
// If the data fits in one page we can just copy the required size instead of the
// entire page.
std::size_t read_size =
num_pages == 1 ? static_cast<std::size_t>(size) : difference_to_page;
Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset,
read_size);
// Map the page into the target process' address space.
target_address =
dst_process->vm_manager
.MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
buffer, 0, static_cast<u32>(buffer->size()),
Kernel::MemoryState::Shared)
.Unwrap();
}
// Map the page(s) into the target process' address space.
target_address = dst_process->vm_manager
.MapMemoryBlockToBase(
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, buffer, 0,
static_cast<u32>(buffer->size()), Kernel::MemoryState::Shared)
.Unwrap();
cmd_buf[i++] = target_address + page_offset;
// Reserve a page of memory after the mapped buffer
dst_process->vm_manager.MapMemoryBlockToBase(
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
break;
}
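// Illustration (not part of the diff): a worked example of the first-page copy size computed
// above, with made-up values. For a source_address of 0x08000E80 and size 0x500 (spanning two
// pages), difference_to_page = AlignUp(0x08000E80, PAGE_SIZE) - 0x08000E80 = 0x180, so only the
// 0x180 bytes up to the page boundary are copied into the staging page; a single-page buffer
// (num_pages == 1) copies the full `size` instead. page_offset (the offset of source_address
// inside its page, 0xE80 here) keeps the data at the same offset within the page that gets
// mapped into the target process.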
default:

View file

@ -2,46 +2,77 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/config_mem.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/shared_page.h"
namespace Kernel {
std::atomic<u32> Object::next_object_id{0};
/// Initialize the kernel
void Init(u32 system_mode) {
ConfigMem::Init();
KernelSystem::KernelSystem(u32 system_mode) {
MemoryInit(system_mode);
Kernel::MemoryInit(system_mode);
Kernel::ResourceLimitsInit();
Kernel::ThreadingInit();
Kernel::TimersInit();
Object::next_object_id = 0;
// TODO(Subv): Start the process ids from 10 for now, as lower PIDs are
// reserved for low-level services
Process::next_process_id = 10;
resource_limits = std::make_unique<ResourceLimitList>(*this);
thread_manager = std::make_unique<ThreadManager>();
timer_manager = std::make_unique<TimerManager>();
}
/// Shutdown the kernel
void Shutdown() {
g_handle_table.Clear(); // Free all kernel objects
KernelSystem::~KernelSystem() = default;
Kernel::ThreadingShutdown();
g_current_process = nullptr;
ResourceLimitList& KernelSystem::ResourceLimit() {
return *resource_limits;
}
Kernel::TimersShutdown();
Kernel::ResourceLimitsShutdown();
Kernel::MemoryShutdown();
const ResourceLimitList& KernelSystem::ResourceLimit() const {
return *resource_limits;
}
u32 KernelSystem::GenerateObjectID() {
return next_object_id++;
}
SharedPtr<Process> KernelSystem::GetCurrentProcess() const {
return current_process;
}
void KernelSystem::SetCurrentProcess(SharedPtr<Process> process) {
current_process = std::move(process);
}
ThreadManager& KernelSystem::GetThreadManager() {
return *thread_manager;
}
const ThreadManager& KernelSystem::GetThreadManager() const {
return *thread_manager;
}
TimerManager& KernelSystem::GetTimerManager() {
return *timer_manager;
}
const TimerManager& KernelSystem::GetTimerManager() const {
return *timer_manager;
}
SharedPage::Handler& KernelSystem::GetSharedPageHandler() {
return *shared_page_handler;
}
const SharedPage::Handler& KernelSystem::GetSharedPageHandler() const {
return *shared_page_handler;
}
void KernelSystem::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
named_ports.emplace(std::move(name), std::move(port));
}
} // namespace Kernel

View file

@ -4,14 +4,250 @@
#pragma once
#include <array>
#include <atomic>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/result.h"
namespace ConfigMem {
class Handler;
}
namespace SharedPage {
class Handler;
}
namespace Kernel {
/// Initialize the kernel with the specified system mode.
void Init(u32 system_mode);
class AddressArbiter;
class Event;
class Mutex;
class CodeSet;
class Process;
class Thread;
class Semaphore;
class Timer;
class ClientPort;
class ServerPort;
class ClientSession;
class ServerSession;
class ResourceLimitList;
class SharedMemory;
class ThreadManager;
class TimerManager;
class VMManager;
/// Shutdown the kernel
void Shutdown();
enum class ResetType {
OneShot,
Sticky,
Pulse,
};
/// Permissions for mapped shared memory blocks
enum class MemoryPermission : u32 {
None = 0,
Read = (1u << 0),
Write = (1u << 1),
ReadWrite = (Read | Write),
Execute = (1u << 2),
ReadExecute = (Read | Execute),
WriteExecute = (Write | Execute),
ReadWriteExecute = (Read | Write | Execute),
DontCare = (1u << 28)
};
enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,
BASE = 3,
};
template <typename T>
using SharedPtr = boost::intrusive_ptr<T>;
class KernelSystem {
public:
explicit KernelSystem(u32 system_mode);
~KernelSystem();
/**
* Creates an address arbiter.
*
* @param name Optional name used for debugging.
* @returns The created AddressArbiter.
*/
SharedPtr<AddressArbiter> CreateAddressArbiter(std::string name = "Unknown");
/**
* Creates an event
* @param reset_type ResetType describing how to create event
* @param name Optional name of event
*/
SharedPtr<Event> CreateEvent(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a mutex.
* @param initial_locked Specifies if the mutex should be locked initially
* @param name Optional name of mutex
* @return Pointer to new Mutex object
*/
SharedPtr<Mutex> CreateMutex(bool initial_locked, std::string name = "Unknown");
SharedPtr<CodeSet> CreateCodeSet(std::string name, u64 program_id);
SharedPtr<Process> CreateProcess(SharedPtr<CodeSet> code_set);
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread
* @return A shared pointer to the newly created thread
*/
ResultVal<SharedPtr<Thread>> CreateThread(std::string name, VAddr entry_point, u32 priority,
u32 arg, s32 processor_id, VAddr stack_top,
Process& owner_process);
/**
* Creates a semaphore.
* @param initial_count Number of slots reserved for other threads
* @param max_count Maximum number of slots the semaphore can have
* @param name Optional name of semaphore
* @return The created semaphore
*/
ResultVal<SharedPtr<Semaphore>> CreateSemaphore(s32 initial_count, s32 max_count,
std::string name = "Unknown");
/**
* Creates a timer
* @param reset_type ResetType describing how to create the timer
* @param name Optional name of timer
* @return The created Timer
*/
SharedPtr<Timer> CreateTimer(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
* @param max_sessions Maximum number of sessions to the port
* @param name Optional name of the ports
* @return The created port tuple
*/
std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> CreatePortPair(
u32 max_sessions, std::string name = "UnknownPort");
/**
* Creates a pair of ServerSession and an associated ClientSession.
* @param name Optional name of the ports.
* @param client_port Optional ClientPort that spawned this session.
* @return The created session tuple
*/
std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>> CreateSessionPair(
const std::string& name = "Unknown", SharedPtr<ClientPort> client_port = nullptr);
ResourceLimitList& ResourceLimit();
const ResourceLimitList& ResourceLimit() const;
/**
* Creates a shared memory object.
* @param owner_process Process that created this shared memory object.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param address The address from which to map the Shared Memory.
* @param region If the address is 0, the shared memory will be allocated in this region of the
* linear heap.
* @param name Optional object name, used for debugging purposes.
*/
SharedPtr<SharedMemory> CreateSharedMemory(Process* owner_process, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
VAddr address = 0,
MemoryRegion region = MemoryRegion::BASE,
std::string name = "Unknown");
/**
* Creates a shared memory object from a block of memory managed by an HLE applet.
* @param offset The offset into the heap block that the SharedMemory will map.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param name Optional object name, used for debugging purposes.
*/
SharedPtr<SharedMemory> CreateSharedMemoryForApplet(u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
u32 GenerateObjectID();
/// Retrieves a process from the current list of processes.
SharedPtr<Process> GetProcessById(u32 process_id) const;
SharedPtr<Process> GetCurrentProcess() const;
void SetCurrentProcess(SharedPtr<Process> process);
ThreadManager& GetThreadManager();
const ThreadManager& GetThreadManager() const;
TimerManager& GetTimerManager();
const TimerManager& GetTimerManager() const;
void MapSharedPages(VMManager& address_space);
SharedPage::Handler& GetSharedPageHandler();
const SharedPage::Handler& GetSharedPageHandler() const;
MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);
std::array<MemoryRegionInfo, 3> memory_regions;
/// Adds a port to the named port table
void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
/// Map of named ports managed by the kernel, which can be retrieved using the ConnectToPort
std::unordered_map<std::string, SharedPtr<ClientPort>> named_ports;
private:
void MemoryInit(u32 mem_type);
std::unique_ptr<ResourceLimitList> resource_limits;
std::atomic<u32> next_object_id{0};
// Note: keep the member order below in order to perform correct destruction.
// Thread manager is destructed before process list in order to Stop threads and clear thread
// info from their parent processes first. Timer manager is destructed after process list
// because timers are destructed along with process list and they need to clear info from the
// timer manager.
// TODO (wwylele): refactor the cleanup sequence to make this less complicated and sensitive.
std::unique_ptr<TimerManager> timer_manager;
// TODO(Subv): Start the process ids from 10 for now, as lower PIDs are
// reserved for low-level services
u32 next_process_id = 10;
// Lists all processes that exist in the current session.
std::vector<SharedPtr<Process>> process_list;
SharedPtr<Process> current_process;
std::unique_ptr<ThreadManager> thread_manager;
std::unique_ptr<ConfigMem::Handler> config_mem_handler;
std::unique_ptr<SharedPage::Handler> shared_page_handler;
};
} // namespace Kernel
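// Illustration only (not part of the diff): how a caller would use the KernelSystem factory
// methods declared above in place of the removed static Create() helpers. A minimal sketch
// assuming the surrounding core headers; the function name, port name and counts are made-up.
#include "core/hle/kernel/kernel.h"

void IllustrateKernelSystemUsage() {
    Kernel::KernelSystem kernel(/*system_mode=*/2);
    // Objects are created through the kernel instance, which stamps them with a per-instance
    // object id (GenerateObjectID) instead of the old global counter.
    auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "example_event");
    auto [server_port, client_port] = kernel.CreatePortPair(1, "example:port");
    kernel.AddNamedPort("example:port", std::move(client_port));
}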

View file

@ -12,8 +12,10 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/config_mem.h"
#include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
@ -23,8 +25,6 @@
namespace Kernel {
MemoryRegionInfo memory_regions[3];
/// Size of the APPLICATION, SYSTEM and BASE memory regions (respectively) for each system
/// memory configuration type.
static const u32 memory_region_sizes[8][3] = {
@ -41,7 +41,7 @@ static const u32 memory_region_sizes[8][3] = {
{0x0B200000, 0x02E00000, 0x02000000}, // 7
};
void MemoryInit(u32 mem_type) {
void KernelSystem::MemoryInit(u32 mem_type) {
// TODO(yuriks): On the n3DS, all o3DS configurations (<=5) are forced to 6 instead.
ASSERT_MSG(mem_type <= 5, "New 3DS memory configurations aren't supported yet!");
ASSERT(mem_type != 1);
@ -50,13 +50,7 @@ void MemoryInit(u32 mem_type) {
// the sizes specified in the memory_region_sizes table.
VAddr base = 0;
for (int i = 0; i < 3; ++i) {
memory_regions[i].base = base;
memory_regions[i].size = memory_region_sizes[mem_type][i];
memory_regions[i].used = 0;
memory_regions[i].linear_heap_memory = std::make_shared<std::vector<u8>>();
// Reserve enough space for this region of FCRAM.
// We do not want this block of memory to be relocated when allocating from it.
memory_regions[i].linear_heap_memory->reserve(memory_regions[i].size);
memory_regions[i].Reset(base, memory_region_sizes[mem_type][i]);
base += memory_regions[i].size;
}
@ -64,25 +58,19 @@ void MemoryInit(u32 mem_type) {
// We must've allocated the entire FCRAM by the end
ASSERT(base == Memory::FCRAM_SIZE);
using ConfigMem::config_mem;
config_mem_handler = std::make_unique<ConfigMem::Handler>();
auto& config_mem = config_mem_handler->GetConfigMem();
config_mem.app_mem_type = mem_type;
// app_mem_malloc does not always match the configured size for memory_region[0]: in case the
// n3DS type override is in effect it reports the size the game expects, not the real one.
config_mem.app_mem_alloc = memory_region_sizes[mem_type][0];
config_mem.sys_mem_alloc = memory_regions[1].size;
config_mem.base_mem_alloc = memory_regions[2].size;
shared_page_handler = std::make_unique<SharedPage::Handler>();
}
void MemoryShutdown() {
for (auto& region : memory_regions) {
region.base = 0;
region.size = 0;
region.used = 0;
region.linear_heap_memory = nullptr;
}
}
MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) {
MemoryRegionInfo* KernelSystem::GetMemoryRegion(MemoryRegion region) {
switch (region) {
case MemoryRegion::APPLICATION:
return &memory_regions[0];
@ -152,23 +140,93 @@ void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mappin
mapping.read_only ? VMAPermission::Read : VMAPermission::ReadWrite);
}
void MapSharedPages(VMManager& address_space) {
auto cfg_mem_vma = address_space
.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR,
reinterpret_cast<u8*>(&ConfigMem::config_mem),
Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
.Unwrap();
void KernelSystem::MapSharedPages(VMManager& address_space) {
auto cfg_mem_vma =
address_space
.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR,
reinterpret_cast<u8*>(&config_mem_handler->GetConfigMem()),
Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
.Unwrap();
address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
auto shared_page_vma =
address_space
.MapBackingMemory(
Memory::SHARED_PAGE_VADDR,
reinterpret_cast<u8*>(
&Core::System::GetInstance().GetSharedPageHandler()->GetSharedPage()),
Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
.MapBackingMemory(Memory::SHARED_PAGE_VADDR,
reinterpret_cast<u8*>(&shared_page_handler->GetSharedPage()),
Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
.Unwrap();
address_space.Reprotect(shared_page_vma, VMAPermission::Read);
}
void MemoryRegionInfo::Reset(u32 base, u32 size) {
this->base = base;
this->size = size;
used = 0;
free_blocks.clear();
// mark the entire region as free
free_blocks.insert(Interval::right_open(base, base + size));
}
MemoryRegionInfo::IntervalSet MemoryRegionInfo::HeapAllocate(u32 size) {
IntervalSet result;
u32 rest = size;
// Try allocating from the higher address
for (auto iter = free_blocks.rbegin(); iter != free_blocks.rend(); ++iter) {
ASSERT(iter->bounds() == boost::icl::interval_bounds::right_open());
if (iter->upper() - iter->lower() >= rest) {
// Requested size is fulfilled with this block
result += Interval(iter->upper() - rest, iter->upper());
rest = 0;
break;
}
result += *iter;
rest -= iter->upper() - iter->lower();
}
if (rest != 0) {
// There is not enough free space
return {};
}
free_blocks -= result;
used += size;
return result;
}
bool MemoryRegionInfo::LinearAllocate(u32 offset, u32 size) {
Interval interval(offset, offset + size);
if (!boost::icl::contains(free_blocks, interval)) {
// The requested range is already allocated
return false;
}
free_blocks -= interval;
used += size;
return true;
}
std::optional<u32> MemoryRegionInfo::LinearAllocate(u32 size) {
// Find the first sufficient continuous block from the lower address
for (const auto& interval : free_blocks) {
ASSERT(interval.bounds() == boost::icl::interval_bounds::right_open());
if (interval.upper() - interval.lower() >= size) {
Interval allocated(interval.lower(), interval.lower() + size);
free_blocks -= allocated;
used += size;
return allocated.lower();
}
}
// No sufficient block found
return {};
}
void MemoryRegionInfo::Free(u32 offset, u32 size) {
Interval interval(offset, offset + size);
ASSERT(!boost::icl::intersects(free_blocks, interval)); // must be allocated blocks
free_blocks += interval;
used -= size;
}
} // namespace Kernel
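// Illustration only (not part of the diff): a self-contained demonstration of the interval_set
// bookkeeping the allocator above relies on. Subtracting an interval from free_blocks splits
// any block it overlaps; adding it back on Free() merges adjacent blocks automatically. The
// function name and sizes are made-up.
#include <boost/icl/interval_set.hpp>
#include <cassert>
#include <cstdint>

void IllustrateIntervalAllocator() {
    using IntervalSet = boost::icl::interval_set<std::uint32_t>;
    using Interval = IntervalSet::interval_type;

    IntervalSet free_blocks;
    free_blocks.insert(Interval::right_open(0x0, 0x1000)); // one free 4 KiB block

    // "Allocate" 0x200 bytes at offset 0x400 by removing that range from the free set.
    free_blocks -= Interval::right_open(0x400, 0x600);
    assert(free_blocks.iterative_size() == 2); // split into [0x0,0x400) and [0x600,0x1000)

    // "Free" it again; adjacent blocks merge back into one.
    free_blocks += Interval::right_open(0x400, 0x600);
    assert(free_blocks.iterative_size() == 1);
}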

View file

@ -4,12 +4,13 @@
#pragma once
#include <memory>
#include <optional>
#include <boost/icl/interval_set.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/process.h"
namespace Kernel {
struct AddressMapping;
class VMManager;
struct MemoryRegionInfo {
@ -17,15 +18,50 @@ struct MemoryRegionInfo {
u32 size;
u32 used;
std::shared_ptr<std::vector<u8>> linear_heap_memory;
// The domain of the interval_set are offsets from start of FCRAM
using IntervalSet = boost::icl::interval_set<u32>;
using Interval = IntervalSet::interval_type;
IntervalSet free_blocks;
/**
* Reset the allocator state
* @param base The base offset to the beginning of FCRAM.
* @param size The region size this allocator manages
*/
void Reset(u32 base, u32 size);
/**
* Allocates memory from the heap.
* @param size The size of memory to allocate.
* @returns The set of blocks that make up the allocation request. Empty set if there is not
* enough space.
*/
IntervalSet HeapAllocate(u32 size);
/**
* Allocates memory from the linear heap with specific address and size.
* @param offset the address offset to the beginning of FCRAM.
* @param size size of the memory to allocate.
* @returns true if the allocation is successful. false if the requested region is not free.
*/
bool LinearAllocate(u32 offset, u32 size);
/**
* Allocates memory from the linear heap with only size specified.
* @param size size of the memory to allocate.
* @returns the address offset to the beginning of FCRAM; null if there is not enough space
*/
std::optional<u32> LinearAllocate(u32 size);
/**
* Frees one segment of memory. The memory must have been allocated as heap or linear heap.
* @param offset the region address offset to the beginning of FCRAM.
* @param size the size of the region to free.
*/
void Free(u32 offset, u32 size);
};
void MemoryInit(u32 mem_type);
void MemoryShutdown();
MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);
void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping);
void MapSharedPages(VMManager& address_space);
extern MemoryRegionInfo memory_regions[3];
} // namespace Kernel
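// Illustration only (not part of the diff): intended use of the MemoryRegionInfo allocator
// declared above. A minimal sketch assuming the surrounding kernel headers; the region size
// and request sizes are made-up.
#include "core/hle/kernel/memory.h"

void IllustrateRegionAllocator(Kernel::MemoryRegionInfo& region) {
    region.Reset(0, 0x100000); // manage 1 MiB of FCRAM starting at offset 0

    // Heap allocations may come back as several FCRAM intervals, taken from the top down.
    auto heap_blocks = region.HeapAllocate(0x3000);

    // Linear allocations are a single contiguous block, found bottom up.
    auto linear_offset = region.LinearAllocate(0x2000);

    if (linear_offset)
        region.Free(*linear_offset, 0x2000);
    for (const auto& interval : heap_blocks)
        region.Free(interval.lower(), interval.upper() - interval.lower());
}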

View file

@ -24,11 +24,11 @@ void ReleaseThreadMutexes(Thread* thread) {
thread->held_mutexes.clear();
}
Mutex::Mutex() {}
Mutex::Mutex(KernelSystem& kernel) : WaitObject(kernel) {}
Mutex::~Mutex() {}
SharedPtr<Mutex> Mutex::Create(bool initial_locked, std::string name) {
SharedPtr<Mutex> mutex(new Mutex);
SharedPtr<Mutex> KernelSystem::CreateMutex(bool initial_locked, std::string name) {
SharedPtr<Mutex> mutex(new Mutex(*this));
mutex->lock_count = 0;
mutex->name = std::move(name);
@ -36,7 +36,7 @@ SharedPtr<Mutex> Mutex::Create(bool initial_locked, std::string name) {
// Acquire mutex with current thread if initialized as locked
if (initial_locked)
mutex->Acquire(GetCurrentThread());
mutex->Acquire(thread_manager->GetCurrentThread());
return mutex;
}

View file

@ -16,14 +16,6 @@ class Thread;
class Mutex final : public WaitObject {
public:
/**
* Creates a mutex.
* @param initial_locked Specifies if the mutex should be locked initially
* @param name Optional name of mutex
* @return Pointer to new Mutex object
*/
static SharedPtr<Mutex> Create(bool initial_locked, std::string name = "Unknown");
std::string GetTypeName() const override {
return "Mutex";
}
@ -61,8 +53,10 @@ public:
ResultCode Release(Thread* thread);
private:
Mutex();
explicit Mutex(KernelSystem& kernel);
~Mutex() override;
friend class KernelSystem;
};
/**

View file

@ -3,10 +3,13 @@
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
namespace Kernel {
Object::Object(KernelSystem& kernel) : object_id{kernel.GenerateObjectID()} {}
Object::~Object() = default;
bool Object::IsWaitable() const {

View file

@ -6,13 +6,13 @@
#include <atomic>
#include <string>
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
class KernelSystem;
using Handle = u32;
enum class HandleType : u32 {
@ -37,14 +37,9 @@ enum {
DEFAULT_STACK_SIZE = 0x4000,
};
enum class ResetType {
OneShot,
Sticky,
Pulse,
};
class Object : NonCopyable {
public:
explicit Object(KernelSystem& kernel);
virtual ~Object();
/// Returns a unique identifier for the object. For debugging purposes only.
@ -66,15 +61,12 @@ public:
*/
bool IsWaitable() const;
public:
static std::atomic<u32> next_object_id;
private:
friend void intrusive_ptr_add_ref(Object*);
friend void intrusive_ptr_release(Object*);
std::atomic<u32> ref_count{0};
std::atomic<u32> object_id{next_object_id++};
std::atomic<u32> object_id;
};
// Special functions used by boost::instrusive_ptr to do automatic ref-counting
@ -88,9 +80,6 @@ inline void intrusive_ptr_release(Object* object) {
}
}
template <typename T>
using SharedPtr = boost::intrusive_ptr<T>;
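// Illustration only (not part of the diff): a self-contained example of the ref-counting
// contract that intrusive_ptr_add_ref/intrusive_ptr_release above implement for Object. The
// Toy type and function name are made-up.
#include <atomic>
#include <boost/smart_ptr/intrusive_ptr.hpp>

struct Toy {
    std::atomic<int> ref_count{0};
};
inline void intrusive_ptr_add_ref(Toy* toy) {
    toy->ref_count.fetch_add(1, std::memory_order_relaxed);
}
inline void intrusive_ptr_release(Toy* toy) {
    if (toy->ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
        delete toy; // last reference gone
}

void IllustrateIntrusivePtr() {
    boost::intrusive_ptr<Toy> a(new Toy); // add_ref -> count 1
    auto b = a;                           // add_ref -> count 2
    // Both pointers release on scope exit; the second release deletes the Toy.
}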
/**
* Attempts to downcast the given Object pointer to a pointer to T.
* @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.

View file

@ -17,11 +17,8 @@
namespace Kernel {
// Lists all processes that exist in the current session.
static std::vector<SharedPtr<Process>> process_list;
SharedPtr<CodeSet> CodeSet::Create(std::string name, u64 program_id) {
SharedPtr<CodeSet> codeset(new CodeSet);
SharedPtr<CodeSet> KernelSystem::CreateCodeSet(std::string name, u64 program_id) {
SharedPtr<CodeSet> codeset(new CodeSet(*this));
codeset->name = std::move(name);
codeset->program_id = program_id;
@ -29,18 +26,17 @@ SharedPtr<CodeSet> CodeSet::Create(std::string name, u64 program_id) {
return codeset;
}
CodeSet::CodeSet() {}
CodeSet::CodeSet(KernelSystem& kernel) : Object(kernel) {}
CodeSet::~CodeSet() {}
u32 Process::next_process_id;
SharedPtr<Process> Process::Create(SharedPtr<CodeSet> code_set) {
SharedPtr<Process> process(new Process);
SharedPtr<Process> KernelSystem::CreateProcess(SharedPtr<CodeSet> code_set) {
SharedPtr<Process> process(new Process(*this));
process->codeset = std::move(code_set);
process->flags.raw = 0;
process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
process->status = ProcessStatus::Created;
process->process_id = ++next_process_id;
process_list.push_back(process);
return process;
@ -119,17 +115,13 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
}
void Process::Run(s32 main_thread_priority, u32 stack_size) {
memory_region = GetMemoryRegion(flags.memory_region);
memory_region = kernel.GetMemoryRegion(flags.memory_region);
auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
auto vma = vm_manager
.MapMemoryBlock(segment.addr, codeset->memory, segment.offset, segment.size,
memory_state)
.Unwrap();
vm_manager.Reprotect(vma, permissions);
misc_memory_used += segment.size;
memory_region->used += segment.size;
HeapAllocate(segment.addr, segment.size, permissions, memory_state, true);
Memory::WriteBlock(*this, segment.addr, codeset->memory->data() + segment.offset,
segment.size);
};
// Map CodeSet segments
@ -138,16 +130,11 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) {
MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);
// Allocate and map stack
vm_manager
.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
MemoryState::Locked)
.Unwrap();
misc_memory_used += stack_size;
memory_region->used += stack_size;
HeapAllocate(Memory::HEAP_VADDR_END - stack_size, stack_size, VMAPermission::ReadWrite,
MemoryState::Locked, true);
// Map special address mappings
MapSharedPages(vm_manager);
kernel.MapSharedPages(vm_manager);
for (const auto& mapping : address_mappings) {
HandleSpecialMapping(vm_manager, mapping);
}
@ -155,7 +142,7 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) {
status = ProcessStatus::Running;
vm_manager.LogLayout(Log::Level::Debug);
Kernel::SetupMainThread(codeset->entrypoint, main_thread_priority, this);
Kernel::SetupMainThread(kernel, codeset->entrypoint, main_thread_priority, this);
}
VAddr Process::GetLinearHeapAreaAddress() const {
@ -172,44 +159,55 @@ VAddr Process::GetLinearHeapLimit() const {
return GetLinearHeapBase() + memory_region->size;
}
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms,
MemoryState memory_state, bool skip_range_check) {
LOG_DEBUG(Kernel, "Allocate heap target={:08X}, size={:08X}", target, size);
if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
target + size < target) {
return ERR_INVALID_ADDRESS;
if (!skip_range_check) {
LOG_ERROR(Kernel, "Invalid heap address");
return ERR_INVALID_ADDRESS;
}
}
if (heap_memory == nullptr) {
// Initialize heap
heap_memory = std::make_shared<std::vector<u8>>();
heap_start = heap_end = target;
auto vma = vm_manager.FindVMA(target);
if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) {
LOG_ERROR(Kernel, "Trying to allocate already allocated memory");
return ERR_INVALID_ADDRESS_STATE;
}
// If necessary, expand backing vector to cover new heap extents.
if (target < heap_start) {
heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
heap_start = target;
vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
auto allocated_fcram = memory_region->HeapAllocate(size);
if (allocated_fcram.empty()) {
LOG_ERROR(Kernel, "Not enough space");
return ERR_OUT_OF_HEAP_MEMORY;
}
if (target + size > heap_end) {
heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
heap_end = target + size;
vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
// Maps heap block by block
VAddr interval_target = target;
for (const auto& interval : allocated_fcram) {
u32 interval_size = interval.upper() - interval.lower();
LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
interval.upper());
std::fill(Memory::fcram.begin() + interval.lower(),
Memory::fcram.begin() + interval.upper(), 0);
auto vma = vm_manager.MapBackingMemory(
interval_target, Memory::fcram.data() + interval.lower(), interval_size, memory_state);
ASSERT(vma.Succeeded());
vm_manager.Reprotect(vma.Unwrap(), perms);
interval_target += interval_size;
}
ASSERT(heap_end - heap_start == heap_memory->size());
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
size, MemoryState::Private));
vm_manager.Reprotect(vma, perms);
memory_used += size;
resource_limit->current_commit += size;
heap_used += size;
memory_region->used += size;
return MakeResult<VAddr>(heap_end - size);
return MakeResult<VAddr>(target);
}
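// Illustration (not part of the diff): the mapping loop above keeps the *virtual* range
// contiguous even when HeapAllocate() returns discontiguous FCRAM intervals. For a
// hypothetical 0x3000-byte request satisfied by {[0x100000,0x101000), [0x180000,0x182000)}:
//     target + 0x0000 .. target + 0x1000  ->  fcram + 0x100000
//     target + 0x1000 .. target + 0x3000  ->  fcram + 0x180000
// interval_target advances by each interval's size while the backing pointer jumps, so the
// process sees one linear heap block backed by two separate FCRAM blocks.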
ResultCode Process::HeapFree(VAddr target, u32 size) {
LOG_DEBUG(Kernel, "Free heap target={:08X}, size={:08X}", target, size);
if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
target + size < target) {
LOG_ERROR(Kernel, "Invalid heap address");
return ERR_INVALID_ADDRESS;
}
@ -217,59 +215,72 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
return RESULT_SUCCESS;
}
ResultCode result = vm_manager.UnmapRange(target, size);
if (result.IsError())
return result;
// Free heaps block by block
CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
for (const auto [backing_memory, block_size] : backing_blocks) {
memory_region->Free(Memory::GetFCRAMOffset(backing_memory), block_size);
}
heap_used -= size;
memory_region->used -= size;
ResultCode result = vm_manager.UnmapRange(target, size);
ASSERT(result.IsSuccess());
memory_used -= size;
resource_limit->current_commit -= size;
return RESULT_SUCCESS;
}
ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
auto& linheap_memory = memory_region->linear_heap_memory;
VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
// Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address),
// but explicit addresses are also accepted and respected.
LOG_DEBUG(Kernel, "Allocate linear heap target={:08X}, size={:08X}", target, size);
u32 physical_offset;
if (target == 0) {
target = heap_end;
auto offset = memory_region->LinearAllocate(size);
if (!offset) {
LOG_ERROR(Kernel, "Not enough space");
return ERR_OUT_OF_HEAP_MEMORY;
}
physical_offset = *offset;
target = physical_offset + GetLinearHeapAreaAddress();
} else {
if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
target + size < target) {
LOG_ERROR(Kernel, "Invalid linear heap address");
return ERR_INVALID_ADDRESS;
}
// Kernel would crash/return error when target doesn't meet some requirement.
// It seems that target is required to follow immediately after the allocated linear heap,
// or cover the entire hole if there is any.
// Right now we just ignore these checks because they are still unclear. Furthermore,
// games and homebrew only ever seem to pass target = 0 here (which lets the kernel decide
// the address), so this is not important.
physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
if (!memory_region->LinearAllocate(physical_offset, size)) {
LOG_ERROR(Kernel, "Trying to allocate already allocated memory");
return ERR_INVALID_ADDRESS_STATE;
}
}
if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target > heap_end ||
target + size < target) {
u8* backing_memory = Memory::fcram.data() + physical_offset;
return ERR_INVALID_ADDRESS;
}
std::fill(backing_memory, backing_memory + size, 0);
auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
ASSERT(vma.Succeeded());
vm_manager.Reprotect(vma.Unwrap(), perms);
// Expansion of the linear heap is only allowed if you do an allocation immediately at its
// end. It's possible to free gaps in the middle of the heap and then reallocate them later,
// but expansions are only allowed at the end.
if (target == heap_end) {
linheap_memory->insert(linheap_memory->end(), size, 0);
vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
}
// TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
// same region. It is unknown if or how the 3DS kernel checks against this.
std::size_t offset = target - GetLinearHeapBase();
CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size,
MemoryState::Continuous));
vm_manager.Reprotect(vma, perms);
linear_heap_used += size;
memory_region->used += size;
memory_used += size;
resource_limit->current_commit += size;
LOG_DEBUG(Kernel, "Allocated at target={:08X}", target);
return MakeResult<VAddr>(target);
}
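// Illustration (not part of the diff): physical_offset is relative to the start of FCRAM, so
// the address round-trip used above is simply
//     target          = GetLinearHeapAreaAddress() + physical_offset
//     physical_offset = target - GetLinearHeapAreaAddress()
// e.g. with a hypothetical linear heap base of 0x14000000, an allocation placed at FCRAM
// offset 0x00200000 is visible to the process at VAddr 0x14200000 and is backed by
// Memory::fcram.data() + 0x00200000.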
ResultCode Process::LinearFree(VAddr target, u32 size) {
auto& linheap_memory = memory_region->linear_heap_memory;
LOG_DEBUG(Kernel, "Free linear heap target={:08X}, size={:08X}", target, size);
if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
target + size < target) {
LOG_ERROR(Kernel, "Invalid linear heap address");
return ERR_INVALID_ADDRESS;
}
@ -277,41 +288,81 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
return RESULT_SUCCESS;
}
VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
if (target + size > heap_end) {
return ERR_INVALID_ADDRESS_STATE;
}
ResultCode result = vm_manager.UnmapRange(target, size);
if (result.IsError())
if (result.IsError()) {
LOG_ERROR(Kernel, "Trying to free already freed memory");
return result;
linear_heap_used -= size;
memory_region->used -= size;
if (target + size == heap_end) {
// End of linear heap has been freed, so check what's the last allocated block in it and
// reduce the size.
auto vma = vm_manager.FindVMA(target);
ASSERT(vma != vm_manager.vma_map.end());
ASSERT(vma->second.type == VMAType::Free);
VAddr new_end = vma->second.base;
if (new_end >= GetLinearHeapBase()) {
linheap_memory->resize(new_end - GetLinearHeapBase());
}
}
memory_used -= size;
resource_limit->current_commit -= size;
u32 physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
memory_region->Free(physical_offset, size);
return RESULT_SUCCESS;
}
Kernel::Process::Process() {}
Kernel::Process::~Process() {}
ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms) {
LOG_DEBUG(Kernel, "Map memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}", target,
source, size, static_cast<u8>(perms));
if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END ||
source + size < source) {
LOG_ERROR(Kernel, "Invalid source address");
return ERR_INVALID_ADDRESS;
}
void ClearProcessList() {
process_list.clear();
// TODO(wwylele): check target address range. Is it also restricted to heap region?
auto vma = vm_manager.FindVMA(target);
if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) {
LOG_ERROR(Kernel, "Trying to map to already allocated memory");
return ERR_INVALID_ADDRESS_STATE;
}
// Mark source region as Aliased
CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, MemoryState::Private,
VMAPermission::ReadWrite, MemoryState::Aliased,
VMAPermission::ReadWrite));
CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(source, size));
VAddr interval_target = target;
for (const auto [backing_memory, block_size] : backing_blocks) {
auto target_vma = vm_manager.MapBackingMemory(interval_target, backing_memory, block_size,
MemoryState::Alias);
interval_target += block_size;
}
return RESULT_SUCCESS;
}
ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms) {
LOG_DEBUG(Kernel, "Unmap memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}",
target, source, size, static_cast<u8>(perms));
if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END ||
source + size < source) {
LOG_ERROR(Kernel, "Invalid source address");
return ERR_INVALID_ADDRESS;
}
// TODO(wwylele): check target address range. Is it also restricted to heap region?
// TODO(wwylele): check that the source and the target are actually a pair created by Map
// Should return error 0xD8E007F5 in this case
CASCADE_CODE(vm_manager.UnmapRange(target, size));
// Change back source region state. Note that the permission is reprotected according to param
CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, MemoryState::Aliased,
VMAPermission::None, MemoryState::Private, perms));
return RESULT_SUCCESS;
}
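// Illustration (not part of the diff): the Map/Unmap pair above aliases a range of the process
// heap at a second virtual address, using made-up values. For Map(target=0x10000000,
// source=0x08100000, size=0x1000):
//   1. the heap pages at source change state Private -> Aliased (still read/write),
//   2. the FCRAM blocks backing source are mapped again at target with state Alias,
// and Unmap() with the same pair removes the target mapping and restores source to Private
// with the caller-supplied permissions.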
SharedPtr<Process> GetProcessById(u32 process_id) {
Kernel::Process::Process(KernelSystem& kernel)
: Object(kernel), handle_table(kernel), kernel(kernel) {}
Kernel::Process::~Process() {}
SharedPtr<Process> KernelSystem::GetProcessById(u32 process_id) const {
auto itr = std::find_if(
process_list.begin(), process_list.end(),
[&](const SharedPtr<Process>& process) { return process->process_id == process_id; });
@ -321,6 +372,4 @@ SharedPtr<Process> GetProcessById(u32 process_id) {
return *itr;
}
SharedPtr<Process> g_current_process;
} // namespace Kernel

View file

@ -13,6 +13,7 @@
#include <boost/container/static_vector.hpp>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/vm_manager.h"
@ -26,12 +27,6 @@ struct AddressMapping {
bool unk_flag;
};
enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,
BASE = 3,
};
union ProcessFlags {
u16 raw;
@ -55,15 +50,14 @@ enum class ProcessStatus { Created, Running, Exited };
class ResourceLimit;
struct MemoryRegionInfo;
struct CodeSet final : public Object {
class CodeSet final : public Object {
public:
struct Segment {
std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
};
static SharedPtr<CodeSet> Create(std::string name, u64 program_id);
std::string GetTypeName() const override {
return "CodeSet";
}
@ -111,14 +105,14 @@ struct CodeSet final : public Object {
u64 program_id;
private:
CodeSet();
explicit CodeSet(KernelSystem& kernel);
~CodeSet() override;
friend class KernelSystem;
};
class Process final : public Object {
public:
static SharedPtr<Process> Create(SharedPtr<CodeSet> code_set);
std::string GetTypeName() const override {
return "Process";
}
@ -131,7 +125,7 @@ public:
return HANDLE_TYPE;
}
static u32 next_process_id;
HandleTable handle_table;
SharedPtr<CodeSet> codeset;
/// Resource limit descriptor for this process
@ -153,7 +147,7 @@ public:
ProcessStatus status;
/// The id of this process
u32 process_id = next_process_id++;
u32 process_id;
/**
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
@ -171,15 +165,7 @@ public:
VMManager vm_manager;
// Memory used to back the allocations in the regular heap. A single vector is used to cover
// the entire virtual address space extents that bound the allocations, including any holes.
// This makes deallocation and reallocation of holes fast and keeps process memory contiguous
// in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
std::shared_ptr<std::vector<u8>> heap_memory;
// The left/right bounds of the address space covered by heap_memory.
VAddr heap_start = 0, heap_end = 0;
u32 heap_used = 0, linear_heap_used = 0, misc_memory_used = 0;
u32 memory_used = 0;
MemoryRegionInfo* memory_region = nullptr;
@ -194,21 +180,22 @@ public:
VAddr GetLinearHeapBase() const;
VAddr GetLinearHeapLimit() const;
ResultVal<VAddr> HeapAllocate(VAddr target, u32 size, VMAPermission perms);
ResultVal<VAddr> HeapAllocate(VAddr target, u32 size, VMAPermission perms,
MemoryState memory_state = MemoryState::Private,
bool skip_range_check = false);
ResultCode HeapFree(VAddr target, u32 size);
ResultVal<VAddr> LinearAllocate(VAddr target, u32 size, VMAPermission perms);
ResultCode LinearFree(VAddr target, u32 size);
ResultCode Map(VAddr target, VAddr source, u32 size, VMAPermission perms);
ResultCode Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms);
private:
Process();
explicit Process(Kernel::KernelSystem& kernel);
~Process() override;
friend class KernelSystem;
KernelSystem& kernel;
};
void ClearProcessList();
/// Retrieves a process from the current list of processes.
SharedPtr<Process> GetProcessById(u32 process_id);
extern SharedPtr<Process> g_current_process;
} // namespace Kernel

View file

@ -9,19 +9,17 @@
namespace Kernel {
static SharedPtr<ResourceLimit> resource_limits[4];
ResourceLimit::ResourceLimit() {}
ResourceLimit::ResourceLimit(KernelSystem& kernel) : Object(kernel) {}
ResourceLimit::~ResourceLimit() {}
SharedPtr<ResourceLimit> ResourceLimit::Create(std::string name) {
SharedPtr<ResourceLimit> resource_limit(new ResourceLimit);
SharedPtr<ResourceLimit> ResourceLimit::Create(KernelSystem& kernel, std::string name) {
SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel));
resource_limit->name = std::move(name);
return resource_limit;
}
SharedPtr<ResourceLimit> ResourceLimit::GetForCategory(ResourceLimitCategory category) {
SharedPtr<ResourceLimit> ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
switch (category) {
case ResourceLimitCategory::APPLICATION:
case ResourceLimitCategory::SYS_APPLET:
@ -90,10 +88,10 @@ u32 ResourceLimit::GetMaxResourceValue(u32 resource) const {
}
}
void ResourceLimitsInit() {
ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
// Create the four resource limits that the system uses
// Create the APPLICATION resource limit
SharedPtr<ResourceLimit> resource_limit = ResourceLimit::Create("Applications");
SharedPtr<ResourceLimit> resource_limit = ResourceLimit::Create(kernel, "Applications");
resource_limit->max_priority = 0x18;
resource_limit->max_commit = 0x4000000;
resource_limit->max_threads = 0x20;
@ -107,7 +105,7 @@ void ResourceLimitsInit() {
resource_limits[static_cast<u8>(ResourceLimitCategory::APPLICATION)] = resource_limit;
// Create the SYS_APPLET resource limit
resource_limit = ResourceLimit::Create("System Applets");
resource_limit = ResourceLimit::Create(kernel, "System Applets");
resource_limit->max_priority = 0x4;
resource_limit->max_commit = 0x5E00000;
resource_limit->max_threads = 0x1D;
@ -121,7 +119,7 @@ void ResourceLimitsInit() {
resource_limits[static_cast<u8>(ResourceLimitCategory::SYS_APPLET)] = resource_limit;
// Create the LIB_APPLET resource limit
resource_limit = ResourceLimit::Create("Library Applets");
resource_limit = ResourceLimit::Create(kernel, "Library Applets");
resource_limit->max_priority = 0x4;
resource_limit->max_commit = 0x600000;
resource_limit->max_threads = 0xE;
@ -135,7 +133,7 @@ void ResourceLimitsInit() {
resource_limits[static_cast<u8>(ResourceLimitCategory::LIB_APPLET)] = resource_limit;
// Create the OTHER resource limit
resource_limit = ResourceLimit::Create("Others");
resource_limit = ResourceLimit::Create(kernel, "Others");
resource_limit->max_priority = 0x4;
resource_limit->max_commit = 0x2180000;
resource_limit->max_threads = 0xE1;
@ -149,6 +147,6 @@ void ResourceLimitsInit() {
resource_limits[static_cast<u8>(ResourceLimitCategory::OTHER)] = resource_limit;
}
void ResourceLimitsShutdown() {}
ResourceLimitList::~ResourceLimitList() = default;
} // namespace Kernel

View file

@ -4,6 +4,7 @@
#pragma once
#include <array>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
@ -34,14 +35,7 @@ public:
/**
* Creates a resource limit object.
*/
static SharedPtr<ResourceLimit> Create(std::string name = "Unknown");
/**
* Retrieves the resource limit associated with the specified resource limit category.
* @param category The resource limit category
* @returns The resource limit associated with the category
*/
static SharedPtr<ResourceLimit> GetForCategory(ResourceLimitCategory category);
static SharedPtr<ResourceLimit> Create(KernelSystem& kernel, std::string name = "Unknown");
std::string GetTypeName() const override {
return "ResourceLimit";
@ -113,14 +107,24 @@ public:
s32 current_cpu_time = 0;
private:
ResourceLimit();
explicit ResourceLimit(KernelSystem& kernel);
~ResourceLimit() override;
};
/// Initializes the resource limits
void ResourceLimitsInit();
class ResourceLimitList {
public:
explicit ResourceLimitList(KernelSystem& kernel);
~ResourceLimitList();
// Destroys the resource limits
void ResourceLimitsShutdown();
/**
* Retrieves the resource limit associated with the specified resource limit category.
* @param category The resource limit category
* @returns The resource limit associated with the category
*/
SharedPtr<ResourceLimit> GetForCategory(ResourceLimitCategory category);
private:
std::array<SharedPtr<ResourceLimit>, 4> resource_limits;
};
} // namespace Kernel
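// Illustration only (not part of the diff): the per-kernel ResourceLimitList replaces the
// removed static ResourceLimit::GetForCategory(). A minimal sketch assuming the surrounding
// kernel headers; the function name is made-up.
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"

bool IllustrateApplicationLimit(Kernel::KernelSystem& kernel) {
    auto app_limit =
        kernel.ResourceLimit().GetForCategory(Kernel::ResourceLimitCategory::APPLICATION);
    // These values come from ResourceLimitList's constructor in resource_limit.cpp.
    return app_limit->max_priority == 0x18 && app_limit->max_commit == 0x4000000;
}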

View file

@ -10,16 +10,16 @@
namespace Kernel {
Semaphore::Semaphore() {}
Semaphore::Semaphore(KernelSystem& kernel) : WaitObject(kernel) {}
Semaphore::~Semaphore() {}
ResultVal<SharedPtr<Semaphore>> Semaphore::Create(s32 initial_count, s32 max_count,
std::string name) {
ResultVal<SharedPtr<Semaphore>> KernelSystem::CreateSemaphore(s32 initial_count, s32 max_count,
std::string name) {
if (initial_count > max_count)
return ERR_INVALID_COMBINATION_KERNEL;
SharedPtr<Semaphore> semaphore(new Semaphore);
SharedPtr<Semaphore> semaphore(new Semaphore(*this));
// When the semaphore is created, some slots are reserved for other threads,
// and the rest is reserved for the caller thread

View file

@ -15,16 +15,6 @@ namespace Kernel {
class Semaphore final : public WaitObject {
public:
/**
* Creates a semaphore.
* @param initial_count Number of slots reserved for other threads
* @param max_count Maximum number of slots the semaphore can have
* @param name Optional name of semaphore
* @return The created semaphore
*/
static ResultVal<SharedPtr<Semaphore>> Create(s32 initial_count, s32 max_count,
std::string name = "Unknown");
std::string GetTypeName() const override {
return "Semaphore";
}
@ -52,8 +42,10 @@ public:
ResultVal<s32> Release(s32 release_count);
private:
Semaphore();
explicit Semaphore(KernelSystem& kernel);
~Semaphore() override;
friend class KernelSystem;
};
} // namespace Kernel
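// Illustration only (not part of the diff): CreateSemaphore now lives on KernelSystem and
// still reports an invalid initial/max combination through ResultVal. A minimal sketch
// assuming the surrounding kernel headers; the function name and counts are made-up.
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/semaphore.h"

Kernel::SharedPtr<Kernel::Semaphore> IllustrateCreateSemaphore(Kernel::KernelSystem& kernel) {
    auto result = kernel.CreateSemaphore(/*initial_count=*/0, /*max_count=*/1, "example_semaphore");
    if (result.Failed())
        return nullptr; // initial_count > max_count yields ERR_INVALID_COMBINATION_KERNEL
    return result.Unwrap();
}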

View file

@ -13,7 +13,7 @@
namespace Kernel {
ServerPort::ServerPort() {}
ServerPort::ServerPort(KernelSystem& kernel) : WaitObject(kernel) {}
ServerPort::~ServerPort() {}
ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() {
@ -35,11 +35,11 @@ void ServerPort::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> ServerPort::CreatePortPair(
std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> KernelSystem::CreatePortPair(
u32 max_sessions, std::string name) {
SharedPtr<ServerPort> server_port(new ServerPort);
SharedPtr<ClientPort> client_port(new ClientPort);
SharedPtr<ServerPort> server_port(new ServerPort(*this));
SharedPtr<ClientPort> client_port(new ClientPort(*this));
server_port->name = name + "_Server";
client_port->name = name + "_Client";

View file

@ -20,16 +20,6 @@ class SessionRequestHandler;
class ServerPort final : public WaitObject {
public:
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
* @param max_sessions Maximum number of sessions to the port
* @param name Optional name of the ports
* @return The created port tuple
*/
static std::tuple<SharedPtr<ServerPort>, SharedPtr<ClientPort>> CreatePortPair(
u32 max_sessions, std::string name = "UnknownPort");
std::string GetTypeName() const override {
return "ServerPort";
}
@ -69,8 +59,10 @@ public:
void Acquire(Thread* thread) override;
private:
ServerPort();
explicit ServerPort(KernelSystem& kernel);
~ServerPort() override;
friend class KernelSystem;
};
} // namespace Kernel

View file

@ -13,7 +13,7 @@
namespace Kernel {
ServerSession::ServerSession() = default;
ServerSession::ServerSession(KernelSystem& kernel) : WaitObject(kernel) {}
ServerSession::~ServerSession() {
// This destructor will be called automatically when the last ServerSession handle is closed by
// the emulated application.
@ -28,8 +28,8 @@ ServerSession::~ServerSession() {
parent->server = nullptr;
}
ResultVal<SharedPtr<ServerSession>> ServerSession::Create(std::string name) {
SharedPtr<ServerSession> server_session(new ServerSession);
ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelSystem& kernel, std::string name) {
SharedPtr<ServerSession> server_session(new ServerSession(kernel));
server_session->name = std::move(name);
server_session->parent = nullptr;
@ -100,10 +100,10 @@ ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
return RESULT_SUCCESS;
}
ServerSession::SessionPair ServerSession::CreateSessionPair(const std::string& name,
SharedPtr<ClientPort> port) {
auto server_session = ServerSession::Create(name + "_Server").Unwrap();
SharedPtr<ClientSession> client_session(new ClientSession);
std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>> KernelSystem::CreateSessionPair(
const std::string& name, SharedPtr<ClientPort> port) {
auto server_session = ServerSession::Create(*this, name + "_Server").Unwrap();
SharedPtr<ClientSession> client_session(new ClientSession(*this));
client_session->name = name + "_Client";
std::shared_ptr<Session> parent(new Session);

View file

@ -48,17 +48,6 @@ public:
return HANDLE_TYPE;
}
using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>;
/**
* Creates a pair of ServerSession and an associated ClientSession.
* @param name Optional name of the ports.
* @param client_port Optional ClientPort that spawned this session.
* @return The created session tuple
*/
static SessionPair CreateSessionPair(const std::string& name = "Unknown",
SharedPtr<ClientPort> client_port = nullptr);
/**
* Sets the HLE handler for the session. This handler will be called to service IPC requests
* instead of the regular IPC machinery. (The regular IPC machinery is currently not
@ -95,16 +84,20 @@ public:
SharedPtr<Thread> currently_handling;
private:
ServerSession();
explicit ServerSession(KernelSystem& kernel);
~ServerSession() override;
/**
* Creates a server session. The server session can have an optional HLE handler,
* which will be invoked to handle the IPC requests that this session receives.
* @param kernel The kernel instance to create the server session on
* @param name Optional name of the server session.
* @return The created server session
*/
static ResultVal<SharedPtr<ServerSession>> Create(std::string name = "Unknown");
static ResultVal<SharedPtr<ServerSession>> Create(KernelSystem& kernel,
std::string name = "Unknown");
friend class KernelSystem;
};
} // namespace Kernel

View file

@ -11,14 +11,20 @@
namespace Kernel {
SharedMemory::SharedMemory() {}
SharedMemory::~SharedMemory() {}
SharedMemory::SharedMemory(KernelSystem& kernel) : Object(kernel), kernel(kernel) {}
SharedMemory::~SharedMemory() {
for (const auto& interval : holding_memory) {
kernel.GetMemoryRegion(MemoryRegion::SYSTEM)
->Free(interval.lower(), interval.upper() - interval.lower());
}
}
SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address,
MemoryRegion region, std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory);
SharedPtr<SharedMemory> KernelSystem::CreateSharedMemory(Process* owner_process, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
VAddr address, MemoryRegion region,
std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory(*this));
shared_memory->owner_process = owner_process;
shared_memory->name = std::move(name);
@ -30,64 +36,53 @@ SharedPtr<SharedMemory> SharedMemory::Create(SharedPtr<Process> owner_process, u
// We need to allocate a block from the Linear Heap ourselves.
// We'll manually allocate some memory from the linear heap in the specified region.
MemoryRegionInfo* memory_region = GetMemoryRegion(region);
auto& linheap_memory = memory_region->linear_heap_memory;
auto offset = memory_region->LinearAllocate(size);
ASSERT_MSG(linheap_memory->size() + size <= memory_region->size,
"Not enough space in region to allocate shared memory!");
ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
shared_memory->backing_block = linheap_memory;
shared_memory->backing_block_offset = linheap_memory->size();
// Allocate some memory from the end of the linear heap for this region.
linheap_memory->insert(linheap_memory->end(), size, 0);
memory_region->used += size;
shared_memory->linear_heap_phys_address =
Memory::FCRAM_PADDR + memory_region->base +
static_cast<PAddr>(shared_memory->backing_block_offset);
std::fill(Memory::fcram.data() + *offset, Memory::fcram.data() + *offset + size, 0);
shared_memory->backing_blocks = {{Memory::fcram.data() + *offset, size}};
shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
shared_memory->linear_heap_phys_offset = *offset;
// Increase the amount of used linear heap memory for the owner process.
if (shared_memory->owner_process != nullptr) {
shared_memory->owner_process->linear_heap_used += size;
}
// Refresh the address mappings for the current process.
if (Kernel::g_current_process != nullptr) {
Kernel::g_current_process->vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
shared_memory->owner_process->memory_used += size;
}
} else {
auto& vm_manager = shared_memory->owner_process->vm_manager;
// The memory is already available and mapped in the owner process.
auto vma = vm_manager.FindVMA(address);
ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address");
ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
// The returned VMA might be a bigger one encompassing the desired address.
auto vma_offset = address - vma->first;
ASSERT_MSG(vma_offset + size <= vma->second.size,
"Shared memory exceeds bounds of mapped block");
shared_memory->backing_block = vma->second.backing_block;
shared_memory->backing_block_offset = vma->second.offset + vma_offset;
auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size);
ASSERT_MSG(backing_blocks.Succeeded(), "Trying to share freed memory");
shared_memory->backing_blocks = std::move(backing_blocks).Unwrap();
}
shared_memory->base_address = address;
return shared_memory;
}
SharedPtr<SharedMemory> SharedMemory::CreateForApplet(std::shared_ptr<std::vector<u8>> heap_block,
u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory);
SharedPtr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
u32 offset, u32 size, MemoryPermission permissions, MemoryPermission other_permissions,
std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory(*this));
// Allocate memory in heap
MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::SYSTEM);
auto backing_blocks = memory_region->HeapAllocate(size);
ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!");
shared_memory->holding_memory = backing_blocks;
shared_memory->owner_process = nullptr;
shared_memory->name = std::move(name);
shared_memory->size = size;
shared_memory->permissions = permissions;
shared_memory->other_permissions = other_permissions;
shared_memory->backing_block = heap_block;
shared_memory->backing_block_offset = offset;
for (const auto& interval : backing_blocks) {
shared_memory->backing_blocks.push_back(
{Memory::fcram.data() + interval.lower(), interval.upper() - interval.lower()});
std::fill(Memory::fcram.data() + interval.lower(), Memory::fcram.data() + interval.upper(),
0);
}
shared_memory->base_address = Memory::HEAP_VADDR + offset;
return shared_memory;
@ -147,24 +142,32 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
if (base_address == 0 && target_address == 0) {
// Calculate the address at which to map the memory block.
auto maybe_vaddr = Memory::PhysicalToVirtualAddress(linear_heap_phys_address);
ASSERT(maybe_vaddr);
target_address = *maybe_vaddr;
// Note: even on new firmware versions, the target address is still in the old linear heap
// region. This exception is made to keep shared font compatibility. See
// APT:GetSharedFont for details.
target_address = linear_heap_phys_offset + Memory::LINEAR_HEAP_VADDR;
}
auto vma = target_process->vm_manager.FindVMA(target_address);
if (vma->second.type != VMAType::Free ||
vma->second.base + vma->second.size < target_address + size) {
LOG_ERROR(Kernel,
"cannot map id={}, address=0x{:08X} name={}, mapping to already allocated memory",
GetObjectId(), address, name);
return ERR_INVALID_ADDRESS_STATE;
}
// Map the memory block into the target process
auto result = target_process->vm_manager.MapMemoryBlock(
target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
if (result.Failed()) {
LOG_ERROR(
Kernel,
"cannot map id={}, target_address=0x{:08X} name={}, error mapping to virtual memory",
GetObjectId(), target_address, name);
return result.Code();
VAddr interval_target = target_address;
for (const auto& interval : backing_blocks) {
auto vma = target_process->vm_manager.MapBackingMemory(
interval_target, interval.first, interval.second, MemoryState::Shared);
ASSERT(vma.Succeeded());
target_process->vm_manager.Reprotect(vma.Unwrap(), ConvertPermissions(permissions));
interval_target += interval.second;
}
return target_process->vm_manager.ReprotectRange(target_address, size,
ConvertPermissions(permissions));
return RESULT_SUCCESS;
}
ResultCode SharedMemory::Unmap(Process* target_process, VAddr address) {
@ -180,7 +183,10 @@ VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) {
};
u8* SharedMemory::GetPointer(u32 offset) {
return backing_block->data() + backing_block_offset + offset;
if (backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return backing_blocks[0].first + offset;
}
} // namespace Kernel
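// Illustration only (not part of the diff): with the backing memory now held as a list of
// (host pointer, size) pairs, helpers that touch the whole block have to walk every pair. A
// hypothetical zero-fill, assuming the surrounding kernel headers:
#include <algorithm>
#include "core/hle/kernel/shared_memory.h"

void IllustrateZeroFill(Kernel::SharedMemory& shared_memory) {
    for (const auto& [pointer, length] : shared_memory.backing_blocks)
        std::fill(pointer, pointer + length, 0);
}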

View file

@ -12,55 +12,8 @@
namespace Kernel {
/// Permissions for mapped shared memory blocks
enum class MemoryPermission : u32 {
None = 0,
Read = (1u << 0),
Write = (1u << 1),
ReadWrite = (Read | Write),
Execute = (1u << 2),
ReadExecute = (Read | Execute),
WriteExecute = (Write | Execute),
ReadWriteExecute = (Read | Write | Execute),
DontCare = (1u << 28)
};
class SharedMemory final : public Object {
public:
/**
* Creates a shared memory object.
* @param owner_process Process that created this shared memory object.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param address The address from which to map the Shared Memory.
* @param region If the address is 0, the shared memory will be allocated in this region of the
* linear heap.
* @param name Optional object name, used for debugging purposes.
*/
static SharedPtr<SharedMemory> Create(SharedPtr<Process> owner_process, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address = 0,
MemoryRegion region = MemoryRegion::BASE,
std::string name = "Unknown");
/**
* Creates a shared memory object from a block of memory managed by an HLE applet.
* @param heap_block Heap block of the HLE applet.
* @param offset The offset into the heap block that the SharedMemory will map.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param name Optional object name, used for debugging purposes.
*/
static SharedPtr<SharedMemory> CreateForApplet(std::shared_ptr<std::vector<u8>> heap_block,
u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
std::string GetTypeName() const override {
return "SharedMemory";
}
@ -105,16 +58,14 @@ public:
u8* GetPointer(u32 offset = 0);
/// Process that created this shared memory block.
SharedPtr<Process> owner_process;
Process* owner_process;
/// Address of shared memory block in the owner process if specified.
VAddr base_address;
/// Physical address of the shared memory block in the linear heap if no address was specified
/// Offset in FCRAM of the shared memory block in the linear heap if no address was specified
/// during creation.
PAddr linear_heap_phys_address;
PAddr linear_heap_phys_offset;
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
/// Offset into the backing block for this shared memory.
std::size_t backing_block_offset;
std::vector<std::pair<u8*, u32>> backing_blocks;
/// Size of the memory block. Page-aligned.
u32 size;
/// Permission restrictions applied to the process which created the block.
@ -124,9 +75,14 @@ public:
/// Name of shared memory object.
std::string name;
MemoryRegionInfo::IntervalSet holding_memory;
private:
SharedMemory();
explicit SharedMemory(KernelSystem& kernel);
~SharedMemory() override;
friend class KernelSystem;
KernelSystem& kernel;
};
} // namespace Kernel
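// Hedged usage sketch of the reworked creation path (it mirrors the svcCreateMemoryBlock handler
// later in this diff; the wrapper function itself is an assumption, not part of the change):
// shared memory is now created through the KernelSystem instance and exposed to the caller via
// its process handle table.
ResultCode CreateBlockForCurrentProcess(KernelSystem& kernel, Handle* out_handle, u32 size,
                                        MemoryPermission my_permission,
                                        MemoryPermission other_permission) {
    SharedPtr<Process> current_process = kernel.GetCurrentProcess();
    SharedPtr<SharedMemory> block =
        kernel.CreateSharedMemory(current_process.get(), size, my_permission, other_permission,
                                  /*address=*/0, MemoryRegion::BASE);
    CASCADE_RESULT(*out_handle, current_process->handle_table.Create(std::move(block)));
    return RESULT_SUCCESS;
}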

View file

@ -4,9 +4,10 @@
#include <chrono>
#include <cstring>
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/service/ptm/ptm.h"
#include "core/hle/shared_page.h"
#include "core/movie.h"
#include "core/settings.h"
@ -53,9 +54,9 @@ Handler::Handler() {
init_time = GetInitTime();
using namespace std::placeholders;
update_time_event = CoreTiming::RegisterEvent(
update_time_event = Core::System::GetInstance().CoreTiming().RegisterEvent(
"SharedPage::UpdateTimeCallback", std::bind(&Handler::UpdateTimeCallback, this, _1, _2));
CoreTiming::ScheduleEvent(0, update_time_event);
Core::System::GetInstance().CoreTiming().ScheduleEvent(0, update_time_event);
float slidestate =
Settings::values.toggle_3d ? (float_le)Settings::values.factor_3d / 100 : 0.0f;
@ -65,8 +66,8 @@ Handler::Handler() {
/// Gets the system time in 3DS format. The epoch is Jan 1 1900, and the unit is milliseconds.
u64 Handler::GetSystemTime() const {
std::chrono::milliseconds now =
init_time +
std::chrono::duration_cast<std::chrono::milliseconds>(CoreTiming::GetGlobalTimeUs());
init_time + std::chrono::duration_cast<std::chrono::milliseconds>(
Core::System::GetInstance().CoreTiming().GetGlobalTimeUs());
// The 3DS system doesn't allow the user to set a time before Jan 1 2000,
// so we use it as an auxiliary epoch to calculate the console time.
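// Worked example (illustrative; the constant below is derived here rather than taken from this
// change): the gap between the 3DS epoch (Jan 1 1900) and the auxiliary epoch (Jan 1 2000) is
//   (100 * 365 + 24) days = 36524 days (1900 itself is not a leap year),
//   36524 * 86400 * 1000 = 3,155,673,600,000 ms,
// so a console time can be formed as that offset plus the milliseconds elapsed since Jan 1 2000.
constexpr u64 MillisecondsFrom1900To2000() {
    constexpr u64 days = 100 * 365 + 24; // days between 1900-01-01 and 2000-01-01
    return days * 24 * 60 * 60 * 1000;   // 3,155,673,600,000 ms
}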
@ -97,14 +98,15 @@ void Handler::UpdateTimeCallback(u64 userdata, int cycles_late) {
shared_page.date_time_counter % 2 ? shared_page.date_time_0 : shared_page.date_time_1;
date_time.date_time = GetSystemTime();
date_time.update_tick = CoreTiming::GetTicks();
date_time.update_tick = Core::System::GetInstance().CoreTiming().GetTicks();
date_time.tick_to_second_coefficient = BASE_CLOCK_RATE_ARM11;
date_time.tick_offset = 0;
++shared_page.date_time_counter;
// system time is updated hourly
CoreTiming::ScheduleEvent(msToCycles(60 * 60 * 1000) - cycles_late, update_time_event);
Core::System::GetInstance().CoreTiming().ScheduleEvent(msToCycles(60 * 60 * 1000) - cycles_late,
update_time_event);
}
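// The hourly update above follows the usual CoreTiming pattern: the callback re-schedules itself
// and subtracts the cycles it fired late so the period does not drift. A minimal sketch of that
// pattern, with the function and parameter names being assumptions for illustration:
static void PeriodicTick(Core::Timing& timing, Core::TimingEventType* event, s64 period_cycles,
                         s64 cycles_late) {
    // ... perform the periodic work ...
    timing.ScheduleEvent(period_cycles - cycles_late, event); // compensate for the late firing
}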
void Handler::SetMacAddress(const MacAddress& addr) {

View file

@ -21,8 +21,8 @@
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace CoreTiming {
struct EventType;
namespace Core {
struct TimingEventType;
}
namespace SharedPage {
@ -96,7 +96,7 @@ public:
private:
u64 GetSystemTime() const;
void UpdateTimeCallback(u64 userdata, int cycles_late);
CoreTiming::EventType* update_time_event;
Core::TimingEventType* update_time_event;
std::chrono::seconds init_time;
SharedPageDef shared_page;

View file

@ -83,7 +83,7 @@ static ResultCode ControlMemory(u32* out_addr, u32 operation, u32 addr0, u32 add
}
VMAPermission vma_permissions = (VMAPermission)permissions;
auto& process = *g_current_process;
auto& process = *Core::System::GetInstance().Kernel().GetCurrentProcess();
switch (operation & MEMOP_OPERATION_MASK) {
case MEMOP_FREE: {
@ -114,16 +114,12 @@ static ResultCode ControlMemory(u32* out_addr, u32 operation, u32 addr0, u32 add
}
case MEMOP_MAP: {
// TODO: This is just a hack to avoid regressions until memory aliasing is implemented
CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, vma_permissions));
CASCADE_CODE(process.Map(addr0, addr1, size, vma_permissions));
break;
}
case MEMOP_UNMAP: {
// TODO: This is just a hack to avoid regressions until memory aliasing is implemented
ResultCode result = process.HeapFree(addr0, size);
if (result.IsError())
return result;
CASCADE_CODE(process.Unmap(addr0, addr1, size, vma_permissions));
break;
}
@ -145,19 +141,21 @@ static ResultCode ControlMemory(u32* out_addr, u32 operation, u32 addr0, u32 add
}
static void ExitProcess() {
LOG_INFO(Kernel_SVC, "Process {} exiting", g_current_process->process_id);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Process> current_process = kernel.GetCurrentProcess();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->process_id);
ASSERT_MSG(g_current_process->status == ProcessStatus::Running, "Process has already exited");
ASSERT_MSG(current_process->status == ProcessStatus::Running, "Process has already exited");
g_current_process->status = ProcessStatus::Exited;
current_process->status = ProcessStatus::Exited;
// Stop all the process threads that are currently waiting for objects.
auto& thread_list = GetThreadList();
auto& thread_list = kernel.GetThreadManager().GetThreadList();
for (auto& thread : thread_list) {
if (thread->owner_process != g_current_process)
if (thread->owner_process != current_process)
continue;
if (thread == GetCurrentThread())
if (thread == kernel.GetThreadManager().GetCurrentThread())
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@ -169,7 +167,7 @@ static void ExitProcess() {
}
// Kill the current thread
GetCurrentThread()->Stop();
kernel.GetThreadManager().GetCurrentThread()->Stop();
Core::System::GetInstance().PrepareReschedule();
}
@ -181,7 +179,9 @@ static ResultCode MapMemoryBlock(Handle handle, u32 addr, u32 permissions, u32 o
"otherpermission={}",
handle, addr, permissions, other_permissions);
SharedPtr<SharedMemory> shared_memory = g_handle_table.Get<SharedMemory>(handle);
SharedPtr<SharedMemory> shared_memory =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<SharedMemory>(
handle);
if (shared_memory == nullptr)
return ERR_INVALID_HANDLE;
@ -195,7 +195,8 @@ static ResultCode MapMemoryBlock(Handle handle, u32 addr, u32 permissions, u32 o
case MemoryPermission::WriteExecute:
case MemoryPermission::ReadWriteExecute:
case MemoryPermission::DontCare:
return shared_memory->Map(g_current_process.get(), addr, permissions_type,
return shared_memory->Map(Core::System::GetInstance().Kernel().GetCurrentProcess().get(),
addr, permissions_type,
static_cast<MemoryPermission>(other_permissions));
default:
LOG_ERROR(Kernel_SVC, "unknown permissions=0x{:08X}", permissions);
@ -209,11 +210,12 @@ static ResultCode UnmapMemoryBlock(Handle handle, u32 addr) {
// TODO(Subv): Return E0A01BF5 if the address is not in the application's heap
SharedPtr<SharedMemory> shared_memory = g_handle_table.Get<SharedMemory>(handle);
SharedPtr<Process> current_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
SharedPtr<SharedMemory> shared_memory = current_process->handle_table.Get<SharedMemory>(handle);
if (shared_memory == nullptr)
return ERR_INVALID_HANDLE;
return shared_memory->Unmap(g_current_process.get(), addr);
return shared_memory->Unmap(current_process.get(), addr);
}
/// Connect to an OS service given the port name, returns the handle to the port to out
@ -229,8 +231,10 @@ static ResultCode ConnectToPort(Handle* out_handle, VAddr port_name_address) {
LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
auto it = Service::g_kernel_named_ports.find(port_name);
if (it == Service::g_kernel_named_ports.end()) {
KernelSystem& kernel = Core::System::GetInstance().Kernel();
auto it = kernel.named_ports.find(port_name);
if (it == kernel.named_ports.end()) {
LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
return ERR_NOT_FOUND;
}
@ -241,13 +245,15 @@ static ResultCode ConnectToPort(Handle* out_handle, VAddr port_name_address) {
CASCADE_RESULT(client_session, client_port->Connect());
// Return the client session
CASCADE_RESULT(*out_handle, g_handle_table.Create(client_session));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(client_session));
return RESULT_SUCCESS;
}
/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Handle handle) {
SharedPtr<ClientSession> session = g_handle_table.Get<ClientSession>(handle);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<ClientSession> session =
kernel.GetCurrentProcess()->handle_table.Get<ClientSession>(handle);
if (session == nullptr) {
return ERR_INVALID_HANDLE;
}
@ -256,19 +262,20 @@ static ResultCode SendSyncRequest(Handle handle) {
Core::System::GetInstance().PrepareReschedule();
return session->SendSyncRequest(GetCurrentThread());
return session->SendSyncRequest(kernel.GetThreadManager().GetCurrentThread());
}
/// Close a handle
static ResultCode CloseHandle(Handle handle) {
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
return g_handle_table.Close(handle);
return Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Close(handle);
}
/// Wait for a handle to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization1(Handle handle, s64 nano_seconds) {
auto object = g_handle_table.Get<WaitObject>(handle);
Thread* thread = GetCurrentThread();
KernelSystem& kernel = Core::System::GetInstance().Kernel();
auto object = kernel.GetCurrentProcess()->handle_table.Get<WaitObject>(handle);
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
if (object == nullptr)
return ERR_INVALID_HANDLE;
@ -320,7 +327,8 @@ static ResultCode WaitSynchronization1(Handle handle, s64 nano_seconds) {
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronizationN(s32* out, VAddr handles_address, s32 handle_count,
bool wait_all, s64 nano_seconds) {
Thread* thread = GetCurrentThread();
KernelSystem& kernel = Core::System::GetInstance().Kernel();
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
if (!Memory::IsValidVirtualAddress(handles_address))
return ERR_INVALID_POINTER;
@ -338,7 +346,7 @@ static ResultCode WaitSynchronizationN(s32* out, VAddr handles_address, s32 hand
for (int i = 0; i < handle_count; ++i) {
Handle handle = Memory::Read32(handles_address + i * sizeof(Handle));
auto object = g_handle_table.Get<WaitObject>(handle);
auto object = kernel.GetCurrentProcess()->handle_table.Get<WaitObject>(handle);
if (object == nullptr)
return ERR_INVALID_HANDLE;
objects[i] = object;
@ -502,9 +510,12 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
using ObjectPtr = SharedPtr<WaitObject>;
std::vector<ObjectPtr> objects(handle_count);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Process> current_process = kernel.GetCurrentProcess();
for (int i = 0; i < handle_count; ++i) {
Handle handle = Memory::Read32(handles_address + i * sizeof(Handle));
auto object = g_handle_table.Get<WaitObject>(handle);
auto object = current_process->handle_table.Get<WaitObject>(handle);
if (object == nullptr)
return ERR_INVALID_HANDLE;
objects[i] = object;
@ -512,10 +523,11 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
// We are also sending a command reply.
// Do not send a reply if the command id in the command buffer is 0xFFFF.
u32* cmd_buff = GetCommandBuffer();
IPC::Header header{cmd_buff[0]};
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
u32 cmd_buff_header = Memory::Read32(thread->GetCommandBufferAddress());
IPC::Header header{cmd_buff_header};
if (reply_target != 0 && header.command_id != 0xFFFF) {
auto session = g_handle_table.Get<ServerSession>(reply_target);
auto session = current_process->handle_table.Get<ServerSession>(reply_target);
if (session == nullptr)
return ERR_INVALID_HANDLE;
@ -531,11 +543,11 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
return ERR_SESSION_CLOSED_BY_REMOTE;
}
VAddr source_address = GetCurrentThread()->GetCommandBufferAddress();
VAddr source_address = thread->GetCommandBufferAddress();
VAddr target_address = request_thread->GetCommandBufferAddress();
ResultCode translation_result = TranslateCommandBuffer(
Kernel::GetCurrentThread(), request_thread, source_address, target_address, true);
ResultCode translation_result =
TranslateCommandBuffer(thread, request_thread, source_address, target_address, true);
// Note: The real kernel seems to always panic if the Server->Client buffer translation
// fails for whatever reason.
@ -555,8 +567,6 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
return RESULT_SUCCESS;
}
auto thread = GetCurrentThread();
// Find the first object that is acquirable in the provided list of objects
auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
return !object->ShouldWait(thread);
@ -572,7 +582,7 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
return RESULT_SUCCESS;
auto server_session = static_cast<ServerSession*>(object);
return ReceiveIPCRequest(server_session, GetCurrentThread());
return ReceiveIPCRequest(server_session, thread);
}
// No objects were ready to be acquired, prepare to suspend the thread.
@ -615,8 +625,10 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
/// Create an address arbiter (to allocate access to shared resources)
static ResultCode CreateAddressArbiter(Handle* out_handle) {
SharedPtr<AddressArbiter> arbiter = AddressArbiter::Create();
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(arbiter)));
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<AddressArbiter> arbiter = kernel.CreateAddressArbiter();
CASCADE_RESULT(*out_handle,
kernel.GetCurrentProcess()->handle_table.Create(std::move(arbiter)));
LOG_TRACE(Kernel_SVC, "returned handle=0x{:08X}", *out_handle);
return RESULT_SUCCESS;
}
@ -627,12 +639,16 @@ static ResultCode ArbitrateAddress(Handle handle, u32 address, u32 type, u32 val
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}, address=0x{:08X}, type=0x{:08X}, value=0x{:08X}",
handle, address, type, value);
SharedPtr<AddressArbiter> arbiter = g_handle_table.Get<AddressArbiter>(handle);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<AddressArbiter> arbiter =
kernel.GetCurrentProcess()->handle_table.Get<AddressArbiter>(handle);
if (arbiter == nullptr)
return ERR_INVALID_HANDLE;
auto res = arbiter->ArbitrateAddress(GetCurrentThread(), static_cast<ArbitrationType>(type),
address, value, nanoseconds);
auto res =
arbiter->ArbitrateAddress(kernel.GetThreadManager().GetCurrentThread(),
static_cast<ArbitrationType>(type), address, value, nanoseconds);
// TODO(Subv): Identify in which specific cases this call should cause a reschedule.
Core::System::GetInstance().PrepareReschedule();
@ -675,11 +691,12 @@ static void OutputDebugString(VAddr address, int len) {
static ResultCode GetResourceLimit(Handle* resource_limit, Handle process_handle) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X}", process_handle);
SharedPtr<Process> process = g_handle_table.Get<Process>(process_handle);
SharedPtr<Process> current_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
SharedPtr<Process> process = current_process->handle_table.Get<Process>(process_handle);
if (process == nullptr)
return ERR_INVALID_HANDLE;
CASCADE_RESULT(*resource_limit, g_handle_table.Create(process->resource_limit));
CASCADE_RESULT(*resource_limit, current_process->handle_table.Create(process->resource_limit));
return RESULT_SUCCESS;
}
@ -691,7 +708,8 @@ static ResultCode GetResourceLimitCurrentValues(VAddr values, Handle resource_li
resource_limit_handle, names, name_count);
SharedPtr<ResourceLimit> resource_limit =
g_handle_table.Get<ResourceLimit>(resource_limit_handle);
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<ResourceLimit>(
resource_limit_handle);
if (resource_limit == nullptr)
return ERR_INVALID_HANDLE;
@ -711,7 +729,8 @@ static ResultCode GetResourceLimitLimitValues(VAddr values, Handle resource_limi
resource_limit_handle, names, name_count);
SharedPtr<ResourceLimit> resource_limit =
g_handle_table.Get<ResourceLimit>(resource_limit_handle);
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<ResourceLimit>(
resource_limit_handle);
if (resource_limit == nullptr)
return ERR_INVALID_HANDLE;
@ -733,14 +752,16 @@ static ResultCode CreateThread(Handle* out_handle, u32 priority, u32 entry_point
return ERR_OUT_OF_RANGE;
}
SharedPtr<ResourceLimit>& resource_limit = g_current_process->resource_limit;
SharedPtr<Process> current_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
SharedPtr<ResourceLimit>& resource_limit = current_process->resource_limit;
if (resource_limit->GetMaxResourceValue(ResourceTypes::PRIORITY) > priority) {
return ERR_NOT_AUTHORIZED;
}
if (processor_id == ThreadProcessorIdDefault) {
// Set the target CPU to the one specified in the process' exheader.
processor_id = g_current_process->ideal_processor;
processor_id = current_process->ideal_processor;
ASSERT(processor_id != ThreadProcessorIdDefault);
}
@ -761,14 +782,14 @@ static ResultCode CreateThread(Handle* out_handle, u32 priority, u32 entry_point
break;
}
CASCADE_RESULT(SharedPtr<Thread> thread,
Thread::Create(name, entry_point, priority, arg, processor_id, stack_top,
g_current_process));
CASCADE_RESULT(SharedPtr<Thread> thread, Core::System::GetInstance().Kernel().CreateThread(
name, entry_point, priority, arg, processor_id,
stack_top, *current_process));
thread->context->SetFpscr(FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO |
FPSCR_ROUND_TOZERO); // 0x03C00000
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(thread)));
CASCADE_RESULT(*out_handle, current_process->handle_table.Create(std::move(thread)));
Core::System::GetInstance().PrepareReschedule();
@ -784,13 +805,14 @@ static ResultCode CreateThread(Handle* out_handle, u32 priority, u32 entry_point
static void ExitThread() {
LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CPU().GetPC());
ExitCurrentThread();
Core::System::GetInstance().Kernel().GetThreadManager().ExitCurrentThread();
Core::System::GetInstance().PrepareReschedule();
}
/// Gets the priority for the specified thread
static ResultCode GetThreadPriority(u32* priority, Handle handle) {
const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(handle);
const SharedPtr<Thread> thread =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Thread>(handle);
if (thread == nullptr)
return ERR_INVALID_HANDLE;
@ -804,13 +826,15 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
return ERR_OUT_OF_RANGE;
}
SharedPtr<Thread> thread = g_handle_table.Get<Thread>(handle);
SharedPtr<Thread> thread =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Thread>(handle);
if (thread == nullptr)
return ERR_INVALID_HANDLE;
// Note: The kernel uses the current process's resource limit instead of
// the one from the thread owner's resource limit.
SharedPtr<ResourceLimit>& resource_limit = g_current_process->resource_limit;
SharedPtr<ResourceLimit>& resource_limit =
Core::System::GetInstance().Kernel().GetCurrentProcess()->resource_limit;
if (resource_limit->GetMaxResourceValue(ResourceTypes::PRIORITY) > priority) {
return ERR_NOT_AUTHORIZED;
}
@ -828,9 +852,10 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
/// Create a mutex
static ResultCode CreateMutex(Handle* out_handle, u32 initial_locked) {
SharedPtr<Mutex> mutex = Mutex::Create(initial_locked != 0);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Mutex> mutex = kernel.CreateMutex(initial_locked != 0);
mutex->name = fmt::format("mutex-{:08x}", Core::CPU().GetReg(14));
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(mutex)));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(mutex)));
LOG_TRACE(Kernel_SVC, "called initial_locked={} : created handle=0x{:08X}",
initial_locked ? "true" : "false", *out_handle);
@ -842,18 +867,22 @@ static ResultCode CreateMutex(Handle* out_handle, u32 initial_locked) {
static ResultCode ReleaseMutex(Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}", handle);
SharedPtr<Mutex> mutex = g_handle_table.Get<Mutex>(handle);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Mutex> mutex = kernel.GetCurrentProcess()->handle_table.Get<Mutex>(handle);
if (mutex == nullptr)
return ERR_INVALID_HANDLE;
return mutex->Release(GetCurrentThread());
return mutex->Release(kernel.GetThreadManager().GetCurrentThread());
}
/// Get the ID of the specified process
static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X}", process_handle);
const SharedPtr<Process> process = g_handle_table.Get<Process>(process_handle);
const SharedPtr<Process> process =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Process>(
process_handle);
if (process == nullptr)
return ERR_INVALID_HANDLE;
@ -865,7 +894,9 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
static ResultCode GetProcessIdOfThread(u32* process_id, Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
const SharedPtr<Thread> thread =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Thread>(
thread_handle);
if (thread == nullptr)
return ERR_INVALID_HANDLE;
@ -881,7 +912,8 @@ static ResultCode GetProcessIdOfThread(u32* process_id, Handle thread_handle) {
static ResultCode GetThreadId(u32* thread_id, Handle handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", handle);
const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(handle);
const SharedPtr<Thread> thread =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Thread>(handle);
if (thread == nullptr)
return ERR_INVALID_HANDLE;
@ -891,9 +923,12 @@ static ResultCode GetThreadId(u32* thread_id, Handle handle) {
/// Creates a semaphore
static ResultCode CreateSemaphore(Handle* out_handle, s32 initial_count, s32 max_count) {
CASCADE_RESULT(SharedPtr<Semaphore> semaphore, Semaphore::Create(initial_count, max_count));
KernelSystem& kernel = Core::System::GetInstance().Kernel();
CASCADE_RESULT(SharedPtr<Semaphore> semaphore,
kernel.CreateSemaphore(initial_count, max_count));
semaphore->name = fmt::format("semaphore-{:08x}", Core::CPU().GetReg(14));
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(semaphore)));
CASCADE_RESULT(*out_handle,
kernel.GetCurrentProcess()->handle_table.Create(std::move(semaphore)));
LOG_TRACE(Kernel_SVC, "called initial_count={}, max_count={}, created handle=0x{:08X}",
initial_count, max_count, *out_handle);
@ -904,7 +939,9 @@ static ResultCode CreateSemaphore(Handle* out_handle, s32 initial_count, s32 max
static ResultCode ReleaseSemaphore(s32* count, Handle handle, s32 release_count) {
LOG_TRACE(Kernel_SVC, "called release_count={}, handle=0x{:08X}", release_count, handle);
SharedPtr<Semaphore> semaphore = g_handle_table.Get<Semaphore>(handle);
SharedPtr<Semaphore> semaphore =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Semaphore>(
handle);
if (semaphore == nullptr)
return ERR_INVALID_HANDLE;
@ -916,13 +953,15 @@ static ResultCode ReleaseSemaphore(s32* count, Handle handle, s32 release_count)
/// Query process memory
static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* page_info,
Handle process_handle, u32 addr) {
SharedPtr<Process> process = g_handle_table.Get<Process>(process_handle);
SharedPtr<Process> process =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Process>(
process_handle);
if (process == nullptr)
return ERR_INVALID_HANDLE;
auto vma = process->vm_manager.FindVMA(addr);
if (vma == g_current_process->vm_manager.vma_map.end())
if (vma == process->vm_manager.vma_map.end())
return ERR_INVALID_ADDRESS;
memory_info->base_address = vma->second.base;
@ -942,9 +981,10 @@ static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, u32
/// Create an event
static ResultCode CreateEvent(Handle* out_handle, u32 reset_type) {
SharedPtr<Event> evt = Event::Create(static_cast<ResetType>(reset_type),
fmt::format("event-{:08x}", Core::CPU().GetReg(14)));
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(evt)));
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Event> evt = kernel.CreateEvent(static_cast<ResetType>(reset_type),
fmt::format("event-{:08x}", Core::CPU().GetReg(14)));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(evt)));
LOG_TRACE(Kernel_SVC, "called reset_type=0x{:08X} : created handle=0x{:08X}", reset_type,
*out_handle);
@ -953,7 +993,9 @@ static ResultCode CreateEvent(Handle* out_handle, u32 reset_type) {
/// Duplicates a kernel handle
static ResultCode DuplicateHandle(Handle* out, Handle handle) {
CASCADE_RESULT(*out, g_handle_table.Duplicate(handle));
CASCADE_RESULT(
*out,
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Duplicate(handle));
LOG_TRACE(Kernel_SVC, "duplicated 0x{:08X} to 0x{:08X}", handle, *out);
return RESULT_SUCCESS;
}
@ -962,7 +1004,8 @@ static ResultCode DuplicateHandle(Handle* out, Handle handle) {
static ResultCode SignalEvent(Handle handle) {
LOG_TRACE(Kernel_SVC, "called event=0x{:08X}", handle);
SharedPtr<Event> evt = g_handle_table.Get<Event>(handle);
SharedPtr<Event> evt =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Event>(handle);
if (evt == nullptr)
return ERR_INVALID_HANDLE;
@ -975,7 +1018,8 @@ static ResultCode SignalEvent(Handle handle) {
static ResultCode ClearEvent(Handle handle) {
LOG_TRACE(Kernel_SVC, "called event=0x{:08X}", handle);
SharedPtr<Event> evt = g_handle_table.Get<Event>(handle);
SharedPtr<Event> evt =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Event>(handle);
if (evt == nullptr)
return ERR_INVALID_HANDLE;
@ -985,9 +1029,10 @@ static ResultCode ClearEvent(Handle handle) {
/// Creates a timer
static ResultCode CreateTimer(Handle* out_handle, u32 reset_type) {
SharedPtr<Timer> timer = Timer::Create(static_cast<ResetType>(reset_type),
fmt ::format("timer-{:08x}", Core::CPU().GetReg(14)));
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(timer)));
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Timer> timer = kernel.CreateTimer(
static_cast<ResetType>(reset_type), fmt::format("timer-{:08x}", Core::CPU().GetReg(14)));
CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(timer)));
LOG_TRACE(Kernel_SVC, "called reset_type=0x{:08X} : created handle=0x{:08X}", reset_type,
*out_handle);
@ -998,7 +1043,8 @@ static ResultCode CreateTimer(Handle* out_handle, u32 reset_type) {
static ResultCode ClearTimer(Handle handle) {
LOG_TRACE(Kernel_SVC, "called timer=0x{:08X}", handle);
SharedPtr<Timer> timer = g_handle_table.Get<Timer>(handle);
SharedPtr<Timer> timer =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Timer>(handle);
if (timer == nullptr)
return ERR_INVALID_HANDLE;
@ -1014,7 +1060,8 @@ static ResultCode SetTimer(Handle handle, s64 initial, s64 interval) {
return ERR_OUT_OF_RANGE_KERNEL;
}
SharedPtr<Timer> timer = g_handle_table.Get<Timer>(handle);
SharedPtr<Timer> timer =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Timer>(handle);
if (timer == nullptr)
return ERR_INVALID_HANDLE;
@ -1027,7 +1074,8 @@ static ResultCode SetTimer(Handle handle, s64 initial, s64 interval) {
static ResultCode CancelTimer(Handle handle) {
LOG_TRACE(Kernel_SVC, "called timer=0x{:08X}", handle);
SharedPtr<Timer> timer = g_handle_table.Get<Timer>(handle);
SharedPtr<Timer> timer =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Timer>(handle);
if (timer == nullptr)
return ERR_INVALID_HANDLE;
@ -1040,25 +1088,29 @@ static ResultCode CancelTimer(Handle handle) {
static void SleepThread(s64 nanoseconds) {
LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
ThreadManager& thread_manager = kernel.GetThreadManager();
// Don't attempt to yield execution if there are no available threads to run,
// this way we avoid a useless reschedule to the idle thread.
if (nanoseconds == 0 && !HaveReadyThreads())
if (nanoseconds == 0 && !thread_manager.HaveReadyThreads())
return;
// Sleep current thread and check for next thread to schedule
WaitCurrentThread_Sleep();
thread_manager.WaitCurrentThread_Sleep();
// Create an event to wake the thread up after the specified nanosecond delay has passed
GetCurrentThread()->WakeAfterDelay(nanoseconds);
thread_manager.GetCurrentThread()->WakeAfterDelay(nanoseconds);
Core::System::GetInstance().PrepareReschedule();
}
/// This returns the total CPU ticks elapsed since the CPU was powered-on
static s64 GetSystemTick() {
s64 result = CoreTiming::GetTicks();
s64 result = Core::System::GetInstance().CoreTiming().GetTicks();
// Advance time to defeat dumb games (like Cubic Ninja) that busy-wait for the frame to end.
CoreTiming::AddTicks(150); // Measured time between two calls on a 9.2 o3DS with Ninjhax 1.1b
// Measured time between two calls on a 9.2 o3DS with Ninjhax 1.1b
Core::System::GetInstance().CoreTiming().AddTicks(150);
return result;
}
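// Why the 150-tick advance above helps (illustrative sketch of the guest-side pattern being
// worked around; the exact guest code is an assumption): a game that busy-waits on the tick
// counter would otherwise spin forever if time never advanced between two calls, e.g.
//   u64 start = svcGetSystemTick();
//   while (svcGetSystemTick() - start < ticks_per_frame) { /* spin */ }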
@ -1096,18 +1148,20 @@ static ResultCode CreateMemoryBlock(Handle* out_handle, u32 addr, u32 size, u32
return ERR_INVALID_ADDRESS;
}
SharedPtr<Process> current_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
// When trying to create a memory block with address = 0,
// if the process has the Shared Device Memory flag in the exheader,
// then we have to allocate from the same region as the caller process instead of the BASE
// region.
MemoryRegion region = MemoryRegion::BASE;
if (addr == 0 && g_current_process->flags.shared_device_mem)
region = g_current_process->flags.memory_region;
if (addr == 0 && current_process->flags.shared_device_mem)
region = current_process->flags.memory_region;
shared_memory =
SharedMemory::Create(g_current_process, size, static_cast<MemoryPermission>(my_permission),
static_cast<MemoryPermission>(other_permission), addr, region);
CASCADE_RESULT(*out_handle, g_handle_table.Create(std::move(shared_memory)));
shared_memory = Core::System::GetInstance().Kernel().CreateSharedMemory(
current_process.get(), size, static_cast<MemoryPermission>(my_permission),
static_cast<MemoryPermission>(other_permission), addr, region);
CASCADE_RESULT(*out_handle, current_process->handle_table.Create(std::move(shared_memory)));
LOG_WARNING(Kernel_SVC, "called addr=0x{:08X}", addr);
return RESULT_SUCCESS;
@ -1118,70 +1172,82 @@ static ResultCode CreatePort(Handle* server_port, Handle* client_port, VAddr nam
// TODO(Subv): Implement named ports.
ASSERT_MSG(name_address == 0, "Named ports are currently unimplemented");
auto ports = ServerPort::CreatePortPair(max_sessions);
CASCADE_RESULT(*client_port,
g_handle_table.Create(std::move(std::get<SharedPtr<ClientPort>>(ports))));
KernelSystem& kernel = Core::System::GetInstance().Kernel();
SharedPtr<Process> current_process = kernel.GetCurrentProcess();
auto ports = kernel.CreatePortPair(max_sessions);
CASCADE_RESULT(*client_port, current_process->handle_table.Create(
std::move(std::get<SharedPtr<ClientPort>>(ports))));
// Note: The 3DS kernel also leaks the client port handle if the server port handle fails to be
// created.
CASCADE_RESULT(*server_port,
g_handle_table.Create(std::move(std::get<SharedPtr<ServerPort>>(ports))));
CASCADE_RESULT(*server_port, current_process->handle_table.Create(
std::move(std::get<SharedPtr<ServerPort>>(ports))));
LOG_TRACE(Kernel_SVC, "called max_sessions={}", max_sessions);
return RESULT_SUCCESS;
}
static ResultCode CreateSessionToPort(Handle* out_client_session, Handle client_port_handle) {
SharedPtr<ClientPort> client_port = g_handle_table.Get<ClientPort>(client_port_handle);
SharedPtr<Process> current_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
SharedPtr<ClientPort> client_port =
current_process->handle_table.Get<ClientPort>(client_port_handle);
if (client_port == nullptr)
return ERR_INVALID_HANDLE;
CASCADE_RESULT(auto session, client_port->Connect());
CASCADE_RESULT(*out_client_session, g_handle_table.Create(std::move(session)));
CASCADE_RESULT(*out_client_session, current_process->handle_table.Create(std::move(session)));
return RESULT_SUCCESS;
}
static ResultCode CreateSession(Handle* server_session, Handle* client_session) {
auto sessions = ServerSession::CreateSessionPair();
KernelSystem& kernel = Core::System::GetInstance().Kernel();
auto sessions = kernel.CreateSessionPair();
SharedPtr<Process> current_process = kernel.GetCurrentProcess();
auto& server = std::get<SharedPtr<ServerSession>>(sessions);
CASCADE_RESULT(*server_session, g_handle_table.Create(std::move(server)));
CASCADE_RESULT(*server_session, current_process->handle_table.Create(std::move(server)));
auto& client = std::get<SharedPtr<ClientSession>>(sessions);
CASCADE_RESULT(*client_session, g_handle_table.Create(std::move(client)));
CASCADE_RESULT(*client_session, current_process->handle_table.Create(std::move(client)));
LOG_TRACE(Kernel_SVC, "called");
return RESULT_SUCCESS;
}
static ResultCode AcceptSession(Handle* out_server_session, Handle server_port_handle) {
SharedPtr<ServerPort> server_port = g_handle_table.Get<ServerPort>(server_port_handle);
SharedPtr<Process> current_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
SharedPtr<ServerPort> server_port =
current_process->handle_table.Get<ServerPort>(server_port_handle);
if (server_port == nullptr)
return ERR_INVALID_HANDLE;
CASCADE_RESULT(auto session, server_port->Accept());
CASCADE_RESULT(*out_server_session, g_handle_table.Create(std::move(session)));
CASCADE_RESULT(*out_server_session, current_process->handle_table.Create(std::move(session)));
return RESULT_SUCCESS;
}
static ResultCode GetSystemInfo(s64* out, u32 type, s32 param) {
LOG_TRACE(Kernel_SVC, "called type={} param={}", type, param);
KernelSystem& kernel = Core::System::GetInstance().Kernel();
switch ((SystemInfoType)type) {
case SystemInfoType::REGION_MEMORY_USAGE:
switch ((SystemInfoMemUsageRegion)param) {
case SystemInfoMemUsageRegion::ALL:
*out = GetMemoryRegion(MemoryRegion::APPLICATION)->used +
GetMemoryRegion(MemoryRegion::SYSTEM)->used +
GetMemoryRegion(MemoryRegion::BASE)->used;
*out = kernel.GetMemoryRegion(MemoryRegion::APPLICATION)->used +
kernel.GetMemoryRegion(MemoryRegion::SYSTEM)->used +
kernel.GetMemoryRegion(MemoryRegion::BASE)->used;
break;
case SystemInfoMemUsageRegion::APPLICATION:
*out = GetMemoryRegion(MemoryRegion::APPLICATION)->used;
*out = kernel.GetMemoryRegion(MemoryRegion::APPLICATION)->used;
break;
case SystemInfoMemUsageRegion::SYSTEM:
*out = GetMemoryRegion(MemoryRegion::SYSTEM)->used;
*out = kernel.GetMemoryRegion(MemoryRegion::SYSTEM)->used;
break;
case SystemInfoMemUsageRegion::BASE:
*out = GetMemoryRegion(MemoryRegion::BASE)->used;
*out = kernel.GetMemoryRegion(MemoryRegion::BASE)->used;
break;
default:
LOG_ERROR(Kernel_SVC, "unknown GetSystemInfo type=0 region: param={}", param);
@ -1209,7 +1275,9 @@ static ResultCode GetSystemInfo(s64* out, u32 type, s32 param) {
static ResultCode GetProcessInfo(s64* out, Handle process_handle, u32 type) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} type={}", process_handle, type);
SharedPtr<Process> process = g_handle_table.Get<Process>(process_handle);
SharedPtr<Process> process =
Core::System::GetInstance().Kernel().GetCurrentProcess()->handle_table.Get<Process>(
process_handle);
if (process == nullptr)
return ERR_INVALID_HANDLE;
@ -1218,7 +1286,7 @@ static ResultCode GetProcessInfo(s64* out, Handle process_handle, u32 type) {
case 2:
// TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure
// what's the difference between them.
*out = process->heap_used + process->linear_heap_used + process->misc_memory_used;
*out = process->memory_used;
if (*out % Memory::PAGE_SIZE != 0) {
LOG_ERROR(Kernel_SVC, "called, memory size not page-aligned");
return ERR_MISALIGNED_SIZE;
@ -1406,8 +1474,9 @@ void CallSVC(u32 immediate) {
// Lock the global kernel mutex when we enter the kernel HLE.
std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
ASSERT_MSG(g_current_process->status == ProcessStatus::Running,
"Running threads from exiting processes is unimplemented");
DEBUG_ASSERT_MSG(Core::System::GetInstance().Kernel().GetCurrentProcess()->status ==
ProcessStatus::Running,
"Running threads from exiting processes is unimplemented");
const FunctionDef* info = GetSVCInfo(immediate);
if (info) {

View file

@ -4,16 +4,15 @@
#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/math_util.h"
#include "common/thread_queue_list.h"
#include "core/arm/arm_interface.h"
#include "core/arm/skyeye_common/armstate.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
@ -26,9 +25,6 @@
namespace Kernel {
/// Event type for the thread wake up event
static CoreTiming::EventType* ThreadWakeupEventType = nullptr;
bool Thread::ShouldWait(Thread* thread) const {
return status != ThreadStatus::Dead;
}
@ -37,46 +33,29 @@ void Thread::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
// TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, allowing
// us to simply use a pool index or similar.
static Kernel::HandleTable wakeup_callback_handle_table;
// Lists all thread ids that aren't deleted/etc.
static std::vector<SharedPtr<Thread>> thread_list;
// Lists only ready thread ids.
static Common::ThreadQueueList<Thread*, ThreadPrioLowest + 1> ready_queue;
static SharedPtr<Thread> current_thread;
// The first available thread id at startup
static u32 next_thread_id;
/**
* Creates a new thread ID
* @return The new thread ID
*/
inline static u32 const NewThreadId() {
u32 ThreadManager::NewThreadId() {
return next_thread_id++;
}
Thread::Thread() : context(Core::CPU().NewContext()) {}
Thread::Thread(KernelSystem& kernel)
: WaitObject(kernel), context(Core::CPU().NewContext()),
thread_manager(kernel.GetThreadManager()) {}
Thread::~Thread() {}
Thread* GetCurrentThread() {
Thread* ThreadManager::GetCurrentThread() const {
return current_thread.get();
}
void Thread::Stop() {
// Cancel any outstanding wakeup events for this thread
CoreTiming::UnscheduleEvent(ThreadWakeupEventType, callback_handle);
wakeup_callback_handle_table.Close(callback_handle);
callback_handle = 0;
Core::System::GetInstance().CoreTiming().UnscheduleEvent(thread_manager.ThreadWakeupEventType,
thread_id);
thread_manager.wakeup_callback_table.erase(thread_id);
// Clean up thread from ready queue
// This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
if (status == ThreadStatus::Ready) {
ready_queue.remove(current_priority, this);
thread_manager.ready_queue.remove(current_priority, this);
}
status = ThreadStatus::Dead;
@ -96,19 +75,17 @@ void Thread::Stop() {
u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
u32 tls_slot =
((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot);
owner_process->tls_slots[tls_page].reset(tls_slot);
}
/**
* Switches the CPU's active thread context to that of the specified thread
* @param new_thread The thread to switch to
*/
static void SwitchContext(Thread* new_thread) {
void ThreadManager::SwitchContext(Thread* new_thread) {
Thread* previous_thread = GetCurrentThread();
Core::Timing& timing = Core::System::GetInstance().CoreTiming();
// Save context for previous thread
if (previous_thread) {
previous_thread->last_running_ticks = CoreTiming::GetTicks();
previous_thread->last_running_ticks = timing.GetTicks();
Core::CPU().SaveContext(previous_thread->context);
if (previous_thread->status == ThreadStatus::Running) {
@ -125,9 +102,9 @@ static void SwitchContext(Thread* new_thread) {
"Thread must be ready to become running.");
// Cancel any outstanding wakeup events for this thread
CoreTiming::UnscheduleEvent(ThreadWakeupEventType, new_thread->callback_handle);
timing.UnscheduleEvent(ThreadWakeupEventType, new_thread->thread_id);
auto previous_process = Kernel::g_current_process;
auto previous_process = Core::System::GetInstance().Kernel().GetCurrentProcess();
current_thread = new_thread;
@ -135,8 +112,8 @@ static void SwitchContext(Thread* new_thread) {
new_thread->status = ThreadStatus::Running;
if (previous_process != current_thread->owner_process) {
Kernel::g_current_process = current_thread->owner_process;
SetCurrentPageTable(&Kernel::g_current_process->vm_manager.page_table);
Core::System::GetInstance().Kernel().SetCurrentProcess(current_thread->owner_process);
SetCurrentPageTable(&current_thread->owner_process->vm_manager.page_table);
}
Core::CPU().LoadContext(new_thread->context);
@ -148,11 +125,7 @@ static void SwitchContext(Thread* new_thread) {
}
}
/**
* Pops and returns the next thread from the thread queue
* @return A pointer to the next ready thread
*/
static Thread* PopNextReadyThread() {
Thread* ThreadManager::PopNextReadyThread() {
Thread* next;
Thread* thread = GetCurrentThread();
@ -171,27 +144,22 @@ static Thread* PopNextReadyThread() {
return next;
}
void WaitCurrentThread_Sleep() {
void ThreadManager::WaitCurrentThread_Sleep() {
Thread* thread = GetCurrentThread();
thread->status = ThreadStatus::WaitSleep;
}
void ExitCurrentThread() {
void ThreadManager::ExitCurrentThread() {
Thread* thread = GetCurrentThread();
thread->Stop();
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
}
/**
* Callback that will wake up the thread it was scheduled for
* @param thread_handle The handle of the thread that's been awoken
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
static void ThreadWakeupCallback(u64 thread_handle, s64 cycles_late) {
SharedPtr<Thread> thread = wakeup_callback_handle_table.Get<Thread>((Handle)thread_handle);
void ThreadManager::ThreadWakeupCallback(u64 thread_id, s64 cycles_late) {
SharedPtr<Thread> thread = wakeup_callback_table.at(thread_id);
if (thread == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", (Handle)thread_handle);
LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", thread_id);
return;
}
@ -217,7 +185,8 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
if (nanoseconds == -1)
return;
CoreTiming::ScheduleEvent(nsToCycles(nanoseconds), ThreadWakeupEventType, callback_handle);
Core::System::GetInstance().CoreTiming().ScheduleEvent(
nsToCycles(nanoseconds), thread_manager.ThreadWakeupEventType, thread_id);
}
void Thread::ResumeFromWait() {
@ -253,15 +222,12 @@ void Thread::ResumeFromWait() {
wakeup_callback = nullptr;
ready_queue.push_back(current_priority, this);
thread_manager.ready_queue.push_back(current_priority, this);
status = ThreadStatus::Ready;
Core::System::GetInstance().PrepareReschedule();
}
/**
* Prints the thread queue for debugging purposes
*/
static void DebugThreadQueue() {
void ThreadManager::DebugThreadQueue() {
Thread* thread = GetCurrentThread();
if (!thread) {
LOG_DEBUG(Kernel, "Current: NO CURRENT THREAD");
@ -320,9 +286,9 @@ static void ResetThreadContext(const std::unique_ptr<ARM_Interface::ThreadContex
context->SetCpsr(USER32MODE | ((entry_point & 1) << 5)); // Usermode and THUMB mode
}
ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, u32 priority,
u32 arg, s32 processor_id, VAddr stack_top,
SharedPtr<Process> owner_process) {
ResultVal<SharedPtr<Thread>> KernelSystem::CreateThread(std::string name, VAddr entry_point,
u32 priority, u32 arg, s32 processor_id,
VAddr stack_top, Process& owner_process) {
// Check if the priority is in range. Lowest priority -> highest priority id.
if (priority > ThreadPrioLowest) {
LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
@ -336,33 +302,33 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
// TODO(yuriks): Other checks, returning 0xD9001BEA
if (!Memory::IsValidVirtualAddress(*owner_process, entry_point)) {
if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) {
LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:08x}", name, entry_point);
// TODO: Verify error
return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
}
SharedPtr<Thread> thread(new Thread);
SharedPtr<Thread> thread(new Thread(*this));
thread_list.push_back(thread);
ready_queue.prepare(priority);
thread_manager->thread_list.push_back(thread);
thread_manager->ready_queue.prepare(priority);
thread->thread_id = NewThreadId();
thread->thread_id = thread_manager->NewThreadId();
thread->status = ThreadStatus::Dormant;
thread->entry_point = entry_point;
thread->stack_top = stack_top;
thread->nominal_priority = thread->current_priority = priority;
thread->last_running_ticks = CoreTiming::GetTicks();
thread->last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
thread->processor_id = processor_id;
thread->wait_objects.clear();
thread->wait_address = 0;
thread->name = std::move(name);
thread->callback_handle = wakeup_callback_handle_table.Create(thread).Unwrap();
thread->owner_process = owner_process;
thread_manager->wakeup_callback_table[thread->thread_id] = thread.get();
thread->owner_process = &owner_process;
// Find the next available TLS index, and mark it as used
auto& tls_slots = owner_process->tls_slots;
auto& tls_slots = owner_process.tls_slots;
auto [available_page, available_slot, needs_allocation] = GetFreeThreadLocalSlot(tls_slots);
@ -370,32 +336,26 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
// There are no already-allocated pages with free slots, so let's allocate a new one.
// TLS pages are allocated from the BASE region in the linear heap.
MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::BASE);
auto& linheap_memory = memory_region->linear_heap_memory;
if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
// Allocate some memory from the end of the linear heap for this region.
auto offset = memory_region->LinearAllocate(Memory::PAGE_SIZE);
if (!offset) {
LOG_ERROR(Kernel_SVC,
"Not enough space in region to allocate a new TLS page for thread");
return ERR_OUT_OF_MEMORY;
}
std::size_t offset = linheap_memory->size();
// Allocate some memory from the end of the linear heap for this region.
linheap_memory->insert(linheap_memory->end(), Memory::PAGE_SIZE, 0);
memory_region->used += Memory::PAGE_SIZE;
owner_process->linear_heap_used += Memory::PAGE_SIZE;
owner_process.memory_used += Memory::PAGE_SIZE;
tls_slots.emplace_back(0); // The page is completely available at the start
available_page = tls_slots.size() - 1;
available_slot = 0; // Use the first slot in the new page
auto& vm_manager = owner_process->vm_manager;
vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
auto& vm_manager = owner_process.vm_manager;
// Map the page to the current process' address space.
// TODO(Subv): Find the correct MemoryState for this region.
vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
linheap_memory, offset, Memory::PAGE_SIZE, MemoryState::Private);
vm_manager.MapBackingMemory(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
Memory::fcram.data() + *offset, Memory::PAGE_SIZE,
MemoryState::Locked);
}
// Mark the slot as used
@ -403,11 +363,13 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
available_slot * Memory::TLS_ENTRY_SIZE;
Memory::ZeroBlock(owner_process, thread->tls_address, Memory::TLS_ENTRY_SIZE);
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
// to initialize the context
ResetThreadContext(thread->context, stack_top, entry_point, arg);
ready_queue.push_back(thread->current_priority, thread.get());
thread_manager->ready_queue.push_back(thread->current_priority, thread.get());
thread->status = ThreadStatus::Ready;
return MakeResult<SharedPtr<Thread>>(std::move(thread));
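// Worked example of the TLS address computation above (the constants are assumptions for
// illustration; the real values live in core/memory.h):
constexpr VAddr ExampleTlsAddress(VAddr tls_area_vaddr, u32 page, u32 slot) {
    constexpr u32 page_size = 0x1000;     // assumed Memory::PAGE_SIZE
    constexpr u32 tls_entry_size = 0x200; // assumed Memory::TLS_ENTRY_SIZE
    return tls_area_vaddr + page * page_size + slot * tls_entry_size;
}
// For page 1, slot 3 this yields TLS_AREA_VADDR + 0x1000 + 0x600 = TLS_AREA_VADDR + 0x1600.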
@ -418,9 +380,9 @@ void Thread::SetPriority(u32 priority) {
"Invalid priority value.");
// If thread was ready, adjust queues
if (status == ThreadStatus::Ready)
ready_queue.move(this, current_priority, priority);
thread_manager.ready_queue.move(this, current_priority, priority);
else
ready_queue.prepare(priority);
thread_manager.ready_queue.prepare(priority);
nominal_priority = current_priority = priority;
}
@ -437,17 +399,18 @@ void Thread::UpdatePriority() {
void Thread::BoostPriority(u32 priority) {
// If thread was ready, adjust queues
if (status == ThreadStatus::Ready)
ready_queue.move(this, current_priority, priority);
thread_manager.ready_queue.move(this, current_priority, priority);
else
ready_queue.prepare(priority);
thread_manager.ready_queue.prepare(priority);
current_priority = priority;
}
SharedPtr<Thread> SetupMainThread(u32 entry_point, u32 priority, SharedPtr<Process> owner_process) {
SharedPtr<Thread> SetupMainThread(KernelSystem& kernel, u32 entry_point, u32 priority,
SharedPtr<Process> owner_process) {
// Initialize new "main" thread
auto thread_res =
Thread::Create("main", entry_point, priority, 0, owner_process->ideal_processor,
Memory::HEAP_VADDR_END, owner_process);
kernel.CreateThread("main", entry_point, priority, 0, owner_process->ideal_processor,
Memory::HEAP_VADDR_END, *owner_process);
SharedPtr<Thread> thread = std::move(thread_res).Unwrap();
@ -458,11 +421,11 @@ SharedPtr<Thread> SetupMainThread(u32 entry_point, u32 priority, SharedPtr<Proce
return thread;
}
bool HaveReadyThreads() {
bool ThreadManager::HaveReadyThreads() {
return ready_queue.get_first() != nullptr;
}
void Reschedule() {
void ThreadManager::Reschedule() {
Thread* cur = GetCurrentThread();
Thread* next = PopNextReadyThread();
@ -497,27 +460,19 @@ VAddr Thread::GetCommandBufferAddress() const {
return GetTLSAddress() + CommandHeaderOffset;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
void ThreadingInit() {
ThreadWakeupEventType = CoreTiming::RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
current_thread = nullptr;
next_thread_id = 1;
ThreadManager::ThreadManager() {
ThreadWakeupEventType = Core::System::GetInstance().CoreTiming().RegisterEvent(
"ThreadWakeupCallback",
[this](u64 thread_id, s64 cycle_late) { ThreadWakeupCallback(thread_id, cycle_late); });
}
void ThreadingShutdown() {
current_thread = nullptr;
ThreadManager::~ThreadManager() {
for (auto& t : thread_list) {
t->Stop();
}
thread_list.clear();
ready_queue.clear();
ClearProcessList();
}
const std::vector<SharedPtr<Thread>>& GetThreadList() {
const std::vector<SharedPtr<Thread>>& ThreadManager::GetThreadList() {
return thread_list;
}

View file

@ -10,7 +10,9 @@
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
#include "common/common_types.h"
#include "common/thread_queue_list.h"
#include "core/arm/arm_interface.h"
#include "core/core_timing.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
@ -53,23 +55,89 @@ enum class ThreadWakeupReason {
Timeout // The thread was woken up due to a wait timeout.
};
class ThreadManager {
public:
ThreadManager();
~ThreadManager();
/**
* Creates a new thread ID
* @return The new thread ID
*/
u32 NewThreadId();
/**
* Gets the current thread
*/
Thread* GetCurrentThread() const;
/**
* Reschedules to the next available thread (call after current thread is suspended)
*/
void Reschedule();
/**
* Prints the thread queue for debugging purposes
*/
void DebugThreadQueue();
/**
* Returns whether there are any threads that are ready to run.
*/
bool HaveReadyThreads();
/**
* Waits the current thread on a sleep
*/
void WaitCurrentThread_Sleep();
/**
* Stops the current thread and removes it from the thread_list
*/
void ExitCurrentThread();
/**
* Get a const reference to the thread list for debug use
*/
const std::vector<SharedPtr<Thread>>& GetThreadList();
private:
/**
* Switches the CPU's active thread context to that of the specified thread
* @param new_thread The thread to switch to
*/
void SwitchContext(Thread* new_thread);
/**
* Pops and returns the next thread from the thread queue
* @return A pointer to the next ready thread
*/
Thread* PopNextReadyThread();
/**
* Callback that will wake up the thread it was scheduled for
* @param thread_id The ID of the thread that's been awoken
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
void ThreadWakeupCallback(u64 thread_id, s64 cycles_late);
u32 next_thread_id = 1;
SharedPtr<Thread> current_thread;
Common::ThreadQueueList<Thread*, ThreadPrioLowest + 1> ready_queue;
std::unordered_map<u64, Thread*> wakeup_callback_table;
/// Event type for the thread wake up event
Core::TimingEventType* ThreadWakeupEventType = nullptr;
// Lists all threads that aren't deleted.
std::vector<SharedPtr<Thread>> thread_list;
friend class Thread;
friend class KernelSystem;
};
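// Hedged usage sketch of this interface (it mirrors the SleepThread SVC earlier in this diff;
// the wrapper function itself is an assumption for illustration): callers suspend the current
// thread through the manager, arm its wakeup, then ask the core to reschedule.
inline void SleepCurrentThread(KernelSystem& kernel, s64 nanoseconds) {
    ThreadManager& thread_manager = kernel.GetThreadManager();
    if (nanoseconds == 0 && !thread_manager.HaveReadyThreads())
        return; // nothing else is ready, so a zero-length yield would be a no-op
    thread_manager.WaitCurrentThread_Sleep();                        // suspend the caller
    thread_manager.GetCurrentThread()->WakeAfterDelay(nanoseconds);  // arm the wakeup event
    Core::System::GetInstance().PrepareReschedule();                 // pick the next ready thread
}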
class Thread final : public WaitObject {
public:
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread
* @return A shared pointer to the newly created thread
*/
static ResultVal<SharedPtr<Thread>> Create(std::string name, VAddr entry_point, u32 priority,
u32 arg, s32 processor_id, VAddr stack_top,
SharedPtr<Process> owner_process);
std::string GetName() const override {
return name;
}
@ -204,7 +272,7 @@ public:
/// Mutexes that this thread is currently waiting for.
boost::container::flat_set<SharedPtr<Mutex>> pending_mutexes;
SharedPtr<Process> owner_process; ///< Process that owns this thread
Process* owner_process; ///< Process that owns this thread
/// Objects that the thread is waiting on, in the same order as they were
// passed to WaitSynchronization1/N.
@ -214,9 +282,6 @@ public:
std::string name;
/// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
Handle callback_handle;
using WakeupCallback = void(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object);
// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
@ -225,69 +290,23 @@ public:
std::function<WakeupCallback> wakeup_callback;
private:
Thread();
explicit Thread(KernelSystem&);
~Thread() override;
ThreadManager& thread_manager;
friend class KernelSystem;
};
/**
* Sets up the primary application thread
* @param kernel The kernel instance on which the thread is created
* @param entry_point The address at which the thread should start execution
* @param priority The priority to give the main thread
* @param owner_process The parent process for the main thread
* @return A shared pointer to the main thread
*/
SharedPtr<Thread> SetupMainThread(u32 entry_point, u32 priority, SharedPtr<Process> owner_process);
/**
* Returns whether there are any threads that are ready to run.
*/
bool HaveReadyThreads();
/**
* Reschedules to the next available thread (call after current thread is suspended)
*/
void Reschedule();
/**
* Arbitrate the highest priority thread that is waiting
* @param address The address for which waiting threads should be arbitrated
*/
Thread* ArbitrateHighestPriorityThread(u32 address);
/**
* Arbitrate all threads currently waiting.
* @param address The address for which waiting threads should be arbitrated
*/
void ArbitrateAllThreads(u32 address);
/**
* Gets the current thread
*/
Thread* GetCurrentThread();
/**
* Waits the current thread on a sleep
*/
void WaitCurrentThread_Sleep();
/**
* Stops the current thread and removes it from the thread_list
*/
void ExitCurrentThread();
/**
* Initialize threading
*/
void ThreadingInit();
/**
* Shutdown threading
*/
void ThreadingShutdown();
/**
* Get a const reference to the thread list for debug use
*/
const std::vector<SharedPtr<Thread>>& GetThreadList();
SharedPtr<Thread> SetupMainThread(KernelSystem& kernel, u32 entry_point, u32 priority,
SharedPtr<Process> owner_process);
} // namespace Kernel
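
For context on the new API surface, here is a minimal, hypothetical caller (not part of this diff) showing how code now reaches the per-instance ThreadManager through KernelSystem instead of the removed free functions; the DumpCurrentThread helper and the log message are illustrative only:

```cpp
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"

// Illustrative sketch: replaces calls to the old globals
// Kernel::GetCurrentThread(), Kernel::HaveReadyThreads() and Kernel::Reschedule().
void DumpCurrentThread(Core::System& system) {
    Kernel::ThreadManager& thread_manager = system.Kernel().GetThreadManager();

    if (Kernel::Thread* current = thread_manager.GetCurrentThread()) {
        LOG_DEBUG(Kernel, "current thread: {}", current->GetName());
    }

    // Only attempt a switch when another thread is actually ready to run.
    if (thread_manager.HaveReadyThreads()) {
        thread_manager.Reschedule();
    }
}
```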

View file

@ -3,9 +3,10 @@
// Refer to the license.txt file included.
#include <cinttypes>
#include <unordered_map>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/core.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
@ -13,24 +14,22 @@
namespace Kernel {
/// The event type of the generic timer callback event
static CoreTiming::EventType* timer_callback_event_type = nullptr;
// TODO(yuriks): This can be removed if Timer objects are explicitly pooled in the future, allowing
// us to simply use a pool index or similar.
static Kernel::HandleTable timer_callback_handle_table;
Timer::Timer(KernelSystem& kernel) : WaitObject(kernel), timer_manager(kernel.GetTimerManager()) {}
Timer::~Timer() {
Cancel();
timer_manager.timer_callback_table.erase(callback_id);
}
Timer::Timer() {}
Timer::~Timer() {}
SharedPtr<Timer> Timer::Create(ResetType reset_type, std::string name) {
SharedPtr<Timer> timer(new Timer);
SharedPtr<Timer> KernelSystem::CreateTimer(ResetType reset_type, std::string name) {
SharedPtr<Timer> timer(new Timer(*this));
timer->reset_type = reset_type;
timer->signaled = false;
timer->name = std::move(name);
timer->initial_delay = 0;
timer->interval_delay = 0;
timer->callback_handle = timer_callback_handle_table.Create(timer).Unwrap();
timer->callback_id = ++timer_manager->next_timer_callback_id;
timer_manager->timer_callback_table[timer->callback_id] = timer.get();
return timer;
}
@ -57,12 +56,14 @@ void Timer::Set(s64 initial, s64 interval) {
// Immediately invoke the callback
Signal(0);
} else {
CoreTiming::ScheduleEvent(nsToCycles(initial), timer_callback_event_type, callback_handle);
Core::System::GetInstance().CoreTiming().ScheduleEvent(
nsToCycles(initial), timer_manager.timer_callback_event_type, callback_id);
}
}
void Timer::Cancel() {
CoreTiming::UnscheduleEvent(timer_callback_event_type, callback_handle);
Core::System::GetInstance().CoreTiming().UnscheduleEvent(
timer_manager.timer_callback_event_type, callback_id);
}
void Timer::Clear() {
@ -86,29 +87,28 @@ void Timer::Signal(s64 cycles_late) {
if (interval_delay != 0) {
// Reschedule the timer with the interval delay
CoreTiming::ScheduleEvent(nsToCycles(interval_delay) - cycles_late,
timer_callback_event_type, callback_handle);
Core::System::GetInstance().CoreTiming().ScheduleEvent(
nsToCycles(interval_delay) - cycles_late, timer_manager.timer_callback_event_type,
callback_id);
}
}
/// The timer callback event, called when a timer is fired
static void TimerCallback(u64 timer_handle, s64 cycles_late) {
SharedPtr<Timer> timer =
timer_callback_handle_table.Get<Timer>(static_cast<Handle>(timer_handle));
void TimerManager::TimerCallback(u64 callback_id, s64 cycles_late) {
SharedPtr<Timer> timer = timer_callback_table.at(callback_id);
if (timer == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid timer {:08x}", timer_handle);
LOG_CRITICAL(Kernel, "Callback fired for invalid timer {:016x}", callback_id);
return;
}
timer->Signal(cycles_late);
}
void TimersInit() {
timer_callback_handle_table.Clear();
timer_callback_event_type = CoreTiming::RegisterEvent("TimerCallback", TimerCallback);
TimerManager::TimerManager() {
timer_callback_event_type = Core::System::GetInstance().CoreTiming().RegisterEvent(
"TimerCallback",
[this](u64 thread_id, s64 cycle_late) { TimerCallback(thread_id, cycle_late); });
}
void TimersShutdown() {}
} // namespace Kernel

View file

@ -5,21 +5,32 @@
#pragma once
#include "common/common_types.h"
#include "core/core_timing.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/wait_object.h"
namespace Kernel {
class TimerManager {
public:
TimerManager();
private:
/// The timer callback event, called when a timer is fired
void TimerCallback(u64 callback_id, s64 cycles_late);
/// The event type of the generic timer callback event
Core::TimingEventType* timer_callback_event_type = nullptr;
u64 next_timer_callback_id = 0;
std::unordered_map<u64, Timer*> timer_callback_table;
friend class Timer;
friend class KernelSystem;
};
class Timer final : public WaitObject {
public:
/**
* Creates a timer
* @param reset_type ResetType describing how to create the timer
* @param name Optional name of timer
* @return The created Timer
*/
static SharedPtr<Timer> Create(ResetType reset_type, std::string name = "Unknown");
std::string GetTypeName() const override {
return "Timer";
}
@ -68,7 +79,7 @@ public:
void Signal(s64 cycles_late);
private:
Timer();
explicit Timer(KernelSystem& kernel);
~Timer() override;
ResetType reset_type; ///< The ResetType of this timer
@ -79,13 +90,12 @@ private:
bool signaled; ///< Whether the timer has been signaled or not
std::string name; ///< Name of timer (optional)
/// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
Handle callback_handle;
/// ID used as userdata to reference this object when inserting into the CoreTiming queue.
u64 callback_id;
TimerManager& timer_manager;
friend class KernelSystem;
};
/// Initializes the required variables for timers
void TimersInit();
/// Tears down the timer variables
void TimersShutdown();
} // namespace Kernel
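
The timer rework above replaces the file-scope HandleTable with a per-manager map keyed by a monotonically increasing callback id, which is also what gets passed to the scheduler as userdata. Below is a self-contained sketch of that id-to-object pattern in isolation; the types and names are illustrative, not Citra's:

```cpp
#include <cstdint>
#include <iostream>
#include <unordered_map>

struct FakeTimer {
    const char* name;
};

class FakeTimerManager {
public:
    // Equivalent of timer creation: hand out a fresh id and remember the object.
    std::uint64_t Register(FakeTimer* timer) {
        const std::uint64_t id = ++next_callback_id;
        callback_table[id] = timer;
        return id; // The real code stores this as Timer::callback_id and passes it to ScheduleEvent.
    }

    // Equivalent of TimerManager::TimerCallback: map the userdata id back to the timer.
    void OnCallback(std::uint64_t id) {
        const auto it = callback_table.find(id);
        if (it == callback_table.end()) {
            std::cout << "Callback fired for invalid timer " << id << '\n';
            return;
        }
        std::cout << "Signal " << it->second->name << '\n';
    }

private:
    std::uint64_t next_callback_id = 0;
    std::unordered_map<std::uint64_t, FakeTimer*> callback_table;
};

int main() {
    FakeTimerManager manager;
    FakeTimer timer{"example"};
    const std::uint64_t id = manager.Register(&timer);
    manager.OnCallback(id);     // prints "Signal example"
    manager.OnCallback(id + 1); // prints the invalid-timer message
}
```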

View file

@ -408,4 +408,25 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
break;
}
}
ResultVal<std::vector<std::pair<u8*, u32>>> VMManager::GetBackingBlocksForRange(VAddr address,
u32 size) {
std::vector<std::pair<u8*, u32>> backing_blocks;
VAddr interval_target = address;
while (interval_target != address + size) {
auto vma = FindVMA(interval_target);
if (vma->second.type != VMAType::BackingMemory) {
LOG_ERROR(Kernel, "Trying to use already freed memory");
return ERR_INVALID_ADDRESS_STATE;
}
VAddr interval_end = std::min(address + size, vma->second.base + vma->second.size);
u32 interval_size = interval_end - interval_target;
u8* backing_memory = vma->second.backing_memory + (interval_target - vma->second.base);
backing_blocks.push_back({backing_memory, interval_size});
interval_target += interval_size;
}
return MakeResult(backing_blocks);
}
} // namespace Kernel
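
GetBackingBlocksForRange walks the VMA map and returns one (host pointer, length) pair per backing-memory interval covering the requested guest range, failing if any part of the range is no longer plain backing memory. A hedged usage sketch follows; the ReadGuestRange helper is hypothetical and not part of this diff:

```cpp
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/vm_manager.h"

// Hypothetical helper: gather a possibly non-contiguous guest range into one host buffer.
std::vector<u8> ReadGuestRange(Kernel::VMManager& vm_manager, VAddr address, u32 size) {
    std::vector<u8> out;
    out.reserve(size);

    auto blocks = vm_manager.GetBackingBlocksForRange(address, size);
    if (blocks.Failed()) {
        // Some part of the range is not backed by plain memory (e.g. it was freed).
        return out;
    }
    for (const auto& block : *blocks) {
        // block.first is the host pointer, block.second is the length of this interval.
        out.insert(out.end(), block.first, block.first + block.second);
    }
    return out;
}
```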

View file

@ -6,6 +6,7 @@
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "core/hle/result.h"
@ -213,6 +214,9 @@ public:
/// Dumps the address space layout to the log, for debugging
void LogLayout(Log::Level log_level) const;
/// Gets a list of backing memory blocks for the specified range
ResultVal<std::vector<std::pair<u8*, u32>>> GetBackingBlocksForRange(VAddr address, u32 size);
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
Memory::PageTable page_table;

View file

@ -5,7 +5,6 @@
#include <algorithm>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/config_mem.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
@ -13,7 +12,6 @@
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/shared_page.h"
namespace Kernel {

View file

@ -16,6 +16,8 @@ class Thread;
/// Class that represents a Kernel object that a thread can be waiting on
class WaitObject : public Object {
public:
using Object::Object;
/**
* Check if the specified thread should wait until the object is available
* @param thread The thread about which we're deciding.

View file

@ -1026,8 +1026,8 @@ void Module::Interface::BeginImportProgram(Kernel::HLERequestContext& ctx) {
// Create our CIAFile handle for the app to write to, and while the app writes
// Citra will store contents out to sdmc/nand
const FileSys::Path cia_path = {};
auto file =
std::make_shared<Service::FS::File>(std::make_unique<CIAFile>(media_type), cia_path);
auto file = std::make_shared<Service::FS::File>(
am->system, std::make_unique<CIAFile>(media_type), cia_path);
am->cia_installing = true;
@ -1053,8 +1053,8 @@ void Module::Interface::BeginImportProgramTemporarily(Kernel::HLERequestContext&
// Create our CIAFile handle for the app to write to, and while the app writes Citra will store
// contents out to sdmc/nand
const FileSys::Path cia_path = {};
auto file = std::make_shared<Service::FS::File>(std::make_unique<CIAFile>(FS::MediaType::NAND),
cia_path);
auto file = std::make_shared<Service::FS::File>(
am->system, std::make_unique<CIAFile>(FS::MediaType::NAND), cia_path);
am->cia_installing = true;
@ -1455,16 +1455,16 @@ void Module::Interface::GetMetaDataFromCia(Kernel::HLERequestContext& ctx) {
rb.PushMappedBuffer(output_buffer);
}
Module::Module() {
Module::Module(Core::System& system) : system(system) {
ScanForAllTitles();
system_updater_mutex = Kernel::Mutex::Create(false, "AM::SystemUpdaterMutex");
system_updater_mutex = system.Kernel().CreateMutex(false, "AM::SystemUpdaterMutex");
}
Module::~Module() = default;
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
auto am = std::make_shared<Module>();
auto am = std::make_shared<Module>(system);
std::make_shared<AM_APP>(am)->InstallAsService(service_manager);
std::make_shared<AM_NET>(am)->InstallAsService(service_manager);
std::make_shared<AM_SYS>(am)->InstallAsService(service_manager);

View file

@ -149,7 +149,7 @@ std::string GetMediaTitlePath(Service::FS::MediaType media_type);
class Module final {
public:
Module();
explicit Module(Core::System& system);
~Module();
class Interface : public ServiceFramework<Interface> {
@ -573,6 +573,7 @@ private:
*/
void ScanForAllTitles();
Core::System& system;
bool cia_installing = false;
std::array<std::vector<u64_le>, 3> am_title_list;
Kernel::SharedPtr<Kernel::Mutex> system_updater_mutex;

View file

@ -258,7 +258,7 @@ ResultVal<AppletManager::InitializeResult> AppletManager::Initialize(AppletId ap
slot_data->applet_id = static_cast<AppletId>(app_id);
// Note: In the real console the title id of a given applet slot is set by the APT module when
// calling StartApplication.
slot_data->title_id = Kernel::g_current_process->codeset->program_id;
slot_data->title_id = system.Kernel().GetCurrentProcess()->codeset->program_id;
slot_data->attributes.raw = attributes.raw;
if (slot_data->applet_id == AppletId::Application ||
@ -572,9 +572,9 @@ AppletManager::AppletManager(Core::System& system) : system(system) {
slot_data.registered = false;
slot_data.loaded = false;
slot_data.notification_event =
Kernel::Event::Create(Kernel::ResetType::OneShot, "APT:Notification");
system.Kernel().CreateEvent(Kernel::ResetType::OneShot, "APT:Notification");
slot_data.parameter_event =
Kernel::Event::Create(Kernel::ResetType::OneShot, "APT:Parameter");
system.Kernel().CreateEvent(Kernel::ResetType::OneShot, "APT:Parameter");
}
HLE::Applets::Init();
}

View file

@ -207,10 +207,17 @@ void Module::Interface::GetSharedFont(Kernel::HLERequestContext& ctx) {
// The shared font has to be relocated to the new address before being passed to the
// application.
auto maybe_vaddr =
Memory::PhysicalToVirtualAddress(apt->shared_font_mem->linear_heap_phys_address);
ASSERT(maybe_vaddr);
VAddr target_address = *maybe_vaddr;
// Note: the target address is still in the old linear heap region even on new firmware
// versions. This exception is made for shared font to resolve the following compatibility
// issue:
// The linear heap region changes depending on the kernel version marked in the application's
// exheader (not the actual version the application is running on). If an application with an
// old kernel version and an applet with a new kernel version run at the same time and both use
// the shared font, the differing linear heap regions would require the shared font to be
// relocated to two different addresses at the same time, which is impossible.
VAddr target_address =
apt->shared_font_mem->linear_heap_phys_offset + Memory::LINEAR_HEAP_VADDR;
if (!apt->shared_font_relocated) {
BCFNT::RelocateSharedFont(apt->shared_font_mem, target_address);
apt->shared_font_relocated = true;
@ -855,11 +862,11 @@ Module::Module(Core::System& system) : system(system) {
using Kernel::MemoryPermission;
shared_font_mem =
Kernel::SharedMemory::Create(nullptr, 0x332000, // 3272 KB
MemoryPermission::ReadWrite, MemoryPermission::Read, 0,
Kernel::MemoryRegion::SYSTEM, "APT:SharedFont");
system.Kernel().CreateSharedMemory(nullptr, 0x332000, // 3272 KB
MemoryPermission::ReadWrite, MemoryPermission::Read, 0,
Kernel::MemoryRegion::SYSTEM, "APT:SharedFont");
lock = Kernel::Mutex::Create(false, "APT_U:Lock");
lock = system.Kernel().CreateMutex(false, "APT_U:Lock");
}
Module::~Module() {}

View file

@ -903,15 +903,16 @@ void Module::Interface::GetNsDataNewFlagPrivileged(Kernel::HLERequestContext& ct
Module::Interface::Interface(std::shared_ptr<Module> boss, const char* name, u32 max_session)
: ServiceFramework(name, max_session), boss(std::move(boss)) {}
Module::Module() {
Module::Module(Core::System& system) {
using namespace Kernel;
// TODO: verify ResetType
task_finish_event = Event::Create(Kernel::ResetType::OneShot, "BOSS::task_finish_event");
task_finish_event =
system.Kernel().CreateEvent(Kernel::ResetType::OneShot, "BOSS::task_finish_event");
}
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
auto boss = std::make_shared<Module>();
auto boss = std::make_shared<Module>(system);
std::make_shared<BOSS_P>(boss)->InstallAsService(service_manager);
std::make_shared<BOSS_U>(boss)->InstallAsService(service_manager);
}

View file

@ -15,7 +15,7 @@ namespace Service::BOSS {
class Module final {
public:
Module();
explicit Module(Core::System& system);
~Module() = default;
class Interface : public ServiceFramework<Interface> {

View file

@ -151,7 +151,7 @@ void Module::StartReceiving(int port_id) {
// schedules a completion event according to the frame rate. The event will block on the
// capture task if it is not finished within the expected time
CoreTiming::ScheduleEvent(
system.CoreTiming().ScheduleEvent(
msToCycles(LATENCY_BY_FRAME_RATE[static_cast<int>(camera.frame_rate)]),
completion_event_callback, port_id);
}
@ -160,7 +160,7 @@ void Module::CancelReceiving(int port_id) {
if (!ports[port_id].is_receiving)
return;
LOG_WARNING(Service_CAM, "tries to cancel an ongoing receiving process.");
CoreTiming::UnscheduleEvent(completion_event_callback, port_id);
system.CoreTiming().UnscheduleEvent(completion_event_callback, port_id);
ports[port_id].capture_result.wait();
ports[port_id].is_receiving = false;
}
@ -1019,16 +1019,17 @@ void Module::Interface::DriverFinalize(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_CAM, "called");
}
Module::Module() {
Module::Module(Core::System& system) : system(system) {
using namespace Kernel;
for (PortConfig& port : ports) {
port.completion_event = Event::Create(ResetType::Sticky, "CAM::completion_event");
port.completion_event =
system.Kernel().CreateEvent(ResetType::Sticky, "CAM::completion_event");
port.buffer_error_interrupt_event =
Event::Create(ResetType::OneShot, "CAM::buffer_error_interrupt_event");
system.Kernel().CreateEvent(ResetType::OneShot, "CAM::buffer_error_interrupt_event");
port.vsync_interrupt_event =
Event::Create(ResetType::OneShot, "CAM::vsync_interrupt_event");
system.Kernel().CreateEvent(ResetType::OneShot, "CAM::vsync_interrupt_event");
}
completion_event_callback = CoreTiming::RegisterEvent(
completion_event_callback = system.CoreTiming().RegisterEvent(
"CAM::CompletionEventCallBack",
[this](u64 userdata, s64 cycles_late) { CompletionEventCallBack(userdata, cycles_late); });
}
@ -1061,7 +1062,7 @@ std::shared_ptr<Module> GetModule(Core::System& system) {
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
auto cam = std::make_shared<Module>();
auto cam = std::make_shared<Module>(system);
std::make_shared<CAM_U>(cam)->InstallAsService(service_manager);
std::make_shared<CAM_S>(cam)->InstallAsService(service_manager);

View file

@ -21,8 +21,8 @@ namespace Camera {
class CameraInterface;
}
namespace CoreTiming {
struct EventType;
namespace Core {
struct TimingEventType;
}
namespace Kernel {
@ -241,7 +241,7 @@ static_assert(sizeof(PackageParameterWithContextDetail) == 28,
class Module final {
public:
Module();
explicit Module(Core::System& system);
~Module();
void ReloadCameraDevices();
@ -779,9 +779,10 @@ private:
void LoadCameraImplementation(CameraConfig& camera, int camera_id);
Core::System& system;
std::array<CameraConfig, NumCameras> cameras;
std::array<PortConfig, 2> ports;
CoreTiming::EventType* completion_event_callback;
Core::TimingEventType* completion_event_callback;
std::atomic<bool> is_camera_reload_pending{false};
};

View file

@ -97,7 +97,7 @@ void Module::Interface::Open(Kernel::HLERequestContext& ctx) {
if (path_type == CecDataPathType::MboxProgramId) {
std::vector<u8> program_id(8);
u64_le le_program_id = Kernel::g_current_process->codeset->program_id;
u64_le le_program_id = cecd->system.Kernel().GetCurrentProcess()->codeset->program_id;
std::memcpy(program_id.data(), &le_program_id, sizeof(u64));
session_data->file->Write(0, sizeof(u64), true, program_id.data());
session_data->file->Close();
@ -1351,10 +1351,11 @@ Module::SessionData::~SessionData() {
Module::Interface::Interface(std::shared_ptr<Module> cecd, const char* name, u32 max_session)
: ServiceFramework(name, max_session), cecd(std::move(cecd)) {}
Module::Module() {
Module::Module(Core::System& system) : system(system) {
using namespace Kernel;
cecinfo_event = Event::Create(Kernel::ResetType::OneShot, "CECD::cecinfo_event");
change_state_event = Event::Create(Kernel::ResetType::OneShot, "CECD::change_state_event");
cecinfo_event = system.Kernel().CreateEvent(Kernel::ResetType::OneShot, "CECD::cecinfo_event");
change_state_event =
system.Kernel().CreateEvent(Kernel::ResetType::OneShot, "CECD::change_state_event");
std::string nand_directory = FileUtil::GetUserPath(FileUtil::UserPath::NANDDir);
FileSys::ArchiveFactory_SystemSaveData systemsavedata_factory(nand_directory);
@ -1433,7 +1434,7 @@ Module::~Module() = default;
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
auto cecd = std::make_shared<Module>();
auto cecd = std::make_shared<Module>(system);
std::make_shared<CECD_NDM>(cecd)->InstallAsService(service_manager);
std::make_shared<CECD_S>(cecd)->InstallAsService(service_manager);
std::make_shared<CECD_U>(cecd)->InstallAsService(service_manager);

View file

@ -23,7 +23,7 @@ namespace Service::CECD {
class Module final {
public:
Module();
explicit Module(Core::System& system);
~Module();
enum class CecCommand : u32 {
@ -610,6 +610,8 @@ private:
Kernel::SharedPtr<Kernel::Event> cecinfo_event;
Kernel::SharedPtr<Kernel::Event> change_state_event;
Core::System& system;
};
/// Initialize CECD service(s)

View file

@ -19,10 +19,10 @@ void CSND_SND::Initialize(Kernel::HLERequestContext& ctx) {
const u32 offset3 = rp.Pop<u32>();
using Kernel::MemoryPermission;
mutex = Kernel::Mutex::Create(false, "CSND:mutex");
shared_memory = Kernel::SharedMemory::Create(nullptr, size, MemoryPermission::ReadWrite,
MemoryPermission::ReadWrite, 0,
Kernel::MemoryRegion::BASE, "CSND:SharedMemory");
mutex = system.Kernel().CreateMutex(false, "CSND:mutex");
shared_memory = system.Kernel().CreateSharedMemory(
nullptr, size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, 0,
Kernel::MemoryRegion::BASE, "CSND:SharedMemory");
IPC::RequestBuilder rb = rp.MakeBuilder(1, 3);
rb.Push(RESULT_SUCCESS);
@ -173,7 +173,7 @@ void CSND_SND::Reset(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_CSND, "(STUBBED) called");
}
CSND_SND::CSND_SND() : ServiceFramework("csnd:SND", 4) {
CSND_SND::CSND_SND(Core::System& system) : ServiceFramework("csnd:SND", 4), system(system) {
static const FunctionInfo functions[] = {
// clang-format off
{0x00010140, &CSND_SND::Initialize, "Initialize"},
@ -196,7 +196,7 @@ CSND_SND::CSND_SND() : ServiceFramework("csnd:SND", 4) {
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
std::make_shared<CSND_SND>()->InstallAsService(service_manager);
std::make_shared<CSND_SND>(system)->InstallAsService(service_manager);
}
} // namespace Service::CSND

View file

@ -16,7 +16,7 @@ namespace Service::CSND {
class CSND_SND final : public ServiceFramework<CSND_SND> {
public:
CSND_SND();
explicit CSND_SND(Core::System& system);
~CSND_SND() = default;
private:
@ -174,6 +174,8 @@ private:
};
static_assert(sizeof(Type0Command) == 0x20, "Type0Command structure size is wrong");
Core::System& system;
Kernel::SharedPtr<Kernel::Mutex> mutex = nullptr;
Kernel::SharedPtr<Kernel::SharedMemory> shared_memory = nullptr;

View file

@ -350,7 +350,7 @@ bool DSP_DSP::HasTooManyEventsRegistered() const {
return number >= max_number_of_interrupt_events;
}
DSP_DSP::DSP_DSP() : ServiceFramework("dsp::DSP", DefaultMaxSessions) {
DSP_DSP::DSP_DSP(Core::System& system) : ServiceFramework("dsp::DSP", DefaultMaxSessions) {
static const FunctionInfo functions[] = {
// clang-format off
{0x00010040, &DSP_DSP::RecvData, "RecvData"},
@ -391,7 +391,8 @@ DSP_DSP::DSP_DSP() : ServiceFramework("dsp::DSP", DefaultMaxSessions) {
RegisterHandlers(functions);
semaphore_event = Kernel::Event::Create(Kernel::ResetType::OneShot, "DSP_DSP::semaphore_event");
semaphore_event =
system.Kernel().CreateEvent(Kernel::ResetType::OneShot, "DSP_DSP::semaphore_event");
}
DSP_DSP::~DSP_DSP() {
@ -401,7 +402,7 @@ DSP_DSP::~DSP_DSP() {
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
auto dsp = std::make_shared<DSP_DSP>();
auto dsp = std::make_shared<DSP_DSP>(system);
dsp->InstallAsService(service_manager);
Core::DSP().SetServiceToInterrupt(std::move(dsp));
}

View file

@ -17,7 +17,7 @@ namespace Service::DSP {
class DSP_DSP final : public ServiceFramework<DSP_DSP> {
public:
DSP_DSP();
explicit DSP_DSP(Core::System& system);
~DSP_DSP();
/// There are three types of interrupts

View file

@ -244,7 +244,7 @@ ERR_F::~ERR_F() = default;
void InstallInterfaces(Core::System& system) {
auto errf = std::make_shared<ERR_F>(system);
errf->InstallAsNamedPort();
errf->InstallAsNamedPort(system.Kernel());
}
} // namespace Service::ERR

View file

@ -87,7 +87,7 @@ ResultVal<std::shared_ptr<File>> ArchiveManager::OpenFileFromArchive(ArchiveHand
if (backend.Failed())
return backend.Code();
auto file = std::shared_ptr<File>(new File(std::move(backend).Unwrap(), path));
auto file = std::shared_ptr<File>(new File(system, std::move(backend).Unwrap(), path));
return MakeResult<std::shared_ptr<File>>(std::move(file));
}
@ -348,7 +348,7 @@ void ArchiveManager::RegisterSelfNCCH(Loader::AppLoader& app_loader) {
factory->Register(app_loader);
}
ArchiveManager::ArchiveManager() {
ArchiveManager::ArchiveManager(Core::System& system) : system(system) {
RegisterArchiveTypes();
}

View file

@ -24,6 +24,10 @@ namespace Loader {
class AppLoader;
}
namespace Core {
class System;
}
namespace Service::FS {
/// Supported archive types
@ -50,7 +54,8 @@ using FileSys::ArchiveFactory;
class ArchiveManager {
public:
ArchiveManager();
explicit ArchiveManager(Core::System& system);
/**
* Opens an archive
* @param id_code IdCode of the archive to open
@ -224,6 +229,8 @@ public:
void RegisterSelfNCCH(Loader::AppLoader& app_loader);
private:
Core::System& system;
/**
* Registers an Archive type, instances of which can later be opened using its IdCode.
* @param factory File system backend interface to the archive

View file

@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/file_backend.h"
#include "core/hle/ipc_helpers.h"
@ -14,8 +15,9 @@
namespace Service::FS {
File::File(std::unique_ptr<FileSys::FileBackend>&& backend, const FileSys::Path& path)
: ServiceFramework("", 1), path(path), backend(std::move(backend)) {
File::File(Core::System& system, std::unique_ptr<FileSys::FileBackend>&& backend,
const FileSys::Path& path)
: ServiceFramework("", 1), path(path), backend(std::move(backend)), system(system) {
static const FunctionInfo functions[] = {
{0x08010100, &File::OpenSubFile, "OpenSubFile"},
{0x080200C2, &File::Read, "Read"},
@ -69,7 +71,8 @@ void File::Read(Kernel::HLERequestContext& ctx) {
rb.PushMappedBuffer(buffer);
std::chrono::nanoseconds read_timeout_ns{backend->GetReadDelayNs(length)};
ctx.SleepClientThread(Kernel::GetCurrentThread(), "file::read", read_timeout_ns,
ctx.SleepClientThread(system.Kernel().GetThreadManager().GetCurrentThread(), "file::read",
read_timeout_ns,
[](Kernel::SharedPtr<Kernel::Thread> thread,
Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) {
// Nothing to do here
@ -195,7 +198,7 @@ void File::OpenLinkFile(Kernel::HLERequestContext& ctx) {
using Kernel::SharedPtr;
IPC::RequestParser rp(ctx, 0x080C, 0, 0);
IPC::RequestBuilder rb = rp.MakeBuilder(1, 2);
auto sessions = ServerSession::CreateSessionPair(GetName());
auto sessions = system.Kernel().CreateSessionPair(GetName());
auto server = std::get<SharedPtr<ServerSession>>(sessions);
ClientConnected(server);
@ -243,7 +246,7 @@ void File::OpenSubFile(Kernel::HLERequestContext& ctx) {
using Kernel::ClientSession;
using Kernel::ServerSession;
using Kernel::SharedPtr;
auto sessions = ServerSession::CreateSessionPair(GetName());
auto sessions = system.Kernel().CreateSessionPair(GetName());
auto server = std::get<SharedPtr<ServerSession>>(sessions);
ClientConnected(server);
@ -258,7 +261,7 @@ void File::OpenSubFile(Kernel::HLERequestContext& ctx) {
}
Kernel::SharedPtr<Kernel::ClientSession> File::Connect() {
auto sessions = Kernel::ServerSession::CreateSessionPair(GetName());
auto sessions = system.Kernel().CreateSessionPair(GetName());
auto server = std::get<Kernel::SharedPtr<Kernel::ServerSession>>(sessions);
ClientConnected(server);
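
File::Connect above uses the same three-step session pattern that recurs throughout this change set: ask the kernel instance for a server/client session pair, attach the service's request handler to the server end, and hand the client end back to the guest. The sketch below pulls that pattern out into a free function; MakeSessionFor and the exact include paths are assumptions for illustration, while the member calls mirror the ones shown above:

```cpp
#include <tuple>
#include "core/core.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/service/fs/file.h"

// Hypothetical sketch of the session-pair pattern used by File::Connect/OpenLinkFile/OpenSubFile.
Kernel::SharedPtr<Kernel::ClientSession> MakeSessionFor(Core::System& system,
                                                        Service::FS::File& file) {
    // 1. The per-instance kernel creates the pair (no more static ServerSession::CreateSessionPair).
    auto sessions = system.Kernel().CreateSessionPair(file.GetName());

    // 2. The service keeps the server end and begins answering IPC requests on it.
    auto server = std::get<Kernel::SharedPtr<Kernel::ServerSession>>(sessions);
    file.ClientConnected(server);

    // 3. The client end is what gets handed back to the guest application.
    return std::get<Kernel::SharedPtr<Kernel::ClientSession>>(sessions);
}
```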

View file

@ -8,6 +8,10 @@
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/service.h"
namespace Core {
class System;
}
namespace Service::FS {
struct FileSessionSlot : public Kernel::SessionRequestHandler::SessionDataBase {
@ -21,7 +25,8 @@ struct FileSessionSlot : public Kernel::SessionRequestHandler::SessionDataBase {
// Consider splitting ServiceFramework interface.
class File final : public ServiceFramework<File, FileSessionSlot> {
public:
File(std::unique_ptr<FileSys::FileBackend>&& backend, const FileSys::Path& path);
File(Core::System& system, std::unique_ptr<FileSys::FileBackend>&& backend,
const FileSys::Path& path);
~File() = default;
std::string GetName() const {
@ -53,6 +58,8 @@ private:
void GetPriority(Kernel::HLERequestContext& ctx);
void OpenLinkFile(Kernel::HLERequestContext& ctx);
void OpenSubFile(Kernel::HLERequestContext& ctx);
Core::System& system;
};
} // namespace Service::FS

View file

@ -286,7 +286,7 @@ void FS_USER::OpenDirectory(Kernel::HLERequestContext& ctx) {
rb.Push(dir_res.Code());
if (dir_res.Succeeded()) {
std::shared_ptr<Directory> directory = *dir_res;
auto sessions = ServerSession::CreateSessionPair(directory->GetName());
auto sessions = system.Kernel().CreateSessionPair(directory->GetName());
directory->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions));
rb.PushMoveObjects(std::get<SharedPtr<ClientSession>>(sessions));
} else {
@ -619,7 +619,7 @@ void FS_USER::GetProgramLaunchInfo(Kernel::HLERequestContext& ctx) {
// TODO(Subv): The real FS service manages its own process list and only checks the processes
// that were registered with the 'fs:REG' service.
auto process = Kernel::GetProcessById(process_id);
auto process = system.Kernel().GetProcessById(process_id);
IPC::RequestBuilder rb = rp.MakeBuilder(5, 0);
@ -741,7 +741,8 @@ void FS_USER::GetSaveDataSecureValue(Kernel::HLERequestContext& ctx) {
rb.Push<u64>(0); // the secure value
}
FS_USER::FS_USER(ArchiveManager& archives) : ServiceFramework("fs:USER", 30), archives(archives) {
FS_USER::FS_USER(Core::System& system)
: ServiceFramework("fs:USER", 30), system(system), archives(system.ArchiveManager()) {
static const FunctionInfo functions[] = {
{0x000100C6, nullptr, "Dummy1"},
{0x040100C4, nullptr, "Control"},
@ -860,6 +861,6 @@ FS_USER::FS_USER(ArchiveManager& archives) : ServiceFramework("fs:USER", 30), ar
void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
std::make_shared<FS_USER>(system.ArchiveManager())->InstallAsService(service_manager);
std::make_shared<FS_USER>(system)->InstallAsService(service_manager);
}
} // namespace Service::FS

View file

@ -17,7 +17,7 @@ class ArchiveManager;
class FS_USER final : public ServiceFramework<FS_USER> {
public:
explicit FS_USER(ArchiveManager& archives);
explicit FS_USER(Core::System& system);
private:
void Initialize(Kernel::HLERequestContext& ctx);
@ -534,6 +534,7 @@ private:
u32 priority = -1; ///< For SetPriority and GetPriority service functions
Core::System& system;
ArchiveManager& archives;
};

View file

@ -11,6 +11,7 @@
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/result.h"
#include "core/hle/service/gsp/gsp_gpu.h"
#include "core/hw/gpu.h"
@ -731,7 +732,7 @@ void GSP_GPU::SetLedForceOff(Kernel::HLERequestContext& ctx) {
u8 state = rp.Pop<u8>();
system.GetSharedPageHandler()->Set3DLed(state);
system.Kernel().GetSharedPageHandler().Set3DLed(state);
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
rb.Push(RESULT_SUCCESS);
@ -786,9 +787,9 @@ GSP_GPU::GSP_GPU(Core::System& system) : ServiceFramework("gsp::Gpu", 2), system
RegisterHandlers(functions);
using Kernel::MemoryPermission;
shared_memory = Kernel::SharedMemory::Create(nullptr, 0x1000, MemoryPermission::ReadWrite,
MemoryPermission::ReadWrite, 0,
Kernel::MemoryRegion::BASE, "GSP:SharedMemory");
shared_memory = system.Kernel().CreateSharedMemory(
nullptr, 0x1000, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, 0,
Kernel::MemoryRegion::BASE, "GSP:SharedMemory");
first_initialization = true;
};

View file

@ -128,7 +128,7 @@ void Module::UpdatePadCallback(u64 userdata, s64 cycles_late) {
// If we just updated index 0, provide a new timestamp
if (mem->pad.index == 0) {
mem->pad.index_reset_ticks_previous = mem->pad.index_reset_ticks;
mem->pad.index_reset_ticks = (s64)CoreTiming::GetTicks();
mem->pad.index_reset_ticks = (s64)system.CoreTiming().GetTicks();
}
mem->touch.index = next_touch_index;
@ -152,7 +152,7 @@ void Module::UpdatePadCallback(u64 userdata, s64 cycles_late) {
// If we just updated index 0, provide a new timestamp
if (mem->touch.index == 0) {
mem->touch.index_reset_ticks_previous = mem->touch.index_reset_ticks;
mem->touch.index_reset_ticks = (s64)CoreTiming::GetTicks();
mem->touch.index_reset_ticks = (s64)system.CoreTiming().GetTicks();
}
// Signal both handles when there's an update to Pad or touch
@ -160,7 +160,7 @@ void Module::UpdatePadCallback(u64 userdata, s64 cycles_late) {
event_pad_or_touch_2->Signal();
// Reschedule recurrent event
CoreTiming::ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event);
system.CoreTiming().ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event);
}
void Module::UpdateAccelerometerCallback(u64 userdata, s64 cycles_late) {
@ -198,13 +198,14 @@ void Module::UpdateAccelerometerCallback(u64 userdata, s64 cycles_late) {
// If we just updated index 0, provide a new timestamp
if (mem->accelerometer.index == 0) {
mem->accelerometer.index_reset_ticks_previous = mem->accelerometer.index_reset_ticks;
mem->accelerometer.index_reset_ticks = (s64)CoreTiming::GetTicks();
mem->accelerometer.index_reset_ticks = (s64)system.CoreTiming().GetTicks();
}
event_accelerometer->Signal();
// Reschedule recurrent event
CoreTiming::ScheduleEvent(accelerometer_update_ticks - cycles_late, accelerometer_update_event);
system.CoreTiming().ScheduleEvent(accelerometer_update_ticks - cycles_late,
accelerometer_update_event);
}
void Module::UpdateGyroscopeCallback(u64 userdata, s64 cycles_late) {
@ -233,13 +234,13 @@ void Module::UpdateGyroscopeCallback(u64 userdata, s64 cycles_late) {
// If we just updated index 0, provide a new timestamp
if (mem->gyroscope.index == 0) {
mem->gyroscope.index_reset_ticks_previous = mem->gyroscope.index_reset_ticks;
mem->gyroscope.index_reset_ticks = (s64)CoreTiming::GetTicks();
mem->gyroscope.index_reset_ticks = (s64)system.CoreTiming().GetTicks();
}
event_gyroscope->Signal();
// Reschedule recurrent event
CoreTiming::ScheduleEvent(gyroscope_update_ticks - cycles_late, gyroscope_update_event);
system.CoreTiming().ScheduleEvent(gyroscope_update_ticks - cycles_late, gyroscope_update_event);
}
void Module::Interface::GetIPCHandles(Kernel::HLERequestContext& ctx) {
@ -257,7 +258,8 @@ void Module::Interface::EnableAccelerometer(Kernel::HLERequestContext& ctx) {
// Schedules the accelerometer update event if the accelerometer was just enabled
if (hid->enable_accelerometer_count == 1) {
CoreTiming::ScheduleEvent(accelerometer_update_ticks, hid->accelerometer_update_event);
hid->system.CoreTiming().ScheduleEvent(accelerometer_update_ticks,
hid->accelerometer_update_event);
}
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
@ -273,7 +275,7 @@ void Module::Interface::DisableAccelerometer(Kernel::HLERequestContext& ctx) {
// Unschedules the accelerometer update event if the accelerometer was just disabled
if (hid->enable_accelerometer_count == 0) {
CoreTiming::UnscheduleEvent(hid->accelerometer_update_event, 0);
hid->system.CoreTiming().UnscheduleEvent(hid->accelerometer_update_event, 0);
}
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
@ -289,7 +291,7 @@ void Module::Interface::EnableGyroscopeLow(Kernel::HLERequestContext& ctx) {
// Schedules the gyroscope update event if the gyroscope was just enabled
if (hid->enable_gyroscope_count == 1) {
CoreTiming::ScheduleEvent(gyroscope_update_ticks, hid->gyroscope_update_event);
hid->system.CoreTiming().ScheduleEvent(gyroscope_update_ticks, hid->gyroscope_update_event);
}
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
@ -305,7 +307,7 @@ void Module::Interface::DisableGyroscopeLow(Kernel::HLERequestContext& ctx) {
// Unschedules the gyroscope update event if the gyroscope was just disabled
if (hid->enable_gyroscope_count == 0) {
CoreTiming::UnscheduleEvent(hid->gyroscope_update_event, 0);
hid->system.CoreTiming().UnscheduleEvent(hid->gyroscope_update_event, 0);
}
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
@ -359,31 +361,33 @@ std::shared_ptr<Module> Module::Interface::GetModule() const {
Module::Module(Core::System& system) : system(system) {
using namespace Kernel;
shared_mem =
SharedMemory::Create(nullptr, 0x1000, MemoryPermission::ReadWrite, MemoryPermission::Read,
0, MemoryRegion::BASE, "HID:SharedMemory");
shared_mem = system.Kernel().CreateSharedMemory(nullptr, 0x1000, MemoryPermission::ReadWrite,
MemoryPermission::Read, 0, MemoryRegion::BASE,
"HID:SharedMemory");
// Create event handles
event_pad_or_touch_1 = Event::Create(ResetType::OneShot, "HID:EventPadOrTouch1");
event_pad_or_touch_2 = Event::Create(ResetType::OneShot, "HID:EventPadOrTouch2");
event_accelerometer = Event::Create(ResetType::OneShot, "HID:EventAccelerometer");
event_gyroscope = Event::Create(ResetType::OneShot, "HID:EventGyroscope");
event_debug_pad = Event::Create(ResetType::OneShot, "HID:EventDebugPad");
event_pad_or_touch_1 = system.Kernel().CreateEvent(ResetType::OneShot, "HID:EventPadOrTouch1");
event_pad_or_touch_2 = system.Kernel().CreateEvent(ResetType::OneShot, "HID:EventPadOrTouch2");
event_accelerometer = system.Kernel().CreateEvent(ResetType::OneShot, "HID:EventAccelerometer");
event_gyroscope = system.Kernel().CreateEvent(ResetType::OneShot, "HID:EventGyroscope");
event_debug_pad = system.Kernel().CreateEvent(ResetType::OneShot, "HID:EventDebugPad");
// Register update callbacks
Core::Timing& timing = system.CoreTiming();
pad_update_event =
CoreTiming::RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
UpdatePadCallback(userdata, cycles_late);
});
accelerometer_update_event = CoreTiming::RegisterEvent(
accelerometer_update_event = timing.RegisterEvent(
"HID::UpdateAccelerometerCallback", [this](u64 userdata, s64 cycles_late) {
UpdateAccelerometerCallback(userdata, cycles_late);
});
gyroscope_update_event = CoreTiming::RegisterEvent(
"HID::UpdateGyroscopeCallback",
[this](u64 userdata, s64 cycles_late) { UpdateGyroscopeCallback(userdata, cycles_late); });
gyroscope_update_event =
timing.RegisterEvent("HID::UpdateGyroscopeCallback", [this](u64 userdata, s64 cycles_late) {
UpdateGyroscopeCallback(userdata, cycles_late);
});
CoreTiming::ScheduleEvent(pad_update_ticks, pad_update_event);
timing.ScheduleEvent(pad_update_ticks, pad_update_event);
}
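
The constructor above shows the recurring-event idiom this change standardizes on: register a named callback with the per-instance Core::Timing once, schedule the first occurrence, and let the callback reschedule itself while compensating for how late it actually fired. A condensed, hypothetical sketch of just that idiom (the names here are illustrative, not Citra's):

```cpp
#include "common/common_types.h"
#include "core/core_timing.h"

// Illustrative only: the self-rescheduling pattern used by the HID, CAM, IR and timer callbacks.
namespace {
Core::TimingEventType* tick_event = nullptr;
}

void StartPolling(Core::Timing& timing, s64 interval_ticks) {
    tick_event = timing.RegisterEvent(
        "Example::Tick", [&timing, interval_ticks](u64 userdata, s64 cycles_late) {
            // ... periodic work would go here ...

            // Re-arm the event, subtracting the scheduling latency of this firing.
            timing.ScheduleEvent(interval_ticks - cycles_late, tick_event, userdata);
        });

    // Kick off the first occurrence.
    timing.ScheduleEvent(interval_ticks, tick_event);
}
```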
void Module::ReloadInputDevices() {

View file

@ -27,8 +27,8 @@ class Event;
class SharedMemory;
} // namespace Kernel
namespace CoreTiming {
struct EventType;
namespace Core {
struct TimingEventType;
};
namespace Service::HID {
@ -325,9 +325,9 @@ private:
int enable_accelerometer_count = 0; // positive means enabled
int enable_gyroscope_count = 0; // positive means enabled
CoreTiming::EventType* pad_update_event;
CoreTiming::EventType* accelerometer_update_event;
CoreTiming::EventType* gyroscope_update_event;
Core::TimingEventType* pad_update_event;
Core::TimingEventType* accelerometer_update_event;
Core::TimingEventType* gyroscope_update_event;
std::atomic<bool> is_device_reload_pending{true};
std::array<std::unique_ptr<Input::ButtonDevice>, Settings::NativeButton::NUM_BUTTONS_HID>

View file

@ -4,6 +4,7 @@
#include "common/alignment.h"
#include "common/string_util.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/service/ir/extra_hid.h"
#include "core/movie.h"
@ -144,11 +145,11 @@ ExtraHID::ExtraHID(SendFunc send_func) : IRDevice(send_func) {
0x65,
}};
hid_polling_callback_id =
CoreTiming::RegisterEvent("ExtraHID::SendHIDStatus", [this](u64, s64 cycles_late) {
hid_polling_callback_id = Core::System::GetInstance().CoreTiming().RegisterEvent(
"ExtraHID::SendHIDStatus", [this](u64, s64 cycles_late) {
SendHIDStatus();
CoreTiming::ScheduleEvent(msToCycles(hid_period) - cycles_late,
hid_polling_callback_id);
Core::System::GetInstance().CoreTiming().ScheduleEvent(
msToCycles(hid_period) - cycles_late, hid_polling_callback_id);
});
}
@ -159,7 +160,7 @@ ExtraHID::~ExtraHID() {
void ExtraHID::OnConnect() {}
void ExtraHID::OnDisconnect() {
CoreTiming::UnscheduleEvent(hid_polling_callback_id, 0);
Core::System::GetInstance().CoreTiming().UnscheduleEvent(hid_polling_callback_id, 0);
}
void ExtraHID::HandleConfigureHIDPollingRequest(const std::vector<u8>& request) {
@ -170,9 +171,10 @@ void ExtraHID::HandleConfigureHIDPollingRequest(const std::vector<u8>& request)
}
// Change HID input polling interval
CoreTiming::UnscheduleEvent(hid_polling_callback_id, 0);
Core::System::GetInstance().CoreTiming().UnscheduleEvent(hid_polling_callback_id, 0);
hid_period = request[1];
CoreTiming::ScheduleEvent(msToCycles(hid_period), hid_polling_callback_id);
Core::System::GetInstance().CoreTiming().ScheduleEvent(msToCycles(hid_period),
hid_polling_callback_id);
}
void ExtraHID::HandleReadCalibrationDataRequest(const std::vector<u8>& request_buf) {

View file

@ -11,9 +11,9 @@
#include "core/frontend/input.h"
#include "core/hle/service/ir/ir_user.h"
namespace CoreTiming {
struct EventType;
} // namespace CoreTiming
namespace Core {
struct TimingEventType;
} // namespace Core
namespace Service::IR {
@ -57,7 +57,7 @@ private:
void LoadInputDevices();
u8 hid_period;
CoreTiming::EventType* hid_polling_callback_id;
Core::TimingEventType* hid_polling_callback_id;
std::array<u8, 0x40> calibration_data;
std::unique_ptr<Input::ButtonDevice> zl;
std::unique_ptr<Input::ButtonDevice> zr;

View file

@ -16,10 +16,10 @@ void InstallInterfaces(Core::System& system) {
auto& service_manager = system.ServiceManager();
std::make_shared<IR_U>()->InstallAsService(service_manager);
auto ir_user = std::make_shared<IR_USER>();
auto ir_user = std::make_shared<IR_USER>(system);
ir_user->InstallAsService(service_manager);
auto ir_rst = std::make_shared<IR_RST>();
auto ir_rst = std::make_shared<IR_RST>(system);
ir_rst->InstallAsService(service_manager);
}

View file

@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/event.h"
@ -99,13 +100,13 @@ void IR_RST::UpdateCallback(u64 userdata, s64 cycles_late) {
// If we just updated index 0, provide a new timestamp
if (mem->index == 0) {
mem->index_reset_ticks_previous = mem->index_reset_ticks;
mem->index_reset_ticks = CoreTiming::GetTicks();
mem->index_reset_ticks = system.CoreTiming().GetTicks();
}
update_event->Signal();
// Reschedule recurrent event
CoreTiming::ScheduleEvent(msToCycles(update_period) - cycles_late, update_callback_id);
system.CoreTiming().ScheduleEvent(msToCycles(update_period) - cycles_late, update_callback_id);
}
void IR_RST::GetHandles(Kernel::HLERequestContext& ctx) {
@ -125,7 +126,7 @@ void IR_RST::Initialize(Kernel::HLERequestContext& ctx) {
next_pad_index = 0;
is_device_reload_pending.store(true);
CoreTiming::ScheduleEvent(msToCycles(update_period), update_callback_id);
system.CoreTiming().ScheduleEvent(msToCycles(update_period), update_callback_id);
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
rb.Push(RESULT_SUCCESS);
@ -136,7 +137,7 @@ void IR_RST::Initialize(Kernel::HLERequestContext& ctx) {
void IR_RST::Shutdown(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp(ctx, 0x03, 0, 0);
CoreTiming::UnscheduleEvent(update_callback_id, 0);
system.CoreTiming().UnscheduleEvent(update_callback_id, 0);
UnloadInputDevices();
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
@ -144,19 +145,18 @@ void IR_RST::Shutdown(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_IR, "called");
}
IR_RST::IR_RST() : ServiceFramework("ir:rst", 1) {
IR_RST::IR_RST(Core::System& system) : ServiceFramework("ir:rst", 1), system(system) {
using namespace Kernel;
// Note: these two kernel objects are even available before Initialize service function is
// called.
shared_memory =
SharedMemory::Create(nullptr, 0x1000, MemoryPermission::ReadWrite, MemoryPermission::Read,
0, MemoryRegion::BASE, "IRRST:SharedMemory");
update_event = Event::Create(ResetType::OneShot, "IRRST:UpdateEvent");
shared_memory = system.Kernel().CreateSharedMemory(nullptr, 0x1000, MemoryPermission::ReadWrite,
MemoryPermission::Read, 0,
MemoryRegion::BASE, "IRRST:SharedMemory");
update_event = system.Kernel().CreateEvent(ResetType::OneShot, "IRRST:UpdateEvent");
update_callback_id =
CoreTiming::RegisterEvent("IRRST:UpdateCallBack", [this](u64 userdata, s64 cycles_late) {
UpdateCallback(userdata, cycles_late);
});
update_callback_id = system.CoreTiming().RegisterEvent(
"IRRST:UpdateCallBack",
[this](u64 userdata, s64 cycles_late) { UpdateCallback(userdata, cycles_late); });
static const FunctionInfo functions[] = {
{0x00010000, &IR_RST::GetHandles, "GetHandles"},

View file

@ -18,8 +18,8 @@ class Event;
class SharedMemory;
} // namespace Kernel
namespace CoreTiming {
struct EventType;
namespace Core {
struct TimingEventType;
};
namespace Service::IR {
@ -39,7 +39,7 @@ union PadState {
/// Interface to "ir:rst" service
class IR_RST final : public ServiceFramework<IR_RST> {
public:
IR_RST();
explicit IR_RST(Core::System& system);
~IR_RST();
void ReloadInputDevices();
@ -77,10 +77,11 @@ private:
void UnloadInputDevices();
void UpdateCallback(u64 userdata, s64 cycles_late);
Core::System& system;
Kernel::SharedPtr<Kernel::Event> update_event;
Kernel::SharedPtr<Kernel::SharedMemory> shared_memory;
u32 next_pad_index{0};
CoreTiming::EventType* update_callback_id;
Core::TimingEventType* update_callback_id;
std::unique_ptr<Input::ButtonDevice> zl_button;
std::unique_ptr<Input::ButtonDevice> zr_button;
std::unique_ptr<Input::AnalogDevice> c_stick;

View file

@ -6,6 +6,7 @@
#include <boost/crc.hpp>
#include "common/string_util.h"
#include "common/swap.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/shared_memory.h"
@ -380,7 +381,7 @@ void IR_USER::ReleaseReceivedData(Kernel::HLERequestContext& ctx) {
LOG_TRACE(Service_IR, "called, count={}", count);
}
IR_USER::IR_USER() : ServiceFramework("ir:USER", 1) {
IR_USER::IR_USER(Core::System& system) : ServiceFramework("ir:USER", 1) {
const FunctionInfo functions[] = {
{0x00010182, nullptr, "InitializeIrNop"},
{0x00020000, &IR_USER::FinalizeIrNop, "FinalizeIrNop"},
@ -413,9 +414,9 @@ IR_USER::IR_USER() : ServiceFramework("ir:USER", 1) {
using namespace Kernel;
conn_status_event = Event::Create(ResetType::OneShot, "IR:ConnectionStatusEvent");
send_event = Event::Create(ResetType::OneShot, "IR:SendEvent");
receive_event = Event::Create(ResetType::OneShot, "IR:ReceiveEvent");
conn_status_event = system.Kernel().CreateEvent(ResetType::OneShot, "IR:ConnectionStatusEvent");
send_event = system.Kernel().CreateEvent(ResetType::OneShot, "IR:SendEvent");
receive_event = system.Kernel().CreateEvent(ResetType::OneShot, "IR:ReceiveEvent");
extra_hid =
std::make_unique<ExtraHID>([this](const std::vector<u8>& data) { PutToReceive(data); });

Some files were not shown because too many files have changed in this diff.