Revert "kernel: Various improvements to scheduler"

bunnei 2021-08-25 20:59:28 -07:00 committed by GitHub
parent 3843995ceb
commit 0c8594b225
23 changed files with 140 additions and 224 deletions


@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
const auto current_core = system.CurrentCoreIndex();
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
// TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
const auto current_core = system.CurrentCoreIndex();
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
// TODO(bunnei): We should call CanAccessAtomic(..) here.
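Both hunks above revert the core-index lookup inside the exclusive-monitor helpers from system.Kernel().CurrentPhysicalCoreIndex() back to system.CurrentCoreIndex(). As a hedged illustration of what DecrementIfLessThan does to the guest word (semantics only; the real helper goes through the ExclusiveRead32 monitor path shown above, not std::atomic):

#include <atomic>
#include <cstdint>

// Approximate semantics of DecrementIfLessThan on a guest 32-bit word.
// *out receives the value observed before any decrement takes effect.
bool DecrementIfLessThanSketch(std::atomic<std::int32_t>& word, std::int32_t* out,
                               std::int32_t value) {
    std::int32_t current = word.load(std::memory_order_acquire);
    while (current < value) {
        // Mirrors the exclusive-store retry: another core may race the update.
        if (word.compare_exchange_weak(current, current - 1, std::memory_order_seq_cst)) {
            break;
        }
    }
    *out = current;
    return true; // the real helper reports failure only when the guest access fails
}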


@@ -170,10 +170,6 @@ public:
}
}
const std::string& GetName() const {
return name;
}
private:
void RegisterWithKernel();
void UnregisterWithKernel();


@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
const auto current_core = system.CurrentCoreIndex();
// Load the value from the address.
const auto expected = monitor.ExclusiveRead32(current_core, address);


@@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
std::swap(m_table_size, saved_table_size);
@@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) {
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
if (this->IsValidHandle(handle)) {
@@ -63,7 +61,6 @@ bool KHandleTable::Remove(Handle handle) {
}
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
@@ -86,7 +83,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
}
ResultCode KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
@@ -97,7 +93,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
}
void KHandleTable::Unreserve(Handle handle) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -116,7 +111,6 @@ void KHandleTable::Unreserve(Handle handle) {
}
void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Unpack the handle.
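Every hunk in this file removes a KScopedDisableDispatch guard that sat in front of the table's KScopedSpinLock. Stacking the two guards relies on reverse destruction order: the spin lock is released before dispatch is re-enabled. A self-contained nesting illustration, with std::mutex standing in for both kernel primitives (not the kernel's actual locking code):

#include <mutex>

std::mutex dispatch_stand_in;   // stands in for KScopedDisableDispatch
std::mutex table_lock_stand_in; // stands in for KScopedSpinLock(m_lock)

void HandleTableCriticalSectionSketch() {
    std::lock_guard outer{dispatch_stand_in};   // "disable dispatch" for this scope
    std::lock_guard inner{table_lock_stand_in}; // then take the table lock
    // ... operate on the handle table ...
} // inner releases first, then outer: lock dropped before dispatch is re-enabled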


@@ -69,7 +69,6 @@ public:
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -124,7 +123,6 @@ public:
size_t num_opened;
{
// Lock the table.
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.


@@ -59,7 +59,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
thread->DisableDispatch();
auto& kernel = system.Kernel();
// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires


@@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
}
void KScheduler::DisableScheduling(KernelCore& kernel) {
ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
GetCurrentThreadPointer(kernel)->DisableDispatch();
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
scheduler->GetCurrentThread()->DisableDispatch();
}
}
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
GetCurrentThreadPointer(kernel)->EnableDispatch();
} else {
RescheduleCores(kernel, cores_needing_scheduling);
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
scheduler->GetCurrentThread()->EnableDispatch();
}
}
RescheduleCores(kernel, cores_needing_scheduling);
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
state.highest_priority_thread = nullptr;
}
void KScheduler::Finalize() {
KScheduler::~KScheduler() {
if (idle_thread) {
idle_thread->Close();
idle_thread = nullptr;
}
}
KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}
KThread* KScheduler::GetCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
@@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
if (phys_core.IsInterrupted()) {
phys_core.ClearInterrupt();
}
guard.Lock();
if (state.needs_scheduling.load()) {
Schedule();
} else {
GetCurrentThread()->EnableDispatch();
guard.Unlock();
}
}
@@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() {
}
void KScheduler::Unload(KThread* thread) {
ASSERT(thread);
LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
if (thread->IsCallingSvc()) {
thread->ClearIsCallingSvc();
if (thread) {
if (thread->IsCallingSvc()) {
thread->ClearIsCallingSvc();
}
if (!thread->IsTerminationRequested()) {
prev_thread = thread;
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
} else {
prev_thread = nullptr;
}
thread->context_guard.Unlock();
}
auto& physical_core = system.Kernel().PhysicalCore(core_id);
if (!physical_core.IsInitialized()) {
return;
}
Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
prev_thread = thread;
} else {
prev_thread = nullptr;
}
thread->context_guard.Unlock();
}
void KScheduler::Reload(KThread* thread) {
@@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
if (thread) {
ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
auto* const thread_owner_process = thread->GetOwnerProcess();
if (thread_owner_process != nullptr) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
}
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
@@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
}
void KScheduler::ScheduleImpl() {
KThread* previous_thread = GetCurrentThread();
KThread* previous_thread = current_thread.load();
KThread* next_thread = state.highest_priority_thread;
state.needs_scheduling = false;
@@ -723,15 +717,10 @@ void KScheduler::ScheduleImpl() {
// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
previous_thread->EnableDispatch();
guard.Unlock();
return;
}
if (next_thread->GetCurrentCore() != core_id) {
next_thread->SetCurrentCore(core_id);
}
current_thread.store(next_thread);
KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -742,7 +731,11 @@ void KScheduler::ScheduleImpl() {
Unload(previous_thread);
std::shared_ptr<Common::Fiber>* old_context;
old_context = &previous_thread->GetHostContext();
if (previous_thread != nullptr) {
old_context = &previous_thread->GetHostContext();
} else {
old_context = &idle_thread->GetHostContext();
}
guard.Unlock();
Common::Fiber::YieldTo(*old_context, *switch_fiber);
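The DisableScheduling/EnableScheduling hunk above is the core of this revert: the removed version kept a per-thread disable-dispatch count and only called RescheduleCores once the outermost caller released it, whereas the restored version reschedules unconditionally. A hedged, self-contained sketch of that counting idea (simplified names, not the emulator's types):

#include <cassert>

struct DispatchCountSketch {
    int disable_count = 0;
    int reschedules_issued = 0;

    void DisableScheduling() { ++disable_count; }

    void EnableScheduling() {
        assert(disable_count >= 1);
        if (disable_count > 1) {
            --disable_count; // nested caller: just release one level
        } else {
            ++reschedules_issued; // outermost caller: stands in for RescheduleCores();
            --disable_count;      // in the removed code the final release happens as
                                  // part of the actual context switch
        }
    }
};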


@@ -33,8 +33,6 @@ public:
explicit KScheduler(Core::System& system_, s32 core_id_);
~KScheduler();
void Finalize();
/// Reschedules to the next available thread (call after current thread is suspended)
void RescheduleCurrentCore();


@@ -14,7 +14,6 @@
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
@@ -189,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Setup the stack parameters.
StackParameters& sp = GetStackParameters();
sp.cur_thread = this;
sp.disable_count = 0;
sp.disable_count = 1;
SetInExceptionHandler();
// Set thread ID.
@@ -216,10 +215,9 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize emulation parameters.
// Initialize host context.
thread->host_context =
std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
return ResultSuccess;
}
@@ -972,9 +970,6 @@ ResultCode KThread::Run() {
// Set our state and finish.
SetState(ThreadState::Runnable);
DisableDispatch();
return ResultSuccess;
}
}
@@ -1059,16 +1054,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
return GetCurrentThread(kernel).GetCurrentCore();
}
KScopedDisableDispatch::~KScopedDisableDispatch() {
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
auto scheduler = kernel.CurrentScheduler();
if (scheduler) {
scheduler->RescheduleCurrentCore();
}
} else {
GetCurrentThread(kernel).EnableDispatch();
}
}
} // namespace Kernel


@@ -450,39 +450,16 @@ public:
sleeping_queue = q;
}
[[nodiscard]] bool IsKernelThread() const {
return GetActiveCore() == 3;
}
[[nodiscard]] bool IsDispatchTrackingDisabled() const {
return is_single_core || IsKernelThread();
}
[[nodiscard]] s32 GetDisableDispatchCount() const {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return 1;
}
return this->GetStackParameters().disable_count;
}
void DisableDispatch() {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return;
}
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
void EnableDispatch() {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return;
}
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
@@ -731,7 +708,6 @@ private:
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -776,16 +752,4 @@ public:
}
};
class KScopedDisableDispatch {
public:
[[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
GetCurrentThread(kernel).DisableDispatch();
}
~KScopedDisableDispatch();
private:
KernelCore& kernel;
};
} // namespace Kernel
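KScopedDisableDispatch, deleted just above, is a small RAII guard: its constructor bumps the current thread's disable-dispatch count, and its destructor (shown being removed earlier in this diff) either re-enables dispatch or, when it is the outermost guard, asks the current scheduler to reschedule. A minimal sketch of that shape, using a hypothetical thread stand-in rather than the real KernelCore/KThread types:

#include <cassert>

struct ThreadStandIn {
    int disable_count = 0;
    bool reschedule_requested = false;

    void DisableDispatch() { ++disable_count; }
    void EnableDispatch() { assert(disable_count > 0); --disable_count; }
};

class ScopedDisableDispatchSketch {
public:
    explicit ScopedDisableDispatchSketch(ThreadStandIn& t) : thread{t} {
        thread.DisableDispatch();
    }
    ~ScopedDisableDispatchSketch() {
        if (thread.disable_count <= 1) {
            // The removed destructor calls scheduler->RescheduleCurrentCore() here and
            // lets the scheduler drop the final count; the sketch keeps the bookkeeping
            // local so it stays balanced on its own.
            thread.reschedule_requested = true;
        }
        thread.EnableDispatch();
    }
private:
    ThreadStandIn& thread;
};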


@@ -85,9 +85,8 @@ struct KernelCore::Impl {
}
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
cores[core_id].Initialize(current_process->Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
for (auto& core : cores) {
core.Initialize(current_process->Is64BitProcess());
}
}
@@ -132,6 +131,15 @@ struct KernelCore::Impl {
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
}
schedulers[core_id].reset();
}
cores.clear();
global_handle_table->Finalize();
@@ -159,16 +167,6 @@ struct KernelCore::Impl {
CleanupObject(time_shared_mem);
CleanupObject(system_resource_limit);
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
}
schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
@@ -259,6 +257,14 @@ struct KernelCore::Impl {
void MakeCurrentProcess(KProcess* process) {
current_process = process;
if (process == nullptr) {
return;
}
const u32 core_id = GetCurrentHostThreadID();
if (core_id < Core::Hardware::NUM_CPU_CORES) {
system.Memory().SetCurrentPageTable(*process, core_id);
}
}
static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -821,20 +827,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
return impl->cores[id];
}
size_t KernelCore::CurrentPhysicalCoreIndex() const {
const u32 core_id = impl->GetCurrentHostThreadID();
if (core_id >= Core::Hardware::NUM_CPU_CORES) {
return Core::Hardware::NUM_CPU_CORES - 1;
}
return core_id;
}
Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
return impl->cores[CurrentPhysicalCoreIndex()];
u32 core_id = impl->GetCurrentHostThreadID();
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
return impl->cores[core_id];
}
const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
return impl->cores[CurrentPhysicalCoreIndex()];
u32 core_id = impl->GetCurrentHostThreadID();
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
return impl->cores[core_id];
}
Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1027,9 +1029,6 @@ void KernelCore::Suspend(bool in_suspention) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
if (!should_suspend) {
impl->suspend_threads[core_id]->DisableDispatch();
}
}
}
}
@@ -1044,11 +1043,13 @@ void KernelCore::ExceptionalExit() {
}
void KernelCore::EnterSVCProfile() {
impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
std::size_t core = impl->GetCurrentHostThreadID();
impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}
void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
std::size_t core = impl->GetCurrentHostThreadID();
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
}
std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
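The CurrentPhysicalCoreIndex hunk above is the behavioural pivot of this file: the removed helper clamped host threads that are not CPU-core threads to the last core, while the restored accessors assert that only core threads ever ask. A small sketch of the removed clamping behaviour (kNumCpuCores mirrors Core::Hardware::NUM_CPU_CORES, which is 4 on the emulated hardware; the function name is illustrative):

#include <cstddef>

constexpr std::size_t kNumCpuCores = 4; // Core::Hardware::NUM_CPU_CORES

// Host threads beyond the four core threads are treated as running on the
// last core instead of tripping an assert.
std::size_t ClampedPhysicalCoreIndex(std::size_t host_thread_id) {
    return host_thread_id >= kNumCpuCores ? kNumCpuCores - 1 : host_thread_id;
}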


@@ -146,9 +146,6 @@ public:
/// Gets an instance of the respective physical CPU core.
const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
/// Gets the current physical core index for the running host thread.
std::size_t CurrentPhysicalCoreIndex() const;
/// Gets the sole instance of the Scheduler at the current running core.
Kernel::KScheduler* CurrentScheduler();


@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const u64 thread_ticks = current_thread->GetCpuTime();
out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
} else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
} else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
}
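For reference, the tick accounting above adds the ticks accumulated over past context switches to the ticks elapsed since the current context was switched in; a hypothetical helper restating that arithmetic:

#include <cstdint>

std::uint64_t ThreadTicksSketch(std::uint64_t accumulated_thread_ticks,
                                std::uint64_t current_cpu_ticks,
                                std::uint64_t ticks_at_last_context_switch) {
    return accumulated_thread_ticks + (current_cpu_ticks - ticks_at_last_context_switch);
}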