core: Fix clang build
Recent changes to the build system that flag more warnings as errors broke the build when compiling with clang. Fixes #4795
parent ca416a0fb8
commit be1954e04c
105 changed files with 906 additions and 667 deletions
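Most of the diff below applies one mechanical pattern: integer conversions that clang now rejects under the stricter warning set (signed/unsigned mismatches when indexing containers or passing arguments) are spelled out with static_cast, and a few value-returning paths gain an explicit return after UNREACHABLE(). The following is a minimal sketch of that warning class and the fix, not taken from the commit itself; the exact flags enabled by the build-system change are an assumption (something along the lines of clang's -Wsign-conversion being promoted to an error).

    #include <cstdint>

    namespace {
    std::uint32_t ReadCounter() {
        return 42U;
    }
    } // namespace

    int main() {
        // With conversion warnings treated as errors, the implicit
        // signed/unsigned conversion below would stop a clang build:
        //     std::int32_t value = ReadCounter();
        // The fix used throughout this commit is to make the cast explicit:
        const std::int32_t value = static_cast<std::int32_t>(ReadCounter());
        return value == 42 ? 0 : 1;
    }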
@@ -108,7 +108,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
 auto& monitor = system.Monitor();
 s32 updated_value;
 do {
-updated_value = monitor.ExclusiveRead32(current_core, address);
+updated_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));

 if (updated_value != value) {
 return ERR_INVALID_STATE;
@@ -129,7 +129,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
 updated_value = value;
 }
 }
-} while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
+} while (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(updated_value)));

 WakeThreads(waiting_threads, num_to_wake);
 return RESULT_SUCCESS;
@@ -68,7 +68,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
 generations[slot] = generation;
 objects[slot] = std::move(obj);

-Handle handle = generation | (slot << 15);
+const auto handle = static_cast<Handle>(generation | static_cast<u16>(slot << 15));
 return MakeResult<Handle>(handle);
 }

@@ -58,7 +58,7 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(

 {
 Handle event_handle = InvalidHandle;
-SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
+SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), static_cast<s64>(timeout));
 thread->SetHLECallback(
 [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
@@ -168,7 +168,7 @@ struct KernelCore::Impl {
 const auto type =
 static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
 auto thread_res =
-Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
+Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<s32>(i), 0,
 nullptr, std::move(init_func), init_func_parameter);

 suspend_threads[i] = std::move(thread_res).Unwrap();
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
 }
 UNREACHABLE();
+return 0;
 }

 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
 }
 UNREACHABLE();
+return 0;
 }

 } // namespace Kernel::Memory
@@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
 }

 // If we allocated more than we need, free some
-const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
+const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<u32>(heap_index))};
 if (allocated_pages > num_pages) {
 chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
 }
@@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa

 // Keep allocating until we've allocated all our pages
 for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
-const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
+const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<u32>(index))};

 while (num_pages >= pages_per_alloc) {
 // Allocate a block
@@ -33,11 +33,12 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
 }

 VAddr PageHeap::AllocateBlock(s32 index) {
-const std::size_t needed_size{blocks[index].GetSize()};
+const auto u_index = static_cast<std::size_t>(index);
+const auto needed_size{blocks[u_index].GetSize()};

-for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
-if (const VAddr addr{blocks[i].PopBlock()}; addr) {
-if (const std::size_t allocated_size{blocks[i].GetSize()};
+for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
+if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
+if (const std::size_t allocated_size = blocks[i].GetSize();
 allocated_size > needed_size) {
 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
 }
@@ -50,7 +51,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {

 void PageHeap::FreeBlock(VAddr block, s32 index) {
 do {
-block = blocks[index++].PushBlock(block);
+block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
 } while (block != 0);
 }

@@ -69,7 +70,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 VAddr after_start{end};
 VAddr after_end{end};
 while (big_index >= 0) {
-const std::size_t block_size{blocks[big_index].GetSize()};
+const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
 const VAddr big_start{Common::AlignUp((start), block_size)};
 const VAddr big_end{Common::AlignDown((end), block_size)};
 if (big_start < big_end) {
@@ -87,7 +88,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {

 // Free space before the big blocks
 for (s32 i{big_index - 1}; i >= 0; i--) {
-const std::size_t block_size{blocks[i].GetSize()};
+const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
 while (before_start + block_size <= before_end) {
 before_end -= block_size;
 FreeBlock(before_end, i);
@@ -96,7 +97,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {

 // Free space after the big blocks
 for (s32 i{big_index - 1}; i >= 0; i--) {
-const std::size_t block_size{blocks[i].GetSize()};
+const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
 while (after_start + block_size <= after_end) {
 FreeBlock(after_start, i);
 after_start += block_size;
@@ -34,7 +34,9 @@ public:

 static constexpr s32 GetBlockIndex(std::size_t num_pages) {
 for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+const auto shift_index = static_cast<std::size_t>(i);
+if (num_pages >=
+(static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
 return i;
 }
 }
@@ -86,7 +88,7 @@ private:

 // Set the bitmap pointers
 for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
-bit_storages[depth] = storage;
+bit_storages[static_cast<std::size_t>(depth)] = storage;
 size = Common::AlignUp(size, 64) / 64;
 storage += size;
 }
@@ -99,7 +101,7 @@ private:
 s32 depth{};

 do {
-const u64 v{bit_storages[depth][offset]};
+const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
 if (v == 0) {
 // Non-zero depth indicates that a previous level had a free block
 ASSERT(depth == 0);
@@ -125,7 +127,7 @@ private:
 constexpr bool ClearRange(std::size_t offset, std::size_t count) {
 const s32 depth{GetHighestDepthIndex()};
 const auto bit_ind{offset / 64};
-u64* bits{bit_storages[depth]};
+u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
 if (count < 64) {
 const auto shift{offset % 64};
 ASSERT(shift + count <= 64);
@@ -177,11 +179,11 @@ private:
 const auto which{offset % 64};
 const u64 mask{1ULL << which};

-u64* bit{std::addressof(bit_storages[depth][ind])};
+u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
 const u64 v{*bit};
 ASSERT((v & mask) == 0);
 *bit = v | mask;
-if (v) {
+if (v != 0) {
 break;
 }
 offset = ind;
@@ -195,12 +197,12 @@ private:
 const auto which{offset % 64};
 const u64 mask{1ULL << which};

-u64* bit{std::addressof(bit_storages[depth][ind])};
+u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
 u64 v{*bit};
 ASSERT((v & mask) != 0);
 v &= ~mask;
 *bit = v;
-if (v) {
+if (v != 0) {
 break;
 }
 offset = ind;
@@ -414,7 +414,8 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 const std::size_t remaining_pages{remaining_size / PageSize};

 if (process->GetResourceLimit() &&
-!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
+!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+static_cast<s64>(remaining_size))) {
 return ERR_RESOURCE_LIMIT_EXCEEDED;
 }

@@ -778,7 +779,8 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {

 auto process{system.Kernel().CurrentProcess()};
 if (process->GetResourceLimit() && delta != 0 &&
-!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
+!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+static_cast<s64>(delta))) {
 return ERR_RESOURCE_LIMIT_EXCEEDED;
 }

@@ -34,7 +34,7 @@ public:
 PhysicalCore& operator=(const PhysicalCore&) = delete;

 PhysicalCore(PhysicalCore&&) = default;
-PhysicalCore& operator=(PhysicalCore&&) = default;
+PhysicalCore& operator=(PhysicalCore&&) = delete;

 void Idle();
 /// Interrupt this physical core.
@@ -137,9 +137,10 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
 }

 u64 Process::GetTotalPhysicalMemoryAvailable() const {
-const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
-page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
-main_thread_stack_size};
+const u64 capacity{
+static_cast<u64>(resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory)) +
+page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+main_thread_stack_size};

 if (capacity < memory_usage_capacity) {
 return capacity;
@@ -279,12 +280,12 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
 // Set initial resource limits
 resource_limit->SetLimitValue(
 ResourceType::PhysicalMemory,
-kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
+static_cast<s64>(kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)));
 resource_limit->SetLimitValue(ResourceType::Threads, 608);
 resource_limit->SetLimitValue(ResourceType::Events, 700);
 resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
 resource_limit->SetLimitValue(ResourceType::Sessions, 894);
-ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
+ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(code_size)));

 // Create TLS region
 tls_region_address = CreateTLSRegion();
@@ -300,9 +301,9 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {

 ChangeStatus(ProcessStatus::Running);

-SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
+SetupMainThread(system, *this, static_cast<u32>(main_thread_priority), main_thread_stack_top);
 resource_limit->Reserve(ResourceType::Threads, 1);
-resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
+resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(main_thread_stack_size));
 }

 void Process::PrepareForTermination() {
@@ -363,7 +364,7 @@ VAddr Process::CreateTLSRegion() {
 ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
 Memory::MemoryState::ThreadLocal,
 Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
-.ValueOr(0)};
+.ValueOr(0U)};

 ASSERT(tls_page_addr);

@@ -43,8 +43,8 @@ void ResourceLimit::Release(ResourceType resource, u64 amount) {
 void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
 const std::size_t index{ResourceTypeToIndex(resource)};

-current[index] -= used_amount;
-available[index] -= available_amount;
+current[index] -= static_cast<s64>(used_amount);
+available[index] -= static_cast<s64>(available_amount);
 }

 std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
@@ -89,9 +89,11 @@ u32 GlobalScheduler::SelectThreads() {
 while (iter != suggested_queue[core_id].end()) {
 suggested = *iter;
 iter++;
-s32 suggested_core_id = suggested->GetProcessorID();
-Thread* top_thread =
-suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
+const s32 suggested_core_id = suggested->GetProcessorID();
+Thread* top_thread = suggested_core_id >= 0
+? top_threads[static_cast<u32>(suggested_core_id)]
+: nullptr;
+
 if (top_thread != suggested) {
 if (top_thread != nullptr &&
 top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
@@ -102,16 +104,19 @@ u32 GlobalScheduler::SelectThreads() {
 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
 break;
 }
+
 suggested = nullptr;
 migration_candidates[num_candidates++] = suggested_core_id;
 }
+
 // Step 3: Select a suggested thread from another core
 if (suggested == nullptr) {
 for (std::size_t i = 0; i < num_candidates; i++) {
-s32 candidate_core = migration_candidates[i];
+const auto candidate_core = static_cast<u32>(migration_candidates[i]);
 suggested = top_threads[candidate_core];
 auto it = scheduled_queue[candidate_core].begin();
-it++;
+++it;
+
 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
 if (next != nullptr) {
 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
@@ -128,7 +133,8 @@ u32 GlobalScheduler::SelectThreads() {

 idle_cores &= ~(1U << core_id);
 }
-u32 cores_needing_context_switch{};
+
+u32 cores_needing_context_switch = 0;
 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
 Scheduler& sched = kernel.Scheduler(core);
 ASSERT(top_threads[core] == nullptr ||
@@ -186,13 +192,16 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
 for (auto& thread : suggested_queue[core_id]) {
 const s32 source_core = thread->GetProcessorID();
 if (source_core >= 0) {
-if (current_threads[source_core] != nullptr) {
-if (thread == current_threads[source_core] ||
-current_threads[source_core]->GetPriority() < min_regular_priority) {
+const auto sanitized_source_core = static_cast<u32>(source_core);
+
+if (current_threads[sanitized_source_core] != nullptr) {
+if (thread == current_threads[sanitized_source_core] ||
+current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) {
 continue;
 }
 }
 }
+
 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
 next_thread->GetPriority() < thread->GetPriority()) {
 if (thread->GetPriority() <= priority) {
@@ -240,17 +249,25 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 for (std::size_t i = 0; i < current_threads.size(); i++) {
 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
 }
+
 for (auto& thread : suggested_queue[core_id]) {
 const s32 source_core = thread->GetProcessorID();
-if (source_core < 0 || thread == current_threads[source_core]) {
+if (source_core < 0) {
 continue;
 }
-if (current_threads[source_core] == nullptr ||
-current_threads[source_core]->GetPriority() >= min_regular_priority) {
+
+const auto sanitized_source_core = static_cast<u32>(source_core);
+if (thread == current_threads[sanitized_source_core]) {
+continue;
+}
+
+if (current_threads[sanitized_source_core] == nullptr ||
+current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) {
 winner = thread;
 }
 break;
 }
+
 if (winner != nullptr) {
 if (winner != yielding_thread) {
 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
@@ -292,17 +309,22 @@ void GlobalScheduler::PreemptThreads() {
 if (thread->GetPriority() != priority) {
 continue;
 }
+
 if (source_core >= 0) {
-Thread* next_thread = scheduled_queue[source_core].empty()
+const auto sanitized_source_core = static_cast<u32>(source_core);
+Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
 ? nullptr
-: scheduled_queue[source_core].front();
+: scheduled_queue[sanitized_source_core].front();
+
 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
 break;
 }
+
 if (next_thread == thread) {
 continue;
 }
 }
+
 if (current_thread != nullptr &&
 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
 winner = thread;
@@ -322,17 +344,22 @@ void GlobalScheduler::PreemptThreads() {
 if (thread->GetPriority() < priority) {
 continue;
 }
+
 if (source_core >= 0) {
-Thread* next_thread = scheduled_queue[source_core].empty()
+const auto sanitized_source_core = static_cast<u32>(source_core);
+Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
 ? nullptr
-: scheduled_queue[source_core].front();
+: scheduled_queue[sanitized_source_core].front();
+
 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
 break;
 }
+
 if (next_thread == thread) {
 continue;
 }
 }
+
 if (current_thread != nullptr &&
 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
 winner = thread;
@@ -352,11 +379,11 @@ void GlobalScheduler::PreemptThreads() {

 void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
 Core::EmuThreadHandle global_thread) {
-u32 current_core = global_thread.host_handle;
+const u32 current_core = global_thread.host_handle;
 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
 (current_core < Core::Hardware::NUM_CPU_CORES);
 while (cores_pending_reschedule != 0) {
-u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
 if (!must_context_switch || core != current_core) {
 auto& phys_core = kernel.PhysicalCore(core);
@@ -366,6 +393,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
 }
 cores_pending_reschedule &= ~(1U << core);
 }
+
 if (must_context_switch) {
 auto& core_scheduler = kernel.CurrentScheduler();
 kernel.ExitSVCProfile();
@@ -803,9 +831,11 @@ void Scheduler::Initialize() {
 std::string name = "Idle Thread Id:" + std::to_string(core_id);
 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
-nullptr, std::move(init_func), init_func_parameter);
+const auto type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+auto thread_res =
+Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast<s32>(core_id), 0,
+nullptr, std::move(init_func), init_func_parameter);
+
 idle_thread = std::move(thread_res).Unwrap();
 }

@@ -482,7 +482,8 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
 static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
 s32 handle_count, u32 timeout_high, Handle* index) {
 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
-return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
+return WaitSynchronization(system, index, handles_address, static_cast<u32>(handle_count),
+nano_seconds);
 }

 /// Resumes a thread waiting on WaitSynchronization
@@ -2002,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
 return ERR_INVALID_HANDLE;
 }

-*core = thread->GetIdealCore();
+*core = static_cast<u32>(thread->GetIdealCore());
 *mask = thread->GetAffinityMask();

 return RESULT_SUCCESS;
@@ -2070,7 +2071,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
 return ERR_INVALID_HANDLE;
 }

-return thread->SetCoreAndAffinityMask(core, affinity_mask);
+return thread->SetCoreAndAffinityMask(static_cast<s32>(core), affinity_mask);
 }

 static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
@@ -11,11 +11,11 @@

 namespace Kernel {

-static inline u64 Param(const Core::System& system, int n) {
+static inline u64 Param(const Core::System& system, std::size_t n) {
 return system.CurrentArmInterface().GetReg(n);
 }

-static inline u32 Param32(const Core::System& system, int n) {
+static inline u32 Param32(const Core::System& system, std::size_t n) {
 return static_cast<u32>(system.CurrentArmInterface().GetReg(n));
 }

@@ -29,7 +29,7 @@ static inline void FuncReturn(Core::System& system, u64 result) {
 }

 static inline void FuncReturn32(Core::System& system, u32 result) {
-system.CurrentArmInterface().SetReg(0, (u64)result);
+system.CurrentArmInterface().SetReg(0, static_cast<u64>(result));
 }

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -386,9 +386,10 @@ template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
 void SvcWrap32(Core::System& system) {
 Handle param_1 = 0;

-const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
-Param32(system, 2), Param32(system, 3), Param32(system, 4))
-.raw;
+const u32 retval =
+func(system, &param_1, Param32(system, 0), Param32(system, 1), Param32(system, 2),
+Param32(system, 3), static_cast<s32>(Param32(system, 4)))
+.raw;

 system.CurrentArmInterface().SetReg(1, param_1);
 FuncReturn(system, retval);
@@ -542,8 +543,8 @@ void SvcWrap32(Core::System& system) {
 template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
 void SvcWrap32(Core::System& system) {
 u32 param_1 = 0;
-const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
-Param32(system, 3), &param_1)
+const u32 retval = func(system, Param32(system, 0), Param32(system, 1),
+static_cast<s32>(Param32(system, 2)), Param32(system, 3), &param_1)
 .raw;
 system.CurrentArmInterface().SetReg(1, param_1);
 FuncReturn(system, retval);
@@ -51,7 +51,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
 // We found a ready object, acquire it and set the result value
 SynchronizationObject* object = itr->get();
 object->Acquire(thread);
-const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
+const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
 lock.CancelSleep();
 return {RESULT_SUCCESS, index};
 }
@@ -105,7 +105,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
 });
 ASSERT(itr != sync_objects.end());
 signaling_object->Acquire(thread);
-const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
+const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
 return {signaling_result, index};
 }
 return {signaling_result, -1};
@@ -525,7 +525,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
 if (old_affinity_mask != new_affinity_mask) {
 const s32 old_core = processor_id;
 if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
-if (static_cast<s32>(ideal_core) < 0) {
+if (ideal_core < 0) {
 processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
 } else {
 processor_id = ideal_core;
@@ -470,7 +470,7 @@ public:

 bool InvokeHLECallback(std::shared_ptr<Thread> thread);

-u32 GetIdealCore() const {
+s32 GetIdealCore() const {
 return ideal_core;
 }

@@ -654,8 +654,8 @@ private:

 Scheduler* scheduler = nullptr;

-u32 ideal_core{0xFFFFFFFF};
-u64 affinity_mask{0x1};
+s32 ideal_core = -1;
+u64 affinity_mask = 1;

 s32 ideal_core_override = -1;
 u64 affinity_mask_override = 0x1;