Move solution and projects to src

TSR Berry 2023-04-08 01:22:00 +02:00 committed by Mary
parent cd124bda58
commit cee7121058
3466 changed files with 55 additions and 55 deletions

View file

@ -0,0 +1,7 @@
namespace Ryujinx.HLE.HOS.Kernel.Common
{
interface IKFutureSchedulerObject
{
void TimeUp();
}
}

View file

@ -0,0 +1,73 @@
using Ryujinx.Horizon.Common;
using System.Diagnostics;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
class KAutoObject
{
protected KernelContext KernelContext;
private int _referenceCount;
public KAutoObject(KernelContext context)
{
KernelContext = context;
_referenceCount = 1;
}
public virtual Result SetName(string name)
{
if (!KernelContext.AutoObjectNames.TryAdd(name, this))
{
return KernelResult.InvalidState;
}
return Result.Success;
}
public static Result RemoveName(KernelContext context, string name)
{
if (!context.AutoObjectNames.TryRemove(name, out _))
{
return KernelResult.NotFound;
}
return Result.Success;
}
public static KAutoObject FindNamedObject(KernelContext context, string name)
{
if (context.AutoObjectNames.TryGetValue(name, out KAutoObject obj))
{
return obj;
}
return null;
}
public void IncrementReferenceCount()
{
int newRefCount = Interlocked.Increment(ref _referenceCount);
Debug.Assert(newRefCount >= 2);
}
public void DecrementReferenceCount()
{
int newRefCount = Interlocked.Decrement(ref _referenceCount);
Debug.Assert(newRefCount >= 0);
if (newRefCount == 0)
{
Destroy();
}
}
protected virtual void Destroy()
{
}
}
}
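
A minimal usage sketch of the reference-counting contract above (illustrative only; the KernelContext instance named context is assumed and not constructed here). Objects start with a count of 1, and Destroy() runs when the count falls back to zero:

KAutoObject obj = new KAutoObject(context); // reference count starts at 1
obj.IncrementReferenceCount();              // a second holder takes a reference (count: 2)
obj.DecrementReferenceCount();              // the second holder releases it (count: 1)
obj.DecrementReferenceCount();              // count reaches 0, Destroy() is invoked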

View file

@ -0,0 +1,188 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Horizon.Common;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
class KResourceLimit : KAutoObject
{
private const int DefaultTimeoutMs = 10000; // 10s
private readonly long[] _current;
private readonly long[] _limit;
private readonly long[] _current2;
private readonly long[] _peak;
private readonly object _lock;
private readonly LinkedList<KThread> _waitingThreads;
private int _waitingThreadsCount;
public KResourceLimit(KernelContext context) : base(context)
{
_current = new long[(int)LimitableResource.Count];
_limit = new long[(int)LimitableResource.Count];
_current2 = new long[(int)LimitableResource.Count];
_peak = new long[(int)LimitableResource.Count];
_lock = new object();
_waitingThreads = new LinkedList<KThread>();
}
public bool Reserve(LimitableResource resource, ulong amount)
{
return Reserve(resource, (long)amount);
}
public bool Reserve(LimitableResource resource, long amount)
{
return Reserve(resource, amount, KTimeManager.ConvertMillisecondsToNanoseconds(DefaultTimeoutMs));
}
public bool Reserve(LimitableResource resource, long amount, long timeout)
{
long endTimePoint = KTimeManager.ConvertNanosecondsToMilliseconds(timeout);
endTimePoint += PerformanceCounter.ElapsedMilliseconds;
bool success = false;
int index = GetIndex(resource);
lock (_lock)
{
if (_current2[index] >= _limit[index])
{
return false;
}
long newCurrent = _current[index] + amount;
while (newCurrent > _limit[index] && _current2[index] + amount <= _limit[index])
{
_waitingThreadsCount++;
KConditionVariable.Wait(KernelContext, _waitingThreads, _lock, timeout);
_waitingThreadsCount--;
newCurrent = _current[index] + amount;
if (timeout >= 0 && PerformanceCounter.ElapsedMilliseconds > endTimePoint)
{
break;
}
}
if (newCurrent <= _limit[index])
{
_current[index] = newCurrent;
_current2[index] += amount;
if (_current[index] > _peak[index])
{
_peak[index] = _current[index];
}
success = true;
}
}
return success;
}
public void Release(LimitableResource resource, ulong amount)
{
Release(resource, (long)amount);
}
public void Release(LimitableResource resource, long amount)
{
Release(resource, amount, amount);
}
public void Release(LimitableResource resource, long amount, long amount2)
{
int index = GetIndex(resource);
lock (_lock)
{
_current[index] -= amount;
_current2[index] -= amount2;
if (_waitingThreadsCount > 0)
{
KConditionVariable.NotifyAll(KernelContext, _waitingThreads);
}
}
}
public long GetRemainingValue(LimitableResource resource)
{
int index = GetIndex(resource);
lock (_lock)
{
return _limit[index] - _current[index];
}
}
public long GetCurrentValue(LimitableResource resource)
{
int index = GetIndex(resource);
lock (_lock)
{
return _current[index];
}
}
public long GetLimitValue(LimitableResource resource)
{
int index = GetIndex(resource);
lock (_lock)
{
return _limit[index];
}
}
public long GetPeakValue(LimitableResource resource)
{
int index = GetIndex(resource);
lock (_lock)
{
return _peak[index];
}
}
public Result SetLimitValue(LimitableResource resource, long limit)
{
int index = GetIndex(resource);
lock (_lock)
{
if (_current[index] <= limit)
{
_limit[index] = limit;
_peak[index] = _current[index];
return Result.Success;
}
else
{
return KernelResult.InvalidState;
}
}
}
private static int GetIndex(LimitableResource resource)
{
return (int)resource;
}
}
}
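
A sketch of how callers are expected to pair Reserve and Release on the limit above (illustrative; the KernelContext instance and the real error handling are assumed, not shown):

KResourceLimit limit = new KResourceLimit(context);
limit.SetLimitValue(LimitableResource.Thread, 800);
if (limit.Reserve(LimitableResource.Thread, 1)) // may wait up to the 10 s default timeout when at the limit
{
    try
    {
        // ... create the kernel thread ...
    }
    finally
    {
        limit.Release(LimitableResource.Thread, 1);
    }
}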

View file

@ -0,0 +1,35 @@
using Ryujinx.HLE.HOS.Kernel.Threading;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
class KSynchronizationObject : KAutoObject
{
public LinkedList<KThread> WaitingThreads { get; }
public KSynchronizationObject(KernelContext context) : base(context)
{
WaitingThreads = new LinkedList<KThread>();
}
public LinkedListNode<KThread> AddWaitingThread(KThread thread)
{
return WaitingThreads.AddLast(thread);
}
public void RemoveWaitingThread(LinkedListNode<KThread> node)
{
WaitingThreads.Remove(node);
}
public virtual void Signal()
{
KernelContext.Synchronization.SignalObject(this);
}
public virtual bool IsSignaled()
{
return false;
}
}
}

View file

@ -0,0 +1,78 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
static class KSystemControl
{
private const ulong KiB = 1024;
private const ulong MiB = 1024 * KiB;
private const ulong GiB = 1024 * MiB;
private const ulong PageSize = 4 * KiB;
private const ulong RequiredNonSecureSystemPoolSizeVi = 0x2238 * PageSize;
private const ulong RequiredNonSecureSystemPoolSizeNvservices = 0x710 * PageSize;
private const ulong RequiredNonSecureSystemPoolSizeOther = 0x80 * PageSize;
private const ulong RequiredNonSecureSystemPoolSize =
RequiredNonSecureSystemPoolSizeVi +
RequiredNonSecureSystemPoolSizeNvservices +
RequiredNonSecureSystemPoolSizeOther;
public static ulong GetApplicationPoolSize(MemoryArrange arrange)
{
return arrange switch
{
MemoryArrange.MemoryArrange4GiB or
MemoryArrange.MemoryArrange4GiBSystemDev or
MemoryArrange.MemoryArrange6GiBAppletDev => 3285 * MiB,
MemoryArrange.MemoryArrange4GiBAppletDev => 2048 * MiB,
MemoryArrange.MemoryArrange6GiB or
MemoryArrange.MemoryArrange8GiB => 4916 * MiB,
_ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\".")
};
}
public static ulong GetAppletPoolSize(MemoryArrange arrange)
{
return arrange switch
{
MemoryArrange.MemoryArrange4GiB => 507 * MiB,
MemoryArrange.MemoryArrange4GiBAppletDev => 1554 * MiB,
MemoryArrange.MemoryArrange4GiBSystemDev => 448 * MiB,
MemoryArrange.MemoryArrange6GiB => 562 * MiB,
MemoryArrange.MemoryArrange6GiBAppletDev or
MemoryArrange.MemoryArrange8GiB => 2193 * MiB,
_ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\".")
};
}
public static ulong GetMinimumNonSecureSystemPoolSize()
{
return RequiredNonSecureSystemPoolSize;
}
public static ulong GetDramEndAddress(MemorySize size)
{
return DramMemoryMap.DramBase + GetDramSize(size);
}
public static ulong GenerateRandom()
{
// TODO
return 0;
}
public static ulong GetDramSize(MemorySize size)
{
return size switch
{
MemorySize.MemorySize4GiB => 4 * GiB,
MemorySize.MemorySize6GiB => 6 * GiB,
MemorySize.MemorySize8GiB => 8 * GiB,
_ => throw new ArgumentException($"Invalid memory size \"{size}\".")
};
}
}
}
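
For reference, worked values returned by the helpers above for the 4 GiB retail configuration (derived from the constants in this file; illustrative only):

ulong applicationPool = KSystemControl.GetApplicationPoolSize(MemoryArrange.MemoryArrange4GiB); // 3285 MiB
ulong appletPool = KSystemControl.GetAppletPoolSize(MemoryArrange.MemoryArrange4GiB);           // 507 MiB
ulong dramEnd = KSystemControl.GetDramEndAddress(MemorySize.MemorySize4GiB);                    // 0x8000_0000 + 4 GiB = 0x1_8000_0000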

View file

@ -0,0 +1,218 @@
using Ryujinx.Common;
using System;
using System.Collections.Generic;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
class KTimeManager : IDisposable
{
public static readonly long DefaultTimeIncrementNanoseconds = ConvertGuestTicksToNanoseconds(2);
private class WaitingObject
{
public IKFutureSchedulerObject Object { get; }
public long TimePoint { get; }
public WaitingObject(IKFutureSchedulerObject schedulerObj, long timePoint)
{
Object = schedulerObj;
TimePoint = timePoint;
}
}
private readonly KernelContext _context;
private readonly List<WaitingObject> _waitingObjects;
private AutoResetEvent _waitEvent;
private bool _keepRunning;
private long _enforceWakeupFromSpinWait;
private const long NanosecondsPerSecond = 1000000000L;
private const long NanosecondsPerMillisecond = 1000000L;
public KTimeManager(KernelContext context)
{
_context = context;
_waitingObjects = new List<WaitingObject>();
_keepRunning = true;
Thread work = new Thread(WaitAndCheckScheduledObjects)
{
Name = "HLE.TimeManager"
};
work.Start();
}
public void ScheduleFutureInvocation(IKFutureSchedulerObject schedulerObj, long timeout)
{
long startTime = PerformanceCounter.ElapsedTicks;
long timePoint = startTime + ConvertNanosecondsToHostTicks(timeout);
if (timePoint < startTime)
{
timePoint = long.MaxValue;
}
lock (_context.CriticalSection.Lock)
{
_waitingObjects.Add(new WaitingObject(schedulerObj, timePoint));
if (timeout < NanosecondsPerMillisecond)
{
Interlocked.Exchange(ref _enforceWakeupFromSpinWait, 1);
}
}
_waitEvent.Set();
}
public void UnscheduleFutureInvocation(IKFutureSchedulerObject schedulerObj)
{
lock (_context.CriticalSection.Lock)
{
for (int index = _waitingObjects.Count - 1; index >= 0; index--)
{
if (_waitingObjects[index].Object == schedulerObj)
{
_waitingObjects.RemoveAt(index);
}
}
}
}
private void WaitAndCheckScheduledObjects()
{
SpinWait spinWait = new SpinWait();
WaitingObject next;
using (_waitEvent = new AutoResetEvent(false))
{
while (_keepRunning)
{
lock (_context.CriticalSection.Lock)
{
Interlocked.Exchange(ref _enforceWakeupFromSpinWait, 0);
next = GetNextWaitingObject();
}
if (next != null)
{
long timePoint = PerformanceCounter.ElapsedTicks;
if (next.TimePoint > timePoint)
{
long ms = Math.Min((next.TimePoint - timePoint) / PerformanceCounter.TicksPerMillisecond, int.MaxValue);
if (ms > 0)
{
_waitEvent.WaitOne((int)ms);
}
else
{
while (Interlocked.Read(ref _enforceWakeupFromSpinWait) != 1 && PerformanceCounter.ElapsedTicks < next.TimePoint)
{
// Our time is close - don't let SpinWait go off and potentially Thread.Sleep().
if (spinWait.NextSpinWillYield)
{
Thread.Yield();
spinWait.Reset();
}
else
{
spinWait.SpinOnce();
}
}
spinWait.Reset();
}
}
bool timeUp = PerformanceCounter.ElapsedTicks >= next.TimePoint;
if (timeUp)
{
lock (_context.CriticalSection.Lock)
{
if (_waitingObjects.Remove(next))
{
next.Object.TimeUp();
}
}
}
}
else
{
_waitEvent.WaitOne();
}
}
}
}
private WaitingObject GetNextWaitingObject()
{
WaitingObject selected = null;
long lowestTimePoint = long.MaxValue;
for (int index = _waitingObjects.Count - 1; index >= 0; index--)
{
WaitingObject current = _waitingObjects[index];
if (current.TimePoint <= lowestTimePoint)
{
selected = current;
lowestTimePoint = current.TimePoint;
}
}
return selected;
}
public static long ConvertNanosecondsToMilliseconds(long time)
{
time /= NanosecondsPerMillisecond;
if ((ulong)time > int.MaxValue)
{
return int.MaxValue;
}
return time;
}
public static long ConvertMillisecondsToNanoseconds(long time)
{
return time * NanosecondsPerMillisecond;
}
public static long ConvertNanosecondsToHostTicks(long ns)
{
long nsDiv = ns / NanosecondsPerSecond;
long nsMod = ns % NanosecondsPerSecond;
long tickDiv = PerformanceCounter.TicksPerSecond / NanosecondsPerSecond;
long tickMod = PerformanceCounter.TicksPerSecond % NanosecondsPerSecond;
long baseTicks = (nsMod * tickMod + PerformanceCounter.TicksPerSecond - 1) / NanosecondsPerSecond;
return (nsDiv * tickDiv) * NanosecondsPerSecond + nsDiv * tickMod + nsMod * tickDiv + baseTicks;
}
public static long ConvertGuestTicksToNanoseconds(long ticks)
{
return (long)Math.Ceiling(ticks * (1000000000.0 / 19200000.0));
}
public static long ConvertHostTicksToTicks(long time)
{
return (long)((time / (double)PerformanceCounter.TicksPerSecond) * 19200000.0);
}
public void Dispose()
{
_keepRunning = false;
_waitEvent?.Set();
}
}
}
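
The conversion helpers above assume a 19.2 MHz guest counter (19,200,000 ticks per second, matching KernelConstants.CounterFrequency later in this diff). A few worked values:

long tickNs = KTimeManager.ConvertGuestTicksToNanoseconds(1);        // ceil(1e9 / 19.2e6) = 53 ns
long quantum = KTimeManager.ConvertGuestTicksToNanoseconds(2);       // 105 ns, the DefaultTimeIncrementNanoseconds value
long timeoutNs = KTimeManager.ConvertMillisecondsToNanoseconds(10);  // 10,000,000 ns
long ms = KTimeManager.ConvertNanosecondsToMilliseconds(1_500_000);  // 1 ms (integer division truncates)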

View file

@ -0,0 +1,89 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.Horizon.Common;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
static class KernelInit
{
private readonly struct MemoryRegion
{
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddress => Address + Size;
public MemoryRegion(ulong address, ulong size)
{
Address = address;
Size = size;
}
}
public static void InitializeResourceLimit(KResourceLimit resourceLimit, MemorySize size)
{
void EnsureSuccess(Result result)
{
if (result != Result.Success)
{
throw new InvalidOperationException($"Unexpected result \"{result}\".");
}
}
ulong ramSize = KSystemControl.GetDramSize(size);
EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Memory, (long)ramSize));
EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Thread, 800));
EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Event, 700));
EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.TransferMemory, 200));
EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Session, 900));
if (!resourceLimit.Reserve(LimitableResource.Memory, 0) ||
!resourceLimit.Reserve(LimitableResource.Memory, 0x60000))
{
throw new InvalidOperationException("Unexpected failure reserving memory on resource limit.");
}
}
public static KMemoryRegionManager[] GetMemoryRegions(MemorySize size, MemoryArrange arrange)
{
ulong poolEnd = KSystemControl.GetDramEndAddress(size);
ulong applicationPoolSize = KSystemControl.GetApplicationPoolSize(arrange);
ulong appletPoolSize = KSystemControl.GetAppletPoolSize(arrange);
MemoryRegion servicePool;
MemoryRegion nvServicesPool;
MemoryRegion appletPool;
MemoryRegion applicationPool;
ulong nvServicesPoolSize = KSystemControl.GetMinimumNonSecureSystemPoolSize();
applicationPool = new MemoryRegion(poolEnd - applicationPoolSize, applicationPoolSize);
ulong nvServicesPoolEnd = applicationPool.Address - appletPoolSize;
nvServicesPool = new MemoryRegion(nvServicesPoolEnd - nvServicesPoolSize, nvServicesPoolSize);
appletPool = new MemoryRegion(nvServicesPoolEnd, appletPoolSize);
// Note: There is an extra region used by the kernel; however, since we are
// doing HLE we are not going to use that memory, so give all the remaining
// memory space to services.
ulong servicePoolSize = nvServicesPool.Address - DramMemoryMap.SlabHeapEnd;
servicePool = new MemoryRegion(DramMemoryMap.SlabHeapEnd, servicePoolSize);
return new KMemoryRegionManager[]
{
GetMemoryRegion(applicationPool),
GetMemoryRegion(appletPool),
GetMemoryRegion(servicePool),
GetMemoryRegion(nvServicesPool)
};
}
private static KMemoryRegionManager GetMemoryRegion(MemoryRegion region)
{
return new KMemoryRegionManager(region.Address, region.Size, region.EndAddress);
}
}
}
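
A worked example of the pool layout GetMemoryRegions computes for the 4 GiB retail configuration, derived from the constants in this diff (illustrative values):

// poolEnd         = 0x1_8000_0000                  (DramBase + 4 GiB)
// applicationPool = [0xB2B0_0000, 0x1_8000_0000)   3285 MiB
// appletPool      = [0x9300_0000, 0xB2B0_0000)     507 MiB
// nvServicesPool  = [0x9063_8000, 0x9300_0000)     0x29C8 pages (the non-secure system pool minimum)
// servicePool     = [0x80B0_6000, 0x9063_8000)     everything left above the slab heap end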

View file

@ -0,0 +1,73 @@
using Ryujinx.Cpu;
using Ryujinx.HLE.HOS.Kernel.Process;
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
static class KernelTransfer
{
public static bool UserToKernel<T>(out T value, ulong address) where T : unmanaged
{
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (currentProcess.CpuMemory.IsRangeMapped(address, (ulong)Unsafe.SizeOf<T>()))
{
value = currentProcess.CpuMemory.Read<T>(address);
return true;
}
value = default;
return false;
}
public static bool UserToKernelArray<T>(ulong address, Span<T> values) where T : unmanaged
{
KProcess currentProcess = KernelStatic.GetCurrentProcess();
Span<byte> data = MemoryMarshal.Cast<T, byte>(values);
if (currentProcess.CpuMemory.IsRangeMapped(address, (ulong)data.Length))
{
currentProcess.CpuMemory.Read(address, data);
return true;
}
return false;
}
public static bool UserToKernelString(out string value, ulong address, uint size)
{
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (currentProcess.CpuMemory.IsRangeMapped(address, size))
{
value = MemoryHelper.ReadAsciiString(currentProcess.CpuMemory, address, size);
return true;
}
value = null;
return false;
}
public static bool KernelToUser<T>(ulong address, T value) where T: unmanaged
{
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (currentProcess.CpuMemory.IsRangeMapped(address, (ulong)Unsafe.SizeOf<T>()))
{
currentProcess.CpuMemory.Write(address, value);
return true;
}
return false;
}
}
}
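
Each helper above follows the same pattern: check that the guest range is mapped, then reinterpret the bytes as an unmanaged value. A self-contained sketch of that reinterpretation step, without the Ryujinx memory manager (names are illustrative, not a Ryujinx API):

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
static class TransferSketch
{
    public static bool TryReadStruct<T>(ReadOnlySpan<byte> source, out T value) where T : unmanaged
    {
        if (source.Length < Unsafe.SizeOf<T>())
        {
            // Mirrors the IsRangeMapped check above: refuse to read past the buffer.
            value = default;
            return false;
        }
        value = MemoryMarshal.Read<T>(source);
        return true;
    }
}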

View file

@ -0,0 +1,13 @@
namespace Ryujinx.HLE.HOS.Kernel.Common
{
enum LimitableResource : byte
{
Memory = 0,
Thread = 1,
Event = 2,
TransferMemory = 3,
Session = 4,
Count = 5
}
}

View file

@ -0,0 +1,12 @@
namespace Ryujinx.HLE.HOS.Kernel.Common
{
enum MemoryArrange : byte
{
MemoryArrange4GiB,
MemoryArrange4GiBAppletDev,
MemoryArrange4GiBSystemDev,
MemoryArrange6GiB,
MemoryArrange6GiBAppletDev,
MemoryArrange8GiB
}
}

View file

@ -0,0 +1,9 @@
namespace Ryujinx.HLE.HOS.Kernel.Common
{
enum MemorySize : byte
{
MemorySize4GiB = 0,
MemorySize6GiB = 1,
MemorySize8GiB = 2
}
}

View file

@ -0,0 +1,128 @@
using System.Numerics;
namespace Ryujinx.HLE.HOS.Kernel.Common
{
class MersenneTwister
{
private int _index;
private uint[] _mt;
public MersenneTwister(uint seed)
{
_mt = new uint[624];
_mt[0] = seed;
for (int mtIdx = 1; mtIdx < _mt.Length; mtIdx++)
{
uint prev = _mt[mtIdx - 1];
_mt[mtIdx] = (uint)(0x6c078965 * (prev ^ (prev >> 30)) + mtIdx);
}
_index = _mt.Length;
}
public long GenRandomNumber(long min, long max)
{
long range = max - min;
if (min == max)
{
return min;
}
if (range == -1)
{
// Increment would cause an overflow; handle it as a special case.
return GenRandomNumber(2, 2, 32, 0xffffffffu, 0xffffffffu);
}
range++;
// This is log2(Range) plus one.
int nextRangeLog2 = 64 - BitOperations.LeadingZeroCount((ulong)range);
// If Range is already a power of 2, subtract one to use log2(Range) directly.
int rangeLog2 = nextRangeLog2 - (BitOperations.IsPow2(range) ? 1 : 0);
int parts = rangeLog2 > 32 ? 2 : 1;
int bitsPerPart = rangeLog2 / parts;
int fullParts = parts - (rangeLog2 - parts * bitsPerPart);
uint mask = 0xffffffffu >> (32 - bitsPerPart);
uint maskPlus1 = 0xffffffffu >> (31 - bitsPerPart);
long randomNumber;
do
{
randomNumber = GenRandomNumber(parts, fullParts, bitsPerPart, mask, maskPlus1);
}
while ((ulong)randomNumber >= (ulong)range);
return min + randomNumber;
}
private long GenRandomNumber(
int parts,
int fullParts,
int bitsPerPart,
uint mask,
uint maskPlus1)
{
long randomNumber = 0;
int part = 0;
for (; part < fullParts; part++)
{
randomNumber <<= bitsPerPart;
randomNumber |= GenRandomNumber() & mask;
}
for (; part < parts; part++)
{
randomNumber <<= bitsPerPart + 1;
randomNumber |= GenRandomNumber() & maskPlus1;
}
return randomNumber;
}
private uint GenRandomNumber()
{
if (_index >= _mt.Length)
{
Twist();
}
uint value = _mt[_index++];
value ^= value >> 11;
value ^= (value << 7) & 0x9d2c5680;
value ^= (value << 15) & 0xefc60000;
value ^= value >> 18;
return value;
}
private void Twist()
{
for (int mtIdx = 0; mtIdx < _mt.Length; mtIdx++)
{
uint value = (_mt[mtIdx] & 0x80000000) + (_mt[(mtIdx + 1) % _mt.Length] & 0x7fffffff);
_mt[mtIdx] = _mt[(mtIdx + 397) % _mt.Length] ^ (value >> 1);
if ((value & 1) != 0)
{
_mt[mtIdx] ^= 0x9908b0df;
}
}
_index = 0;
}
}
}
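
A worked example of the bit-partitioning done by GenRandomNumber(0, 9) above:

// range         = 10
// nextRangeLog2 = 64 - LeadingZeroCount(10) = 4   (10 needs 4 bits)
// rangeLog2     = 4                               (10 is not a power of two)
// parts = 1, bitsPerPart = 4, fullParts = 1
// mask  = 0xffffffff >> (32 - 4) = 0xF
// The generator draws 4-bit values (0..15) and rejects anything >= 10,
// so every value in [0, 9] is returned with equal probability.
MersenneTwister mt = new MersenneTwister(42);
long sample = mt.GenRandomNumber(0, 9); // always in [0, 9]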

View file

@ -0,0 +1,10 @@
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
enum ChannelState
{
NotInitialized,
Open,
ClientDisconnected,
ServerDisconnected
}
}

View file

@ -0,0 +1,20 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KBufferDescriptor
{
public ulong ClientAddress { get; }
public ulong ServerAddress { get; }
public ulong Size { get; }
public MemoryState State { get; }
public KBufferDescriptor(ulong src, ulong dst, ulong size, MemoryState state)
{
ClientAddress = src;
ServerAddress = dst;
Size = size;
State = state;
}
}
}

View file

@ -0,0 +1,217 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.Horizon.Common;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KBufferDescriptorTable
{
private const int MaxInternalBuffersCount = 8;
private List<KBufferDescriptor> _sendBufferDescriptors;
private List<KBufferDescriptor> _receiveBufferDescriptors;
private List<KBufferDescriptor> _exchangeBufferDescriptors;
public KBufferDescriptorTable()
{
_sendBufferDescriptors = new List<KBufferDescriptor>(MaxInternalBuffersCount);
_receiveBufferDescriptors = new List<KBufferDescriptor>(MaxInternalBuffersCount);
_exchangeBufferDescriptors = new List<KBufferDescriptor>(MaxInternalBuffersCount);
}
public Result AddSendBuffer(ulong src, ulong dst, ulong size, MemoryState state)
{
return Add(_sendBufferDescriptors, src, dst, size, state);
}
public Result AddReceiveBuffer(ulong src, ulong dst, ulong size, MemoryState state)
{
return Add(_receiveBufferDescriptors, src, dst, size, state);
}
public Result AddExchangeBuffer(ulong src, ulong dst, ulong size, MemoryState state)
{
return Add(_exchangeBufferDescriptors, src, dst, size, state);
}
private Result Add(List<KBufferDescriptor> list, ulong src, ulong dst, ulong size, MemoryState state)
{
if (list.Count < MaxInternalBuffersCount)
{
list.Add(new KBufferDescriptor(src, dst, size, state));
return Result.Success;
}
return KernelResult.OutOfMemory;
}
public Result CopyBuffersToClient(KPageTableBase memoryManager)
{
Result result = CopyToClient(memoryManager, _receiveBufferDescriptors);
if (result != Result.Success)
{
return result;
}
return CopyToClient(memoryManager, _exchangeBufferDescriptors);
}
private Result CopyToClient(KPageTableBase memoryManager, List<KBufferDescriptor> list)
{
foreach (KBufferDescriptor desc in list)
{
MemoryState stateMask;
switch (desc.State)
{
case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
default: return KernelResult.InvalidCombination;
}
MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached;
if (desc.State == MemoryState.IpcBuffer0)
{
attributeMask |= MemoryAttribute.DeviceMapped;
}
ulong clientAddrTruncated = BitUtils.AlignDown<ulong>(desc.ClientAddress, KPageTableBase.PageSize);
ulong clientAddrRounded = BitUtils.AlignUp<ulong>(desc.ClientAddress, KPageTableBase.PageSize);
// Check if the address is unaligned; in that case we need to perform two copies.
if (clientAddrTruncated != clientAddrRounded)
{
ulong copySize = clientAddrRounded - desc.ClientAddress;
if (copySize > desc.Size)
{
copySize = desc.Size;
}
Result result = memoryManager.CopyDataFromCurrentProcess(
desc.ClientAddress,
copySize,
stateMask,
stateMask,
KMemoryPermission.ReadAndWrite,
attributeMask,
MemoryAttribute.None,
desc.ServerAddress);
if (result != Result.Success)
{
return result;
}
}
ulong clientEndAddr = desc.ClientAddress + desc.Size;
ulong serverEndAddr = desc.ServerAddress + desc.Size;
ulong clientEndAddrTruncated = BitUtils.AlignDown<ulong>(clientEndAddr, (ulong)KPageTableBase.PageSize);
ulong clientEndAddrRounded = BitUtils.AlignUp<ulong>(clientEndAddr, KPageTableBase.PageSize);
ulong serverEndAddrTruncated = BitUtils.AlignDown<ulong>(serverEndAddr, (ulong)KPageTableBase.PageSize);
if (clientEndAddrTruncated < clientEndAddrRounded &&
(clientAddrTruncated == clientAddrRounded || clientAddrTruncated < clientEndAddrTruncated))
{
Result result = memoryManager.CopyDataFromCurrentProcess(
clientEndAddrTruncated,
clientEndAddr - clientEndAddrTruncated,
stateMask,
stateMask,
KMemoryPermission.ReadAndWrite,
attributeMask,
MemoryAttribute.None,
serverEndAddrTruncated);
if (result != Result.Success)
{
return result;
}
}
}
return Result.Success;
}
public Result UnmapServerBuffers(KPageTableBase memoryManager)
{
Result result = UnmapServer(memoryManager, _sendBufferDescriptors);
if (result != Result.Success)
{
return result;
}
result = UnmapServer(memoryManager, _receiveBufferDescriptors);
if (result != Result.Success)
{
return result;
}
return UnmapServer(memoryManager, _exchangeBufferDescriptors);
}
private Result UnmapServer(KPageTableBase memoryManager, List<KBufferDescriptor> list)
{
foreach (KBufferDescriptor descriptor in list)
{
Result result = memoryManager.UnmapNoAttributeIfStateEquals(
descriptor.ServerAddress,
descriptor.Size,
descriptor.State);
if (result != Result.Success)
{
return result;
}
}
return Result.Success;
}
public Result RestoreClientBuffers(KPageTableBase memoryManager)
{
Result result = RestoreClient(memoryManager, _sendBufferDescriptors);
if (result != Result.Success)
{
return result;
}
result = RestoreClient(memoryManager, _receiveBufferDescriptors);
if (result != Result.Success)
{
return result;
}
return RestoreClient(memoryManager, _exchangeBufferDescriptors);
}
private Result RestoreClient(KPageTableBase memoryManager, List<KBufferDescriptor> list)
{
foreach (KBufferDescriptor descriptor in list)
{
Result result = memoryManager.UnmapIpcRestorePermission(
descriptor.ClientAddress,
descriptor.Size,
descriptor.State);
if (result != Result.Success)
{
return result;
}
}
return Result.Success;
}
}
}
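
A worked example of the alignment arithmetic in CopyToClient above, using illustrative addresses and assuming KPageTableBase.PageSize is 0x1000:

const ulong PageSize = 0x1000;
ulong clientAddress = 0x100123;
ulong size = 0x3000;
ulong clientAddrTruncated = clientAddress & ~(PageSize - 1);                  // 0x100000
ulong clientAddrRounded = (clientAddress + PageSize - 1) & ~(PageSize - 1);   // 0x101000
ulong headCopySize = clientAddrRounded - clientAddress;                       // 0xEDD bytes
ulong clientEndAddr = clientAddress + size;                                   // 0x103123
ulong clientEndAddrTruncated = clientEndAddr & ~(PageSize - 1);               // 0x103000
ulong tailCopySize = clientEndAddr - clientEndAddrTruncated;                  // 0x123 bytes
// Only the unaligned head and tail are copied back to the client; the
// page-aligned middle range [0x101000, 0x103000) is handled by the
// mapping/unmapping path rather than by a byte copy.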

View file

@ -0,0 +1,144 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KClientPort : KSynchronizationObject
{
private int _sessionsCount;
private readonly int _maxSessions;
private readonly KPort _parent;
public bool IsLight => _parent.IsLight;
public KClientPort(KernelContext context, KPort parent, int maxSessions) : base(context)
{
_maxSessions = maxSessions;
_parent = parent;
}
public Result Connect(out KClientSession clientSession)
{
clientSession = null;
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (currentProcess.ResourceLimit != null &&
!currentProcess.ResourceLimit.Reserve(LimitableResource.Session, 1))
{
return KernelResult.ResLimitExceeded;
}
if (!IncrementSessionsCount())
{
currentProcess.ResourceLimit?.Release(LimitableResource.Session, 1);
return KernelResult.SessionCountExceeded;
}
KSession session = new KSession(KernelContext, this);
Result result = _parent.EnqueueIncomingSession(session.ServerSession);
if (result != Result.Success)
{
session.ClientSession.DecrementReferenceCount();
session.ServerSession.DecrementReferenceCount();
return result;
}
clientSession = session.ClientSession;
return result;
}
public Result ConnectLight(out KLightClientSession clientSession)
{
clientSession = null;
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (currentProcess.ResourceLimit != null &&
!currentProcess.ResourceLimit.Reserve(LimitableResource.Session, 1))
{
return KernelResult.ResLimitExceeded;
}
if (!IncrementSessionsCount())
{
currentProcess.ResourceLimit?.Release(LimitableResource.Session, 1);
return KernelResult.SessionCountExceeded;
}
KLightSession session = new KLightSession(KernelContext);
Result result = _parent.EnqueueIncomingLightSession(session.ServerSession);
if (result != Result.Success)
{
session.ClientSession.DecrementReferenceCount();
session.ServerSession.DecrementReferenceCount();
return result;
}
clientSession = session.ClientSession;
return result;
}
private bool IncrementSessionsCount()
{
while (true)
{
int currentCount = _sessionsCount;
if (currentCount < _maxSessions)
{
if (Interlocked.CompareExchange(ref _sessionsCount, currentCount + 1, currentCount) == currentCount)
{
return true;
}
}
else
{
return false;
}
}
}
public void Disconnect()
{
KernelContext.CriticalSection.Enter();
SignalIfMaximumReached(Interlocked.Decrement(ref _sessionsCount));
KernelContext.CriticalSection.Leave();
}
private void SignalIfMaximumReached(int value)
{
if (value == _maxSessions)
{
Signal();
}
}
public new static Result RemoveName(KernelContext context, string name)
{
KAutoObject foundObj = FindNamedObject(context, name);
if (!(foundObj is KClientPort))
{
return KernelResult.NotFound;
}
return KAutoObject.RemoveName(context, name);
}
}
}
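
IncrementSessionsCount above is a lock-free compare-and-swap loop that only bumps the counter while it is below the limit. A stand-alone sketch of the same pattern (illustrative names, not a Ryujinx API):

using System.Threading;
static class BoundedCounter
{
    public static bool TryIncrement(ref int counter, int max)
    {
        while (true)
        {
            int current = Volatile.Read(ref counter);
            if (current >= max)
            {
                return false; // already at the limit
            }
            if (Interlocked.CompareExchange(ref counter, current + 1, current) == current)
            {
                return true; // won the race and reserved a slot
            }
            // Another thread changed the counter first; retry.
        }
    }
}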

View file

@ -0,0 +1,84 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Horizon.Common;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KClientSession : KSynchronizationObject
{
public KProcess CreatorProcess { get; }
private KSession _parent;
public ChannelState State { get; set; }
public KClientPort ParentPort { get; }
public KClientSession(KernelContext context, KSession parent, KClientPort parentPort) : base(context)
{
_parent = parent;
ParentPort = parentPort;
parentPort?.IncrementReferenceCount();
State = ChannelState.Open;
CreatorProcess = KernelStatic.GetCurrentProcess();
CreatorProcess.IncrementReferenceCount();
}
public Result SendSyncRequest(ulong customCmdBuffAddr = 0, ulong customCmdBuffSize = 0)
{
KThread currentThread = KernelStatic.GetCurrentThread();
KSessionRequest request = new KSessionRequest(currentThread, customCmdBuffAddr, customCmdBuffSize);
KernelContext.CriticalSection.Enter();
currentThread.SignaledObj = null;
currentThread.ObjSyncResult = Result.Success;
Result result = _parent.ServerSession.EnqueueRequest(request);
KernelContext.CriticalSection.Leave();
if (result == Result.Success)
{
result = currentThread.ObjSyncResult;
}
return result;
}
public Result SendAsyncRequest(KWritableEvent asyncEvent, ulong customCmdBuffAddr = 0, ulong customCmdBuffSize = 0)
{
KThread currentThread = KernelStatic.GetCurrentThread();
KSessionRequest request = new KSessionRequest(currentThread, customCmdBuffAddr, customCmdBuffSize, asyncEvent);
KernelContext.CriticalSection.Enter();
Result result = _parent.ServerSession.EnqueueRequest(request);
KernelContext.CriticalSection.Leave();
return result;
}
public void DisconnectFromPort()
{
if (ParentPort != null)
{
ParentPort.Disconnect();
ParentPort.DecrementReferenceCount();
}
}
protected override void Destroy()
{
_parent.DisconnectClient();
_parent.DecrementReferenceCount();
}
}
}

View file

@ -0,0 +1,14 @@
using Ryujinx.HLE.HOS.Kernel.Common;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KLightClientSession : KAutoObject
{
private readonly KLightSession _parent;
public KLightClientSession(KernelContext context, KLightSession parent) : base(context)
{
_parent = parent;
}
}
}

View file

@ -0,0 +1,14 @@
using Ryujinx.HLE.HOS.Kernel.Common;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KLightServerSession : KAutoObject
{
private readonly KLightSession _parent;
public KLightServerSession(KernelContext context, KLightSession parent) : base(context)
{
_parent = parent;
}
}
}

View file

@ -0,0 +1,16 @@
using Ryujinx.HLE.HOS.Kernel.Common;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KLightSession : KAutoObject
{
public KLightServerSession ServerSession { get; }
public KLightClientSession ClientSession { get; }
public KLightSession(KernelContext context) : base(context)
{
ServerSession = new KLightServerSession(context, this);
ClientSession = new KLightClientSession(context, this);
}
}
}

View file

@ -0,0 +1,72 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Horizon.Common;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KPort : KAutoObject
{
public KServerPort ServerPort { get; }
public KClientPort ClientPort { get; }
private string _name;
private ChannelState _state;
public bool IsLight { get; private set; }
public KPort(KernelContext context, int maxSessions, bool isLight, string name) : base(context)
{
ServerPort = new KServerPort(context, this);
ClientPort = new KClientPort(context, this, maxSessions);
IsLight = isLight;
_name = name;
_state = ChannelState.Open;
}
public Result EnqueueIncomingSession(KServerSession session)
{
Result result;
KernelContext.CriticalSection.Enter();
if (_state == ChannelState.Open)
{
ServerPort.EnqueueIncomingSession(session);
result = Result.Success;
}
else
{
result = KernelResult.PortClosed;
}
KernelContext.CriticalSection.Leave();
return result;
}
public Result EnqueueIncomingLightSession(KLightServerSession session)
{
Result result;
KernelContext.CriticalSection.Enter();
if (_state == ChannelState.Open)
{
ServerPort.EnqueueIncomingLightSession(session);
result = Result.Success;
}
else
{
result = KernelResult.PortClosed;
}
KernelContext.CriticalSection.Leave();
return result;
}
}
}

View file

@ -0,0 +1,87 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KServerPort : KSynchronizationObject
{
private readonly LinkedList<KServerSession> _incomingConnections;
private readonly LinkedList<KLightServerSession> _lightIncomingConnections;
private readonly KPort _parent;
public bool IsLight => _parent.IsLight;
public KServerPort(KernelContext context, KPort parent) : base(context)
{
_parent = parent;
_incomingConnections = new LinkedList<KServerSession>();
_lightIncomingConnections = new LinkedList<KLightServerSession>();
}
public void EnqueueIncomingSession(KServerSession session)
{
AcceptIncomingConnection(_incomingConnections, session);
}
public void EnqueueIncomingLightSession(KLightServerSession session)
{
AcceptIncomingConnection(_lightIncomingConnections, session);
}
private void AcceptIncomingConnection<T>(LinkedList<T> list, T session)
{
KernelContext.CriticalSection.Enter();
list.AddLast(session);
if (list.Count == 1)
{
Signal();
}
KernelContext.CriticalSection.Leave();
}
public KServerSession AcceptIncomingConnection()
{
return AcceptIncomingConnection(_incomingConnections);
}
public KLightServerSession AcceptIncomingLightConnection()
{
return AcceptIncomingConnection(_lightIncomingConnections);
}
private T AcceptIncomingConnection<T>(LinkedList<T> list)
{
T session = default;
KernelContext.CriticalSection.Enter();
if (list.Count != 0)
{
session = list.First.Value;
list.RemoveFirst();
}
KernelContext.CriticalSection.Leave();
return session;
}
public override bool IsSignaled()
{
if (_parent.IsLight)
{
return _lightIncomingConnections.Count != 0;
}
else
{
return _incomingConnections.Count != 0;
}
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,54 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KSession : KAutoObject
{
public KServerSession ServerSession { get; }
public KClientSession ClientSession { get; }
private bool _hasBeenInitialized;
public KSession(KernelContext context, KClientPort parentPort = null) : base(context)
{
IncrementReferenceCount();
ServerSession = new KServerSession(context, this);
ClientSession = new KClientSession(context, this, parentPort);
_hasBeenInitialized = true;
}
public void DisconnectClient()
{
if (ClientSession.State == ChannelState.Open)
{
ClientSession.State = ChannelState.ClientDisconnected;
ServerSession.CancelAllRequestsClientDisconnected();
}
}
public void DisconnectServer()
{
if (ClientSession.State == ChannelState.Open)
{
ClientSession.State = ChannelState.ServerDisconnected;
}
}
protected override void Destroy()
{
if (_hasBeenInitialized)
{
ClientSession.DisconnectFromPort();
KProcess creatorProcess = ClientSession.CreatorProcess;
creatorProcess.ResourceLimit?.Release(LimitableResource.Session, 1);
creatorProcess.DecrementReferenceCount();
}
}
}
}

View file

@ -0,0 +1,33 @@
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.HOS.Kernel.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
class KSessionRequest
{
public KBufferDescriptorTable BufferDescriptorTable { get; }
public KThread ClientThread { get; }
public KProcess ServerProcess { get; set; }
public KWritableEvent AsyncEvent { get; }
public ulong CustomCmdBuffAddr { get; }
public ulong CustomCmdBuffSize { get; }
public KSessionRequest(
KThread clientThread,
ulong customCmdBuffAddr,
ulong customCmdBuffSize,
KWritableEvent asyncEvent = null)
{
ClientThread = clientThread;
CustomCmdBuffAddr = customCmdBuffAddr;
CustomCmdBuffSize = customCmdBuffSize;
AsyncEvent = asyncEvent;
BufferDescriptorTable = new KBufferDescriptorTable();
}
}
}

View file

@ -0,0 +1,20 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
namespace Ryujinx.HLE.HOS.Kernel
{
static class KernelConstants
{
public const int InitialKipId = 1;
public const int InitialProcessId = 0x51;
public const int SupervisorCallCount = 0xC0;
public const int MemoryBlockAllocatorSize = 0x2710;
public const ulong UserSlabHeapBase = DramMemoryMap.SlabHeapBase;
public const ulong UserSlabHeapItemSize = KPageTableBase.PageSize;
public const ulong UserSlabHeapSize = 0x3de000;
public const ulong CounterFrequency = 19200000;
}
}

View file

@ -0,0 +1,160 @@
using Ryujinx.Cpu;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Memory;
using System;
using System.Collections.Concurrent;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel
{
class KernelContext : IDisposable
{
public long PrivilegedProcessLowestId { get; set; } = 1;
public long PrivilegedProcessHighestId { get; set; } = 8;
public bool EnableVersionChecks { get; set; }
public bool KernelInitialized { get; }
public bool Running { get; private set; }
public Switch Device { get; }
public MemoryBlock Memory { get; }
public ITickSource TickSource { get; }
public Syscall Syscall { get; }
public SyscallHandler SyscallHandler { get; }
public KResourceLimit ResourceLimit { get; }
public KMemoryManager MemoryManager { get; }
public KMemoryBlockSlabManager LargeMemoryBlockSlabManager { get; }
public KMemoryBlockSlabManager SmallMemoryBlockSlabManager { get; }
public KSlabHeap UserSlabHeapPages { get; }
public KCriticalSection CriticalSection { get; }
public KScheduler[] Schedulers { get; }
public KPriorityQueue PriorityQueue { get; }
public KTimeManager TimeManager { get; }
public KSynchronization Synchronization { get; }
public KContextIdManager ContextIdManager { get; }
public ConcurrentDictionary<ulong, KProcess> Processes { get; }
public ConcurrentDictionary<string, KAutoObject> AutoObjectNames { get; }
public bool ThreadReselectionRequested { get; set; }
private ulong _kipId;
private ulong _processId;
private ulong _threadUid;
public KernelContext(
ITickSource tickSource,
Switch device,
MemoryBlock memory,
MemorySize memorySize,
MemoryArrange memoryArrange)
{
TickSource = tickSource;
Device = device;
Memory = memory;
Running = true;
Syscall = new Syscall(this);
SyscallHandler = new SyscallHandler(this);
ResourceLimit = new KResourceLimit(this);
KernelInit.InitializeResourceLimit(ResourceLimit, memorySize);
MemoryManager = new KMemoryManager(memorySize, memoryArrange);
LargeMemoryBlockSlabManager = new KMemoryBlockSlabManager(KernelConstants.MemoryBlockAllocatorSize * 2);
SmallMemoryBlockSlabManager = new KMemoryBlockSlabManager(KernelConstants.MemoryBlockAllocatorSize);
UserSlabHeapPages = new KSlabHeap(
KernelConstants.UserSlabHeapBase,
KernelConstants.UserSlabHeapItemSize,
KernelConstants.UserSlabHeapSize);
CommitMemory(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize);
CriticalSection = new KCriticalSection(this);
Schedulers = new KScheduler[KScheduler.CpuCoresCount];
PriorityQueue = new KPriorityQueue();
TimeManager = new KTimeManager(this);
Synchronization = new KSynchronization(this);
ContextIdManager = new KContextIdManager();
for (int core = 0; core < KScheduler.CpuCoresCount; core++)
{
Schedulers[core] = new KScheduler(this, core);
}
StartPreemptionThread();
KernelInitialized = true;
Processes = new ConcurrentDictionary<ulong, KProcess>();
AutoObjectNames = new ConcurrentDictionary<string, KAutoObject>();
_kipId = KernelConstants.InitialKipId;
_processId = KernelConstants.InitialProcessId;
}
private void StartPreemptionThread()
{
void PreemptionThreadStart()
{
KScheduler.PreemptionThreadLoop(this);
}
new Thread(PreemptionThreadStart) { Name = "HLE.PreemptionThread" }.Start();
}
public void CommitMemory(ulong address, ulong size)
{
ulong alignment = MemoryBlock.GetPageSize();
ulong endAddress = address + size;
address &= ~(alignment - 1);
endAddress = (endAddress + (alignment - 1)) & ~(alignment - 1);
Memory.Commit(address, endAddress - address);
}
public ulong NewThreadUid()
{
return Interlocked.Increment(ref _threadUid) - 1;
}
public ulong NewKipId()
{
return Interlocked.Increment(ref _kipId) - 1;
}
public ulong NewProcessId()
{
return Interlocked.Increment(ref _processId) - 1;
}
public void Dispose()
{
Running = false;
for (int i = 0; i < KScheduler.CpuCoresCount; i++)
{
Schedulers[i].Dispose();
}
TimeManager.Dispose();
}
}
}

View file

@ -0,0 +1,73 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Horizon.Common;
using System;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel
{
static class KernelStatic
{
[ThreadStatic]
private static KernelContext Context;
[ThreadStatic]
private static KThread CurrentThread;
public static Result StartInitialProcess(
KernelContext context,
ProcessCreationInfo creationInfo,
ReadOnlySpan<uint> capabilities,
int mainThreadPriority,
ThreadStart customThreadStart)
{
KProcess process = new KProcess(context);
Result result = process.Initialize(
creationInfo,
capabilities,
context.ResourceLimit,
MemoryRegion.Service,
null,
customThreadStart);
if (result != Result.Success)
{
return result;
}
process.DefaultCpuCore = 3;
context.Processes.TryAdd(process.Pid, process);
return process.Start(mainThreadPriority, 0x1000UL);
}
internal static void SetKernelContext(KernelContext context, KThread thread)
{
Context = context;
CurrentThread = thread;
}
internal static KThread GetCurrentThread()
{
return CurrentThread;
}
internal static KProcess GetCurrentProcess()
{
return GetCurrentThread().Owner;
}
internal static KProcess GetProcessByPid(ulong pid)
{
if (Context.Processes.TryGetValue(pid, out KProcess process))
{
return process;
}
return null;
}
}
}

View file

@ -0,0 +1,10 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
enum AddressSpaceType
{
Addr32Bits = 0,
Addr36Bits = 1,
Addr32BitsNoMap = 2,
Addr39Bits = 3
}
}

View file

@ -0,0 +1,18 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
static class DramMemoryMap
{
public const ulong DramBase = 0x80000000;
public const ulong KernelReserveBase = DramBase + 0x60000;
public const ulong SlabHeapBase = KernelReserveBase + 0x85000;
public const ulong SlapHeapSize = 0xa21000;
public const ulong SlabHeapEnd = SlabHeapBase + SlapHeapSize;
public static bool IsHeapPhysicalAddress(ulong address)
{
return address >= SlabHeapEnd;
}
}
}
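
The constants above resolve to the following physical addresses (worked values):

// DramBase          = 0x8000_0000
// KernelReserveBase = 0x8006_0000   (DramBase + 0x60000)
// SlabHeapBase      = 0x800E_5000   (KernelReserveBase + 0x85000)
// SlabHeapEnd       = 0x80B0_6000   (SlabHeapBase + 0xa21000)
// IsHeapPhysicalAddress(address) is therefore true for any address at or above 0x80B0_6000.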

View file

@ -0,0 +1,169 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using System;
using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KCodeMemory : KAutoObject
{
public KProcess Owner { get; private set; }
private readonly KPageList _pageList;
private readonly object _lock;
private ulong _address;
private bool _isOwnerMapped;
private bool _isMapped;
public KCodeMemory(KernelContext context) : base(context)
{
_pageList = new KPageList();
_lock = new object();
}
public Result Initialize(ulong address, ulong size)
{
Owner = KernelStatic.GetCurrentProcess();
Result result = Owner.MemoryManager.BorrowCodeMemory(_pageList, address, size);
if (result != Result.Success)
{
return result;
}
Owner.CpuMemory.Fill(address, size, 0xff);
Owner.IncrementReferenceCount();
_address = address;
_isMapped = false;
_isOwnerMapped = false;
return Result.Success;
}
public Result Map(ulong address, ulong size, KMemoryPermission perm)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
lock (_lock)
{
if (_isMapped)
{
return KernelResult.InvalidState;
}
KProcess process = KernelStatic.GetCurrentProcess();
Result result = process.MemoryManager.MapPages(address, _pageList, MemoryState.CodeWritable, KMemoryPermission.ReadAndWrite);
if (result != Result.Success)
{
return result;
}
_isMapped = true;
}
return Result.Success;
}
public Result MapToOwner(ulong address, ulong size, KMemoryPermission permission)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
lock (_lock)
{
if (_isOwnerMapped)
{
return KernelResult.InvalidState;
}
Debug.Assert(permission == KMemoryPermission.Read || permission == KMemoryPermission.ReadAndExecute);
Result result = Owner.MemoryManager.MapPages(address, _pageList, MemoryState.CodeReadOnly, permission);
if (result != Result.Success)
{
return result;
}
_isOwnerMapped = true;
}
return Result.Success;
}
public Result Unmap(ulong address, ulong size)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
lock (_lock)
{
KProcess process = KernelStatic.GetCurrentProcess();
Result result = process.MemoryManager.UnmapPages(address, _pageList, MemoryState.CodeWritable);
if (result != Result.Success)
{
return result;
}
Debug.Assert(_isMapped);
_isMapped = false;
}
return Result.Success;
}
public Result UnmapFromOwner(ulong address, ulong size)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
lock (_lock)
{
Result result = Owner.MemoryManager.UnmapPages(address, _pageList, MemoryState.CodeReadOnly);
if (result != Result.Success)
{
return result;
}
Debug.Assert(_isOwnerMapped);
_isOwnerMapped = false;
}
return Result.Success;
}
protected override void Destroy()
{
if (!_isMapped && !_isOwnerMapped)
{
ulong size = _pageList.GetPagesCount() * KPageTableBase.PageSize;
if (Owner.MemoryManager.UnborrowCodeMemory(_address, size, _pageList) != Result.Success)
{
throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
}
}
Owner.DecrementReferenceCount();
}
}
}

View file

@ -0,0 +1,156 @@
using Ryujinx.Common.Collections;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryBlock : IntrusiveRedBlackTreeNode<KMemoryBlock>, IComparable<KMemoryBlock>, IComparable<ulong>
{
public ulong BaseAddress { get; private set; }
public ulong PagesCount { get; private set; }
public MemoryState State { get; private set; }
public KMemoryPermission Permission { get; private set; }
public MemoryAttribute Attribute { get; private set; }
public KMemoryPermission SourcePermission { get; private set; }
public int IpcRefCount { get; private set; }
public int DeviceRefCount { get; private set; }
public KMemoryBlock(
ulong baseAddress,
ulong pagesCount,
MemoryState state,
KMemoryPermission permission,
MemoryAttribute attribute,
int ipcRefCount = 0,
int deviceRefCount = 0)
{
BaseAddress = baseAddress;
PagesCount = pagesCount;
State = state;
Attribute = attribute;
Permission = permission;
IpcRefCount = ipcRefCount;
DeviceRefCount = deviceRefCount;
}
public void SetState(KMemoryPermission permission, MemoryState state, MemoryAttribute attribute)
{
Permission = permission;
State = state;
Attribute &= MemoryAttribute.IpcAndDeviceMapped;
Attribute |= attribute;
}
public void SetIpcMappingPermission(KMemoryPermission newPermission)
{
int oldIpcRefCount = IpcRefCount++;
if ((ushort)IpcRefCount == 0)
{
throw new InvalidOperationException("IPC reference count increment overflowed.");
}
if (oldIpcRefCount == 0)
{
SourcePermission = Permission;
Permission &= ~KMemoryPermission.ReadAndWrite;
Permission |= KMemoryPermission.ReadAndWrite & newPermission;
}
Attribute |= MemoryAttribute.IpcMapped;
}
public void RestoreIpcMappingPermission()
{
int oldIpcRefCount = IpcRefCount--;
if (oldIpcRefCount == 0)
{
throw new InvalidOperationException("IPC reference count decrement underflowed.");
}
if (oldIpcRefCount == 1)
{
Permission = SourcePermission;
SourcePermission = KMemoryPermission.None;
Attribute &= ~MemoryAttribute.IpcMapped;
}
}
public KMemoryBlock SplitRightAtAddress(ulong address)
{
ulong leftAddress = BaseAddress;
ulong leftPagesCount = (address - leftAddress) / KPageTableBase.PageSize;
BaseAddress = address;
PagesCount -= leftPagesCount;
return new KMemoryBlock(
leftAddress,
leftPagesCount,
State,
Permission,
Attribute,
IpcRefCount,
DeviceRefCount);
}
public void AddPages(ulong pagesCount)
{
PagesCount += pagesCount;
}
public KMemoryInfo GetInfo()
{
ulong size = PagesCount * KPageTableBase.PageSize;
return new KMemoryInfo(
BaseAddress,
size,
State,
Permission,
Attribute,
SourcePermission,
IpcRefCount,
DeviceRefCount);
}
public int CompareTo(KMemoryBlock other)
{
if (BaseAddress < other.BaseAddress)
{
return -1;
}
else if (BaseAddress <= other.BaseAddress + other.PagesCount * KPageTableBase.PageSize - 1UL)
{
return 0;
}
else
{
return 1;
}
}
public int CompareTo(ulong address)
{
if (address < BaseAddress)
{
return 1;
}
else if (address <= BaseAddress + PagesCount * KPageTableBase.PageSize - 1UL)
{
return 0;
}
else
{
return -1;
}
}
}
}
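
The two CompareTo overloads above implement interval containment, which is what lets the intrusive red-black tree find the block covering an arbitrary address. A small sketch, assuming KPageTableBase.PageSize is 0x1000 and System.Diagnostics is imported:

KMemoryBlock block = new KMemoryBlock(
    0x1000,                  // baseAddress
    4,                       // pagesCount, so the block covers [0x1000, 0x5000)
    MemoryState.Unmapped,
    KMemoryPermission.None,
    MemoryAttribute.None);
Debug.Assert(block.CompareTo(0x0800UL) == 1);  // address below the block
Debug.Assert(block.CompareTo(0x1000UL) == 0);  // first byte, contained
Debug.Assert(block.CompareTo(0x4FFFUL) == 0);  // last byte, contained
Debug.Assert(block.CompareTo(0x5000UL) == -1); // just past the end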

View file

@ -0,0 +1,288 @@
using Ryujinx.Common.Collections;
using Ryujinx.Horizon.Common;
using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryBlockManager
{
private const int PageSize = KPageTableBase.PageSize;
private readonly IntrusiveRedBlackTree<KMemoryBlock> _blockTree;
public int BlocksCount => _blockTree.Count;
private KMemoryBlockSlabManager _slabManager;
private ulong _addrSpaceStart;
private ulong _addrSpaceEnd;
public KMemoryBlockManager()
{
_blockTree = new IntrusiveRedBlackTree<KMemoryBlock>();
}
public Result Initialize(ulong addrSpaceStart, ulong addrSpaceEnd, KMemoryBlockSlabManager slabManager)
{
_slabManager = slabManager;
_addrSpaceStart = addrSpaceStart;
_addrSpaceEnd = addrSpaceEnd;
// First insertion will always need only a single block, because there's nothing to split.
if (!slabManager.CanAllocate(1))
{
return KernelResult.OutOfResource;
}
ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize;
_blockTree.Add(new KMemoryBlock(
addrSpaceStart,
addrSpacePagesCount,
MemoryState.Unmapped,
KMemoryPermission.None,
MemoryAttribute.None));
return Result.Success;
}
public void InsertBlock(
ulong baseAddress,
ulong pagesCount,
MemoryState oldState,
KMemoryPermission oldPermission,
MemoryAttribute oldAttribute,
MemoryState newState,
KMemoryPermission newPermission,
MemoryAttribute newAttribute)
{
// Insert the new block into the list only over areas where the block's state
// matches the state specified by the old* arguments; otherwise leave it as is.
int oldCount = _blockTree.Count;
oldAttribute |= MemoryAttribute.IpcAndDeviceMapped;
ulong endAddr = baseAddress + pagesCount * PageSize;
KMemoryBlock currBlock = FindBlock(baseAddress);
while (currBlock != null)
{
ulong currBaseAddr = currBlock.BaseAddress;
ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
if (baseAddress < currEndAddr && currBaseAddr < endAddr)
{
MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped;
if (currBlock.State != oldState ||
currBlock.Permission != oldPermission ||
currBlockAttr != oldAttribute)
{
currBlock = currBlock.Successor;
continue;
}
if (baseAddress > currBaseAddr)
{
KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
_blockTree.Add(newBlock);
}
if (endAddr < currEndAddr)
{
KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
_blockTree.Add(newBlock);
currBlock = newBlock;
}
currBlock.SetState(newPermission, newState, newAttribute);
currBlock = MergeEqualStateNeighbors(currBlock);
}
if (currEndAddr - 1 >= endAddr - 1)
{
break;
}
currBlock = currBlock.Successor;
}
_slabManager.Count += _blockTree.Count - oldCount;
ValidateInternalState();
}
public void InsertBlock(
ulong baseAddress,
ulong pagesCount,
MemoryState state,
KMemoryPermission permission = KMemoryPermission.None,
MemoryAttribute attribute = MemoryAttribute.None)
{
// Inserts a new block into the list, replacing and splitting
// existing blocks as needed.
int oldCount = _blockTree.Count;
ulong endAddr = baseAddress + pagesCount * PageSize;
KMemoryBlock currBlock = FindBlock(baseAddress);
while (currBlock != null)
{
ulong currBaseAddr = currBlock.BaseAddress;
ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
if (baseAddress < currEndAddr && currBaseAddr < endAddr)
{
if (baseAddress > currBaseAddr)
{
KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
_blockTree.Add(newBlock);
}
if (endAddr < currEndAddr)
{
KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
_blockTree.Add(newBlock);
currBlock = newBlock;
}
currBlock.SetState(permission, state, attribute);
currBlock = MergeEqualStateNeighbors(currBlock);
}
if (currEndAddr - 1 >= endAddr - 1)
{
break;
}
currBlock = currBlock.Successor;
}
_slabManager.Count += _blockTree.Count - oldCount;
ValidateInternalState();
}
public delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm);
public void InsertBlock(
ulong baseAddress,
ulong pagesCount,
BlockMutator blockMutate,
KMemoryPermission permission = KMemoryPermission.None)
{
// Inserts a new block into the list, replacing and splitting
// existing blocks as needed, then calls the callback
// function on the new block.
int oldCount = _blockTree.Count;
ulong endAddr = baseAddress + pagesCount * PageSize;
KMemoryBlock currBlock = FindBlock(baseAddress);
while (currBlock != null)
{
ulong currBaseAddr = currBlock.BaseAddress;
ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
if (baseAddress < currEndAddr && currBaseAddr < endAddr)
{
if (baseAddress > currBaseAddr)
{
KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
_blockTree.Add(newBlock);
}
if (endAddr < currEndAddr)
{
KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
_blockTree.Add(newBlock);
currBlock = newBlock;
}
blockMutate(currBlock, permission);
currBlock = MergeEqualStateNeighbors(currBlock);
}
if (currEndAddr - 1 >= endAddr - 1)
{
break;
}
currBlock = currBlock.Successor;
}
_slabManager.Count += _blockTree.Count - oldCount;
ValidateInternalState();
}
[Conditional("DEBUG")]
private void ValidateInternalState()
{
ulong expectedAddress = 0;
KMemoryBlock currBlock = FindBlock(_addrSpaceStart);
while (currBlock != null)
{
Debug.Assert(currBlock.BaseAddress == expectedAddress);
expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize;
currBlock = currBlock.Successor;
}
Debug.Assert(expectedAddress == _addrSpaceEnd);
}
private KMemoryBlock MergeEqualStateNeighbors(KMemoryBlock block)
{
KMemoryBlock previousBlock = block.Predecessor;
KMemoryBlock nextBlock = block.Successor;
if (previousBlock != null && BlockStateEquals(block, previousBlock))
{
_blockTree.Remove(block);
previousBlock.AddPages(block.PagesCount);
block = previousBlock;
}
if (nextBlock != null && BlockStateEquals(block, nextBlock))
{
_blockTree.Remove(nextBlock);
block.AddPages(nextBlock.PagesCount);
}
return block;
}
private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs)
{
return lhs.State == rhs.State &&
lhs.Permission == rhs.Permission &&
lhs.Attribute == rhs.Attribute &&
lhs.SourcePermission == rhs.SourcePermission &&
lhs.DeviceRefCount == rhs.DeviceRefCount &&
lhs.IpcRefCount == rhs.IpcRefCount;
}
public KMemoryBlock FindBlock(ulong address)
{
return _blockTree.GetNodeByKey(address);
}
}
}

View file

@ -0,0 +1,19 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryBlockSlabManager
{
private ulong _capacityElements;
public int Count { get; set; }
public KMemoryBlockSlabManager(ulong capacityElements)
{
_capacityElements = capacityElements;
}
public bool CanAllocate(int count)
{
return (ulong)(Count + count) <= _capacityElements;
}
}
}

View file

@ -0,0 +1,36 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryInfo
{
public ulong Address { get; }
public ulong Size { get; }
public MemoryState State { get; }
public KMemoryPermission Permission { get; }
public MemoryAttribute Attribute { get; }
public KMemoryPermission SourcePermission { get; }
public int IpcRefCount { get; }
public int DeviceRefCount { get; }
public KMemoryInfo(
ulong address,
ulong size,
MemoryState state,
KMemoryPermission permission,
MemoryAttribute attribute,
KMemoryPermission sourcePermission,
int ipcRefCount,
int deviceRefCount)
{
Address = address;
Size = size;
State = state;
Permission = permission;
Attribute = attribute;
SourcePermission = sourcePermission;
IpcRefCount = ipcRefCount;
DeviceRefCount = deviceRefCount;
}
}
}

View file

@ -0,0 +1,65 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryManager
{
public KMemoryRegionManager[] MemoryRegions { get; }
public KMemoryManager(MemorySize size, MemoryArrange arrange)
{
MemoryRegions = KernelInit.GetMemoryRegions(size, arrange);
}
private KMemoryRegionManager GetMemoryRegion(ulong address)
{
for (int i = 0; i < MemoryRegions.Length; i++)
{
var region = MemoryRegions[i];
if (address >= region.Address && address < region.EndAddr)
{
return region;
}
}
return null;
}
public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
{
IncrementOrDecrementPagesReferenceCount(address, pagesCount, true);
}
public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
{
IncrementOrDecrementPagesReferenceCount(address, pagesCount, false);
}
private void IncrementOrDecrementPagesReferenceCount(ulong address, ulong pagesCount, bool increment)
{
while (pagesCount != 0)
{
var region = GetMemoryRegion(address);
ulong countToProcess = Math.Min(pagesCount, region.GetPageOffsetFromEnd(address));
lock (region)
{
if (increment)
{
region.IncrementPagesReferenceCount(address, countToProcess);
}
else
{
region.DecrementPagesReferenceCount(address, countToProcess);
}
}
pagesCount -= countToProcess;
address += countToProcess * KPageTableBase.PageSize;
}
}
}
}
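A small numeric sketch of the clamping loop above (all values invented): each iteration only touches the pages that fall inside the region containing the current address.
// Region [0x1000, 0x5000) with 4 KiB pages; a 5-page request starts at 0x3000.
ulong regionAddress = 0x1000, regionSize = 0x4000;
ulong address = 0x3000, pagesCount = 5, pageSize = 0x1000;
ulong pagesLeftInRegion = (regionAddress + regionSize - address) / pageSize; // 2, as GetPageOffsetFromEnd would return
ulong countToProcess = Math.Min(pagesCount, pagesLeftInRegion);              // 2 pages handled in this region
pagesCount -= countToProcess;                                                // 3 pages remain
address += countToProcess * pageSize;                                        // 0x5000, the first page of the next region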

View file

@ -0,0 +1,242 @@
using Ryujinx.Horizon.Common;
using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryRegionManager
{
private readonly KPageHeap _pageHeap;
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddr => Address + Size;
private readonly ushort[] _pageReferenceCounts;
public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
{
Address = address;
Size = size;
_pageReferenceCounts = new ushort[size / KPageTableBase.PageSize];
_pageHeap = new KPageHeap(address, size);
_pageHeap.Free(address, size / KPageTableBase.PageSize);
_pageHeap.UpdateUsedSize();
}
public Result AllocatePages(out KPageList pageList, ulong pagesCount)
{
if (pagesCount == 0)
{
pageList = new KPageList();
return Result.Success;
}
lock (_pageHeap)
{
Result result = AllocatePagesImpl(out pageList, pagesCount, false);
if (result == Result.Success)
{
foreach (var node in pageList)
{
IncrementPagesReferenceCount(node.Address, node.PagesCount);
}
}
return result;
}
}
public ulong AllocatePagesContiguous(KernelContext context, ulong pagesCount, bool backwards)
{
if (pagesCount == 0)
{
return 0;
}
lock (_pageHeap)
{
ulong address = AllocatePagesContiguousImpl(pagesCount, 1, backwards);
if (address != 0)
{
IncrementPagesReferenceCount(address, pagesCount);
context.CommitMemory(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
}
return address;
}
}
private Result AllocatePagesImpl(out KPageList pageList, ulong pagesCount, bool random)
{
pageList = new KPageList();
int heapIndex = KPageHeap.GetBlockIndex(pagesCount);
if (heapIndex < 0)
{
return KernelResult.OutOfMemory;
}
for (int index = heapIndex; index >= 0; index--)
{
ulong pagesPerAlloc = KPageHeap.GetBlockPagesCount(index);
while (pagesCount >= pagesPerAlloc)
{
ulong allocatedBlock = _pageHeap.AllocateBlock(index, random);
if (allocatedBlock == 0)
{
break;
}
Result result = pageList.AddRange(allocatedBlock, pagesPerAlloc);
if (result != Result.Success)
{
FreePages(pageList);
_pageHeap.Free(allocatedBlock, pagesPerAlloc);
return result;
}
pagesCount -= pagesPerAlloc;
}
}
if (pagesCount != 0)
{
FreePages(pageList);
return KernelResult.OutOfMemory;
}
return Result.Success;
}
private ulong AllocatePagesContiguousImpl(ulong pagesCount, ulong alignPages, bool random)
{
int heapIndex = KPageHeap.GetAlignedBlockIndex(pagesCount, alignPages);
ulong allocatedBlock = _pageHeap.AllocateBlock(heapIndex, random);
if (allocatedBlock == 0)
{
return 0;
}
ulong allocatedPages = KPageHeap.GetBlockPagesCount(heapIndex);
if (allocatedPages > pagesCount)
{
_pageHeap.Free(allocatedBlock + pagesCount * KPageTableBase.PageSize, allocatedPages - pagesCount);
}
return allocatedBlock;
}
public void FreePage(ulong address)
{
lock (_pageHeap)
{
_pageHeap.Free(address, 1);
}
}
public void FreePages(KPageList pageList)
{
lock (_pageHeap)
{
foreach (KPageNode pageNode in pageList)
{
_pageHeap.Free(pageNode.Address, pageNode.PagesCount);
}
}
}
public void FreePages(ulong address, ulong pagesCount)
{
lock (_pageHeap)
{
_pageHeap.Free(address, pagesCount);
}
}
public ulong GetFreePages()
{
lock (_pageHeap)
{
return _pageHeap.GetFreePagesCount();
}
}
public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
{
ulong index = GetPageOffset(address);
ulong endIndex = index + pagesCount;
while (index < endIndex)
{
ushort referenceCount = ++_pageReferenceCounts[index];
Debug.Assert(referenceCount >= 1);
index++;
}
}
public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
{
ulong index = GetPageOffset(address);
ulong endIndex = index + pagesCount;
ulong freeBaseIndex = 0;
ulong freePagesCount = 0;
while (index < endIndex)
{
Debug.Assert(_pageReferenceCounts[index] > 0);
ushort referenceCount = --_pageReferenceCounts[index];
if (referenceCount == 0)
{
if (freePagesCount != 0)
{
freePagesCount++;
}
else
{
freeBaseIndex = index;
freePagesCount = 1;
}
}
else if (freePagesCount != 0)
{
FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
freePagesCount = 0;
}
index++;
}
if (freePagesCount != 0)
{
FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
}
}
public ulong GetPageOffset(ulong address)
{
return (address - Address) / KPageTableBase.PageSize;
}
public ulong GetPageOffsetFromEnd(ulong address)
{
return (EndAddr - address) / KPageTableBase.PageSize;
}
}
}

View file

@ -0,0 +1,298 @@
using Ryujinx.Common;
using System;
using System.Numerics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageBitmap
{
private struct RandomNumberGenerator
{
private uint _entropy;
private uint _bitsAvailable;
private void RefreshEntropy()
{
_entropy = 0;
_bitsAvailable = sizeof(uint) * 8;
}
private bool GenerateRandomBit()
{
if (_bitsAvailable == 0)
{
RefreshEntropy();
}
bool bit = (_entropy & 1) != 0;
_entropy >>= 1;
_bitsAvailable--;
return bit;
}
public int SelectRandomBit(ulong bitmap)
{
int selected = 0;
int bitsCount = UInt64BitSize / 2;
ulong mask = (1UL << bitsCount) - 1;
while (bitsCount != 0)
{
ulong low = bitmap & mask;
ulong high = (bitmap >> bitsCount) & mask;
bool chooseLow;
if (high == 0)
{
chooseLow = true;
}
else if (low == 0)
{
chooseLow = false;
}
else
{
chooseLow = GenerateRandomBit();
}
if (chooseLow)
{
bitmap = low;
}
else
{
bitmap = high;
selected += bitsCount;
}
bitsCount /= 2;
mask >>= bitsCount;
}
return selected;
}
}
private const int UInt64BitSize = sizeof(ulong) * 8;
private const int MaxDepth = 4;
private readonly RandomNumberGenerator _rng;
private readonly ArraySegment<ulong>[] _bitStorages;
private int _usedDepths;
public int BitsCount { get; private set; }
public int HighestDepthIndex => _usedDepths - 1;
public KPageBitmap()
{
_rng = new RandomNumberGenerator();
_bitStorages = new ArraySegment<ulong>[MaxDepth];
}
public ArraySegment<ulong> Initialize(ArraySegment<ulong> storage, ulong size)
{
_usedDepths = GetRequiredDepth(size);
for (int depth = HighestDepthIndex; depth >= 0; depth--)
{
_bitStorages[depth] = storage;
size = BitUtils.DivRoundUp<ulong>(size, (ulong)UInt64BitSize);
storage = storage.Slice((int)size);
}
return storage;
}
public ulong FindFreeBlock(bool random)
{
ulong offset = 0;
int depth = 0;
if (random)
{
do
{
ulong v = _bitStorages[depth][(int)offset];
if (v == 0)
{
return ulong.MaxValue;
}
offset = offset * UInt64BitSize + (ulong)_rng.SelectRandomBit(v);
}
while (++depth < _usedDepths);
}
else
{
do
{
ulong v = _bitStorages[depth][(int)offset];
if (v == 0)
{
return ulong.MaxValue;
}
offset = offset * UInt64BitSize + (ulong)BitOperations.TrailingZeroCount(v);
}
while (++depth < _usedDepths);
}
return offset;
}
public void SetBit(ulong offset)
{
SetBit(HighestDepthIndex, offset);
BitsCount++;
}
public void ClearBit(ulong offset)
{
ClearBit(HighestDepthIndex, offset);
BitsCount--;
}
public bool ClearRange(ulong offset, int count)
{
int depth = HighestDepthIndex;
var bits = _bitStorages[depth];
int bitInd = (int)(offset / UInt64BitSize);
if (count < UInt64BitSize)
{
int shift = (int)(offset % UInt64BitSize);
ulong mask = ((1UL << count) - 1) << shift;
ulong v = bits[bitInd];
if ((v & mask) != mask)
{
return false;
}
v &= ~mask;
bits[bitInd] = v;
if (v == 0)
{
ClearBit(depth - 1, (ulong)bitInd);
}
}
else
{
int remaining = count;
int i = 0;
do
{
if (bits[bitInd + i++] != ulong.MaxValue)
{
return false;
}
remaining -= UInt64BitSize;
}
while (remaining > 0);
remaining = count;
i = 0;
do
{
bits[bitInd + i] = 0;
ClearBit(depth - 1, (ulong)(bitInd + i));
i++;
remaining -= UInt64BitSize;
}
while (remaining > 0);
}
BitsCount -= count;
return true;
}
private void SetBit(int depth, ulong offset)
{
while (depth >= 0)
{
int ind = (int)(offset / UInt64BitSize);
int which = (int)(offset % UInt64BitSize);
ulong mask = 1UL << which;
ulong v = _bitStorages[depth][ind];
_bitStorages[depth][ind] = v | mask;
if (v != 0)
{
break;
}
offset = (ulong)ind;
depth--;
}
}
private void ClearBit(int depth, ulong offset)
{
while (depth >= 0)
{
int ind = (int)(offset / UInt64BitSize);
int which = (int)(offset % UInt64BitSize);
ulong mask = 1UL << which;
ulong v = _bitStorages[depth][ind];
v &= ~mask;
_bitStorages[depth][ind] = v;
if (v != 0)
{
break;
}
offset = (ulong)ind;
depth--;
}
}
private static int GetRequiredDepth(ulong regionSize)
{
int depth = 0;
do
{
regionSize /= UInt64BitSize;
depth++;
}
while (regionSize != 0);
return depth;
}
public static int CalculateManagementOverheadSize(ulong regionSize)
{
int overheadBits = 0;
for (int depth = GetRequiredDepth(regionSize) - 1; depth >= 0; depth--)
{
regionSize = BitUtils.DivRoundUp<ulong>(regionSize, UInt64BitSize);
overheadBits += (int)regionSize;
}
return overheadBits * sizeof(ulong);
}
}
}
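A worked overhead calculation for the multi-level bitmap above (the region size is illustrative): tracking 4096 blocks with 64-bit words needs three levels.
// GetRequiredDepth(4096): 4096 -> 64 -> 1 -> 0, so 3 levels are used.
// Words per level, leaves first: ceil(4096/64) = 64, ceil(64/64) = 1, ceil(1/64) = 1.
int overhead = KPageBitmap.CalculateManagementOverheadSize(4096); // (64 + 1 + 1) * 8 = 528 bytes of bitmap storage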

View file

@ -0,0 +1,283 @@
using Ryujinx.Common;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageHeap
{
private class Block
{
private KPageBitmap _bitmap = new KPageBitmap();
private ulong _heapAddress;
private ulong _endOffset;
public int Shift { get; private set; }
public int NextShift { get; private set; }
public ulong Size => 1UL << Shift;
public int PagesCount => (int)(Size / KPageTableBase.PageSize);
public int FreeBlocksCount => _bitmap.BitsCount;
public int FreePagesCount => FreeBlocksCount * PagesCount;
public ArraySegment<ulong> Initialize(ulong address, ulong size, int blockShift, int nextBlockShift, ArraySegment<ulong> bitStorage)
{
Shift = blockShift;
NextShift = nextBlockShift;
ulong endAddress = address + size;
ulong align = nextBlockShift != 0
? 1UL << nextBlockShift
: 1UL << blockShift;
address = BitUtils.AlignDown(address, align);
endAddress = BitUtils.AlignUp (endAddress, align);
_heapAddress = address;
_endOffset = (endAddress - address) / (1UL << blockShift);
return _bitmap.Initialize(bitStorage, _endOffset);
}
public ulong PushBlock(ulong address)
{
ulong offset = (address - _heapAddress) >> Shift;
_bitmap.SetBit(offset);
if (NextShift != 0)
{
int diff = 1 << (NextShift - Shift);
offset = BitUtils.AlignDown(offset, (ulong)diff);
if (_bitmap.ClearRange(offset, diff))
{
return _heapAddress + (offset << Shift);
}
}
return 0;
}
public ulong PopBlock(bool random)
{
long sOffset = (long)_bitmap.FindFreeBlock(random);
if (sOffset < 0L)
{
return 0;
}
ulong offset = (ulong)sOffset;
_bitmap.ClearBit(offset);
return _heapAddress + (offset << Shift);
}
public static int CalculateManagementOverheadSize(ulong regionSize, int currBlockShift, int nextBlockShift)
{
ulong currBlockSize = 1UL << currBlockShift;
ulong nextBlockSize = 1UL << nextBlockShift;
ulong align = nextBlockShift != 0 ? nextBlockSize : currBlockSize;
return KPageBitmap.CalculateManagementOverheadSize((align * 2 + BitUtils.AlignUp(regionSize, align)) / currBlockSize);
}
}
private static readonly int[] _memoryBlockPageShifts = new int[] { 12, 16, 21, 22, 25, 29, 30 };
private readonly ulong _heapAddress;
private readonly ulong _heapSize;
private ulong _usedSize;
private readonly int _blocksCount;
private readonly Block[] _blocks;
public KPageHeap(ulong address, ulong size) : this(address, size, _memoryBlockPageShifts)
{
}
public KPageHeap(ulong address, ulong size, int[] blockShifts)
{
_heapAddress = address;
_heapSize = size;
_blocksCount = blockShifts.Length;
_blocks = new Block[_memoryBlockPageShifts.Length];
var currBitmapStorage = new ArraySegment<ulong>(new ulong[CalculateManagementOverheadSize(size, blockShifts)]);
for (int i = 0; i < blockShifts.Length; i++)
{
int currBlockShift = blockShifts[i];
int nextBlockShift = i != blockShifts.Length - 1 ? blockShifts[i + 1] : 0;
_blocks[i] = new Block();
currBitmapStorage = _blocks[i].Initialize(address, size, currBlockShift, nextBlockShift, currBitmapStorage);
}
}
public void UpdateUsedSize()
{
_usedSize = _heapSize - (GetFreePagesCount() * KPageTableBase.PageSize);
}
public ulong GetFreePagesCount()
{
ulong freeCount = 0;
for (int i = 0; i < _blocksCount; i++)
{
freeCount += (ulong)_blocks[i].FreePagesCount;
}
return freeCount;
}
public ulong AllocateBlock(int index, bool random)
{
ulong neededSize = _blocks[index].Size;
for (int i = index; i < _blocksCount; i++)
{
ulong address = _blocks[i].PopBlock(random);
if (address != 0)
{
ulong allocatedSize = _blocks[i].Size;
if (allocatedSize > neededSize)
{
Free(address + neededSize, (allocatedSize - neededSize) / KPageTableBase.PageSize);
}
return address;
}
}
return 0;
}
private void FreeBlock(ulong block, int index)
{
do
{
block = _blocks[index++].PushBlock(block);
}
while (block != 0);
}
public void Free(ulong address, ulong pagesCount)
{
if (pagesCount == 0)
{
return;
}
int bigIndex = _blocksCount - 1;
ulong start = address;
ulong end = address + pagesCount * KPageTableBase.PageSize;
ulong beforeStart = start;
ulong beforeEnd = start;
ulong afterStart = end;
ulong afterEnd = end;
while (bigIndex >= 0)
{
ulong blockSize = _blocks[bigIndex].Size;
ulong bigStart = BitUtils.AlignUp (start, blockSize);
ulong bigEnd = BitUtils.AlignDown(end, blockSize);
if (bigStart < bigEnd)
{
for (ulong block = bigStart; block < bigEnd; block += blockSize)
{
FreeBlock(block, bigIndex);
}
beforeEnd = bigStart;
afterStart = bigEnd;
break;
}
bigIndex--;
}
for (int i = bigIndex - 1; i >= 0; i--)
{
ulong blockSize = _blocks[i].Size;
while (beforeStart + blockSize <= beforeEnd)
{
beforeEnd -= blockSize;
FreeBlock(beforeEnd, i);
}
}
for (int i = bigIndex - 1; i >= 0; i--)
{
ulong blockSize = _blocks[i].Size;
while (afterStart + blockSize <= afterEnd)
{
FreeBlock(afterStart, i);
afterStart += blockSize;
}
}
}
public static int GetAlignedBlockIndex(ulong pagesCount, ulong alignPages)
{
ulong targetPages = Math.Max(pagesCount, alignPages);
for (int i = 0; i < _memoryBlockPageShifts.Length; i++)
{
if (targetPages <= GetBlockPagesCount(i))
{
return i;
}
}
return -1;
}
public static int GetBlockIndex(ulong pagesCount)
{
for (int i = _memoryBlockPageShifts.Length - 1; i >= 0; i--)
{
if (pagesCount >= GetBlockPagesCount(i))
{
return i;
}
}
return -1;
}
public static ulong GetBlockSize(int index)
{
return 1UL << _memoryBlockPageShifts[index];
}
public static ulong GetBlockPagesCount(int index)
{
return GetBlockSize(index) / KPageTableBase.PageSize;
}
private static int CalculateManagementOverheadSize(ulong regionSize, int[] blockShifts)
{
int overheadSize = 0;
for (int i = 0; i < blockShifts.Length; i++)
{
int currBlockShift = blockShifts[i];
int nextBlockShift = i != blockShifts.Length - 1 ? blockShifts[i + 1] : 0;
overheadSize += Block.CalculateManagementOverheadSize(regionSize, currBlockShift, nextBlockShift);
}
return BitUtils.AlignUp(overheadSize, KPageTableBase.PageSize);
}
}
}
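The shifts above correspond to block sizes of 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB and 1 GiB. A small sketch (illustrative request size) of how a contiguous allocation picks an index and later frees the unused tail:
// 20 pages (80 KiB) do not fit in an index 1 block (16 pages), so index 2 (2 MiB) is used.
int index = KPageHeap.GetAlignedBlockIndex(20, 1);       // 2
ulong blockPages = KPageHeap.GetBlockPagesCount(index);  // 512 pages
// AllocatePagesContiguousImpl then frees the unused 492-page tail back to the heap.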

View file

@ -0,0 +1,97 @@
using Ryujinx.Horizon.Common;
using System.Collections;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageList : IEnumerable<KPageNode>
{
public LinkedList<KPageNode> Nodes { get; }
public KPageList()
{
Nodes = new LinkedList<KPageNode>();
}
public Result AddRange(ulong address, ulong pagesCount)
{
if (pagesCount != 0)
{
if (Nodes.Last != null)
{
KPageNode lastNode = Nodes.Last.Value;
if (lastNode.Address + lastNode.PagesCount * KPageTableBase.PageSize == address)
{
address = lastNode.Address;
pagesCount += lastNode.PagesCount;
Nodes.RemoveLast();
}
}
Nodes.AddLast(new KPageNode(address, pagesCount));
}
return Result.Success;
}
public ulong GetPagesCount()
{
ulong sum = 0;
foreach (KPageNode node in Nodes)
{
sum += node.PagesCount;
}
return sum;
}
public bool IsEqual(KPageList other)
{
LinkedListNode<KPageNode> thisNode = Nodes.First;
LinkedListNode<KPageNode> otherNode = other.Nodes.First;
while (thisNode != null && otherNode != null)
{
if (thisNode.Value.Address != otherNode.Value.Address ||
thisNode.Value.PagesCount != otherNode.Value.PagesCount)
{
return false;
}
thisNode = thisNode.Next;
otherNode = otherNode.Next;
}
return thisNode == null && otherNode == null;
}
public void IncrementPagesReferenceCount(KMemoryManager manager)
{
foreach (var node in this)
{
manager.IncrementPagesReferenceCount(node.Address, node.PagesCount);
}
}
public void DecrementPagesReferenceCount(KMemoryManager manager)
{
foreach (var node in this)
{
manager.DecrementPagesReferenceCount(node.Address, node.PagesCount);
}
}
public IEnumerator<KPageNode> GetEnumerator()
{
return Nodes.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
}
}
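A short sketch of the coalescing done by AddRange above (addresses are illustrative): physically contiguous ranges collapse into a single node.
var pages = new KPageList();
pages.AddRange(0xA000, 2); // node (0xA000, 2 pages)
pages.AddRange(0xC000, 1); // contiguous with the previous node, so it is merged
// pages now holds the single node (0xA000, 3) and GetPagesCount() returns 3.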

View file

@ -0,0 +1,14 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
struct KPageNode
{
public ulong Address;
public ulong PagesCount;
public KPageNode(ulong address, ulong pagesCount)
{
Address = address;
PagesCount = pagesCount;
}
}
}

View file

@ -0,0 +1,229 @@
using Ryujinx.Horizon.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageTable : KPageTableBase
{
private readonly IVirtualMemoryManager _cpuMemory;
protected override bool Supports4KBPages => _cpuMemory.Supports4KBPages;
public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
{
return _cpuMemory.GetHostRegions(va, size);
}
/// <inheritdoc/>
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
var ranges = _cpuMemory.GetPhysicalRegions(va, size);
foreach (var range in ranges)
{
pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
}
}
/// <inheritdoc/>
protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
{
return _cpuMemory.GetSpan(va, size);
}
/// <inheritdoc/>
protected override Result MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
KPageList pageList = new KPageList();
GetPhysicalRegions(src, pagesCount * PageSize, pageList);
Result result = Reprotect(src, pagesCount, KMemoryPermission.None);
if (result != Result.Success)
{
return result;
}
result = MapPages(dst, pageList, newDstPermission, MemoryMapFlags.Private, false, 0);
if (result != Result.Success)
{
Result reprotectResult = Reprotect(src, pagesCount, oldSrcPermission);
Debug.Assert(reprotectResult == Result.Success);
}
return result;
}
/// <inheritdoc/>
protected override Result UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
{
ulong size = pagesCount * PageSize;
KPageList srcPageList = new KPageList();
KPageList dstPageList = new KPageList();
GetPhysicalRegions(src, size, srcPageList);
GetPhysicalRegions(dst, size, dstPageList);
if (!dstPageList.IsEqual(srcPageList))
{
return KernelResult.InvalidMemRange;
}
Result result = Unmap(dst, pagesCount);
if (result != Result.Success)
{
return result;
}
result = Reprotect(src, pagesCount, newSrcPermission);
if (result != Result.Success)
{
Result mapResult = MapPages(dst, dstPageList, oldDstPermission, MemoryMapFlags.Private, false, 0);
Debug.Assert(mapResult == Result.Success);
}
return result;
}
/// <inheritdoc/>
protected override Result MapPages(
ulong dstVa,
ulong pagesCount,
ulong srcPa,
KMemoryPermission permission,
MemoryMapFlags flags,
bool shouldFillPages,
byte fillValue)
{
ulong size = pagesCount * PageSize;
Context.CommitMemory(srcPa - DramMemoryMap.DramBase, size);
_cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size, flags);
if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
{
Context.MemoryManager.IncrementPagesReferenceCount(srcPa, pagesCount);
}
if (shouldFillPages)
{
_cpuMemory.Fill(dstVa, size, fillValue);
}
return Result.Success;
}
/// <inheritdoc/>
protected override Result MapPages(
ulong address,
KPageList pageList,
KMemoryPermission permission,
MemoryMapFlags flags,
bool shouldFillPages,
byte fillValue)
{
using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
ulong currentVa = address;
foreach (var pageNode in pageList)
{
ulong addr = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * PageSize;
Context.CommitMemory(addr, size);
_cpuMemory.Map(currentVa, addr, size, flags);
if (shouldFillPages)
{
_cpuMemory.Fill(currentVa, size, fillValue);
}
currentVa += size;
}
scopedPageList.SignalSuccess();
return Result.Success;
}
/// <inheritdoc/>
protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
{
ulong offset = 0;
foreach (var region in regions)
{
_cpuMemory.MapForeign(va + offset, region.Address, region.Size);
offset += region.Size;
}
return Result.Success;
}
/// <inheritdoc/>
protected override Result Unmap(ulong address, ulong pagesCount)
{
KPageList pagesToClose = new KPageList();
var regions = _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize);
foreach (var region in regions)
{
ulong pa = region.Address + DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
pagesToClose.AddRange(pa, region.Size / PageSize);
}
}
_cpuMemory.Unmap(address, pagesCount * PageSize);
pagesToClose.DecrementPagesReferenceCount(Context.MemoryManager);
return Result.Success;
}
/// <inheritdoc/>
protected override Result Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return Result.Success;
}
/// <inheritdoc/>
protected override Result ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return Result.Success;
}
/// <inheritdoc/>
protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
{
_cpuMemory.SignalMemoryTracking(va, size, write);
}
/// <inheritdoc/>
protected override void Write(ulong va, ReadOnlySpan<byte> data)
{
_cpuMemory.Write(va, data);
}
}
}

File diff suppressed because it is too large
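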

View file

@ -0,0 +1,27 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
struct KScopedPageList : IDisposable
{
private readonly KMemoryManager _manager;
private KPageList _pageList;
public KScopedPageList(KMemoryManager manager, KPageList pageList)
{
_manager = manager;
_pageList = pageList;
pageList.IncrementPagesReferenceCount(manager);
}
public void SignalSuccess()
{
_pageList = null;
}
public void Dispose()
{
_pageList?.DecrementPagesReferenceCount(_manager);
}
}
}

View file

@ -0,0 +1,75 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using Ryujinx.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KSharedMemory : KAutoObject
{
private readonly KPageList _pageList;
private readonly ulong _ownerPid;
private readonly KMemoryPermission _ownerPermission;
private readonly KMemoryPermission _userPermission;
public KSharedMemory(
KernelContext context,
SharedMemoryStorage storage,
ulong ownerPid,
KMemoryPermission ownerPermission,
KMemoryPermission userPermission) : base(context)
{
_pageList = storage.GetPageList();
_ownerPid = ownerPid;
_ownerPermission = ownerPermission;
_userPermission = userPermission;
}
public Result MapIntoProcess(
KPageTableBase memoryManager,
ulong address,
ulong size,
KProcess process,
KMemoryPermission permission)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
KMemoryPermission expectedPermission = process.Pid == _ownerPid
? _ownerPermission
: _userPermission;
if (permission != expectedPermission)
{
return KernelResult.InvalidPermission;
}
// On platforms with a page size larger than 4 KB, this can fail because the address is not page aligned;
// return an error so the application retries with a different address.
try
{
return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
}
catch (InvalidMemoryRegionException)
{
return KernelResult.InvalidMemState;
}
}
public Result UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory);
}
}
}

View file

@ -0,0 +1,50 @@
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KSlabHeap
{
private LinkedList<ulong> _items;
public KSlabHeap(ulong pa, ulong itemSize, ulong size)
{
_items = new LinkedList<ulong>();
int itemsCount = (int)(size / itemSize);
for (int index = 0; index < itemsCount; index++)
{
_items.AddLast(pa);
pa += itemSize;
}
}
public bool TryGetItem(out ulong pa)
{
lock (_items)
{
if (_items.First != null)
{
pa = _items.First.Value;
_items.RemoveFirst();
return true;
}
}
pa = 0;
return false;
}
public void Free(ulong pa)
{
lock (_items)
{
_items.AddFirst(pa);
}
}
}
}
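A minimal usage sketch of the slab heap above (addresses and sizes are illustrative):
var slab = new KSlabHeap(0x10000, 0x1000, 0x4000); // four 0x1000-byte items at 0x10000..0x13000
if (slab.TryGetItem(out ulong pa))                 // pa == 0x10000
{
// ... use the item ...
slab.Free(pa);                                     // returned to the front of the free list
}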

View file

@ -0,0 +1,130 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KTransferMemory : KAutoObject
{
private KProcess _creator;
// TODO: Remove when we no longer need to read it from the owner directly.
public KProcess Creator => _creator;
private readonly KPageList _pageList;
public ulong Address { get; private set; }
public ulong Size { get; private set; }
public KMemoryPermission Permission { get; private set; }
private bool _hasBeenInitialized;
private bool _isMapped;
public KTransferMemory(KernelContext context) : base(context)
{
_pageList = new KPageList();
}
public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
{
_pageList = storage.GetPageList();
Permission = KMemoryPermission.ReadAndWrite;
_hasBeenInitialized = true;
_isMapped = false;
}
public Result Initialize(ulong address, ulong size, KMemoryPermission permission)
{
KProcess creator = KernelStatic.GetCurrentProcess();
_creator = creator;
Result result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
if (result != Result.Success)
{
return result;
}
creator.IncrementReferenceCount();
Permission = permission;
Address = address;
Size = size;
_hasBeenInitialized = true;
_isMapped = false;
return result;
}
public Result MapIntoProcess(
KPageTableBase memoryManager,
ulong address,
ulong size,
KProcess process,
KMemoryPermission permission)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
if (permission != Permission || _isMapped)
{
return KernelResult.InvalidState;
}
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
Result result = memoryManager.MapPages(address, _pageList, state, KMemoryPermission.ReadAndWrite);
if (result == Result.Success)
{
_isMapped = true;
}
return result;
}
public Result UnmapFromProcess(
KPageTableBase memoryManager,
ulong address,
ulong size,
KProcess process)
{
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
Result result = memoryManager.UnmapPages(address, _pageList, state);
if (result == Result.Success)
{
_isMapped = false;
}
return result;
}
protected override void Destroy()
{
if (_hasBeenInitialized)
{
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != Result.Success)
{
throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
}
_creator.ResourceLimit?.Release(LimitableResource.TransferMemory, 1);
_creator.DecrementReferenceCount();
}
}
}
}

View file

@ -0,0 +1,22 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
[Flags]
enum MemoryAttribute : byte
{
None = 0,
Mask = 0xff,
Borrowed = 1 << 0,
IpcMapped = 1 << 1,
DeviceMapped = 1 << 2,
Uncached = 1 << 3,
IpcAndDeviceMapped = IpcMapped | DeviceMapped,
BorrowedAndIpcMapped = Borrowed | IpcMapped,
DeviceMappedAndUncached = DeviceMapped | Uncached
}
}

View file

@ -0,0 +1,10 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
enum MemoryFillValue : byte
{
Zero = 0,
Stack = 0x58,
Ipc = 0x59,
Heap = 0x5A,
}
}

View file

@ -0,0 +1,20 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
[Flags]
enum KMemoryPermission : uint
{
None = 0,
UserMask = Read | Write | Execute,
Mask = uint.MaxValue,
Read = 1 << 0,
Write = 1 << 1,
Execute = 1 << 2,
DontCare = 1 << 28,
ReadAndWrite = Read | Write,
ReadAndExecute = Read | Execute
}
}

View file

@ -0,0 +1,10 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
enum MemoryRegion
{
Application = 0,
Applet = 1,
Service = 2,
NvServices = 3
}
}

View file

@ -0,0 +1,50 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
[Flags]
enum MemoryState : uint
{
Unmapped = 0x00000000,
Io = 0x00002001,
Normal = 0x00042002,
CodeStatic = 0x00DC7E03,
CodeMutable = 0x03FEBD04,
Heap = 0x037EBD05,
SharedMemory = 0x00402006,
ModCodeStatic = 0x00DD7E08,
ModCodeMutable = 0x03FFBD09,
IpcBuffer0 = 0x005C3C0A,
Stack = 0x005C3C0B,
ThreadLocal = 0x0040200C,
TransferMemoryIsolated = 0x015C3C0D,
TransferMemory = 0x005C380E,
ProcessMemory = 0x0040380F,
Reserved = 0x00000010,
IpcBuffer1 = 0x005C3811,
IpcBuffer3 = 0x004C2812,
KernelStack = 0x00002013,
CodeReadOnly = 0x00402214,
CodeWritable = 0x00402015,
UserMask = 0xff,
Mask = 0xffffffff,
PermissionChangeAllowed = 1 << 8,
ForceReadWritableByDebugSyscalls = 1 << 9,
IpcSendAllowedType0 = 1 << 10,
IpcSendAllowedType3 = 1 << 11,
IpcSendAllowedType1 = 1 << 12,
ProcessPermissionChangeAllowed = 1 << 14,
MapAllowed = 1 << 15,
UnmapProcessCodeMemoryAllowed = 1 << 16,
TransferMemoryAllowed = 1 << 17,
QueryPhysicalAddressAllowed = 1 << 18,
MapDeviceAllowed = 1 << 19,
MapDeviceAlignedAllowed = 1 << 20,
IpcBufferAllowed = 1 << 21,
IsPoolAllocated = 1 << 22,
MapProcessAllowed = 1 << 23,
AttributeChangeAllowed = 1 << 24,
CodeMemoryAllowed = 1 << 25
}
}
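A brief decode sketch: the low byte of each value is the state type (compare against UserMask) and the upper bits are per-state capability flags, for example:
MemoryState state = MemoryState.Heap;                            // 0x037EBD05
uint typeId = (uint)(state & MemoryState.UserMask);              // 0x05
bool poolAllocated = state.HasFlag(MemoryState.IsPoolAllocated); // true
bool mapAllowed = state.HasFlag(MemoryState.MapAllowed);         // true
bool codeMemory = state.HasFlag(MemoryState.CodeMemoryAllowed);  // true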

View file

@ -0,0 +1,49 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class SharedMemoryStorage
{
private readonly KernelContext _context;
private readonly KPageList _pageList;
private readonly ulong _size;
public SharedMemoryStorage(KernelContext context, KPageList pageList)
{
_context = context;
_pageList = pageList;
_size = pageList.GetPagesCount() * KPageTableBase.PageSize;
foreach (KPageNode pageNode in pageList)
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
context.CommitMemory(address, size);
}
}
public void ZeroFill()
{
for (ulong offset = 0; offset < _size; offset += sizeof(ulong))
{
GetRef<ulong>(offset) = 0;
}
}
public ref T GetRef<T>(ulong offset) where T : unmanaged
{
if (_pageList.Nodes.Count == 1)
{
ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase;
return ref _context.Memory.GetRef<T>(address + offset);
}
throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
}
public KPageList GetPageList()
{
return _pageList;
}
}
}

View file

@ -0,0 +1,22 @@
using System.Numerics;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
static class CapabilityExtensions
{
public static CapabilityType GetCapabilityType(this uint cap)
{
return (CapabilityType)(((cap + 1) & ~cap) - 1);
}
public static uint GetFlag(this CapabilityType type)
{
return (uint)type + 1;
}
public static uint GetId(this CapabilityType type)
{
return (uint)BitOperations.TrailingZeroCount(type.GetFlag());
}
}
}
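The trailing-ones encoding above can be checked with a small worked example (the descriptor value is invented):
uint cap = 0b0011_0101_0111;                   // low bits end in three 1s
CapabilityType type = cap.GetCapabilityType(); // CorePriority, i.e. (1u << 3) - 1 == 7
uint flag = type.GetFlag();                    // 8 == 1u << 3
uint id = type.GetId();                        // 3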

View file

@ -0,0 +1,19 @@
namespace Ryujinx.HLE.HOS.Kernel.Process
{
enum CapabilityType : uint
{
CorePriority = (1u << 3) - 1,
SyscallMask = (1u << 4) - 1,
MapRange = (1u << 6) - 1,
MapIoPage = (1u << 7) - 1,
MapRegion = (1u << 10) - 1,
InterruptPair = (1u << 11) - 1,
ProgramType = (1u << 13) - 1,
KernelVersion = (1u << 14) - 1,
HandleTable = (1u << 15) - 1,
DebugFlags = (1u << 16) - 1,
Invalid = 0u,
Padding = ~0u
}
}

View file

@ -0,0 +1,465 @@
using Ryujinx.HLE.HOS.Diagnostics.Demangler;
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.HLE.Loaders.Elf;
using Ryujinx.Memory;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class HleProcessDebugger
{
private const int Mod0 = 'M' << 0 | 'O' << 8 | 'D' << 16 | '0' << 24;
private KProcess _owner;
private class Image
{
public ulong BaseAddress { get; }
public ulong Size { get; }
public ulong EndAddress => BaseAddress + Size;
public ElfSymbol[] Symbols { get; }
public Image(ulong baseAddress, ulong size, ElfSymbol[] symbols)
{
BaseAddress = baseAddress;
Size = size;
Symbols = symbols;
}
}
private List<Image> _images;
private int _loaded;
public HleProcessDebugger(KProcess owner)
{
_owner = owner;
_images = new List<Image>();
}
public string GetGuestStackTrace(KThread thread)
{
EnsureLoaded();
var context = thread.Context;
StringBuilder trace = new StringBuilder();
trace.AppendLine($"Process: {_owner.Name}, PID: {_owner.Pid}");
void AppendTrace(ulong address)
{
if (AnalyzePointer(out PointerInfo info, address, thread))
{
trace.AppendLine($" 0x{address:x16}\t{info.ImageDisplay}\t{info.SubDisplay}");
}
else
{
trace.AppendLine($" 0x{address:x16}");
}
}
if (context.IsAarch32)
{
ulong framePointer = context.GetX(11);
while (framePointer != 0)
{
if ((framePointer & 3) != 0 ||
!_owner.CpuMemory.IsMapped(framePointer) ||
!_owner.CpuMemory.IsMapped(framePointer + 4))
{
break;
}
AppendTrace(_owner.CpuMemory.Read<uint>(framePointer + 4));
framePointer = _owner.CpuMemory.Read<uint>(framePointer);
}
}
else
{
ulong framePointer = context.GetX(29);
while (framePointer != 0)
{
if ((framePointer & 7) != 0 ||
!_owner.CpuMemory.IsMapped(framePointer) ||
!_owner.CpuMemory.IsMapped(framePointer + 8))
{
break;
}
AppendTrace(_owner.CpuMemory.Read<ulong>(framePointer + 8));
framePointer = _owner.CpuMemory.Read<ulong>(framePointer);
}
}
return trace.ToString();
}
public string GetCpuRegisterPrintout(KThread thread)
{
EnsureLoaded();
var context = thread.Context;
StringBuilder sb = new StringBuilder();
string GetReg(int x)
{
var v = x == 32 ? context.Pc : context.GetX(x);
if (!AnalyzePointer(out PointerInfo info, v, thread))
{
return $"0x{v:x16}";
}
else
{
if (!string.IsNullOrEmpty(info.ImageName))
{
return $"0x{v:x16} ({info.ImageDisplay})\t=> {info.SubDisplay}";
}
else
{
return $"0x{v:x16} ({info.SpDisplay})";
}
}
}
for (int i = 0; i <= 28; i++)
{
sb.AppendLine($"\tX[{i:d2}]:\t{GetReg(i)}");
}
sb.AppendLine($"\tFP:\t{GetReg(29)}");
sb.AppendLine($"\tLR:\t{GetReg(30)}");
sb.AppendLine($"\tSP:\t{GetReg(31)}");
sb.AppendLine($"\tPC:\t{GetReg(32)}");
return sb.ToString();
}
private bool TryGetSubName(Image image, ulong address, out ElfSymbol symbol)
{
address -= image.BaseAddress;
int left = 0;
int right = image.Symbols.Length - 1;
while (left <= right)
{
int size = right - left;
int middle = left + (size >> 1);
symbol = image.Symbols[middle];
ulong endAddr = symbol.Value + symbol.Size;
if (address >= symbol.Value && address < endAddr)
{
return true;
}
if (address < symbol.Value)
{
right = middle - 1;
}
else
{
left = middle + 1;
}
}
symbol = default;
return false;
}
struct PointerInfo
{
public string ImageName;
public string SubName;
public ulong Offset;
public ulong SubOffset;
public string ImageDisplay => $"{ImageName}:0x{Offset:x4}";
public string SubDisplay => SubOffset == 0 ? SubName : $"{SubName}:0x{SubOffset:x4}";
public string SpDisplay => SubOffset == 0 ? "SP" : $"SP:-0x{SubOffset:x4}";
}
private bool AnalyzePointer(out PointerInfo info, ulong address, KThread thread)
{
if (AnalyzePointerFromImages(out info, address))
{
return true;
}
if (AnalyzePointerFromStack(out info, address, thread))
{
return true;
}
return false;
}
private bool AnalyzePointerFromImages(out PointerInfo info, ulong address)
{
info = default;
Image image = GetImage(address, out int imageIndex);
if (image == null)
{
// Value isn't a pointer to a known image...
return false;
}
info.Offset = address - image.BaseAddress;
// Try to find what this pointer is referring to
if (TryGetSubName(image, address, out ElfSymbol symbol))
{
info.SubName = symbol.Name;
// Demangle string if possible
if (info.SubName.StartsWith("_Z"))
{
info.SubName = Demangler.Parse(info.SubName);
}
info.SubOffset = info.Offset - symbol.Value;
}
else
{
info.SubName = "";
}
info.ImageName = GetGuessedNsoNameFromIndex(imageIndex);
return true;
}
private bool AnalyzePointerFromStack(out PointerInfo info, ulong address, KThread thread)
{
info = default;
ulong sp = thread.Context.GetX(31);
var memoryInfo = _owner.MemoryManager.QueryMemory(address);
MemoryState memoryState = memoryInfo.State;
if (!memoryState.HasFlag(MemoryState.Stack)) // Is this pointer within the stack?
{
return false;
}
info.SubOffset = address - sp;
return true;
}
private Image GetImage(ulong address, out int index)
{
lock (_images)
{
for (index = _images.Count - 1; index >= 0; index--)
{
if (address >= _images[index].BaseAddress && address < _images[index].EndAddress)
{
return _images[index];
}
}
}
return null;
}
private string GetGuessedNsoNameFromIndex(int index)
{
if ((uint)index > 11)
{
return "???";
}
if (index == 0)
{
return "rtld";
}
else if (index == 1)
{
return "main";
}
else if (index == GetImagesCount() - 1)
{
return "sdk";
}
else
{
return "subsdk" + (index - 2);
}
}
private int GetImagesCount()
{
lock (_images)
{
return _images.Count;
}
}
private void EnsureLoaded()
{
if (Interlocked.CompareExchange(ref _loaded, 1, 0) == 0)
{
ScanMemoryForTextSegments();
}
}
private void ScanMemoryForTextSegments()
{
ulong oldAddress = 0;
ulong address = 0;
while (address >= oldAddress)
{
KMemoryInfo info = _owner.MemoryManager.QueryMemory(address);
if (info.State == MemoryState.Reserved)
{
break;
}
if (info.State == MemoryState.CodeStatic && info.Permission == KMemoryPermission.ReadAndExecute)
{
LoadMod0Symbols(_owner.CpuMemory, info.Address, info.Size);
}
oldAddress = address;
address = info.Address + info.Size;
}
}
private void LoadMod0Symbols(IVirtualMemoryManager memory, ulong textOffset, ulong textSize)
{
ulong mod0Offset = textOffset + memory.Read<uint>(textOffset + 4);
if (mod0Offset < textOffset || !memory.IsMapped(mod0Offset) || (mod0Offset & 3) != 0)
{
return;
}
Dictionary<ElfDynamicTag, ulong> dynamic = new Dictionary<ElfDynamicTag, ulong>();
int mod0Magic = memory.Read<int>(mod0Offset + 0x0);
if (mod0Magic != Mod0)
{
return;
}
ulong dynamicOffset = memory.Read<uint>(mod0Offset + 0x4) + mod0Offset;
ulong bssStartOffset = memory.Read<uint>(mod0Offset + 0x8) + mod0Offset;
ulong bssEndOffset = memory.Read<uint>(mod0Offset + 0xc) + mod0Offset;
ulong ehHdrStartOffset = memory.Read<uint>(mod0Offset + 0x10) + mod0Offset;
ulong ehHdrEndOffset = memory.Read<uint>(mod0Offset + 0x14) + mod0Offset;
ulong modObjOffset = memory.Read<uint>(mod0Offset + 0x18) + mod0Offset;
bool isAArch32 = memory.Read<ulong>(dynamicOffset) > 0xFFFFFFFF || memory.Read<ulong>(dynamicOffset + 0x10) > 0xFFFFFFFF;
while (true)
{
ulong tagVal;
ulong value;
if (isAArch32)
{
tagVal = memory.Read<uint>(dynamicOffset + 0);
value = memory.Read<uint>(dynamicOffset + 4);
dynamicOffset += 0x8;
}
else
{
tagVal = memory.Read<ulong>(dynamicOffset + 0);
value = memory.Read<ulong>(dynamicOffset + 8);
dynamicOffset += 0x10;
}
ElfDynamicTag tag = (ElfDynamicTag)tagVal;
if (tag == ElfDynamicTag.DT_NULL)
{
break;
}
dynamic[tag] = value;
}
if (!dynamic.TryGetValue(ElfDynamicTag.DT_STRTAB, out ulong strTab) ||
!dynamic.TryGetValue(ElfDynamicTag.DT_SYMTAB, out ulong symTab) ||
!dynamic.TryGetValue(ElfDynamicTag.DT_SYMENT, out ulong symEntSize))
{
return;
}
ulong strTblAddr = textOffset + strTab;
ulong symTblAddr = textOffset + symTab;
List<ElfSymbol> symbols = new List<ElfSymbol>();
while (symTblAddr < strTblAddr)
{
ElfSymbol sym = isAArch32 ? GetSymbol32(memory, symTblAddr, strTblAddr) : GetSymbol64(memory, symTblAddr, strTblAddr);
symbols.Add(sym);
symTblAddr += symEntSize;
}
lock (_images)
{
_images.Add(new Image(textOffset, textSize, symbols.OrderBy(x => x.Value).ToArray()));
}
}
private ElfSymbol GetSymbol64(IVirtualMemoryManager memory, ulong address, ulong strTblAddr)
{
ElfSymbol64 sym = memory.Read<ElfSymbol64>(address);
uint nameIndex = sym.NameOffset;
string name = string.Empty;
for (int chr; (chr = memory.Read<byte>(strTblAddr + nameIndex++)) != 0;)
{
name += (char)chr;
}
return new ElfSymbol(name, sym.Info, sym.Other, sym.SectionIndex, sym.ValueAddress, sym.Size);
}
private ElfSymbol GetSymbol32(IVirtualMemoryManager memory, ulong address, ulong strTblAddr)
{
ElfSymbol32 sym = memory.Read<ElfSymbol32>(address);
uint nameIndex = sym.NameOffset;
string name = string.Empty;
for (int chr; (chr = memory.Read<byte>(strTblAddr + nameIndex++)) != 0;)
{
name += (char)chr;
}
return new ElfSymbol(name, sym.Info, sym.Other, sym.SectionIndex, sym.ValueAddress, sym.Size);
}
}
}

View file

@ -0,0 +1,15 @@
using Ryujinx.Cpu;
using Ryujinx.Memory;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
interface IProcessContext : IDisposable
{
IVirtualMemoryManager AddressSpace { get; }
IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks);
void Execute(IExecutionContext context, ulong codeAddress);
void InvalidateCacheRegion(ulong address, ulong size);
}
}

View file

@ -0,0 +1,9 @@
using Ryujinx.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
interface IProcessContextFactory
{
IProcessContext Create(KernelContext context, ulong pid, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler, bool for64Bit);
}
}

View file

@ -0,0 +1,83 @@
using System;
using System.Numerics;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class KContextIdManager
{
private const int IdMasksCount = 8;
private int[] _idMasks;
private int _nextFreeBitHint;
public KContextIdManager()
{
_idMasks = new int[IdMasksCount];
}
public int GetId()
{
lock (_idMasks)
{
int id = 0;
if (!TestBit(_nextFreeBitHint))
{
id = _nextFreeBitHint;
}
else
{
for (int index = 0; index < IdMasksCount; index++)
{
int mask = _idMasks[index];
int firstFreeBit = BitOperations.LeadingZeroCount((uint)((mask + 1) & ~mask));
if (firstFreeBit < 32)
{
int baseBit = index * 32 + 31;
id = baseBit - firstFreeBit;
break;
}
else if (index == IdMasksCount - 1)
{
throw new InvalidOperationException("Maximum number of Ids reached!");
}
}
}
_nextFreeBitHint = id + 1;
SetBit(id);
return id;
}
}
public void PutId(int id)
{
lock (_idMasks)
{
ClearBit(id);
}
}
private bool TestBit(int bit)
{
return (_idMasks[bit / 32] & (1 << (bit & 31))) != 0;
}
private void SetBit(int bit)
{
_idMasks[bit / 32] |= (1 << (bit & 31));
}
private void ClearBit(int bit)
{
_idMasks[bit / 32] &= ~(1 << (bit & 31));
}
}
}

View file

@ -0,0 +1,19 @@
using Ryujinx.HLE.HOS.Kernel.Common;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class KHandleEntry
{
public KHandleEntry Next { get; set; }
public int Index { get; private set; }
public ushort HandleId { get; set; }
public KAutoObject Obj { get; set; }
public KHandleEntry(int index)
{
Index = index;
}
}
}

View file

@ -0,0 +1,285 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Horizon.Common;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class KHandleTable
{
public const int SelfThreadHandle = (0x1ffff << 15) | 0;
public const int SelfProcessHandle = (0x1ffff << 15) | 1;
private readonly KernelContext _context;
private KHandleEntry[] _table;
private KHandleEntry _tableHead;
private KHandleEntry _nextFreeEntry;
private int _activeSlotsCount;
private uint _size;
private ushort _idCounter;
public KHandleTable(KernelContext context)
{
_context = context;
}
public Result Initialize(uint size)
{
if (size > 1024)
{
return KernelResult.OutOfMemory;
}
if (size < 1)
{
size = 1024;
}
_size = size;
_idCounter = 1;
_table = new KHandleEntry[size];
_tableHead = new KHandleEntry(0);
KHandleEntry entry = _tableHead;
for (int index = 0; index < size; index++)
{
_table[index] = entry;
entry.Next = new KHandleEntry(index + 1);
entry = entry.Next;
}
_table[size - 1].Next = null;
_nextFreeEntry = _tableHead;
return Result.Success;
}
public Result GenerateHandle(KAutoObject obj, out int handle)
{
handle = 0;
lock (_table)
{
if (_activeSlotsCount >= _size)
{
return KernelResult.HandleTableFull;
}
KHandleEntry entry = _nextFreeEntry;
_nextFreeEntry = entry.Next;
entry.Obj = obj;
entry.HandleId = _idCounter;
_activeSlotsCount++;
handle = (_idCounter << 15) | entry.Index;
obj.IncrementReferenceCount();
if ((short)(_idCounter + 1) >= 0)
{
_idCounter++;
}
else
{
_idCounter = 1;
}
}
return Result.Success;
}
public Result ReserveHandle(out int handle)
{
handle = 0;
lock (_table)
{
if (_activeSlotsCount >= _size)
{
return KernelResult.HandleTableFull;
}
KHandleEntry entry = _nextFreeEntry;
_nextFreeEntry = entry.Next;
_activeSlotsCount++;
handle = (_idCounter << 15) | entry.Index;
if ((short)(_idCounter + 1) >= 0)
{
_idCounter++;
}
else
{
_idCounter = 1;
}
}
return Result.Success;
}
public void CancelHandleReservation(int handle)
{
int index = (handle >> 0) & 0x7fff;
lock (_table)
{
KHandleEntry entry = _table[index];
entry.Obj = null;
entry.Next = _nextFreeEntry;
_nextFreeEntry = entry;
_activeSlotsCount--;
}
}
public void SetReservedHandleObj(int handle, KAutoObject obj)
{
int index = (handle >> 0) & 0x7fff;
int handleId = (handle >> 15);
lock (_table)
{
KHandleEntry entry = _table[index];
entry.Obj = obj;
entry.HandleId = (ushort)handleId;
obj.IncrementReferenceCount();
}
}
public bool CloseHandle(int handle)
{
if ((handle >> 30) != 0 ||
handle == SelfThreadHandle ||
handle == SelfProcessHandle)
{
return false;
}
int index = (handle >> 0) & 0x7fff;
int handleId = (handle >> 15);
KAutoObject obj = null;
bool result = false;
lock (_table)
{
if (handleId != 0 && index < _size)
{
KHandleEntry entry = _table[index];
if ((obj = entry.Obj) != null && entry.HandleId == handleId)
{
entry.Obj = null;
entry.Next = _nextFreeEntry;
_nextFreeEntry = entry;
_activeSlotsCount--;
result = true;
}
}
}
if (result)
{
obj.DecrementReferenceCount();
}
return result;
}
public T GetObject<T>(int handle) where T : KAutoObject
{
int index = (handle >> 0) & 0x7fff;
int handleId = (handle >> 15);
lock (_table)
{
if ((handle >> 30) == 0 && handleId != 0 && index < _size)
{
KHandleEntry entry = _table[index];
if (entry.HandleId == handleId && entry.Obj is T obj)
{
return obj;
}
}
}
return default;
}
public KThread GetKThread(int handle)
{
if (handle == SelfThreadHandle)
{
return KernelStatic.GetCurrentThread();
}
else
{
return GetObject<KThread>(handle);
}
}
public KProcess GetKProcess(int handle)
{
if (handle == SelfProcessHandle)
{
return KernelStatic.GetCurrentProcess();
}
else
{
return GetObject<KProcess>(handle);
}
}
public void Destroy()
{
lock (_table)
{
for (int index = 0; index < _size; index++)
{
KHandleEntry entry = _table[index];
if (entry.Obj != null)
{
if (entry.Obj is IDisposable disposableObj)
{
disposableObj.Dispose();
}
entry.Obj.DecrementReferenceCount();
entry.Obj = null;
entry.Next = _nextFreeEntry;
_nextFreeEntry = entry;
}
}
}
}
}
}
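For reference, a handle produced above packs the 15-bit table index in the low bits and the per-entry id above it; a short sketch with illustrative values:
int index = 5;
ushort handleId = 42;
int handle = (handleId << 15) | index; // 0x150005
int decodedIndex = handle & 0x7fff;    // 5
int decodedId = handle >> 15;          // 42, never 0 because _idCounter restarts at 1
// Pseudo handles such as SelfThreadHandle have bits 30-31 set ((handle >> 30) != 0) and bypass the table entirely.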

File diff suppressed because it is too large

View file

@ -0,0 +1,328 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Horizon.Common;
using System;
using System.Numerics;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class KProcessCapabilities
{
public byte[] SvcAccessMask { get; }
public byte[] IrqAccessMask { get; }
public ulong AllowedCpuCoresMask { get; private set; }
public ulong AllowedThreadPriosMask { get; private set; }
public uint DebuggingFlags { get; private set; }
public uint HandleTableSize { get; private set; }
public uint KernelReleaseVersion { get; private set; }
public uint ApplicationType { get; private set; }
public KProcessCapabilities()
{
// One bit per supervisor call: array length = SupervisorCallCount / 8 bits per byte.
SvcAccessMask = new byte[KernelConstants.SupervisorCallCount / 8];
IrqAccessMask = new byte[0x80];
}
public Result InitializeForKernel(ReadOnlySpan<uint> capabilities, KPageTableBase memoryManager)
{
AllowedCpuCoresMask = 0xf;
AllowedThreadPriosMask = ulong.MaxValue;
DebuggingFlags &= ~3u;
KernelReleaseVersion = KProcess.KernelVersionPacked;
return Parse(capabilities, memoryManager);
}
public Result InitializeForUser(ReadOnlySpan<uint> capabilities, KPageTableBase memoryManager)
{
return Parse(capabilities, memoryManager);
}
private Result Parse(ReadOnlySpan<uint> capabilities, KPageTableBase memoryManager)
{
int mask0 = 0;
int mask1 = 0;
for (int index = 0; index < capabilities.Length; index++)
{
uint cap = capabilities[index];
if (cap.GetCapabilityType() != CapabilityType.MapRange)
{
Result result = ParseCapability(cap, ref mask0, ref mask1, memoryManager);
if (result != Result.Success)
{
return result;
}
}
else
{
if ((uint)index + 1 >= capabilities.Length)
{
return KernelResult.InvalidCombination;
}
uint prevCap = cap;
cap = capabilities[++index];
if (((cap + 1) & ~cap) != 0x40)
{
return KernelResult.InvalidCombination;
}
if ((cap & 0x78000000) != 0)
{
return KernelResult.MaximumExceeded;
}
if ((cap & 0x7ffff80) == 0)
{
return KernelResult.InvalidSize;
}
long address = ((long)prevCap << 5) & 0xffffff000;
long size = ((long)cap << 5) & 0xfffff000;
if (((ulong)(address + size - 1) >> 36) != 0)
{
return KernelResult.InvalidAddress;
}
KMemoryPermission perm = (prevCap >> 31) != 0
? KMemoryPermission.Read
: KMemoryPermission.ReadAndWrite;
Result result;
if ((cap >> 31) != 0)
{
result = memoryManager.MapNormalMemory(address, size, perm);
}
else
{
result = memoryManager.MapIoMemory(address, size, perm);
}
if (result != Result.Success)
{
return result;
}
}
}
return Result.Success;
}
private Result ParseCapability(uint cap, ref int mask0, ref int mask1, KPageTableBase memoryManager)
{
CapabilityType code = cap.GetCapabilityType();
if (code == CapabilityType.Invalid)
{
return KernelResult.InvalidCapability;
}
else if (code == CapabilityType.Padding)
{
return Result.Success;
}
int codeMask = 1 << (32 - BitOperations.LeadingZeroCount(code.GetFlag() + 1));
// Check if the property was already set.
if (((mask0 & codeMask) & 0x1e008) != 0)
{
return KernelResult.InvalidCombination;
}
mask0 |= codeMask;
switch (code)
{
case CapabilityType.CorePriority:
{
if (AllowedCpuCoresMask != 0 || AllowedThreadPriosMask != 0)
{
return KernelResult.InvalidCapability;
}
uint lowestCpuCore = (cap >> 16) & 0xff;
uint highestCpuCore = (cap >> 24) & 0xff;
if (lowestCpuCore > highestCpuCore)
{
return KernelResult.InvalidCombination;
}
uint highestThreadPrio = (cap >> 4) & 0x3f;
uint lowestThreadPrio = (cap >> 10) & 0x3f;
if (lowestThreadPrio > highestThreadPrio)
{
return KernelResult.InvalidCombination;
}
if (highestCpuCore >= KScheduler.CpuCoresCount)
{
return KernelResult.InvalidCpuCore;
}
AllowedCpuCoresMask = GetMaskFromMinMax(lowestCpuCore, highestCpuCore);
AllowedThreadPriosMask = GetMaskFromMinMax(lowestThreadPrio, highestThreadPrio);
break;
}
case CapabilityType.SyscallMask:
{
int slot = ((int)cap >> 29) & 7;
int svcSlotMask = 1 << slot;
if ((mask1 & svcSlotMask) != 0)
{
return KernelResult.InvalidCombination;
}
mask1 |= svcSlotMask;
uint svcMask = (cap >> 5) & 0xffffff;
int baseSvc = slot * 24;
for (int index = 0; index < 24; index++)
{
if (((svcMask >> index) & 1) == 0)
{
continue;
}
int svcId = baseSvc + index;
if (svcId >= KernelConstants.SupervisorCallCount)
{
return KernelResult.MaximumExceeded;
}
SvcAccessMask[svcId / 8] |= (byte)(1 << (svcId & 7));
}
break;
}
case CapabilityType.MapIoPage:
{
long address = ((long)cap << 4) & 0xffffff000;
memoryManager.MapIoMemory(address, KPageTableBase.PageSize, KMemoryPermission.ReadAndWrite);
break;
}
case CapabilityType.MapRegion:
{
// TODO: Implement capabilities for MapRegion
break;
}
case CapabilityType.InterruptPair:
{
// TODO: GIC distributor check.
int irq0 = ((int)cap >> 12) & 0x3ff;
int irq1 = ((int)cap >> 22) & 0x3ff;
if (irq0 != 0x3ff)
{
IrqAccessMask[irq0 / 8] |= (byte)(1 << (irq0 & 7));
}
if (irq1 != 0x3ff)
{
IrqAccessMask[irq1 / 8] |= (byte)(1 << (irq1 & 7));
}
break;
}
case CapabilityType.ProgramType:
{
uint applicationType = (cap >> 14);
if (applicationType > 7)
{
return KernelResult.ReservedValue;
}
ApplicationType = applicationType;
break;
}
case CapabilityType.KernelVersion:
{
// Note: This check is bugged on the real kernel too; we are just replicating the bug here.
if ((KernelReleaseVersion >> 17) != 0 || cap < 0x80000)
{
return KernelResult.ReservedValue;
}
KernelReleaseVersion = cap;
break;
}
case CapabilityType.HandleTable:
{
uint handleTableSize = cap >> 26;
if (handleTableSize > 0x3ff)
{
return KernelResult.ReservedValue;
}
HandleTableSize = handleTableSize;
break;
}
case CapabilityType.DebugFlags:
{
uint debuggingFlags = cap >> 19;
if (debuggingFlags > 3)
{
return KernelResult.ReservedValue;
}
DebuggingFlags &= ~3u;
DebuggingFlags |= debuggingFlags;
break;
}
default: return KernelResult.InvalidCapability;
}
return Result.Success;
}
private static ulong GetMaskFromMinMax(uint min, uint max)
{
uint range = max - min + 1;
if (range == 64)
{
return ulong.MaxValue;
}
ulong mask = (1UL << (int)range) - 1;
return mask << (int)min;
}
}
}
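To make the CorePriority parsing above concrete, a standalone re-computation of the min/max masks (values are invented; this mirrors the private GetMaskFromMinMax rather than calling it):
static ulong MaskFromMinMax(uint min, uint max)
{
uint range = max - min + 1;
return range == 64 ? ulong.MaxValue : ((1UL << (int)range) - 1) << (int)min;
}
// Cores 0-3 allowed, thread priorities 44-59 allowed:
ulong coreMask = MaskFromMinMax(0, 3);   // 0b1111
ulong prioMask = MaskFromMinMax(44, 59); // bits 44 through 59 set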

View file

@ -0,0 +1,77 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class KTlsPageInfo
{
public const int TlsEntrySize = 0x200;
public ulong PageVirtualAddress { get; }
public ulong PagePhysicalAddress { get; }
private readonly bool[] _isSlotFree;
public KTlsPageInfo(ulong pageVirtualAddress, ulong pagePhysicalAddress)
{
PageVirtualAddress = pageVirtualAddress;
PagePhysicalAddress = pagePhysicalAddress;
_isSlotFree = new bool[KPageTableBase.PageSize / TlsEntrySize];
for (int index = 0; index < _isSlotFree.Length; index++)
{
_isSlotFree[index] = true;
}
}
public bool TryGetFreePage(out ulong address)
{
address = PageVirtualAddress;
for (int index = 0; index < _isSlotFree.Length; index++)
{
if (_isSlotFree[index])
{
_isSlotFree[index] = false;
return true;
}
address += TlsEntrySize;
}
address = 0;
return false;
}
public bool IsFull()
{
bool hasFree = false;
for (int index = 0; index < _isSlotFree.Length; index++)
{
hasFree |= _isSlotFree[index];
}
return !hasFree;
}
public bool IsEmpty()
{
bool allFree = true;
for (int index = 0; index < _isSlotFree.Length; index++)
{
allFree &= _isSlotFree[index];
}
return allFree;
}
public void FreeTlsSlot(ulong address)
{
_isSlotFree[(address - PageVirtualAddress) / TlsEntrySize] = true;
}
}
}
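The slot arithmetic above works out as follows (a brief worked example with the constants from this file):
// KPageTableBase.PageSize / TlsEntrySize = 0x1000 / 0x200 = 8 TLS slots per page.
// An entry handed out at PageVirtualAddress + 0x400 maps back to slot (0x400 / 0x200) == 2,
// which is exactly the index FreeTlsSlot clears.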

View file

@ -0,0 +1,61 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class KTlsPageManager
{
private const int TlsEntrySize = 0x200;
private long _pagePosition;
private int _usedSlots;
private bool[] _slots;
public bool IsEmpty => _usedSlots == 0;
public bool IsFull => _usedSlots == _slots.Length;
public KTlsPageManager(long pagePosition)
{
_pagePosition = pagePosition;
_slots = new bool[KPageTableBase.PageSize / TlsEntrySize];
}
public bool TryGetFreeTlsAddr(out long position)
{
position = _pagePosition;
for (int index = 0; index < _slots.Length; index++)
{
if (!_slots[index])
{
_slots[index] = true;
_usedSlots++;
return true;
}
position += TlsEntrySize;
}
position = 0;
return false;
}
public void FreeTlsSlot(int slot)
{
if ((uint)slot >= _slots.Length)
{
throw new ArgumentOutOfRangeException(nameof(slot));
}
_slots[slot] = false;
_usedSlots--;
}
}
}

View file

@ -0,0 +1,34 @@
using Ryujinx.Cpu;
using Ryujinx.Memory;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class ProcessContext : IProcessContext
{
public IVirtualMemoryManager AddressSpace { get; }
public ProcessContext(IVirtualMemoryManager asManager)
{
AddressSpace = asManager;
}
public IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks)
{
return new ProcessExecutionContext();
}
public void Execute(IExecutionContext context, ulong codeAddress)
{
throw new NotSupportedException();
}
public void InvalidateCacheRegion(ulong address, ulong size)
{
}
public void Dispose()
{
}
}
}

View file

@ -0,0 +1,12 @@
using Ryujinx.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class ProcessContextFactory : IProcessContextFactory
{
public IProcessContext Create(KernelContext context, ulong pid, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler, bool for64Bit)
{
return new ProcessContext(new AddressSpaceManager(context.Memory, addressSpaceSize));
}
}
}

View file

@ -0,0 +1,41 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
[Flags]
enum ProcessCreationFlags
{
Is64Bit = 1 << 0,
AddressSpaceShift = 1,
AddressSpace32Bit = 0 << AddressSpaceShift,
AddressSpace64BitDeprecated = 1 << AddressSpaceShift,
AddressSpace32BitWithoutAlias = 2 << AddressSpaceShift,
AddressSpace64Bit = 3 << AddressSpaceShift,
AddressSpaceMask = 7 << AddressSpaceShift,
EnableDebug = 1 << 4,
EnableAslr = 1 << 5,
IsApplication = 1 << 6,
DeprecatedUseSecureMemory = 1 << 7,
PoolPartitionShift = 7,
PoolPartitionApplication = 0 << PoolPartitionShift,
PoolPartitionApplet = 1 << PoolPartitionShift,
PoolPartitionSystem = 2 << PoolPartitionShift,
PoolPartitionSystemNonSecure = 3 << PoolPartitionShift,
PoolPartitionMask = 0xf << PoolPartitionShift,
OptimizeMemoryAllocation = 1 << 11,
All =
Is64Bit |
AddressSpaceMask |
EnableDebug |
EnableAslr |
IsApplication |
DeprecatedUseSecureMemory |
PoolPartitionMask |
OptimizeMemoryAllocation
}
}
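Because the address-space and pool-partition selections are multi-bit fields packed into these flags, a value is built by OR-ing one choice from each field with the single-bit flags, and read back with the corresponding mask rather than HasFlag. An illustrative combination:
// Sketch: a plausible flag set for a 64-bit application process.
ProcessCreationFlags flags =
    ProcessCreationFlags.Is64Bit |
    ProcessCreationFlags.AddressSpace64Bit |
    ProcessCreationFlags.EnableAslr |
    ProcessCreationFlags.IsApplication |
    ProcessCreationFlags.PoolPartitionApplication;

ProcessCreationFlags addressSpace = flags & ProcessCreationFlags.AddressSpaceMask;   // AddressSpace64Bit
ProcessCreationFlags poolPartition = flags & ProcessCreationFlags.PoolPartitionMask; // PoolPartitionApplication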

View file

@ -0,0 +1,37 @@
namespace Ryujinx.HLE.HOS.Kernel.Process
{
readonly struct ProcessCreationInfo
{
public string Name { get; }
public int Version { get; }
public ulong TitleId { get; }
public ulong CodeAddress { get; }
public int CodePagesCount { get; }
public ProcessCreationFlags Flags { get; }
public int ResourceLimitHandle { get; }
public int SystemResourcePagesCount { get; }
public ProcessCreationInfo(
string name,
int version,
ulong titleId,
ulong codeAddress,
int codePagesCount,
ProcessCreationFlags flags,
int resourceLimitHandle,
int systemResourcePagesCount)
{
Name = name;
Version = version;
TitleId = titleId;
CodeAddress = codeAddress;
CodePagesCount = codePagesCount;
Flags = flags;
ResourceLimitHandle = resourceLimitHandle;
SystemResourcePagesCount = systemResourcePagesCount;
}
}
}

View file

@ -0,0 +1,46 @@
using ARMeilleure.State;
using Ryujinx.Cpu;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class ProcessExecutionContext : IExecutionContext
{
public ulong Pc => 0UL;
public ulong CntfrqEl0 { get; set; }
public ulong CntpctEl0 => 0UL;
public long TpidrEl0 { get; set; }
public long TpidrroEl0 { get; set; }
public uint Pstate { get; set; }
public uint Fpcr { get; set; }
public uint Fpsr { get; set; }
public bool IsAarch32 { get => false; set { } }
public bool Running { get; private set; } = true;
private readonly ulong[] _x = new ulong[32];
public ulong GetX(int index) => _x[index];
public void SetX(int index, ulong value) => _x[index] = value;
public V128 GetV(int index) => default;
public void SetV(int index, V128 value) { }
public void RequestInterrupt()
{
}
public void StopRunning()
{
Running = false;
}
public void Dispose()
{
}
}
}

View file

@ -0,0 +1,14 @@
namespace Ryujinx.HLE.HOS.Kernel.Process
{
enum ProcessState : byte
{
Created = 0,
CreatedAttached = 1,
Started = 2,
Crashed = 3,
Attached = 4,
Exiting = 5,
Exited = 6,
DebugSuspended = 7
}
}

View file

@ -0,0 +1,24 @@
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Process
{
class ProcessTamperInfo
{
public KProcess Process { get; }
public IEnumerable<string> BuildIds { get; }
public IEnumerable<ulong> CodeAddresses { get; }
public ulong HeapAddress { get; }
public ulong AliasAddress { get; }
public ulong AslrAddress { get; }
public ProcessTamperInfo(KProcess process, IEnumerable<string> buildIds, IEnumerable<ulong> codeAddresses, ulong heapAddress, ulong aliasAddress, ulong aslrAddress)
{
Process = process;
BuildIds = buildIds;
CodeAddresses = codeAddresses;
HeapAddress = heapAddress;
AliasAddress = aliasAddress;
AslrAddress = aslrAddress;
}
}
}

View file

@ -0,0 +1,10 @@
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
enum CodeMemoryOperation : uint
{
Map,
MapToOwner,
Unmap,
UnmapFromOwner
};
}

View file

@ -0,0 +1,34 @@
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
enum InfoType : uint
{
CoreMask,
PriorityMask,
AliasRegionAddress,
AliasRegionSize,
HeapRegionAddress,
HeapRegionSize,
TotalMemorySize,
UsedMemorySize,
DebuggerAttached,
ResourceLimit,
IdleTickCount,
RandomEntropy,
AslrRegionAddress,
AslrRegionSize,
StackRegionAddress,
StackRegionSize,
SystemResourceSizeTotal,
SystemResourceSizeUsed,
ProgramId,
// NOTE: Added in 4.0.0, removed in 5.0.0.
InitialProcessIdRange,
UserExceptionContextAddress,
TotalNonSystemMemorySize,
UsedNonSystemMemorySize,
IsApplication,
FreeThreadCount,
ThreadTickCount,
MesosphereCurrentProcess = 65001
}
}

View file

@ -0,0 +1,37 @@
using Ryujinx.HLE.HOS.Kernel.Memory;
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
struct MemoryInfo
{
public ulong Address;
public ulong Size;
public MemoryState State;
public MemoryAttribute Attribute;
public KMemoryPermission Permission;
public int IpcRefCount;
public int DeviceRefCount;
#pragma warning disable CS0414
private int _padding;
#pragma warning restore CS0414
public MemoryInfo(
ulong address,
ulong size,
MemoryState state,
MemoryAttribute attribute,
KMemoryPermission permission,
int ipcRefCount,
int deviceRefCount)
{
Address = address;
Size = size;
State = state;
Attribute = attribute;
Permission = permission;
IpcRefCount = ipcRefCount;
DeviceRefCount = deviceRefCount;
_padding = 0;
}
}
}

View file

@ -0,0 +1,9 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
[AttributeUsage(AttributeTargets.Parameter, AllowMultiple = false, Inherited = true)]
class PointerSizedAttribute : Attribute
{
}
}

View file

@ -0,0 +1,15 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
[AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = true)]
class SvcAttribute : Attribute
{
public int Id { get; }
public SvcAttribute(int id)
{
Id = id;
}
}
}

View file

@ -0,0 +1,9 @@
using System;
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
[AttributeUsage(AttributeTargets.Class, AllowMultiple = false, Inherited = true)]
class SvcImplAttribute : Attribute
{
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,44 @@
using Ryujinx.Cpu;
using Ryujinx.HLE.HOS.Kernel.Threading;
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
partial class SyscallHandler
{
private readonly KernelContext _context;
public SyscallHandler(KernelContext context)
{
_context = context;
}
public void SvcCall(IExecutionContext context, ulong address, int id)
{
KThread currentThread = KernelStatic.GetCurrentThread();
if (currentThread.Owner != null &&
currentThread.GetUserDisableCount() != 0 &&
currentThread.Owner.PinnedThreads[currentThread.CurrentCore] == null)
{
_context.CriticalSection.Enter();
currentThread.Owner.PinThread(currentThread);
currentThread.SetUserInterruptFlag();
_context.CriticalSection.Leave();
}
if (context.IsAarch32)
{
SyscallDispatch.Dispatch32(_context.Syscall, context, id);
}
else
{
SyscallDispatch.Dispatch64(_context.Syscall, context, id);
}
currentThread.HandlePostSyscall();
}
}
}

View file

@ -0,0 +1,22 @@
using ARMeilleure.State;
using Ryujinx.Common.Memory;
namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
{
struct ThreadContext
{
public Array29<ulong> Registers;
public ulong Fp;
public ulong Lr;
public ulong Sp;
public ulong Pc;
public uint Pstate;
#pragma warning disable CS0169
private uint _padding;
#pragma warning restore CS0169
public Array32<V128> FpuRegisters;
public uint Fpcr;
public uint Fpsr;
public ulong Tpidr;
}
}

View file

@ -0,0 +1,9 @@
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
enum ArbitrationType
{
WaitIfLessThan = 0,
DecrementAndWaitIfLessThan = 1,
WaitIfEqual = 2
}
}

View file

@ -0,0 +1,581 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KAddressArbiter
{
private const int HasListenersMask = 0x40000000;
private readonly KernelContext _context;
private readonly List<KThread> _condVarThreads;
private readonly List<KThread> _arbiterThreads;
public KAddressArbiter(KernelContext context)
{
_context = context;
_condVarThreads = new List<KThread>();
_arbiterThreads = new List<KThread>();
}
public Result ArbitrateLock(int ownerHandle, ulong mutexAddress, int requesterHandle)
{
KThread currentThread = KernelStatic.GetCurrentThread();
_context.CriticalSection.Enter();
if (currentThread.TerminationRequested)
{
_context.CriticalSection.Leave();
return KernelResult.ThreadTerminating;
}
currentThread.SignaledObj = null;
currentThread.ObjSyncResult = Result.Success;
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (!KernelTransfer.UserToKernel(out int mutexValue, mutexAddress))
{
_context.CriticalSection.Leave();
return KernelResult.InvalidMemState;
}
if (mutexValue != (ownerHandle | HasListenersMask))
{
_context.CriticalSection.Leave();
return Result.Success;
}
KThread mutexOwner = currentProcess.HandleTable.GetObject<KThread>(ownerHandle);
if (mutexOwner == null)
{
_context.CriticalSection.Leave();
return KernelResult.InvalidHandle;
}
currentThread.MutexAddress = mutexAddress;
currentThread.ThreadHandleForUserMutex = requesterHandle;
mutexOwner.AddMutexWaiter(currentThread);
currentThread.Reschedule(ThreadSchedState.Paused);
_context.CriticalSection.Leave();
_context.CriticalSection.Enter();
if (currentThread.MutexOwner != null)
{
currentThread.MutexOwner.RemoveMutexWaiter(currentThread);
}
_context.CriticalSection.Leave();
return currentThread.ObjSyncResult;
}
public Result ArbitrateUnlock(ulong mutexAddress)
{
_context.CriticalSection.Enter();
KThread currentThread = KernelStatic.GetCurrentThread();
(int mutexValue, KThread newOwnerThread) = MutexUnlock(currentThread, mutexAddress);
Result result = Result.Success;
if (!KernelTransfer.KernelToUser(mutexAddress, mutexValue))
{
result = KernelResult.InvalidMemState;
}
if (result != Result.Success && newOwnerThread != null)
{
newOwnerThread.SignaledObj = null;
newOwnerThread.ObjSyncResult = result;
}
_context.CriticalSection.Leave();
return result;
}
public Result WaitProcessWideKeyAtomic(ulong mutexAddress, ulong condVarAddress, int threadHandle, long timeout)
{
_context.CriticalSection.Enter();
KThread currentThread = KernelStatic.GetCurrentThread();
currentThread.SignaledObj = null;
currentThread.ObjSyncResult = KernelResult.TimedOut;
if (currentThread.TerminationRequested)
{
_context.CriticalSection.Leave();
return KernelResult.ThreadTerminating;
}
(int mutexValue, _) = MutexUnlock(currentThread, mutexAddress);
KernelTransfer.KernelToUser(condVarAddress, 1);
if (!KernelTransfer.KernelToUser(mutexAddress, mutexValue))
{
_context.CriticalSection.Leave();
return KernelResult.InvalidMemState;
}
currentThread.MutexAddress = mutexAddress;
currentThread.ThreadHandleForUserMutex = threadHandle;
currentThread.CondVarAddress = condVarAddress;
_condVarThreads.Add(currentThread);
if (timeout != 0)
{
currentThread.Reschedule(ThreadSchedState.Paused);
if (timeout > 0)
{
_context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
}
}
_context.CriticalSection.Leave();
if (timeout > 0)
{
_context.TimeManager.UnscheduleFutureInvocation(currentThread);
}
_context.CriticalSection.Enter();
if (currentThread.MutexOwner != null)
{
currentThread.MutexOwner.RemoveMutexWaiter(currentThread);
}
_condVarThreads.Remove(currentThread);
_context.CriticalSection.Leave();
return currentThread.ObjSyncResult;
}
private (int, KThread) MutexUnlock(KThread currentThread, ulong mutexAddress)
{
KThread newOwnerThread = currentThread.RelinquishMutex(mutexAddress, out int count);
int mutexValue = 0;
if (newOwnerThread != null)
{
mutexValue = newOwnerThread.ThreadHandleForUserMutex;
if (count >= 2)
{
mutexValue |= HasListenersMask;
}
newOwnerThread.SignaledObj = null;
newOwnerThread.ObjSyncResult = Result.Success;
newOwnerThread.ReleaseAndResume();
}
return (mutexValue, newOwnerThread);
}
public void SignalProcessWideKey(ulong address, int count)
{
_context.CriticalSection.Enter();
WakeThreads(_condVarThreads, count, TryAcquireMutex, x => x.CondVarAddress == address);
if (!_condVarThreads.Any(x => x.CondVarAddress == address))
{
KernelTransfer.KernelToUser(address, 0);
}
_context.CriticalSection.Leave();
}
private static void TryAcquireMutex(KThread requester)
{
ulong address = requester.MutexAddress;
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (!currentProcess.CpuMemory.IsMapped(address))
{
// Invalid address.
requester.SignaledObj = null;
requester.ObjSyncResult = KernelResult.InvalidMemState;
return;
}
ref int mutexRef = ref currentProcess.CpuMemory.GetRef<int>(address);
int mutexValue, newMutexValue;
do
{
mutexValue = mutexRef;
if (mutexValue != 0)
{
// Update value to indicate there is a mutex waiter now.
newMutexValue = mutexValue | HasListenersMask;
}
else
{
// No thread owning the mutex, assign to requesting thread.
newMutexValue = requester.ThreadHandleForUserMutex;
}
}
while (Interlocked.CompareExchange(ref mutexRef, newMutexValue, mutexValue) != mutexValue);
if (mutexValue == 0)
{
// We now own the mutex.
requester.SignaledObj = null;
requester.ObjSyncResult = Result.Success;
requester.ReleaseAndResume();
return;
}
mutexValue &= ~HasListenersMask;
KThread mutexOwner = currentProcess.HandleTable.GetObject<KThread>(mutexValue);
if (mutexOwner != null)
{
// Mutex already belongs to another thread, wait for it.
mutexOwner.AddMutexWaiter(requester);
}
else
{
// Invalid mutex owner.
requester.SignaledObj = null;
requester.ObjSyncResult = KernelResult.InvalidHandle;
requester.ReleaseAndResume();
}
}
public Result WaitForAddressIfEqual(ulong address, int value, long timeout)
{
KThread currentThread = KernelStatic.GetCurrentThread();
_context.CriticalSection.Enter();
if (currentThread.TerminationRequested)
{
_context.CriticalSection.Leave();
return KernelResult.ThreadTerminating;
}
currentThread.SignaledObj = null;
currentThread.ObjSyncResult = KernelResult.TimedOut;
if (!KernelTransfer.UserToKernel(out int currentValue, address))
{
_context.CriticalSection.Leave();
return KernelResult.InvalidMemState;
}
if (currentValue == value)
{
if (timeout == 0)
{
_context.CriticalSection.Leave();
return KernelResult.TimedOut;
}
currentThread.MutexAddress = address;
currentThread.WaitingInArbitration = true;
_arbiterThreads.Add(currentThread);
currentThread.Reschedule(ThreadSchedState.Paused);
if (timeout > 0)
{
_context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
}
_context.CriticalSection.Leave();
if (timeout > 0)
{
_context.TimeManager.UnscheduleFutureInvocation(currentThread);
}
_context.CriticalSection.Enter();
if (currentThread.WaitingInArbitration)
{
_arbiterThreads.Remove(currentThread);
currentThread.WaitingInArbitration = false;
}
_context.CriticalSection.Leave();
return currentThread.ObjSyncResult;
}
_context.CriticalSection.Leave();
return KernelResult.InvalidState;
}
public Result WaitForAddressIfLessThan(ulong address, int value, bool shouldDecrement, long timeout)
{
KThread currentThread = KernelStatic.GetCurrentThread();
_context.CriticalSection.Enter();
if (currentThread.TerminationRequested)
{
_context.CriticalSection.Leave();
return KernelResult.ThreadTerminating;
}
currentThread.SignaledObj = null;
currentThread.ObjSyncResult = KernelResult.TimedOut;
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (!KernelTransfer.UserToKernel(out int currentValue, address))
{
_context.CriticalSection.Leave();
return KernelResult.InvalidMemState;
}
if (shouldDecrement)
{
currentValue = Interlocked.Decrement(ref currentProcess.CpuMemory.GetRef<int>(address)) + 1;
}
if (currentValue < value)
{
if (timeout == 0)
{
_context.CriticalSection.Leave();
return KernelResult.TimedOut;
}
currentThread.MutexAddress = address;
currentThread.WaitingInArbitration = true;
_arbiterThreads.Add(currentThread);
currentThread.Reschedule(ThreadSchedState.Paused);
if (timeout > 0)
{
_context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
}
_context.CriticalSection.Leave();
if (timeout > 0)
{
_context.TimeManager.UnscheduleFutureInvocation(currentThread);
}
_context.CriticalSection.Enter();
if (currentThread.WaitingInArbitration)
{
_arbiterThreads.Remove(currentThread);
currentThread.WaitingInArbitration = false;
}
_context.CriticalSection.Leave();
return currentThread.ObjSyncResult;
}
_context.CriticalSection.Leave();
return KernelResult.InvalidState;
}
public Result Signal(ulong address, int count)
{
_context.CriticalSection.Enter();
WakeArbiterThreads(address, count);
_context.CriticalSection.Leave();
return Result.Success;
}
public Result SignalAndIncrementIfEqual(ulong address, int value, int count)
{
_context.CriticalSection.Enter();
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (!currentProcess.CpuMemory.IsMapped(address))
{
_context.CriticalSection.Leave();
return KernelResult.InvalidMemState;
}
ref int valueRef = ref currentProcess.CpuMemory.GetRef<int>(address);
int currentValue;
do
{
currentValue = valueRef;
if (currentValue != value)
{
_context.CriticalSection.Leave();
return KernelResult.InvalidState;
}
}
while (Interlocked.CompareExchange(ref valueRef, currentValue + 1, currentValue) != currentValue);
WakeArbiterThreads(address, count);
_context.CriticalSection.Leave();
return Result.Success;
}
public Result SignalAndModifyIfEqual(ulong address, int value, int count)
{
_context.CriticalSection.Enter();
int addend;
// If there are no threads waiting, the value is incremented by one. Otherwise it is
// decremented by two when the count of threads to be signaled is zero or negative,
// decremented by one when fewer threads are waiting than the count, and left
// unchanged when there are at least count waiters.
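// Worked example (illustrative): with two waiters on the address, count = 3 gives addend = -1,
// count = 2 gives addend = 0 and count = 0 gives addend = -2; with no waiters at all, addend = +1.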
int waitingCount = 0;
foreach (KThread thread in _arbiterThreads.Where(x => x.MutexAddress == address))
{
if (++waitingCount >= count)
{
break;
}
}
if (waitingCount > 0)
{
if (count <= 0)
{
addend = -2;
}
else if (waitingCount < count)
{
addend = -1;
}
else
{
addend = 0;
}
}
else
{
addend = 1;
}
KProcess currentProcess = KernelStatic.GetCurrentProcess();
if (!currentProcess.CpuMemory.IsMapped(address))
{
_context.CriticalSection.Leave();
return KernelResult.InvalidMemState;
}
ref int valueRef = ref currentProcess.CpuMemory.GetRef<int>(address);
int currentValue;
do
{
currentValue = valueRef;
if (currentValue != value)
{
_context.CriticalSection.Leave();
return KernelResult.InvalidState;
}
}
while (Interlocked.CompareExchange(ref valueRef, currentValue + addend, currentValue) != currentValue);
WakeArbiterThreads(address, count);
_context.CriticalSection.Leave();
return Result.Success;
}
private void WakeArbiterThreads(ulong address, int count)
{
static void RemoveArbiterThread(KThread thread)
{
thread.SignaledObj = null;
thread.ObjSyncResult = Result.Success;
thread.ReleaseAndResume();
thread.WaitingInArbitration = false;
}
WakeThreads(_arbiterThreads, count, RemoveArbiterThread, x => x.MutexAddress == address);
}
private static void WakeThreads(
List<KThread> threads,
int count,
Action<KThread> removeCallback,
Func<KThread, bool> predicate)
{
var candidates = threads.Where(predicate).OrderBy(x => x.DynamicPriority);
var toSignal = (count > 0 ? candidates.Take(count) : candidates).ToArray();
foreach (KThread thread in toSignal)
{
removeCallback(thread);
threads.Remove(thread);
}
}
}
}

View file

@ -0,0 +1,70 @@
using System.Collections.Generic;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
static class KConditionVariable
{
public static void Wait(KernelContext context, LinkedList<KThread> threadList, object mutex, long timeout)
{
KThread currentThread = KernelStatic.GetCurrentThread();
context.CriticalSection.Enter();
Monitor.Exit(mutex);
currentThread.Withholder = threadList;
currentThread.Reschedule(ThreadSchedState.Paused);
currentThread.WithholderNode = threadList.AddLast(currentThread);
if (currentThread.TerminationRequested)
{
threadList.Remove(currentThread.WithholderNode);
currentThread.Reschedule(ThreadSchedState.Running);
currentThread.Withholder = null;
context.CriticalSection.Leave();
}
else
{
if (timeout > 0)
{
context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
}
context.CriticalSection.Leave();
if (timeout > 0)
{
context.TimeManager.UnscheduleFutureInvocation(currentThread);
}
}
Monitor.Enter(mutex);
}
public static void NotifyAll(KernelContext context, LinkedList<KThread> threadList)
{
context.CriticalSection.Enter();
LinkedListNode<KThread> node = threadList.First;
for (; node != null; node = threadList.First)
{
KThread thread = node.Value;
threadList.Remove(thread.WithholderNode);
thread.Withholder = null;
thread.Reschedule(ThreadSchedState.Running);
}
context.CriticalSection.Leave();
}
}
}
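Wait above must be entered with the mutex already held via Monitor: it drops the lock while the thread is parked on the list and reacquires it before returning, arming the time manager only for positive timeouts. A minimal calling-pattern sketch (assumed names such as kernelContext; not part of the diff):
// Sketch: how a caller (e.g. a resource limit) drives KConditionVariable.
object mutex = new object();
var waiters = new System.Collections.Generic.LinkedList<KThread>();

lock (mutex)
{
    // Parks the current KThread on 'waiters'; 'mutex' is released while the thread
    // is paused and taken again before Wait returns. The timeout is in nanoseconds.
    KConditionVariable.Wait(kernelContext, waiters, mutex, timeout: 1_000_000);
}

// A releasing thread later wakes every thread parked on the list.
KConditionVariable.NotifyAll(kernelContext, waiters);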

View file

@ -0,0 +1,64 @@
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KCriticalSection
{
private readonly KernelContext _context;
private readonly object _lock;
private int _recursionCount;
public object Lock => _lock;
public KCriticalSection(KernelContext context)
{
_context = context;
_lock = new object();
}
public void Enter()
{
Monitor.Enter(_lock);
_recursionCount++;
}
public void Leave()
{
if (_recursionCount == 0)
{
return;
}
if (--_recursionCount == 0)
{
ulong scheduledCoresMask = KScheduler.SelectThreads(_context);
Monitor.Exit(_lock);
KThread currentThread = KernelStatic.GetCurrentThread();
bool isCurrentThreadSchedulable = currentThread != null && currentThread.IsSchedulable;
if (isCurrentThreadSchedulable)
{
KScheduler.EnableScheduling(_context, scheduledCoresMask);
}
else
{
KScheduler.EnableSchedulingFromForeignThread(_context, scheduledCoresMask);
// If the thread exists but is not schedulable, we still want to suspend
// it if it's not runnable. That allows the kernel to still block HLE threads
// even if they are not scheduled on guest cores.
if (currentThread != null && !currentThread.IsSchedulable && currentThread.Context.Running)
{
currentThread.SchedulerWaitEvent.WaitOne();
}
}
}
else
{
Monitor.Exit(_lock);
}
}
}
}
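KCriticalSection acts as a recursive, scheduler-wide lock: inner Enter/Leave pairs only touch the monitor, and thread reselection happens solely when the outermost Leave brings the recursion count back to zero. A sketch, assuming a kernelContext in scope:
// Sketch: nested use of the kernel critical section.
kernelContext.CriticalSection.Enter();
kernelContext.CriticalSection.Enter();  // Recursion count is now 2.
kernelContext.CriticalSection.Leave();  // Inner leave: just exits the monitor.
kernelContext.CriticalSection.Leave();  // Outer leave: selects threads and re-enables scheduling.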

View file

@ -0,0 +1,14 @@
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KEvent
{
public KReadableEvent ReadableEvent { get; private set; }
public KWritableEvent WritableEvent { get; private set; }
public KEvent(KernelContext context)
{
ReadableEvent = new KReadableEvent(context, this);
WritableEvent = new KWritableEvent(context, this);
}
}
}

View file

@ -0,0 +1,286 @@
using System.Collections.Generic;
using System.Numerics;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KPriorityQueue
{
private readonly LinkedList<KThread>[][] _scheduledThreadsPerPrioPerCore;
private readonly LinkedList<KThread>[][] _suggestedThreadsPerPrioPerCore;
private readonly long[] _scheduledPrioritiesPerCore;
private readonly long[] _suggestedPrioritiesPerCore;
public KPriorityQueue()
{
_suggestedThreadsPerPrioPerCore = new LinkedList<KThread>[KScheduler.PrioritiesCount][];
_scheduledThreadsPerPrioPerCore = new LinkedList<KThread>[KScheduler.PrioritiesCount][];
for (int prio = 0; prio < KScheduler.PrioritiesCount; prio++)
{
_suggestedThreadsPerPrioPerCore[prio] = new LinkedList<KThread>[KScheduler.CpuCoresCount];
_scheduledThreadsPerPrioPerCore[prio] = new LinkedList<KThread>[KScheduler.CpuCoresCount];
for (int core = 0; core < KScheduler.CpuCoresCount; core++)
{
_suggestedThreadsPerPrioPerCore[prio][core] = new LinkedList<KThread>();
_scheduledThreadsPerPrioPerCore[prio][core] = new LinkedList<KThread>();
}
}
_scheduledPrioritiesPerCore = new long[KScheduler.CpuCoresCount];
_suggestedPrioritiesPerCore = new long[KScheduler.CpuCoresCount];
}
public readonly ref struct KThreadEnumerable
{
readonly LinkedList<KThread>[][] _listPerPrioPerCore;
readonly long[] _prios;
readonly int _core;
public KThreadEnumerable(LinkedList<KThread>[][] listPerPrioPerCore, long[] prios, int core)
{
_listPerPrioPerCore = listPerPrioPerCore;
_prios = prios;
_core = core;
}
public Enumerator GetEnumerator()
{
return new Enumerator(_listPerPrioPerCore, _prios, _core);
}
public ref struct Enumerator
{
private readonly LinkedList<KThread>[][] _listPerPrioPerCore;
private readonly int _core;
private long _prioMask;
private int _prio;
private LinkedList<KThread> _list;
private LinkedListNode<KThread> _node;
public Enumerator(LinkedList<KThread>[][] listPerPrioPerCore, long[] prios, int core)
{
_listPerPrioPerCore = listPerPrioPerCore;
_core = core;
_prioMask = prios[core];
_prio = BitOperations.TrailingZeroCount(_prioMask);
_prioMask &= ~(1L << _prio);
}
public KThread Current => _node?.Value;
public bool MoveNext()
{
_node = _node?.Next;
if (_node == null)
{
if (!MoveNextListAndFirstNode())
{
return false;
}
}
return _node != null;
}
private bool MoveNextListAndFirstNode()
{
if (_prio < KScheduler.PrioritiesCount)
{
_list = _listPerPrioPerCore[_prio][_core];
_node = _list.First;
_prio = BitOperations.TrailingZeroCount(_prioMask);
_prioMask &= ~(1L << _prio);
return true;
}
else
{
_list = null;
_node = null;
return false;
}
}
}
}
public KThreadEnumerable ScheduledThreads(int core)
{
return new KThreadEnumerable(_scheduledThreadsPerPrioPerCore, _scheduledPrioritiesPerCore, core);
}
public KThreadEnumerable SuggestedThreads(int core)
{
return new KThreadEnumerable(_suggestedThreadsPerPrioPerCore, _suggestedPrioritiesPerCore, core);
}
public KThread ScheduledThreadsFirstOrDefault(int core)
{
return ScheduledThreadsElementAtOrDefault(core, 0);
}
public KThread ScheduledThreadsElementAtOrDefault(int core, int index)
{
int currentIndex = 0;
foreach (var scheduledThread in ScheduledThreads(core))
{
if (currentIndex == index)
{
return scheduledThread;
}
else
{
currentIndex++;
}
}
return null;
}
public KThread ScheduledThreadsWithDynamicPriorityFirstOrDefault(int core, int dynamicPriority)
{
foreach (var scheduledThread in ScheduledThreads(core))
{
if (scheduledThread.DynamicPriority == dynamicPriority)
{
return scheduledThread;
}
}
return null;
}
public bool HasScheduledThreads(int core)
{
return ScheduledThreadsFirstOrDefault(core) != null;
}
public void TransferToCore(int prio, int dstCore, KThread thread)
{
int srcCore = thread.ActiveCore;
if (srcCore == dstCore)
{
return;
}
thread.ActiveCore = dstCore;
if (srcCore >= 0)
{
Unschedule(prio, srcCore, thread);
}
if (dstCore >= 0)
{
Unsuggest(prio, dstCore, thread);
Schedule(prio, dstCore, thread);
}
if (srcCore >= 0)
{
Suggest(prio, srcCore, thread);
}
}
public void Suggest(int prio, int core, KThread thread)
{
if (prio >= KScheduler.PrioritiesCount)
{
return;
}
thread.SiblingsPerCore[core] = SuggestedQueue(prio, core).AddFirst(thread);
_suggestedPrioritiesPerCore[core] |= 1L << prio;
}
public void Unsuggest(int prio, int core, KThread thread)
{
if (prio >= KScheduler.PrioritiesCount)
{
return;
}
LinkedList<KThread> queue = SuggestedQueue(prio, core);
queue.Remove(thread.SiblingsPerCore[core]);
if (queue.First == null)
{
_suggestedPrioritiesPerCore[core] &= ~(1L << prio);
}
}
public void Schedule(int prio, int core, KThread thread)
{
if (prio >= KScheduler.PrioritiesCount)
{
return;
}
thread.SiblingsPerCore[core] = ScheduledQueue(prio, core).AddLast(thread);
_scheduledPrioritiesPerCore[core] |= 1L << prio;
}
public void SchedulePrepend(int prio, int core, KThread thread)
{
if (prio >= KScheduler.PrioritiesCount)
{
return;
}
thread.SiblingsPerCore[core] = ScheduledQueue(prio, core).AddFirst(thread);
_scheduledPrioritiesPerCore[core] |= 1L << prio;
}
public KThread Reschedule(int prio, int core, KThread thread)
{
if (prio >= KScheduler.PrioritiesCount)
{
return null;
}
LinkedList<KThread> queue = ScheduledQueue(prio, core);
queue.Remove(thread.SiblingsPerCore[core]);
thread.SiblingsPerCore[core] = queue.AddLast(thread);
return queue.First.Value;
}
public void Unschedule(int prio, int core, KThread thread)
{
if (prio >= KScheduler.PrioritiesCount)
{
return;
}
LinkedList<KThread> queue = ScheduledQueue(prio, core);
queue.Remove(thread.SiblingsPerCore[core]);
if (queue.First == null)
{
_scheduledPrioritiesPerCore[core] &= ~(1L << prio);
}
}
private LinkedList<KThread> SuggestedQueue(int prio, int core)
{
return _suggestedThreadsPerPrioPerCore[prio][core];
}
private LinkedList<KThread> ScheduledQueue(int prio, int core)
{
return _scheduledThreadsPerPrioPerCore[prio][core];
}
}
}

View file

@ -0,0 +1,65 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Horizon.Common;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KReadableEvent : KSynchronizationObject
{
private readonly KEvent _parent;
private bool _signaled;
public KReadableEvent(KernelContext context, KEvent parent) : base(context)
{
_parent = parent;
}
public override void Signal()
{
KernelContext.CriticalSection.Enter();
if (!_signaled)
{
_signaled = true;
base.Signal();
}
KernelContext.CriticalSection.Leave();
}
public Result Clear()
{
_signaled = false;
return Result.Success;
}
public Result ClearIfSignaled()
{
Result result;
KernelContext.CriticalSection.Enter();
if (_signaled)
{
_signaled = false;
result = Result.Success;
}
else
{
result = KernelResult.InvalidState;
}
KernelContext.CriticalSection.Leave();
return result;
}
public override bool IsSignaled()
{
return _signaled;
}
}
}

View file

@ -0,0 +1,661 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using System;
using System.Numerics;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
partial class KScheduler : IDisposable
{
public const int PrioritiesCount = 64;
public const int CpuCoresCount = 4;
private const int RoundRobinTimeQuantumMs = 10;
private static readonly int[] PreemptionPriorities = new int[] { 59, 59, 59, 63 };
private static readonly int[] _srcCoresHighestPrioThreads = new int[CpuCoresCount];
private readonly KernelContext _context;
private readonly int _coreId;
private struct SchedulingState
{
public volatile bool NeedsScheduling;
public volatile KThread SelectedThread;
}
private SchedulingState _state;
private AutoResetEvent _idleInterruptEvent;
private readonly object _idleInterruptEventLock;
private KThread _previousThread;
private KThread _currentThread;
private readonly KThread _idleThread;
public KThread PreviousThread => _previousThread;
public KThread CurrentThread => _currentThread;
public long LastContextSwitchTime { get; private set; }
public long TotalIdleTimeTicks => _idleThread.TotalTimeRunning;
public KScheduler(KernelContext context, int coreId)
{
_context = context;
_coreId = coreId;
_idleInterruptEvent = new AutoResetEvent(false);
_idleInterruptEventLock = new object();
KThread idleThread = CreateIdleThread(context, coreId);
_currentThread = idleThread;
_idleThread = idleThread;
idleThread.StartHostThread();
idleThread.SchedulerWaitEvent.Set();
}
private KThread CreateIdleThread(KernelContext context, int cpuCore)
{
KThread idleThread = new KThread(context);
idleThread.Initialize(0UL, 0UL, 0UL, PrioritiesCount, cpuCore, null, ThreadType.Dummy, IdleThreadLoop);
return idleThread;
}
public static ulong SelectThreads(KernelContext context)
{
if (context.ThreadReselectionRequested)
{
return SelectThreadsImpl(context);
}
else
{
return 0UL;
}
}
private static ulong SelectThreadsImpl(KernelContext context)
{
context.ThreadReselectionRequested = false;
ulong scheduledCoresMask = 0UL;
for (int core = 0; core < CpuCoresCount; core++)
{
KThread thread = context.PriorityQueue.ScheduledThreadsFirstOrDefault(core);
if (thread != null &&
thread.Owner != null &&
thread.Owner.PinnedThreads[core] != null &&
thread.Owner.PinnedThreads[core] != thread)
{
KThread candidate = thread.Owner.PinnedThreads[core];
if (candidate.KernelWaitersCount == 0 && !thread.Owner.IsExceptionUserThread(candidate))
{
if (candidate.SchedFlags == ThreadSchedState.Running)
{
thread = candidate;
}
else
{
thread = null;
}
}
}
scheduledCoresMask |= context.Schedulers[core].SelectThread(thread);
}
for (int core = 0; core < CpuCoresCount; core++)
{
// If the core is not idle (there's already a thread running on it),
// then we don't need to attempt load balancing.
if (context.PriorityQueue.HasScheduledThreads(core))
{
continue;
}
Array.Fill(_srcCoresHighestPrioThreads, 0);
int srcCoresHighestPrioThreadsCount = 0;
KThread dst = null;
// Select candidate threads that could run on this core.
// Give preference to threads that are not yet selected.
foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
{
if (suggested.ActiveCore < 0 || suggested != context.Schedulers[suggested.ActiveCore]._state.SelectedThread)
{
dst = suggested;
break;
}
_srcCoresHighestPrioThreads[srcCoresHighestPrioThreadsCount++] = suggested.ActiveCore;
}
// Not yet selected candidate found.
if (dst != null)
{
// Priorities < 2 are used for the kernel message dispatching
// threads, so we should skip load balancing entirely.
if (dst.DynamicPriority >= 2)
{
context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
scheduledCoresMask |= context.Schedulers[core].SelectThread(dst);
}
continue;
}
// All candidates are already selected, choose the best one
// (the first one that doesn't make the source core idle if moved).
for (int index = 0; index < srcCoresHighestPrioThreadsCount; index++)
{
int srcCore = _srcCoresHighestPrioThreads[index];
KThread src = context.PriorityQueue.ScheduledThreadsElementAtOrDefault(srcCore, 1);
if (src != null)
{
// Run the second thread on the queue on the source core,
// move the first one to the current core.
KThread origSelectedCoreSrc = context.Schedulers[srcCore]._state.SelectedThread;
scheduledCoresMask |= context.Schedulers[srcCore].SelectThread(src);
context.PriorityQueue.TransferToCore(origSelectedCoreSrc.DynamicPriority, core, origSelectedCoreSrc);
scheduledCoresMask |= context.Schedulers[core].SelectThread(origSelectedCoreSrc);
}
}
}
return scheduledCoresMask;
}
private ulong SelectThread(KThread nextThread)
{
KThread previousThread = _state.SelectedThread;
if (previousThread != nextThread)
{
if (previousThread != null)
{
previousThread.LastScheduledTime = PerformanceCounter.ElapsedTicks;
}
_state.SelectedThread = nextThread;
_state.NeedsScheduling = true;
return 1UL << _coreId;
}
else
{
return 0UL;
}
}
public static void EnableScheduling(KernelContext context, ulong scheduledCoresMask)
{
KScheduler currentScheduler = context.Schedulers[KernelStatic.GetCurrentThread().CurrentCore];
// Note that "RescheduleCurrentCore" will block, so "RescheduleOtherCores" must be done first.
currentScheduler.RescheduleOtherCores(scheduledCoresMask);
currentScheduler.RescheduleCurrentCore();
}
public static void EnableSchedulingFromForeignThread(KernelContext context, ulong scheduledCoresMask)
{
RescheduleOtherCores(context, scheduledCoresMask);
}
private void RescheduleCurrentCore()
{
if (_state.NeedsScheduling)
{
Schedule();
}
}
private void RescheduleOtherCores(ulong scheduledCoresMask)
{
RescheduleOtherCores(_context, scheduledCoresMask & ~(1UL << _coreId));
}
private static void RescheduleOtherCores(KernelContext context, ulong scheduledCoresMask)
{
while (scheduledCoresMask != 0)
{
int coreToSignal = BitOperations.TrailingZeroCount(scheduledCoresMask);
KThread threadToSignal = context.Schedulers[coreToSignal]._currentThread;
// Request the thread running on that core to stop and reschedule, if we have one.
if (threadToSignal != context.Schedulers[coreToSignal]._idleThread)
{
threadToSignal.Context.RequestInterrupt();
}
// If the core is idle, ensure that the idle thread is awakened.
context.Schedulers[coreToSignal]._idleInterruptEvent.Set();
scheduledCoresMask &= ~(1UL << coreToSignal);
}
}
private void IdleThreadLoop()
{
while (_context.Running)
{
_state.NeedsScheduling = false;
Thread.MemoryBarrier();
KThread nextThread = PickNextThread(_state.SelectedThread);
if (_idleThread != nextThread)
{
_idleThread.SchedulerWaitEvent.Reset();
WaitHandle.SignalAndWait(nextThread.SchedulerWaitEvent, _idleThread.SchedulerWaitEvent);
}
_idleInterruptEvent.WaitOne();
}
lock (_idleInterruptEventLock)
{
_idleInterruptEvent.Dispose();
_idleInterruptEvent = null;
}
}
public void Schedule()
{
_state.NeedsScheduling = false;
Thread.MemoryBarrier();
KThread currentThread = KernelStatic.GetCurrentThread();
KThread selectedThread = _state.SelectedThread;
// If the thread is already scheduled and running on the core, we have nothing to do.
if (currentThread == selectedThread)
{
return;
}
currentThread.SchedulerWaitEvent.Reset();
currentThread.ThreadContext.Unlock();
// Wake all the threads that might be waiting until this thread context is unlocked.
for (int core = 0; core < CpuCoresCount; core++)
{
_context.Schedulers[core]._idleInterruptEvent.Set();
}
KThread nextThread = PickNextThread(selectedThread);
if (currentThread.Context.Running)
{
// Wait until this thread is scheduled again, and allow the next thread to run.
WaitHandle.SignalAndWait(nextThread.SchedulerWaitEvent, currentThread.SchedulerWaitEvent);
}
else
{
// Allow the next thread to run.
nextThread.SchedulerWaitEvent.Set();
// We don't need to wait since the thread is exiting; however, we need to
// make sure this thread will never call the scheduler again, since it is
// no longer assigned to a core.
currentThread.MakeUnschedulable();
// Just to be sure, set the core to an invalid value.
// This will trigger an exception if it attempts to call schedule again,
// rather than leaving the scheduler in an invalid state.
currentThread.CurrentCore = -1;
}
}
private KThread PickNextThread(KThread selectedThread)
{
while (true)
{
if (selectedThread != null)
{
// Try to run the selected thread.
// We need to acquire the context lock to be sure the thread is not
// already running on another core. If it is, then we return here
// and the caller should try again once there is something available for scheduling.
// The thread currently running on the core should have been requested to
// interrupt so this is not expected to take long.
// The idle thread must also be paused if we are scheduling a thread
// on the core, as the scheduled thread will handle the next switch.
if (selectedThread.ThreadContext.Lock())
{
SwitchTo(selectedThread);
if (!_state.NeedsScheduling)
{
return selectedThread;
}
selectedThread.ThreadContext.Unlock();
}
else
{
return _idleThread;
}
}
else
{
// The core is idle now, make sure that the idle thread can run
// and switch the core when a thread is available.
SwitchTo(null);
return _idleThread;
}
_state.NeedsScheduling = false;
Thread.MemoryBarrier();
selectedThread = _state.SelectedThread;
}
}
private void SwitchTo(KThread nextThread)
{
KProcess currentProcess = KernelStatic.GetCurrentProcess();
KThread currentThread = KernelStatic.GetCurrentThread();
nextThread ??= _idleThread;
if (currentThread != nextThread)
{
long previousTicks = LastContextSwitchTime;
long currentTicks = PerformanceCounter.ElapsedTicks;
long ticksDelta = currentTicks - previousTicks;
currentThread.AddCpuTime(ticksDelta);
if (currentProcess != null)
{
currentProcess.AddCpuTime(ticksDelta);
}
LastContextSwitchTime = currentTicks;
if (currentProcess != null)
{
_previousThread = !currentThread.TerminationRequested && currentThread.ActiveCore == _coreId ? currentThread : null;
}
else if (currentThread == _idleThread)
{
_previousThread = null;
}
}
if (nextThread.CurrentCore != _coreId)
{
nextThread.CurrentCore = _coreId;
}
_currentThread = nextThread;
}
public static void PreemptionThreadLoop(KernelContext context)
{
while (context.Running)
{
context.CriticalSection.Enter();
for (int core = 0; core < CpuCoresCount; core++)
{
RotateScheduledQueue(context, core, PreemptionPriorities[core]);
}
context.CriticalSection.Leave();
Thread.Sleep(RoundRobinTimeQuantumMs);
}
}
private static void RotateScheduledQueue(KernelContext context, int core, int prio)
{
KThread selectedThread = context.PriorityQueue.ScheduledThreadsWithDynamicPriorityFirstOrDefault(core, prio);
KThread nextThread = null;
// Yield priority queue.
if (selectedThread != null)
{
nextThread = context.PriorityQueue.Reschedule(prio, core, selectedThread);
}
static KThread FirstSuitableCandidateOrDefault(KernelContext context, int core, KThread selectedThread, KThread nextThread, Predicate<KThread> predicate)
{
foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
{
int suggestedCore = suggested.ActiveCore;
if (suggestedCore >= 0)
{
KThread selectedSuggestedCore = context.PriorityQueue.ScheduledThreadsFirstOrDefault(suggestedCore);
if (selectedSuggestedCore == suggested || (selectedSuggestedCore != null && selectedSuggestedCore.DynamicPriority < 2))
{
continue;
}
}
// If the candidate was scheduled after the current thread, then it's not worth it.
if (nextThread == selectedThread ||
nextThread == null ||
nextThread.LastScheduledTime >= suggested.LastScheduledTime)
{
if (predicate(suggested))
{
return suggested;
}
}
}
return null;
}
// Select candidate threads that could run on this core.
// Only take into account threads that are not yet selected.
KThread dst = FirstSuitableCandidateOrDefault(context, core, selectedThread, nextThread, x => x.DynamicPriority == prio);
if (dst != null)
{
context.PriorityQueue.TransferToCore(prio, core, dst);
}
// If the priority of the currently selected thread is lower than or the same as the preemption priority,
// then try to migrate a thread with lower priority.
KThread bestCandidate = context.PriorityQueue.ScheduledThreadsFirstOrDefault(core);
if (bestCandidate != null && bestCandidate.DynamicPriority >= prio)
{
dst = FirstSuitableCandidateOrDefault(context, core, selectedThread, nextThread, x => x.DynamicPriority < bestCandidate.DynamicPriority);
if (dst != null)
{
context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
}
}
context.ThreadReselectionRequested = true;
}
public static void Yield(KernelContext context)
{
KThread currentThread = KernelStatic.GetCurrentThread();
if (!currentThread.IsSchedulable)
{
return;
}
context.CriticalSection.Enter();
if (currentThread.SchedFlags != ThreadSchedState.Running)
{
context.CriticalSection.Leave();
return;
}
KThread nextThread = context.PriorityQueue.Reschedule(currentThread.DynamicPriority, currentThread.ActiveCore, currentThread);
if (nextThread != currentThread)
{
context.ThreadReselectionRequested = true;
}
context.CriticalSection.Leave();
}
public static void YieldWithLoadBalancing(KernelContext context)
{
KThread currentThread = KernelStatic.GetCurrentThread();
if (!currentThread.IsSchedulable)
{
return;
}
context.CriticalSection.Enter();
if (currentThread.SchedFlags != ThreadSchedState.Running)
{
context.CriticalSection.Leave();
return;
}
int prio = currentThread.DynamicPriority;
int core = currentThread.ActiveCore;
// Move current thread to the end of the queue.
KThread nextThread = context.PriorityQueue.Reschedule(prio, core, currentThread);
static KThread FirstSuitableCandidateOrDefault(KernelContext context, int core, KThread nextThread, int lessThanOrEqualPriority)
{
foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
{
int suggestedCore = suggested.ActiveCore;
if (suggestedCore >= 0)
{
KThread selectedSuggestedCore = context.Schedulers[suggestedCore]._state.SelectedThread;
if (selectedSuggestedCore == suggested || (selectedSuggestedCore != null && selectedSuggestedCore.DynamicPriority < 2))
{
continue;
}
}
// If the candidate was scheduled after the current thread, then it's not worth it,
// unless the priority is higher than the current one.
if (suggested.LastScheduledTime <= nextThread.LastScheduledTime ||
suggested.DynamicPriority < nextThread.DynamicPriority)
{
if (suggested.DynamicPriority <= lessThanOrEqualPriority)
{
return suggested;
}
}
}
return null;
}
KThread dst = FirstSuitableCandidateOrDefault(context, core, nextThread, prio);
if (dst != null)
{
context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
context.ThreadReselectionRequested = true;
}
else if (currentThread != nextThread)
{
context.ThreadReselectionRequested = true;
}
context.CriticalSection.Leave();
}
public static void YieldToAnyThread(KernelContext context)
{
KThread currentThread = KernelStatic.GetCurrentThread();
if (!currentThread.IsSchedulable)
{
return;
}
context.CriticalSection.Enter();
if (currentThread.SchedFlags != ThreadSchedState.Running)
{
context.CriticalSection.Leave();
return;
}
int core = currentThread.ActiveCore;
context.PriorityQueue.TransferToCore(currentThread.DynamicPriority, -1, currentThread);
if (!context.PriorityQueue.HasScheduledThreads(core))
{
KThread selectedThread = null;
foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
{
int suggestedCore = suggested.ActiveCore;
if (suggestedCore < 0)
{
continue;
}
KThread firstCandidate = context.PriorityQueue.ScheduledThreadsFirstOrDefault(suggestedCore);
if (firstCandidate == suggested)
{
continue;
}
if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
{
context.PriorityQueue.TransferToCore(suggested.DynamicPriority, core, suggested);
}
selectedThread = suggested;
break;
}
if (currentThread != selectedThread)
{
context.ThreadReselectionRequested = true;
}
}
else
{
context.ThreadReselectionRequested = true;
}
context.CriticalSection.Leave();
}
public void Dispose()
{
// Ensure that the idle thread is not blocked and can exit.
lock (_idleInterruptEventLock)
{
if (_idleInterruptEvent != null)
{
_idleInterruptEvent.Set();
}
}
}
}
}

View file

@ -0,0 +1,142 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Horizon.Common;
using System;
using System.Buffers;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KSynchronization
{
private KernelContext _context;
public KSynchronization(KernelContext context)
{
_context = context;
}
public Result WaitFor(Span<KSynchronizationObject> syncObjs, long timeout, out int handleIndex)
{
handleIndex = 0;
Result result = KernelResult.TimedOut;
_context.CriticalSection.Enter();
// Check if objects are already signaled before waiting.
for (int index = 0; index < syncObjs.Length; index++)
{
if (!syncObjs[index].IsSignaled())
{
continue;
}
handleIndex = index;
_context.CriticalSection.Leave();
return Result.Success;
}
if (timeout == 0)
{
_context.CriticalSection.Leave();
return result;
}
KThread currentThread = KernelStatic.GetCurrentThread();
if (currentThread.TerminationRequested)
{
result = KernelResult.ThreadTerminating;
}
else if (currentThread.SyncCancelled)
{
currentThread.SyncCancelled = false;
result = KernelResult.Cancelled;
}
else
{
LinkedListNode<KThread>[] syncNodesArray = ArrayPool<LinkedListNode<KThread>>.Shared.Rent(syncObjs.Length);
Span<LinkedListNode<KThread>> syncNodes = syncNodesArray.AsSpan(0, syncObjs.Length);
for (int index = 0; index < syncObjs.Length; index++)
{
syncNodes[index] = syncObjs[index].AddWaitingThread(currentThread);
}
currentThread.WaitingSync = true;
currentThread.SignaledObj = null;
currentThread.ObjSyncResult = result;
currentThread.Reschedule(ThreadSchedState.Paused);
if (timeout > 0)
{
_context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
}
_context.CriticalSection.Leave();
currentThread.WaitingSync = false;
if (timeout > 0)
{
_context.TimeManager.UnscheduleFutureInvocation(currentThread);
}
_context.CriticalSection.Enter();
result = currentThread.ObjSyncResult;
handleIndex = -1;
for (int index = 0; index < syncObjs.Length; index++)
{
syncObjs[index].RemoveWaitingThread(syncNodes[index]);
if (syncObjs[index] == currentThread.SignaledObj)
{
handleIndex = index;
}
}
ArrayPool<LinkedListNode<KThread>>.Shared.Return(syncNodesArray);
}
_context.CriticalSection.Leave();
return result;
}
public void SignalObject(KSynchronizationObject syncObj)
{
_context.CriticalSection.Enter();
if (syncObj.IsSignaled())
{
LinkedListNode<KThread> node = syncObj.WaitingThreads.First;
while (node != null)
{
KThread thread = node.Value;
if ((thread.SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
{
thread.SignaledObj = syncObj;
thread.ObjSyncResult = Result.Success;
thread.Reschedule(ThreadSchedState.Running);
}
node = node.Next;
}
}
_context.CriticalSection.Leave();
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,33 @@
using Ryujinx.Cpu;
using Ryujinx.Horizon.Common;
using System.Threading;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KThreadContext : IThreadContext
{
private readonly IExecutionContext _context;
public bool Running => _context.Running;
public ulong TlsAddress => (ulong)_context.TpidrroEl0;
public ulong GetX(int index) => _context.GetX(index);
private int _locked;
public KThreadContext(IExecutionContext context)
{
_context = context;
}
public bool Lock()
{
return Interlocked.Exchange(ref _locked, 1) == 0;
}
public void Unlock()
{
Interlocked.Exchange(ref _locked, 0);
}
}
}
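Lock above is a non-blocking try-lock built on Interlocked.Exchange, which is why the scheduler's PickNextThread falls back to the idle thread when it fails. A small usage sketch (assumed threadContext variable):
// Sketch: only touch the context while holding the try-lock.
if (threadContext.Lock())
{
    try
    {
        // The thread is guaranteed not to be running on another core here.
    }
    finally
    {
        threadContext.Unlock();
    }
}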

View file

@ -0,0 +1,25 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Horizon.Common;
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
class KWritableEvent : KAutoObject
{
private readonly KEvent _parent;
public KWritableEvent(KernelContext context, KEvent parent) : base(context)
{
_parent = parent;
}
public void Signal()
{
_parent.ReadableEvent.Signal();
}
public Result Clear()
{
return _parent.ReadableEvent.Clear();
}
}
}
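Tying the event objects together: KEvent pairs a writable end for the signaling side with a readable end for waiters, so a round trip looks roughly like this (illustrative sketch, assuming a kernelContext in scope):
// Sketch: signal an event through the writable end and consume it on the readable end.
KEvent evnt = new KEvent(kernelContext);

evnt.WritableEvent.Signal();  // Forwards to ReadableEvent.Signal(), waking any waiters.
bool consumed = evnt.ReadableEvent.ClearIfSignaled() == Result.Success;

evnt.ReadableEvent.Clear();   // Always succeeds; simply drops the signaled state.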

View file

@ -0,0 +1,9 @@
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
enum SignalType
{
Signal = 0,
SignalAndIncrementIfEqual = 1,
SignalAndModifyIfEqual = 2
}
}

View file

@ -0,0 +1,20 @@
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
enum ThreadSchedState : ushort
{
LowMask = 0xf,
HighMask = 0xfff0,
ForcePauseMask = 0x1f0,
ProcessPauseFlag = 1 << 4,
ThreadPauseFlag = 1 << 5,
ProcessDebugPauseFlag = 1 << 6,
BacktracePauseFlag = 1 << 7,
KernelInitPauseFlag = 1 << 8,
None = 0,
Paused = 1,
Running = 2,
TerminationPending = 3
}
}
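The low nibble of ThreadSchedState carries the base state while the higher bits are pause requests, which is why code elsewhere masks with LowMask before comparing against Paused or Running. A small sketch:
// Sketch: a runnable thread that has been suspended by the debugger.
ThreadSchedState schedFlags = ThreadSchedState.Running | ThreadSchedState.ProcessDebugPauseFlag;

ThreadSchedState baseState = schedFlags & ThreadSchedState.LowMask;      // Running
bool forcePaused = (schedFlags & ThreadSchedState.ForcePauseMask) != 0;  // true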

View file

@ -0,0 +1,10 @@
namespace Ryujinx.HLE.HOS.Kernel.Threading
{
enum ThreadType
{
Dummy,
Kernel,
Kernel2,
User
}
}