rebased another time

Author: Fire Cube
Date: 2025-07-07 18:10:32 +02:00
Commit: a718e3493a

20 changed files with 141 additions and 34 deletions

View file

@@ -67,6 +67,7 @@ static u32 internalScreenHeight = 720;
static bool isNullGpu = false;
static bool shouldCopyGPUBuffers = false;
static bool readbacksEnabled = false;
static bool readbackLinearImagesEnabled = false;
static bool directMemoryAccessEnabled = false;
static bool shouldDumpShaders = false;
static bool shouldPatchShaders = false;
@@ -105,7 +106,7 @@ u32 m_language = 1; // english
static std::string trophyKey = "";
// Expected number of items in the config file
static constexpr u64 total_entries = 53;
static constexpr u64 total_entries = 54;
bool allowHDR() {
return isHDRAllowed;
@@ -272,6 +273,10 @@ bool readbacks() {
return readbacksEnabled;
}
bool readbackLinearImages() {
return readbackLinearImagesEnabled;
}
bool directMemoryAccess() {
return directMemoryAccessEnabled;
}
@@ -653,6 +658,8 @@ void load(const std::filesystem::path& path) {
isNullGpu = toml::find_or<bool>(gpu, "nullGpu", isNullGpu);
shouldCopyGPUBuffers = toml::find_or<bool>(gpu, "copyGPUBuffers", shouldCopyGPUBuffers);
readbacksEnabled = toml::find_or<bool>(gpu, "readbacks", readbacksEnabled);
readbackLinearImagesEnabled =
toml::find_or<bool>(gpu, "readbackLinearImages", readbackLinearImagesEnabled);
directMemoryAccessEnabled =
toml::find_or<bool>(gpu, "directMemoryAccess", directMemoryAccessEnabled);
shouldDumpShaders = toml::find_or<bool>(gpu, "dumpShaders", shouldDumpShaders);
@@ -826,6 +833,7 @@ void save(const std::filesystem::path& path) {
data["GPU"]["nullGpu"] = isNullGpu;
data["GPU"]["copyGPUBuffers"] = shouldCopyGPUBuffers;
data["GPU"]["readbacks"] = readbacksEnabled;
data["GPU"]["readbackLinearImages"] = readbackLinearImagesEnabled;
data["GPU"]["directMemoryAccess"] = directMemoryAccessEnabled;
data["GPU"]["dumpShaders"] = shouldDumpShaders;
data["GPU"]["patchShaders"] = shouldPatchShaders;
@@ -928,6 +936,7 @@ void setDefaultValues() {
isNullGpu = false;
shouldCopyGPUBuffers = false;
readbacksEnabled = false;
readbackLinearImagesEnabled = false;
directMemoryAccessEnabled = false;
shouldDumpShaders = false;
shouldPatchShaders = false;
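
For reference, a minimal sketch of the toml11 find_or pattern the load path above relies on. The table literal and key values are invented for illustration; only the find_or call shape comes from the hunks above.

    #include <iostream>
    #include <toml.hpp> // toml11

    int main() {
        // Hypothetical [GPU] table; the real code reads it from config.toml.
        const toml::value gpu = toml::table{
            {"readbacks", false},
            {"readbackLinearImages", true},
        };
        // find_or returns the fallback when a key is absent, so configs
        // written before the new entry existed keep loading cleanly.
        const bool linear = toml::find_or<bool>(gpu, "readbackLinearImages", false);
        const bool missing = toml::find_or<bool>(gpu, "someFutureOption", false);
        std::cout << linear << ' ' << missing << '\n'; // prints: 1 0
    }

This is presumably also why total_entries moves from 53 to 54: the save path writes every known key, and the expected-entry count has to stay in sync with the new one.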

View file

@@ -51,6 +51,7 @@ bool copyGPUCmdBuffers();
void setCopyGPUCmdBuffers(bool enable);
bool readbacks();
void setReadbacks(bool enable);
bool readbackLinearImages();
bool directMemoryAccess();
void setDirectMemoryAccess(bool enable);
bool dumpShaders();

View file

@@ -358,9 +358,17 @@ enum PosixPageProtection {
[[nodiscard]] constexpr PosixPageProtection ToPosixProt(Core::MemoryProt prot) {
if (True(prot & Core::MemoryProt::CpuReadWrite) ||
True(prot & Core::MemoryProt::GpuReadWrite)) {
if (True(prot & Core::MemoryProt::CpuExec)) {
return PAGE_EXECUTE_READWRITE;
} else {
return PAGE_READWRITE;
}
} else if (True(prot & Core::MemoryProt::CpuRead) || True(prot & Core::MemoryProt::GpuRead)) {
if (True(prot & Core::MemoryProt::CpuExec)) {
return PAGE_EXECUTE_READ;
} else {
return PAGE_READONLY;
}
} else {
return PAGE_NOACCESS;
}
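
The Windows branch above picks a PAGE_* protection constant; for illustration, here is a rough POSIX-flavored sketch of the same decision tree, assuming the MemoryProt values from the memory.h hunk later in this commit. The helper names are simplified stand-ins, not emulator code.

    #include <cassert>
    #include <cstdint>
    #include <sys/mman.h>

    enum class MemoryProt : uint32_t {
        NoAccess = 0, CpuRead = 1, CpuReadWrite = 2, CpuExec = 4,
        GpuRead = 16, GpuWrite = 32, GpuReadWrite = 48,
    };
    constexpr MemoryProt operator&(MemoryProt a, MemoryProt b) {
        return static_cast<MemoryProt>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
    }
    constexpr bool True(MemoryProt x) { return static_cast<uint32_t>(x) != 0; }

    // Mirrors the branch order above: writable beats read-only, and the new
    // CpuExec bit upgrades either case to an executable mapping.
    int ToPosixProtSketch(MemoryProt prot) {
        if (True(prot & MemoryProt::CpuReadWrite) || True(prot & MemoryProt::GpuReadWrite)) {
            return True(prot & MemoryProt::CpuExec) ? (PROT_READ | PROT_WRITE | PROT_EXEC)
                                                    : (PROT_READ | PROT_WRITE);
        }
        if (True(prot & MemoryProt::CpuRead) || True(prot & MemoryProt::GpuRead)) {
            return True(prot & MemoryProt::CpuExec) ? (PROT_READ | PROT_EXEC) : PROT_READ;
        }
        return PROT_NONE;
    }

    int main() {
        assert(ToPosixProtSketch(MemoryProt::CpuReadWrite) == (PROT_READ | PROT_WRITE));
        assert(ToPosixProtSketch(MemoryProt::NoAccess) == PROT_NONE);
    }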

View file

@@ -573,11 +573,12 @@ void* PS4_SYSV_ABI posix_mmap(void* addr, u64 len, s32 prot, s32 flags, s32 fd,
auto* memory = Core::Memory::Instance();
const auto mem_prot = static_cast<Core::MemoryProt>(prot);
const auto mem_flags = static_cast<Core::MemoryMapFlags>(flags);
const auto is_exec = True(mem_prot & Core::MemoryProt::CpuExec);
s32 result = ORBIS_OK;
if (fd == -1) {
result = memory->MapMemory(&addr_out, std::bit_cast<VAddr>(addr), len, mem_prot, mem_flags,
Core::VMAType::Flexible);
Core::VMAType::Flexible, "anon", is_exec);
} else {
result = memory->MapFile(&addr_out, std::bit_cast<VAddr>(addr), len, mem_prot, mem_flags,
fd, phys_addr);
@@ -711,6 +712,7 @@ void RegisterMemory(Core::Loader::SymbolsResolver* sym) {
sceKernelConfiguredFlexibleMemorySize);
LIB_FUNCTION("vSMAm3cxYTY", "libkernel", 1, "libkernel", 1, 1, sceKernelMprotect);
LIB_FUNCTION("YQOfxL4QfeU", "libkernel", 1, "libkernel", 1, 1, posix_mprotect);
LIB_FUNCTION("YQOfxL4QfeU", "libScePosix", 1, "libkernel", 1, 1, posix_mprotect);
LIB_FUNCTION("9bfdLIyuwCY", "libkernel", 1, "libkernel", 1, 1, sceKernelMtypeprotect);

View file

@@ -631,6 +631,9 @@ s64 MemoryManager::ProtectBytes(VAddr addr, VirtualMemoryArea vma_base, u64 size
if (True(prot & MemoryProt::CpuReadWrite)) {
perms |= Core::MemoryPermission::ReadWrite;
}
if (True(prot & MemoryProt::CpuExec)) {
perms |= Core::MemoryPermission::Execute;
}
if (True(prot & MemoryProt::GpuRead)) {
perms |= Core::MemoryPermission::Read;
}
@@ -650,9 +653,9 @@ s32 MemoryManager::Protect(VAddr addr, u64 size, MemoryProt prot) {
std::scoped_lock lk{mutex};
// Validate protection flags
constexpr static MemoryProt valid_flags = MemoryProt::NoAccess | MemoryProt::CpuRead |
MemoryProt::CpuReadWrite | MemoryProt::GpuRead |
MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
constexpr static MemoryProt valid_flags =
MemoryProt::NoAccess | MemoryProt::CpuRead | MemoryProt::CpuReadWrite |
MemoryProt::CpuExec | MemoryProt::GpuRead | MemoryProt::GpuWrite | MemoryProt::GpuReadWrite;
MemoryProt invalid_flags = prot & ~valid_flags;
if (invalid_flags != MemoryProt::NoAccess) {

View file

@@ -31,6 +31,7 @@ enum class MemoryProt : u32 {
NoAccess = 0,
CpuRead = 1,
CpuReadWrite = 2,
CpuExec = 4,
GpuRead = 16,
GpuWrite = 32,
GpuReadWrite = 48,

View file

@@ -353,7 +353,7 @@ Id EmitFPIsInf32(EmitContext& ctx, Id value);
Id EmitFPIsInf64(EmitContext& ctx, Id value);
Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
Id EmitIAdd64(EmitContext& ctx, Id a, Id b);
Id EmitIAddCary32(EmitContext& ctx, Id a, Id b);
Id EmitIAddCarry32(EmitContext& ctx, Id a, Id b);
Id EmitISub32(EmitContext& ctx, Id a, Id b);
Id EmitISub64(EmitContext& ctx, Id a, Id b);
Id EmitSMulHi(EmitContext& ctx, Id a, Id b);

View file

@@ -60,7 +60,7 @@ Id EmitIAdd64(EmitContext& ctx, Id a, Id b) {
return ctx.OpIAdd(ctx.U64, a, b);
}
Id EmitIAddCary32(EmitContext& ctx, Id a, Id b) {
Id EmitIAddCarry32(EmitContext& ctx, Id a, Id b) {
return ctx.OpIAddCarry(ctx.full_result_u32x2, a, b);
}

View file

@@ -623,12 +623,15 @@ void Translator::V_ADDC_U32(const GcnInst& inst) {
const IR::U32 src0{GetSrc(inst.src[0])};
const IR::U32 src1{GetSrc(inst.src[1])};
const IR::U32 carry{GetCarryIn(inst)};
const IR::U32 result{ir.IAdd(ir.IAdd(src0, src1), carry)};
SetDst(inst.dst[0], result);
const IR::Value tmp1{ir.IAddCarry(src0, src1)};
const IR::U32 result1{ir.CompositeExtract(tmp1, 0)};
const IR::U32 carry_out1{ir.CompositeExtract(tmp1, 1)};
const IR::Value tmp2{ir.IAddCarry(result1, carry)};
const IR::U32 result2{ir.CompositeExtract(tmp2, 0)};
const IR::U32 carry_out2{ir.CompositeExtract(tmp2, 1)};
SetDst(inst.dst[0], result2);
const IR::U1 less_src0{ir.ILessThan(result, src0, false)};
const IR::U1 less_src1{ir.ILessThan(result, src1, false)};
const IR::U1 did_overflow{ir.LogicalOr(less_src0, less_src1)};
const IR::U1 did_overflow{ir.INotEqual(ir.BitwiseOr(carry_out1, carry_out2), ir.Imm32(0))};
SetCarryOut(inst, did_overflow);
}
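
The rewritten lowering emits two IAddCarry ops and ORs their carry-outs, replacing the old unsigned-compare overflow test. The OR is exact: if the first add wraps, its result is at most 0xFFFFFFFE, so adding a carry-in of at most 1 cannot wrap a second time, meaning the two partial carries are mutually exclusive. A standalone C++ model with spot checks, illustrative only:

    #include <cassert>
    #include <cstdint>

    // Models dst = src0 + src1 + carry_in on a 32-bit lane, tracking carry-out.
    uint32_t AddCarry32(uint32_t a, uint32_t b, uint32_t carry_in, uint32_t& carry_out) {
        const uint32_t sum1 = a + b;
        const uint32_t c1 = sum1 < a ? 1u : 0u;    // carry from a + b
        const uint32_t sum2 = sum1 + carry_in;
        const uint32_t c2 = sum2 < sum1 ? 1u : 0u; // carry from adding carry_in
        carry_out = c1 | c2;                       // at most one can be set
        return sum2;
    }

    int main() {
        uint32_t c = 0;
        assert(AddCarry32(0xFFFFFFFFu, 0u, 1u, c) == 0u && c == 1u); // carry via carry_in
        assert(AddCarry32(0xFFFFFFFFu, 1u, 0u, c) == 0u && c == 1u); // carry via a + b
        assert(AddCarry32(1u, 2u, 1u, c) == 4u && c == 0u);          // no carry
    }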

View file

@@ -1424,13 +1424,13 @@ U32U64 IREmitter::IAdd(const U32U64& a, const U32U64& b) {
}
}
Value IREmitter::IAddCary(const U32& a, const U32& b) {
Value IREmitter::IAddCarry(const U32& a, const U32& b) {
if (a.Type() != b.Type()) {
UNREACHABLE_MSG("Mismatching types {} and {}", a.Type(), b.Type());
}
switch (a.Type()) {
case Type::U32:
return Inst<U32>(Opcode::IAddCary32, a, b);
return Inst(Opcode::IAddCarry32, a, b);
default:
ThrowInvalidType(a.Type());
}

View file

@@ -254,7 +254,7 @@ public:
[[nodiscard]] F32F64 FPMedTri(const F32F64& a, const F32F64& b, const F32F64& c);
[[nodiscard]] U32U64 IAdd(const U32U64& a, const U32U64& b);
[[nodiscard]] Value IAddCary(const U32& a, const U32& b);
[[nodiscard]] Value IAddCarry(const U32& a, const U32& b);
[[nodiscard]] U32U64 ISub(const U32U64& a, const U32U64& b);
[[nodiscard]] U32 IMulHi(const U32& a, const U32& b, bool is_signed = false);
[[nodiscard]] U32U64 IMul(const U32U64& a, const U32U64& b);

View file

@@ -328,7 +328,7 @@ OPCODE(FPCmpClass32, U1, F32,
// Integer operations
OPCODE(IAdd32, U32, U32, U32, )
OPCODE(IAdd64, U64, U64, U64, )
OPCODE(IAddCary32, U32x2, U32, U32, )
OPCODE(IAddCarry32, U32x2, U32, U32, )
OPCODE(ISub32, U32, U32, U32, )
OPCODE(ISub64, U64, U64, U64, )
OPCODE(IMul32, U32, U32, U32, )

View file

@@ -135,9 +135,8 @@ void Liverpool::Process(std::stop_token stoken) {
if (submit_done) {
VideoCore::EndCapture();
if (rasterizer) {
rasterizer->ProcessFaults();
rasterizer->EndCommandList();
rasterizer->Flush();
}
submit_done = false;

View file

@@ -112,7 +112,7 @@ public:
/// Invalidates any buffer in the logical page range.
void InvalidateMemory(VAddr device_addr, u64 size);
/// Waits on pending downloads in the logical page range.
/// Flushes any GPU-modified buffer in the logical page range back to CPU memory.
void ReadMemory(VAddr device_addr, u64 size, bool is_write = false);
/// Binds host vertex buffers for the current draw.

View file

@@ -272,6 +272,8 @@ void Rasterizer::EliminateFastClear() {
void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
RENDERER_TRACE;
scheduler.PopPendingOperations();
if (!FilterDraw()) {
return;
}
@@ -317,6 +319,8 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
u32 max_count, VAddr count_address) {
RENDERER_TRACE;
scheduler.PopPendingOperations();
if (!FilterDraw()) {
return;
}
@@ -380,6 +384,8 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
void Rasterizer::DispatchDirect() {
RENDERER_TRACE;
scheduler.PopPendingOperations();
const auto& cs_program = liverpool->GetCsRegs();
const ComputePipeline* pipeline = pipeline_cache.GetComputePipeline();
if (!pipeline) {
@@ -407,6 +413,8 @@ void Rasterizer::DispatchDirect() {
void Rasterizer::DispatchIndirect(VAddr address, u32 offset, u32 size) {
RENDERER_TRACE;
scheduler.PopPendingOperations();
const auto& cs_program = liverpool->GetCsRegs();
const ComputePipeline* pipeline = pipeline_cache.GetComputePipeline();
if (!pipeline) {
@@ -439,11 +447,12 @@ void Rasterizer::Finish() {
scheduler.Finish();
}
void Rasterizer::ProcessFaults() {
void Rasterizer::EndCommandList() {
if (fault_process_pending) {
fault_process_pending = false;
buffer_cache.ProcessFaultBuffer();
}
texture_cache.ProcessDownloadImages();
}
bool Rasterizer::BindResources(const Pipeline* pipeline) {
@@ -649,8 +658,7 @@ void Rasterizer::BindTextures(const Shader::Info& stage, Shader::Backend::Bindin
if (instance.IsNullDescriptorSupported()) {
image_infos.emplace_back(VK_NULL_HANDLE, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
} else {
auto& null_image_view =
texture_cache.FindTexture(VideoCore::NULL_IMAGE_ID, desc.view_info);
auto& null_image_view = texture_cache.FindTexture(VideoCore::NULL_IMAGE_ID, desc);
image_infos.emplace_back(VK_NULL_HANDLE, *null_image_view.image_view,
vk::ImageLayout::eGeneral);
}
@@ -664,7 +672,7 @@ void Rasterizer::BindTextures(const Shader::Info& stage, Shader::Backend::Bindin
bound_images.emplace_back(image_id);
auto& image = texture_cache.GetImage(image_id);
auto& image_view = texture_cache.FindTexture(image_id, desc.view_info);
auto& image_view = texture_cache.FindTexture(image_id, desc);
if (image.binding.force_general || image.binding.is_target) {
image.Transit(vk::ImageLayout::eGeneral,

View file

@@ -68,7 +68,7 @@ public:
void CpSync();
u64 Flush();
void Finish();
void ProcessFaults();
void EndCommandList();
PipelineCache& GetPipelineCache() {
return pipeline_cache;

View file

@@ -101,6 +101,14 @@ void Scheduler::Wait(u64 tick) {
}
}
void Scheduler::PopPendingOperations() {
master_semaphore.Refresh();
while (!pending_ops.empty() && master_semaphore.IsFree(pending_ops.front().gpu_tick)) {
pending_ops.front().callback();
pending_ops.pop();
}
}
void Scheduler::AllocateWorkerCommandBuffers() {
const vk::CommandBufferBeginInfo begin_info = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
@@ -175,10 +183,7 @@ void Scheduler::SubmitExecution(SubmitInfo& info) {
AllocateWorkerCommandBuffers();
// Apply pending operations
while (!pending_ops.empty() && IsFree(pending_ops.front().gpu_tick)) {
pending_ops.front().callback();
pending_ops.pop();
}
PopPendingOperations();
}
void DynamicState::Commit(const Instance& instance, const vk::CommandBuffer& cmdbuf) {
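
PopPendingOperations factors out the drain loop that SubmitExecution previously inlined: refresh the timeline value once, then run every deferred callback whose GPU tick has already been signaled. A self-contained sketch of the pattern; every name here is an illustrative stand-in, not the emulator's types:

    #include <cstdint>
    #include <functional>
    #include <queue>

    struct PendingOp {
        std::function<void()> callback;
        uint64_t gpu_tick; // safe to run once the GPU passes this tick
    };

    class DeferredOps {
    public:
        void Defer(std::function<void()> cb, uint64_t tick) {
            pending_ops.push({std::move(cb), tick});
        }
        // Drains in FIFO order; correct because ticks are handed out monotonically.
        void Pop(uint64_t completed_tick) {
            while (!pending_ops.empty() && pending_ops.front().gpu_tick <= completed_tick) {
                pending_ops.front().callback();
                pending_ops.pop();
            }
        }
    private:
        std::queue<PendingOp> pending_ops;
    };

    int main() {
        DeferredOps ops;
        ops.Defer([] { /* e.g. write a downloaded image back to guest memory */ }, 3);
        ops.Pop(2); // tick 3 not signaled yet: nothing runs
        ops.Pop(3); // now the callback fires
    }

Calling this at the top of every draw and dispatch, as the rasterizer hunks above do, bounds how long a completed download waits before becoming visible to the CPU.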

View file

@@ -317,6 +317,9 @@ public:
/// Waits for the given tick to trigger on the GPU.
void Wait(u64 tick);
/// Executes any pending operations whose tick the GPU has already reached.
void PopPendingOperations();
/// Starts a new rendering scope with provided state.
void BeginRendering(const RenderState& new_state);

View file

@@ -5,7 +5,9 @@
#include <xxhash.h>
#include "common/assert.h"
#include "common/config.h"
#include "common/debug.h"
#include "core/memory.h"
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/page_manager.h"
#include "video_core/renderer_vulkan/vk_instance.h"
@@ -58,6 +60,50 @@ ImageId TextureCache::GetNullImage(const vk::Format format) {
return null_id;
}
void TextureCache::ProcessDownloadImages() {
for (const ImageId image_id : download_images) {
DownloadImageMemory(image_id);
}
download_images.clear();
}
void TextureCache::DownloadImageMemory(ImageId image_id) {
Image& image = slot_images[image_id];
if (False(image.flags & ImageFlagBits::GpuModified)) {
return;
}
auto& download_buffer = buffer_cache.GetUtilityBuffer(MemoryUsage::Download);
const u32 download_size = image.info.pitch * image.info.size.height *
image.info.resources.layers * (image.info.num_bits / 8);
ASSERT(download_size <= image.info.guest_size);
const auto [download, offset] = download_buffer.Map(download_size);
download_buffer.Commit();
const vk::BufferImageCopy image_download = {
.bufferOffset = offset,
.bufferRowLength = image.info.pitch,
.bufferImageHeight = image.info.size.height,
.imageSubresource =
{
.aspectMask = image.info.IsDepthStencil() ? vk::ImageAspectFlagBits::eDepth
: vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = image.info.resources.layers,
},
.imageOffset = {0, 0, 0},
.imageExtent = {image.info.size.width, image.info.size.height, 1},
};
scheduler.EndRendering();
const auto cmdbuf = scheduler.CommandBuffer();
image.Transit(vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits2::eTransferRead, {});
cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal,
download_buffer.Handle(), image_download);
scheduler.DeferOperation([device_addr = image.info.guest_address, download, download_size] {
auto* memory = Core::Memory::Instance();
memory->TryWriteBacking(std::bit_cast<u8*>(device_addr), download, download_size);
});
}
void TextureCache::MarkAsMaybeDirty(ImageId image_id, Image& image) {
if (image.hash == 0) {
// Initialize hash
@@ -169,7 +215,7 @@ ImageId TextureCache::ResolveDepthOverlap(const ImageInfo& requested_info, Bindi
if (recreate) {
auto new_info = requested_info;
new_info.resources = std::min(requested_info.resources, cache_image.info.resources);
new_info.resources = std::max(requested_info.resources, cache_image.info.resources);
const auto new_image_id = slot_images.insert(instance, scheduler, new_info);
RegisterImage(new_image_id);
@@ -437,16 +483,27 @@ ImageView& TextureCache::RegisterImageView(ImageId image_id, const ImageViewInfo
return slot_image_views[view_id];
}
ImageView& TextureCache::FindTexture(ImageId image_id, const ImageViewInfo& view_info) {
ImageView& TextureCache::FindTexture(ImageId image_id, const BaseDesc& desc) {
Image& image = slot_images[image_id];
if (desc.type == BindingType::Storage) {
image.flags |= ImageFlagBits::GpuModified;
if (Config::readbackLinearImages() &&
image.info.tiling_mode == AmdGpu::TilingMode::Display_Linear) {
download_images.emplace(image_id);
}
}
UpdateImage(image_id);
return RegisterImageView(image_id, view_info);
return RegisterImageView(image_id, desc.view_info);
}
ImageView& TextureCache::FindRenderTarget(BaseDesc& desc) {
const ImageId image_id = FindImage(desc);
Image& image = slot_images[image_id];
image.flags |= ImageFlagBits::GpuModified;
if (Config::readbackLinearImages() &&
image.info.tiling_mode == AmdGpu::TilingMode::Display_Linear) {
download_images.emplace(image_id);
}
image.usage.render_target = 1u;
UpdateImage(image_id);
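
DownloadImageMemory above sizes the staging copy as pitch * height * layers * bytes-per-texel, which only matches the memory layout of the linearly tiled images this path is gated to by Config::readbackLinearImages(). A small standalone model of that computation; the struct and numbers are invented for the example:

    #include <cassert>
    #include <cstdint>

    struct LinearImageInfo {
        uint32_t pitch;      // row stride in texels
        uint32_t height;     // rows per layer
        uint32_t layers;     // array layers
        uint32_t num_bits;   // bits per texel
        uint64_t guest_size; // size of the guest allocation
    };

    // Bytes copied back to CPU memory for a linearly tiled image.
    uint64_t DownloadSize(const LinearImageInfo& info) {
        const uint64_t size =
            uint64_t(info.pitch) * info.height * info.layers * (info.num_bits / 8);
        assert(size <= info.guest_size); // mirrors the ASSERT in the hunk above
        return size;
    }

    int main() {
        // 1280x720 BGRA8 (32 bits per texel), one layer, pitch == width.
        const LinearImageInfo info{1280, 720, 1, 32, 1280ull * 720 * 4};
        assert(DownloadSize(info) == 3'686'400ull);
    }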

View file

@@ -3,6 +3,7 @@
#pragma once
#include <unordered_set>
#include <boost/container/small_vector.hpp>
#include <tsl/robin_map.h>
@@ -105,11 +106,14 @@ public:
/// Evicts any images that overlap the unmapped range.
void UnmapMemory(VAddr cpu_addr, size_t size);
/// Schedules a copy of pending images for download back to CPU memory.
void ProcessDownloadImages();
/// Retrieves the image handle of the image with the provided attributes.
[[nodiscard]] ImageId FindImage(BaseDesc& desc, FindFlags flags = {});
/// Retrieves an image view with the properties of the specified image id.
[[nodiscard]] ImageView& FindTexture(ImageId image_id, const ImageViewInfo& view_info);
[[nodiscard]] ImageView& FindTexture(ImageId image_id, const BaseDesc& desc);
/// Retrieves the render target with the specified properties.
[[nodiscard]] ImageView& FindRenderTarget(BaseDesc& desc);
@@ -252,6 +256,9 @@ private:
/// Gets or creates a null image for a particular format.
ImageId GetNullImage(vk::Format format);
/// Copies image memory back to CPU.
void DownloadImageMemory(ImageId image_id);
/// Create an image from the given parameters
[[nodiscard]] ImageId InsertImage(const ImageInfo& info, VAddr cpu_addr);
@@ -293,6 +300,7 @@ private:
Common::SlotVector<ImageView> slot_image_views;
tsl::robin_map<u64, Sampler> samplers;
tsl::robin_map<vk::Format, ImageId> null_images;
std::unordered_set<ImageId> download_images;
PageTable page_table;
std::mutex mutex;