mirror of
https://github.com/shadps4-emu/shadPS4.git
synced 2025-05-29 23:03:18 +00:00
shader_recompiler: Implement data share append and consume operations (#814)
* shader_recompiler: Add more format swap modes
* texture_cache: Handle stencil texture reads
* emulator: Support loading font library
* readme: Add thanks section
* shader_recompiler: Constant buffers as integers
* shader_recompiler: Typed buffers as integers
* shader_recompiler: Separate thread bit scalars
  We can assume the guest shader never mixes them with normal SGPRs. This helps avoid errors where SSA could view an SGPR write as dominating a thread bit read, due to how control flow is structurized, even though that is not possible in the actual control flow.
* shader_recompiler: Implement data append/consume operations
* clang format
* buffer_cache: Simplify invalidation scheme
* video_core: Remove some invalidation remnants
* adjust
This commit is contained in:
parent
649527a235
commit
13743b27fc
34 changed files with 512 additions and 272 deletions
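Background on the feature named in the title: GCN's ds_append and ds_consume instructions atomically bump a 32-bit counter held in GDS (the on-chip global data share) and hand a slot index back to the wave, which is how guest shaders implement lock-free append/consume buffers. The changes below back GDS with a small Vulkan buffer so those counters can be updated by compute dispatches and read back by the command processor. A minimal host-side analogy in C++ (the type and method names are illustrative only, not part of the emulator):

```cpp
#include <atomic>
#include <cstdint>

// Illustrative analogy only: a 32-bit counter that lives in GDS. "Append" atomically
// reserves the next free slot of an output buffer; "consume" atomically claims a
// previously written slot. Exact pre/post-decrement conventions of the hardware
// instructions are glossed over here.
struct GdsCounter {
    std::atomic<std::uint32_t> value{0};

    std::uint32_t Append() {
        // fetch_add returns the old value, i.e. the index of the slot just reserved.
        return value.fetch_add(1, std::memory_order_relaxed);
    }

    std::uint32_t Consume() {
        // fetch_sub returns the old value; the claimed slot is one below it.
        return value.fetch_sub(1, std::memory_order_relaxed) - 1;
    }
};
```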
@@ -465,6 +465,14 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<c
         case PM4ItOpcode::EventWriteEos: {
             const auto* event_eos = reinterpret_cast<const PM4CmdEventWriteEos*>(header);
             event_eos->SignalFence();
+            if (event_eos->command == PM4CmdEventWriteEos::Command::GdsStore) {
+                ASSERT(event_eos->size == 1);
+                if (rasterizer) {
+                    rasterizer->Finish();
+                    const u32 value = rasterizer->ReadDataFromGds(event_eos->gds_index);
+                    *event_eos->Address() = value;
+                }
+            }
             break;
         }
         case PM4ItOpcode::EventWriteEop: {
@@ -474,6 +482,9 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<c
         }
         case PM4ItOpcode::DmaData: {
             const auto* dma_data = reinterpret_cast<const PM4DmaData*>(header);
+            if (dma_data->src_sel == DmaDataSrc::Data && dma_data->dst_sel == DmaDataDst::Gds) {
+                rasterizer->InlineDataToGds(dma_data->dst_addr_lo, dma_data->data);
+            }
             break;
         }
         case PM4ItOpcode::WriteData: {
@@ -350,6 +350,17 @@ struct PM4CmdEventWriteEop {
     }
 };
 
+enum class DmaDataDst : u32 {
+    Memory = 0,
+    Gds = 1,
+};
+
+enum class DmaDataSrc : u32 {
+    Memory = 0,
+    Gds = 1,
+    Data = 2,
+};
+
 struct PM4DmaData {
     PM4Type3Header header;
     union {
@@ -357,11 +368,11 @@ struct PM4DmaData {
         BitField<12, 1, u32> src_atc;
         BitField<13, 2, u32> src_cache_policy;
         BitField<15, 1, u32> src_volatile;
-        BitField<20, 2, u32> dst_sel;
+        BitField<20, 2, DmaDataDst> dst_sel;
         BitField<24, 1, u32> dst_atc;
         BitField<25, 2, u32> dst_cache_policy;
         BitField<27, 1, u32> dst_volatile;
-        BitField<29, 2, u32> src_sel;
+        BitField<29, 2, DmaDataSrc> src_sel;
         BitField<31, 1, u32> cp_sync;
     };
     union {
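The switch from BitField<..., u32> to the typed BitField<..., DmaDataDst>/BitField<..., DmaDataSrc> fields is what lets the Liverpool hunk above compare dst_sel and src_sel directly against the enumerators instead of magic numbers. A self-contained sketch of the equivalent manual decode, using the same bit positions (the helper names are illustrative, not part of the codebase):

```cpp
#include <cstdint>

// Stand-alone illustration: pull the two selector fields out of the DMA_DATA control
// dword and compare them against the enums. Bit positions mirror the BitField
// declarations above.
enum class DmaDataDst : std::uint32_t { Memory = 0, Gds = 1 };
enum class DmaDataSrc : std::uint32_t { Memory = 0, Gds = 1, Data = 2 };

constexpr DmaDataDst DecodeDstSel(std::uint32_t control) {
    return static_cast<DmaDataDst>((control >> 20) & 0x3); // bits [21:20]
}

constexpr DmaDataSrc DecodeSrcSel(std::uint32_t control) {
    return static_cast<DmaDataSrc>((control >> 29) & 0x3); // bits [30:29]
}

static_assert(DecodeDstSel(1u << 20) == DmaDataDst::Gds);
static_assert(DecodeSrcSel(2u << 29) == DmaDataSrc::Data);
```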
@@ -502,13 +513,17 @@ struct PM4CmdEventWriteEos {
     }
 
     void SignalFence() const {
-        switch (command.Value()) {
+        const auto cmd = command.Value();
+        switch (cmd) {
         case Command::SingalFence: {
             *Address() = DataDWord();
             break;
         }
+        case Command::GdsStore: {
+            break;
+        }
         default: {
-            UNREACHABLE();
+            UNREACHABLE_MSG("Unknown command {}", u32(cmd));
         }
         }
     }
@@ -15,8 +15,9 @@
 namespace VideoCore {
 
 static constexpr size_t NumVertexBuffers = 32;
-static constexpr size_t StagingBufferSize = 512_MB;
-static constexpr size_t UboStreamBufferSize = 64_MB;
+static constexpr size_t GdsBufferSize = 64_KB;
+static constexpr size_t StagingBufferSize = 1_GB;
+static constexpr size_t UboStreamBufferSize = 128_MB;
 
 BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& scheduler_,
                          const AmdGpu::Liverpool* liverpool_, TextureCache& texture_cache_,
@@ -25,7 +26,10 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
       texture_cache{texture_cache_}, tracker{tracker_},
       staging_buffer{instance, scheduler, MemoryUsage::Upload, StagingBufferSize},
       stream_buffer{instance, scheduler, MemoryUsage::Stream, UboStreamBufferSize},
+      gds_buffer{instance, scheduler, MemoryUsage::Stream, 0, AllFlags, GdsBufferSize},
       memory_tracker{&tracker} {
+    Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer");
+
     // Ensure the first slot is used for the null buffer
     void(slot_buffers.insert(instance, scheduler, MemoryUsage::DeviceLocal, 0, ReadFlags, 1));
 }
@@ -232,6 +236,27 @@ u32 BufferCache::BindIndexBuffer(bool& is_indexed, u32 index_offset) {
     return regs.num_indices;
 }
 
+void BufferCache::InlineDataToGds(u32 gds_offset, u32 value) {
+    ASSERT_MSG(gds_offset % 4 == 0, "GDS offset must be dword aligned");
+    scheduler.EndRendering();
+    const auto cmdbuf = scheduler.CommandBuffer();
+    const vk::BufferMemoryBarrier2 buf_barrier = {
+        .srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
+        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
+        .dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
+        .dstAccessMask = vk::AccessFlagBits2::eMemoryRead,
+        .buffer = gds_buffer.Handle(),
+        .offset = gds_offset,
+        .size = sizeof(u32),
+    };
+    cmdbuf.pipelineBarrier2(vk::DependencyInfo{
+        .dependencyFlags = vk::DependencyFlagBits::eByRegion,
+        .bufferMemoryBarrierCount = 1,
+        .pBufferMemoryBarriers = &buf_barrier,
+    });
+    cmdbuf.updateBuffer(gds_buffer.Handle(), gds_offset, sizeof(u32), &value);
+}
+
 std::pair<Buffer*, u32> BufferCache::ObtainBuffer(VAddr device_addr, u32 size, bool is_written,
                                                   bool is_texel_buffer) {
     static constexpr u64 StreamThreshold = CACHING_PAGESIZE;
@@ -258,6 +283,7 @@ std::pair<Buffer*, u32> BufferCache::ObtainTempBuffer(VAddr gpu_addr, u32 size)
     if (buffer_id) {
         Buffer& buffer = slot_buffers[buffer_id];
         if (buffer.IsInBounds(gpu_addr, size)) {
+            SynchronizeBuffer(buffer, gpu_addr, size, false);
             return {&buffer, buffer.Offset(gpu_addr)};
         }
     }
@@ -541,64 +567,48 @@ void BufferCache::SynchronizeBuffer(Buffer& buffer, VAddr device_addr, u32 size,
 }
 
 bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr, u32 size) {
-    boost::container::small_vector<ImageId, 8> image_ids;
-    const u32 inv_size = std::min(size, MaxInvalidateDist);
-    texture_cache.ForEachImageInRegion(device_addr, inv_size, [&](ImageId image_id, Image& image) {
-        // Only consider GPU modified images, i.e render targets or storage images.
-        // Also avoid any CPU modified images as the image data is likely to be stale.
-        if (True(image.flags & ImageFlagBits::CpuModified) ||
-            False(image.flags & ImageFlagBits::GpuModified)) {
-            return;
-        }
-        // Image must fully overlap with the provided buffer range.
-        if (image.cpu_addr < device_addr || image.cpu_addr_end > device_addr + size) {
-            return;
-        }
-        image_ids.push_back(image_id);
-    });
-    if (image_ids.empty()) {
+    static constexpr FindFlags find_flags =
+        FindFlags::NoCreate | FindFlags::RelaxDim | FindFlags::RelaxFmt | FindFlags::RelaxSize;
+    ImageInfo info{};
+    info.guest_address = device_addr;
+    info.guest_size_bytes = size;
+    const ImageId image_id = texture_cache.FindImage(info, find_flags);
+    if (!image_id) {
         return false;
     }
-    // Sort images by modification tick. If there are overlaps we want to
-    // copy from least to most recently modified.
-    std::ranges::sort(image_ids, [&](ImageId lhs_id, ImageId rhs_id) {
-        const Image& lhs = texture_cache.GetImage(lhs_id);
-        const Image& rhs = texture_cache.GetImage(rhs_id);
-        return lhs.tick_accessed_last < rhs.tick_accessed_last;
-    });
-    boost::container::small_vector<vk::BufferImageCopy, 8> copies;
-    for (const ImageId image_id : image_ids) {
-        copies.clear();
-        Image& image = texture_cache.GetImage(image_id);
-        u32 offset = buffer.Offset(image.cpu_addr);
-        const u32 num_layers = image.info.resources.layers;
-        for (u32 m = 0; m < image.info.resources.levels; m++) {
-            const u32 width = std::max(image.info.size.width >> m, 1u);
-            const u32 height = std::max(image.info.size.height >> m, 1u);
-            const u32 depth =
-                image.info.props.is_volume ? std::max(image.info.size.depth >> m, 1u) : 1u;
-            const auto& [mip_size, mip_pitch, mip_height, mip_ofs] = image.info.mips_layout[m];
-            copies.push_back({
-                .bufferOffset = offset,
-                .bufferRowLength = static_cast<u32>(mip_pitch),
-                .bufferImageHeight = static_cast<u32>(mip_height),
-                .imageSubresource{
-                    .aspectMask = image.aspect_mask & ~vk::ImageAspectFlagBits::eStencil,
-                    .mipLevel = m,
-                    .baseArrayLayer = 0,
-                    .layerCount = num_layers,
-                },
-                .imageOffset = {0, 0, 0},
-                .imageExtent = {width, height, depth},
-            });
-            offset += mip_ofs * num_layers;
-        }
-        scheduler.EndRendering();
-        image.Transit(vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits::eTransferRead);
-        const auto cmdbuf = scheduler.CommandBuffer();
-        cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.buffer,
-                                 copies);
-    }
+    Image& image = texture_cache.GetImage(image_id);
+    if (image.info.guest_size_bytes > size) {
+        return false;
+    }
+    boost::container::small_vector<vk::BufferImageCopy, 8> copies;
+    u32 offset = buffer.Offset(image.cpu_addr);
+    const u32 num_layers = image.info.resources.layers;
+    for (u32 m = 0; m < image.info.resources.levels; m++) {
+        const u32 width = std::max(image.info.size.width >> m, 1u);
+        const u32 height = std::max(image.info.size.height >> m, 1u);
+        const u32 depth =
+            image.info.props.is_volume ? std::max(image.info.size.depth >> m, 1u) : 1u;
+        const auto& [mip_size, mip_pitch, mip_height, mip_ofs] = image.info.mips_layout[m];
+        copies.push_back({
+            .bufferOffset = offset,
+            .bufferRowLength = static_cast<u32>(mip_pitch),
+            .bufferImageHeight = static_cast<u32>(mip_height),
+            .imageSubresource{
+                .aspectMask = image.aspect_mask & ~vk::ImageAspectFlagBits::eStencil,
+                .mipLevel = m,
+                .baseArrayLayer = 0,
+                .layerCount = num_layers,
+            },
+            .imageOffset = {0, 0, 0},
+            .imageExtent = {width, height, depth},
+        });
+        offset += mip_ofs * num_layers;
+    }
+    scheduler.EndRendering();
+    image.Transit(vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits::eTransferRead);
+    const auto cmdbuf = scheduler.CommandBuffer();
+    cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.buffer,
+                             copies);
     return true;
 }
 
@@ -57,6 +57,11 @@ public:
                 PageManager& tracker);
     ~BufferCache();
 
+    /// Returns a pointer to GDS device local buffer.
+    [[nodiscard]] const Buffer* GetGdsBuffer() const noexcept {
+        return &gds_buffer;
+    }
+
     /// Invalidates any buffer in the logical page range.
     void InvalidateMemory(VAddr device_addr, u64 size);
 
@@ -66,6 +71,9 @@ public:
     /// Bind host index buffer for the current draw.
     u32 BindIndexBuffer(bool& is_indexed, u32 index_offset);
 
+    /// Writes a value to GDS buffer.
+    void InlineDataToGds(u32 gds_offset, u32 value);
+
     /// Obtains a buffer for the specified region.
     [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(VAddr gpu_addr, u32 size, bool is_written,
                                                        bool is_texel_buffer = false);
@@ -130,6 +138,7 @@ private:
     PageManager& tracker;
     StreamBuffer staging_buffer;
     StreamBuffer stream_buffer;
+    Buffer gds_buffer;
     std::mutex mutex;
     Common::SlotVector<Buffer> slot_buffers;
     MemoryTracker memory_tracker;
@@ -585,11 +585,10 @@ vk::Format SurfaceFormat(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat nu
 
 vk::Format AdjustColorBufferFormat(vk::Format base_format,
                                    Liverpool::ColorBuffer::SwapMode comp_swap, bool is_vo_surface) {
-    ASSERT_MSG(comp_swap == Liverpool::ColorBuffer::SwapMode::Standard ||
-                   comp_swap == Liverpool::ColorBuffer::SwapMode::Alternate,
-               "Unsupported component swap mode {}", static_cast<u32>(comp_swap));
-
     const bool comp_swap_alt = comp_swap == Liverpool::ColorBuffer::SwapMode::Alternate;
+    const bool comp_swap_reverse = comp_swap == Liverpool::ColorBuffer::SwapMode::StandardReverse;
+    const bool comp_swap_alt_reverse =
+        comp_swap == Liverpool::ColorBuffer::SwapMode::AlternateReverse;
     if (comp_swap_alt) {
         switch (base_format) {
         case vk::Format::eR8G8B8A8Unorm:
@@ -605,6 +604,18 @@ vk::Format AdjustColorBufferFormat(vk::Format base_format,
         default:
             break;
         }
+    } else if (comp_swap_reverse) {
+        switch (base_format) {
+        case vk::Format::eR8G8B8A8Unorm:
+            return vk::Format::eA8B8G8R8UnormPack32;
+        case vk::Format::eR8G8B8A8Srgb:
+            return is_vo_surface ? vk::Format::eA8B8G8R8UnormPack32
+                                 : vk::Format::eA8B8G8R8SrgbPack32;
+        default:
+            break;
+        }
+    } else if (comp_swap_alt_reverse) {
+        return base_format;
     } else {
         if (is_vo_surface && base_format == vk::Format::eR8G8B8A8Srgb) {
             return vk::Format::eR8G8B8A8Unorm;
@@ -109,37 +109,42 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
     u32 binding{};
 
     for (const auto& desc : info->buffers) {
-        const auto vsharp = desc.GetSharp(*info);
-        const bool is_storage = desc.IsStorage(vsharp);
-        const VAddr address = vsharp.base_address;
-        // Most of the time when a metadata is updated with a shader it gets cleared. It means we
-        // can skip the whole dispatch and update the tracked state instead. Also, it is not
-        // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we will
-        // need its full emulation anyways. For cases of metadata read a warning will be logged.
-        if (desc.is_written) {
-            if (texture_cache.TouchMeta(address, true)) {
-                LOG_TRACE(Render_Vulkan, "Metadata update skipped");
-                return false;
-            }
-        } else {
-            if (texture_cache.IsMeta(address)) {
-                LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
-            }
-        }
-        const u32 size = vsharp.GetSize();
-        if (desc.is_written) {
-            texture_cache.InvalidateMemory(address, size);
-        }
-        const u32 alignment =
-            is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
-        const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(address, size, desc.is_written);
-        const u32 offset_aligned = Common::AlignDown(offset, alignment);
-        const u32 adjust = offset - offset_aligned;
-        if (adjust != 0) {
-            ASSERT(adjust % 4 == 0);
-            push_data.AddOffset(binding, adjust);
-        }
-        buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned, size + adjust);
+        bool is_storage = true;
+        if (desc.is_gds_buffer) {
+            auto* vk_buffer = buffer_cache.GetGdsBuffer();
+            buffer_infos.emplace_back(vk_buffer->Handle(), 0, vk_buffer->SizeBytes());
+        } else {
+            const auto vsharp = desc.GetSharp(*info);
+            is_storage = desc.IsStorage(vsharp);
+            const VAddr address = vsharp.base_address;
+            // Most of the time when a metadata is updated with a shader it gets cleared. It means
+            // we can skip the whole dispatch and update the tracked state instead. Also, it is not
+            // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we
+            // will need its full emulation anyways. For cases of metadata read a warning will be
+            // logged.
+            if (desc.is_written) {
+                if (texture_cache.TouchMeta(address, true)) {
+                    LOG_TRACE(Render_Vulkan, "Metadata update skipped");
+                    return false;
+                }
+            } else {
+                if (texture_cache.IsMeta(address)) {
+                    LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
+                }
+            }
+            const u32 size = vsharp.GetSize();
+            const u32 alignment =
+                is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
+            const auto [vk_buffer, offset] =
+                buffer_cache.ObtainBuffer(address, size, desc.is_written);
+            const u32 offset_aligned = Common::AlignDown(offset, alignment);
+            const u32 adjust = offset - offset_aligned;
+            if (adjust != 0) {
+                ASSERT(adjust % 4 == 0);
+                push_data.AddOffset(binding, adjust);
+            }
+            buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned, size + adjust);
+        }
         set_writes.push_back({
             .dstSet = VK_NULL_HANDLE,
             .dstBinding = binding++,
@@ -188,7 +193,7 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
             buffer_barriers.emplace_back(*barrier);
         }
         if (desc.is_written) {
-            texture_cache.InvalidateMemory(address, size);
+            texture_cache.MarkWritten(address, size);
         }
     }
     set_writes.push_back({
@@ -432,7 +432,7 @@ void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
             buffer_barriers.emplace_back(*barrier);
         }
         if (desc.is_written) {
-            texture_cache.InvalidateMemory(address, size);
+            texture_cache.MarkWritten(address, size);
         }
     }
     set_writes.push_back({
@@ -298,6 +298,16 @@ bool PipelineCache::RefreshGraphicsKey() {
             return false;
         }
 
+        static bool TessMissingLogged = false;
+        if (auto* pgm = regs.ProgramForStage(3);
+            regs.stage_enable.IsStageEnabled(3) && pgm->Address() != 0) {
+            if (!TessMissingLogged) {
+                LOG_WARNING(Render_Vulkan, "Tess pipeline compilation skipped");
+                TessMissingLogged = true;
+            }
+            return false;
+        }
+
         std::tie(infos[i], modules[i], key.stage_hashes[i]) = GetProgram(stage, params, binding);
     }
     return true;
@@ -175,6 +175,10 @@ u64 Rasterizer::Flush() {
     return current_tick;
 }
 
+void Rasterizer::Finish() {
+    scheduler.Finish();
+}
+
 void Rasterizer::BeginRendering() {
     const auto& regs = liverpool->regs;
     RenderState state;
@@ -251,6 +255,17 @@ void Rasterizer::BeginRendering() {
     scheduler.BeginRendering(state);
 }
 
+void Rasterizer::InlineDataToGds(u32 gds_offset, u32 value) {
+    buffer_cache.InlineDataToGds(gds_offset, value);
+}
+
+u32 Rasterizer::ReadDataFromGds(u32 gds_offset) {
+    auto* gds_buf = buffer_cache.GetGdsBuffer();
+    u32 value;
+    std::memcpy(&value, gds_buf->mapped_data.data() + gds_offset, sizeof(u32));
+    return value;
+}
+
 void Rasterizer::InvalidateMemory(VAddr addr, u64 size) {
     buffer_cache.InvalidateMemory(addr, size);
     texture_cache.InvalidateMemory(addr, size);
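ReadDataFromGds is a plain host-side read from the persistently mapped GDS buffer, which is why the EventWriteEos handler calls Finish() before it: the GPU writes to the counter must have completed before the memcpy. A self-contained sketch of the same readback pattern (names are illustrative; the real code reads through the buffer's mapped_data span):

```cpp
#include <cstdint>
#include <cstring>
#include <span>

// Read one dword out of a persistently mapped buffer. Only meaningful once the GPU
// work that wrote it has finished (the emulator drains the scheduler first).
std::uint32_t ReadCounter(std::span<const std::uint8_t> mapped_gds, std::uint32_t byte_offset) {
    std::uint32_t value{};
    std::memcpy(&value, mapped_gds.data() + byte_offset, sizeof(value));
    return value;
}
```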
@@ -41,12 +41,15 @@ public:
    void ScopeMarkerEnd();
    void ScopedMarkerInsert(const std::string_view& str);

+    void InlineDataToGds(u32 gds_offset, u32 value);
+    u32 ReadDataFromGds(u32 gsd_offset);
    void InvalidateMemory(VAddr addr, u64 size);
    void MapMemory(VAddr addr, u64 size);
    void UnmapMemory(VAddr addr, u64 size);

    void CpSync();
    u64 Flush();
+    void Finish();

 private:
    void BeginRendering();
@@ -32,7 +32,6 @@ enum ImageFlagBits : u32 {
     Registered = 1 << 6,     ///< True when the image is registered
     Picked = 1 << 7,         ///< Temporary flag to mark the image as picked
     MetaRegistered = 1 << 8, ///< True when metadata for this surface is known and registered
-    Deleted = 1 << 9,        ///< Indicates that images was marked for deletion once frame is done
 };
 DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)
 
@@ -205,7 +205,7 @@ ImageInfo::ImageInfo(const AmdGpu::Image& image, bool force_depth /*= false*/) n
     pixel_format = LiverpoolToVK::SurfaceFormat(image.GetDataFmt(), image.GetNumberFmt());
     // Override format if image is forced to be a depth target
     if (force_depth || tiling_mode == AmdGpu::TilingMode::Depth_MacroTiled) {
-        if (pixel_format == vk::Format::eR32Sfloat) {
+        if (pixel_format == vk::Format::eR32Sfloat || pixel_format == vk::Format::eR8Unorm) {
             pixel_format = vk::Format::eD32SfloatS8Uint;
         } else if (pixel_format == vk::Format::eR16Unorm) {
             pixel_format = vk::Format::eD16UnormS8Uint;
@@ -128,6 +128,10 @@ ImageView::ImageView(const Vulkan::Instance& instance, const ImageViewInfo& info
         format = image.info.pixel_format;
         aspect = vk::ImageAspectFlagBits::eDepth;
     }
+    if (image.aspect_mask & vk::ImageAspectFlagBits::eStencil && format == vk::Format::eR8Unorm) {
+        format = image.info.pixel_format;
+        aspect = vk::ImageAspectFlagBits::eStencil;
+    }
 
     const vk::ImageViewCreateInfo image_view_ci = {
         .pNext = usage_override ? &usage_ci : nullptr,
@@ -40,17 +40,27 @@ TextureCache::~TextureCache() = default;
 void TextureCache::InvalidateMemory(VAddr address, size_t size) {
     std::scoped_lock lock{mutex};
     ForEachImageInRegion(address, size, [&](ImageId image_id, Image& image) {
-        const size_t image_dist =
-            image.cpu_addr > address ? image.cpu_addr - address : address - image.cpu_addr;
-        if (image_dist < MaxInvalidateDist) {
-            // Ensure image is reuploaded when accessed again.
-            image.flags |= ImageFlagBits::CpuModified;
-        }
+        // Ensure image is reuploaded when accessed again.
+        image.flags |= ImageFlagBits::CpuModified;
         // Untrack image, so the range is unprotected and the guest can write freely.
         UntrackImage(image_id);
     });
 }
 
+void TextureCache::MarkWritten(VAddr address, size_t max_size) {
+    static constexpr FindFlags find_flags =
+        FindFlags::NoCreate | FindFlags::RelaxDim | FindFlags::RelaxFmt | FindFlags::RelaxSize;
+    ImageInfo info{};
+    info.guest_address = address;
+    info.guest_size_bytes = max_size;
+    const ImageId image_id = FindImage(info, find_flags);
+    if (!image_id) {
+        return;
+    }
+    // Ensure image is copied when accessed again.
+    slot_images[image_id].flags |= ImageFlagBits::CpuModified;
+}
+
 void TextureCache::UnmapMemory(VAddr cpu_addr, size_t size) {
     std::scoped_lock lk{mutex};
 
@@ -199,10 +209,14 @@ ImageId TextureCache::FindImage(const ImageInfo& info, FindFlags flags) {
             !IsVulkanFormatCompatible(info.pixel_format, cache_image.info.pixel_format)) {
             continue;
         }
-        ASSERT(cache_image.info.type == info.type);
+        ASSERT(cache_image.info.type == info.type || True(flags & FindFlags::RelaxFmt));
         image_id = cache_id;
     }
 
+    if (True(flags & FindFlags::NoCreate) && !image_id) {
+        return {};
+    }
+
     // Try to resolve overlaps (if any)
     if (!image_id) {
         for (const auto& cache_id : image_ids) {
@@ -211,10 +225,6 @@ ImageId TextureCache::FindImage(const ImageInfo& info, FindFlags flags) {
         }
     }
 
-    if (True(flags & FindFlags::NoCreate) && !image_id) {
-        return {};
-    }
-
     // Create and register a new image
     if (!image_id) {
         image_id = slot_images.insert(instance, scheduler, info);
@@ -251,9 +261,6 @@ ImageView& TextureCache::RegisterImageView(ImageId image_id, const ImageViewInfo
 ImageView& TextureCache::FindTexture(const ImageInfo& info, const ImageViewInfo& view_info) {
     const ImageId image_id = FindImage(info);
     Image& image = slot_images[image_id];
-    if (view_info.is_storage) {
-        image.flags |= ImageFlagBits::GpuModified;
-    }
     UpdateImage(image_id);
     auto& usage = image.info.usage;
 
@@ -351,7 +358,6 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
     if (False(image.flags & ImageFlagBits::CpuModified)) {
         return;
     }
-
     // Mark image as validated.
     image.flags &= ~ImageFlagBits::CpuModified;
 
@@ -485,8 +491,6 @@ void TextureCache::DeleteImage(ImageId image_id) {
     ASSERT_MSG(False(image.flags & ImageFlagBits::Tracked), "Image was not untracked");
     ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered");
 
-    image.flags |= ImageFlagBits::Deleted;
-
     // Remove any registered meta areas.
     const auto& meta_info = image.info.meta_info;
     if (meta_info.cmask_addr) {
@@ -50,6 +50,9 @@ public:
     /// Invalidates any image in the logical page range.
     void InvalidateMemory(VAddr address, size_t size);
 
+    /// Marks an image as dirty if it exists at the provided address.
+    void MarkWritten(VAddr address, size_t max_size);
+
     /// Evicts any images that overlap the unmapped range.
     void UnmapMemory(VAddr cpu_addr, size_t size);
 