renderer_vulkan: Prefer depth stencil read-only layout when possible

* Persona samples a depth image while it is simultaneously bound as a depth attachment with writes disabled. This now works without spamming Vulkan validation errors.
Author: IndecisiveTurtle, 2024-07-01 02:11:53 +03:00
Committer: TheTurtle
Parent: 22b930ba5e
Commit: b4d24d8737
15 changed files with 106 additions and 65 deletions
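The gist of the change, as a minimal sketch (ChooseDepthLayout is an illustrative helper name, not code from this commit): when a clear is pending or depth writes are enabled, the depth image must stay in the writable attachment layout; otherwise the read-only layout is preferred, because that layout is valid both for the bound depth attachment and for shader reads of the same image.

    #include <vulkan/vulkan.hpp>

    // Sketch only: pick the layout a depth image should be in for the next
    // render pass, given the guest's depth-control state.
    vk::ImageLayout ChooseDepthLayout(bool is_clear, bool depth_write_enable) {
        if (is_clear || depth_write_enable) {
            // Clearing or writing requires the writable attachment layout.
            return vk::ImageLayout::eDepthStencilAttachmentOptimal;
        }
        // Read-only is simultaneously valid as the bound depth attachment and
        // as a sampled image, which is what silences the validation errors.
        return vk::ImageLayout::eDepthStencilReadOnlyOptimal;
    }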


@@ -41,7 +41,8 @@ public:
     Frame* PrepareFrame(const Libraries::VideoOut::BufferAttributeGroup& attribute,
                         VAddr cpu_address) {
         const auto info = VideoCore::ImageInfo{attribute};
-        auto& image = texture_cache.FindImage(info, cpu_address);
+        const auto image_id = texture_cache.FindImage(info, cpu_address);
+        auto& image = texture_cache.GetImage(image_id);
         return PrepareFrameInternal(image);
     }
@@ -54,7 +55,8 @@ public:
         const Libraries::VideoOut::BufferAttributeGroup& attribute, VAddr cpu_address) {
         vo_buffers_addr.emplace_back(cpu_address);
         const auto info = VideoCore::ImageInfo{attribute};
-        return texture_cache.FindImage(info, cpu_address);
+        const auto image_id = texture_cache.FindImage(info, cpu_address);
+        return texture_cache.GetImage(image_id);
     }
     bool IsVideoOutSurface(const AmdGpu::Liverpool::ColorBuffer& color_buffer) {
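A likely rationale for returning an ImageId here instead of an Image&: the texture cache may allocate new images while a caller still holds the result, and if the backing storage reallocates, a retained reference dangles, whereas an ID stays valid and is re-resolved at the point of use. The pattern, as used in the diff above:

    // The ID survives cache growth; the reference is resolved only when needed.
    const auto image_id = texture_cache.FindImage(info, cpu_address);
    auto& image = texture_cache.GetImage(image_id);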


@@ -125,17 +125,18 @@ bool ComputePipeline::BindResources(Core::MemoryManager* memory, StreamBuffer& s
         }
     }
-    for (const auto& image : info.images) {
-        const auto tsharp = info.ReadUd<AmdGpu::Image>(image.sgpr_base, image.dword_offset);
-        const auto& image_view = texture_cache.FindImageView(tsharp, image.is_storage, image.is_depth);
-        image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view, vk::ImageLayout::eGeneral);
+    for (const auto& image_desc : info.images) {
+        const auto tsharp = info.ReadUd<AmdGpu::Image>(image_desc.sgpr_base, image_desc.dword_offset);
+        const auto& image_view = texture_cache.FindImageView(tsharp, image_desc.is_storage);
+        const auto& image = texture_cache.GetImage(image_view.image_id);
+        image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view, image.layout);
         set_writes.push_back({
             .dstSet = VK_NULL_HANDLE,
             .dstBinding = binding++,
             .dstArrayElement = 0,
             .descriptorCount = 1,
-            .descriptorType = image.is_storage ? vk::DescriptorType::eStorageImage
-                                               : vk::DescriptorType::eSampledImage,
+            .descriptorType = image_desc.is_storage ? vk::DescriptorType::eStorageImage
+                                                    : vk::DescriptorType::eSampledImage,
             .pImageInfo = &image_infos.back(),
         });


@@ -348,19 +348,18 @@ void GraphicsPipeline::BindResources(Core::MemoryManager* memory, StreamBuffer&
         }
     }
-    for (const auto& image : stage.images) {
-        const auto tsharp = stage.ReadUd<AmdGpu::Image>(image.sgpr_base, image.dword_offset);
-        const auto& image_view = texture_cache.FindImageView(tsharp, image.is_storage, image.is_depth);
-        image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view,
-                                 (image.is_storage || image.is_depth) ? vk::ImageLayout::eGeneral
-                                                                      : vk::ImageLayout::eShaderReadOnlyOptimal);
+    for (const auto& image_desc : stage.images) {
+        const auto tsharp = stage.ReadUd<AmdGpu::Image>(image_desc.sgpr_base, image_desc.dword_offset);
+        const auto& image_view = texture_cache.FindImageView(tsharp, image_desc.is_storage);
+        const auto& image = texture_cache.GetImage(image_view.image_id);
+        image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view, image.layout);
         set_writes.push_back({
             .dstSet = VK_NULL_HANDLE,
             .dstBinding = binding++,
             .dstArrayElement = 0,
             .descriptorCount = 1,
-            .descriptorType = image.is_storage ? vk::DescriptorType::eStorageImage
-                                               : vk::DescriptorType::eSampledImage,
+            .descriptorType = image_desc.is_storage ? vk::DescriptorType::eStorageImage
+                                                    : vk::DescriptorType::eSampledImage,
             .pImageInfo = &image_infos.back(),
         });


@@ -203,6 +203,7 @@ bool Instance::CreateDevice() {
         .independentBlend = true,
         .geometryShader = features.geometryShader,
         .logicOp = features.logicOp,
+        .multiViewport = true,
         .samplerAnisotropy = features.samplerAnisotropy,
         .fragmentStoresAndAtomics = features.fragmentStoresAndAtomics,
         .shaderImageGatherExtended = true,
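Note that unlike the neighbouring fields, .multiViewport is hardcoded to true rather than forwarded from the queried feature set. A more defensive variant (an assumption on my part, not what the commit does) would mirror the other fields:

    // Sketch: forward the device-reported bit instead of hardcoding it, so
    // device creation cannot request a feature the hardware lacks.
    .multiViewport = features.multiViewport,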


@@ -44,10 +44,11 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
         return;
     }
-    UpdateDynamicState(*pipeline);
     pipeline->BindResources(memory, vertex_index_buffer, texture_cache);
     BeginRendering();
+    UpdateDynamicState(*pipeline);
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle());
     if (is_indexed) {
@@ -113,12 +114,14 @@ void Rasterizer::BeginRendering() {
     const bool is_clear = regs.depth_render_control.depth_clear_enable ||
                           texture_cache.IsMetaCleared(htile_address);
     const auto& hint = liverpool->last_db_extent;
-    const auto& image_view = texture_cache.DepthTarget(regs.depth_buffer, htile_address, hint);
+    const auto& image_view = texture_cache.DepthTarget(regs.depth_buffer, htile_address, hint,
+                                                       regs.depth_control.depth_write_enable);
+    const auto& image = texture_cache.GetImage(image_view.image_id);
     state.width = std::min<u32>(state.width, hint.width);
     state.height = std::min<u32>(state.height, hint.height);
     state.depth_attachment = {
         .imageView = *image_view.image_view,
-        .imageLayout = vk::ImageLayout::eGeneral,
+        .imageLayout = image.layout,
         .loadOp = is_clear ? vk::AttachmentLoadOp::eClear : vk::AttachmentLoadOp::eLoad,
         .storeOp = is_clear ? vk::AttachmentStoreOp::eNone : vk::AttachmentStoreOp::eStore,
         .clearValue = vk::ClearValue{.depthStencil = {.depth = regs.depth_clear,
@@ -192,23 +195,34 @@ void Rasterizer::UpdateDynamicState(const GraphicsPipeline& pipeline) {
 void Rasterizer::UpdateViewportScissorState() {
     auto& regs = liverpool->regs;
+    boost::container::static_vector<vk::Viewport, Liverpool::NumViewports> viewports;
+    boost::container::static_vector<vk::Rect2D, Liverpool::NumViewports> scissors;
     const float reduce_z =
         regs.clipper_control.clip_space == AmdGpu::Liverpool::ClipSpace::MinusWToW ? 1.0f : 0.0f;
+    for (u32 i = 0; i < Liverpool::NumViewports; i++) {
+        const auto& vp = regs.viewports[i];
+        const auto& vp_d = regs.viewport_depths[i];
+        if (vp.xscale == 0) {
+            continue;
+        }
+        viewports.push_back({
+            .x = vp.xoffset - vp.xscale,
+            .y = vp.yoffset - vp.yscale,
+            .width = vp.xscale * 2.0f,
+            .height = vp.yscale * 2.0f,
+            .minDepth = vp.zoffset - vp.zscale * reduce_z,
+            .maxDepth = vp.zscale + vp.zoffset,
+        });
+    }
+    const auto& sc = regs.screen_scissor;
+    scissors.push_back({
+        .offset = {sc.top_left_x, sc.top_left_y},
+        .extent = {sc.GetWidth(), sc.GetHeight()},
+    });
     const auto cmdbuf = scheduler.CommandBuffer();
-    const vk::Viewport viewport{
-        .x = regs.viewports[0].xoffset - regs.viewports[0].xscale,
-        .y = regs.viewports[0].yoffset - regs.viewports[0].yscale,
-        .width = regs.viewports[0].xscale * 2.0f,
-        .height = regs.viewports[0].yscale * 2.0f,
-        .minDepth = regs.viewports[0].zoffset - regs.viewports[0].zscale * reduce_z,
-        .maxDepth = regs.viewports[0].zscale + regs.viewports[0].zoffset,
-    };
-    const vk::Rect2D scissor{
-        .offset = {regs.screen_scissor.top_left_x, regs.screen_scissor.top_left_y},
-        .extent = {regs.screen_scissor.GetWidth(), regs.screen_scissor.GetHeight()},
-    };
-    cmdbuf.setViewport(0, viewport);
-    cmdbuf.setScissor(0, scissor);
+    cmdbuf.setViewport(0, viewports);
+    cmdbuf.setScissor(0, scissors);
 }
 void Rasterizer::UpdateDepthStencilState() {
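A worked example of the viewport unpacking above, with assumed register values: the GPU encodes each viewport as a center (xoffset, yoffset) and a half-extent (xscale, yscale), while Vulkan expects a top-left corner and a full size.

    // Assumed values for a full-screen 1920x1080 viewport:
    //   xoffset = 960, xscale = 960  ->  x = 960 - 960 = 0, width  = 2 * 960 = 1920
    //   yoffset = 540, yscale = 540  ->  y = 540 - 540 = 0, height = 2 * 540 = 1080
    // With clip_space == MinusWToW (reduce_z = 1), a depth range encoded as
    //   zoffset = 0.5, zscale = 0.5  ->  minDepth = 0.0, maxDepth = 1.0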


@@ -32,7 +32,7 @@ void Scheduler::BeginRendering(const RenderState& new_state) {
             .extent = {render_state.width, render_state.height},
         },
         .layerCount = 1,
-        .colorAttachmentCount = static_cast<u32>(render_state.color_attachments.size()),
+        .colorAttachmentCount = render_state.num_color_attachments,
         .pColorAttachments = render_state.color_attachments.data(),
         .pDepthAttachment = render_state.num_depth_attachments ?
                                 &render_state.depth_attachment : nullptr,


@@ -23,7 +23,7 @@ struct RenderState {
     bool operator==(const RenderState& other) const noexcept {
         return std::memcmp(this, &other, sizeof(RenderState)) == 0;
     }
-}
+};
 class Scheduler {
@@ -46,6 +46,11 @@ public:
     /// Ends current rendering scope.
     void EndRendering();
+    /// Returns the current render state.
+    const RenderState& GetRenderState() const {
+        return render_state;
+    }
     /// Returns the current command buffer.
     vk::CommandBuffer CommandBuffer() const {
         return current_cmdbuf;
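The switch from color_attachments.size() to an explicit num_color_attachments counter (in the BeginRendering hunk above) fits how RenderState is compared: operator== memcmps the whole struct, which only behaves sensibly for a fixed-layout, fully zero-initialized type such as a plain array plus a count. A sketch of the assumed shape (the array bound and field order are illustrative, not taken from the tree):

    struct RenderState {
        std::array<vk::RenderingAttachmentInfo, 8> color_attachments{};
        u32 num_color_attachments = 0;
        vk::RenderingAttachmentInfo depth_attachment{};
        u32 num_depth_attachments = 0;
        u32 width = 0;
        u32 height = 0;
        // memcmp-based operator== as shown in the diff above.
    };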


@@ -226,7 +226,7 @@ void StreamBuffer::WaitPendingOperations(u64 requested_upper_bound) {
     while (requested_upper_bound > wait_bound && wait_cursor < *invalidation_mark) {
         auto& watch = previous_watches[wait_cursor];
         wait_bound = watch.upper_bound;
-        scheduler.Wait(watch.tick);
+        //scheduler.Wait(watch.tick);
         ++wait_cursor;
     }
 }