Merge branch 'shadps4-emu:main' into shader_recompiler/format

commit 84f1690dfb
Daniel R., 2024-08-30 15:40:17 +02:00 (committed by GitHub)
92 changed files with 2751 additions and 1848 deletions

@@ -99,7 +99,7 @@ Id TypeId(const EmitContext& ctx, IR::Type type) {
}
}
void Traverse(EmitContext& ctx, IR::Program& program) {
void Traverse(EmitContext& ctx, const IR::Program& program) {
IR::Block* current_block{};
for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
switch (node.type) {
@@ -162,7 +162,7 @@ void Traverse(EmitContext& ctx, IR::Program& program) {
}
}
Id DefineMain(EmitContext& ctx, IR::Program& program) {
Id DefineMain(EmitContext& ctx, const IR::Program& program) {
const Id void_function{ctx.TypeFunction(ctx.void_id)};
const Id main{ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function)};
for (IR::Block* const block : program.blocks) {
@@ -185,8 +185,28 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
ctx.AddCapability(spv::Capability::Int16);
}
ctx.AddCapability(spv::Capability::Int64);
if (info.has_storage_images) {
if (info.has_storage_images || info.has_image_buffers) {
ctx.AddCapability(spv::Capability::StorageImageExtendedFormats);
ctx.AddCapability(spv::Capability::StorageImageReadWithoutFormat);
ctx.AddCapability(spv::Capability::StorageImageWriteWithoutFormat);
}
if (info.has_texel_buffers) {
ctx.AddCapability(spv::Capability::SampledBuffer);
}
if (info.has_image_buffers) {
ctx.AddCapability(spv::Capability::ImageBuffer);
}
if (info.has_image_gather) {
ctx.AddCapability(spv::Capability::ImageGatherExtended);
}
if (info.has_image_query) {
ctx.AddCapability(spv::Capability::ImageQuery);
}
if (info.uses_lane_id) {
ctx.AddCapability(spv::Capability::GroupNonUniform);
}
if (info.uses_group_quad) {
ctx.AddCapability(spv::Capability::GroupNonUniformQuad);
}
switch (program.info.stage) {
case Stage::Compute: {
@@ -206,19 +226,9 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
} else {
ctx.AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft);
}
ctx.AddCapability(spv::Capability::GroupNonUniform);
if (info.uses_group_quad) {
ctx.AddCapability(spv::Capability::GroupNonUniformQuad);
}
if (info.has_discard) {
ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
}
if (info.has_image_gather) {
ctx.AddCapability(spv::Capability::ImageGatherExtended);
}
if (info.has_image_query) {
ctx.AddCapability(spv::Capability::ImageQuery);
}
if (info.stores.Get(IR::Attribute::Depth)) {
ctx.AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
}
@@ -229,7 +239,7 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
ctx.AddEntryPoint(execution_model, main, "main", interfaces);
}
void PatchPhiNodes(IR::Program& program, EmitContext& ctx) {
void PatchPhiNodes(const IR::Program& program, EmitContext& ctx) {
auto inst{program.blocks.front()->begin()};
size_t block_index{0};
ctx.PatchDeferredPhi([&](size_t phi_arg) {
@@ -248,8 +258,8 @@ void PatchPhiNodes(IR::Program& program, EmitContext& ctx) {
}
} // Anonymous namespace
std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program, u32& binding) {
EmitContext ctx{profile, program, binding};
std::vector<u32> EmitSPIRV(const Profile& profile, const IR::Program& program, u32& binding) {
EmitContext ctx{profile, program.info, binding};
const Id main{DefineMain(ctx, program)};
DefineEntryPoint(program, ctx, main);
if (program.info.stage == Stage::Vertex) {

@@ -9,7 +9,7 @@
namespace Shader::Backend::SPIRV {
[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program,
[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const IR::Program& program,
u32& binding);
} // namespace Shader::Backend::SPIRV
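
With the signature change above, EmitSPIRV no longer mutates the program; only the binding counter advances across calls. A minimal caller sketch (variable names assumed, not from this diff):

u32 binding = 0; // shared across stages so descriptor bindings stay unique
const std::vector<u32> spv = Shader::Backend::SPIRV::EmitSPIRV(profile, program, binding);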

@@ -262,171 +262,16 @@ Id EmitLoadBufferF32x4(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
return EmitLoadBufferF32xN<4>(ctx, handle, address);
}
static bool IsSignedInteger(AmdGpu::NumberFormat format) {
switch (format) {
case AmdGpu::NumberFormat::Unorm:
case AmdGpu::NumberFormat::Uscaled:
case AmdGpu::NumberFormat::Uint:
return false;
case AmdGpu::NumberFormat::Snorm:
case AmdGpu::NumberFormat::Sscaled:
case AmdGpu::NumberFormat::Sint:
case AmdGpu::NumberFormat::SnormNz:
return true;
case AmdGpu::NumberFormat::Float:
default:
UNREACHABLE();
}
}
static u32 UXBitsMax(u32 bit_width) {
return (1u << bit_width) - 1u;
}
static u32 SXBitsMax(u32 bit_width) {
return (1u << (bit_width - 1u)) - 1u;
}
static Id ConvertValue(EmitContext& ctx, Id value, AmdGpu::NumberFormat format, u32 bit_width) {
switch (format) {
case AmdGpu::NumberFormat::Unorm:
return ctx.OpFDiv(ctx.F32[1], value, ctx.ConstF32(float(UXBitsMax(bit_width))));
case AmdGpu::NumberFormat::Snorm:
return ctx.OpFDiv(ctx.F32[1], value, ctx.ConstF32(float(SXBitsMax(bit_width))));
case AmdGpu::NumberFormat::SnormNz:
// (x * 2 + 1) / (Format::SMAX * 2)
value = ctx.OpFMul(ctx.F32[1], value, ctx.ConstF32(2.f));
value = ctx.OpFAdd(ctx.F32[1], value, ctx.ConstF32(1.f));
return ctx.OpFDiv(ctx.F32[1], value, ctx.ConstF32(float(SXBitsMax(bit_width) * 2)));
case AmdGpu::NumberFormat::Uscaled:
case AmdGpu::NumberFormat::Sscaled:
case AmdGpu::NumberFormat::Uint:
case AmdGpu::NumberFormat::Sint:
case AmdGpu::NumberFormat::Float:
return value;
default:
UNREACHABLE_MSG("Unsupported number format for conversion: {}",
magic_enum::enum_name(format));
}
}
static Id ComponentOffset(EmitContext& ctx, Id address, u32 stride, u32 bit_offset) {
Id comp_offset = ctx.ConstU32(bit_offset);
if (stride < 4) {
// comp_offset += (address % 4) * 8;
const Id byte_offset = ctx.OpUMod(ctx.U32[1], address, ctx.ConstU32(4u));
const Id bit_offset = ctx.OpShiftLeftLogical(ctx.U32[1], byte_offset, ctx.ConstU32(3u));
comp_offset = ctx.OpIAdd(ctx.U32[1], comp_offset, bit_offset);
}
return comp_offset;
}
static Id GetBufferFormatValue(EmitContext& ctx, u32 handle, Id address, u32 comp) {
auto& buffer = ctx.buffers[handle];
const auto format = buffer.dfmt;
switch (format) {
case AmdGpu::DataFormat::FormatInvalid:
return ctx.f32_zero_value;
case AmdGpu::DataFormat::Format8:
case AmdGpu::DataFormat::Format16:
case AmdGpu::DataFormat::Format32:
case AmdGpu::DataFormat::Format8_8:
case AmdGpu::DataFormat::Format16_16:
case AmdGpu::DataFormat::Format10_11_11:
case AmdGpu::DataFormat::Format11_11_10:
case AmdGpu::DataFormat::Format10_10_10_2:
case AmdGpu::DataFormat::Format2_10_10_10:
case AmdGpu::DataFormat::Format8_8_8_8:
case AmdGpu::DataFormat::Format32_32:
case AmdGpu::DataFormat::Format16_16_16_16:
case AmdGpu::DataFormat::Format32_32_32:
case AmdGpu::DataFormat::Format32_32_32_32: {
const u32 num_components = AmdGpu::NumComponents(format);
if (comp >= num_components) {
return ctx.f32_zero_value;
}
// uint index = address / 4;
Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
const u32 stride = buffer.stride;
if (stride > 4) {
const u32 index_offset = u32(AmdGpu::ComponentOffset(format, comp) / 32);
if (index_offset > 0) {
// index += index_offset;
index = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(index_offset));
}
}
const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index);
const u32 bit_offset = AmdGpu::ComponentOffset(format, comp) % 32;
const u32 bit_width = AmdGpu::ComponentBits(format, comp);
const auto num_format = buffer.nfmt;
if (num_format == AmdGpu::NumberFormat::Float) {
if (bit_width == 32) {
return ctx.OpLoad(ctx.F32[1], ptr);
} else if (bit_width == 16) {
const Id comp_offset = ComponentOffset(ctx, address, stride, bit_offset);
Id value = ctx.OpLoad(ctx.U32[1], ptr);
value =
ctx.OpBitFieldSExtract(ctx.S32[1], value, comp_offset, ctx.ConstU32(bit_width));
value = ctx.OpSConvert(ctx.U16, value);
value = ctx.OpBitcast(ctx.F16[1], value);
return ctx.OpFConvert(ctx.F32[1], value);
} else {
UNREACHABLE_MSG("Invalid float bit width {}", bit_width);
}
} else {
Id value = ctx.OpLoad(ctx.U32[1], ptr);
const bool is_signed = IsSignedInteger(num_format);
if (bit_width < 32) {
const Id comp_offset = ComponentOffset(ctx, address, stride, bit_offset);
if (is_signed) {
value = ctx.OpBitFieldSExtract(ctx.S32[1], value, comp_offset,
ctx.ConstU32(bit_width));
} else {
value = ctx.OpBitFieldUExtract(ctx.U32[1], value, comp_offset,
ctx.ConstU32(bit_width));
}
}
value = ctx.OpBitcast(ctx.F32[1], value);
return ConvertValue(ctx, value, num_format, bit_width);
}
break;
}
default:
UNREACHABLE_MSG("Invalid format for conversion: {}", magic_enum::enum_name(format));
}
}
template <u32 N>
static Id EmitLoadBufferFormatF32xN(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
auto& buffer = ctx.buffers[handle];
address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
if constexpr (N == 1) {
return GetBufferFormatValue(ctx, handle, address, 0);
} else {
boost::container::static_vector<Id, N> ids;
for (u32 i = 0; i < N; i++) {
ids.push_back(GetBufferFormatValue(ctx, handle, address, i));
}
return ctx.OpCompositeConstruct(ctx.F32[N], ids);
}
}
Id EmitLoadBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
return EmitLoadBufferFormatF32xN<1>(ctx, inst, handle, address);
}
Id EmitLoadBufferFormatF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
return EmitLoadBufferFormatF32xN<2>(ctx, inst, handle, address);
}
Id EmitLoadBufferFormatF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
return EmitLoadBufferFormatF32xN<3>(ctx, inst, handle, address);
}
Id EmitLoadBufferFormatF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
return EmitLoadBufferFormatF32xN<4>(ctx, inst, handle, address);
const auto& buffer = ctx.texture_buffers[handle];
const Id tex_buffer = ctx.OpLoad(buffer.image_type, buffer.id);
const Id coord = ctx.OpIAdd(ctx.U32[1], address, buffer.coord_offset);
Id texel = buffer.is_storage ? ctx.OpImageRead(buffer.result_type, tex_buffer, coord)
: ctx.OpImageFetch(buffer.result_type, tex_buffer, coord);
if (buffer.is_integer) {
texel = ctx.OpBitcast(ctx.F32[4], texel);
}
return texel;
}
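
The replacement body above turns every formatted load into a single texel-buffer access; integer number formats are bitcast so LoadBufferFormatF32 always yields an F32x4. An illustrative shape of the emitted sequence (a sketch in comments, not verbatim output):

// const Id tex   = OpLoad(image_type, texbuf);         // bind the buffer view
// const Id coord = OpIAdd(U32, address, coord_offset); // element index
// const Id texel = is_storage ? OpImageRead(...)       // imageBuffer path
//                             : OpImageFetch(...);     // samplerBuffer path
// const Id res   = OpBitcast(F32[4], texel);           // only if is_integer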
template <u32 N>
@@ -467,97 +312,14 @@ void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address
EmitStoreBufferF32xN<1>(ctx, handle, address, value);
}
static Id ConvertF32ToFormat(EmitContext& ctx, Id value, AmdGpu::NumberFormat format,
u32 bit_width) {
switch (format) {
case AmdGpu::NumberFormat::Unorm:
return ctx.OpConvertFToU(
ctx.U32[1], ctx.OpFMul(ctx.F32[1], value, ctx.ConstF32(float(UXBitsMax(bit_width)))));
case AmdGpu::NumberFormat::Uint:
return ctx.OpBitcast(ctx.U32[1], value);
case AmdGpu::NumberFormat::Float:
return value;
default:
UNREACHABLE_MSG("Unsupported number format for conversion: {}",
magic_enum::enum_name(format));
}
}
template <u32 N>
static void EmitStoreBufferFormatF32xN(EmitContext& ctx, u32 handle, Id address, Id value) {
auto& buffer = ctx.buffers[handle];
const auto format = buffer.dfmt;
const auto num_format = buffer.nfmt;
switch (format) {
case AmdGpu::DataFormat::FormatInvalid:
return;
case AmdGpu::DataFormat::Format8_8_8_8:
case AmdGpu::DataFormat::Format16:
case AmdGpu::DataFormat::Format32:
case AmdGpu::DataFormat::Format32_32:
case AmdGpu::DataFormat::Format32_32_32_32: {
ASSERT(N == AmdGpu::NumComponents(format));
address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index);
Id packed_value{};
for (u32 i = 0; i < N; i++) {
const u32 bit_width = AmdGpu::ComponentBits(format, i);
const u32 bit_offset = AmdGpu::ComponentOffset(format, i) % 32;
const Id comp{ConvertF32ToFormat(
ctx, N == 1 ? value : ctx.OpCompositeExtract(ctx.F32[1], value, i), num_format,
bit_width)};
if (bit_width == 32) {
if constexpr (N == 1) {
ctx.OpStore(ptr, comp);
} else {
const Id index_i = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(i));
const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id,
ctx.u32_zero_value, index_i);
ctx.OpStore(ptr, comp);
}
} else {
if (i == 0) {
packed_value = comp;
} else {
packed_value =
ctx.OpBitFieldInsert(ctx.U32[1], packed_value, comp,
ctx.ConstU32(bit_offset), ctx.ConstU32(bit_width));
}
if (i == N - 1) {
ctx.OpStore(ptr, packed_value);
}
}
}
} break;
default:
UNREACHABLE_MSG("Invalid format for conversion: {}", magic_enum::enum_name(format));
}
}
void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
EmitStoreBufferFormatF32xN<1>(ctx, handle, address, value);
}
void EmitStoreBufferFormatF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address,
Id value) {
EmitStoreBufferFormatF32xN<2>(ctx, handle, address, value);
}
void EmitStoreBufferFormatF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address,
Id value) {
EmitStoreBufferFormatF32xN<3>(ctx, handle, address, value);
}
void EmitStoreBufferFormatF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address,
Id value) {
EmitStoreBufferFormatF32xN<4>(ctx, handle, address, value);
const auto& buffer = ctx.texture_buffers[handle];
const Id tex_buffer = ctx.OpLoad(buffer.image_type, buffer.id);
const Id coord = ctx.OpIAdd(ctx.U32[1], address, buffer.coord_offset);
if (buffer.is_integer) {
value = ctx.OpBitcast(ctx.U32[4], value);
}
ctx.OpImageWrite(tex_buffer, coord, value);
}
} // namespace Shader::Backend::SPIRV

@@ -41,13 +41,14 @@ void Name(EmitContext& ctx, Id object, std::string_view format_str, Args&&... ar
} // Anonymous namespace
EmitContext::EmitContext(const Profile& profile_, IR::Program& program, u32& binding_)
: Sirit::Module(profile_.supported_spirv), info{program.info}, profile{profile_},
stage{program.info.stage}, binding{binding_} {
EmitContext::EmitContext(const Profile& profile_, const Shader::Info& info_, u32& binding_)
: Sirit::Module(profile_.supported_spirv), info{info_}, profile{profile_}, stage{info.stage},
binding{binding_} {
AddCapability(spv::Capability::Shader);
DefineArithmeticTypes();
DefineInterfaces();
DefineBuffers();
DefineTextureBuffers();
DefineImagesAndSamplers();
DefineSharedMemory();
}
@@ -123,25 +124,24 @@ void EmitContext::DefineInterfaces() {
DefineOutputs();
}
Id GetAttributeType(EmitContext& ctx, AmdGpu::NumberFormat fmt) {
const VectorIds& GetAttributeType(EmitContext& ctx, AmdGpu::NumberFormat fmt) {
switch (fmt) {
case AmdGpu::NumberFormat::Float:
case AmdGpu::NumberFormat::Unorm:
case AmdGpu::NumberFormat::Snorm:
case AmdGpu::NumberFormat::SnormNz:
return ctx.F32[4];
case AmdGpu::NumberFormat::Sint:
return ctx.S32[4];
case AmdGpu::NumberFormat::Uint:
return ctx.U32[4];
case AmdGpu::NumberFormat::Sscaled:
return ctx.F32[4];
case AmdGpu::NumberFormat::Uscaled:
return ctx.F32[4];
case AmdGpu::NumberFormat::Srgb:
return ctx.F32;
case AmdGpu::NumberFormat::Sint:
return ctx.S32;
case AmdGpu::NumberFormat::Uint:
return ctx.U32;
default:
break;
}
throw InvalidArgument("Invalid attribute type {}", fmt);
UNREACHABLE_MSG("Invalid attribute type {}", fmt);
}
EmitContext::SpirvAttribute EmitContext::GetAttributeInfo(AmdGpu::NumberFormat fmt, Id id) {
@@ -162,7 +162,7 @@ EmitContext::SpirvAttribute EmitContext::GetAttributeInfo(AmdGpu::NumberFormat f
default:
break;
}
throw InvalidArgument("Invalid attribute type {}", fmt);
UNREACHABLE_MSG("Invalid attribute type {}", fmt);
}
void EmitContext::DefineBufferOffsets() {
@@ -177,6 +177,16 @@ void EmitContext::DefineBufferOffsets() {
buffer.offset = OpBitFieldUExtract(U32[1], value, ConstU32(offset), ConstU32(8U));
buffer.offset_dwords = OpShiftRightLogical(U32[1], buffer.offset, ConstU32(2U));
}
for (auto& tex_buffer : texture_buffers) {
const u32 binding = tex_buffer.binding;
const u32 half = Shader::PushData::BufOffsetIndex + (binding >> 4);
const u32 comp = (binding & 0xf) >> 2;
const u32 offset = (binding & 0x3) << 3;
const Id ptr{OpAccessChain(TypePointer(spv::StorageClass::PushConstant, U32[1]),
push_data_block, ConstU32(half), ConstU32(comp))};
const Id value{OpLoad(U32[1], ptr)};
tex_buffer.coord_offset = OpBitFieldUExtract(U32[1], value, ConstU32(offset), ConstU32(8U));
}
}
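
The bit arithmetic above packs one byte-sized offset per binding into the push-constant block: each uvec4 "half" holds 16 offsets, each dword holds 4. A worked example (values illustrative):

// binding = 5:
//   half   = PushData::BufOffsetIndex + (5 >> 4)  -> first uvec4
//   comp   = (5 & 0xf) >> 2 = 1                   -> second dword of that uvec4
//   offset = (5 & 0x3) << 3 = 8                   -> extract bits [8, 16)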
Id MakeDefaultValue(EmitContext& ctx, u32 default_value) {
@@ -195,6 +205,11 @@ Id MakeDefaultValue(EmitContext& ctx, u32 default_value) {
}
void EmitContext::DefineInputs() {
if (info.uses_lane_id) {
subgroup_local_invocation_id = DefineVariable(
U32[1], spv::BuiltIn::SubgroupLocalInvocationId, spv::StorageClass::Input);
Decorate(subgroup_local_invocation_id, spv::Decoration::Flat);
}
switch (stage) {
case Stage::Vertex: {
vertex_index = DefineVariable(U32[1], spv::BuiltIn::VertexIndex, spv::StorageClass::Input);
@@ -202,7 +217,7 @@ void EmitContext::DefineInputs() {
instance_id = DefineVariable(U32[1], spv::BuiltIn::InstanceIndex, spv::StorageClass::Input);
for (const auto& input : info.vs_inputs) {
const Id type{GetAttributeType(*this, input.fmt)};
const Id type{GetAttributeType(*this, input.fmt)[4]};
if (input.instance_step_rate == Info::VsInput::InstanceIdType::OverStepRate0 ||
input.instance_step_rate == Info::VsInput::InstanceIdType::OverStepRate1) {
@@ -229,15 +244,12 @@ void EmitContext::DefineInputs() {
break;
}
case Stage::Fragment:
subgroup_local_invocation_id = DefineVariable(
U32[1], spv::BuiltIn::SubgroupLocalInvocationId, spv::StorageClass::Input);
Decorate(subgroup_local_invocation_id, spv::Decoration::Flat);
frag_coord = DefineVariable(F32[4], spv::BuiltIn::FragCoord, spv::StorageClass::Input);
frag_depth = DefineVariable(F32[1], spv::BuiltIn::FragDepth, spv::StorageClass::Output);
front_facing = DefineVariable(U1[1], spv::BuiltIn::FrontFacing, spv::StorageClass::Input);
for (const auto& input : info.ps_inputs) {
const u32 semantic = input.param_index;
if (input.is_default) {
if (input.is_default && !input.is_flat) {
input_params[semantic] = {MakeDefaultValue(*this, input.default_value), F32[1],
F32[1], 4, true};
continue;
@@ -328,47 +340,75 @@ void EmitContext::DefinePushDataBlock() {
void EmitContext::DefineBuffers() {
boost::container::small_vector<Id, 8> type_ids;
for (u32 i = 0; const auto& buffer : info.buffers) {
const auto* data_types = True(buffer.used_types & IR::Type::F32) ? &F32 : &U32;
const Id data_type = (*data_types)[1];
const Id record_array_type{buffer.is_storage
? TypeRuntimeArray(data_type)
: TypeArray(data_type, ConstU32(buffer.length))};
const auto define_struct = [&](Id record_array_type, bool is_instance_data) {
const Id struct_type{TypeStruct(record_array_type)};
if (std::ranges::find(type_ids, record_array_type.value, &Id::value) == type_ids.end()) {
Decorate(record_array_type, spv::Decoration::ArrayStride, 4);
const auto name =
buffer.is_instance_data
? fmt::format("{}_instance_data{}_{}{}", stage, i, 'f',
sizeof(float) * CHAR_BIT)
: fmt::format("{}_cbuf_block_{}{}", stage, 'f', sizeof(float) * CHAR_BIT);
Name(struct_type, name);
Decorate(struct_type, spv::Decoration::Block);
MemberName(struct_type, 0, "data");
MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
type_ids.push_back(record_array_type);
if (std::ranges::find(type_ids, record_array_type.value, &Id::value) != type_ids.end()) {
return struct_type;
}
Decorate(record_array_type, spv::Decoration::ArrayStride, 4);
const auto name = is_instance_data ? fmt::format("{}_instance_data_f32", stage)
: fmt::format("{}_cbuf_block_f32", stage);
Name(struct_type, name);
Decorate(struct_type, spv::Decoration::Block);
MemberName(struct_type, 0, "data");
MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
type_ids.push_back(record_array_type);
return struct_type;
};
for (const auto& desc : info.buffers) {
const auto sharp = desc.GetSharp(info);
const bool is_storage = desc.IsStorage(sharp);
const auto* data_types = True(desc.used_types & IR::Type::F32) ? &F32 : &U32;
const Id data_type = (*data_types)[1];
const Id record_array_type{is_storage ? TypeRuntimeArray(data_type)
: TypeArray(data_type, ConstU32(sharp.NumDwords()))};
const Id struct_type{define_struct(record_array_type, desc.is_instance_data)};
const auto storage_class =
buffer.is_storage ? spv::StorageClass::StorageBuffer : spv::StorageClass::Uniform;
is_storage ? spv::StorageClass::StorageBuffer : spv::StorageClass::Uniform;
const Id struct_pointer_type{TypePointer(storage_class, struct_type)};
const Id pointer_type = TypePointer(storage_class, data_type);
const Id id{AddGlobalVariable(struct_pointer_type, storage_class)};
Decorate(id, spv::Decoration::Binding, binding);
Decorate(id, spv::Decoration::DescriptorSet, 0U);
Name(id, fmt::format("{}_{}", buffer.is_storage ? "ssbo" : "cbuf", buffer.sgpr_base));
if (is_storage && !desc.is_written) {
Decorate(id, spv::Decoration::NonWritable);
}
Name(id, fmt::format("{}_{}", is_storage ? "ssbo" : "cbuf", desc.sgpr_base));
buffers.push_back({
.id = id,
.binding = binding++,
.data_types = data_types,
.pointer_type = pointer_type,
.dfmt = buffer.dfmt,
.nfmt = buffer.nfmt,
.stride = buffer.GetVsharp(info).GetStride(),
});
interfaces.push_back(id);
i++;
}
}
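
define_struct deduplicates on the record array type because Sirit hands back the same id for identical TypeArray/TypeRuntimeArray requests, so the ArrayStride/Block decorations should only be attached once. A worked example of the read-only path (concrete numbers assumed):

// V# with sharp.NumDwords() == 4, is_written == false:
//   record_array_type = TypeArray(F32[1], ConstU32(4)); // ArrayStride 4, decorated once
//   struct_type       = { float data[4]; }              // Block, member offset 0
//   storage class     = Uniform, variable named "cbuf_<sgpr_base>"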
void EmitContext::DefineTextureBuffers() {
for (const auto& desc : info.texture_buffers) {
const bool is_integer =
desc.nfmt == AmdGpu::NumberFormat::Uint || desc.nfmt == AmdGpu::NumberFormat::Sint;
const VectorIds& sampled_type{GetAttributeType(*this, desc.nfmt)};
const u32 sampled = desc.is_written ? 2 : 1;
const Id image_type{TypeImage(sampled_type[1], spv::Dim::Buffer, false, false, false,
sampled, spv::ImageFormat::Unknown)};
const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
Decorate(id, spv::Decoration::Binding, binding);
Decorate(id, spv::Decoration::DescriptorSet, 0U);
Name(id, fmt::format("{}_{}", desc.is_written ? "imgbuf" : "texbuf", desc.sgpr_base));
texture_buffers.push_back({
.id = id,
.binding = binding++,
.image_type = image_type,
.result_type = sampled_type[4],
.is_integer = is_integer,
.is_storage = desc.is_written,
});
interfaces.push_back(id);
}
}
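
Read-only and written texel buffers thus map to the two classic buffer-view flavors (illustrative GLSL equivalents in the comments, not emitted source):

// desc.is_written == false -> sampled == 1 -> roughly 'uniform samplerBuffer' (SampledBuffer cap, OpImageFetch)
// desc.is_written == true  -> sampled == 2 -> roughly 'uniform imageBuffer'   (ImageBuffer cap, OpImageRead/Write)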
@@ -447,7 +487,7 @@ spv::ImageFormat GetFormat(const AmdGpu::Image& image) {
Id ImageType(EmitContext& ctx, const ImageResource& desc, Id sampled_type) {
const auto image = ctx.info.ReadUd<AmdGpu::Image>(desc.sgpr_base, desc.dword_offset);
const auto format = desc.is_storage ? GetFormat(image) : spv::ImageFormat::Unknown;
const auto format = desc.is_atomic ? GetFormat(image) : spv::ImageFormat::Unknown;
const u32 sampled = desc.is_storage ? 2 : 1;
switch (desc.type) {
case AmdGpu::ImageType::Color1D:
@@ -470,17 +510,8 @@ Id ImageType(EmitContext& ctx, const ImageResource& desc, Id sampled_type) {
void EmitContext::DefineImagesAndSamplers() {
for (const auto& image_desc : info.images) {
const VectorIds* data_types = [&] {
switch (image_desc.nfmt) {
case AmdGpu::NumberFormat::Uint:
return &U32;
case AmdGpu::NumberFormat::Sint:
return &S32;
default:
return &F32;
}
}();
const Id sampled_type = data_types->Get(1);
const VectorIds& data_types = GetAttributeType(*this, image_desc.nfmt);
const Id sampled_type = data_types[1];
const Id image_type{ImageType(*this, image_desc, sampled_type)};
const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
@@ -489,7 +520,7 @@ void EmitContext::DefineImagesAndSamplers() {
Name(id, fmt::format("{}_{}{}_{:02x}", stage, "img", image_desc.sgpr_base,
image_desc.dword_offset));
images.push_back({
.data_types = data_types,
.data_types = &data_types,
.id = id,
.sampled_type = image_desc.is_storage ? sampled_type : TypeSampledImage(image_type),
.pointer_type = pointer_type,
@@ -498,13 +529,12 @@ void EmitContext::DefineImagesAndSamplers() {
interfaces.push_back(id);
++binding;
}
image_u32 = TypePointer(spv::StorageClass::Image, U32[1]);
if (std::ranges::any_of(info.images, &ImageResource::is_atomic)) {
image_u32 = TypePointer(spv::StorageClass::Image, U32[1]);
}
if (info.samplers.empty()) {
return;
}
sampler_type = TypeSampler();
sampler_pointer_type = TypePointer(spv::StorageClass::UniformConstant, sampler_type);
for (const auto& samp_desc : info.samplers) {
@@ -520,14 +550,15 @@ void EmitContext::DefineImagesAndSamplers() {
}
void EmitContext::DefineSharedMemory() {
static constexpr size_t DefaultSharedMemSize = 16_KB;
static constexpr size_t DefaultSharedMemSize = 2_KB;
if (!info.uses_shared) {
return;
}
if (info.shared_memory_size == 0) {
info.shared_memory_size = DefaultSharedMemSize;
u32 shared_memory_size = info.shared_memory_size;
if (shared_memory_size == 0) {
shared_memory_size = DefaultSharedMemSize;
}
const u32 num_elements{Common::DivCeil(info.shared_memory_size, 4U)};
const u32 num_elements{Common::DivCeil(shared_memory_size, 4U)};
const Id type{TypeArray(U32[1], ConstU32(num_elements))};
shared_memory_u32_type = TypePointer(spv::StorageClass::Workgroup, type);
shared_u32 = TypePointer(spv::StorageClass::Workgroup, U32[1]);
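
Since the pass no longer writes the default back into info, a shader that declares no LDS size just gets the fallback locally. Worked numbers with the new default:

// shared_memory_size == 0 -> DefaultSharedMemSize = 2_KB = 2048 bytes
// num_elements = Common::DivCeil(2048u, 4u) = 512 // u32 array elements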

@@ -36,7 +36,7 @@ struct VectorIds {
class EmitContext final : public Sirit::Module {
public:
explicit EmitContext(const Profile& profile, IR::Program& program, u32& binding);
explicit EmitContext(const Profile& profile, const Shader::Info& info, u32& binding);
~EmitContext();
Id Def(const IR::Value& value);
@@ -124,7 +124,7 @@ public:
return ConstantComposite(type, constituents);
}
Info& info;
const Info& info;
const Profile& profile;
Stage stage{};
@@ -207,13 +207,20 @@ public:
u32 binding;
const VectorIds* data_types;
Id pointer_type;
AmdGpu::DataFormat dfmt;
AmdGpu::NumberFormat nfmt;
u32 stride;
};
struct TextureBufferDefinition {
Id id;
Id coord_offset;
u32 binding;
Id image_type;
Id result_type;
bool is_integer;
bool is_storage;
};
u32& binding;
boost::container::small_vector<BufferDefinition, 16> buffers;
boost::container::small_vector<TextureBufferDefinition, 8> texture_buffers;
boost::container::small_vector<TextureDefinition, 8> images;
boost::container::small_vector<Id, 4> samplers;
@@ -238,6 +245,7 @@ private:
void DefineOutputs();
void DefinePushDataBlock();
void DefineBuffers();
void DefineTextureBuffers();
void DefineImagesAndSamplers();
void DefineSharedMemory();

@@ -18,25 +18,31 @@ void Translator::EmitDataShare(const GcnInst& inst) {
case Opcode::DS_READ2_B64:
return DS_READ(64, false, true, inst);
case Opcode::DS_WRITE_B32:
return DS_WRITE(32, false, false, inst);
return DS_WRITE(32, false, false, false, inst);
case Opcode::DS_WRITE2ST64_B32:
return DS_WRITE(32, false, true, true, inst);
case Opcode::DS_WRITE_B64:
return DS_WRITE(64, false, false, inst);
return DS_WRITE(64, false, false, false, inst);
case Opcode::DS_WRITE2_B32:
return DS_WRITE(32, false, true, inst);
return DS_WRITE(32, false, true, false, inst);
case Opcode::DS_WRITE2_B64:
return DS_WRITE(64, false, true, inst);
return DS_WRITE(64, false, true, false, inst);
case Opcode::DS_ADD_U32:
return DS_ADD_U32(inst, false);
case Opcode::DS_MIN_U32:
return DS_MIN_U32(inst, false);
return DS_MIN_U32(inst, false, false);
case Opcode::DS_MIN_I32:
return DS_MIN_U32(inst, true, false);
case Opcode::DS_MAX_U32:
return DS_MAX_U32(inst, false);
return DS_MAX_U32(inst, false, false);
case Opcode::DS_MAX_I32:
return DS_MAX_U32(inst, true, false);
case Opcode::DS_ADD_RTN_U32:
return DS_ADD_U32(inst, true);
case Opcode::DS_MIN_RTN_U32:
return DS_MIN_U32(inst, true);
return DS_MIN_U32(inst, false, true);
case Opcode::DS_MAX_RTN_U32:
return DS_MAX_U32(inst, true);
return DS_MAX_U32(inst, false, true);
default:
LogMissingOpcode(inst);
}
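
The new DS_WRITE2ST64_B32 case reuses the paired-write path with a 64-element stride, handled by the stride64 flag in DS_WRITE below. Worked addressing (the second slot presumably uses offset1 the same way):

// DS_WRITE2ST64_B32 with offset0 = 0, offset1 = 1:
//   adj   = (bit_size == 32 ? 4 : 8) * 64 = 256   // bytes between slots
//   addr0 = addr + 0 * 256
//   addr1 = addr + 1 * 256                        // 64 dwords after addr0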
@@ -89,12 +95,13 @@ void Translator::DS_READ(int bit_size, bool is_signed, bool is_pair, const GcnIn
}
}
void Translator::DS_WRITE(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst) {
void Translator::DS_WRITE(int bit_size, bool is_signed, bool is_pair, bool stride64,
const GcnInst& inst) {
const IR::U32 addr{ir.GetVectorReg(IR::VectorReg(inst.src[0].code))};
const IR::VectorReg data0{inst.src[1].code};
const IR::VectorReg data1{inst.src[2].code};
if (is_pair) {
const u32 adj = bit_size == 32 ? 4 : 8;
const u32 adj = (bit_size == 32 ? 4 : 8) * (stride64 ? 64 : 1);
const IR::U32 addr0 = ir.IAdd(addr, ir.Imm32(u32(inst.control.ds.offset0 * adj)));
if (bit_size == 32) {
ir.WriteShared(32, ir.GetVectorReg(data0), addr0);
@@ -133,23 +140,23 @@ void Translator::DS_ADD_U32(const GcnInst& inst, bool rtn) {
}
}
void Translator::DS_MIN_U32(const GcnInst& inst, bool rtn) {
void Translator::DS_MIN_U32(const GcnInst& inst, bool is_signed, bool rtn) {
const IR::U32 addr{GetSrc(inst.src[0])};
const IR::U32 data{GetSrc(inst.src[1])};
const IR::U32 offset = ir.Imm32(u32(inst.control.ds.offset0));
const IR::U32 addr_offset = ir.IAdd(addr, offset);
const IR::Value original_val = ir.SharedAtomicIMin(addr_offset, data, false);
const IR::Value original_val = ir.SharedAtomicIMin(addr_offset, data, is_signed);
if (rtn) {
SetDst(inst.dst[0], IR::U32{original_val});
}
}
void Translator::DS_MAX_U32(const GcnInst& inst, bool rtn) {
void Translator::DS_MAX_U32(const GcnInst& inst, bool is_signed, bool rtn) {
const IR::U32 addr{GetSrc(inst.src[0])};
const IR::U32 data{GetSrc(inst.src[1])};
const IR::U32 offset = ir.Imm32(u32(inst.control.ds.offset0));
const IR::U32 addr_offset = ir.IAdd(addr, offset);
const IR::Value original_val = ir.SharedAtomicIMax(addr_offset, data, false);
const IR::Value original_val = ir.SharedAtomicIMax(addr_offset, data, is_signed);
if (rtn) {
SetDst(inst.dst[0], IR::U32{original_val});
}

@@ -1,14 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/logging/log.h"
#include "shader_recompiler/frontend/translate/translate.h"
namespace Shader::Gcn {
void Translator::EmitExport(const GcnInst& inst) {
if (ir.block->has_multiple_predecessors && info.stage == Stage::Fragment) {
LOG_WARNING(Render_Recompiler, "An ambiguous export appeared in translation");
ir.Discard(ir.LogicalNot(ir.GetExec()));
}

@@ -31,6 +31,8 @@ void Translator::EmitScalarAlu(const GcnInst& inst) {
return S_OR_B64(NegateMode::Result, false, inst);
case Opcode::S_XOR_B64:
return S_OR_B64(NegateMode::None, true, inst);
case Opcode::S_XNOR_B64:
return S_OR_B64(NegateMode::Result, true, inst);
case Opcode::S_ORN2_B64:
return S_OR_B64(NegateMode::Src1, false, inst);
case Opcode::S_AND_B64:

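S_XNOR_B64 maps onto the existing XOR helper with NegateMode::Result, relying on NOT(a XOR b) being XNOR. A standalone sanity check:

// Per-bit: NOT(1100 XOR 1010) == 1001, the XNOR truth table.
static_assert((~(0b1100u ^ 0b1010u) & 0xFu) == 0b1001u);
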
@@ -354,7 +354,7 @@ void Translator::EmitFetch(const GcnInst& inst) {
if (!std::filesystem::exists(dump_dir)) {
std::filesystem::create_directories(dump_dir);
}
const auto filename = fmt::format("vs_fetch_{:#018x}.bin", info.pgm_hash);
const auto filename = fmt::format("vs_{:#018x}_fetch.bin", info.pgm_hash);
const auto file = IOFile{dump_dir / filename, FileAccessMode::Write};
file.WriteRaw<u8>(code, fetch_size);
}
@@ -399,9 +399,7 @@ void Translator::EmitFetch(const GcnInst& inst) {
info.buffers.push_back({
.sgpr_base = attrib.sgpr_base,
.dword_offset = attrib.dword_offset,
.length = buffer.num_records,
.used_types = IR::Type::F32,
.is_storage = true, // we may not fit into UBO with large meshes
.is_instance_data = true,
});
instance_buf_handle = s32(info.buffers.size() - 1);
@@ -438,6 +436,7 @@ void Translator::EmitFlowControl(u32 pc, const GcnInst& inst) {
case Opcode::S_CBRANCH_SCC1:
case Opcode::S_CBRANCH_VCCNZ:
case Opcode::S_CBRANCH_VCCZ:
case Opcode::S_CBRANCH_EXECNZ:
case Opcode::S_BRANCH:
return;
default:

@@ -191,8 +191,10 @@ public:
void V_MBCNT_U32_B32(bool is_low, const GcnInst& inst);
// Vector Memory
void BUFFER_LOAD_FORMAT(u32 num_dwords, bool is_typed, bool is_format, const GcnInst& inst);
void BUFFER_STORE_FORMAT(u32 num_dwords, bool is_typed, bool is_format, const GcnInst& inst);
void BUFFER_LOAD(u32 num_dwords, bool is_typed, const GcnInst& inst);
void BUFFER_LOAD_FORMAT(u32 num_dwords, const GcnInst& inst);
void BUFFER_STORE(u32 num_dwords, bool is_typed, const GcnInst& inst);
void BUFFER_STORE_FORMAT(u32 num_dwords, const GcnInst& inst);
void BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst);
// Vector interpolation
@@ -202,10 +204,10 @@ public:
// Data share
void DS_SWIZZLE_B32(const GcnInst& inst);
void DS_READ(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst);
void DS_WRITE(int bit_size, bool is_signed, bool is_pair, const GcnInst& inst);
void DS_WRITE(int bit_size, bool is_signed, bool is_pair, bool stride64, const GcnInst& inst);
void DS_ADD_U32(const GcnInst& inst, bool rtn);
void DS_MIN_U32(const GcnInst& inst, bool rtn);
void DS_MAX_U32(const GcnInst& inst, bool rtn);
void DS_MIN_U32(const GcnInst& inst, bool is_signed, bool rtn);
void DS_MAX_U32(const GcnInst& inst, bool is_signed, bool rtn);
void V_READFIRSTLANE_B32(const GcnInst& inst);
void V_READLANE_B32(const GcnInst& inst);
void V_WRITELANE_B32(const GcnInst& inst);

@@ -415,14 +415,20 @@ void Translator::V_ADDC_U32(const GcnInst& inst) {
const auto src0 = GetSrc<IR::U32>(inst.src[0]);
const auto src1 = GetSrc<IR::U32>(inst.src[1]);
IR::U32 scarry;
IR::U1 carry;
if (inst.src_count == 3) { // VOP3
IR::U1 thread_bit{ir.GetThreadBitScalarReg(IR::ScalarReg(inst.src[2].code))};
scarry = IR::U32{ir.Select(thread_bit, ir.Imm32(1), ir.Imm32(0))};
if (inst.src[2].field == OperandField::VccLo) {
carry = ir.GetVcc();
} else if (inst.src[2].field == OperandField::ScalarGPR) {
carry = ir.GetThreadBitScalarReg(IR::ScalarReg(inst.src[2].code));
} else {
UNREACHABLE();
}
} else { // VOP2
scarry = ir.GetVccLo();
carry = ir.GetVcc();
}
const IR::U32 scarry = IR::U32{ir.Select(carry, ir.Imm32(1), ir.Imm32(0))};
const IR::U32 result = ir.IAdd(ir.IAdd(src0, src1), scarry);
const IR::VectorReg dst_reg{inst.dst[0].code};

@@ -56,57 +56,57 @@ void Translator::EmitVectorMemory(const GcnInst& inst) {
// Buffer load operations
case Opcode::TBUFFER_LOAD_FORMAT_X:
return BUFFER_LOAD_FORMAT(1, true, true, inst);
return BUFFER_LOAD(1, true, inst);
case Opcode::TBUFFER_LOAD_FORMAT_XY:
return BUFFER_LOAD_FORMAT(2, true, true, inst);
return BUFFER_LOAD(2, true, inst);
case Opcode::TBUFFER_LOAD_FORMAT_XYZ:
return BUFFER_LOAD_FORMAT(3, true, true, inst);
return BUFFER_LOAD(3, true, inst);
case Opcode::TBUFFER_LOAD_FORMAT_XYZW:
return BUFFER_LOAD_FORMAT(4, true, true, inst);
return BUFFER_LOAD(4, true, inst);
case Opcode::BUFFER_LOAD_FORMAT_X:
return BUFFER_LOAD_FORMAT(1, false, true, inst);
return BUFFER_LOAD_FORMAT(1, inst);
case Opcode::BUFFER_LOAD_FORMAT_XY:
return BUFFER_LOAD_FORMAT(2, false, true, inst);
return BUFFER_LOAD_FORMAT(2, inst);
case Opcode::BUFFER_LOAD_FORMAT_XYZ:
return BUFFER_LOAD_FORMAT(3, false, true, inst);
return BUFFER_LOAD_FORMAT(3, inst);
case Opcode::BUFFER_LOAD_FORMAT_XYZW:
return BUFFER_LOAD_FORMAT(4, false, true, inst);
return BUFFER_LOAD_FORMAT(4, inst);
case Opcode::BUFFER_LOAD_DWORD:
return BUFFER_LOAD_FORMAT(1, false, false, inst);
return BUFFER_LOAD(1, false, inst);
case Opcode::BUFFER_LOAD_DWORDX2:
return BUFFER_LOAD_FORMAT(2, false, false, inst);
return BUFFER_LOAD(2, false, inst);
case Opcode::BUFFER_LOAD_DWORDX3:
return BUFFER_LOAD_FORMAT(3, false, false, inst);
return BUFFER_LOAD(3, false, inst);
case Opcode::BUFFER_LOAD_DWORDX4:
return BUFFER_LOAD_FORMAT(4, false, false, inst);
return BUFFER_LOAD(4, false, inst);
// Buffer store operations
case Opcode::BUFFER_STORE_FORMAT_X:
return BUFFER_STORE_FORMAT(1, false, true, inst);
return BUFFER_STORE_FORMAT(1, inst);
case Opcode::BUFFER_STORE_FORMAT_XY:
return BUFFER_STORE_FORMAT(2, false, true, inst);
return BUFFER_STORE_FORMAT(2, inst);
case Opcode::BUFFER_STORE_FORMAT_XYZ:
return BUFFER_STORE_FORMAT(3, false, true, inst);
return BUFFER_STORE_FORMAT(3, inst);
case Opcode::BUFFER_STORE_FORMAT_XYZW:
return BUFFER_STORE_FORMAT(4, false, true, inst);
return BUFFER_STORE_FORMAT(4, inst);
case Opcode::TBUFFER_STORE_FORMAT_X:
return BUFFER_STORE_FORMAT(1, true, true, inst);
return BUFFER_STORE(1, true, inst);
case Opcode::TBUFFER_STORE_FORMAT_XY:
return BUFFER_STORE_FORMAT(2, true, true, inst);
return BUFFER_STORE(2, true, inst);
case Opcode::TBUFFER_STORE_FORMAT_XYZ:
return BUFFER_STORE_FORMAT(3, true, true, inst);
return BUFFER_STORE(3, true, inst);
case Opcode::BUFFER_STORE_DWORD:
return BUFFER_STORE_FORMAT(1, false, false, inst);
return BUFFER_STORE(1, false, inst);
case Opcode::BUFFER_STORE_DWORDX2:
return BUFFER_STORE_FORMAT(2, false, false, inst);
return BUFFER_STORE(2, false, inst);
case Opcode::BUFFER_STORE_DWORDX3:
return BUFFER_STORE_FORMAT(3, false, false, inst);
return BUFFER_STORE(3, false, inst);
case Opcode::BUFFER_STORE_DWORDX4:
return BUFFER_STORE_FORMAT(4, false, false, inst);
return BUFFER_STORE(4, false, inst);
// Buffer atomic operations
case Opcode::BUFFER_ATOMIC_ADD:
@@ -349,8 +349,7 @@ void Translator::IMAGE_STORE(const GcnInst& inst) {
ir.ImageWrite(handle, body, value, {});
}
void Translator::BUFFER_LOAD_FORMAT(u32 num_dwords, bool is_typed, bool is_format,
const GcnInst& inst) {
void Translator::BUFFER_LOAD(u32 num_dwords, bool is_typed, const GcnInst& inst) {
const auto& mtbuf = inst.control.mtbuf;
const IR::VectorReg vaddr{inst.src[0].code};
const IR::ScalarReg sharp{inst.src[2].code * 4};
@@ -370,22 +369,19 @@ void Translator::BUFFER_LOAD_FORMAT(u32 num_dwords, bool is_typed, bool is_forma
info.index_enable.Assign(mtbuf.idxen);
info.offset_enable.Assign(mtbuf.offen);
info.inst_offset.Assign(mtbuf.offset);
info.is_typed.Assign(is_typed);
if (is_typed) {
info.dmft.Assign(static_cast<AmdGpu::DataFormat>(mtbuf.dfmt));
info.nfmt.Assign(static_cast<AmdGpu::NumberFormat>(mtbuf.nfmt));
ASSERT(info.nfmt == AmdGpu::NumberFormat::Float &&
(info.dmft == AmdGpu::DataFormat::Format32_32_32_32 ||
info.dmft == AmdGpu::DataFormat::Format32_32_32 ||
info.dmft == AmdGpu::DataFormat::Format32_32 ||
info.dmft == AmdGpu::DataFormat::Format32));
const auto dmft = static_cast<AmdGpu::DataFormat>(mtbuf.dfmt);
const auto nfmt = static_cast<AmdGpu::NumberFormat>(mtbuf.nfmt);
ASSERT(nfmt == AmdGpu::NumberFormat::Float &&
(dmft == AmdGpu::DataFormat::Format32_32_32_32 ||
dmft == AmdGpu::DataFormat::Format32_32_32 ||
dmft == AmdGpu::DataFormat::Format32_32 || dmft == AmdGpu::DataFormat::Format32));
}
const IR::Value handle =
ir.CompositeConstruct(ir.GetScalarReg(sharp), ir.GetScalarReg(sharp + 1),
ir.GetScalarReg(sharp + 2), ir.GetScalarReg(sharp + 3));
const IR::Value value = is_format ? ir.LoadBufferFormat(num_dwords, handle, address, info)
: ir.LoadBuffer(num_dwords, handle, address, info);
const IR::Value value = ir.LoadBuffer(num_dwords, handle, address, info);
const IR::VectorReg dst_reg{inst.src[1].code};
if (num_dwords == 1) {
ir.SetVectorReg(dst_reg, IR::F32{value});
@@ -396,8 +392,34 @@ void Translator::BUFFER_LOAD_FORMAT(u32 num_dwords, bool is_typed, bool is_forma
}
}
void Translator::BUFFER_STORE_FORMAT(u32 num_dwords, bool is_typed, bool is_format,
const GcnInst& inst) {
void Translator::BUFFER_LOAD_FORMAT(u32 num_dwords, const GcnInst& inst) {
const auto& mubuf = inst.control.mubuf;
const IR::VectorReg vaddr{inst.src[0].code};
const IR::ScalarReg sharp{inst.src[2].code * 4};
ASSERT_MSG(!mubuf.offen && mubuf.offset == 0, "Offsets for image buffers are not supported");
const IR::Value address = [&] -> IR::Value {
if (mubuf.idxen) {
return ir.GetVectorReg(vaddr);
}
return {};
}();
const IR::Value soffset{GetSrc(inst.src[3])};
ASSERT_MSG(soffset.IsImmediate() && soffset.U32() == 0, "Non immediate offset not supported");
IR::BufferInstInfo info{};
info.index_enable.Assign(mubuf.idxen);
const IR::Value handle =
ir.CompositeConstruct(ir.GetScalarReg(sharp), ir.GetScalarReg(sharp + 1),
ir.GetScalarReg(sharp + 2), ir.GetScalarReg(sharp + 3));
const IR::Value value = ir.LoadBufferFormat(handle, address, info);
const IR::VectorReg dst_reg{inst.src[1].code};
for (u32 i = 0; i < num_dwords; i++) {
ir.SetVectorReg(dst_reg + i, IR::F32{ir.CompositeExtract(value, i)});
}
}
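
The typed/untyped split means BUFFER_LOAD_FORMAT always receives a full F32x4 and writes back only the requested dwords. E.g. BUFFER_LOAD_FORMAT_X (num_dwords == 1):

// value is F32x4; only component 0 reaches a VGPR:
//   ir.SetVectorReg(dst_reg, IR::F32{ir.CompositeExtract(value, 0)});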
void Translator::BUFFER_STORE(u32 num_dwords, bool is_typed, const GcnInst& inst) {
const auto& mtbuf = inst.control.mtbuf;
const IR::VectorReg vaddr{inst.src[0].code};
const IR::ScalarReg sharp{inst.src[2].code * 4};
@@ -417,45 +439,76 @@ void Translator::BUFFER_STORE_FORMAT(u32 num_dwords, bool is_typed, bool is_form
info.index_enable.Assign(mtbuf.idxen);
info.offset_enable.Assign(mtbuf.offen);
info.inst_offset.Assign(mtbuf.offset);
info.is_typed.Assign(is_typed);
if (is_typed) {
info.dmft.Assign(static_cast<AmdGpu::DataFormat>(mtbuf.dfmt));
info.nfmt.Assign(static_cast<AmdGpu::NumberFormat>(mtbuf.nfmt));
const auto dmft = static_cast<AmdGpu::DataFormat>(mtbuf.dfmt);
const auto nfmt = static_cast<AmdGpu::NumberFormat>(mtbuf.nfmt);
ASSERT(nfmt == AmdGpu::NumberFormat::Float &&
(dmft == AmdGpu::DataFormat::Format32_32_32_32 ||
dmft == AmdGpu::DataFormat::Format32_32_32 ||
dmft == AmdGpu::DataFormat::Format32_32 || dmft == AmdGpu::DataFormat::Format32));
}
IR::Value value{};
const IR::VectorReg src_reg{inst.src[1].code};
switch (num_dwords) {
case 1:
value = ir.GetVectorReg<Shader::IR::F32>(src_reg);
value = ir.GetVectorReg<IR::F32>(src_reg);
break;
case 2:
value = ir.CompositeConstruct(ir.GetVectorReg<Shader::IR::F32>(src_reg),
ir.GetVectorReg<Shader::IR::F32>(src_reg + 1));
value = ir.CompositeConstruct(ir.GetVectorReg<IR::F32>(src_reg),
ir.GetVectorReg<IR::F32>(src_reg + 1));
break;
case 3:
value = ir.CompositeConstruct(ir.GetVectorReg<Shader::IR::F32>(src_reg),
ir.GetVectorReg<Shader::IR::F32>(src_reg + 1),
ir.GetVectorReg<Shader::IR::F32>(src_reg + 2));
value = ir.CompositeConstruct(ir.GetVectorReg<IR::F32>(src_reg),
ir.GetVectorReg<IR::F32>(src_reg + 1),
ir.GetVectorReg<IR::F32>(src_reg + 2));
break;
case 4:
value = ir.CompositeConstruct(ir.GetVectorReg<Shader::IR::F32>(src_reg),
ir.GetVectorReg<Shader::IR::F32>(src_reg + 1),
ir.GetVectorReg<Shader::IR::F32>(src_reg + 2),
ir.GetVectorReg<Shader::IR::F32>(src_reg + 3));
value = ir.CompositeConstruct(
ir.GetVectorReg<IR::F32>(src_reg), ir.GetVectorReg<IR::F32>(src_reg + 1),
ir.GetVectorReg<IR::F32>(src_reg + 2), ir.GetVectorReg<IR::F32>(src_reg + 3));
break;
}
const IR::Value handle =
ir.CompositeConstruct(ir.GetScalarReg(sharp), ir.GetScalarReg(sharp + 1),
ir.GetScalarReg(sharp + 2), ir.GetScalarReg(sharp + 3));
if (is_format) {
ir.StoreBufferFormat(num_dwords, handle, address, value, info);
} else {
ir.StoreBuffer(num_dwords, handle, address, value, info);
}
ir.StoreBuffer(num_dwords, handle, address, value, info);
}
void Translator::BUFFER_STORE_FORMAT(u32 num_dwords, const GcnInst& inst) {
const auto& mubuf = inst.control.mubuf;
const IR::VectorReg vaddr{inst.src[0].code};
const IR::ScalarReg sharp{inst.src[2].code * 4};
ASSERT_MSG(!mubuf.offen && mubuf.offset == 0, "Offsets for image buffers are not supported");
const IR::Value address = [&] -> IR::Value {
if (mubuf.idxen) {
return ir.GetVectorReg(vaddr);
}
return {};
}();
const IR::Value soffset{GetSrc(inst.src[3])};
ASSERT_MSG(soffset.IsImmediate() && soffset.U32() == 0, "Non immediate offset not supported");
IR::BufferInstInfo info{};
info.index_enable.Assign(mubuf.idxen);
const IR::VectorReg src_reg{inst.src[1].code};
std::array<IR::Value, 4> comps{};
for (u32 i = 0; i < num_dwords; i++) {
comps[i] = ir.GetVectorReg<IR::F32>(src_reg + i);
}
for (u32 i = num_dwords; i < 4; i++) {
comps[i] = ir.Imm32(0.f);
}
const IR::Value value = ir.CompositeConstruct(comps[0], comps[1], comps[2], comps[3]);
const IR::Value handle =
ir.CompositeConstruct(ir.GetScalarReg(sharp), ir.GetScalarReg(sharp + 1),
ir.GetScalarReg(sharp + 2), ir.GetScalarReg(sharp + 3));
ir.StoreBufferFormat(handle, address, value, info);
}
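
The store side mirrors the load: values are zero-padded to four components so StoreBufferFormatF32 keeps a single F32x4 signature, and the image write should ignore components beyond the bound format. E.g. BUFFER_STORE_FORMAT_XY (num_dwords == 2):

// comps = { v0, v1, 0.0f, 0.0f }; // lanes 2..3 are padding only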
// TODO: U64
void Translator::BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst) {
const auto& mubuf = inst.control.mubuf;
const IR::VectorReg vaddr{inst.src[0].code};

@@ -325,20 +325,8 @@ Value IREmitter::LoadBuffer(int num_dwords, const Value& handle, const Value& ad
}
}
Value IREmitter::LoadBufferFormat(int num_dwords, const Value& handle, const Value& address,
BufferInstInfo info) {
switch (num_dwords) {
case 1:
return Inst(Opcode::LoadBufferFormatF32, Flags{info}, handle, address);
case 2:
return Inst(Opcode::LoadBufferFormatF32x2, Flags{info}, handle, address);
case 3:
return Inst(Opcode::LoadBufferFormatF32x3, Flags{info}, handle, address);
case 4:
return Inst(Opcode::LoadBufferFormatF32x4, Flags{info}, handle, address);
default:
UNREACHABLE_MSG("Invalid number of dwords {}", num_dwords);
}
Value IREmitter::LoadBufferFormat(const Value& handle, const Value& address, BufferInstInfo info) {
return Inst(Opcode::LoadBufferFormatF32, Flags{info}, handle, address);
}
void IREmitter::StoreBuffer(int num_dwords, const Value& handle, const Value& address,
@@ -409,24 +397,9 @@ Value IREmitter::BufferAtomicSwap(const Value& handle, const Value& address, con
return Inst(Opcode::BufferAtomicSwap32, Flags{info}, handle, address, value);
}
void IREmitter::StoreBufferFormat(int num_dwords, const Value& handle, const Value& address,
const Value& data, BufferInstInfo info) {
switch (num_dwords) {
case 1:
Inst(Opcode::StoreBufferFormatF32, Flags{info}, handle, address, data);
break;
case 2:
Inst(Opcode::StoreBufferFormatF32x2, Flags{info}, handle, address, data);
break;
case 3:
Inst(Opcode::StoreBufferFormatF32x3, Flags{info}, handle, address, data);
break;
case 4:
Inst(Opcode::StoreBufferFormatF32x4, Flags{info}, handle, address, data);
break;
default:
UNREACHABLE_MSG("Invalid number of dwords {}", num_dwords);
}
void IREmitter::StoreBufferFormat(const Value& handle, const Value& address, const Value& data,
BufferInstInfo info) {
Inst(Opcode::StoreBufferFormatF32, Flags{info}, handle, address, data);
}
U32 IREmitter::LaneId() {

@@ -92,12 +92,12 @@ public:
[[nodiscard]] Value LoadBuffer(int num_dwords, const Value& handle, const Value& address,
BufferInstInfo info);
[[nodiscard]] Value LoadBufferFormat(int num_dwords, const Value& handle, const Value& address,
[[nodiscard]] Value LoadBufferFormat(const Value& handle, const Value& address,
BufferInstInfo info);
void StoreBuffer(int num_dwords, const Value& handle, const Value& address, const Value& data,
BufferInstInfo info);
void StoreBufferFormat(int num_dwords, const Value& handle, const Value& address,
const Value& data, BufferInstInfo info);
void StoreBufferFormat(const Value& handle, const Value& address, const Value& data,
BufferInstInfo info);
[[nodiscard]] Value BufferAtomicIAdd(const Value& handle, const Value& address,
const Value& value, BufferInstInfo info);

@@ -56,9 +56,6 @@ bool Inst::MayHaveSideEffects() const noexcept {
case Opcode::StoreBufferF32x3:
case Opcode::StoreBufferF32x4:
case Opcode::StoreBufferFormatF32:
case Opcode::StoreBufferFormatF32x2:
case Opcode::StoreBufferFormatF32x3:
case Opcode::StoreBufferFormatF32x4:
case Opcode::StoreBufferU32:
case Opcode::BufferAtomicIAdd32:
case Opcode::BufferAtomicSMin32:

@@ -79,19 +79,13 @@ OPCODE(LoadBufferF32, F32, Opaq
OPCODE(LoadBufferF32x2, F32x2, Opaque, Opaque, )
OPCODE(LoadBufferF32x3, F32x3, Opaque, Opaque, )
OPCODE(LoadBufferF32x4, F32x4, Opaque, Opaque, )
OPCODE(LoadBufferFormatF32, F32, Opaque, Opaque, )
OPCODE(LoadBufferFormatF32x2, F32x2, Opaque, Opaque, )
OPCODE(LoadBufferFormatF32x3, F32x3, Opaque, Opaque, )
OPCODE(LoadBufferFormatF32x4, F32x4, Opaque, Opaque, )
OPCODE(LoadBufferFormatF32, F32x4, Opaque, Opaque, )
OPCODE(LoadBufferU32, U32, Opaque, Opaque, )
OPCODE(StoreBufferF32, Void, Opaque, Opaque, F32, )
OPCODE(StoreBufferF32x2, Void, Opaque, Opaque, F32x2, )
OPCODE(StoreBufferF32x3, Void, Opaque, Opaque, F32x3, )
OPCODE(StoreBufferF32x4, Void, Opaque, Opaque, F32x4, )
OPCODE(StoreBufferFormatF32, Void, Opaque, Opaque, F32, )
OPCODE(StoreBufferFormatF32x2, Void, Opaque, Opaque, F32x2, )
OPCODE(StoreBufferFormatF32x3, Void, Opaque, Opaque, F32x3, )
OPCODE(StoreBufferFormatF32x4, Void, Opaque, Opaque, F32x4, )
OPCODE(StoreBufferFormatF32, Void, Opaque, Opaque, F32x4, )
OPCODE(StoreBufferU32, Void, Opaque, Opaque, U32, )
// Buffer atomic operations

@@ -3,6 +3,7 @@
#include <algorithm>
#include <boost/container/small_vector.hpp>
#include "common/alignment.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/breadth_first_search.h"
#include "shader_recompiler/ir/ir_emitter.h"
@@ -45,10 +46,6 @@ bool IsBufferStore(const IR::Inst& inst) {
case IR::Opcode::StoreBufferF32x2:
case IR::Opcode::StoreBufferF32x3:
case IR::Opcode::StoreBufferF32x4:
case IR::Opcode::StoreBufferFormatF32:
case IR::Opcode::StoreBufferFormatF32x2:
case IR::Opcode::StoreBufferFormatF32x3:
case IR::Opcode::StoreBufferFormatF32x4:
case IR::Opcode::StoreBufferU32:
return true;
default:
@@ -62,10 +59,6 @@ bool IsBufferInstruction(const IR::Inst& inst) {
case IR::Opcode::LoadBufferF32x2:
case IR::Opcode::LoadBufferF32x3:
case IR::Opcode::LoadBufferF32x4:
case IR::Opcode::LoadBufferFormatF32:
case IR::Opcode::LoadBufferFormatF32x2:
case IR::Opcode::LoadBufferFormatF32x3:
case IR::Opcode::LoadBufferFormatF32x4:
case IR::Opcode::LoadBufferU32:
case IR::Opcode::ReadConstBuffer:
case IR::Opcode::ReadConstBufferU32:
@@ -75,6 +68,11 @@ }
}
}
bool IsTextureBufferInstruction(const IR::Inst& inst) {
return inst.GetOpcode() == IR::Opcode::LoadBufferFormatF32 ||
inst.GetOpcode() == IR::Opcode::StoreBufferFormatF32;
}
static bool UseFP16(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat num_format) {
switch (num_format) {
case AmdGpu::NumberFormat::Float:
@@ -100,28 +98,6 @@ static bool UseFP16(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat num_for
IR::Type BufferDataType(const IR::Inst& inst, AmdGpu::NumberFormat num_format) {
switch (inst.GetOpcode()) {
case IR::Opcode::LoadBufferFormatF32:
case IR::Opcode::LoadBufferFormatF32x2:
case IR::Opcode::LoadBufferFormatF32x3:
case IR::Opcode::LoadBufferFormatF32x4:
case IR::Opcode::StoreBufferFormatF32:
case IR::Opcode::StoreBufferFormatF32x2:
case IR::Opcode::StoreBufferFormatF32x3:
case IR::Opcode::StoreBufferFormatF32x4:
switch (num_format) {
case AmdGpu::NumberFormat::Unorm:
case AmdGpu::NumberFormat::Snorm:
case AmdGpu::NumberFormat::Uscaled:
case AmdGpu::NumberFormat::Sscaled:
case AmdGpu::NumberFormat::Uint:
case AmdGpu::NumberFormat::Sint:
case AmdGpu::NumberFormat::SnormNz:
return IR::Type::U32;
case AmdGpu::NumberFormat::Float:
return IR::Type::F32;
default:
UNREACHABLE();
}
case IR::Opcode::LoadBufferF32:
case IR::Opcode::LoadBufferF32x2:
case IR::Opcode::LoadBufferF32x3:
@@ -143,20 +119,8 @@ IR::Type BufferDataType(const IR::Inst& inst, AmdGpu::NumberFormat num_format) {
}
}
bool IsImageInstruction(const IR::Inst& inst) {
bool IsImageAtomicInstruction(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::ImageSampleExplicitLod:
case IR::Opcode::ImageSampleImplicitLod:
case IR::Opcode::ImageSampleDrefExplicitLod:
case IR::Opcode::ImageSampleDrefImplicitLod:
case IR::Opcode::ImageFetch:
case IR::Opcode::ImageGather:
case IR::Opcode::ImageGatherDref:
case IR::Opcode::ImageQueryDimensions:
case IR::Opcode::ImageQueryLod:
case IR::Opcode::ImageGradient:
case IR::Opcode::ImageRead:
case IR::Opcode::ImageWrite:
case IR::Opcode::ImageAtomicIAdd32:
case IR::Opcode::ImageAtomicSMin32:
case IR::Opcode::ImageAtomicUMin32:
@@ -178,20 +142,27 @@ bool IsImageStorageInstruction(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::ImageWrite:
case IR::Opcode::ImageRead:
case IR::Opcode::ImageAtomicIAdd32:
case IR::Opcode::ImageAtomicSMin32:
case IR::Opcode::ImageAtomicUMin32:
case IR::Opcode::ImageAtomicSMax32:
case IR::Opcode::ImageAtomicUMax32:
case IR::Opcode::ImageAtomicInc32:
case IR::Opcode::ImageAtomicDec32:
case IR::Opcode::ImageAtomicAnd32:
case IR::Opcode::ImageAtomicOr32:
case IR::Opcode::ImageAtomicXor32:
case IR::Opcode::ImageAtomicExchange32:
return true;
default:
return false;
return IsImageAtomicInstruction(inst);
}
}
bool IsImageInstruction(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::ImageSampleExplicitLod:
case IR::Opcode::ImageSampleImplicitLod:
case IR::Opcode::ImageSampleDrefExplicitLod:
case IR::Opcode::ImageSampleDrefImplicitLod:
case IR::Opcode::ImageFetch:
case IR::Opcode::ImageGather:
case IR::Opcode::ImageGatherDref:
case IR::Opcode::ImageQueryDimensions:
case IR::Opcode::ImageQueryLod:
case IR::Opcode::ImageGradient:
return true;
default:
return IsImageStorageInstruction(inst);
}
}
@@ -214,7 +185,8 @@ u32 ImageOffsetArgumentPosition(const IR::Inst& inst) {
class Descriptors {
public:
explicit Descriptors(Info& info_)
: info{info_}, buffer_resources{info_.buffers}, image_resources{info_.images},
: info{info_}, buffer_resources{info_.buffers},
texture_buffer_resources{info_.texture_buffers}, image_resources{info_.images},
sampler_resources{info_.samplers} {}
u32 Add(const BufferResource& desc) {
@@ -224,13 +196,21 @@ public:
desc.inline_cbuf == existing.inline_cbuf;
})};
auto& buffer = buffer_resources[index];
ASSERT(buffer.length == desc.length);
buffer.is_storage |= desc.is_storage;
buffer.used_types |= desc.used_types;
buffer.is_written |= desc.is_written;
return index;
}
u32 Add(const TextureBufferResource& desc) {
const u32 index{Add(texture_buffer_resources, desc, [&desc](const auto& existing) {
return desc.sgpr_base == existing.sgpr_base &&
desc.dword_offset == existing.dword_offset;
})};
auto& buffer = texture_buffer_resources[index];
buffer.is_written |= desc.is_written;
return index;
}
u32 Add(const ImageResource& desc) {
const u32 index{Add(image_resources, desc, [&desc](const auto& existing) {
return desc.sgpr_base == existing.sgpr_base &&
@@ -247,7 +227,7 @@ public:
return true;
}
// Samplers with different bindings might still be the same.
return existing.GetSsharp(info) == desc.GetSsharp(info);
return existing.GetSharp(info) == desc.GetSharp(info);
})};
return index;
}
@@ -265,6 +245,7 @@ private:
const Info& info;
BufferResourceList& buffer_resources;
TextureBufferResourceList& texture_buffer_resources;
ImageResourceList& image_resources;
SamplerResourceList& sampler_resources;
};
@@ -361,33 +342,6 @@ SharpLocation TrackSharp(const IR::Inst* inst) {
};
}
static constexpr size_t MaxUboSize = 65536;
static bool IsLoadBufferFormat(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::LoadBufferFormatF32:
case IR::Opcode::LoadBufferFormatF32x2:
case IR::Opcode::LoadBufferFormatF32x3:
case IR::Opcode::LoadBufferFormatF32x4:
return true;
default:
return false;
}
}
static u32 BufferLength(const AmdGpu::Buffer& buffer) {
const auto stride = buffer.GetStride();
if (stride < sizeof(f32)) {
ASSERT(sizeof(f32) % stride == 0);
return (((buffer.num_records - 1) / sizeof(f32)) + 1) * stride;
} else if (stride == sizeof(f32)) {
return buffer.num_records;
} else {
ASSERT(stride % sizeof(f32) == 0);
return buffer.num_records * (stride / sizeof(f32));
}
}
s32 TryHandleInlineCbuf(IR::Inst& inst, Info& info, Descriptors& descriptors,
AmdGpu::Buffer& cbuf) {
@@ -414,10 +368,8 @@ s32 TryHandleInlineCbuf(IR::Inst& inst, Info& info, Descriptors& descriptors,
return descriptors.Add(BufferResource{
.sgpr_base = std::numeric_limits<u32>::max(),
.dword_offset = 0,
.length = BufferLength(cbuf),
.used_types = BufferDataType(inst, cbuf.GetNumberFmt()),
.inline_cbuf = cbuf,
.is_storage = IsBufferStore(inst) || cbuf.GetSize() > MaxUboSize,
});
}
@@ -429,28 +381,17 @@ void PatchBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
IR::Inst* handle = inst.Arg(0).InstRecursive();
IR::Inst* producer = handle->Arg(0).InstRecursive();
const auto sharp = TrackSharp(producer);
const bool is_store = IsBufferStore(inst);
buffer = info.ReadUd<AmdGpu::Buffer>(sharp.sgpr_base, sharp.dword_offset);
binding = descriptors.Add(BufferResource{
.sgpr_base = sharp.sgpr_base,
.dword_offset = sharp.dword_offset,
.length = BufferLength(buffer),
.used_types = BufferDataType(inst, buffer.GetNumberFmt()),
.is_storage = is_store || buffer.GetSize() > MaxUboSize,
.is_written = is_store,
.is_written = IsBufferStore(inst),
});
}
// Update buffer descriptor format.
const auto inst_info = inst.Flags<IR::BufferInstInfo>();
auto& buffer_desc = info.buffers[binding];
if (inst_info.is_typed) {
buffer_desc.dfmt = inst_info.dmft;
buffer_desc.nfmt = inst_info.nfmt;
} else {
buffer_desc.dfmt = buffer.GetDataFmt();
buffer_desc.nfmt = buffer.GetNumberFmt();
}
// Replace handle with binding index in buffer resource list.
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
@@ -463,20 +404,7 @@ void PatchBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
return;
}
if (IsLoadBufferFormat(inst)) {
if (UseFP16(buffer.GetDataFmt(), buffer.GetNumberFmt())) {
info.uses_fp16 = true;
}
} else {
const u32 stride = buffer.GetStride();
if (stride < 4) {
LOG_WARNING(Render_Vulkan,
"non-formatting load_buffer_* is not implemented for stride {}", stride);
}
}
// Compute address of the buffer using the stride.
// Todo: What if buffer is rebound with different stride?
IR::U32 address = ir.Imm32(inst_info.inst_offset.Value());
if (inst_info.index_enable) {
const IR::U32 index = inst_info.offset_enable ? IR::U32{ir.CompositeExtract(inst.Arg(1), 0)}
@ -491,8 +419,31 @@ void PatchBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
inst.SetArg(1, address);
}
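The net effect of the patching above is a linear byte address; a scalar model of the emitted IR (illustrative only — the offset_enable branch is elided in this hunk and assumed to add the offset component analogously):

    u32 ComputeBufferAddress(u32 inst_offset, bool index_enable, u32 index,
                             bool offset_enable, u32 offset, u32 stride) {
        u32 address = inst_offset;     // immediate offset encoded in the instruction
        if (index_enable) {
            address += index * stride; // per-element index scaled by the sharp's stride
        }
        if (offset_enable) {
            address += offset;         // dynamic offset operand
        }
        return address;
    }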
void PatchTextureBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
Descriptors& descriptors) {
const IR::Inst* handle = inst.Arg(0).InstRecursive();
const IR::Inst* producer = handle->Arg(0).InstRecursive();
const auto sharp = TrackSharp(producer);
const auto buffer = info.ReadUd<AmdGpu::Buffer>(sharp.sgpr_base, sharp.dword_offset);
const s32 binding = descriptors.Add(TextureBufferResource{
.sgpr_base = sharp.sgpr_base,
.dword_offset = sharp.dword_offset,
.nfmt = buffer.GetNumberFmt(),
.is_written = inst.GetOpcode() == IR::Opcode::StoreBufferFormatF32,
});
// Replace handle with binding index in texture buffer resource list.
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
inst.SetArg(0, ir.Imm32(binding));
ASSERT(!buffer.swizzle_enable && !buffer.add_tid_enable);
}
IR::Value PatchCubeCoord(IR::IREmitter& ir, const IR::Value& s, const IR::Value& t,
const IR::Value& z) {
const IR::Value& z, bool is_storage) {
// When a cubemap is written with imageStore, it is treated like a 2DArray.
if (is_storage) {
return ir.CompositeConstruct(s, t, z);
}
// We need to fix the x and y coordinates,
// because the s and t coordinates are scaled and offset by 1.5 via v_madak_f32.
// We already force the scale value to be 1.0 when handling v_cubema_f32,
@ -530,13 +481,15 @@ void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descrip
return;
}
ASSERT(image.GetType() != AmdGpu::ImageType::Invalid);
const bool is_storage = IsImageStorageInstruction(inst);
u32 image_binding = descriptors.Add(ImageResource{
.sgpr_base = tsharp.sgpr_base,
.dword_offset = tsharp.dword_offset,
.type = image.GetType(),
.nfmt = static_cast<AmdGpu::NumberFormat>(image.GetNumberFmt()),
.is_storage = IsImageStorageInstruction(inst),
.is_storage = is_storage,
.is_depth = bool(inst_info.is_depth),
.is_atomic = IsImageAtomicInstruction(inst),
});
// Read sampler sharp. This doesn't exist for IMAGE_LOAD/IMAGE_STORE instructions
@ -593,7 +546,8 @@ void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descrip
case AmdGpu::ImageType::Color3D: // x, y, z
return {ir.CompositeConstruct(body->Arg(0), body->Arg(1), body->Arg(2)), body->Arg(3)};
case AmdGpu::ImageType::Cube: // x, y, face
return {PatchCubeCoord(ir, body->Arg(0), body->Arg(1), body->Arg(2)), body->Arg(3)};
return {PatchCubeCoord(ir, body->Arg(0), body->Arg(1), body->Arg(2), is_storage),
body->Arg(3)};
default:
UNREACHABLE_MSG("Unknown image type {}", image.GetType());
}
@ -668,6 +622,10 @@ void ResourceTrackingPass(IR::Program& program) {
PatchBufferInstruction(*block, inst, info, descriptors);
continue;
}
if (IsTextureBufferInstruction(inst)) {
PatchTextureBufferInstruction(*block, inst, info, descriptors);
continue;
}
if (IsImageInstruction(inst)) {
PatchImageInstruction(*block, inst, info, descriptors);
}


@ -29,6 +29,12 @@ void Visit(Info& info, IR::Inst& inst) {
case IR::Opcode::ImageWrite:
info.has_storage_images = true;
break;
case IR::Opcode::LoadBufferFormatF32:
info.has_texel_buffers = true;
break;
case IR::Opcode::StoreBufferFormatF32:
info.has_image_buffers = true;
break;
case IR::Opcode::QuadShuffle:
info.uses_group_quad = true;
break;
@ -44,6 +50,9 @@ void Visit(Info& info, IR::Inst& inst) {
case IR::Opcode::ImageQueryLod:
info.has_image_query = true;
break;
case IR::Opcode::LaneId:
info.uses_lane_id = true;
break;
default:
break;
}


@ -12,11 +12,13 @@
namespace Shader::IR {
struct Program {
explicit Program(Info& info_) : info{info_} {}
AbstractSyntaxList syntax_list;
BlockList blocks;
BlockList post_order_blocks;
std::vector<Gcn::GcnInst> ins_list;
Info info;
Info& info;
};
[[nodiscard]] std::string DumpProgram(const Program& program);


@ -66,9 +66,6 @@ union BufferInstInfo {
BitField<0, 1, u32> index_enable;
BitField<1, 1, u32> offset_enable;
BitField<2, 12, u32> inst_offset;
BitField<14, 4, AmdGpu::DataFormat> dmft;
BitField<18, 3, AmdGpu::NumberFormat> nfmt;
BitField<21, 1, u32> is_typed;
};
enum class ScalarReg : u32 {


@ -29,7 +29,7 @@ IR::BlockList GenerateBlocks(const IR::AbstractSyntaxList& syntax_list) {
IR::Program TranslateProgram(Common::ObjectPool<IR::Inst>& inst_pool,
Common::ObjectPool<IR::Block>& block_pool, std::span<const u32> token,
const Info&& info, const Profile& profile) {
Info& info, const Profile& profile) {
// Ensure first instruction is expected.
constexpr u32 token_mov_vcchi = 0xBEEB03FF;
ASSERT_MSG(token[0] == token_mov_vcchi, "First instruction is not s_mov_b32 vcc_hi, #imm");
@ -38,7 +38,7 @@ IR::Program TranslateProgram(Common::ObjectPool<IR::Inst>& inst_pool,
Gcn::GcnDecodeContext decoder;
// Decode and save instructions
IR::Program program;
IR::Program program{info};
program.ins_list.reserve(token.size());
while (!slice.atEnd()) {
program.ins_list.emplace_back(decoder.decodeInstruction(slice));
@ -49,7 +49,6 @@ IR::Program TranslateProgram(Common::ObjectPool<IR::Inst>& inst_pool,
Gcn::CFG cfg{gcn_block_pool, program.ins_list};
// Structurize control flow graph and create program.
program.info = std::move(info);
program.syntax_list = Shader::Gcn::BuildASL(inst_pool, block_pool, cfg, program.info, profile);
program.blocks = GenerateBlocks(program.syntax_list);
program.post_order_blocks = Shader::IR::PostOrder(program.syntax_list.front());


@ -13,7 +13,7 @@ struct Profile;
[[nodiscard]] IR::Program TranslateProgram(Common::ObjectPool<IR::Inst>& inst_pool,
Common::ObjectPool<IR::Block>& block_pool,
std::span<const u32> code, const Info&& info,
std::span<const u32> code, Info& info,
const Profile& profile);
} // namespace Shader
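Since TranslateProgram() now borrows the Info instead of consuming it, the caller owns its lifetime — a hypothetical call site (MakeShaderInfo is illustrative, not a real function):

    Shader::Info info = MakeShaderInfo(/* stage, user data, ... */);
    Shader::IR::Program program =
        Shader::TranslateProgram(inst_pool, block_pool, code, info, profile);
    // 'program.info' references 'info', so 'info' must outlive 'program'.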


@ -4,6 +4,7 @@
#pragma once
#include <span>
#include <boost/container/small_vector.hpp>
#include <boost/container/static_vector.hpp>
#include "common/assert.h"
#include "common/types.h"
@ -74,18 +75,29 @@ struct Info;
struct BufferResource {
u32 sgpr_base;
u32 dword_offset;
u32 length;
IR::Type used_types;
AmdGpu::Buffer inline_cbuf;
AmdGpu::DataFormat dfmt;
AmdGpu::NumberFormat nfmt;
bool is_storage{};
bool is_instance_data{};
bool is_written{};
constexpr AmdGpu::Buffer GetVsharp(const Info& info) const noexcept;
bool IsStorage(AmdGpu::Buffer buffer) const noexcept {
static constexpr size_t MaxUboSize = 65536;
return buffer.GetSize() > MaxUboSize || is_written;
}
constexpr AmdGpu::Buffer GetSharp(const Info& info) const noexcept;
};
using BufferResourceList = boost::container::static_vector<BufferResource, 16>;
using BufferResourceList = boost::container::small_vector<BufferResource, 16>;
struct TextureBufferResource {
u32 sgpr_base;
u32 dword_offset;
AmdGpu::NumberFormat nfmt;
bool is_written{};
constexpr AmdGpu::Buffer GetSharp(const Info& info) const noexcept;
};
using TextureBufferResourceList = boost::container::small_vector<TextureBufferResource, 16>;
struct ImageResource {
u32 sgpr_base;
@ -94,8 +106,11 @@ struct ImageResource {
AmdGpu::NumberFormat nfmt;
bool is_storage;
bool is_depth;
bool is_atomic{};
constexpr AmdGpu::Image GetSharp(const Info& info) const noexcept;
};
using ImageResourceList = boost::container::static_vector<ImageResource, 16>;
using ImageResourceList = boost::container::small_vector<ImageResource, 16>;
struct SamplerResource {
u32 sgpr_base;
@ -104,9 +119,9 @@ struct SamplerResource {
u32 associated_image : 4;
u32 disable_aniso : 1;
constexpr AmdGpu::Sampler GetSsharp(const Info& info) const noexcept;
constexpr AmdGpu::Sampler GetSharp(const Info& info) const noexcept;
};
using SamplerResourceList = boost::container::static_vector<SamplerResource, 16>;
using SamplerResourceList = boost::container::small_vector<SamplerResource, 16>;
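Swapping static_vector for small_vector keeps the inline fast path for up to 16 descriptors while allowing shaders with more bindings to spill to the heap instead of overflowing a fixed capacity — e.g.:

    boost::container::small_vector<int, 16> v;
    for (int i = 0; i < 32; ++i) {
        v.push_back(i); // first 16 elements stay inline; the rest allocate
    }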
struct PushData {
static constexpr size_t BufOffsetIndex = 2;
@ -179,6 +194,7 @@ struct Info {
s8 instance_offset_sgpr = -1;
BufferResourceList buffers;
TextureBufferResourceList texture_buffers;
ImageResourceList images;
SamplerResourceList samplers;
@ -194,9 +210,12 @@ struct Info {
u64 pgm_hash{};
u32 shared_memory_size{};
bool has_storage_images{};
bool has_image_buffers{};
bool has_texel_buffers{};
bool has_discard{};
bool has_image_gather{};
bool has_image_query{};
bool uses_lane_id{};
bool uses_group_quad{};
bool uses_shared{};
bool uses_fp16{};
@ -214,6 +233,10 @@ struct Info {
return data;
}
size_t NumBindings() const noexcept {
return buffers.size() + texture_buffers.size() + images.size() + samplers.size();
}
[[nodiscard]] std::pair<u32, u32> GetDrawOffsets() const noexcept {
u32 vertex_offset = 0;
u32 instance_offset = 0;
@ -227,11 +250,19 @@ struct Info {
}
};
constexpr AmdGpu::Buffer BufferResource::GetVsharp(const Info& info) const noexcept {
constexpr AmdGpu::Buffer BufferResource::GetSharp(const Info& info) const noexcept {
return inline_cbuf ? inline_cbuf : info.ReadUd<AmdGpu::Buffer>(sgpr_base, dword_offset);
}
constexpr AmdGpu::Sampler SamplerResource::GetSsharp(const Info& info) const noexcept {
constexpr AmdGpu::Buffer TextureBufferResource::GetSharp(const Info& info) const noexcept {
return info.ReadUd<AmdGpu::Buffer>(sgpr_base, dword_offset);
}
constexpr AmdGpu::Image ImageResource::GetSharp(const Info& info) const noexcept {
return info.ReadUd<AmdGpu::Image>(sgpr_base, dword_offset);
}
constexpr AmdGpu::Sampler SamplerResource::GetSharp(const Info& info) const noexcept {
return inline_sampler ? inline_sampler : info.ReadUd<AmdGpu::Sampler>(sgpr_base, dword_offset);
}
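Typical consumption of the renamed accessors might look like this (a sketch; 'info' and 'binding' are assumed to be in scope):

    const auto& desc = info.buffers[binding];
    const AmdGpu::Buffer buffer = desc.GetSharp(info);
    // The storage-vs-uniform decision now needs the sharp, since the size
    // check moved from resource tracking into IsStorage().
    const bool is_storage = desc.IsStorage(buffer);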