Mirror of https://github.com/shadps4-emu/shadPS4.git, synced 2025-05-23 20:05:01 +00:00.
shader_recompiler: Add swizzle support for unsupported formats. (#1869)
* shader_recompiler: Add swizzle support for unsupported formats.
* renderer_vulkan: Rework MRT swizzles and add unsupported format swizzle support.
* shader_recompiler: Clean up swizzle handling and handle ImageRead storage swizzle.
* shader_recompiler: Fix type errors.
* liverpool_to_vk: Remove redundant clear color swizzles.
* shader_recompiler: Reduce CompositeConstruct to constants where possible.
* shader_recompiler: Fix ImageRead/Write and StoreBufferFormatF32 types.
* amdgpu: Add a few more unsupported format remaps.
Commit 41d64a200d (parent 284f473a52). 22 changed files with 522 additions and 282 deletions.
@@ -6,16 +6,22 @@

 namespace Shader::Backend::SPIRV {

-Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2) {
-    return ctx.OpCompositeConstruct(ctx.U32[2], e1, e2);
+template <typename... Args>
+Id EmitCompositeConstruct(EmitContext& ctx, IR::Inst* inst, Args&&... args) {
+    return inst->AreAllArgsImmediates() ? ctx.ConstantComposite(args...)
+                                        : ctx.OpCompositeConstruct(args...);
+}
+
+Id EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2) {
+    return EmitCompositeConstruct(ctx, inst, ctx.U32[2], e1, e2);
 }

-Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
-    return ctx.OpCompositeConstruct(ctx.U32[3], e1, e2, e3);
+Id EmitCompositeConstructU32x3(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3) {
+    return EmitCompositeConstruct(ctx, inst, ctx.U32[3], e1, e2, e3);
 }

-Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
-    return ctx.OpCompositeConstruct(ctx.U32[4], e1, e2, e3, e4);
+Id EmitCompositeConstructU32x4(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3, Id e4) {
+    return EmitCompositeConstruct(ctx, inst, ctx.U32[4], e1, e2, e3, e4);
 }

 Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index) {

@@ -42,16 +48,30 @@ Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index
     return ctx.OpCompositeInsert(ctx.U32[4], object, composite, index);
 }

-Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2) {
-    return ctx.OpCompositeConstruct(ctx.F16[2], e1, e2);
+Id EmitCompositeShuffleU32x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1) {
+    return ctx.OpVectorShuffle(ctx.U32[2], composite1, composite2, comp0, comp1);
 }

-Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
-    return ctx.OpCompositeConstruct(ctx.F16[3], e1, e2, e3);
+Id EmitCompositeShuffleU32x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2) {
+    return ctx.OpVectorShuffle(ctx.U32[3], composite1, composite2, comp0, comp1, comp2);
 }

-Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
-    return ctx.OpCompositeConstruct(ctx.F16[4], e1, e2, e3, e4);
+Id EmitCompositeShuffleU32x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3) {
+    return ctx.OpVectorShuffle(ctx.U32[4], composite1, composite2, comp0, comp1, comp2, comp3);
+}
+
+Id EmitCompositeConstructF16x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2) {
+    return EmitCompositeConstruct(ctx, inst, ctx.F16[2], e1, e2);
+}
+
+Id EmitCompositeConstructF16x3(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3) {
+    return EmitCompositeConstruct(ctx, inst, ctx.F16[3], e1, e2, e3);
+}
+
+Id EmitCompositeConstructF16x4(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3, Id e4) {
+    return EmitCompositeConstruct(ctx, inst, ctx.F16[4], e1, e2, e3, e4);
 }

 Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index) {

@@ -78,16 +98,30 @@ Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index
     return ctx.OpCompositeInsert(ctx.F16[4], object, composite, index);
 }

-Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2) {
-    return ctx.OpCompositeConstruct(ctx.F32[2], e1, e2);
+Id EmitCompositeShuffleF16x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1) {
+    return ctx.OpVectorShuffle(ctx.F16[2], composite1, composite2, comp0, comp1);
 }

-Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
-    return ctx.OpCompositeConstruct(ctx.F32[3], e1, e2, e3);
+Id EmitCompositeShuffleF16x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2) {
+    return ctx.OpVectorShuffle(ctx.F16[3], composite1, composite2, comp0, comp1, comp2);
 }

-Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
-    return ctx.OpCompositeConstruct(ctx.F32[4], e1, e2, e3, e4);
+Id EmitCompositeShuffleF16x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3) {
+    return ctx.OpVectorShuffle(ctx.F16[4], composite1, composite2, comp0, comp1, comp2, comp3);
+}
+
+Id EmitCompositeConstructF32x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2) {
+    return EmitCompositeConstruct(ctx, inst, ctx.F32[2], e1, e2);
+}
+
+Id EmitCompositeConstructF32x3(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3) {
+    return EmitCompositeConstruct(ctx, inst, ctx.F32[3], e1, e2, e3);
+}
+
+Id EmitCompositeConstructF32x4(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3, Id e4) {
+    return EmitCompositeConstruct(ctx, inst, ctx.F32[4], e1, e2, e3, e4);
 }

 Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index) {

@@ -114,6 +148,20 @@ Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index
     return ctx.OpCompositeInsert(ctx.F32[4], object, composite, index);
 }

+Id EmitCompositeShuffleF32x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1) {
+    return ctx.OpVectorShuffle(ctx.F32[2], composite1, composite2, comp0, comp1);
+}
+
+Id EmitCompositeShuffleF32x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2) {
+    return ctx.OpVectorShuffle(ctx.F32[3], composite1, composite2, comp0, comp1, comp2);
+}
+
+Id EmitCompositeShuffleF32x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3) {
+    return ctx.OpVectorShuffle(ctx.F32[4], composite1, composite2, comp0, comp1, comp2, comp3);
+}
+
 void EmitCompositeConstructF64x2(EmitContext&) {
     UNREACHABLE_MSG("SPIR-V Instruction");
 }

@@ -150,4 +198,18 @@ Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index
     return ctx.OpCompositeInsert(ctx.F64[4], object, composite, index);
 }

+Id EmitCompositeShuffleF64x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1) {
+    return ctx.OpVectorShuffle(ctx.F64[2], composite1, composite2, comp0, comp1);
+}
+
+Id EmitCompositeShuffleF64x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2) {
+    return ctx.OpVectorShuffle(ctx.F64[3], composite1, composite2, comp0, comp1, comp2);
+}
+
+Id EmitCompositeShuffleF64x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3) {
+    return ctx.OpVectorShuffle(ctx.F64[4], composite1, composite2, comp0, comp1, comp2, comp3);
+}
+
 } // namespace Shader::Backend::SPIRV
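The helper at the top of this file is the "reduce CompositeConstruct to constants" change from the commit message: when every operand of the IR instruction is an immediate, the backend emits a reusable module-level constant instead of a per-use OpCompositeConstruct. A minimal standalone sketch of that dispatch (the Inst struct below is a stand-in, not the real IR::Inst, and the strings stand in for the Sirit calls):

    #include <cstdio>

    // Stand-in for IR::Inst: in the recompiler, AreAllArgsImmediates() reports
    // whether every operand of the instruction is a compile-time constant.
    struct Inst {
        bool all_immediates{};
        bool AreAllArgsImmediates() const { return all_immediates; }
    };

    // Mirrors the dispatch above: all-immediate composites become a hoisted
    // OpConstantComposite instead of an OpCompositeConstruct at the use site.
    const char* EmitCompositeConstruct(const Inst* inst) {
        return inst->AreAllArgsImmediates() ? "OpConstantComposite" : "OpCompositeConstruct";
    }

    int main() {
        const Inst constant{true};
        const Inst dynamic{false};
        std::printf("%s\n", EmitCompositeConstruct(&constant)); // OpConstantComposite
        std::printf("%s\n", EmitCompositeConstruct(&dynamic));  // OpCompositeConstruct
    }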
@@ -238,7 +238,7 @@ Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords, Id lod
         }
         texel = ctx.OpImageRead(color_type, image, coords, operands.mask, operands.operands);
     }
-    return !texture.is_integer ? ctx.OpBitcast(ctx.U32[4], texel) : texel;
+    return texture.is_integer ? ctx.OpBitcast(ctx.F32[4], texel) : texel;
 }

 void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords, Id lod, Id ms,

@@ -253,8 +253,8 @@ void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords, Id
     } else if (Sirit::ValidId(lod)) {
         LOG_WARNING(Render, "Image write with LOD not supported by driver");
     }
-    ctx.OpImageWrite(image, coords, ctx.OpBitcast(color_type, color), operands.mask,
-                     operands.operands);
+    const Id texel = texture.is_integer ? ctx.OpBitcast(color_type, color) : color;
+    ctx.OpImageWrite(image, coords, texel, operands.mask, operands.operands);
 }

 } // namespace Shader::Backend::SPIRV
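The EmitImageRead change above normalizes texel types: the IR now treats image texels as F32x4 everywhere, so results from integer-format images are reinterpreted bit-for-bit (OpBitcast) rather than value-converted. A minimal C++20 illustration of that bit-level reinterpretation, separate from the recompiler itself:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint32_t raw = 0x3F800000u;            // texel bits from an integer image
        const float as_float = std::bit_cast<float>(raw); // same bits, viewed as float
        // Prints: 0x3F800000 -> 1.000000
        std::printf("0x%08X -> %f\n", static_cast<unsigned>(raw), as_float);
    }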
@@ -120,33 +120,48 @@ Id EmitSharedAtomicSMin32(EmitContext& ctx, Id offset, Id value);
 Id EmitSharedAtomicAnd32(EmitContext& ctx, Id offset, Id value);
 Id EmitSharedAtomicOr32(EmitContext& ctx, Id offset, Id value);
 Id EmitSharedAtomicXor32(EmitContext& ctx, Id offset, Id value);
-Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2);
-Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
-Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
+Id EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2);
+Id EmitCompositeConstructU32x3(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3);
+Id EmitCompositeConstructU32x4(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3, Id e4);
 Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeExtractU32x3(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeExtractU32x4(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeInsertU32x2(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertU32x3(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index);
-Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2);
-Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3);
-Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
+Id EmitCompositeShuffleU32x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1);
+Id EmitCompositeShuffleU32x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2);
+Id EmitCompositeShuffleU32x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3);
+Id EmitCompositeConstructF16x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2);
+Id EmitCompositeConstructF16x3(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3);
+Id EmitCompositeConstructF16x4(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3, Id e4);
 Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeExtractF16x3(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeExtractF16x4(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeInsertF16x2(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertF16x3(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index);
-Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2);
-Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
-Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
+Id EmitCompositeShuffleF16x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1);
+Id EmitCompositeShuffleF16x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2);
+Id EmitCompositeShuffleF16x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3);
+Id EmitCompositeConstructF32x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2);
+Id EmitCompositeConstructF32x3(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3);
+Id EmitCompositeConstructF32x4(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2, Id e3, Id e4);
 Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeExtractF32x3(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeExtractF32x4(EmitContext& ctx, Id composite, u32 index);
 Id EmitCompositeInsertF32x2(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertF32x3(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeShuffleF32x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1);
+Id EmitCompositeShuffleF32x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2);
+Id EmitCompositeShuffleF32x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3);
 void EmitCompositeConstructF64x2(EmitContext& ctx);
 void EmitCompositeConstructF64x3(EmitContext& ctx);
 void EmitCompositeConstructF64x4(EmitContext& ctx);

@@ -156,6 +171,11 @@ void EmitCompositeExtractF64x4(EmitContext& ctx);
 Id EmitCompositeInsertF64x2(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertF64x3(EmitContext& ctx, Id composite, Id object, u32 index);
 Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeShuffleF64x2(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1);
+Id EmitCompositeShuffleF64x3(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2);
+Id EmitCompositeShuffleF64x4(EmitContext& ctx, Id composite1, Id composite2, u32 comp0, u32 comp1,
+                             u32 comp2, u32 comp3);
 Id EmitSelectU1(EmitContext& ctx, Id cond, Id true_value, Id false_value);
 Id EmitSelectU8(EmitContext& ctx, Id cond, Id true_value, Id false_value);
 Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
@@ -25,34 +25,28 @@ void Translator::EmitExport(const GcnInst& inst) {
         IR::VectorReg(inst.src[3].code),
     };

-    const auto swizzle = [&](u32 comp) {
+    const auto set_attribute = [&](u32 comp, IR::F32 value) {
         if (!IR::IsMrt(attrib)) {
-            return comp;
+            ir.SetAttribute(attrib, value, comp);
+            return;
         }
         const u32 index = u32(attrib) - u32(IR::Attribute::RenderTarget0);
-        switch (runtime_info.fs_info.color_buffers[index].mrt_swizzle) {
-        case MrtSwizzle::Identity:
-            return comp;
-        case MrtSwizzle::Alt:
-            static constexpr std::array<u32, 4> AltSwizzle = {2, 1, 0, 3};
-            return AltSwizzle[comp];
-        case MrtSwizzle::Reverse:
-            static constexpr std::array<u32, 4> RevSwizzle = {3, 2, 1, 0};
-            return RevSwizzle[comp];
-        case MrtSwizzle::ReverseAlt:
-            static constexpr std::array<u32, 4> AltRevSwizzle = {3, 0, 1, 2};
-            return AltRevSwizzle[comp];
-        default:
-            UNREACHABLE();
-        }
+        const auto [r, g, b, a] = runtime_info.fs_info.color_buffers[index].swizzle;
+        const std::array swizzle_array = {r, g, b, a};
+        const auto swizzled_comp = swizzle_array[comp];
+        if (u32(swizzled_comp) < u32(AmdGpu::CompSwizzle::Red)) {
+            ir.SetAttribute(attrib, value, comp);
+            return;
+        }
+        ir.SetAttribute(attrib, value, u32(swizzled_comp) - u32(AmdGpu::CompSwizzle::Red));
     };

     const auto unpack = [&](u32 idx) {
         const IR::Value value = ir.UnpackHalf2x16(ir.GetVectorReg(vsrc[idx]));
         const IR::F32 r = IR::F32{ir.CompositeExtract(value, 0)};
         const IR::F32 g = IR::F32{ir.CompositeExtract(value, 1)};
-        ir.SetAttribute(attrib, r, swizzle(idx * 2));
-        ir.SetAttribute(attrib, g, swizzle(idx * 2 + 1));
+        set_attribute(idx * 2, r);
+        set_attribute(idx * 2 + 1, g);
     };

     // Components are float16 packed into a VGPR

@@ -73,7 +67,7 @@ void Translator::EmitExport(const GcnInst& inst) {
             continue;
         }
         const IR::F32 comp = ir.GetVectorReg<IR::F32>(vsrc[i]);
-        ir.SetAttribute(attrib, comp, swizzle(i));
+        set_attribute(i, comp);
         }
     }
     if (IR::IsMrt(attrib)) {
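The new set_attribute helper redirects each exported component to the render-target slot named by the color buffer's swizzle, and leaves constant selectors (enum values below CompSwizzle::Red) in place. A self-contained sketch of the same index math, with enum values taken from the reinterpret.h comment later in this diff (constants at 0-1, components at 4-7) and an assumed BGRA mapping:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    enum class CompSwizzle : std::uint32_t {
        Zero = 0, One = 1, Red = 4, Green = 5, Blue = 6, Alpha = 7,
    };

    // Mirrors set_attribute's arithmetic: a write to export component `comp`
    // lands in the slot named by the swizzle; constant selectors keep the slot.
    std::uint32_t RemapComponent(const std::array<CompSwizzle, 4>& swizzle, std::uint32_t comp) {
        const auto sel = swizzle[comp];
        if (static_cast<std::uint32_t>(sel) < static_cast<std::uint32_t>(CompSwizzle::Red)) {
            return comp;
        }
        return static_cast<std::uint32_t>(sel) - static_cast<std::uint32_t>(CompSwizzle::Red);
    }

    int main() {
        // Hypothetical BGRA view of an RGBA export: red and blue swap slots.
        const std::array<CompSwizzle, 4> bgra{CompSwizzle::Blue, CompSwizzle::Green,
                                              CompSwizzle::Red, CompSwizzle::Alpha};
        for (unsigned c = 0; c < 4; ++c) {
            std::printf("export component %u -> slot %u\n", c,
                        static_cast<unsigned>(RemapComponent(bgra, c)));
        }
    }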
@@ -10,6 +10,7 @@
 #include "shader_recompiler/info.h"
 #include "shader_recompiler/ir/attribute.h"
 #include "shader_recompiler/ir/reg.h"
+#include "shader_recompiler/ir/reinterpret.h"
 #include "shader_recompiler/runtime_info.h"
 #include "video_core/amdgpu/resource.h"
 #include "video_core/amdgpu/types.h"

@@ -475,26 +476,12 @@ void Translator::EmitFetch(const GcnInst& inst) {

     // Read the V# of the attribute to figure out component number and type.
     const auto buffer = info.ReadUdReg<AmdGpu::Buffer>(attrib.sgpr_base, attrib.dword_offset);
+    const auto values =
+        ir.CompositeConstruct(ir.GetAttribute(attr, 0), ir.GetAttribute(attr, 1),
+                              ir.GetAttribute(attr, 2), ir.GetAttribute(attr, 3));
+    const auto swizzled = ApplySwizzle(ir, values, buffer.DstSelect());
     for (u32 i = 0; i < 4; i++) {
-        const IR::F32 comp = [&] {
-            switch (buffer.GetSwizzle(i)) {
-            case AmdGpu::CompSwizzle::One:
-                return ir.Imm32(1.f);
-            case AmdGpu::CompSwizzle::Zero:
-                return ir.Imm32(0.f);
-            case AmdGpu::CompSwizzle::Red:
-                return ir.GetAttribute(attr, 0);
-            case AmdGpu::CompSwizzle::Green:
-                return ir.GetAttribute(attr, 1);
-            case AmdGpu::CompSwizzle::Blue:
-                return ir.GetAttribute(attr, 2);
-            case AmdGpu::CompSwizzle::Alpha:
-                return ir.GetAttribute(attr, 3);
-            default:
-                UNREACHABLE();
-            }
-        }();
-        ir.SetVectorReg(dst_reg++, comp);
+        ir.SetVectorReg(dst_reg++, IR::F32{ir.CompositeExtract(swizzled, i)});
     }

     // In case of programmable step rates we need to fallback to instance data pulling in
@@ -326,7 +326,7 @@ void Translator::BUFFER_STORE_FORMAT(u32 num_dwords, const GcnInst& inst) {

     const IR::VectorReg src_reg{inst.src[1].code};

-    std::array<IR::Value, 4> comps{};
+    std::array<IR::F32, 4> comps{};
     for (u32 i = 0; i < num_dwords; i++) {
         comps[i] = ir.GetVectorReg<IR::F32>(src_reg + i);
     }

@@ -424,7 +424,7 @@ void Translator::IMAGE_LOAD(bool has_mip, const GcnInst& inst) {
         if (((mimg.dmask >> i) & 1) == 0) {
             continue;
         }
-        IR::U32 value = IR::U32{ir.CompositeExtract(texel, i)};
+        IR::F32 value = IR::F32{ir.CompositeExtract(texel, i)};
         ir.SetVectorReg(dest_reg++, value);
     }
 }
@@ -663,6 +663,86 @@ Value IREmitter::CompositeInsert(const Value& vector, const Value& object, size_
     }
 }

+Value IREmitter::CompositeShuffle(const Value& vector1, const Value& vector2, size_t comp0,
+                                  size_t comp1) {
+    if (vector1.Type() != vector2.Type()) {
+        UNREACHABLE_MSG("Mismatching types {} and {}", vector1.Type(), vector2.Type());
+    }
+    if (comp0 >= 4 || comp1 >= 4) {
+        UNREACHABLE_MSG("One or more out of bounds elements {}, {}", comp0, comp1);
+    }
+    const auto shuffle{[&](Opcode opcode) -> Value {
+        return Inst(opcode, vector1, vector2, Value{static_cast<u32>(comp0)},
+                    Value{static_cast<u32>(comp1)});
+    }};
+    switch (vector1.Type()) {
+    case Type::U32x4:
+        return shuffle(Opcode::CompositeShuffleU32x2);
+    case Type::F16x4:
+        return shuffle(Opcode::CompositeShuffleF16x2);
+    case Type::F32x4:
+        return shuffle(Opcode::CompositeShuffleF32x2);
+    case Type::F64x4:
+        return shuffle(Opcode::CompositeShuffleF64x2);
+    default:
+        ThrowInvalidType(vector1.Type());
+    }
+}
+
+Value IREmitter::CompositeShuffle(const Value& vector1, const Value& vector2, size_t comp0,
+                                  size_t comp1, size_t comp2) {
+    if (vector1.Type() != vector2.Type()) {
+        UNREACHABLE_MSG("Mismatching types {} and {}", vector1.Type(), vector2.Type());
+    }
+    if (comp0 >= 6 || comp1 >= 6 || comp2 >= 6) {
+        UNREACHABLE_MSG("One or more out of bounds elements {}, {}, {}", comp0, comp1, comp2);
+    }
+    const auto shuffle{[&](Opcode opcode) -> Value {
+        return Inst(opcode, vector1, vector2, Value{static_cast<u32>(comp0)},
+                    Value{static_cast<u32>(comp1)}, Value{static_cast<u32>(comp2)});
+    }};
+    switch (vector1.Type()) {
+    case Type::U32x4:
+        return shuffle(Opcode::CompositeShuffleU32x3);
+    case Type::F16x4:
+        return shuffle(Opcode::CompositeShuffleF16x3);
+    case Type::F32x4:
+        return shuffle(Opcode::CompositeShuffleF32x3);
+    case Type::F64x4:
+        return shuffle(Opcode::CompositeShuffleF64x3);
+    default:
+        ThrowInvalidType(vector1.Type());
+    }
+}
+
+Value IREmitter::CompositeShuffle(const Value& vector1, const Value& vector2, size_t comp0,
+                                  size_t comp1, size_t comp2, size_t comp3) {
+    if (vector1.Type() != vector2.Type()) {
+        UNREACHABLE_MSG("Mismatching types {} and {}", vector1.Type(), vector2.Type());
+    }
+    if (comp0 >= 8 || comp1 >= 8 || comp2 >= 8 || comp3 >= 8) {
+        UNREACHABLE_MSG("One or more out of bounds elements {}, {}, {}, {}", comp0, comp1, comp2,
+                        comp3);
+    }
+    const auto shuffle{[&](Opcode opcode) -> Value {
+        return Inst(opcode, vector1, vector2, Value{static_cast<u32>(comp0)},
+                    Value{static_cast<u32>(comp1)}, Value{static_cast<u32>(comp2)},
+                    Value{static_cast<u32>(comp3)});
+    }};
+    switch (vector1.Type()) {
+    case Type::U32x4:
+        return shuffle(Opcode::CompositeShuffleU32x4);
+    case Type::F16x4:
+        return shuffle(Opcode::CompositeShuffleF16x4);
+    case Type::F32x4:
+        return shuffle(Opcode::CompositeShuffleF32x4);
+    case Type::F64x4:
+        return shuffle(Opcode::CompositeShuffleF64x4);
+    default:
+        ThrowInvalidType(vector1.Type());
+    }
+}
+
 Value IREmitter::Select(const U1& condition, const Value& true_value, const Value& false_value) {
     if (true_value.Type() != false_value.Type()) {
         UNREACHABLE_MSG("Mismatching types {} and {}", true_value.Type(), false_value.Type());
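CompositeShuffle follows the OpVectorShuffle convention: indices 0..N-1 select from the first vector and N..2N-1 from the second, which is why the bounds checks above grow with the component count. A runnable model of that selection rule, using the constants-plus-texel layout that the new ApplySwizzle helper (later in this diff) relies on:

    #include <array>
    #include <cstddef>
    #include <cstdio>

    // Selects out[i] from a (indices 0..3) or b (indices 4..7), like OpVectorShuffle.
    std::array<float, 4> Shuffle(const std::array<float, 4>& a, const std::array<float, 4>& b,
                                 const std::array<std::size_t, 4>& comps) {
        std::array<float, 4> out{};
        for (std::size_t i = 0; i < 4; ++i) {
            out[i] = comps[i] < 4 ? a[comps[i]] : b[comps[i] - 4];
        }
        return out;
    }

    int main() {
        const std::array<float, 4> constants{0.f, 1.f, 0.f, 0.f}; // zero and one selectors
        const std::array<float, 4> texel{10.f, 20.f, 30.f, 40.f};
        // Pick {texel.b, texel.g, texel.r, one}, i.e. indices {6, 5, 4, 1}.
        const auto res = Shuffle(constants, texel, {6, 5, 4, 1});
        std::printf("%g %g %g %g\n", res[0], res[1], res[2], res[3]); // 30 20 10 1
    }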
@@ -155,6 +155,13 @@
     [[nodiscard]] Value CompositeExtract(const Value& vector, size_t element);
     [[nodiscard]] Value CompositeInsert(const Value& vector, const Value& object, size_t element);

+    [[nodiscard]] Value CompositeShuffle(const Value& vector1, const Value& vector2, size_t comp0,
+                                         size_t comp1);
+    [[nodiscard]] Value CompositeShuffle(const Value& vector1, const Value& vector2, size_t comp0,
+                                         size_t comp1, size_t comp2);
+    [[nodiscard]] Value CompositeShuffle(const Value& vector1, const Value& vector2, size_t comp0,
+                                         size_t comp1, size_t comp2, size_t comp3);
+
     [[nodiscard]] Value Select(const U1& condition, const Value& true_value,
                                const Value& false_value);
@@ -99,7 +99,7 @@ OPCODE(StoreBufferU32, Void, Opaq
 OPCODE(StoreBufferU32x2, Void, Opaque, Opaque, U32x2, )
 OPCODE(StoreBufferU32x3, Void, Opaque, Opaque, U32x3, )
 OPCODE(StoreBufferU32x4, Void, Opaque, Opaque, U32x4, )
-OPCODE(StoreBufferFormatF32, Void, Opaque, Opaque, U32x4, )
+OPCODE(StoreBufferFormatF32, Void, Opaque, Opaque, F32x4, )

 // Buffer atomic operations
 OPCODE(BufferAtomicIAdd32, U32, Opaque, Opaque, U32 )

@@ -124,6 +124,9 @@ OPCODE(CompositeExtractU32x4, U32, U32x
 OPCODE(CompositeInsertU32x2, U32x2, U32x2, U32, U32, )
 OPCODE(CompositeInsertU32x3, U32x3, U32x3, U32, U32, )
 OPCODE(CompositeInsertU32x4, U32x4, U32x4, U32, U32, )
+OPCODE(CompositeShuffleU32x2, U32x2, U32x2, U32x2, U32, U32, )
+OPCODE(CompositeShuffleU32x3, U32x3, U32x3, U32x3, U32, U32, U32, )
+OPCODE(CompositeShuffleU32x4, U32x4, U32x4, U32x4, U32, U32, U32, U32, )
 OPCODE(CompositeConstructF16x2, F16x2, F16, F16, )
 OPCODE(CompositeConstructF16x3, F16x3, F16, F16, F16, )
 OPCODE(CompositeConstructF16x4, F16x4, F16, F16, F16, F16, )

@@ -133,6 +136,9 @@ OPCODE(CompositeExtractF16x4, F16, F16x
 OPCODE(CompositeInsertF16x2, F16x2, F16x2, F16, U32, )
 OPCODE(CompositeInsertF16x3, F16x3, F16x3, F16, U32, )
 OPCODE(CompositeInsertF16x4, F16x4, F16x4, F16, U32, )
+OPCODE(CompositeShuffleF16x2, F16x2, F16x2, F16x2, U32, U32, )
+OPCODE(CompositeShuffleF16x3, F16x3, F16x3, F16x3, U32, U32, U32, )
+OPCODE(CompositeShuffleF16x4, F16x4, F16x4, F16x4, U32, U32, U32, U32, )
 OPCODE(CompositeConstructF32x2, F32x2, F32, F32, )
 OPCODE(CompositeConstructF32x3, F32x3, F32, F32, F32, )
 OPCODE(CompositeConstructF32x4, F32x4, F32, F32, F32, F32, )

@@ -142,6 +148,9 @@ OPCODE(CompositeExtractF32x4, F32, F32x
 OPCODE(CompositeInsertF32x2, F32x2, F32x2, F32, U32, )
 OPCODE(CompositeInsertF32x3, F32x3, F32x3, F32, U32, )
 OPCODE(CompositeInsertF32x4, F32x4, F32x4, F32, U32, )
+OPCODE(CompositeShuffleF32x2, F32x2, F32x2, F32x2, U32, U32, )
+OPCODE(CompositeShuffleF32x3, F32x3, F32x3, F32x3, U32, U32, U32, )
+OPCODE(CompositeShuffleF32x4, F32x4, F32x4, F32x4, U32, U32, U32, U32, )
 OPCODE(CompositeConstructF64x2, F64x2, F64, F64, )
 OPCODE(CompositeConstructF64x3, F64x3, F64, F64, F64, )
 OPCODE(CompositeConstructF64x4, F64x4, F64, F64, F64, F64, )

@@ -151,6 +160,9 @@ OPCODE(CompositeExtractF64x4, F64, F64x
 OPCODE(CompositeInsertF64x2, F64x2, F64x2, F64, U32, )
 OPCODE(CompositeInsertF64x3, F64x3, F64x3, F64, U32, )
 OPCODE(CompositeInsertF64x4, F64x4, F64x4, F64, U32, )
+OPCODE(CompositeShuffleF64x2, F64x2, F64x2, F64x2, U32, U32, )
+OPCODE(CompositeShuffleF64x3, F64x3, F64x3, F64x3, U32, U32, U32, )
+OPCODE(CompositeShuffleF64x4, F64x4, F64x4, F64x4, U32, U32, U32, U32, )

 // Select operations
 OPCODE(SelectU1, U1, U1, U1, U1, )

@@ -346,8 +358,8 @@ OPCODE(ImageGatherDref, F32x4, Opaq
 OPCODE(ImageQueryDimensions, U32x4, Opaque, U32, U1, )
 OPCODE(ImageQueryLod, F32x4, Opaque, Opaque, )
 OPCODE(ImageGradient, F32x4, Opaque, Opaque, Opaque, Opaque, Opaque, F32, )
-OPCODE(ImageRead, U32x4, Opaque, Opaque, U32, U32, )
-OPCODE(ImageWrite, Void, Opaque, Opaque, U32, U32, U32x4, )
+OPCODE(ImageRead, F32x4, Opaque, Opaque, U32, U32, )
+OPCODE(ImageWrite, Void, Opaque, Opaque, U32, U32, F32x4, )

 // Image atomic operations
 OPCODE(ImageAtomicIAdd32, U32, Opaque, Opaque, U32, )
@@ -8,6 +8,7 @@
 #include "shader_recompiler/ir/breadth_first_search.h"
 #include "shader_recompiler/ir/ir_emitter.h"
 #include "shader_recompiler/ir/program.h"
+#include "shader_recompiler/ir/reinterpret.h"
 #include "video_core/amdgpu/resource.h"

 namespace Shader::Optimization {

@@ -128,35 +129,6 @@ bool IsImageInstruction(const IR::Inst& inst) {
     }
 }

-IR::Value SwizzleVector(IR::IREmitter& ir, auto sharp, IR::Value texel) {
-    boost::container::static_vector<IR::Value, 4> comps;
-    for (u32 i = 0; i < 4; i++) {
-        switch (sharp.GetSwizzle(i)) {
-        case AmdGpu::CompSwizzle::Zero:
-            comps.emplace_back(ir.Imm32(0.f));
-            break;
-        case AmdGpu::CompSwizzle::One:
-            comps.emplace_back(ir.Imm32(1.f));
-            break;
-        case AmdGpu::CompSwizzle::Red:
-            comps.emplace_back(ir.CompositeExtract(texel, 0));
-            break;
-        case AmdGpu::CompSwizzle::Green:
-            comps.emplace_back(ir.CompositeExtract(texel, 1));
-            break;
-        case AmdGpu::CompSwizzle::Blue:
-            comps.emplace_back(ir.CompositeExtract(texel, 2));
-            break;
-        case AmdGpu::CompSwizzle::Alpha:
-            comps.emplace_back(ir.CompositeExtract(texel, 3));
-            break;
-        default:
-            UNREACHABLE();
-        }
-    }
-    return ir.CompositeConstruct(comps[0], comps[1], comps[2], comps[3]);
-};
-
 class Descriptors {
 public:
     explicit Descriptors(Info& info_)

@@ -409,15 +381,6 @@ void PatchTextureBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
     IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
     inst.SetArg(0, ir.Imm32(binding));
     ASSERT(!buffer.swizzle_enable && !buffer.add_tid_enable);
-
-    // Apply dst_sel swizzle on formatted buffer instructions
-    if (inst.GetOpcode() == IR::Opcode::StoreBufferFormatF32) {
-        inst.SetArg(2, SwizzleVector(ir, buffer, inst.Arg(2)));
-    } else {
-        const auto inst_info = inst.Flags<IR::BufferInstInfo>();
-        const auto texel = ir.LoadBufferFormat(inst.Arg(0), inst.Arg(1), inst_info);
-        inst.ReplaceUsesWith(SwizzleVector(ir, buffer, texel));
-    }
 }

 IR::Value PatchCubeCoord(IR::IREmitter& ir, const IR::Value& s, const IR::Value& t,

@@ -765,10 +728,6 @@ void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descrip
     }();
     inst.SetArg(1, coords);

-    if (inst.GetOpcode() == IR::Opcode::ImageWrite) {
-        inst.SetArg(4, SwizzleVector(ir, image, inst.Arg(4)));
-    }
-
     if (inst_info.has_lod) {
         ASSERT(inst.GetOpcode() == IR::Opcode::ImageRead ||
                inst.GetOpcode() == IR::Opcode::ImageWrite);

@@ -783,6 +742,50 @@ void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descrip
     }
 }

+void PatchTextureBufferInterpretation(IR::Block& block, IR::Inst& inst, Info& info) {
+    const auto binding = inst.Arg(0).U32();
+    const auto buffer_res = info.texture_buffers[binding];
+    const auto buffer = buffer_res.GetSharp(info);
+    if (!buffer.Valid()) {
+        // Don't need to swizzle invalid buffer.
+        return;
+    }
+
+    IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+    if (inst.GetOpcode() == IR::Opcode::StoreBufferFormatF32) {
+        inst.SetArg(2, ApplySwizzle(ir, inst.Arg(2), buffer.DstSelect()));
+    } else if (inst.GetOpcode() == IR::Opcode::LoadBufferFormatF32) {
+        const auto inst_info = inst.Flags<IR::BufferInstInfo>();
+        const auto texel = ir.LoadBufferFormat(inst.Arg(0), inst.Arg(1), inst_info);
+        const auto swizzled = ApplySwizzle(ir, texel, buffer.DstSelect());
+        inst.ReplaceUsesWith(swizzled);
+    }
+}
+
+void PatchImageInterpretation(IR::Block& block, IR::Inst& inst, Info& info) {
+    const auto binding = inst.Arg(0).U32();
+    const auto image_res = info.images[binding & 0xFFFF];
+    const auto image = image_res.GetSharp(info);
+    if (!image.Valid() || !image_res.IsStorage(image)) {
+        // Don't need to swizzle invalid or non-storage image.
+        return;
+    }
+
+    IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+    if (inst.GetOpcode() == IR::Opcode::ImageWrite) {
+        inst.SetArg(4, ApplySwizzle(ir, inst.Arg(4), image.DstSelect()));
+    } else if (inst.GetOpcode() == IR::Opcode::ImageRead) {
+        const auto inst_info = inst.Flags<IR::TextureInstInfo>();
+        const auto lod = inst.Arg(2);
+        const auto ms = inst.Arg(3);
+        const auto texel =
+            ir.ImageRead(inst.Arg(0), inst.Arg(1), lod.IsEmpty() ? IR::U32{} : IR::U32{lod},
+                         ms.IsEmpty() ? IR::U32{} : IR::U32{ms}, inst_info);
+        const auto swizzled = ApplySwizzle(ir, texel, image.DstSelect());
+        inst.ReplaceUsesWith(swizzled);
+    }
+}
+
 void PatchDataRingInstruction(IR::Block& block, IR::Inst& inst, Info& info,
                               Descriptors& descriptors) {
     // Insert gds binding in the shader if it doesn't exist already.

@@ -852,6 +855,19 @@ void ResourceTrackingPass(IR::Program& program) {
             }
         }
     }
+    // Second pass to reinterpret format read/write where needed, since we now know
+    // the bindings and their properties.
+    for (IR::Block* const block : program.blocks) {
+        for (IR::Inst& inst : block->Instructions()) {
+            if (IsTextureBufferInstruction(inst)) {
+                PatchTextureBufferInterpretation(*block, inst, info);
+                continue;
+            }
+            if (IsImageInstruction(inst)) {
+                PatchImageInterpretation(*block, inst, info);
+            }
+        }
+    }
 }

 } // namespace Shader::Optimization
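Running these interpretation fix-ups as a separate second pass appears deliberate: the first pass rewrites resource operands into binding indices, and only once every binding and its sharp are known can the pass tell whether a format load or store needs a swizzle at all. Note also the rewrite pattern: PatchImageInterpretation re-emits the read through the IREmitter and calls ReplaceUsesWith, so the swizzled texel reaches every consumer of the original instruction without touching call sites.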
src/shader_recompiler/ir/reinterpret.h (new file, 24 lines)

@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "shader_recompiler/ir/ir_emitter.h"
+#include "video_core/amdgpu/resource.h"
+
+namespace Shader::IR {
+
+/// Applies a component swizzle to a vec4.
+inline Value ApplySwizzle(IREmitter& ir, const Value& vector, const AmdGpu::CompMapping& swizzle) {
+    // Constants are indexed as 0 and 1, and components are 4-7. Thus we can apply a swizzle
+    // using two vectors and a shuffle, using one vector of constants and one of the components.
+    const auto zero = ir.Imm32(0.f);
+    const auto one = ir.Imm32(1.f);
+    const auto constants_vec = ir.CompositeConstruct(zero, one, zero, zero);
+    const auto swizzled =
+        ir.CompositeShuffle(constants_vec, vector, size_t(swizzle.r), size_t(swizzle.g),
+                            size_t(swizzle.b), size_t(swizzle.a));
+    return swizzled;
+}
+
+} // namespace Shader::IR
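Worked example of the trick in ApplySwizzle: because CompSwizzle's numeric values (constants at 0-1, components at 4-7, per the comment above) coincide with the shuffle's two-vector index space, the mapping can be fed to CompositeShuffle unchanged. With constants_vec = (0, 1, 0, 0) occupying indices 0-3 and the texel occupying 4-7, an assumed select of {r=Blue, g=Green, b=Red, a=One} becomes the index list {6, 5, 4, 1}, i.e. texel.b, texel.g, texel.r, then the 1.0f constant, with no per-component switch needed.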
@@ -180,7 +180,7 @@ struct FragmentRuntimeInfo {
     std::array<PsInput, 32> inputs;
     struct PsColorBuffer {
         AmdGpu::NumberFormat num_format;
-        MrtSwizzle mrt_swizzle;
+        AmdGpu::CompMapping swizzle;

         auto operator<=>(const PsColorBuffer&) const noexcept = default;
     };
@@ -31,7 +31,7 @@ struct BufferSpecialization {

 struct TextureBufferSpecialization {
     bool is_integer = false;
-    u32 dst_select = 0;
+    AmdGpu::CompMapping dst_select{};

     auto operator<=>(const TextureBufferSpecialization&) const = default;
 };

@@ -40,13 +40,9 @@ struct ImageSpecialization {
     AmdGpu::ImageType type = AmdGpu::ImageType::Color2D;
     bool is_integer = false;
     bool is_storage = false;
-    u32 dst_select = 0;
+    AmdGpu::CompMapping dst_select{};

-    bool operator==(const ImageSpecialization& other) const {
-        return type == other.type && is_integer == other.is_integer &&
-               is_storage == other.is_storage &&
-               (dst_select != 0 ? dst_select == other.dst_select : true);
-    }
+    auto operator<=>(const ImageSpecialization&) const = default;
 };

 struct FMaskSpecialization {