mirror of
https://github.com/shadps4-emu/shadPS4.git
synced 2025-07-12 12:45:56 +00:00
buffer_atomic_imax_x2 (#3130)
Some checks are pending
Build and Release / linux-qt-gcc (push) Blocked by required conditions
Build and Release / pre-release (push) Blocked by required conditions
Build and Release / reuse (push) Waiting to run
Build and Release / clang-format (push) Waiting to run
Build and Release / get-info (push) Waiting to run
Build and Release / windows-sdl (push) Blocked by required conditions
Build and Release / linux-sdl-gcc (push) Blocked by required conditions
Build and Release / windows-qt (push) Blocked by required conditions
Build and Release / macos-sdl (push) Blocked by required conditions
Build and Release / macos-qt (push) Blocked by required conditions
Build and Release / linux-sdl (push) Blocked by required conditions
Build and Release / linux-qt (push) Blocked by required conditions
Some checks are pending
Build and Release / linux-qt-gcc (push) Blocked by required conditions
Build and Release / pre-release (push) Blocked by required conditions
Build and Release / reuse (push) Waiting to run
Build and Release / clang-format (push) Waiting to run
Build and Release / get-info (push) Waiting to run
Build and Release / windows-sdl (push) Blocked by required conditions
Build and Release / linux-sdl-gcc (push) Blocked by required conditions
Build and Release / windows-qt (push) Blocked by required conditions
Build and Release / macos-sdl (push) Blocked by required conditions
Build and Release / macos-qt (push) Blocked by required conditions
Build and Release / linux-sdl (push) Blocked by required conditions
Build and Release / linux-qt (push) Blocked by required conditions
* buffer_atomic_imax_x2
* Define Int64Atomics SPIR-V capability
This commit is contained in:
parent
77117abb31
commit
1757dfaf5a
15 changed files with 84 additions and 5 deletions
|
@ -310,6 +310,19 @@ void SetupCapabilities(const Info& info, const Profile& profile, EmitContext& ct
|
|||
ctx.AddCapability(spv::Capability::WorkgroupMemoryExplicitLayoutKHR);
|
||||
ctx.AddCapability(spv::Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR);
|
||||
}
|
||||
if (info.uses_buffer_int64_atomics || info.uses_shared_int64_atomics) {
|
||||
if (info.uses_buffer_int64_atomics) {
|
||||
ASSERT_MSG(ctx.profile.supports_buffer_int64_atomics,
|
||||
"Shader requires support for atomic Int64 buffer operations that your "
|
||||
"Vulkan instance does not advertise");
|
||||
}
|
||||
if (info.uses_shared_int64_atomics) {
|
||||
ASSERT_MSG(ctx.profile.supports_shared_int64_atomics,
|
||||
"Shader requires support for atomic Int64 shared memory operations that "
|
||||
"your Vulkan instance does not advertise");
|
||||
}
|
||||
ctx.AddCapability(spv::Capability::Int64Atomics);
|
||||
}
|
||||
}
|
||||
|
||||
void DefineEntryPoint(const Info& info, EmitContext& ctx, Id main) {
|
||||
|
|
|
@ -226,10 +226,18 @@ Id EmitBufferAtomicSMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addre
|
|||
return BufferAtomicU32(ctx, inst, handle, address, value, &Sirit::Module::OpAtomicSMax);
|
||||
}
|
||||
|
||||
Id EmitBufferAtomicSMax64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
    // 64-bit signed-max buffer atomic: delegate to the shared U64 helper with
    // the SPIR-V OpAtomicSMax emitter selected.
    constexpr auto signed_max = &Sirit::Module::OpAtomicSMax;
    return BufferAtomicU64(ctx, inst, handle, address, value, signed_max);
}
|
||||
|
||||
Id EmitBufferAtomicUMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
    // 32-bit unsigned-max buffer atomic: delegate to the shared U32 helper with
    // the SPIR-V OpAtomicUMax emitter selected.
    constexpr auto unsigned_max = &Sirit::Module::OpAtomicUMax;
    return BufferAtomicU32(ctx, inst, handle, address, value, unsigned_max);
}
|
||||
|
||||
Id EmitBufferAtomicUMax64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
    // 64-bit unsigned-max buffer atomic: delegate to the shared U64 helper with
    // the SPIR-V OpAtomicUMax emitter selected.
    constexpr auto unsigned_max = &Sirit::Module::OpAtomicUMax;
    return BufferAtomicU64(ctx, inst, handle, address, value, unsigned_max);
}
|
||||
|
||||
Id EmitBufferAtomicFMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
|
||||
if (ctx.profile.supports_buffer_fp32_atomic_min_max) {
|
||||
return BufferAtomicU32<true>(ctx, inst, handle, address, value,
|
||||
|
|
|
@ -94,7 +94,9 @@ Id EmitBufferAtomicSMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addre
|
|||
Id EmitBufferAtomicUMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicFMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicSMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicSMax64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicUMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicUMax64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicFMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
|
||||
Id EmitBufferAtomicInc32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
|
||||
Id EmitBufferAtomicDec32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
|
||||
|
|
|
@ -291,6 +291,7 @@ public:
|
|||
void BUFFER_LOAD(u32 num_dwords, bool is_inst_typed, bool is_buffer_typed, const GcnInst& inst);
|
||||
void BUFFER_STORE(u32 num_dwords, bool is_inst_typed, bool is_buffer_typed,
|
||||
const GcnInst& inst);
|
||||
template <typename T = IR::U32>
|
||||
void BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst);
|
||||
|
||||
// Image Memory
|
||||
|
|
|
@ -78,8 +78,12 @@ void Translator::EmitVectorMemory(const GcnInst& inst) {
|
|||
return BUFFER_ATOMIC(AtomicOp::Umin, inst);
|
||||
case Opcode::BUFFER_ATOMIC_SMAX:
|
||||
return BUFFER_ATOMIC(AtomicOp::Smax, inst);
|
||||
case Opcode::BUFFER_ATOMIC_SMAX_X2:
|
||||
return BUFFER_ATOMIC<IR::U64>(AtomicOp::Smax, inst);
|
||||
case Opcode::BUFFER_ATOMIC_UMAX:
|
||||
return BUFFER_ATOMIC(AtomicOp::Umax, inst);
|
||||
case Opcode::BUFFER_ATOMIC_UMAX_X2:
|
||||
return BUFFER_ATOMIC<IR::U64>(AtomicOp::Umax, inst);
|
||||
case Opcode::BUFFER_ATOMIC_AND:
|
||||
return BUFFER_ATOMIC(AtomicOp::And, inst);
|
||||
case Opcode::BUFFER_ATOMIC_OR:
|
||||
|
@ -304,6 +308,7 @@ void Translator::BUFFER_STORE(u32 num_dwords, bool is_inst_typed, bool is_buffer
|
|||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void Translator::BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst) {
|
||||
const auto& mubuf = inst.control.mubuf;
|
||||
const IR::VectorReg vaddr{inst.src[0].code};
|
||||
|
@ -328,7 +333,17 @@ void Translator::BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst) {
|
|||
buffer_info.globally_coherent.Assign(mubuf.glc);
|
||||
buffer_info.system_coherent.Assign(mubuf.slc);
|
||||
|
||||
IR::Value vdata_val = ir.GetVectorReg<Shader::IR::U32>(vdata);
|
||||
IR::Value vdata_val = [&] {
|
||||
if constexpr (std::is_same_v<T, IR::U32>) {
|
||||
return ir.GetVectorReg<Shader::IR::U32>(vdata);
|
||||
} else if constexpr (std::is_same_v<T, IR::U64>) {
|
||||
return ir.PackUint2x32(
|
||||
ir.CompositeConstruct(ir.GetVectorReg<Shader::IR::U32>(vdata),
|
||||
ir.GetVectorReg<Shader::IR::U32>(vdata + 1)));
|
||||
} else {
|
||||
static_assert(false, "buffer_atomic: type not supported");
|
||||
}
|
||||
}();
|
||||
const IR::Value handle =
|
||||
ir.CompositeConstruct(ir.GetScalarReg(srsrc), ir.GetScalarReg(srsrc + 1),
|
||||
ir.GetScalarReg(srsrc + 2), ir.GetScalarReg(srsrc + 3));
|
||||
|
|
|
@ -226,6 +226,8 @@ struct Info {
|
|||
bool uses_fp64{};
|
||||
bool uses_pack_10_11_11{};
|
||||
bool uses_unpack_10_11_11{};
|
||||
bool uses_buffer_int64_atomics{};
|
||||
bool uses_shared_int64_atomics{};
|
||||
bool stores_tess_level_outer{};
|
||||
bool stores_tess_level_inner{};
|
||||
bool translation_failed{};
|
||||
|
|
|
@ -511,8 +511,16 @@ Value IREmitter::BufferAtomicFMin(const Value& handle, const Value& address, con
|
|||
|
||||
Value IREmitter::BufferAtomicIMax(const Value& handle, const Value& address, const Value& value,
                                  bool is_signed, BufferInstInfo info) {
    // NOTE(review): the pasted block contained the pre-patch unconditional
    // 32-bit return ahead of the switch, leaving the switch unreachable dead
    // code; only the width-dispatching switch is kept here.
    //
    // Dispatch on operand width: 32-bit and 64-bit integer-max atomics map to
    // distinct IR opcodes so the backend can request 64-bit atomic support
    // only when a 64-bit variant is actually used.
    switch (value.Type()) {
    case Type::U32:
        return is_signed ? Inst(Opcode::BufferAtomicSMax32, Flags{info}, handle, address, value)
                         : Inst(Opcode::BufferAtomicUMax32, Flags{info}, handle, address, value);
    case Type::U64:
        return is_signed ? Inst(Opcode::BufferAtomicSMax64, Flags{info}, handle, address, value)
                         : Inst(Opcode::BufferAtomicUMax64, Flags{info}, handle, address, value);
    default:
        // Any other operand type is a translator bug; reject it loudly.
        ThrowInvalidType(value.Type());
    }
}
|
||||
|
||||
Value IREmitter::BufferAtomicFMax(const Value& handle, const Value& address, const Value& value,
|
||||
|
|
|
@ -73,7 +73,9 @@ bool Inst::MayHaveSideEffects() const noexcept {
|
|||
case Opcode::BufferAtomicUMin32:
|
||||
case Opcode::BufferAtomicFMin32:
|
||||
case Opcode::BufferAtomicSMax32:
|
||||
case Opcode::BufferAtomicSMax64:
|
||||
case Opcode::BufferAtomicUMax32:
|
||||
case Opcode::BufferAtomicUMax64:
|
||||
case Opcode::BufferAtomicFMax32:
|
||||
case Opcode::BufferAtomicInc32:
|
||||
case Opcode::BufferAtomicDec32:
|
||||
|
|
|
@ -127,7 +127,9 @@ OPCODE(BufferAtomicSMin32, U32, Opaq
|
|||
OPCODE(BufferAtomicUMin32, U32, Opaque, Opaque, U32 )
|
||||
OPCODE(BufferAtomicFMin32, U32, Opaque, Opaque, F32 )
|
||||
OPCODE(BufferAtomicSMax32, U32, Opaque, Opaque, U32 )
|
||||
OPCODE(BufferAtomicSMax64, U64, Opaque, Opaque, U64 )
|
||||
OPCODE(BufferAtomicUMax32, U32, Opaque, Opaque, U32 )
|
||||
OPCODE(BufferAtomicUMax64, U64, Opaque, Opaque, U64 )
|
||||
OPCODE(BufferAtomicFMax32, U32, Opaque, Opaque, F32 )
|
||||
OPCODE(BufferAtomicInc32, U32, Opaque, Opaque, )
|
||||
OPCODE(BufferAtomicDec32, U32, Opaque, Opaque, )
|
||||
|
|
|
@ -23,7 +23,9 @@ bool IsBufferAtomic(const IR::Inst& inst) {
|
|||
case IR::Opcode::BufferAtomicUMin32:
|
||||
case IR::Opcode::BufferAtomicFMin32:
|
||||
case IR::Opcode::BufferAtomicSMax32:
|
||||
case IR::Opcode::BufferAtomicSMax64:
|
||||
case IR::Opcode::BufferAtomicUMax32:
|
||||
case IR::Opcode::BufferAtomicUMax64:
|
||||
case IR::Opcode::BufferAtomicFMax32:
|
||||
case IR::Opcode::BufferAtomicInc32:
|
||||
case IR::Opcode::BufferAtomicDec32:
|
||||
|
|
|
@ -53,9 +53,11 @@ void Visit(Info& info, const IR::Inst& inst) {
|
|||
case IR::Opcode::SharedAtomicXor32:
|
||||
info.shared_types |= IR::Type::U32;
|
||||
break;
|
||||
case IR::Opcode::SharedAtomicIAdd64:
|
||||
info.uses_shared_int64_atomics = true;
|
||||
[[fallthrough]];
|
||||
case IR::Opcode::LoadSharedU64:
|
||||
case IR::Opcode::WriteSharedU64:
|
||||
case IR::Opcode::SharedAtomicIAdd64:
|
||||
info.shared_types |= IR::Type::U64;
|
||||
break;
|
||||
case IR::Opcode::ConvertF16F32:
|
||||
|
@ -98,6 +100,11 @@ void Visit(Info& info, const IR::Inst& inst) {
|
|||
case IR::Opcode::BufferAtomicFMin32:
|
||||
info.uses_buffer_atomic_float_min_max = true;
|
||||
break;
|
||||
case IR::Opcode::BufferAtomicIAdd64:
|
||||
case IR::Opcode::BufferAtomicSMax64:
|
||||
case IR::Opcode::BufferAtomicUMax64:
|
||||
info.uses_buffer_int64_atomics = true;
|
||||
break;
|
||||
case IR::Opcode::LaneId:
|
||||
info.uses_lane_id = true;
|
||||
break;
|
||||
|
|
|
@ -30,6 +30,8 @@ struct Profile {
|
|||
bool supports_robust_buffer_access{};
|
||||
bool supports_buffer_fp32_atomic_min_max{};
|
||||
bool supports_image_fp32_atomic_min_max{};
|
||||
bool supports_buffer_int64_atomics{};
|
||||
bool supports_shared_int64_atomics{};
|
||||
bool supports_workgroup_explicit_memory_layout{};
|
||||
bool has_broken_spirv_clamp{};
|
||||
bool lower_left_origin_mode{};
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue