Mirror of https://github.com/shadps4-emu/shadPS4.git, synced 2025-07-13 05:05:57 +00:00
shader_recompiler: Various fixes to shared memory and atomics. (#3075)
* shader_recompiler: Various fixes to shared memory and atomics.
* shader_recompiler: Re-type non-32bit load/stores.
This commit is contained in:
parent
b49340dff8
commit
ca92e72efe
17 changed files with 391 additions and 227 deletions
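The core idea running through the hunks below: a load can be guarded against out-of-bounds access with a value select (out-of-bounds reads produce zero), while a store or atomic must be guarded with control flow so the side effect never executes at all. A minimal plain-C++ model of the two strategies, for orientation only (guarded_load/guarded_store are illustrative names, not code from this commit):

    #include <cstdint>
    #include <cstdio>

    // Select-style guard: models the outcome of OpSelect for loads.
    static uint32_t guarded_load(const uint32_t* buf, uint32_t index, uint32_t size) {
        return index < size ? buf[index] : 0u; // out-of-bounds reads zero
    }

    // Branch-style guard: the side effect must not execute out of bounds.
    static void guarded_store(uint32_t* buf, uint32_t index, uint32_t size, uint32_t value) {
        if (index < size) {
            buf[index] = value;
        }
    }

    int main() {
        uint32_t buf[4] = {1, 2, 3, 4};
        std::printf("%u\n", guarded_load(buf, 7, 4)); // prints 0
        guarded_store(buf, 7, 4, 42);                 // no write happens
    }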
src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp

@@ -27,6 +27,19 @@ Id SharedAtomicU32(EmitContext& ctx, Id offset, Id value,
     });
 }
 
+Id SharedAtomicU32IncDec(EmitContext& ctx, Id offset,
+                         Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id)) {
+    const Id shift_id{ctx.ConstU32(2U)};
+    const Id index{ctx.OpShiftRightLogical(ctx.U32[1], offset, shift_id)};
+    const u32 num_elements{Common::DivCeil(ctx.runtime_info.cs_info.shared_memory_size, 4u)};
+    const Id pointer{
+        ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, ctx.u32_zero_value, index)};
+    const auto [scope, semantics]{AtomicArgs(ctx)};
+    return AccessBoundsCheck<32>(ctx, index, ctx.ConstU32(num_elements), [&] {
+        return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics);
+    });
+}
+
 Id SharedAtomicU64(EmitContext& ctx, Id offset, Id value,
                    Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
     const Id shift_id{ctx.ConstU32(3U)};
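The `Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id)` parameter is a pointer-to-member-function, letting one helper emit either the increment or the decrement opcode. A self-contained sketch of the pattern (toy Module class, not the Sirit API):

    #include <cstdio>

    // Toy stand-in for Sirit::Module, just to demonstrate the dispatch pattern.
    struct Module {
        int OpAtomicIIncrement(int ptr) { std::printf("inc @%d\n", ptr); return 0; }
        int OpAtomicIDecrement(int ptr) { std::printf("dec @%d\n", ptr); return 0; }
    };

    // One helper serves both opcodes via a pointer-to-member-function.
    int EmitIncDec(Module& m, int ptr, int (Module::*atomic_func)(int)) {
        return (m.*atomic_func)(ptr); // same call syntax as (ctx.*atomic_func)(...) above
    }

    int main() {
        Module m;
        EmitIncDec(m, 4, &Module::OpAtomicIIncrement); // prints "inc @4"
        EmitIncDec(m, 8, &Module::OpAtomicIDecrement); // prints "dec @8"
    }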
@@ -40,19 +53,6 @@ Id SharedAtomicU64(EmitContext& ctx, Id offset, Id value,
     });
 }
 
-Id SharedAtomicU32_IncDec(EmitContext& ctx, Id offset,
-                          Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id)) {
-    const Id shift_id{ctx.ConstU32(2U)};
-    const Id index{ctx.OpShiftRightLogical(ctx.U32[1], offset, shift_id)};
-    const u32 num_elements{Common::DivCeil(ctx.runtime_info.cs_info.shared_memory_size, 4u)};
-    const Id pointer{
-        ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, ctx.u32_zero_value, index)};
-    const auto [scope, semantics]{AtomicArgs(ctx)};
-    return AccessBoundsCheck<32>(ctx, index, ctx.ConstU32(num_elements), [&] {
-        return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics);
-    });
-}
-
 Id BufferAtomicU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value,
                    Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
     const auto& buffer = ctx.buffers[handle];
@@ -68,6 +68,21 @@ Id BufferAtomicU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id
     });
 }
 
+Id BufferAtomicU32IncDec(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address,
+                         Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id)) {
+    const auto& buffer = ctx.buffers[handle];
+    if (Sirit::ValidId(buffer.offset)) {
+        address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
+    }
+    const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
+    const auto [id, pointer_type] = buffer[EmitContext::PointerType::U32];
+    const Id ptr = ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index);
+    const auto [scope, semantics]{AtomicArgs(ctx)};
+    return AccessBoundsCheck<32>(ctx, index, buffer.size_dwords, [&] {
+        return (ctx.*atomic_func)(ctx.U32[1], ptr, scope, semantics);
+    });
+}
+
 Id BufferAtomicU32CmpSwap(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value,
                           Id cmp_value,
                           Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id, Id, Id)) {
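Both helpers turn a byte address into a 32-bit element index with a right shift by 2 (divide by 4) and express the bound in elements; `Common::DivCeil` rounds the size up so a partial trailing dword still counts as addressable. The arithmetic in isolation, with DivCeil reimplemented under its assumed rounding-up semantics:

    #include <cstdint>
    #include <cstdio>

    // Rounding-up division, matching how Common::DivCeil is used above (assumed semantics).
    constexpr uint32_t DivCeil(uint32_t n, uint32_t d) {
        return (n + d - 1) / d;
    }

    int main() {
        const uint32_t byte_offset = 26;
        const uint32_t index = byte_offset >> 2;  // dword index 6, same as offset / 4
        const uint32_t shared_memory_size = 18;   // bytes
        const uint32_t num_elements = DivCeil(shared_memory_size, 4u); // 5 dwords, not 4
        std::printf("index=%u bound=%u in_bounds=%d\n", index, num_elements,
                    index < num_elements);
    }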
@@ -156,12 +171,12 @@ Id EmitSharedAtomicISub32(EmitContext& ctx, Id offset, Id value) {
     return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicISub);
 }
 
-Id EmitSharedAtomicIIncrement32(EmitContext& ctx, Id offset) {
-    return SharedAtomicU32_IncDec(ctx, offset, &Sirit::Module::OpAtomicIIncrement);
+Id EmitSharedAtomicInc32(EmitContext& ctx, Id offset) {
+    return SharedAtomicU32IncDec(ctx, offset, &Sirit::Module::OpAtomicIIncrement);
 }
 
-Id EmitSharedAtomicIDecrement32(EmitContext& ctx, Id offset) {
-    return SharedAtomicU32_IncDec(ctx, offset, &Sirit::Module::OpAtomicIDecrement);
+Id EmitSharedAtomicDec32(EmitContext& ctx, Id offset) {
+    return SharedAtomicU32IncDec(ctx, offset, &Sirit::Module::OpAtomicIDecrement);
 }
 
 Id EmitBufferAtomicIAdd32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
@@ -172,6 +187,10 @@ Id EmitBufferAtomicIAdd64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addre
     return BufferAtomicU64(ctx, inst, handle, address, value, &Sirit::Module::OpAtomicIAdd);
 }
 
+Id EmitBufferAtomicISub32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+    return BufferAtomicU32(ctx, inst, handle, address, value, &Sirit::Module::OpAtomicISub);
+}
+
 Id EmitBufferAtomicSMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
     return BufferAtomicU32(ctx, inst, handle, address, value, &Sirit::Module::OpAtomicSMin);
 }
@@ -188,14 +207,12 @@ Id EmitBufferAtomicUMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addre
     return BufferAtomicU32(ctx, inst, handle, address, value, &Sirit::Module::OpAtomicUMax);
 }
 
-Id EmitBufferAtomicInc32(EmitContext&, IR::Inst*, u32, Id, Id) {
-    // TODO
-    UNREACHABLE_MSG("Unsupported BUFFER_ATOMIC opcode: ", IR::Opcode::BufferAtomicInc32);
+Id EmitBufferAtomicInc32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
+    return BufferAtomicU32IncDec(ctx, inst, handle, address, &Sirit::Module::OpAtomicIIncrement);
 }
 
-Id EmitBufferAtomicDec32(EmitContext&, IR::Inst*, u32, Id, Id) {
-    // TODO
-    UNREACHABLE_MSG("Unsupported BUFFER_ATOMIC opcode: ", IR::Opcode::BufferAtomicDec32);
+Id EmitBufferAtomicDec32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
+    return BufferAtomicU32IncDec(ctx, inst, handle, address, &Sirit::Module::OpAtomicIDecrement);
 }
 
 Id EmitBufferAtomicAnd32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
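The split into two member-pointer types mirrors SPIR-V itself: OpAtomicIAdd and friends take a data operand (result type, pointer, scope, semantics, value), while OpAtomicIIncrement and OpAtomicIDecrement take none, which is also why the rewritten EmitBufferAtomicInc32/Dec32 drop the now-unused value parameter. Sketched as type aliases (toy Module, not Sirit's actual class):

    struct Module; // toy forward declaration for the sketch
    using Id = unsigned;

    // Value-carrying atomics: result type, pointer, scope, semantics, value.
    using BinaryAtomic = Id (Module::*)(Id, Id, Id, Id, Id);
    // Increment/decrement: no value operand, so one fewer Id.
    using IncDecAtomic = Id (Module::*)(Id, Id, Id, Id);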
src/shader_recompiler/backend/spirv/emit_spirv_bounds.h

@@ -1,31 +1,54 @@
 // SPDX-FileCopyrightText: Copyright 2025 shadPS4 Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
-#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#pragma once
+
+#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
 
 namespace Shader::Backend::SPIRV {
 
-template <u32 bit_size>
-auto AccessBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, auto emit_func) {
-    Id zero_value{};
-    if constexpr (bit_size == 64) {
-        zero_value = ctx.u64_zero_value;
+template <u32 bit_size, u32 num_components = 1, bool is_float = false>
+std::tuple<Id, Id> ResolveTypeAndZero(EmitContext& ctx) {
+    Id result_type{};
+    Id zero_value{};
+    if constexpr (bit_size == 64 && num_components == 1 && !is_float) {
+        result_type = ctx.U64;
+        zero_value = ctx.u64_zero_value;
     } else if constexpr (bit_size == 32) {
-        zero_value = ctx.u32_zero_value;
-        result_type = ctx.U32[1];
-    } else if constexpr (bit_size == 16) {
-        zero_value = ctx.u16_zero_value;
+        if (is_float) {
+            result_type = ctx.F32[num_components];
+            zero_value = ctx.f32_zero_value;
+        } else {
+            result_type = ctx.U32[num_components];
+            zero_value = ctx.u32_zero_value;
+        }
+    } else if constexpr (bit_size == 16 && num_components == 1 && !is_float) {
+        result_type = ctx.U16;
+        zero_value = ctx.u16_zero_value;
+    } else if constexpr (bit_size == 8 && num_components == 1 && !is_float) {
+        result_type = ctx.U8;
+        zero_value = ctx.u8_zero_value;
     } else {
-        static_assert(false, "type not supported");
+        static_assert(false, "Type not supported.");
     }
+    if (num_components > 1) {
+        std::array<Id, num_components> zero_ids;
+        zero_ids.fill(zero_value);
+        zero_value = ctx.ConstantComposite(result_type, zero_ids);
+    }
+    return {result_type, zero_value};
+}
+
+template <u32 bit_size, u32 num_components = 1, bool is_float = false>
+auto AccessBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, auto emit_func) {
     if (Sirit::ValidId(buffer_size)) {
         // Bounds checking enabled, wrap in a conditional branch to make sure that
         // the atomic is not mistakenly executed when the index is out of bounds.
-        const Id in_bounds = ctx.OpULessThan(ctx.U1[1], index, buffer_size);
+        auto compare_index = index;
+        if (num_components > 1) {
+            compare_index = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(num_components - 1));
+        }
+        const Id in_bounds = ctx.OpULessThan(ctx.U1[1], compare_index, buffer_size);
         const Id ib_label = ctx.OpLabel();
         const Id end_label = ctx.OpLabel();
         ctx.OpSelectionMerge(end_label, spv::SelectionControlMask::MaskNone);
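ResolveTypeAndZero maps the three template parameters to a SPIR-V result type and a matching zero constant entirely at compile time; each instantiation collapses to a single branch. A standalone analog of the if-constexpr dispatch, returning type names instead of SPIR-V Ids:

    #include <cstdio>
    #include <tuple>

    template <unsigned bit_size, unsigned num_components = 1, bool is_float = false>
    std::tuple<const char*, const char*> ResolveTypeAndZeroNames() {
        if constexpr (bit_size == 64 && num_components == 1 && !is_float) {
            return {"u64", "0ul"};
        } else if constexpr (bit_size == 32) {
            return is_float ? std::tuple{"f32", "0.0f"} : std::tuple{"u32", "0u"};
        } else if constexpr (bit_size == 16 && num_components == 1 && !is_float) {
            return {"u16", "0us"};
        } else {
            // Unsupported combinations are rejected at compile time, like above.
            static_assert(bit_size == 8 && num_components == 1 && !is_float,
                          "Type not supported.");
            return {"u8", "0ub"};
        }
    }

    int main() {
        const auto [type, zero] = ResolveTypeAndZeroNames<32, 4, true>();
        std::printf("%s %s\n", type, zero); // f32 0.0f (zero is replicated per component)
    }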
@@ -36,6 +59,8 @@ auto AccessBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, auto emit_fun
         ctx.OpBranch(end_label);
         ctx.AddLabel(end_label);
         if (Sirit::ValidId(ib_result)) {
+            const auto [result_type, zero_value] =
+                ResolveTypeAndZero<bit_size, num_components, is_float>(ctx);
             return ctx.OpPhi(result_type, ib_result, ib_label, zero_value, last_label);
         } else {
             return Id{0};
@@ -45,4 +70,21 @@ auto AccessBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, auto emit_fun
     return emit_func();
 }
 
+template <u32 bit_size, u32 num_components = 1, bool is_float = false>
+static Id LoadAccessBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, Id result) {
+    if (Sirit::ValidId(buffer_size)) {
+        // Bounds checking enabled, wrap in a select.
+        auto compare_index = index;
+        if (num_components > 1) {
+            compare_index = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(num_components - 1));
+        }
+        const Id in_bounds = ctx.OpULessThan(ctx.U1[1], compare_index, buffer_size);
+        const auto [result_type, zero_value] =
+            ResolveTypeAndZero<bit_size, num_components, is_float>(ctx);
+        return ctx.OpSelect(result_type, in_bounds, result, zero_value);
+    }
+    // Bounds checking not enabled, just return the plain value.
+    return result;
+}
+
 } // namespace Shader::Backend::SPIRV
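For vector accesses, both guards compare index + (num_components - 1) against the size: the last element touched must be in bounds, not just the first. Worked through with plain integers:

    #include <cstdio>

    int main() {
        const unsigned size_dwords = 16;
        const unsigned index = 14;         // first element of a 4-component access
        const unsigned num_components = 4;
        const unsigned compare_index = index + (num_components - 1); // 17, last element
        std::printf("first ok: %d, whole access ok: %d\n",
                    index < size_dwords,          // 1: first element fits
                    compare_index < size_dwords); // 0: access straddles the end
    }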
src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp

@@ -11,6 +11,8 @@
 
 #include <magic_enum/magic_enum.hpp>
 
+#include "emit_spirv_bounds.h"
+
 namespace Shader::Backend::SPIRV {
 namespace {
 
@@ -239,8 +241,8 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp, Id index) {
     }
 
     if (IR::IsParam(attr)) {
-        const u32 index{u32(attr) - u32(IR::Attribute::Param0)};
-        const auto& param{ctx.input_params.at(index)};
+        const u32 param_index{u32(attr) - u32(IR::Attribute::Param0)};
+        const auto& param{ctx.input_params.at(param_index)};
         if (param.buffer_handle >= 0) {
             const auto step_rate = EmitReadStepRate(ctx, param.id.value);
             const auto offset = ctx.OpIAdd(
@@ -415,27 +417,6 @@ void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
     ctx.OpStore(pointer, value);
 }
 
-template <u32 N>
-static Id EmitLoadBufferBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, Id result,
-                                    bool is_float) {
-    if (Sirit::ValidId(buffer_size)) {
-        // Bounds checking enabled, wrap in a select.
-        const auto result_type = is_float ? ctx.F32[N] : ctx.U32[N];
-        auto compare_index = index;
-        auto zero_value = is_float ? ctx.f32_zero_value : ctx.u32_zero_value;
-        if (N > 1) {
-            compare_index = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(N - 1));
-            std::array<Id, N> zero_ids;
-            zero_ids.fill(zero_value);
-            zero_value = ctx.ConstantComposite(result_type, zero_ids);
-        }
-        const Id in_bounds = ctx.OpULessThan(ctx.U1[1], compare_index, buffer_size);
-        return ctx.OpSelect(result_type, in_bounds, result, zero_value);
-    }
-    // Bounds checking not enabled, just return the plain value.
-    return result;
-}
-
 template <u32 N, PointerType alias>
 static Id EmitLoadBufferB32xN(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     const auto flags = inst->Flags<IR::BufferInstInfo>();
@@ -454,8 +435,9 @@ static Id EmitLoadBufferB32xN(EmitContext& ctx, IR::Inst* inst, u32 handle, Id a
         const Id result_i = ctx.OpLoad(data_types[1], ptr_i);
         if (!flags.typed) {
             // Untyped loads have bounds checking per-component.
-            ids.push_back(EmitLoadBufferBoundsCheck<1>(ctx, index_i, spv_buffer.size_dwords,
-                                                       result_i, alias == PointerType::F32));
+            ids.push_back(LoadAccessBoundsCheck < 32, 1,
+                          alias ==
+                              PointerType::F32 > (ctx, index_i, spv_buffer.size_dwords, result_i));
         } else {
             ids.push_back(result_i);
         }
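The odd spacing in LoadAccessBoundsCheck < 32, 1, alias == PointerType::F32 > (...) appears to be clang-format line-wrapping an explicit template-argument list that contains a comparison; it still parses as an ordinary template call, equivalent to:

    ids.push_back(LoadAccessBoundsCheck<32, 1, alias == PointerType::F32>(
        ctx, index_i, spv_buffer.size_dwords, result_i));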
@@ -464,8 +446,8 @@ static Id EmitLoadBufferB32xN(EmitContext& ctx, IR::Inst* inst, u32 handle, Id a
     const Id result = N == 1 ? ids[0] : ctx.OpCompositeConstruct(data_types[N], ids);
     if (flags.typed) {
         // Typed loads have single bounds check for the whole load.
-        return EmitLoadBufferBoundsCheck<N>(ctx, index, spv_buffer.size_dwords, result,
-                                            alias == PointerType::F32);
+        return LoadAccessBoundsCheck < 32, N,
+               alias == PointerType::F32 > (ctx, index, spv_buffer.size_dwords, result);
     }
     return result;
 }
@@ -477,8 +459,8 @@ Id EmitLoadBufferU8(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     }
     const auto [id, pointer_type] = spv_buffer[PointerType::U8];
     const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, address)};
-    const Id result{ctx.OpUConvert(ctx.U32[1], ctx.OpLoad(ctx.U8, ptr))};
-    return EmitLoadBufferBoundsCheck<1>(ctx, address, spv_buffer.size, result, false);
+    const Id result{ctx.OpLoad(ctx.U8, ptr)};
+    return LoadAccessBoundsCheck<8>(ctx, address, spv_buffer.size, result);
 }
 
 Id EmitLoadBufferU16(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
@@ -489,8 +471,8 @@ Id EmitLoadBufferU16(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     const auto [id, pointer_type] = spv_buffer[PointerType::U16];
     const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(1u));
     const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
-    const Id result{ctx.OpUConvert(ctx.U32[1], ctx.OpLoad(ctx.U16, ptr))};
-    return EmitLoadBufferBoundsCheck<1>(ctx, index, spv_buffer.size_shorts, result, false);
+    const Id result{ctx.OpLoad(ctx.U16, ptr)};
+    return LoadAccessBoundsCheck<16>(ctx, index, spv_buffer.size_shorts, result);
 }
 
 Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
@@ -509,6 +491,18 @@ Id EmitLoadBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address)
     return EmitLoadBufferB32xN<4, PointerType::U32>(ctx, inst, handle, address);
 }
 
+Id EmitLoadBufferU64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
+    const auto& spv_buffer = ctx.buffers[handle];
+    if (Sirit::ValidId(spv_buffer.offset)) {
+        address = ctx.OpIAdd(ctx.U32[1], address, spv_buffer.offset);
+    }
+    const auto [id, pointer_type] = spv_buffer[PointerType::U64];
+    const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(3u));
+    const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u64_zero_value, index)};
+    const Id result{ctx.OpLoad(ctx.U64, ptr)};
+    return LoadAccessBoundsCheck<64>(ctx, index, spv_buffer.size_qwords, result);
+}
+
 Id EmitLoadBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     return EmitLoadBufferB32xN<1, PointerType::F32>(ctx, inst, handle, address);
 }
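With the re-typed paths, each load is emitted at its natural width, so the byte address is scaled to an element index by the element size: no shift for U8, >>1 for U16, >>2 for U32, >>3 for U64, with the bound expressed in the same units (size, size_shorts, size_dwords, size_qwords). As arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t address = 40;                 // byte address into the buffer
        std::printf("u8 index=%u\n", address);       // byte units: used directly
        std::printf("u16 index=%u\n", address >> 1); // 20, halfword units
        std::printf("u32 index=%u\n", address >> 2); // 10, dword units
        std::printf("u64 index=%u\n", address >> 3); // 5, qword units
    }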
@@ -529,29 +523,6 @@ Id EmitLoadBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addr
     UNREACHABLE_MSG("SPIR-V instruction");
 }
 
-template <u32 N>
-void EmitStoreBufferBoundsCheck(EmitContext& ctx, Id index, Id buffer_size, auto emit_func) {
-    if (Sirit::ValidId(buffer_size)) {
-        // Bounds checking enabled, wrap in a conditional branch.
-        auto compare_index = index;
-        if (N > 1) {
-            compare_index = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(N - 1));
-        }
-        const Id in_bounds = ctx.OpULessThan(ctx.U1[1], compare_index, buffer_size);
-        const Id in_bounds_label = ctx.OpLabel();
-        const Id merge_label = ctx.OpLabel();
-        ctx.OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone);
-        ctx.OpBranchConditional(in_bounds, in_bounds_label, merge_label);
-        ctx.AddLabel(in_bounds_label);
-        emit_func();
-        ctx.OpBranch(merge_label);
-        ctx.AddLabel(merge_label);
-        return;
-    }
-    // Bounds checking not enabled, just perform the store.
-    emit_func();
-}
-
 template <u32 N, PointerType alias>
 static void EmitStoreBufferB32xN(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address,
                                  Id value) {
@@ -569,19 +540,25 @@ static void EmitStoreBufferB32xN(EmitContext& ctx, IR::Inst* inst, u32 handle, I
         const Id index_i = i == 0 ? index : ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(i));
         const Id ptr_i = ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index_i);
         const Id value_i = N == 1 ? value : ctx.OpCompositeExtract(data_types[1], value, i);
-        auto store_i = [&]() { ctx.OpStore(ptr_i, value_i); };
+        auto store_i = [&] {
+            ctx.OpStore(ptr_i, value_i);
+            return Id{};
+        };
         if (!flags.typed) {
             // Untyped stores have bounds checking per-component.
-            EmitStoreBufferBoundsCheck<1>(ctx, index_i, spv_buffer.size_dwords, store_i);
+            AccessBoundsCheck<32, 1, alias == PointerType::F32>(
+                ctx, index_i, spv_buffer.size_dwords, store_i);
         } else {
             store_i();
         }
     }
+        return Id{};
     };
 
     if (flags.typed) {
         // Typed stores have single bounds check for the whole store.
-        EmitStoreBufferBoundsCheck<N>(ctx, index, spv_buffer.size_dwords, store);
+        AccessBoundsCheck<32, N, alias == PointerType::F32>(ctx, index, spv_buffer.size_dwords,
+                                                            store);
     } else {
         store();
     }
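AccessBoundsCheck joins the guarded and fall-through paths with an OpPhi when the callback produces a result, so store callbacks now return an empty Id{} to satisfy the same template; Sirit::ValidId(ib_result) presumably distinguishes the two cases. The shape of that calling contract, modeled standalone:

    #include <cstdio>

    using Id = unsigned; // Id{} == 0 models an invalid/absent SPIR-V id
    constexpr bool ValidId(Id id) { return id != 0; }

    // One guard template serves both cases: the callback's returned Id says
    // whether there is a result to merge (loads/atomics) or not (stores).
    template <typename F>
    Id BoundsCheck(bool in_bounds, F emit_func) {
        if (!in_bounds) {
            return Id{};
        }
        const Id result = emit_func();
        return ValidId(result) ? result : Id{}; // the real code emits an OpPhi here
    }

    int main() {
        const Id r1 = BoundsCheck(true, [] { return Id{42}; });          // atomic: has result
        const Id r2 = BoundsCheck(true, [] { /* store */ return Id{}; }); // store: no result
        std::printf("%u %u\n", r1, r2); // 42 0
    }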
@@ -594,8 +571,10 @@ void EmitStoreBufferU8(EmitContext& ctx, IR::Inst*, u32 handle, Id address, Id v
     }
     const auto [id, pointer_type] = spv_buffer[PointerType::U8];
     const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, address)};
-    const Id result{ctx.OpUConvert(ctx.U8, value)};
-    EmitStoreBufferBoundsCheck<1>(ctx, address, spv_buffer.size, [&] { ctx.OpStore(ptr, result); });
+    AccessBoundsCheck<8>(ctx, address, spv_buffer.size, [&] {
+        ctx.OpStore(ptr, value);
+        return Id{};
+    });
 }
 
 void EmitStoreBufferU16(EmitContext& ctx, IR::Inst*, u32 handle, Id address, Id value) {
@@ -606,9 +585,10 @@ void EmitStoreBufferU16(EmitContext& ctx, IR::Inst*, u32 handle, Id address, Id
     const auto [id, pointer_type] = spv_buffer[PointerType::U16];
     const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(1u));
     const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u32_zero_value, index)};
-    const Id result{ctx.OpUConvert(ctx.U16, value)};
-    EmitStoreBufferBoundsCheck<1>(ctx, index, spv_buffer.size_shorts,
-                                  [&] { ctx.OpStore(ptr, result); });
+    AccessBoundsCheck<16>(ctx, index, spv_buffer.size_shorts, [&] {
+        ctx.OpStore(ptr, value);
+        return Id{};
+    });
 }
 
 void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
@@ -627,6 +607,20 @@ void EmitStoreBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addre
     EmitStoreBufferB32xN<4, PointerType::U32>(ctx, inst, handle, address, value);
 }
 
+void EmitStoreBufferU64(EmitContext& ctx, IR::Inst*, u32 handle, Id address, Id value) {
+    const auto& spv_buffer = ctx.buffers[handle];
+    if (Sirit::ValidId(spv_buffer.offset)) {
+        address = ctx.OpIAdd(ctx.U32[1], address, spv_buffer.offset);
+    }
+    const auto [id, pointer_type] = spv_buffer[PointerType::U64];
+    const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(3u));
+    const Id ptr{ctx.OpAccessChain(pointer_type, id, ctx.u64_zero_value, index)};
+    AccessBoundsCheck<64>(ctx, index, spv_buffer.size_qwords, [&] {
+        ctx.OpStore(ptr, value);
+        return Id{};
+    });
+}
+
 void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
     EmitStoreBufferB32xN<1, PointerType::F32>(ctx, inst, handle, address, value);
 }
src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp

@@ -263,4 +263,12 @@ Id EmitConvertU32U16(EmitContext& ctx, Id value) {
     return ctx.OpUConvert(ctx.U32[1], value);
 }
 
+Id EmitConvertU8U32(EmitContext& ctx, Id value) {
+    return ctx.OpUConvert(ctx.U8, value);
+}
+
+Id EmitConvertU32U8(EmitContext& ctx, Id value) {
+    return ctx.OpUConvert(ctx.U32[1], value);
+}
+
 } // namespace Shader::Backend::SPIRV
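The new U8 conversions use OpUConvert in both directions: narrowing to 8 bits truncates to the low byte, widening zero-extends. The same semantics in plain C++:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t value = 0x1234;
        const uint8_t narrowed = static_cast<uint8_t>(value); // 0x34, like OpUConvert to U8
        const uint32_t widened = narrowed;                    // 0x34, zero-extended back
        std::printf("0x%02X 0x%08X\n", static_cast<unsigned>(narrowed), widened);
    }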
src/shader_recompiler/backend/spirv/emit_spirv_instructions.h

@@ -69,6 +69,7 @@ Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferU32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferU32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+Id EmitLoadBufferU64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
@@ -80,6 +81,7 @@ void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address
 void EmitStoreBufferU32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 void EmitStoreBufferU32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 void EmitStoreBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferU64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 void EmitStoreBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 void EmitStoreBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
@@ -87,12 +89,13 @@ void EmitStoreBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addre
 void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicIAdd32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicIAdd64(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+Id EmitBufferAtomicISub32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicSMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicUMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicSMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicUMax32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-Id EmitBufferAtomicInc32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-Id EmitBufferAtomicDec32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+Id EmitBufferAtomicInc32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+Id EmitBufferAtomicDec32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitBufferAtomicAnd32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicOr32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitBufferAtomicXor32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
@@ -136,8 +139,8 @@ Id EmitSharedAtomicSMin32(EmitContext& ctx, Id offset, Id value);
 Id EmitSharedAtomicAnd32(EmitContext& ctx, Id offset, Id value);
 Id EmitSharedAtomicOr32(EmitContext& ctx, Id offset, Id value);
 Id EmitSharedAtomicXor32(EmitContext& ctx, Id offset, Id value);
-Id EmitSharedAtomicIIncrement32(EmitContext& ctx, Id offset);
-Id EmitSharedAtomicIDecrement32(EmitContext& ctx, Id offset);
+Id EmitSharedAtomicInc32(EmitContext& ctx, Id offset);
+Id EmitSharedAtomicDec32(EmitContext& ctx, Id offset);
 Id EmitSharedAtomicISub32(EmitContext& ctx, Id offset, Id value);
 
 Id EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst* inst, Id e1, Id e2);
@@ -461,6 +464,8 @@ Id EmitConvertF64U32(EmitContext& ctx, Id value);
 Id EmitConvertF64U64(EmitContext& ctx, Id value);
 Id EmitConvertU16U32(EmitContext& ctx, Id value);
 Id EmitConvertU32U16(EmitContext& ctx, Id value);
+Id EmitConvertU8U32(EmitContext& ctx, Id value);
+Id EmitConvertU32U8(EmitContext& ctx, Id value);
 
 Id EmitImageSampleRaw(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address1, Id address2,
                       Id address3, Id address4);