mirror of https://github.com/shadps4-emu/shadPS4.git (synced 2025-05-18 17:34:52 +00:00)

video_core: Implement basic compute shaders and more instructions

This commit is contained in:
parent 10bceb1643
commit 58de7ff55a
58 changed files with 1234 additions and 293 deletions
@@ -173,10 +173,10 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
     spv::ExecutionModel execution_model{};
     switch (program.info.stage) {
     case Stage::Compute: {
-        // const std::array<u32, 3> workgroup_size{program.workgroup_size};
-        // execution_model = spv::ExecutionModel::GLCompute;
-        // ctx.AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0],
-        //                      workgroup_size[1], workgroup_size[2]);
+        const std::array<u32, 3> workgroup_size{program.info.workgroup_size};
+        execution_model = spv::ExecutionModel::GLCompute;
+        ctx.AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0],
+                             workgroup_size[1], workgroup_size[2]);
         break;
     }
     case Stage::Vertex:
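
Aside (not part of the diff): the hunk above feeds program.info.workgroup_size straight into the LocalSize execution mode of the GLCompute entry point. How those three literals relate to a dispatch is plain arithmetic, sketched below with hypothetical sizes; nothing in this sketch is shadPS4 code.

#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical numbers for illustration: a LocalSize of 8x8x1 (what the shader
// declares) and a dispatch of 16x16x1 workgroups (what the command buffer asks for).
int main() {
    const std::array<std::uint32_t, 3> workgroup_size{8, 8, 1}; // LocalSize x, y, z
    const std::array<std::uint32_t, 3> num_groups{16, 16, 1};   // dispatch group counts

    std::uint64_t invocations = 1;
    for (int i = 0; i < 3; ++i) {
        invocations *= static_cast<std::uint64_t>(workgroup_size[i]) * num_groups[i];
    }
    // 8*16 * 8*16 * 1*1 = 16384 total compute invocations.
    std::printf("total invocations: %llu\n", static_cast<unsigned long long>(invocations));
}
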
@@ -189,6 +189,7 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
     } else {
         ctx.AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft);
     }
+    ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
     // if (program.info.stores_frag_depth) {
     //     ctx.AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
     // }
@@ -249,7 +250,11 @@ Id EmitIdentity(EmitContext& ctx, const IR::Value& value) {
 }
 
 Id EmitConditionRef(EmitContext& ctx, const IR::Value& value) {
-    throw NotImplementedException("Forward identity declaration");
+    const Id id{ctx.Def(value)};
+    if (!Sirit::ValidId(id)) {
+        throw NotImplementedException("Forward identity declaration");
+    }
+    return id;
 }
 
 void EmitReference(EmitContext&) {}
@@ -258,23 +263,11 @@ void EmitPhiMove(EmitContext&) {
     throw LogicError("Unreachable instruction");
 }
 
-void EmitGetZeroFromOp(EmitContext&) {
+void EmitGetScc(EmitContext& ctx) {
     throw LogicError("Unreachable instruction");
 }
 
-void EmitGetSignFromOp(EmitContext&) {
-    throw LogicError("Unreachable instruction");
-}
-
-void EmitGetCarryFromOp(EmitContext&) {
-    throw LogicError("Unreachable instruction");
-}
-
-void EmitGetOverflowFromOp(EmitContext&) {
-    throw LogicError("Unreachable instruction");
-}
-
-void EmitSetVcc(EmitContext& ctx) {
+void EmitGetExec(EmitContext& ctx) {
     throw LogicError("Unreachable instruction");
 }
 
@@ -282,4 +275,24 @@ void EmitGetVcc(EmitContext& ctx) {
     throw LogicError("Unreachable instruction");
 }
 
+void EmitGetVccLo(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
+void EmitSetScc(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
+void EmitSetExec(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
+void EmitSetVcc(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
+void EmitSetVccLo(EmitContext& ctx) {
+    throw LogicError("Unreachable instruction");
+}
+
 } // namespace Shader::Backend::SPIRV
@@ -29,8 +29,8 @@ Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr, u32 element) {
 }
 } // Anonymous namespace
 
-void EmitGetUserData(EmitContext&) {
-    throw LogicError("Unreachable instruction");
+Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg) {
+    return ctx.ConstU32(ctx.info.user_data[static_cast<size_t>(reg)]);
 }
 
 void EmitGetScalarRegister(EmitContext&) {
@@ -62,10 +62,13 @@ Id EmitReadConst(EmitContext& ctx) {
 }
 
 Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
-    const Id buffer = ctx.buffers[handle];
-    const Id type = ctx.info.buffers[handle].is_storage ? ctx.storage_f32 : ctx.uniform_f32;
-    const Id ptr{ctx.OpAccessChain(type, buffer, ctx.ConstU32(0U), index)};
-    return ctx.OpLoad(ctx.F32[1], ptr);
+    const auto& buffer = ctx.buffers[handle];
+    const Id ptr{ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
+    return ctx.OpLoad(buffer.data_types->Get(1), ptr);
+}
+
+Id EmitReadConstBufferU32(EmitContext& ctx, u32 handle, Id index) {
+    return EmitReadConstBuffer(ctx, handle, index);
 }
 
 Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp) {
@@ -76,8 +79,12 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp) {
             // Attribute is disabled or varying component is not written
             return ctx.ConstF32(comp == 3 ? 1.0f : 0.0f);
         }
-        const Id pointer{ctx.OpAccessChain(param.pointer_type, param.id, ctx.ConstU32(comp))};
-        return ctx.OpLoad(param.component_type, pointer);
+        if (param.num_components > 1) {
+            const Id pointer{ctx.OpAccessChain(param.pointer_type, param.id, ctx.ConstU32(comp))};
+            return ctx.OpLoad(param.component_type, pointer);
+        } else {
+            return ctx.OpLoad(param.component_type, param.id);
+        }
     }
     throw NotImplementedException("Read attribute {}", attr);
 }
@@ -86,6 +93,11 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp) {
     switch (attr) {
     case IR::Attribute::VertexId:
         return ctx.OpLoad(ctx.U32[1], ctx.vertex_index);
+    case IR::Attribute::WorkgroupId:
+        return ctx.OpCompositeExtract(ctx.U32[1], ctx.OpLoad(ctx.U32[3], ctx.workgroup_id), comp);
+    case IR::Attribute::LocalInvocationId:
+        return ctx.OpCompositeExtract(ctx.U32[1], ctx.OpLoad(ctx.U32[3], ctx.local_invocation_id),
+                                      comp);
     default:
         throw NotImplementedException("Read U32 attribute {}", attr);
     }
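
Aside (not part of the diff): the new WorkgroupId and LocalInvocationId cases each extract one component of the corresponding uvec3 builtin. The usual relation GlobalInvocationId = WorkgroupId * WorkgroupSize + LocalInvocationId ties those builtins together; the standalone sketch below spells out that arithmetic with hypothetical values and is not code from the commit.

#include <array>
#include <cstdint>
#include <cstdio>

using uvec3 = std::array<std::uint32_t, 3>;

// GlobalInvocationId = WorkgroupId * WorkgroupSize + LocalInvocationId, per component.
uvec3 GlobalInvocationId(const uvec3& workgroup_id, const uvec3& workgroup_size,
                         const uvec3& local_invocation_id) {
    uvec3 global{};
    for (int i = 0; i < 3; ++i) {
        global[i] = workgroup_id[i] * workgroup_size[i] + local_invocation_id[i];
    }
    return global;
}

int main() {
    // Hypothetical values: workgroup (2, 3, 0) of size 8x8x1, local thread (5, 1, 0).
    const uvec3 global = GlobalInvocationId({2, 3, 0}, {8, 8, 1}, {5, 1, 0});
    std::printf("global id = (%u, %u, %u)\n", global[0], global[1], global[2]); // (21, 25, 0)
}
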
@@ -97,9 +109,22 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 elemen
 }
 
 Id EmitLoadBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
+    const auto info = inst->Flags<IR::BufferInstInfo>();
+    const auto& buffer = ctx.buffers[handle];
+    if (info.index_enable && info.offset_enable) {
+        UNREACHABLE();
+    } else if (info.index_enable) {
+        const Id ptr{
+            ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, address)};
+        return ctx.OpLoad(buffer.data_types->Get(1), ptr);
+    }
     UNREACHABLE();
 }
 
+Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
+    return EmitLoadBufferF32(ctx, inst, handle, address);
+}
+
 Id EmitLoadBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     UNREACHABLE();
 }
@@ -110,18 +135,48 @@ Id EmitLoadBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address)
 
 Id EmitLoadBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
     const auto info = inst->Flags<IR::BufferInstInfo>();
-    const Id buffer = ctx.buffers[handle];
-    const Id type = ctx.info.buffers[handle].is_storage ? ctx.storage_f32 : ctx.uniform_f32;
+    const auto& buffer = ctx.buffers[handle];
     if (info.index_enable && info.offset_enable) {
         UNREACHABLE();
     } else if (info.index_enable) {
         boost::container::static_vector<Id, 4> ids;
         for (u32 i = 0; i < 4; i++) {
             const Id index{ctx.OpIAdd(ctx.U32[1], address, ctx.ConstU32(i))};
-            const Id ptr{ctx.OpAccessChain(type, buffer, ctx.ConstU32(0U), index)};
-            ids.push_back(ctx.OpLoad(ctx.F32[1], ptr));
+            const Id ptr{
+                ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index)};
+            ids.push_back(ctx.OpLoad(buffer.data_types->Get(1), ptr));
         }
-        return ctx.OpCompositeConstruct(ctx.F32[4], ids);
+        return ctx.OpCompositeConstruct(buffer.data_types->Get(4), ids);
     }
     UNREACHABLE();
 }
+
+void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+    UNREACHABLE();
+}
+
+void EmitStoreBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+    UNREACHABLE();
+}
+
+void EmitStoreBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+    UNREACHABLE();
+}
+
+void EmitStoreBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+    UNREACHABLE();
+}
+
+void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+    const auto info = inst->Flags<IR::BufferInstInfo>();
+    const auto& buffer = ctx.buffers[handle];
+    if (info.index_enable && info.offset_enable) {
+        UNREACHABLE();
+    } else if (info.index_enable) {
+        const Id ptr{
+            ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, address)};
+        ctx.OpStore(ptr, value);
+        return;
+    }
+    UNREACHABLE();
+}
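
Aside (not part of the diff): the x4 load above issues four scalar loads at consecutive element indices (address + i) and composites them into a 4-wide vector. The standalone sketch below mirrors that addressing in plain C++; the Float4 alias and the sample buffer contents are illustration-only, not part of the commit.

#include <array>
#include <cstdint>
#include <cstdio>
#include <vector>

using Float4 = std::array<float, 4>;

// Equivalent of the x4 load path: `address` is an index in 32-bit elements, and the
// four components come from consecutive elements of the record array.
Float4 LoadBufferF32x4(const std::vector<float>& buffer, std::uint32_t address) {
    Float4 result{};
    for (std::uint32_t i = 0; i < 4; ++i) {
        result[i] = buffer[address + i];
    }
    return result;
}

int main() {
    const std::vector<float> buffer{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
    const Float4 v = LoadBufferF32x4(buffer, 2); // reads elements 2..5 -> (2, 3, 4, 5)
    std::printf("(%g, %g, %g, %g)\n", v[0], v[1], v[2], v[3]);
}
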
@@ -30,6 +30,10 @@ Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
     return ctx.OpFAdd(ctx.F64[1], a, b);
 }
 
+Id EmitFPSub32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+    return ctx.OpFSub(ctx.F32[1], a, b);
+}
+
 Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
     return ctx.OpFma(ctx.F16[1], a, b, c);
 }
@@ -196,6 +200,10 @@ Id EmitFPTrunc64(EmitContext& ctx, Id value) {
     return ctx.OpTrunc(ctx.F64[1], value);
 }
 
+Id EmitFPFract(EmitContext& ctx, Id value) {
+    return ctx.OpFract(ctx.F32[1], value);
+}
+
 Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs) {
     return ctx.OpFOrdEqual(ctx.U1[1], lhs, rhs);
 }
@@ -8,7 +8,7 @@
 
 namespace Shader::IR {
 enum class Attribute : u64;
-enum class Patch : u64;
+enum class ScalarReg : u32;
 class Inst;
 class Value;
 } // namespace Shader::IR
@@ -30,11 +30,18 @@ void EmitJoin(EmitContext& ctx);
 void EmitBarrier(EmitContext& ctx);
 void EmitWorkgroupMemoryBarrier(EmitContext& ctx);
 void EmitDeviceMemoryBarrier(EmitContext& ctx);
+void EmitGetScc(EmitContext& ctx);
+void EmitGetExec(EmitContext& ctx);
 void EmitGetVcc(EmitContext& ctx);
+void EmitGetVccLo(EmitContext& ctx);
+void EmitSetScc(EmitContext& ctx);
+void EmitSetExec(EmitContext& ctx);
 void EmitSetVcc(EmitContext& ctx);
+void EmitSetVccLo(EmitContext& ctx);
 void EmitPrologue(EmitContext& ctx);
 void EmitEpilogue(EmitContext& ctx);
-void EmitGetUserData(EmitContext& ctx);
+void EmitDiscard(EmitContext& ctx);
+Id EmitGetUserData(EmitContext& ctx, IR::ScalarReg reg);
 void EmitGetScalarRegister(EmitContext& ctx);
 void EmitSetScalarRegister(EmitContext& ctx);
 void EmitGetVectorRegister(EmitContext& ctx);
@@ -44,10 +51,17 @@ void EmitGetGotoVariable(EmitContext& ctx);
 void EmitSetScc(EmitContext& ctx);
 Id EmitReadConst(EmitContext& ctx);
 Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index);
+Id EmitReadConstBufferU32(EmitContext& ctx, u32 handle, Id index);
 Id EmitLoadBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
 Id EmitLoadBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
 Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, u32 comp);
 Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, u32 comp);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 comp);
@@ -137,6 +151,7 @@ Id EmitFPAbs64(EmitContext& ctx, Id value);
 Id EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
 Id EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
 Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPSub32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
 Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
 Id EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
 Id EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
@@ -177,6 +192,7 @@ Id EmitFPCeil64(EmitContext& ctx, Id value);
 Id EmitFPTrunc16(EmitContext& ctx, Id value);
 Id EmitFPTrunc32(EmitContext& ctx, Id value);
 Id EmitFPTrunc64(EmitContext& ctx, Id value);
+Id EmitFPFract(EmitContext& ctx, Id value);
 Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs);
 Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs);
 Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs);
@@ -10,6 +10,10 @@ void EmitPrologue(EmitContext& ctx) {}
 
 void EmitEpilogue(EmitContext& ctx) {}
 
+void EmitDiscard(EmitContext& ctx) {
+    ctx.OpDemoteToHelperInvocationEXT();
+}
+
 void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
     throw NotImplementedException("Geometry streams");
 }
@@ -194,6 +194,12 @@ void EmitContext::DefineInputs(const Info& info) {
             input_params[input.semantic] = {id, input_f32, F32[1], num_components};
             interfaces.push_back(id);
         }
+        break;
+    case Stage::Compute:
+        workgroup_id = DefineVariable(U32[3], spv::BuiltIn::WorkgroupId, spv::StorageClass::Input);
+        local_invocation_id =
+            DefineVariable(U32[3], spv::BuiltIn::LocalInvocationId, spv::StorageClass::Input);
+        break;
     default:
         break;
     }
@@ -233,10 +239,11 @@ void EmitContext::DefineOutputs(const Info& info) {
 
 void EmitContext::DefineBuffers(const Info& info) {
     for (u32 i = 0; const auto& buffer : info.buffers) {
-        ASSERT(True(buffer.used_types & IR::Type::F32));
-        ASSERT(buffer.stride % sizeof(float) == 0);
-        const u32 num_elements = buffer.stride * buffer.num_records / sizeof(float);
-        const Id record_array_type{TypeArray(F32[1], ConstU32(num_elements))};
+        const auto* data_types = True(buffer.used_types & IR::Type::F32) ? &F32 : &U32;
+        const Id data_type = (*data_types)[1];
+        const u32 stride = buffer.stride == 0 ? 1 : buffer.stride;
+        const u32 num_elements = stride * buffer.num_records;
+        const Id record_array_type{TypeArray(data_type, ConstU32(num_elements))};
         const Id struct_type{TypeStruct(record_array_type)};
         Decorate(record_array_type, spv::Decoration::ArrayStride, 4);
 
@@ -249,18 +256,18 @@ void EmitContext::DefineBuffers(const Info& info) {
         const auto storage_class =
             buffer.is_storage ? spv::StorageClass::StorageBuffer : spv::StorageClass::Uniform;
         const Id struct_pointer_type{TypePointer(storage_class, struct_type)};
-        if (buffer.is_storage) {
-            storage_f32 = TypePointer(storage_class, F32[1]);
-        } else {
-            uniform_f32 = TypePointer(storage_class, F32[1]);
-        }
+        const Id pointer_type = TypePointer(storage_class, data_type);
         const Id id{AddGlobalVariable(struct_pointer_type, storage_class)};
         Decorate(id, spv::Decoration::Binding, binding);
         Decorate(id, spv::Decoration::DescriptorSet, 0U);
-        Name(id, fmt::format("c{}", i));
+        Name(id, fmt::format("{}{}", buffer.is_storage ? "ssbo" : "cbuf", i));
 
         binding++;
-        buffers.push_back(id);
+        buffers.push_back({
+            .id = id,
+            .data_types = data_types,
+            .pointer_type = pointer_type,
+        });
         interfaces.push_back(id);
         i++;
     }
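
Aside (not part of the diff): the reworked DefineBuffers picks one scalar element type per buffer (f32 or u32, from used_types) and sizes the record array as stride * num_records, clamping a zero stride to 1. The standalone sketch below mirrors only that selection and sizing logic with hypothetical inputs; MockBuffer and BufferLayout are illustration-only names, not types from the codebase.

#include <cstdint>
#include <cstdio>

// Illustration-only stand-ins; the real code works with IR::Type flags and SPIR-V type ids.
enum class ElementType { F32, U32 };

struct MockBuffer {
    bool uses_f32;            // stands in for True(buffer.used_types & IR::Type::F32)
    std::uint32_t stride;     // record stride as reported by the guest descriptor
    std::uint32_t num_records;
};

struct BufferLayout {
    ElementType element_type;
    std::uint32_t num_elements;
};

BufferLayout DefineBufferLayout(const MockBuffer& buffer) {
    const ElementType type = buffer.uses_f32 ? ElementType::F32 : ElementType::U32;
    const std::uint32_t stride = buffer.stride == 0 ? 1 : buffer.stride; // avoid a zero-sized array
    return BufferLayout{type, stride * buffer.num_records};
}

int main() {
    const BufferLayout layout = DefineBufferLayout({true, 16, 64}); // hypothetical descriptor
    std::printf("elements: %u (%s)\n", layout.num_elements,
                layout.element_type == ElementType::F32 ? "f32" : "u32");
}
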
@@ -23,6 +23,14 @@ struct VectorIds {
         return ids[index - 1];
     }
 
+    [[nodiscard]] Id& Get(u32 index) {
+        return ids[index - 1];
+    }
+
+    [[nodiscard]] const Id& Get(u32 index) const {
+        return ids[index - 1];
+    }
+
     std::array<Id, 4> ids;
 };
 
@@ -141,9 +149,6 @@
     Id output_u32{};
     Id output_f32{};
 
-    Id uniform_f32{};
-    Id storage_f32{};
-
     boost::container::small_vector<Id, 16> interfaces;
 
     Id output_position{};
@@ -151,6 +156,9 @@
     Id base_vertex{};
     std::array<Id, 8> frag_color{};
 
+    Id workgroup_id{};
+    Id local_invocation_id{};
+
     struct TextureDefinition {
         Id id;
         Id sampled_type;
@@ -158,8 +166,14 @@
         Id image_type;
     };
 
+    struct BufferDefinition {
+        Id id;
+        const VectorIds* data_types;
+        Id pointer_type;
+    };
+
     u32& binding;
-    boost::container::small_vector<Id, 4> buffers;
+    boost::container::small_vector<BufferDefinition, 4> buffers;
     boost::container::small_vector<TextureDefinition, 4> images;
     boost::container::small_vector<Id, 4> samplers;
 
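
Aside (not part of the diff): BufferDefinition bundles the variable id, a pointer into the VectorIds table for the buffer's element type, and the pointer type used by access chains, so the emitters can request Get(1) for a scalar or Get(4) for a 4-component type regardless of whether the buffer is f32- or u32-backed. The sketch below imitates that 1-based lookup with plain strings; it is illustrative only and not the engine's types.

#include <array>
#include <cstddef>
#include <cstdio>
#include <string>

// Stand-in for VectorIds: index 1 is the scalar type, index N the N-component vector type.
struct VectorNames {
    [[nodiscard]] const std::string& Get(std::size_t index) const {
        return names[index - 1]; // 1-based, like VectorIds::Get
    }
    std::array<std::string, 4> names;
};

int main() {
    const VectorNames f32{{"f32", "vec2", "vec3", "vec4"}};
    const VectorNames u32{{"u32", "uvec2", "uvec3", "uvec4"}};

    // A buffer definition picks one table once; loads then ask for the width they need.
    const VectorNames* data_types = &u32;
    std::printf("scalar load type: %s\n", data_types->Get(1).c_str()); // u32
    std::printf("x4 load type:     %s\n", data_types->Get(4).c_str()); // uvec4
    (void)f32; // the f32 table would be chosen for float-backed buffers
}
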