Merge remote-tracking branch 'upstream/master' into int-flags

Levi committed 2021-01-10 22:09:56 -07:00 · commit 7a3c884e39
912 changed files with 91129 additions and 25508 deletions


@ -212,16 +212,15 @@ public:
}
void operator()(const ExprPredicate& expr) {
inner += "P" + std::to_string(expr.predicate);
inner += fmt::format("P{}", expr.predicate);
}
void operator()(const ExprCondCode& expr) {
u32 cc = static_cast<u32>(expr.cc);
inner += "CC" + std::to_string(cc);
inner += fmt::format("CC{}", expr.cc);
}
void operator()(const ExprVar& expr) {
inner += "V" + std::to_string(expr.var_index);
inner += fmt::format("V{}", expr.var_index);
}
void operator()(const ExprBoolean& expr) {
@ -229,7 +228,7 @@ public:
}
void operator()(const ExprGprEqual& expr) {
inner += "( gpr_" + std::to_string(expr.gpr) + " == " + std::to_string(expr.value) + ')';
inner += fmt::format("(gpr_{} == {})", expr.gpr, expr.value);
}
const std::string& GetResult() const {
@ -374,8 +373,8 @@ std::string ASTManager::Print() const {
return printer.GetResult();
}
ASTManager::ASTManager(bool full_decompile, bool disable_else_derivation)
: full_decompile{full_decompile}, disable_else_derivation{disable_else_derivation} {};
ASTManager::ASTManager(bool do_full_decompile, bool disable_else_derivation_)
: full_decompile{do_full_decompile}, disable_else_derivation{disable_else_derivation_} {}
ASTManager::~ASTManager() {
Clear();

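The recurring change in this file swaps std::to_string concatenation for fmt::format, and the ASTManager constructor additionally loses a stray semicolon after its body (the kind of thing -Wextra-semi flags). A minimal sketch of the string-building pattern, assuming only the public {fmt} API:

#include <string>
#include <fmt/format.h>

// Before: every + allocates or extends a temporary string.
std::string BuildOld(unsigned gpr, unsigned value) {
    return "( gpr_" + std::to_string(gpr) + " == " + std::to_string(value) + ')';
}

// After: a single formatting pass, and the format string shows the
// output shape at a glance.
std::string BuildNew(unsigned gpr, unsigned value) {
    return fmt::format("(gpr_{} == {})", gpr, value);
}

Note that the CC case now passes expr.cc directly instead of casting to u32 first; that relies on a formatter being available for the enum type (see the note on enum formatting further down).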

@ -76,7 +76,7 @@ public:
class ASTIfThen {
public:
explicit ASTIfThen(Expr condition) : condition{std::move(condition)} {}
explicit ASTIfThen(Expr condition_) : condition{std::move(condition_)} {}
Expr condition;
ASTZipper nodes{};
};
@ -88,63 +88,68 @@ public:
class ASTBlockEncoded {
public:
explicit ASTBlockEncoded(u32 start, u32 end) : start{start}, end{end} {}
explicit ASTBlockEncoded(u32 start_, u32 _) : start{start_}, end{_} {}
u32 start;
u32 end;
};
class ASTBlockDecoded {
public:
explicit ASTBlockDecoded(NodeBlock&& new_nodes) : nodes(std::move(new_nodes)) {}
explicit ASTBlockDecoded(NodeBlock&& new_nodes_) : nodes(std::move(new_nodes_)) {}
NodeBlock nodes;
};
class ASTVarSet {
public:
explicit ASTVarSet(u32 index, Expr condition) : index{index}, condition{std::move(condition)} {}
explicit ASTVarSet(u32 index_, Expr condition_)
: index{index_}, condition{std::move(condition_)} {}
u32 index;
Expr condition;
};
class ASTLabel {
public:
explicit ASTLabel(u32 index) : index{index} {}
explicit ASTLabel(u32 index_) : index{index_} {}
u32 index;
bool unused{};
};
class ASTGoto {
public:
explicit ASTGoto(Expr condition, u32 label) : condition{std::move(condition)}, label{label} {}
explicit ASTGoto(Expr condition_, u32 label_)
: condition{std::move(condition_)}, label{label_} {}
Expr condition;
u32 label;
};
class ASTDoWhile {
public:
explicit ASTDoWhile(Expr condition) : condition{std::move(condition)} {}
explicit ASTDoWhile(Expr condition_) : condition{std::move(condition_)} {}
Expr condition;
ASTZipper nodes{};
};
class ASTReturn {
public:
explicit ASTReturn(Expr condition, bool kills)
: condition{std::move(condition)}, kills{kills} {}
explicit ASTReturn(Expr condition_, bool kills_)
: condition{std::move(condition_)}, kills{kills_} {}
Expr condition;
bool kills;
};
class ASTBreak {
public:
explicit ASTBreak(Expr condition) : condition{std::move(condition)} {}
explicit ASTBreak(Expr condition_) : condition{std::move(condition_)} {}
Expr condition;
};
class ASTBase {
public:
explicit ASTBase(ASTNode parent, ASTData data)
: data{std::move(data)}, parent{std::move(parent)} {}
explicit ASTBase(ASTNode parent_, ASTData data_)
: data{std::move(data_)}, parent{std::move(parent_)} {}
template <class U, class... Args>
static ASTNode Make(ASTNode parent, Args&&... args) {
@ -300,7 +305,7 @@ private:
class ASTManager final {
public:
ASTManager(bool full_decompile, bool disable_else_derivation);
explicit ASTManager(bool do_full_decompile, bool disable_else_derivation_);
~ASTManager();
ASTManager(const ASTManager& o) = delete;

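The parameter renames that dominate this header (condition → condition_, index → index_, and so on) are about shadowing: a constructor parameter spelled exactly like the member it initializes hides that member inside the constructor body, which -Wshadow-style warnings reject. A toy illustration of the hazard, not yuzu code:

struct Counter {
    int count;
    // "count" the parameter shadows "count" the member; inside the
    // body braces, the member is unreachable by that name.
    explicit Counter(int count) : count{count} {}
};

struct CounterFixed {
    int count;
    // The trailing underscore keeps both names visible and distinct.
    explicit CounterFixed(int count_) : count{count_} {}
};

In the member-initializer list the old form was technically well-defined (the name before the braces always finds the member), so this is a readability and warning-hygiene change rather than a bug fix.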

@ -13,21 +13,22 @@
namespace VideoCommon::Shader {
AsyncShaders::AsyncShaders(Core::Frontend::EmuWindow& emu_window) : emu_window(emu_window) {}
AsyncShaders::AsyncShaders(Core::Frontend::EmuWindow& emu_window_) : emu_window(emu_window_) {}
AsyncShaders::~AsyncShaders() {
KillWorkers();
}
void AsyncShaders::AllocateWorkers() {
// Max worker threads we should allow
constexpr u32 MAX_THREADS = 4;
// Deduce how many threads we can use
const u32 threads_used = std::thread::hardware_concurrency() / 4;
// Always allow at least 1 thread regardless of our settings
const auto max_worker_count = std::max(1U, threads_used);
// Don't use more than MAX_THREADS
const auto num_workers = std::min(max_worker_count, MAX_THREADS);
// Use at least one thread
u32 num_workers = 1;
// Deduce how many more threads we can use
const u32 thread_count = std::thread::hardware_concurrency();
if (thread_count >= 8) {
// Increase async workers by 1 for every 2 threads >= 8
num_workers += 1 + (thread_count - 8) / 2;
}
// If we already have workers queued, ignore
if (num_workers == worker_threads.size()) {
@ -42,8 +43,8 @@ void AsyncShaders::AllocateWorkers() {
// Create workers
for (std::size_t i = 0; i < num_workers; i++) {
context_list.push_back(emu_window.CreateSharedContext());
worker_threads.push_back(
std::thread(&AsyncShaders::ShaderCompilerThread, this, context_list[i].get()));
worker_threads.emplace_back(&AsyncShaders::ShaderCompilerThread, this,
context_list[i].get());
}
}
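The rewritten AllocateWorkers drops the old "quarter of the hardware threads, clamped to [1, 4]" policy in favor of a baseline of one worker, plus one at eight hardware threads, plus one more per two threads beyond that, with no upper cap. A sketch of the new policy extracted for illustration (u32 is yuzu's alias for uint32_t):

#include <cstdint>

std::uint32_t NumWorkers(std::uint32_t thread_count) {
    std::uint32_t num_workers = 1; // always allow at least one worker
    if (thread_count >= 8) {
        // +1 at 8 threads, then +1 for every 2 threads beyond 8
        num_workers += 1 + (thread_count - 8) / 2;
    }
    return num_workers; // 4 -> 1, 8 -> 2, 12 -> 4, 16 -> 6
}

The loop below it also switches from push_back(std::thread(...)) to emplace_back(...), constructing each thread in place instead of moving a temporary.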
@ -105,8 +106,7 @@ std::vector<AsyncShaders::Result> AsyncShaders::GetCompletedWork() {
std::vector<Result> results;
{
std::unique_lock lock{completed_mutex};
results.assign(std::make_move_iterator(finished_work.begin()),
std::make_move_iterator(finished_work.end()));
results = std::move(finished_work);
finished_work.clear();
}
return results;
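Replacing the assign-through-move-iterators dance with results = std::move(finished_work) turns an O(n) per-element transfer into an O(1) buffer steal. The explicit clear() matters: a moved-from std::vector is only guaranteed to be in a valid but unspecified state, so clearing it ensures the shared queue really is empty for the next producer. The pattern in isolation:

#include <mutex>
#include <utility>
#include <vector>

std::mutex completed_mutex;
std::vector<int> finished_work; // stands in for std::vector<AsyncShaders::Result>

std::vector<int> GetCompleted() {
    std::vector<int> results;
    {
        std::scoped_lock lock{completed_mutex};
        results = std::move(finished_work); // steal the buffer, no element moves
        finished_work.clear();              // make the unspecified state a definite empty
    }
    return results;
}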
@ -115,11 +115,10 @@ std::vector<AsyncShaders::Result> AsyncShaders::GetCompletedWork() {
void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
Tegra::Engines::ShaderType shader_type, u64 uid,
std::vector<u64> code, std::vector<u64> code_b,
u32 main_offset,
VideoCommon::Shader::CompilerSettings compiler_settings,
const VideoCommon::Shader::Registry& registry,
VAddr cpu_addr) {
WorkerParams params{
u32 main_offset, CompilerSettings compiler_settings,
const Registry& registry, VAddr cpu_addr) {
std::unique_lock lock(queue_mutex);
pending_queue.push({
.backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL,
.device = &device,
.shader_type = shader_type,
@ -130,35 +129,30 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
.compiler_settings = compiler_settings,
.registry = registry,
.cpu_address = cpu_addr,
};
std::unique_lock lock(queue_mutex);
pending_queue.push(std::move(params));
});
cv.notify_one();
}
void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache,
const Vulkan::VKDevice& device, Vulkan::VKScheduler& scheduler,
const Vulkan::Device& device, Vulkan::VKScheduler& scheduler,
Vulkan::VKDescriptorPool& descriptor_pool,
Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
Vulkan::VKRenderPassCache& renderpass_cache,
std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program,
Vulkan::GraphicsPipelineCacheKey key) {
WorkerParams params{
Vulkan::GraphicsPipelineCacheKey key, u32 num_color_buffers) {
std::unique_lock lock(queue_mutex);
pending_queue.push({
.backend = Backend::Vulkan,
.pp_cache = pp_cache,
.vk_device = &device,
.scheduler = &scheduler,
.descriptor_pool = &descriptor_pool,
.update_descriptor_queue = &update_descriptor_queue,
.renderpass_cache = &renderpass_cache,
.bindings = bindings,
.program = program,
.bindings = std::move(bindings),
.program = std::move(program),
.key = key,
};
std::unique_lock lock(queue_mutex);
pending_queue.push(std::move(params));
.num_color_buffers = num_color_buffers,
});
cv.notify_one();
}
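Both queue functions now construct the parameter struct directly inside pending_queue.push({...}) using C++20 designated initializers, rather than filling a named WorkerParams and moving it afterwards; std::move on the bindings and program members still keeps the heavyweight fields from being copied. The idiom reduced to essentials, with a hypothetical Params stand-in:

#include <queue>
#include <string>
#include <utility>
#include <vector>

struct Params { // hypothetical stand-in for AsyncShaders::WorkerParams
    int backend;
    std::vector<int> bindings;
    std::string program;
};

std::queue<Params> pending_queue;

void Queue(std::vector<int> bindings, std::string program) {
    pending_queue.push({
        .backend = 1,
        .bindings = std::move(bindings), // member-wise moves still apply
        .program = std::move(program),
    });
}

One constraint worth remembering: designated initializers must appear in the same order as the members are declared in the struct.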
@ -210,8 +204,8 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context
} else if (work.backend == Backend::Vulkan) {
auto pipeline = std::make_unique<Vulkan::VKGraphicsPipeline>(
*work.vk_device, *work.scheduler, *work.descriptor_pool,
*work.update_descriptor_queue, *work.renderpass_cache, work.key, work.bindings,
work.program);
*work.update_descriptor_queue, work.key, work.bindings, work.program,
work.num_color_buffers);
work.pp_cache->EmplacePipeline(std::move(pipeline));
}


@ -24,9 +24,9 @@
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/vulkan_common/vulkan_device.h"
namespace Core::Frontend {
class EmuWindow;
@ -66,7 +66,7 @@ public:
Tegra::Engines::ShaderType shader_type;
};
explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window);
explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window_);
~AsyncShaders();
/// Start up shader worker threads
@ -94,13 +94,13 @@ public:
CompilerSettings compiler_settings, const Registry& registry,
VAddr cpu_addr);
void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device,
void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::Device& device,
Vulkan::VKScheduler& scheduler,
Vulkan::VKDescriptorPool& descriptor_pool,
Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
Vulkan::VKRenderPassCache& renderpass_cache,
std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key);
Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key,
u32 num_color_buffers);
private:
void ShaderCompilerThread(Core::Frontend::GraphicsContext* context);
@ -123,14 +123,14 @@ private:
// For Vulkan
Vulkan::VKPipelineCache* pp_cache;
const Vulkan::VKDevice* vk_device;
const Vulkan::Device* vk_device;
Vulkan::VKScheduler* scheduler;
Vulkan::VKDescriptorPool* descriptor_pool;
Vulkan::VKUpdateDescriptorQueue* update_descriptor_queue;
Vulkan::VKRenderPassCache* renderpass_cache;
std::vector<VkDescriptorSetLayoutBinding> bindings;
Vulkan::SPIRVProgram program;
Vulkan::GraphicsPipelineCacheKey key;
u32 num_color_buffers;
};
std::condition_variable cv;


@ -66,8 +66,8 @@ struct BlockInfo {
};
struct CFGRebuildState {
explicit CFGRebuildState(const ProgramCode& program_code, u32 start, Registry& registry)
: program_code{program_code}, registry{registry}, start{start} {}
explicit CFGRebuildState(const ProgramCode& program_code_, u32 start_, Registry& registry_)
: program_code{program_code_}, registry{registry_}, start{start_} {}
const ProgramCode& program_code;
Registry& registry;
@ -241,10 +241,10 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address)
ParseInfo parse_info{};
SingleBranch single_branch{};
const auto insert_label = [](CFGRebuildState& state, u32 address) {
const auto pair = state.labels.emplace(address);
const auto insert_label = [](CFGRebuildState& rebuild_state, u32 label_address) {
const auto pair = rebuild_state.labels.emplace(label_address);
if (pair.second) {
state.inspect_queries.push_back(address);
rebuild_state.inspect_queries.push_back(label_address);
}
};
@ -257,7 +257,7 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address)
single_branch.ignore = false;
break;
}
if (state.registered.count(offset) != 0) {
if (state.registered.contains(offset)) {
single_branch.address = offset;
single_branch.ignore = true;
break;
@ -632,12 +632,12 @@ void DecompileShader(CFGRebuildState& state) {
for (auto label : state.labels) {
state.manager->DeclareLabel(label);
}
for (auto& block : state.block_info) {
if (state.labels.count(block.start) != 0) {
for (const auto& block : state.block_info) {
if (state.labels.contains(block.start)) {
state.manager->InsertLabel(block.start);
}
const bool ignore = BlockBranchIsIgnored(block.branch);
u32 end = ignore ? block.end + 1 : block.end;
const u32 end = ignore ? block.end + 1 : block.end;
state.manager->InsertBlock(block.start, end);
if (!ignore) {
InsertBranch(*state.manager, block.branch);
@ -737,7 +737,7 @@ std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code,
auto back = result_out->blocks.begin();
auto next = std::next(back);
while (next != result_out->blocks.end()) {
if (state.labels.count(next->start) == 0 && next->start == back->end + 1) {
if (!state.labels.contains(next->start) && next->start == back->end + 1) {
back->end = next->end;
next = result_out->blocks.erase(next);
continue;

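Several sites in this file (and in the decoder further down) replace count(...) != 0 and count(...) == 0 with C++20's std::set::contains, which names the intent and returns bool directly:

#include <set>

std::set<unsigned> labels{0x20, 0x40};

bool IsLabel(unsigned address) {
    return labels.contains(address); // equivalent to labels.count(address) != 0
}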

@ -42,10 +42,10 @@ struct Condition {
class SingleBranch {
public:
SingleBranch() = default;
SingleBranch(Condition condition, s32 address, bool kill, bool is_sync, bool is_brk,
bool ignore)
: condition{condition}, address{address}, kill{kill}, is_sync{is_sync}, is_brk{is_brk},
ignore{ignore} {}
explicit SingleBranch(Condition condition_, s32 address_, bool kill_, bool is_sync_,
bool is_brk_, bool ignore_)
: condition{condition_}, address{address_}, kill{kill_}, is_sync{is_sync_}, is_brk{is_brk_},
ignore{ignore_} {}
bool operator==(const SingleBranch& b) const {
return std::tie(condition, address, kill, is_sync, is_brk, ignore) ==
@ -65,15 +65,15 @@ public:
};
struct CaseBranch {
CaseBranch(u32 cmp_value, u32 address) : cmp_value{cmp_value}, address{address} {}
explicit CaseBranch(u32 cmp_value_, u32 address_) : cmp_value{cmp_value_}, address{address_} {}
u32 cmp_value;
u32 address;
};
class MultiBranch {
public:
MultiBranch(u32 gpr, std::vector<CaseBranch>&& branches)
: gpr{gpr}, branches{std::move(branches)} {}
explicit MultiBranch(u32 gpr_, std::vector<CaseBranch>&& branches_)
: gpr{gpr_}, branches{std::move(branches_)} {}
u32 gpr{};
std::vector<CaseBranch> branches{};


@ -25,7 +25,7 @@ using Tegra::Shader::OpCode;
namespace {
void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
const std::list<Sampler>& used_samplers) {
const std::list<SamplerEntry>& used_samplers) {
if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
return;
}
@ -43,9 +43,9 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
}
}
std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
std::optional<u32> TryDeduceSamplerSize(const SamplerEntry& sampler_to_deduce,
VideoCore::GuestDriverProfile& gpu_driver,
const std::list<Sampler>& used_samplers) {
const std::list<SamplerEntry>& used_samplers) {
const u32 base_offset = sampler_to_deduce.offset;
u32 max_offset{std::numeric_limits<u32>::max()};
for (const auto& sampler : used_samplers) {
@ -66,7 +66,7 @@ std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
class ASTDecoder {
public:
ASTDecoder(ShaderIR& ir) : ir(ir) {}
explicit ASTDecoder(ShaderIR& ir_) : ir(ir_) {}
void operator()(ASTProgram& ast) {
ASTNode current = ast.nodes.GetFirst();
@ -153,8 +153,8 @@ void ShaderIR::Decode() {
const auto& blocks = shader_info.blocks;
NodeBlock current_block;
u32 current_label = static_cast<u32>(exit_branch);
for (auto& block : blocks) {
if (shader_info.labels.count(block.start) != 0) {
for (const auto& block : blocks) {
if (shader_info.labels.contains(block.start)) {
insert_block(current_block, current_label);
current_block.clear();
current_label = block.start;


@ -110,8 +110,7 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) {
case SubOp::Sqrt:
return Operation(OperationCode::FSqrt, PRECISE, op_a);
default:
UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}",
static_cast<unsigned>(instr.sub_op.Value()));
UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}", instr.sub_op.Value());
return Immediate(0);
}
}();
@ -137,7 +136,8 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::FCMP_RR:
case OpCode::Id::FCMP_RC: {
case OpCode::Id::FCMP_RC:
case OpCode::Id::FCMP_IMMR: {
UNIMPLEMENTED_IF(instr.fcmp.ftz == 0);
Node op_c = GetRegister(instr.gpr39);
Node comp = GetPredicateComparisonFloat(instr.fcmp.cond, std::move(op_c), Immediate(0.0f));


@ -83,7 +83,7 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
case IAdd3Height::UpperHalfWord:
return BitfieldExtract(value, 16, 16);
default:
UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", static_cast<u32>(height));
UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", height);
return Immediate(0);
}
};
@ -258,7 +258,7 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
case OpCode::Id::LEA_IMM:
case OpCode::Id::LEA_RZ:
case OpCode::Id::LEA_HI: {
auto [op_a, op_b, op_c] = [&]() -> std::tuple<Node, Node, Node> {
auto [op_a_, op_b_, op_c_] = [&]() -> std::tuple<Node, Node, Node> {
switch (opcode->get().GetId()) {
case OpCode::Id::LEA_R2: {
return {GetRegister(instr.gpr20), GetRegister(instr.gpr39),
@ -294,8 +294,9 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex),
"Unhandled LEA Predicate");
Node value = Operation(OperationCode::ILogicalShiftLeft, std::move(op_a), std::move(op_c));
value = Operation(OperationCode::IAdd, std::move(op_b), std::move(value));
Node value =
Operation(OperationCode::ILogicalShiftLeft, std::move(op_a_), std::move(op_c_));
value = Operation(OperationCode::IAdd, std::move(op_b_), std::move(value));
SetRegister(bb, instr.gpr0, std::move(value));
break;


@ -28,23 +28,26 @@ u32 ShaderIR::DecodeArithmeticIntegerImmediate(NodeBlock& bb, u32 pc) {
case OpCode::Id::IADD32I: {
UNIMPLEMENTED_IF_MSG(instr.iadd32i.saturate, "IADD32I saturation is not implemented");
op_a = GetOperandAbsNegInteger(op_a, false, instr.iadd32i.negate_a, true);
op_a = GetOperandAbsNegInteger(std::move(op_a), false, instr.iadd32i.negate_a != 0, true);
const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b);
Node value = Operation(OperationCode::IAdd, PRECISE, std::move(op_a), std::move(op_b));
SetInternalFlagsFromInteger(bb, value, instr.op_32.generates_cc);
SetRegister(bb, instr.gpr0, value);
SetInternalFlagsFromInteger(bb, value, instr.op_32.generates_cc != 0);
SetRegister(bb, instr.gpr0, std::move(value));
break;
}
case OpCode::Id::LOP32I: {
if (instr.alu.lop32i.invert_a)
op_a = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_a);
if (instr.alu.lop32i.invert_a) {
op_a = Operation(OperationCode::IBitwiseNot, NO_PRECISE, std::move(op_a));
}
if (instr.alu.lop32i.invert_b)
op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b);
if (instr.alu.lop32i.invert_b) {
op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, std::move(op_b));
}
WriteLogicOperation(bb, instr.gpr0, instr.alu.lop32i.operation, op_a, op_b,
PredicateResultMode::None, Pred::UnusedIndex, instr.op_32.generates_cc);
WriteLogicOperation(bb, instr.gpr0, instr.alu.lop32i.operation, std::move(op_a),
std::move(op_b), PredicateResultMode::None, Pred::UnusedIndex,
instr.op_32.generates_cc != 0);
break;
}
default:
@ -58,18 +61,18 @@ u32 ShaderIR::DecodeArithmeticIntegerImmediate(NodeBlock& bb, u32 pc) {
void ShaderIR::WriteLogicOperation(NodeBlock& bb, Register dest, LogicOperation logic_op, Node op_a,
Node op_b, PredicateResultMode predicate_mode, Pred predicate,
bool sets_cc) {
const Node result = [&]() {
Node result = [&] {
switch (logic_op) {
case LogicOperation::And:
return Operation(OperationCode::IBitwiseAnd, PRECISE, op_a, op_b);
return Operation(OperationCode::IBitwiseAnd, PRECISE, std::move(op_a), std::move(op_b));
case LogicOperation::Or:
return Operation(OperationCode::IBitwiseOr, PRECISE, op_a, op_b);
return Operation(OperationCode::IBitwiseOr, PRECISE, std::move(op_a), std::move(op_b));
case LogicOperation::Xor:
return Operation(OperationCode::IBitwiseXor, PRECISE, op_a, op_b);
return Operation(OperationCode::IBitwiseXor, PRECISE, std::move(op_a), std::move(op_b));
case LogicOperation::PassB:
return op_b;
default:
UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast<u32>(logic_op));
UNIMPLEMENTED_MSG("Unimplemented logic operation={}", logic_op);
return Immediate(0);
}
}();
@ -84,13 +87,12 @@ void ShaderIR::WriteLogicOperation(NodeBlock& bb, Register dest, LogicOperation
return;
case PredicateResultMode::NotZero: {
// Set the predicate to true if the result is not zero.
const Node compare = Operation(OperationCode::LogicalINotEqual, result, Immediate(0));
SetPredicate(bb, static_cast<u64>(predicate), compare);
Node compare = Operation(OperationCode::LogicalINotEqual, std::move(result), Immediate(0));
SetPredicate(bb, static_cast<u64>(predicate), std::move(compare));
break;
}
default:
UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}",
static_cast<u32>(predicate_mode));
UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}", predicate_mode);
}
}
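The std::move calls threaded through this decoder are not cosmetic: Node is a reference-counted handle in this IR, so passing one by value without moving costs an atomic increment and decrement per argument. Dropping const from result in WriteLogicOperation is part of the same change, since a const local cannot be moved from. The pattern, sketched with std::shared_ptr standing in for Node:

#include <memory>
#include <utility>

using Node = std::shared_ptr<int>; // stand-in; yuzu's Node is similarly refcounted

Node Combine(Node a, Node b) {     // operands taken by value
    return a ? a : b;              // placeholder body; the calling convention is the point
}

Node Example(Node op_a, Node op_b) {
    // Moving hands the caller's last use of each handle straight to the
    // callee; copying would bump and drop a refcount for every argument.
    return Combine(std::move(op_a), std::move(op_b));
}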


@ -244,7 +244,7 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
return Operation(OperationCode::FTrunc, value);
default:
UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}",
static_cast<u32>(instr.conversion.f2f.rounding.Value()));
instr.conversion.f2f.rounding.Value());
return value;
}
}();
@ -300,7 +300,7 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
return Operation(OperationCode::FTrunc, PRECISE, value);
default:
UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}",
static_cast<u32>(instr.conversion.f2i.rounding.Value()));
instr.conversion.f2i.rounding.Value());
return Immediate(0);
}
}();


@ -22,13 +22,13 @@ u32 ShaderIR::DecodeHalfSet(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
PredCondition cond;
bool bf;
bool ftz;
bool neg_a;
bool abs_a;
bool neg_b;
bool abs_b;
PredCondition cond{};
bool bf = false;
bool ftz = false;
bool neg_a = false;
bool abs_a = false;
bool neg_b = false;
bool abs_b = false;
switch (opcode->get().GetId()) {
case OpCode::Id::HSET2_C:
case OpCode::Id::HSET2_IMM:

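Giving the DecodeHalfSet locals initializers closes an undefined-behavior window: they were previously assigned only inside the switch below, so any opcode that slipped past every case would have left them indeterminate when read, which compilers report as -Wmaybe-uninitialized. A reduced sketch of why the definite fallback value helps:

enum class Mode { A, B, C };

bool FtzFor(Mode mode) {
    bool ftz = false;              // definite fallback value
    switch (mode) {
    case Mode::A: ftz = true;  break;
    case Mode::B: ftz = false; break;
    default:                       // a case added later still leaves ftz well-defined
        break;
    }
    return ftz;                    // never reads an indeterminate bool
}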

@ -212,10 +212,10 @@ u32 GetComponentSize(TextureFormat format, std::size_t component) {
return 0;
case TextureFormat::R8G24:
if (component == 0) {
return 8;
return 24;
}
if (component == 1) {
return 24;
return 8;
}
return 0;
case TextureFormat::R8G8:
@ -358,9 +358,9 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
instr.suldst.GetStoreDataLayout() != StoreType::Bits64);
auto descriptor = [this, instr] {
std::optional<Tegra::Engines::SamplerDescriptor> descriptor;
std::optional<Tegra::Engines::SamplerDescriptor> sampler_descriptor;
if (instr.suldst.is_immediate) {
descriptor =
sampler_descriptor =
registry.ObtainBoundSampler(static_cast<u32>(instr.image.index.Value()));
} else {
const Node image_register = GetRegister(instr.gpr39);
@ -368,12 +368,12 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
static_cast<s64>(global_code.size()));
const auto buffer = std::get<1>(result);
const auto offset = std::get<2>(result);
descriptor = registry.ObtainBindlessSampler(buffer, offset);
sampler_descriptor = registry.ObtainBindlessSampler(buffer, offset);
}
if (!descriptor) {
if (!sampler_descriptor) {
UNREACHABLE_MSG("Failed to obtain image descriptor");
}
return *descriptor;
return *sampler_descriptor;
}();
const auto comp_mask = GetImageComponentMask(descriptor.format);
@ -497,11 +497,12 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
return pc;
}
Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
ImageEntry& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
const auto offset = static_cast<u32>(image.index.Value());
const auto it = std::find_if(std::begin(used_images), std::end(used_images),
[offset](const Image& entry) { return entry.offset == offset; });
const auto it =
std::find_if(std::begin(used_images), std::end(used_images),
[offset](const ImageEntry& entry) { return entry.offset == offset; });
if (it != std::end(used_images)) {
ASSERT(!it->is_bindless && it->type == type);
return *it;
@ -511,7 +512,7 @@ Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType t
return used_images.emplace_back(next_index, offset, type);
}
Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) {
ImageEntry& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) {
const Node image_register = GetRegister(reg);
const auto result =
TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()));
@ -520,7 +521,7 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
const auto offset = std::get<2>(result);
const auto it = std::find_if(std::begin(used_images), std::end(used_images),
[buffer, offset](const Image& entry) {
[buffer, offset](const ImageEntry& entry) {
return entry.buffer == buffer && entry.offset == offset;
});
if (it != std::end(used_images)) {


@ -47,7 +47,7 @@ OperationCode GetAtomOperation(AtomicOp op) {
case AtomicOp::Exch:
return OperationCode::AtomicIExchange;
default:
UNIMPLEMENTED_MSG("op={}", static_cast<int>(op));
UNIMPLEMENTED_MSG("op={}", op);
return OperationCode::AtomicIAdd;
}
}
@ -83,7 +83,7 @@ u32 GetMemorySize(Tegra::Shader::UniformType uniform_type) {
case Tegra::Shader::UniformType::UnsignedQuad:
return 128;
default:
UNIMPLEMENTED_MSG("Unimplemented size={}!", static_cast<u32>(uniform_type));
UNIMPLEMENTED_MSG("Unimplemented size={}!", uniform_type);
return 32;
}
}
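A pattern that repeats across this and the surrounding decoders: UNIMPLEMENTED_MSG and LOG_DEBUG call sites drop their static_casts and hand enums (and BitField::Value() results) straight to the formatter. {fmt} does not format arbitrary scoped enums by default, so this only compiles because the project supplies formatters for these types; presumably this commit adds such specializations in yuzu's common headers. One conventional way to write one (names here are illustrative, not yuzu's):

#include <fmt/format.h>

enum class AtomicOp { Add, Min, Max, Exch };

// Format AtomicOp as its underlying integer; a fancier formatter could
// print the enumerator name instead.
template <>
struct fmt::formatter<AtomicOp> : fmt::formatter<int> {
    template <typename FormatContext>
    auto format(AtomicOp op, FormatContext& ctx) -> decltype(ctx.out()) {
        return fmt::formatter<int>::format(static_cast<int>(op), ctx);
    }
};

// fmt::format("op={}", AtomicOp::Min) == "op=1"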
@ -175,12 +175,12 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
break;
}
default:
UNIMPLEMENTED_MSG("Unhandled type: {}", static_cast<unsigned>(instr.ld_c.type.Value()));
UNIMPLEMENTED_MSG("Unhandled type: {}", instr.ld_c.type.Value());
}
break;
}
case OpCode::Id::LD_L:
LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}", static_cast<u64>(instr.ld_l.unknown));
LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}", instr.ld_l.unknown);
[[fallthrough]];
case OpCode::Id::LD_S: {
const auto GetAddress = [&](s32 offset) {
@ -224,7 +224,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
default:
UNIMPLEMENTED_MSG("{} Unhandled type: {}", opcode->get().GetName(),
static_cast<u32>(instr.ldst_sl.type.Value()));
instr.ldst_sl.type.Value());
}
break;
}
@ -306,8 +306,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::ST_L:
LOG_DEBUG(HW_GPU, "ST_L cache management mode: {}",
static_cast<u64>(instr.st_l.cache_management.Value()));
LOG_DEBUG(HW_GPU, "ST_L cache management mode: {}", instr.st_l.cache_management.Value());
[[fallthrough]];
case OpCode::Id::ST_S: {
const auto GetAddress = [&](s32 offset) {
@ -340,7 +339,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
default:
UNIMPLEMENTED_MSG("{} unhandled type: {}", opcode->get().GetName(),
static_cast<u32>(instr.ldst_sl.type.Value()));
instr.ldst_sl.type.Value());
}
break;
}
@ -387,7 +386,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
case OpCode::Id::RED: {
UNIMPLEMENTED_IF_MSG(instr.red.type != GlobalAtomicType::U32, "type={}",
static_cast<int>(instr.red.type.Value()));
instr.red.type.Value());
const auto [real_address, base_address, descriptor] =
TrackGlobalMemory(bb, instr, true, true);
if (!real_address || !base_address) {
@ -403,12 +402,12 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG(instr.atom.operation == AtomicOp::Inc ||
instr.atom.operation == AtomicOp::Dec ||
instr.atom.operation == AtomicOp::SafeAdd,
"operation={}", static_cast<int>(instr.atom.operation.Value()));
"operation={}", instr.atom.operation.Value());
UNIMPLEMENTED_IF_MSG(instr.atom.type == GlobalAtomicType::S64 ||
instr.atom.type == GlobalAtomicType::U64 ||
instr.atom.type == GlobalAtomicType::F16x2_FTZ_RN ||
instr.atom.type == GlobalAtomicType::F32_FTZ_RN,
"type={}", static_cast<int>(instr.atom.type.Value()));
"type={}", instr.atom.type.Value());
const auto [real_address, base_address, descriptor] =
TrackGlobalMemory(bb, instr, true, true);
@ -428,10 +427,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
case OpCode::Id::ATOMS: {
UNIMPLEMENTED_IF_MSG(instr.atoms.operation == AtomicOp::Inc ||
instr.atoms.operation == AtomicOp::Dec,
"operation={}", static_cast<int>(instr.atoms.operation.Value()));
"operation={}", instr.atoms.operation.Value());
UNIMPLEMENTED_IF_MSG(instr.atoms.type == AtomicType::S64 ||
instr.atoms.type == AtomicType::U64,
"type={}", static_cast<int>(instr.atoms.type.Value()));
"type={}", instr.atoms.type.Value());
const bool is_signed =
instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64;
const s32 offset = instr.atoms.GetImmediateOffset();


@ -34,14 +34,13 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::EXIT: {
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}",
static_cast<u32>(cc));
const ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != ConditionCode::T, "EXIT condition code used: {}", cc);
switch (instr.flow.cond) {
case Tegra::Shader::FlowCondition::Always:
bb.push_back(Operation(OperationCode::Exit));
if (instr.pred.pred_index == static_cast<u64>(Tegra::Shader::Pred::UnusedIndex)) {
if (instr.pred.pred_index == static_cast<u64>(Pred::UnusedIndex)) {
// If this is an unconditional exit then just end processing here,
// otherwise we have to account for the possibility of the condition
// not being met, so continue processing the next instruction.
@ -56,17 +55,15 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
break;
default:
UNIMPLEMENTED_MSG("Unhandled flow condition: {}",
static_cast<u32>(instr.flow.cond.Value()));
UNIMPLEMENTED_MSG("Unhandled flow condition: {}", instr.flow.cond.Value());
}
break;
}
case OpCode::Id::KIL: {
UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always);
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "KIL condition code used: {}",
static_cast<u32>(cc));
const ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != ConditionCode::T, "KIL condition code used: {}", cc);
bb.push_back(Operation(OperationCode::Discard));
break;
@ -90,11 +87,11 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_MSG("S2R WscaleFactorZ is not implemented");
return Immediate(0U);
case SystemVariable::Tid: {
Node value = Immediate(0);
value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdX), 0, 9);
value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdY), 16, 9);
value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdZ), 26, 5);
return value;
Node val = Immediate(0);
val = BitfieldInsert(val, Operation(OperationCode::LocalInvocationIdX), 0, 9);
val = BitfieldInsert(val, Operation(OperationCode::LocalInvocationIdY), 16, 9);
val = BitfieldInsert(val, Operation(OperationCode::LocalInvocationIdZ), 26, 5);
return val;
}
case SystemVariable::TidX:
return Operation(OperationCode::LocalInvocationIdX);
@ -130,8 +127,7 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
return Immediate(0u);
}
default:
UNIMPLEMENTED_MSG("Unhandled system move: {}",
static_cast<u32>(instr.sys20.Value()));
UNIMPLEMENTED_MSG("Unhandled system move: {}", instr.sys20.Value());
return Immediate(0u);
}
}();
@ -181,8 +177,8 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
}
const Node branch = Operation(OperationCode::BranchIndirect, operand);
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
if (cc != Tegra::Shader::ConditionCode::T) {
const ConditionCode cc = instr.flow_condition_code;
if (cc != ConditionCode::T) {
bb.push_back(Conditional(GetConditionCode(cc), {branch}));
} else {
bb.push_back(branch);
@ -218,9 +214,8 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::SYNC: {
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "SYNC condition code used: {}",
static_cast<u32>(cc));
const ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != ConditionCode::T, "SYNC condition code used: {}", cc);
if (decompiled) {
break;
@ -231,9 +226,8 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::BRK: {
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "BRK condition code used: {}",
static_cast<u32>(cc));
const ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != ConditionCode::T, "BRK condition code used: {}", cc);
if (decompiled) {
break;
}
@ -306,7 +300,7 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
case Tegra::Shader::MembarType::GL:
return OperationCode::MemoryBarrierGlobal;
default:
UNIMPLEMENTED_MSG("MEMBAR type={}", static_cast<int>(instr.membar.type.Value()));
UNIMPLEMENTED_MSG("MEMBAR type={}", instr.membar.type.Value());
return OperationCode::MemoryBarrierGlobal;
}
}();


@ -125,7 +125,7 @@ u32 ShaderIR::DecodeShift(NodeBlock& bb, u32 pc) {
case OpCode::Id::SHF_LEFT_IMM: {
UNIMPLEMENTED_IF(instr.generates_cc);
UNIMPLEMENTED_IF_MSG(instr.shf.xmode != ShfXmode::None, "xmode={}",
static_cast<int>(instr.shf.xmode.Value()));
instr.shf.xmode.Value());
if (instr.is_b_imm) {
op_b = Immediate(static_cast<u32>(instr.shf.immediate));


@ -34,7 +34,7 @@ static std::size_t GetCoordCount(TextureType texture_type) {
case TextureType::TextureCube:
return 3;
default:
UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type));
UNIMPLEMENTED_MSG("Unhandled texture type: {}", texture_type);
return 0;
}
}
@ -141,7 +141,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
SamplerInfo info;
info.is_shadow = is_depth_compare;
const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
const std::optional<SamplerEntry> sampler = GetSampler(instr.sampler, info);
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
@ -173,9 +173,9 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
SamplerInfo info;
info.type = texture_type;
info.is_array = is_array;
const std::optional<Sampler> sampler = is_bindless
? GetBindlessSampler(base_reg, info, index_var)
: GetSampler(instr.sampler, info);
const std::optional<SamplerEntry> sampler =
is_bindless ? GetBindlessSampler(base_reg, info, index_var)
: GetSampler(instr.sampler, info);
Node4 values;
if (!sampler) {
std::generate(values.begin(), values.end(), [this] { return Immediate(0); });
@ -217,9 +217,9 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
[[fallthrough]];
case OpCode::Id::TXQ: {
Node index_var;
const std::optional<Sampler> sampler = is_bindless
? GetBindlessSampler(instr.gpr8, {}, index_var)
: GetSampler(instr.sampler, {});
const std::optional<SamplerEntry> sampler =
is_bindless ? GetBindlessSampler(instr.gpr8, {}, index_var)
: GetSampler(instr.sampler, {});
if (!sampler) {
u32 indexer = 0;
@ -255,8 +255,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
break;
}
default:
UNIMPLEMENTED_MSG("Unhandled texture query type: {}",
static_cast<u32>(instr.txq.query_type.Value()));
UNIMPLEMENTED_MSG("Unhandled texture query type: {}", instr.txq.query_type.Value());
}
break;
}
@ -273,7 +272,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
info.type = texture_type;
info.is_array = is_array;
Node index_var;
const std::optional<Sampler> sampler =
const std::optional<SamplerEntry> sampler =
is_bindless ? GetBindlessSampler(instr.gpr20, info, index_var)
: GetSampler(instr.sampler, info);
@ -292,33 +291,36 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
break;
}
const u64 base_index = is_array ? 1 : 0;
const u64 num_components = [texture_type] {
switch (texture_type) {
case TextureType::Texture1D:
return 1;
case TextureType::Texture2D:
return 2;
case TextureType::TextureCube:
return 3;
default:
UNIMPLEMENTED_MSG("Unhandled texture type {}", texture_type);
return 2;
}
}();
// TODO: What's the array component used for?
std::vector<Node> coords;
// TODO: Add coordinates for different samplers once other texture types are implemented.
switch (texture_type) {
case TextureType::Texture1D:
coords.push_back(GetRegister(instr.gpr8));
break;
case TextureType::Texture2D:
coords.push_back(GetRegister(instr.gpr8.Value() + 0));
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
break;
default:
UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<int>(texture_type));
// Fallback to interpreting as a 2D texture for now
coords.push_back(GetRegister(instr.gpr8.Value() + 0));
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
coords.reserve(num_components);
for (u64 component = 0; component < num_components; ++component) {
coords.push_back(GetRegister(instr.gpr8.Value() + base_index + component));
}
u32 indexer = 0;
for (u32 element = 0; element < 2; ++element) {
if (!instr.tmml.IsComponentEnabled(element)) {
continue;
}
auto params = coords;
MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element, index_var};
const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
SetTemporary(bb, indexer++, value);
Node value = Operation(OperationCode::TextureQueryLod, meta, coords);
SetTemporary(bb, indexer++, std::move(value));
}
for (u32 i = 0; i < indexer; ++i) {
SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
@ -377,14 +379,15 @@ ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(
return info;
}
std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
SamplerInfo sampler_info) {
std::optional<SamplerEntry> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
SamplerInfo sampler_info) {
const u32 offset = static_cast<u32>(sampler.index.Value());
const auto info = GetSamplerInfo(sampler_info, registry.ObtainBoundSampler(offset));
// If this sampler has already been used, return the existing mapping.
const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
[offset](const Sampler& entry) { return entry.offset == offset; });
const auto it =
std::find_if(used_samplers.begin(), used_samplers.end(),
[offset](const SamplerEntry& entry) { return entry.offset == offset; });
if (it != used_samplers.end()) {
ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
@ -397,8 +400,8 @@ std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
*info.is_shadow, *info.is_buffer, false);
}
std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
Node& index_var) {
std::optional<SamplerEntry> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
SamplerInfo info, Node& index_var) {
const Node sampler_register = GetRegister(reg);
const auto [base_node, tracked_sampler_info] =
TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
@ -414,7 +417,7 @@ std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
// If this sampler has already been used, return the existing mapping.
const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
[buffer, offset](const Sampler& entry) {
[buffer, offset](const SamplerEntry& entry) {
return entry.buffer == buffer && entry.offset == offset;
});
if (it != used_samplers.end()) {
@ -434,11 +437,12 @@ std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
info = GetSamplerInfo(info, registry.ObtainSeparateSampler(indices, offsets));
// Try to use an already created sampler if it exists
const auto it = std::find_if(
used_samplers.begin(), used_samplers.end(), [indices, offsets](const Sampler& entry) {
return offsets == std::pair{entry.offset, entry.secondary_offset} &&
indices == std::pair{entry.buffer, entry.secondary_buffer};
});
const auto it =
std::find_if(used_samplers.begin(), used_samplers.end(),
[indices, offsets](const SamplerEntry& entry) {
return offsets == std::pair{entry.offset, entry.secondary_offset} &&
indices == std::pair{entry.buffer, entry.secondary_buffer};
});
if (it != used_samplers.end()) {
ASSERT(it->is_separated && it->type == info.type && it->is_array == info.is_array &&
it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
@ -458,7 +462,7 @@ std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
// If this sampler has already been used, return the existing mapping.
const auto it = std::find_if(
used_samplers.begin(), used_samplers.end(),
[base_offset](const Sampler& entry) { return entry.offset == base_offset; });
[base_offset](const SamplerEntry& entry) { return entry.offset == base_offset; });
if (it != used_samplers.end()) {
ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
@ -553,7 +557,6 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
const bool is_shadow = depth_compare != nullptr;
const bool is_bindless = bindless_reg.has_value();
UNIMPLEMENTED_IF(texture_type == TextureType::TextureCube && is_array && is_shadow);
ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
"Illegal texture type");
@ -564,9 +567,9 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
info.is_buffer = false;
Node index_var;
const std::optional<Sampler> sampler = is_bindless
? GetBindlessSampler(*bindless_reg, info, index_var)
: GetSampler(instr.sampler, info);
const std::optional<SamplerEntry> sampler =
is_bindless ? GetBindlessSampler(*bindless_reg, info, index_var)
: GetSampler(instr.sampler, info);
if (!sampler) {
return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
}
@ -593,7 +596,7 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
lod = GetRegister(instr.gpr20.Value() + bias_offset);
break;
default:
UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode));
UNIMPLEMENTED_MSG("Unimplemented process mode={}", process_mode);
break;
}
@ -723,7 +726,7 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
info.is_shadow = depth_compare;
Node index_var;
const std::optional<Sampler> sampler =
const std::optional<SamplerEntry> sampler =
is_bindless ? GetBindlessSampler(parameter_register++, info, index_var)
: GetSampler(instr.sampler, info);
Node4 values;
@ -782,7 +785,7 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
// const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
// const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
const std::optional<Sampler> sampler = GetSampler(instr.sampler, {});
const std::optional<SamplerEntry> sampler = GetSampler(instr.sampler, {});
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
@ -799,7 +802,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
info.type = texture_type;
info.is_array = is_array;
info.is_shadow = false;
const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
const std::optional<SamplerEntry> sampler = GetSampler(instr.sampler, info);
const std::size_t type_coord_count = GetCoordCount(texture_type);
const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;


@ -27,7 +27,7 @@ OperationCode GetOperationCode(VoteOperation vote_op) {
case VoteOperation::Eq:
return OperationCode::VoteEqual;
default:
UNREACHABLE_MSG("Invalid vote operation={}", static_cast<u64>(vote_op));
UNREACHABLE_MSG("Invalid vote operation={}", vote_op);
return OperationCode::VoteAll;
}
}


@ -76,7 +76,7 @@ public:
class ExprPredicate final {
public:
explicit ExprPredicate(u32 predicate) : predicate{predicate} {}
explicit ExprPredicate(u32 predicate_) : predicate{predicate_} {}
bool operator==(const ExprPredicate& b) const {
return predicate == b.predicate;
@ -91,7 +91,7 @@ public:
class ExprCondCode final {
public:
explicit ExprCondCode(ConditionCode cc) : cc{cc} {}
explicit ExprCondCode(ConditionCode condition_code) : cc{condition_code} {}
bool operator==(const ExprCondCode& b) const {
return cc == b.cc;
@ -121,7 +121,7 @@ public:
class ExprGprEqual final {
public:
ExprGprEqual(u32 gpr, u32 value) : gpr{gpr}, value{value} {}
explicit ExprGprEqual(u32 gpr_, u32 value_) : gpr{gpr_}, value{value_} {}
bool operator==(const ExprGprEqual& b) const {
return gpr == b.gpr && value == b.value;


@ -282,26 +282,27 @@ struct SeparateSamplerNode;
using TrackSamplerData = std::variant<BindlessSamplerNode, SeparateSamplerNode, ArraySamplerNode>;
using TrackSampler = std::shared_ptr<TrackSamplerData>;
struct Sampler {
struct SamplerEntry {
/// Bound samplers constructor
constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
: index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
is_buffer{is_buffer}, is_indexed{is_indexed} {}
explicit SamplerEntry(u32 index_, u32 offset_, Tegra::Shader::TextureType type_, bool is_array_,
bool is_shadow_, bool is_buffer_, bool is_indexed_)
: index{index_}, offset{offset_}, type{type_}, is_array{is_array_}, is_shadow{is_shadow_},
is_buffer{is_buffer_}, is_indexed{is_indexed_} {}
/// Separate sampler constructor
constexpr explicit Sampler(u32 index, std::pair<u32, u32> offsets, std::pair<u32, u32> buffers,
Tegra::Shader::TextureType type, bool is_array, bool is_shadow,
bool is_buffer)
: index{index}, offset{offsets.first}, secondary_offset{offsets.second},
buffer{buffers.first}, secondary_buffer{buffers.second}, type{type}, is_array{is_array},
is_shadow{is_shadow}, is_buffer{is_buffer}, is_separated{true} {}
explicit SamplerEntry(u32 index_, std::pair<u32, u32> offsets, std::pair<u32, u32> buffers,
Tegra::Shader::TextureType type_, bool is_array_, bool is_shadow_,
bool is_buffer_)
: index{index_}, offset{offsets.first}, secondary_offset{offsets.second},
buffer{buffers.first}, secondary_buffer{buffers.second}, type{type_}, is_array{is_array_},
is_shadow{is_shadow_}, is_buffer{is_buffer_}, is_separated{true} {}
/// Bindless samplers constructor
constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
: index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
explicit SamplerEntry(u32 index_, u32 offset_, u32 buffer_, Tegra::Shader::TextureType type_,
bool is_array_, bool is_shadow_, bool is_buffer_, bool is_indexed_)
: index{index_}, offset{offset_}, buffer{buffer_}, type{type_}, is_array{is_array_},
is_shadow{is_shadow_}, is_buffer{is_buffer_}, is_bindless{true}, is_indexed{is_indexed_} {
}
u32 index = 0; ///< Emulated index given for this sampler.
u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
@ -338,15 +339,15 @@ struct BindlessSamplerNode {
u32 offset;
};
struct Image {
struct ImageEntry {
public:
/// Bound images constructor
constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
: index{index}, offset{offset}, type{type} {}
explicit ImageEntry(u32 index_, u32 offset_, Tegra::Shader::ImageType type_)
: index{index_}, offset{offset_}, type{type_} {}
/// Bindless samplers constructor
constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
: index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}
explicit ImageEntry(u32 index_, u32 offset_, u32 buffer_, Tegra::Shader::ImageType type_)
: index{index_}, offset{offset_}, buffer{buffer_}, type{type_}, is_bindless{true} {}
void MarkWrite() {
is_written = true;
@ -377,7 +378,7 @@ struct GlobalMemoryBase {
u32 cbuf_index = 0;
u32 cbuf_offset = 0;
bool operator<(const GlobalMemoryBase& rhs) const {
[[nodiscard]] bool operator<(const GlobalMemoryBase& rhs) const {
return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);
}
};
@ -389,7 +390,7 @@ struct MetaArithmetic {
/// Parameters describing a texture sampler
struct MetaTexture {
Sampler sampler;
SamplerEntry sampler;
Node array;
Node depth_compare;
std::vector<Node> aoffi;
@ -403,7 +404,7 @@ struct MetaTexture {
};
struct MetaImage {
const Image& image;
const ImageEntry& image;
std::vector<Node> values;
u32 element{};
};
@ -414,7 +415,7 @@ using Meta =
class AmendNode {
public:
std::optional<std::size_t> GetAmendIndex() const {
[[nodiscard]] std::optional<std::size_t> GetAmendIndex() const {
if (amend_index == amend_null_index) {
return std::nullopt;
}
@ -437,30 +438,30 @@ private:
/// Holds any kind of operation that can be done in the IR
class OperationNode final : public AmendNode {
public:
explicit OperationNode(OperationCode code) : OperationNode(code, Meta{}) {}
explicit OperationNode(OperationCode code_) : OperationNode(code_, Meta{}) {}
explicit OperationNode(OperationCode code, Meta meta)
: OperationNode(code, std::move(meta), std::vector<Node>{}) {}
explicit OperationNode(OperationCode code_, Meta meta_)
: OperationNode(code_, std::move(meta_), std::vector<Node>{}) {}
explicit OperationNode(OperationCode code, std::vector<Node> operands)
: OperationNode(code, Meta{}, std::move(operands)) {}
explicit OperationNode(OperationCode code_, std::vector<Node> operands_)
: OperationNode(code_, Meta{}, std::move(operands_)) {}
explicit OperationNode(OperationCode code, Meta meta, std::vector<Node> operands)
: code{code}, meta{std::move(meta)}, operands{std::move(operands)} {}
explicit OperationNode(OperationCode code_, Meta meta_, std::vector<Node> operands_)
: code{code_}, meta{std::move(meta_)}, operands{std::move(operands_)} {}
template <typename... Args>
explicit OperationNode(OperationCode code, Meta meta, Args&&... operands)
: code{code}, meta{std::move(meta)}, operands{operands...} {}
explicit OperationNode(OperationCode code_, Meta meta_, Args&&... operands_)
: code{code_}, meta{std::move(meta_)}, operands{operands_...} {}
OperationCode GetCode() const {
[[nodiscard]] OperationCode GetCode() const {
return code;
}
const Meta& GetMeta() const {
[[nodiscard]] const Meta& GetMeta() const {
return meta;
}
std::size_t GetOperandsCount() const {
[[nodiscard]] std::size_t GetOperandsCount() const {
return operands.size();
}
@ -472,7 +473,7 @@ public:
return operands;
}
const Node& operator[](std::size_t operand_index) const {
[[nodiscard]] const Node& operator[](std::size_t operand_index) const {
return operands.at(operand_index);
}
@ -485,14 +486,14 @@ private:
/// Encloses inside any kind of node that returns a boolean conditionally-executed code
class ConditionalNode final : public AmendNode {
public:
explicit ConditionalNode(Node condition, std::vector<Node>&& code)
: condition{std::move(condition)}, code{std::move(code)} {}
explicit ConditionalNode(Node condition_, std::vector<Node>&& code_)
: condition{std::move(condition_)}, code{std::move(code_)} {}
const Node& GetCondition() const {
[[nodiscard]] const Node& GetCondition() const {
return condition;
}
const std::vector<Node>& GetCode() const {
[[nodiscard]] const std::vector<Node>& GetCode() const {
return code;
}
@ -504,9 +505,9 @@ private:
/// A general purpose register
class GprNode final {
public:
explicit constexpr GprNode(Tegra::Shader::Register index) : index{index} {}
explicit constexpr GprNode(Tegra::Shader::Register index_) : index{index_} {}
u32 GetIndex() const {
[[nodiscard]] constexpr u32 GetIndex() const {
return static_cast<u32>(index);
}
@ -517,9 +518,9 @@ private:
/// A custom variable
class CustomVarNode final {
public:
explicit constexpr CustomVarNode(u32 index) : index{index} {}
explicit constexpr CustomVarNode(u32 index_) : index{index_} {}
constexpr u32 GetIndex() const {
[[nodiscard]] constexpr u32 GetIndex() const {
return index;
}
@ -530,9 +531,9 @@ private:
/// A 32-bits value that represents an immediate value
class ImmediateNode final {
public:
explicit constexpr ImmediateNode(u32 value) : value{value} {}
explicit constexpr ImmediateNode(u32 value_) : value{value_} {}
u32 GetValue() const {
[[nodiscard]] constexpr u32 GetValue() const {
return value;
}
@ -543,9 +544,9 @@ private:
/// One of Maxwell's internal flags
class InternalFlagNode final {
public:
explicit constexpr InternalFlagNode(InternalFlag flag) : flag{flag} {}
explicit constexpr InternalFlagNode(InternalFlag flag_) : flag{flag_} {}
InternalFlag GetFlag() const {
[[nodiscard]] constexpr InternalFlag GetFlag() const {
return flag;
}
@ -556,14 +557,14 @@ private:
/// A predicate register, it can be negated without additional nodes
class PredicateNode final {
public:
explicit constexpr PredicateNode(Tegra::Shader::Pred index, bool negated)
: index{index}, negated{negated} {}
explicit constexpr PredicateNode(Tegra::Shader::Pred index_, bool negated_)
: index{index_}, negated{negated_} {}
Tegra::Shader::Pred GetIndex() const {
[[nodiscard]] constexpr Tegra::Shader::Pred GetIndex() const {
return index;
}
bool IsNegated() const {
[[nodiscard]] constexpr bool IsNegated() const {
return negated;
}
@ -576,30 +577,30 @@ private:
class AbufNode final {
public:
// Initialize for standard attributes (index is explicit).
explicit AbufNode(Tegra::Shader::Attribute::Index index, u32 element, Node buffer = {})
: buffer{std::move(buffer)}, index{index}, element{element} {}
explicit AbufNode(Tegra::Shader::Attribute::Index index_, u32 element_, Node buffer_ = {})
: buffer{std::move(buffer_)}, index{index_}, element{element_} {}
// Initialize for physical attributes (index is a variable value).
explicit AbufNode(Node physical_address, Node buffer = {})
: physical_address{std::move(physical_address)}, buffer{std::move(buffer)} {}
explicit AbufNode(Node physical_address_, Node buffer_ = {})
: physical_address{std::move(physical_address_)}, buffer{std::move(buffer_)} {}
Tegra::Shader::Attribute::Index GetIndex() const {
[[nodiscard]] Tegra::Shader::Attribute::Index GetIndex() const {
return index;
}
u32 GetElement() const {
[[nodiscard]] u32 GetElement() const {
return element;
}
const Node& GetBuffer() const {
[[nodiscard]] const Node& GetBuffer() const {
return buffer;
}
bool IsPhysicalBuffer() const {
[[nodiscard]] bool IsPhysicalBuffer() const {
return static_cast<bool>(physical_address);
}
const Node& GetPhysicalAddress() const {
[[nodiscard]] const Node& GetPhysicalAddress() const {
return physical_address;
}
@ -613,9 +614,9 @@ private:
/// Patch memory (used to communicate tessellation stages).
class PatchNode final {
public:
explicit PatchNode(u32 offset) : offset{offset} {}
explicit constexpr PatchNode(u32 offset_) : offset{offset_} {}
u32 GetOffset() const {
[[nodiscard]] constexpr u32 GetOffset() const {
return offset;
}
@ -626,13 +627,13 @@ private:
/// Constant buffer node, usually mapped to uniform buffers in GLSL
class CbufNode final {
public:
explicit CbufNode(u32 index, Node offset) : index{index}, offset{std::move(offset)} {}
explicit CbufNode(u32 index_, Node offset_) : index{index_}, offset{std::move(offset_)} {}
u32 GetIndex() const {
[[nodiscard]] u32 GetIndex() const {
return index;
}
const Node& GetOffset() const {
[[nodiscard]] const Node& GetOffset() const {
return offset;
}
@ -644,9 +645,9 @@ private:
/// Local memory node
class LmemNode final {
public:
explicit LmemNode(Node address) : address{std::move(address)} {}
explicit LmemNode(Node address_) : address{std::move(address_)} {}
const Node& GetAddress() const {
[[nodiscard]] const Node& GetAddress() const {
return address;
}
@ -657,9 +658,9 @@ private:
/// Shared memory node
class SmemNode final {
public:
explicit SmemNode(Node address) : address{std::move(address)} {}
explicit SmemNode(Node address_) : address{std::move(address_)} {}
const Node& GetAddress() const {
[[nodiscard]] const Node& GetAddress() const {
return address;
}
@ -670,19 +671,19 @@ private:
/// Global memory node
class GmemNode final {
public:
explicit GmemNode(Node real_address, Node base_address, const GlobalMemoryBase& descriptor)
: real_address{std::move(real_address)}, base_address{std::move(base_address)},
descriptor{descriptor} {}
explicit GmemNode(Node real_address_, Node base_address_, const GlobalMemoryBase& descriptor_)
: real_address{std::move(real_address_)}, base_address{std::move(base_address_)},
descriptor{descriptor_} {}
const Node& GetRealAddress() const {
[[nodiscard]] const Node& GetRealAddress() const {
return real_address;
}
const Node& GetBaseAddress() const {
[[nodiscard]] const Node& GetBaseAddress() const {
return base_address;
}
const GlobalMemoryBase& GetDescriptor() const {
[[nodiscard]] const GlobalMemoryBase& GetDescriptor() const {
return descriptor;
}
@ -695,9 +696,9 @@ private:
/// Commentary, can be dropped
class CommentNode final {
public:
explicit CommentNode(std::string text) : text{std::move(text)} {}
explicit CommentNode(std::string text_) : text{std::move(text_)} {}
const std::string& GetText() const {
[[nodiscard]] const std::string& GetText() const {
return text;
}

View file

@ -107,7 +107,7 @@ OperationCode SignedToUnsignedCode(OperationCode operation_code, bool is_signed)
UNREACHABLE_MSG("Can't apply absolute to an unsigned integer");
return {};
default:
UNREACHABLE_MSG("Unknown signed operation with code={}", static_cast<u32>(operation_code));
UNREACHABLE_MSG("Unknown signed operation with code={}", operation_code);
return {};
}
}
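
Dropping the static_cast<u32> here presumes that fmt can format the enum directly, i.e. that a fmt::formatter specialization for OperationCode exists elsewhere in the tree. If one had to be written, it could look roughly like this sketch (names illustrative, pattern taken from fmt's documentation):

#include <fmt/format.h>

enum class OperationCodeExample { FAdd, FMul };

// Reuse formatter<unsigned>'s parse/format machinery and feed it the
// enum's underlying value.
template <>
struct fmt::formatter<OperationCodeExample> : fmt::formatter<unsigned> {
    template <typename FormatContext>
    auto format(OperationCodeExample code, FormatContext& ctx) {
        return fmt::formatter<unsigned>::format(static_cast<unsigned>(code), ctx);
    }
};

// fmt::format("code={}", OperationCodeExample::FMul) now yields "code=1".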

View file

@ -24,44 +24,45 @@ GraphicsInfo MakeGraphicsInfo(ShaderType shader_stage, ConstBufferEngineInterfac
if (shader_stage == ShaderType::Compute) {
return {};
}
auto& graphics = static_cast<Tegra::Engines::Maxwell3D&>(engine);
GraphicsInfo info;
info.tfb_layouts = graphics.regs.tfb_layouts;
info.tfb_varying_locs = graphics.regs.tfb_varying_locs;
info.primitive_topology = graphics.regs.draw.topology;
info.tessellation_primitive = graphics.regs.tess_mode.prim;
info.tessellation_spacing = graphics.regs.tess_mode.spacing;
info.tfb_enabled = graphics.regs.tfb_enabled;
info.tessellation_clockwise = graphics.regs.tess_mode.cw;
return info;
auto& graphics = dynamic_cast<Tegra::Engines::Maxwell3D&>(engine);
return {
.tfb_layouts = graphics.regs.tfb_layouts,
.tfb_varying_locs = graphics.regs.tfb_varying_locs,
.primitive_topology = graphics.regs.draw.topology,
.tessellation_primitive = graphics.regs.tess_mode.prim,
.tessellation_spacing = graphics.regs.tess_mode.spacing,
.tfb_enabled = graphics.regs.tfb_enabled != 0,
.tessellation_clockwise = graphics.regs.tess_mode.cw.Value() != 0,
};
}
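
The rewrite above swaps member-by-member assignment for C++20 designated initializers, which documents each field at the call site. One constraint worth noting: designators must appear in the members' declaration order, as this minimal sketch with a hypothetical struct shows:

// Hedged sketch: designators must follow the members' declaration order.
struct PointExample {
    int x = 0;
    int y = 0;
};

PointExample ok{.x = 1, .y = 2};  // fine: matches declaration order
// PointExample bad{.y = 2, .x = 1}; // error: designators out of order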
ComputeInfo MakeComputeInfo(ShaderType shader_stage, ConstBufferEngineInterface& engine) {
if (shader_stage != ShaderType::Compute) {
return {};
}
auto& compute = static_cast<Tegra::Engines::KeplerCompute&>(engine);
auto& compute = dynamic_cast<Tegra::Engines::KeplerCompute&>(engine);
const auto& launch = compute.launch_description;
ComputeInfo info;
info.workgroup_size = {launch.block_dim_x, launch.block_dim_y, launch.block_dim_z};
info.local_memory_size_in_words = launch.local_pos_alloc;
info.shared_memory_size_in_words = launch.shared_alloc;
return info;
return {
.workgroup_size = {launch.block_dim_x, launch.block_dim_y, launch.block_dim_z},
.shared_memory_size_in_words = launch.shared_alloc,
.local_memory_size_in_words = launch.local_pos_alloc,
};
}
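
Both helpers also move from static_cast to dynamic_cast on a reference. Unlike the pointer form, a failed reference dynamic_cast throws std::bad_cast, so a mismatched engine type fails loudly instead of invoking undefined behavior. A hedged sketch with stand-in types:

#include <typeinfo>

struct EngineExample { virtual ~EngineExample() = default; };
struct Maxwell3DExample final : EngineExample {};

void Use(EngineExample& engine) {
    try {
        // Throws std::bad_cast if engine is not actually a Maxwell3DExample.
        auto& graphics = dynamic_cast<Maxwell3DExample&>(engine);
        static_cast<void>(graphics);
    } catch (const std::bad_cast&) {
        // A wrong engine type is reported here instead of corrupting state.
    }
}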
} // Anonymous namespace
Registry::Registry(Tegra::Engines::ShaderType shader_stage, const SerializedRegistryInfo& info)
Registry::Registry(ShaderType shader_stage, const SerializedRegistryInfo& info)
: stage{shader_stage}, stored_guest_driver_profile{info.guest_driver_profile},
bound_buffer{info.bound_buffer}, graphics_info{info.graphics}, compute_info{info.compute} {}
Registry::Registry(Tegra::Engines::ShaderType shader_stage,
Tegra::Engines::ConstBufferEngineInterface& engine)
: stage{shader_stage}, engine{&engine}, bound_buffer{engine.GetBoundBuffer()},
graphics_info{MakeGraphicsInfo(shader_stage, engine)}, compute_info{MakeComputeInfo(
shader_stage, engine)} {}
Registry::Registry(ShaderType shader_stage, ConstBufferEngineInterface& engine_)
: stage{shader_stage}, engine{&engine_}, bound_buffer{engine_.GetBoundBuffer()},
graphics_info{MakeGraphicsInfo(shader_stage, engine_)}, compute_info{MakeComputeInfo(
shader_stage, engine_)} {}
Registry::~Registry() = default;
@ -113,8 +114,7 @@ std::optional<Tegra::Engines::SamplerDescriptor> Registry::ObtainSeparateSampler
return value;
}
std::optional<Tegra::Engines::SamplerDescriptor> Registry::ObtainBindlessSampler(u32 buffer,
u32 offset) {
std::optional<SamplerDescriptor> Registry::ObtainBindlessSampler(u32 buffer, u32 offset) {
const std::pair key = {buffer, offset};
const auto iter = bindless_samplers.find(key);
if (iter != bindless_samplers.end()) {

View file

@ -94,7 +94,7 @@ public:
explicit Registry(Tegra::Engines::ShaderType shader_stage, const SerializedRegistryInfo& info);
explicit Registry(Tegra::Engines::ShaderType shader_stage,
Tegra::Engines::ConstBufferEngineInterface& engine);
Tegra::Engines::ConstBufferEngineInterface& engine_);
~Registry();

View file

@ -25,9 +25,10 @@ using Tegra::Shader::PredCondition;
using Tegra::Shader::PredOperation;
using Tegra::Shader::Register;
ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings,
Registry& registry)
: program_code{program_code}, main_offset{main_offset}, settings{settings}, registry{registry} {
ShaderIR::ShaderIR(const ProgramCode& program_code_, u32 main_offset_, CompilerSettings settings_,
Registry& registry_)
: program_code{program_code_}, main_offset{main_offset_}, settings{settings_}, registry{
registry_} {
Decode();
PostDecode();
}
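
The trailing-underscore renames in these constructors (here and in Registry, ConstBuffer, and the node classes above) keep parameters from shadowing the members they initialize. The hazard being avoided, in a minimal hypothetical example:

// Inside the constructor body, the unqualified name refers to the
// parameter, not the member — a classic source of silent bugs.
class Shadowed {
public:
    explicit Shadowed(int value) : value{value} { // legal, but fragile
        value = 42; // assigns the *parameter*; the member stays untouched
    }

private:
    int value;
};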
@ -170,7 +171,7 @@ Node ShaderIR::ConvertIntegerSize(Node value, Register::Size size, bool is_signe
// Default - do nothing
return value;
default:
UNREACHABLE_MSG("Unimplemented conversion size: {}", static_cast<u32>(size));
UNREACHABLE_MSG("Unimplemented conversion size: {}", size);
return value;
}
}
@ -335,15 +336,15 @@ OperationCode ShaderIR::GetPredicateCombiner(PredOperation operation) {
return operation_table[index];
}
Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) const {
Node ShaderIR::GetConditionCode(ConditionCode cc) const {
switch (cc) {
case Tegra::Shader::ConditionCode::NEU:
case ConditionCode::NEU:
return GetInternalFlag(InternalFlag::Zero, true);
case Tegra::Shader::ConditionCode::FCSM_TR:
case ConditionCode::FCSM_TR:
UNIMPLEMENTED_MSG("EXIT.FCSM_TR is not implemented");
return MakeNode<PredicateNode>(Pred::NeverExecute, false);
default:
UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast<u32>(cc));
UNIMPLEMENTED_MSG("Unimplemented condition code: {}", cc);
return MakeNode<PredicateNode>(Pred::NeverExecute, false);
}
}
@ -496,8 +497,8 @@ void ShaderIR::MarkAttributeUsage(Attribute::Index index, u64 element) {
}
std::size_t ShaderIR::DeclareAmend(Node new_amend) {
const std::size_t id = amend_code.size();
amend_code.push_back(new_amend);
const auto id = amend_code.size();
amend_code.push_back(std::move(new_amend));
return id;
}
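
DeclareAmend now moves its argument into the vector. Assuming Node is a reference-counted handle (a shared_ptr-style alias), the move skips the atomic refcount bump a copy would pay for. A sketch under that assumption:

#include <memory>
#include <utility>
#include <vector>

using NodeExample = std::shared_ptr<int>; // stand-in for the IR's Node handle

std::size_t DeclareAmendExample(std::vector<NodeExample>& amend_code, NodeExample new_amend) {
    const auto id = amend_code.size();
    amend_code.push_back(std::move(new_amend)); // move the handle, no refcount bump
    return id;
}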

View file

@ -29,8 +29,8 @@ struct ShaderBlock;
constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
struct ConstBuffer {
constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
: max_offset{max_offset}, is_indirect{is_indirect} {}
constexpr explicit ConstBuffer(u32 max_offset_, bool is_indirect_)
: max_offset{max_offset_}, is_indirect{is_indirect_} {}
constexpr ConstBuffer() = default;
@ -66,8 +66,8 @@ struct GlobalMemoryUsage {
class ShaderIR final {
public:
explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings,
Registry& registry);
explicit ShaderIR(const ProgramCode& program_code_, u32 main_offset_,
CompilerSettings settings_, Registry& registry_);
~ShaderIR();
const std::map<u32, NodeBlock>& GetBasicBlocks() const {
@ -94,11 +94,11 @@ public:
return used_cbufs;
}
const std::list<Sampler>& GetSamplers() const {
const std::list<SamplerEntry>& GetSamplers() const {
return used_samplers;
}
const std::list<Image>& GetImages() const {
const std::list<ImageEntry>& GetImages() const {
return used_images;
}
@ -334,17 +334,17 @@ private:
std::optional<Tegra::Engines::SamplerDescriptor> sampler);
/// Accesses a texture sampler.
std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
std::optional<SamplerEntry> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
/// Accesses a texture sampler for a bindless texture.
std::optional<Sampler> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
Node& index_var);
std::optional<SamplerEntry> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
Node& index_var);
/// Accesses an image.
Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
ImageEntry& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
/// Accesses a bindless image sampler.
Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type);
ImageEntry& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type);
/// Recursively iterates over the OperationNode operands, searching for GprNodes.
void SearchOperands(NodeBlock& nb, Node var);
@ -457,8 +457,8 @@ private:
std::set<Tegra::Shader::Attribute::Index> used_input_attributes;
std::set<Tegra::Shader::Attribute::Index> used_output_attributes;
std::map<u32, ConstBuffer> used_cbufs;
std::list<Sampler> used_samplers;
std::list<Image> used_images;
std::list<SamplerEntry> used_samplers;
std::list<ImageEntry> used_images;
std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{};
std::map<GlobalMemoryBase, GlobalMemoryUsage> used_global_memory;
bool uses_layer{};