global: Use std::optional instead of boost::optional (#1578)
* Get rid of boost::optional
* Remove optional references
* Use std::reference_wrapper for optional references
* Fix clang format
* Fix clang format part 2
* Addressed feedback
* Fix clang format and MacOS build
parent adf26ae668
commit 7a5eda5914
49 changed files with 274 additions and 266 deletions
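The swap is mechanical for value optionals, but optional references need an extra step, because std::optional cannot hold a reference the way boost::optional<const T&> could. A minimal sketch of the two patterns this commit relies on (the names below are illustrative, not taken from the diff):

#include <functional>
#include <optional>

// Value case: boost::optional<VAddr> simply becomes std::optional<VAddr>.
std::optional<int> FindValue(bool found) {
    if (!found) {
        return std::nullopt;
    }
    return 42;
}

// Reference case: boost::optional<const Entry&> becomes
// std::optional<std::reference_wrapper<const Entry>>.
struct Entry {
    int id;
};

std::optional<std::reference_wrapper<const Entry>> FindEntry(const Entry& entry, bool found) {
    if (!found) {
        return std::nullopt;
    }
    return std::cref(entry); // wrap the reference so the optional can store it
}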
@@ -81,7 +81,7 @@ void GPU::ProcessCommandLists(const std::vector<CommandListHeader>& commands) {
 for (auto entry : commands) {
 Tegra::GPUVAddr address = entry.Address();
 u32 size = entry.sz;
-const boost::optional<VAddr> head_address = memory_manager->GpuToCpuAddress(address);
+const std::optional<VAddr> head_address = memory_manager->GpuToCpuAddress(address);
 VAddr current_addr = *head_address;
 while (current_addr < *head_address + size * sizeof(CommandHeader)) {
 const CommandHeader header = {Memory::Read32(current_addr)};

@@ -167,7 +167,7 @@ void Maxwell3D::ProcessQueryGet() {
 GPUVAddr sequence_address = regs.query.QueryAddress();
 // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
 // VAddr before writing.
-boost::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);
+std::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);
 
 // TODO(Subv): Support the other query units.
 ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,

@@ -285,7 +285,7 @@ void Maxwell3D::ProcessCBData(u32 value) {
 // Don't allow writing past the end of the buffer.
 ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
 
-boost::optional<VAddr> address =
+std::optional<VAddr> address =
 memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
 
 Memory::Write32(*address, value);

@@ -298,7 +298,7 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
 GPUVAddr tic_base_address = regs.tic.TICAddress();
 
 GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
-boost::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
+std::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
 
 Texture::TICEntry tic_entry;
 Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));

@@ -322,7 +322,7 @@ Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
 GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
 
 GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
-boost::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
+std::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
 
 Texture::TSCEntry tsc_entry;
 Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));

@@ -386,7 +386,7 @@ Texture::FullTextureInfo Maxwell3D::GetStageTexture(Regs::ShaderStage stage,
 
 ASSERT(tex_info_address < tex_info_buffer.address + tex_info_buffer.size);
 
-boost::optional<VAddr> tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address);
+std::optional<VAddr> tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address);
 Texture::TextureHandle tex_handle{Memory::Read32(*tex_address_cpu)};
 
 Texture::FullTextureInfo tex_info{};

@@ -5,12 +5,11 @@
 #pragma once
 
 #include <bitset>
+#include <optional>
 #include <string>
 #include <tuple>
 #include <vector>
 
-#include <boost/optional.hpp>
-
 #include "common/assert.h"
 #include "common/bit_field.h"
 #include "common/common_types.h"

@@ -1456,7 +1455,7 @@ public:
 Type type;
 };
 
-static boost::optional<const Matcher&> Decode(Instruction instr) {
+static std::optional<std::reference_wrapper<const Matcher>> Decode(Instruction instr) {
 static const auto table{GetDecodeTable()};
 
 const auto matches_instruction = [instr](const auto& matcher) {

@@ -1464,7 +1463,8 @@ public:
 };
 
 auto iter = std::find_if(table.begin(), table.end(), matches_instruction);
-return iter != table.end() ? boost::optional<const Matcher&>(*iter) : boost::none;
+return iter != table.end() ? std::optional<std::reference_wrapper<const Matcher>>(*iter)
+: std::nullopt;
 }
 
 private:
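A note on the Decode() change above: boost::optional<const Matcher&> forwarded operator-> straight to the referenced Matcher, so callers wrote opcode->GetId(). With std::optional<std::reference_wrapper<const Matcher>>, operator-> stops at the reference_wrapper, so callers add one .get() hop, which is exactly what the gl_shader_decompiler.cpp hunks further down do. A minimal sketch of the consuming side, using a simplified stand-in Matcher rather than the real one:

#include <functional>
#include <optional>

struct Matcher {
    int GetId() const { return 0; } // stand-in for the real Matcher interface
};

int DecodeId(std::optional<std::reference_wrapper<const Matcher>> opcode) {
    if (!opcode) {
        return -1; // no matching instruction
    }
    // operator-> reaches the reference_wrapper; get() reaches the Matcher itself.
    return opcode->get().GetId();
}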
@@ -29,7 +29,7 @@ void MacroInterpreter::Execute(const std::vector<u32>& code, std::vector<u32> pa
 void MacroInterpreter::Reset() {
 registers = {};
 pc = 0;
-delayed_pc = boost::none;
+delayed_pc = {};
 method_address.raw = 0;
 parameters.clear();
 // The next parameter index starts at 1, because $r1 already has the value of the first

@@ -44,10 +44,10 @@ bool MacroInterpreter::Step(const std::vector<u32>& code, bool is_delay_slot) {
 pc += 4;
 
 // Update the program counter if we were delayed
-if (delayed_pc != boost::none) {
+if (delayed_pc) {
 ASSERT(is_delay_slot);
 pc = *delayed_pc;
-delayed_pc = boost::none;
+delayed_pc = {};
 }
 
 switch (opcode.operation) {

@@ -5,8 +5,9 @@
 #pragma once
 
 #include <array>
+#include <optional>
 #include <vector>
-#include <boost/optional.hpp>
+
 #include "common/bit_field.h"
 #include "common/common_types.h"
 

@@ -149,7 +150,7 @@ private:
 Engines::Maxwell3D& maxwell3d;
 
 u32 pc; ///< Current program counter
-boost::optional<u32>
+std::optional<u32>
 delayed_pc; ///< Program counter to execute at after the delay slot is executed.
 
 static constexpr std::size_t NumMacroRegisters = 8;

@@ -9,7 +9,7 @@
 namespace Tegra {
 
 GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
-boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, align);
+std::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, align);
 ASSERT(gpu_addr);
 
 for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {

@@ -34,7 +34,7 @@ GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
 }
 
 GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
-boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, PAGE_SIZE);
+std::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, PAGE_SIZE);
 ASSERT(gpu_addr);
 
 for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {

@@ -97,7 +97,7 @@ GPUVAddr MemoryManager::GetRegionEnd(GPUVAddr region_start) const {
 return {};
 }
 
-boost::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
+std::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
 GPUVAddr gpu_addr = 0;
 u64 free_space = 0;
 align = (align + PAGE_MASK) & ~PAGE_MASK;

@@ -118,7 +118,7 @@ boost::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
 return {};
 }
 
-boost::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
 VAddr base_addr = PageSlot(gpu_addr);
 
 if (base_addr == static_cast<u64>(PageStatus::Allocated) ||

@@ -6,10 +6,9 @@
 
 #include <array>
 #include <memory>
+#include <optional>
 #include <vector>
 
-#include <boost/optional.hpp>
-
 #include "common/common_types.h"
 
 namespace Tegra {

@@ -27,7 +26,7 @@ public:
 GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
 GPUVAddr UnmapBuffer(GPUVAddr gpu_addr, u64 size);
 GPUVAddr GetRegionEnd(GPUVAddr region_start) const;
-boost::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
+std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
 std::vector<GPUVAddr> CpuToGpuAddress(VAddr cpu_addr) const;
 
 static constexpr u64 PAGE_BITS = 16;

@@ -35,7 +34,7 @@ public:
 static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
 
 private:
-boost::optional<GPUVAddr> FindFreeBlock(u64 size, u64 align = 1);
+std::optional<GPUVAddr> FindFreeBlock(u64 size, u64 align = 1);
 bool IsPageMapped(GPUVAddr gpu_addr);
 VAddr& PageSlot(GPUVAddr gpu_addr);
 

@@ -6,7 +6,8 @@
 
 #include <atomic>
 #include <memory>
-#include <boost/optional.hpp>
+#include <optional>
+
 #include "common/common_types.h"
 #include "video_core/gpu.h"
 #include "video_core/rasterizer_interface.h"

@@ -28,7 +29,8 @@ public:
 virtual ~RendererBase();
 
 /// Swap buffers (render frame)
-virtual void SwapBuffers(boost::optional<const Tegra::FramebufferConfig&> framebuffer) = 0;
+virtual void SwapBuffers(
+std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0;
 
 /// Initialize the renderer
 virtual bool Init() = 0;

@@ -17,7 +17,7 @@ OGLBufferCache::OGLBufferCache(std::size_t size) : stream_buffer(GL_ARRAY_BUFFER
 GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size,
 std::size_t alignment, bool cache) {
 auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-const boost::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+const std::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
 
 // Cache management is a big overhead, so only cache entries with a given size.
 // TODO: Figure out which size is the best for given games.

@@ -45,7 +45,7 @@ GLintptr PrimitiveAssembler::MakeQuadIndexed(Tegra::GPUVAddr gpu_addr, std::size
 auto [dst_pointer, index_offset] = buffer_cache.ReserveMemory(map_size);
 
 auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-const boost::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+const std::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
 const u8* source{Memory::GetPointer(*cpu_addr)};
 
 for (u32 primitive = 0; primitive < count / 4; ++primitive) {

@@ -401,7 +401,7 @@ void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
 
 void RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb, bool using_depth_fb,
 bool preserve_contents,
-boost::optional<std::size_t> single_color_target) {
+std::optional<std::size_t> single_color_target) {
 MICROPROFILE_SCOPE(OpenGL_Framebuffer);
 const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
 

@@ -8,12 +8,12 @@
 #include <cstddef>
 #include <map>
 #include <memory>
+#include <optional>
 #include <tuple>
 #include <utility>
 #include <vector>
 
 #include <boost/icl/interval_map.hpp>
-#include <boost/optional.hpp>
 #include <boost/range/iterator_range.hpp>
 #include <glad/glad.h>
 

@@ -111,7 +111,7 @@ private:
 */
 void ConfigureFramebuffers(bool use_color_fb = true, bool using_depth_fb = true,
 bool preserve_contents = true,
-boost::optional<std::size_t> single_color_target = {});
+std::optional<std::size_t> single_color_target = {});
 
 /*
 * Configures the current constbuffers to use for the draw command.

@@ -3,12 +3,12 @@
 // Refer to the license.txt file included.
 
 #include <map>
+#include <optional>
 #include <set>
 #include <string>
 #include <string_view>
 #include <unordered_set>
 
-#include <boost/optional.hpp>
 #include <fmt/format.h>
 
 #include "common/assert.h"

@@ -144,7 +144,7 @@ private:
 for (u32 offset = begin; offset != end && offset != PROGRAM_END; ++offset) {
 const Instruction instr = {program_code[offset]};
 if (const auto opcode = OpCode::Decode(instr)) {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::EXIT: {
 // The EXIT instruction can be predicated, which means that the shader can
 // conditionally end on this instruction. We have to consider the case where the

@@ -430,7 +430,7 @@ public:
 */
 void SetRegisterToInputAttibute(const Register& reg, u64 elem, Attribute::Index attribute,
 const Tegra::Shader::IpaMode& input_mode,
-boost::optional<Register> vertex = {}) {
+std::optional<Register> vertex = {}) {
 const std::string dest = GetRegisterAsFloat(reg);
 const std::string src = GetInputAttribute(attribute, input_mode, vertex) + GetSwizzle(elem);
 shader.AddLine(dest + " = " + src + ';');

@@ -807,10 +807,10 @@ private:
 /// Generates code representing an input attribute register.
 std::string GetInputAttribute(Attribute::Index attribute,
 const Tegra::Shader::IpaMode& input_mode,
-boost::optional<Register> vertex = {}) {
+std::optional<Register> vertex = {}) {
 auto GeometryPass = [&](const std::string& name) {
 if (stage == Maxwell3D::Regs::ShaderStage::Geometry && vertex) {
-return "gs_" + name + '[' + GetRegisterAsInteger(vertex.value(), 0, false) + ']';
+return "gs_" + name + '[' + GetRegisterAsInteger(*vertex, 0, false) + ']';
 }
 return name;
 };

@@ -1465,7 +1465,7 @@ private:
 }
 
 shader.AddLine(
-fmt::format("// {}: {} (0x{:016x})", offset, opcode->GetName(), instr.value));
+fmt::format("// {}: {} (0x{:016x})", offset, opcode->get().GetName(), instr.value));
 
 using Tegra::Shader::Pred;
 ASSERT_MSG(instr.pred.full_pred != Pred::NeverExecute,

@@ -1473,7 +1473,7 @@ private:
 
 // Some instructions (like SSY) don't have a predicate field, they are always
 // unconditionally executed.
-bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->GetId());
+bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->get().GetId());
 
 if (can_be_predicated && instr.pred.pred_index != static_cast<u64>(Pred::UnusedIndex)) {
 shader.AddLine("if (" +

@@ -1483,7 +1483,7 @@ private:
 ++shader.scope;
 }
 
-switch (opcode->GetType()) {
+switch (opcode->get().GetType()) {
 case OpCode::Type::Arithmetic: {
 std::string op_a = regs.GetRegisterAsFloat(instr.gpr8);
 

@@ -1500,7 +1500,7 @@ private:
 }
 }
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::MOV_C:
 case OpCode::Id::MOV_R: {
 // MOV does not have neither 'abs' nor 'neg' bits.

@@ -1600,14 +1600,15 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled arithmetic instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled arithmetic instruction: {}",
+opcode->get().GetName());
 UNREACHABLE();
 }
 }
 break;
 }
 case OpCode::Type::ArithmeticImmediate: {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::MOV32_IMM: {
 regs.SetRegisterToFloat(instr.gpr0, 0, GetImmediate32(instr), 1, 1);
 break;

@@ -1651,7 +1652,7 @@ private:
 std::string op_a = instr.bfe.negate_a ? "-" : "";
 op_a += regs.GetRegisterAsInteger(instr.gpr8);
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::BFE_IMM: {
 std::string inner_shift =
 '(' + op_a + " << " + std::to_string(instr.bfe.GetLeftShiftValue()) + ')';

@@ -1663,7 +1664,7 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled BFE instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled BFE instruction: {}", opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -1685,7 +1686,7 @@ private:
 }
 }
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::SHR_C:
 case OpCode::Id::SHR_R:
 case OpCode::Id::SHR_IMM: {

@@ -1705,7 +1706,7 @@ private:
 regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " << " + op_b, 1, 1);
 break;
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled shift instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled shift instruction: {}", opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -1715,7 +1716,7 @@ private:
 std::string op_a = regs.GetRegisterAsInteger(instr.gpr8);
 std::string op_b = std::to_string(instr.alu.imm20_32.Value());
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::IADD32I:
 if (instr.iadd32i.negate_a)
 op_a = "-(" + op_a + ')';

@@ -1737,7 +1738,7 @@ private:
 }
 default: {
 LOG_CRITICAL(HW_GPU, "Unhandled ArithmeticIntegerImmediate instruction: {}",
-opcode->GetName());
+opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -1757,7 +1758,7 @@ private:
 }
 }
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::IADD_C:
 case OpCode::Id::IADD_R:
 case OpCode::Id::IADD_IMM: {

@@ -1793,7 +1794,7 @@ private:
 }
 };
 
-if (opcode->GetId() == OpCode::Id::IADD3_R) {
+if (opcode->get().GetId() == OpCode::Id::IADD3_R) {
 apply_height(instr.iadd3.height_a, op_a);
 apply_height(instr.iadd3.height_b, op_b);
 apply_height(instr.iadd3.height_c, op_c);

@@ -1809,7 +1810,7 @@ private:
 op_c = "-(" + op_c + ')';
 
 std::string result;
-if (opcode->GetId() == OpCode::Id::IADD3_R) {
+if (opcode->get().GetId() == OpCode::Id::IADD3_R) {
 switch (instr.iadd3.mode) {
 case Tegra::Shader::IAdd3Mode::RightShift:
 // TODO(tech4me): According to

@@ -1884,7 +1885,7 @@ private:
 const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39);
 std::string lut;
 
-if (opcode->GetId() == OpCode::Id::LOP3_R) {
+if (opcode->get().GetId() == OpCode::Id::LOP3_R) {
 lut = '(' + std::to_string(instr.alu.lop3.GetImmLut28()) + ')';
 } else {
 lut = '(' + std::to_string(instr.alu.lop3.GetImmLut48()) + ')';

@@ -1914,7 +1915,7 @@ private:
 case OpCode::Id::LEA_HI: {
 std::string op_c;
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::LEA_R2: {
 op_a = regs.GetRegisterAsInteger(instr.gpr20);
 op_b = regs.GetRegisterAsInteger(instr.gpr39);

@@ -1959,7 +1960,8 @@ private:
 op_b = regs.GetRegisterAsInteger(instr.gpr8);
 op_a = std::to_string(instr.lea.imm.entry_a);
 op_c = std::to_string(instr.lea.imm.entry_b);
-LOG_CRITICAL(HW_GPU, "Unhandled LEA subinstruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled LEA subinstruction: {}",
+opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -1974,7 +1976,7 @@ private:
 }
 default: {
 LOG_CRITICAL(HW_GPU, "Unhandled ArithmeticInteger instruction: {}",
-opcode->GetName());
+opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -1982,20 +1984,21 @@ private:
 break;
 }
 case OpCode::Type::ArithmeticHalf: {
-if (opcode->GetId() == OpCode::Id::HADD2_C || opcode->GetId() == OpCode::Id::HADD2_R) {
+if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
+opcode->get().GetId() == OpCode::Id::HADD2_R) {
 ASSERT_MSG(instr.alu_half.ftz == 0, "Unimplemented");
 }
 const bool negate_a =
-opcode->GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
+opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
 const bool negate_b =
-opcode->GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
+opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
 
 const std::string op_a =
 GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half.type_a,
 instr.alu_half.abs_a != 0, negate_a);
 
 std::string op_b;
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::HADD2_C:
 case OpCode::Id::HMUL2_C:
 op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,

@@ -2013,7 +2016,7 @@ private:
 op_b = GetHalfFloat(op_b, instr.alu_half.type_b, instr.alu_half.abs_b != 0, negate_b);
 
 const std::string result = [&]() {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::HADD2_C:
 case OpCode::Id::HADD2_R:
 return '(' + op_a + " + " + op_b + ')';

@@ -2021,7 +2024,8 @@ private:
 case OpCode::Id::HMUL2_R:
 return '(' + op_a + " * " + op_b + ')';
 default:
-LOG_CRITICAL(HW_GPU, "Unhandled half float instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled half float instruction: {}",
+opcode->get().GetName());
 UNREACHABLE();
 return std::string("0");
 }

@@ -2032,7 +2036,7 @@ private:
 break;
 }
 case OpCode::Type::ArithmeticHalfImmediate: {
-if (opcode->GetId() == OpCode::Id::HADD2_IMM) {
+if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) {
 ASSERT_MSG(instr.alu_half_imm.ftz == 0, "Unimplemented");
 } else {
 ASSERT_MSG(instr.alu_half_imm.precision == Tegra::Shader::HalfPrecision::None,

@@ -2046,7 +2050,7 @@ private:
 const std::string op_b = UnpackHalfImmediate(instr, true);
 
 const std::string result = [&]() {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::HADD2_IMM:
 return op_a + " + " + op_b;
 case OpCode::Id::HMUL2_IMM:

@@ -2072,7 +2076,7 @@ private:
 ASSERT_MSG(instr.ffma.tab5980_1 == 0, "FFMA tab5980_1({}) not implemented",
 instr.ffma.tab5980_1.Value());
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::FFMA_CR: {
 op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
 GLSLRegister::Type::Float);

@@ -2096,7 +2100,7 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled FFMA instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled FFMA instruction: {}", opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -2107,14 +2111,14 @@ private:
 break;
 }
 case OpCode::Type::Hfma2: {
-if (opcode->GetId() == OpCode::Id::HFMA2_RR) {
+if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) {
 ASSERT_MSG(instr.hfma2.rr.precision == Tegra::Shader::HalfPrecision::None,
 "Unimplemented");
 } else {
 ASSERT_MSG(instr.hfma2.precision == Tegra::Shader::HalfPrecision::None,
 "Unimplemented");
 }
-const bool saturate = opcode->GetId() == OpCode::Id::HFMA2_RR
+const bool saturate = opcode->get().GetId() == OpCode::Id::HFMA2_RR
 ? instr.hfma2.rr.saturate != 0
 : instr.hfma2.saturate != 0;
 

@@ -2122,7 +2126,7 @@ private:
 GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hfma2.type_a);
 std::string op_b, op_c;
 
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::HFMA2_CR:
 op_b = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
 GLSLRegister::Type::UnsignedInteger),

@@ -2160,7 +2164,7 @@ private:
 break;
 }
 case OpCode::Type::Conversion: {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::I2I_R: {
 ASSERT_MSG(!instr.conversion.selector, "Unimplemented");
 

@@ -2298,14 +2302,15 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled conversion instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled conversion instruction: {}",
+opcode->get().GetName());
 UNREACHABLE();
 }
 }
 break;
 }
 case OpCode::Type::Memory: {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::LD_A: {
 // Note: Shouldn't this be interp mode flat? As in no interpolation made.
 ASSERT_MSG(instr.gpr8.Value() == Register::ZeroIndex,

@@ -2949,7 +2954,7 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled memory instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled memory instruction: {}", opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -3043,7 +3048,7 @@ private:
 instr.hsetp2.abs_a, instr.hsetp2.negate_a);
 
 const std::string op_b = [&]() {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::HSETP2_R:
 return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
 instr.hsetp2.type_b, instr.hsetp2.abs_a,

@@ -3105,7 +3110,7 @@ private:
 break;
 }
 case OpCode::Type::PredicateSetPredicate: {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::PSETP: {
 const std::string op_a =
 GetPredicateCondition(instr.psetp.pred12, instr.psetp.neg_pred12 != 0);

@@ -3151,7 +3156,8 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled predicate instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled predicate instruction: {}",
+opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -3239,7 +3245,7 @@ private:
 instr.hset2.abs_a != 0, instr.hset2.negate_a != 0);
 
 const std::string op_b = [&]() {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::HSET2_R:
 return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
 instr.hset2.type_b, instr.hset2.abs_b != 0,

@@ -3288,7 +3294,7 @@ private:
 const bool is_signed{instr.xmad.sign_a == 1};
 
 bool is_merge{};
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::XMAD_CR: {
 is_merge = instr.xmad.merge_56;
 op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,

@@ -3317,7 +3323,7 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled XMAD instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled XMAD instruction: {}", opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -3369,7 +3375,7 @@ private:
 break;
 }
 default: {
-switch (opcode->GetId()) {
+switch (opcode->get().GetId()) {
 case OpCode::Id::EXIT: {
 if (stage == Maxwell3D::Regs::ShaderStage::Fragment) {
 EmitFragmentOutputsWrite();

@@ -3564,7 +3570,7 @@ private:
 break;
 }
 default: {
-LOG_CRITICAL(HW_GPU, "Unhandled instruction: {}", opcode->GetName());
+LOG_CRITICAL(HW_GPU, "Unhandled instruction: {}", opcode->get().GetName());
 UNREACHABLE();
 }
 }

@@ -3705,9 +3711,9 @@ std::string GetCommonDeclarations() {
 RasterizerOpenGL::MaxConstbufferSize / sizeof(GLvec4));
 }
 
-boost::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u32 main_offset,
-Maxwell3D::Regs::ShaderStage stage,
-const std::string& suffix) {
+std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u32 main_offset,
+Maxwell3D::Regs::ShaderStage stage,
+const std::string& suffix) {
 try {
 const auto subroutines =
 ControlFlowAnalyzer(program_code, main_offset, suffix).GetSubroutines();

@@ -3716,7 +3722,7 @@ boost::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code,
 } catch (const DecompileFail& exception) {
 LOG_ERROR(HW_GPU, "Shader decompilation failed: {}", exception.what());
 }
-return boost::none;
+return {};
 }
 
 } // namespace OpenGL::GLShader::Decompiler

@@ -6,8 +6,8 @@
 
 #include <array>
 #include <functional>
+#include <optional>
 #include <string>
-#include <boost/optional.hpp>
 #include "common/common_types.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/renderer_opengl/gl_shader_gen.h"

@@ -18,8 +18,8 @@ using Tegra::Engines::Maxwell3D;
 
 std::string GetCommonDeclarations();
 
-boost::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u32 main_offset,
-Maxwell3D::Regs::ShaderStage stage,
-const std::string& suffix);
+std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u32 main_offset,
+Maxwell3D::Regs::ShaderStage stage,
+const std::string& suffix);
 
 } // namespace OpenGL::GLShader::Decompiler

@@ -37,7 +37,7 @@ layout(std140) uniform vs_config {
 ProgramResult program =
 Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET,
 Maxwell3D::Regs::ShaderStage::Vertex, "vertex")
-.get_value_or({});
+.value_or(ProgramResult());
 
 out += program.first;
 

@@ -45,7 +45,7 @@ layout(std140) uniform vs_config {
 ProgramResult program_b =
 Decompiler::DecompileProgram(setup.program.code_b, PROGRAM_OFFSET,
 Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b")
-.get_value_or({});
+.value_or(ProgramResult());
 out += program_b.first;
 }
 

@@ -90,7 +90,7 @@ ProgramResult GenerateGeometryShader(const ShaderSetup& setup) {
 ProgramResult program =
 Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET,
 Maxwell3D::Regs::ShaderStage::Geometry, "geometry")
-.get_value_or({});
+.value_or(ProgramResult());
 out += R"(
 out gl_PerVertex {
 vec4 gl_Position;

@@ -124,7 +124,7 @@ ProgramResult GenerateFragmentShader(const ShaderSetup& setup) {
 ProgramResult program =
 Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET,
 Maxwell3D::Regs::ShaderStage::Fragment, "fragment")
-.get_value_or({});
+.value_or(ProgramResult());
 out += R"(
 layout(location = 0) out vec4 FragColor0;
 layout(location = 1) out vec4 FragColor1;
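One subtle difference shows up in the gl_shader_gen.cpp hunks above: boost::optional::get_value_or takes its default by const reference, so a bare {} could materialize the default, while std::optional::value_or deduces the type of its argument and rejects a braced-init-list, hence the explicit ProgramResult(). A small illustration, with ProgramResult replaced by a simplified stand-in type:

#include <optional>
#include <string>
#include <utility>

using ProgramResult = std::pair<std::string, int>; // simplified stand-in

ProgramResult Unwrap(const std::optional<ProgramResult>& program) {
    // return program.value_or({}); // does not compile: {} cannot be deduced
    return program.value_or(ProgramResult());
}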
@@ -115,7 +115,8 @@ RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& window)
 RendererOpenGL::~RendererOpenGL() = default;
 
 /// Swap buffers (render frame)
-void RendererOpenGL::SwapBuffers(boost::optional<const Tegra::FramebufferConfig&> framebuffer) {
+void RendererOpenGL::SwapBuffers(
+std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
 ScopeAcquireGLContext acquire_context{render_window};
 
 Core::System::GetInstance().GetPerfStats().EndSystemFrame();

@@ -124,11 +125,11 @@ void RendererOpenGL::SwapBuffers(boost::optional<const Tegra::FramebufferConfig&
 OpenGLState prev_state = OpenGLState::GetCurState();
 state.Apply();
 
-if (framebuffer != boost::none) {
+if (framebuffer) {
 // If framebuffer is provided, reload it from memory to a texture
-if (screen_info.texture.width != (GLsizei)framebuffer->width ||
-screen_info.texture.height != (GLsizei)framebuffer->height ||
-screen_info.texture.pixel_format != framebuffer->pixel_format) {
+if (screen_info.texture.width != (GLsizei)framebuffer->get().width ||
+screen_info.texture.height != (GLsizei)framebuffer->get().height ||
+screen_info.texture.pixel_format != framebuffer->get().pixel_format) {
 // Reallocate texture if the framebuffer size has changed.
 // This is expected to not happen very often and hence should not be a
 // performance problem.

@@ -51,7 +51,8 @@ public:
 ~RendererOpenGL() override;
 
 /// Swap buffers (render frame)
-void SwapBuffers(boost::optional<const Tegra::FramebufferConfig&> framebuffer) override;
+void SwapBuffers(
+std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
 
 /// Initialize the renderer
 bool Init() override;
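For completeness, an illustrative caller of the new SwapBuffers signature: std::cref builds the reference_wrapper explicitly and std::nullopt covers the no-framebuffer case. This sketch assumes only the declarations shown above and is not code from the commit.

#include <functional>
#include <optional>

#include "video_core/gpu.h" // Tegra::FramebufferConfig, as included in the diff above

// Illustrative only: Renderer stands for any type with the SwapBuffers overload shown above.
template <typename Renderer>
void Present(Renderer& renderer, const Tegra::FramebufferConfig& config, bool has_frame) {
    if (has_frame) {
        renderer.SwapBuffers(std::cref(config)); // wrap the config by reference
    } else {
        renderer.SwapBuffers(std::nullopt);      // no framebuffer this frame
    }
}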