video_core: Reimplement the buffer cache

Reimplement the buffer cache using cached bindings and page-level
granularity for modification tracking. This also drops the use of
shared pointers and virtual functions from the cache.
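
As an illustration of the page-level tracking idea (and not the commit's
actual types), here is a minimal C++ sketch that keeps one dirty bit per
page of a buffer and walks contiguous dirty runs when uploading; the
Buffer class, its member names and the 4 KiB page size are hypothetical:

    #include <cstdint>
    #include <vector>

    constexpr std::uint64_t PAGE_BITS = 12;          // 4 KiB pages (illustrative)
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

    class Buffer {
    public:
        Buffer(std::uint64_t cpu_addr_, std::uint64_t size_bytes)
            : cpu_addr{cpu_addr_},
              cpu_modified((size_bytes + PAGE_SIZE - 1) >> PAGE_BITS, false) {}

        // Mark [addr, addr + size) as written by the CPU, at page granularity.
        void MarkRegionAsCpuModified(std::uint64_t addr, std::uint64_t size) {
            const std::uint64_t first = (addr - cpu_addr) >> PAGE_BITS;
            const std::uint64_t last = (addr - cpu_addr + size + PAGE_SIZE - 1) >> PAGE_BITS;
            for (std::uint64_t page = first; page < last; ++page) {
                cpu_modified[page] = true;
            }
        }

        // Visit contiguous dirty ranges and clear them, e.g. to upload only the
        // touched pages to the GPU copy of this buffer.
        template <typename Func>
        void ForEachModifiedRange(Func&& func) {
            std::uint64_t page = 0;
            while (page < cpu_modified.size()) {
                if (!cpu_modified[page]) {
                    ++page;
                    continue;
                }
                const std::uint64_t first = page;
                while (page < cpu_modified.size() && cpu_modified[page]) {
                    cpu_modified[page] = false;
                    ++page;
                }
                func(cpu_addr + (first << PAGE_BITS), (page - first) << PAGE_BITS);
            }
        }

    private:
        std::uint64_t cpu_addr;            // base CPU address covered by this buffer
        std::vector<bool> cpu_modified;    // one dirty bit per page
    };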

- Bindings are cached, allowing the cache to skip work when the game
  changes only a few bits between draws.
- OpenGL Assembly shaders no longer copy when a region has been modified
  by the GPU to emulate constant buffers; instead, GL_EXT_memory_object
  is used to alias sub-buffers within the same allocation.
- OpenGL Assembly shaders stream constant buffer data using
  glProgramBufferParametersIuivNV, from NV_parameter_buffer_object. In
  theory this should save one hash table resolve inside the driver
  compared to glBufferSubData.
- A new fence-based OpenGL stream buffer is implemented for drivers other
  than Nvidia's proprietary one, due to their low performance on partial
  glBufferSubData calls synchronized with 3D rendering (which some games
  use heavily); see the sketch after this list.
- Most optimizations are now shared between APIs, allowing Vulkan to
  cache more bindings than before and skip unnecessary work.
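
The sketch referenced above: a minimal fence-based streaming ring buffer
in C++/OpenGL, assuming an already-initialized GL 4.5 context and a
loader such as glad. The class name, region sizes and the missing
alignment handling are illustrative only, not the implementation added
by this commit:

    #include <glad/glad.h>

    #include <array>
    #include <cstddef>
    #include <cstring>

    class FenceStreamBuffer {
    public:
        static constexpr std::size_t REGION_SIZE = 1 << 20; // 1 MiB regions (illustrative)
        static constexpr std::size_t NUM_REGIONS = 4;       // ring of 4 regions

        FenceStreamBuffer() {
            glCreateBuffers(1, &buffer);
            // Persistently mapped storage, so uploads are plain memcpys instead of
            // glBufferSubData calls that may synchronize with rendering.
            constexpr GLbitfield flags =
                GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;
            glNamedBufferStorage(buffer, REGION_SIZE * NUM_REGIONS, nullptr, flags);
            mapped = static_cast<std::byte*>(
                glMapNamedBufferRange(buffer, 0, REGION_SIZE * NUM_REGIONS, flags));
        }

        // Copies 'size' bytes (size <= REGION_SIZE) and returns the offset they were
        // written to, suitable for glBindBufferRange and friends.
        std::size_t Upload(const void* data, std::size_t size) {
            if (offset + size > (region + 1) * REGION_SIZE) {
                // Leaving the current region: fence it so we know when the GPU has
                // consumed everything written there.
                if (fences[region]) {
                    glDeleteSync(fences[region]);
                }
                fences[region] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
                region = (region + 1) % NUM_REGIONS;
                offset = region * REGION_SIZE;
                // Block until the GPU is done with the region we are about to reuse.
                while (fences[region] &&
                       glClientWaitSync(fences[region], GL_SYNC_FLUSH_COMMANDS_BIT,
                                        1'000'000) == GL_TIMEOUT_EXPIRED) {
                }
                if (fences[region]) {
                    glDeleteSync(fences[region]);
                    fences[region] = nullptr;
                }
            }
            std::memcpy(mapped + offset, data, size);
            const std::size_t write_offset = offset;
            offset += size;
            return write_offset;
        }

        GLuint Handle() const { return buffer; }

    private:
        GLuint buffer = 0;
        std::byte* mapped = nullptr;
        std::size_t region = 0;
        std::size_t offset = 0;
        std::array<GLsync, NUM_REGIONS> fences{};
    };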

This commit adds the necessary infrastructure to use Vulkan objects from
OpenGL. Overall, it improves performance and fixes some bugs present in
the old cache. There are still some edge cases hit by some games that
harm performance on some vendors; these are planned to be fixed in later
commits.
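
For reference, a rough sketch of the OpenGL/Vulkan memory-sharing path
this infrastructure builds on (GL_EXT_memory_object plus
VK_KHR_external_memory_fd, i.e. the Linux opaque-fd flavour; Windows
uses the win32-handle variants). The helper below is hypothetical, error
handling is omitted, the GL loader is assumed to expose the EXT entry
points, and the VkDeviceMemory is assumed to have been allocated with
VkExportMemoryAllocateInfo requesting OPAQUE_FD export:

    #include <glad/glad.h>
    #include <vulkan/vulkan.h>

    // Creates a GL buffer whose storage aliases a sub-range of an existing
    // Vulkan allocation, instead of copying data between the two APIs.
    GLuint CreateGLBufferAliasingVulkanMemory(VkDevice device, VkDeviceMemory memory,
                                              VkDeviceSize allocation_size,
                                              VkDeviceSize sub_offset,
                                              VkDeviceSize sub_size) {
        // 1. Export the Vulkan allocation as an opaque file descriptor.
        //    vkGetMemoryFdKHR usually has to be loaded through vkGetDeviceProcAddr.
        const auto pfn_vkGetMemoryFdKHR = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
            vkGetDeviceProcAddr(device, "vkGetMemoryFdKHR"));
        VkMemoryGetFdInfoKHR fd_info{};
        fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
        fd_info.memory = memory;
        fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
        int fd = -1;
        pfn_vkGetMemoryFdKHR(device, &fd_info, &fd);

        // 2. Import the fd into OpenGL as a memory object; GL takes ownership of it.
        GLuint memory_object = 0;
        glCreateMemoryObjectsEXT(1, &memory_object);
        glImportMemoryFdEXT(memory_object, allocation_size, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd);

        // 3. Back a GL buffer with a sub-range of that allocation, aliasing the
        //    memory that Vulkan (or another GL buffer) also sees.
        GLuint buffer = 0;
        glCreateBuffers(1, &buffer);
        glNamedBufferStorageMemEXT(buffer, static_cast<GLsizeiptr>(sub_size), memory_object,
                                   sub_offset);
        return buffer;
    }
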
ReinUsesLisp committed on 2021-01-16 20:48:58 -03:00
parent a39d9c5194
commit 82c2601555
67 changed files with 2555 additions and 2648 deletions

src/video_core/engines/kepler_compute.cpp

@@ -39,7 +39,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_call
     case KEPLER_COMPUTE_REG_INDEX(data_upload): {
         upload_state.ProcessData(method_argument, is_last_call);
         if (is_last_call) {
-            system.GPU().Maxwell3D().OnMemoryWrite();
         }
         break;
     }

src/video_core/engines/kepler_memory.cpp

@@ -33,7 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
     case KEPLERMEMORY_REG_INDEX(data): {
         upload_state.ProcessData(method_argument, is_last_call);
         if (is_last_call) {
-            system.GPU().Maxwell3D().OnMemoryWrite();
         }
         break;
     }

src/video_core/engines/maxwell_3d.cpp

@@ -223,7 +223,6 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argument
     case MAXWELL3D_REG_INDEX(data_upload):
         upload_state.ProcessData(argument, is_last_call);
         if (is_last_call) {
-            OnMemoryWrite();
         }
         return;
     case MAXWELL3D_REG_INDEX(fragment_barrier):
@@ -570,17 +569,18 @@ std::optional<u64> Maxwell3D::GetQueryResult() {
     }
 }
-void Maxwell3D::ProcessCBBind(std::size_t stage_index) {
+void Maxwell3D::ProcessCBBind(size_t stage_index) {
     // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage.
-    auto& shader = state.shader_stages[stage_index];
-    auto& bind_data = regs.cb_bind[stage_index];
-    ASSERT(bind_data.index < Regs::MaxConstBuffers);
-    auto& buffer = shader.const_buffers[bind_data.index];
+    const auto& bind_data = regs.cb_bind[stage_index];
+    auto& buffer = state.shader_stages[stage_index].const_buffers[bind_data.index];
     buffer.enabled = bind_data.valid.Value() != 0;
     buffer.address = regs.const_buffer.BufferAddress();
     buffer.size = regs.const_buffer.cb_size;
+    const bool is_enabled = bind_data.valid.Value() != 0;
+    const GPUVAddr gpu_addr = is_enabled ? regs.const_buffer.BufferAddress() : 0;
+    const u32 size = is_enabled ? regs.const_buffer.cb_size : 0;
+    rasterizer->BindGraphicsUniformBuffer(stage_index, bind_data.index, gpu_addr, size);
 }
 void Maxwell3D::ProcessCBData(u32 value) {
@@ -635,7 +635,6 @@ void Maxwell3D::FinishCBData() {
     const u32 id = cb_data_state.id;
     memory_manager.WriteBlock(address, cb_data_state.buffer[id].data(), size);
-    OnMemoryWrite();
     cb_data_state.id = null_cb_data;
     cb_data_state.current = null_cb_data;

src/video_core/engines/maxwell_3d.h

@@ -1314,8 +1314,7 @@ public:
             GPUVAddr LimitAddress() const {
                 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
-                                             limit_low) +
-                       1;
+                                             limit_low);
             }
         } vertex_array_limit[NumVertexArrays];
@@ -1403,6 +1402,7 @@ public:
         };
         std::array<ShaderStageInfo, Regs::MaxShaderStage> shader_stages;
         u32 current_instance = 0; ///< Current instance to be used to simulate instanced rendering.
     };
@@ -1452,11 +1452,6 @@ public:
         return *rasterizer;
     }
-    /// Notify a memory write has happened.
-    void OnMemoryWrite() {
-        dirty.flags |= dirty.on_write_stores;
-    }
     enum class MMEDrawMode : u32 {
         Undefined,
         Array,
@@ -1478,7 +1473,6 @@ public:
         using Tables = std::array<Table, 2>;
         Flags flags;
-        Flags on_write_stores;
         Tables tables{};
     } dirty;
@@ -1541,7 +1535,7 @@ private:
     void FinishCBData();
     /// Handles a write to the CB_BIND register.
-    void ProcessCBBind(std::size_t stage_index);
+    void ProcessCBBind(size_t stage_index);
     /// Handles a write to the VERTEX_END_GL register, triggering a draw.
     void DrawArrays();

src/video_core/engines/maxwell_dma.cpp

@@ -60,9 +60,6 @@ void MaxwellDMA::Launch() {
         return;
     }
-    // All copies here update the main memory, so mark all rasterizer states as invalid.
-    system.GPU().Maxwell3D().OnMemoryWrite();
     if (is_src_pitch && is_dst_pitch) {
         CopyPitchToPitch();
     } else {