Address feedback, add shader compile notifier, update setting text

ameerj 2020-08-02 13:05:41 -04:00
parent c02464f64e
commit 31a76410e8
9 changed files with 115 additions and 160 deletions

src/video_core/shader/async_shaders.cpp

@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>
@@ -111,38 +110,44 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
VideoCommon::Shader::CompilerSettings compiler_settings,
const VideoCommon::Shader::Registry& registry,
VAddr cpu_addr) {
auto params = std::make_unique<WorkerParams>();
params->backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL;
params->device = &device;
params->shader_type = shader_type;
params->uid = uid;
params->code = std::move(code);
params->code_b = std::move(code_b);
params->main_offset = main_offset;
params->compiler_settings = compiler_settings;
params->registry = &registry;
params->cpu_address = cpu_addr;
WorkerParams params{
.backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL,
.device = &device,
.shader_type = shader_type,
.uid = uid,
.code = std::move(code),
.code_b = std::move(code_b),
.main_offset = main_offset,
.compiler_settings = compiler_settings,
.registry = &registry,
.cpu_address = cpu_addr,
};
std::unique_lock lock(queue_mutex);
pending_queue.push(std::move(params));
cv.notify_one();
}
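
The OpenGL queueing path above drops the heap-allocated std::make_unique<WorkerParams> plus member-by-member assignment in favour of one aggregate built with C++20 designated initializers, pushed into a queue that now holds WorkerParams by value (see the std::queue<WorkerParams> pending_queue change in the header diff below). A minimal standalone sketch of that pattern, using simplified stand-in fields rather than the real WorkerParams members:

// Sketch only: simplified stand-ins for WorkerParams and the pending queue,
// showing value-type params with designated initializers (C++20) instead of
// std::make_unique plus per-member assignment.
#include <cstddef>
#include <mutex>
#include <queue>
#include <utility>
#include <vector>

struct WorkerParamsSketch {
    int shader_type = 0;                   // stand-in for the real shader type enum
    std::vector<unsigned long long> code;  // stand-in for the program code buffer
    std::size_t main_offset = 0;
};

std::queue<WorkerParamsSketch> pending_queue;  // values, not unique_ptrs
std::mutex queue_mutex;

void QueueWork(int shader_type, std::vector<unsigned long long> code, std::size_t main_offset) {
    WorkerParamsSketch params{
        .shader_type = shader_type,
        .code = std::move(code),           // large buffers are moved, not copied
        .main_offset = main_offset,
    };
    std::scoped_lock lock{queue_mutex};
    pending_queue.push(std::move(params)); // one move into the queue, no separate heap allocation for the params object
}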
void AsyncShaders::QueueVulkanShader(
Vulkan::VKPipelineCache* pp_cache, std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program, Vulkan::RenderPassParams renderpass_params, u32 padding,
std::array<GPUVAddr, Vulkan::Maxwell::MaxShaderProgram> shaders,
Vulkan::FixedPipelineState fixed_state) {
void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache,
const Vulkan::VKDevice& device, Vulkan::VKScheduler& scheduler,
Vulkan::VKDescriptorPool& descriptor_pool,
Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
Vulkan::VKRenderPassCache& renderpass_cache,
std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program,
Vulkan::GraphicsPipelineCacheKey key) {
auto params = std::make_unique<WorkerParams>();
params->backend = Backend::Vulkan;
params->pp_cache = pp_cache;
params->bindings = bindings;
params->program = program;
params->renderpass_params = renderpass_params;
params->padding = padding;
params->shaders = shaders;
params->fixed_state = fixed_state;
WorkerParams params{
.backend = Backend::Vulkan,
.pp_cache = pp_cache,
.vk_device = &device,
.scheduler = &scheduler,
.descriptor_pool = &descriptor_pool,
.update_descriptor_queue = &update_descriptor_queue,
.renderpass_cache = &renderpass_cache,
.bindings = bindings,
.program = program,
.key = key,
};
std::unique_lock lock(queue_mutex);
pending_queue.push(std::move(params));
@@ -150,7 +155,6 @@ void AsyncShaders::QueueVulkanShader(
}
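
QueueVulkanShader above now receives the device, scheduler, descriptor pool, update-descriptor queue and render-pass cache by reference, together with a prebuilt GraphicsPipelineCacheKey, and WorkerParams stores them as non-owning pointers; previously the worker thread rebuilt the key from renderpass_params/padding/shaders/fixed_state and fetched those objects through the pp_cache getters. A rough sketch of that shape, with hypothetical simplified types rather than the real Vulkan classes:

// Sketch only, hypothetical types: dependencies arrive by reference at the
// call site and are kept as non-owning pointers in the queued work item, so
// the worker no longer reaches back through pp_cache getters.
struct DeviceSketch {};
struct SchedulerSketch {};
struct PipelineKeySketch { unsigned long long hash = 0; };

struct VulkanWorkSketch {
    const DeviceSketch* device = nullptr;  // non-owning; caller keeps it alive
    SchedulerSketch* scheduler = nullptr;  // non-owning
    PipelineKeySketch key;                 // prebuilt key, stored by value
};

VulkanWorkSketch MakeVulkanWork(const DeviceSketch& device, SchedulerSketch& scheduler,
                                PipelineKeySketch key) {
    return VulkanWorkSketch{
        .device = &device,
        .scheduler = &scheduler,
        .key = key,
    };
}

Plain pointers are enough here on the assumption, matching the code above, that the renderer objects outlive the worker threads.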
void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) {
using namespace std::chrono_literals;
while (!is_thread_exiting.load(std::memory_order_relaxed)) {
std::unique_lock lock{queue_mutex};
cv.wait(lock, [this] { return HasWorkQueued() || is_thread_exiting; });
@@ -168,53 +172,43 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context
}
// Pull work from queue
auto work = std::move(pending_queue.front());
WorkerParams work = std::move(pending_queue.front());
pending_queue.pop();
lock.unlock();
if (work->backend == Backend::OpenGL || work->backend == Backend::GLASM) {
VideoCommon::Shader::Registry registry = *work->registry;
const ShaderIR ir(work->code, work->main_offset, work->compiler_settings, registry);
if (work.backend == Backend::OpenGL || work.backend == Backend::GLASM) {
VideoCommon::Shader::Registry registry = *work.registry;
const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, registry);
const auto scope = context->Acquire();
auto program =
OpenGL::BuildShader(*work->device, work->shader_type, work->uid, ir, registry);
OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, registry);
Result result{};
result.backend = work->backend;
result.cpu_address = work->cpu_address;
result.uid = work->uid;
result.code = std::move(work->code);
result.code_b = std::move(work->code_b);
result.shader_type = work->shader_type;
result.backend = work.backend;
result.cpu_address = work.cpu_address;
result.uid = work.uid;
result.code = std::move(work.code);
result.code_b = std::move(work.code_b);
result.shader_type = work.shader_type;
if (work->backend == Backend::OpenGL) {
if (work.backend == Backend::OpenGL) {
result.program.opengl = std::move(program->source_program);
} else if (work->backend == Backend::GLASM) {
} else if (work.backend == Backend::GLASM) {
result.program.glasm = std::move(program->assembly_program);
}
work.reset();
{
std::unique_lock complete_lock(completed_mutex);
finished_work.push_back(std::move(result));
}
} else if (work->backend == Backend::Vulkan) {
Vulkan::GraphicsPipelineCacheKey params_key{
.renderpass_params = work->renderpass_params,
.padding = work->padding,
.shaders = work->shaders,
.fixed_state = work->fixed_state,
};
} else if (work.backend == Backend::Vulkan) {
auto pipeline = std::make_unique<Vulkan::VKGraphicsPipeline>(
work->pp_cache->GetDevice(), work->pp_cache->GetScheduler(),
work->pp_cache->GetDescriptorPool(), work->pp_cache->GetUpdateDescriptorQueue(),
work->pp_cache->GetRenderpassCache(), params_key, work->bindings, work->program);
*work.vk_device, *work.scheduler, *work.descriptor_pool,
*work.update_descriptor_queue, *work.renderpass_cache, work.key, work.bindings,
work.program);
work->pp_cache->EmplacePipeline(std::move(pipeline));
work.reset();
work.pp_cache->EmplacePipeline(std::move(pipeline));
}
// Give a chance for another thread to get work.
std::this_thread::yield();
}
}
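
The loop above carries the "shader compile notifier" part of the commit message: the queueing functions call cv.notify_one() after pushing work, and the worker blocks in cv.wait() until HasWorkQueued() reports work or the thread is told to exit. Reduced to a standalone producer/consumer with hypothetical names, the shape is roughly:

// Sketch only: the notify/wait pattern used above, reduced to a standalone
// producer/consumer with hypothetical names.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

std::mutex job_mutex;
std::condition_variable job_cv;
std::queue<int> job_queue;              // stand-in for pending_queue
std::atomic<bool> exiting{false};

void PushJob(int job) {
    std::unique_lock lock{job_mutex};
    job_queue.push(job);
    job_cv.notify_one();                // wake one sleeping worker (notified under the lock, as above)
}

void WorkerLoop() {
    while (!exiting.load(std::memory_order_relaxed)) {
        std::unique_lock lock{job_mutex};
        job_cv.wait(lock, [] { return !job_queue.empty() || exiting; });
        if (exiting) {
            break;
        }
        const int job = job_queue.front();  // pull work while still holding the lock
        job_queue.pop();
        lock.unlock();                      // drop the lock before the slow compile
        (void)job;                          // ... run the compile here, outside the lock ...
        std::this_thread::yield();          // give another worker a chance at the queue
    }
}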

src/video_core/shader/async_shaders.h

@@ -86,12 +86,13 @@ public:
VideoCommon::Shader::CompilerSettings compiler_settings,
const VideoCommon::Shader::Registry& registry, VAddr cpu_addr);
void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache,
void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device,
Vulkan::VKScheduler& scheduler,
Vulkan::VKDescriptorPool& descriptor_pool,
Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
Vulkan::VKRenderPassCache& renderpass_cache,
std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program, Vulkan::RenderPassParams renderpass_params,
u32 padding,
std::array<GPUVAddr, Vulkan::Maxwell::MaxShaderProgram> shaders,
Vulkan::FixedPipelineState fixed_state);
Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key);
private:
void ShaderCompilerThread(Core::Frontend::GraphicsContext* context);
@@ -114,12 +115,14 @@ private:
// For Vulkan
Vulkan::VKPipelineCache* pp_cache;
const Vulkan::VKDevice* vk_device;
Vulkan::VKScheduler* scheduler;
Vulkan::VKDescriptorPool* descriptor_pool;
Vulkan::VKUpdateDescriptorQueue* update_descriptor_queue;
Vulkan::VKRenderPassCache* renderpass_cache;
std::vector<VkDescriptorSetLayoutBinding> bindings;
Vulkan::SPIRVProgram program;
Vulkan::RenderPassParams renderpass_params;
u32 padding;
std::array<GPUVAddr, Vulkan::Maxwell::MaxShaderProgram> shaders;
Vulkan::FixedPipelineState fixed_state;
Vulkan::GraphicsPipelineCacheKey key;
};
std::condition_variable cv;
@@ -128,7 +131,7 @@ private:
std::atomic<bool> is_thread_exiting{};
std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list;
std::vector<std::thread> worker_threads;
std::queue<std::unique_ptr<WorkerParams>> pending_queue;
std::queue<WorkerParams> pending_queue;
std::vector<AsyncShaders::Result> finished_work;
Core::Frontend::EmuWindow& emu_window;
};