general: Use deduction guides for std::lock_guard and std::unique_lock
Since C++17, deduction guides for the locking facilities mean we no longer need to hardcode the mutex type into the locks themselves, making it easier to switch mutex types should that ever be necessary in the future.
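To illustrate (a standalone sketch, not code from this diff): class template argument deduction lets the compiler infer the lock's template argument from the mutex passed to its constructor.

    #include <mutex>

    std::mutex m;

    void pre_cxx17() {
        // Before C++17: the mutex type has to be spelled out.
        std::lock_guard<std::mutex> lock(m);
    }

    void cxx17() {
        // With deduction guides: std::lock_guard<std::mutex> is deduced,
        // so later changing the type of `m` needs no edits here.
        std::lock_guard lock{m};
    }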
This commit is contained in:
parent d9b7bc4474
commit 781ab8407b
23 changed files with 77 additions and 75 deletions
@@ -10,7 +10,7 @@ namespace Tegra {
 
 void DebugContext::DoOnEvent(Event event, void* data) {
     {
-        std::unique_lock<std::mutex> lock(breakpoint_mutex);
+        std::unique_lock lock{breakpoint_mutex};
 
         // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will
         // show on debug widgets
@@ -32,7 +32,7 @@ void DebugContext::DoOnEvent(Event event, void* data) {
 
 void DebugContext::Resume() {
     {
-        std::lock_guard<std::mutex> lock(breakpoint_mutex);
+        std::lock_guard lock{breakpoint_mutex};
 
         // Tell all observers that we are about to resume
         for (auto& breakpoint_observer : breakpoint_observers) {
@@ -40,7 +40,7 @@ public:
     /// Constructs the object such that it observes events of the given DebugContext.
     explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context)
         : context_weak(debug_context) {
-        std::unique_lock<std::mutex> lock(debug_context->breakpoint_mutex);
+        std::unique_lock lock{debug_context->breakpoint_mutex};
         debug_context->breakpoint_observers.push_back(this);
     }
@@ -48,7 +48,7 @@ public:
         auto context = context_weak.lock();
         if (context) {
             {
-                std::unique_lock<std::mutex> lock(context->breakpoint_mutex);
+                std::unique_lock lock{context->breakpoint_mutex};
                 context->breakpoint_observers.remove(this);
             }
@@ -95,13 +95,13 @@ struct SynchState final {
     std::condition_variable frames_condition;
 
     void IncrementFramesCounter() {
-        std::lock_guard<std::mutex> lock{frames_mutex};
+        std::lock_guard lock{frames_mutex};
         ++queued_frame_count;
     }
 
     void DecrementFramesCounter() {
         {
-            std::lock_guard<std::mutex> lock{frames_mutex};
+            std::lock_guard lock{frames_mutex};
             --queued_frame_count;
 
             if (queued_frame_count) {
@@ -113,7 +113,7 @@ struct SynchState final {
 
     void WaitForFrames() {
         {
-            std::lock_guard<std::mutex> lock{frames_mutex};
+            std::lock_guard lock{frames_mutex};
             if (!queued_frame_count) {
                 return;
             }
@@ -121,14 +121,14 @@ struct SynchState final {
 
         // Wait for the GPU to be idle (all commands to be executed)
         {
-            std::unique_lock<std::mutex> lock{frames_mutex};
+            std::unique_lock lock{frames_mutex};
             frames_condition.wait(lock, [this] { return !queued_frame_count; });
         }
     }
 
     void SignalCommands() {
         {
-            std::unique_lock<std::mutex> lock{commands_mutex};
+            std::unique_lock lock{commands_mutex};
             if (queue.Empty()) {
                 return;
             }
@@ -138,7 +138,7 @@ struct SynchState final {
     }
 
     void WaitForCommands() {
-        std::unique_lock<std::mutex> lock{commands_mutex};
+        std::unique_lock lock{commands_mutex};
         commands_condition.wait(lock, [this] { return !queue.Empty(); });
     }
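One detail worth noting in the hunks above: the wait functions keep std::unique_lock rather than being converted to std::lock_guard, because std::condition_variable::wait needs a lock it can unlock and relock, and it requires a std::unique_lock<std::mutex> specifically. Deduction applies all the same. A minimal sketch (hypothetical names, not from this diff):

    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    void wait_for_ready() {
        // Deduces std::unique_lock<std::mutex>, exactly the type that
        // std::condition_variable::wait requires.
        std::unique_lock lock{m};
        cv.wait(lock, [] { return ready; });
    }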
@@ -84,7 +84,7 @@ public:
 
     /// Write any cached resources overlapping the specified region back to memory
     void FlushRegion(CacheAddr addr, std::size_t size) {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
         const auto& objects{GetSortedObjectsFromRegion(addr, size)};
         for (auto& object : objects) {
@@ -94,7 +94,7 @@ public:
 
     /// Mark the specified region as being invalidated
    void InvalidateRegion(CacheAddr addr, u64 size) {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
        const auto& objects{GetSortedObjectsFromRegion(addr, size)};
        for (auto& object : objects) {
@@ -108,7 +108,7 @@ public:
 
     /// Invalidates everything in the cache
     void InvalidateAll() {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
         while (interval_cache.begin() != interval_cache.end()) {
             Unregister(*interval_cache.begin()->second.begin());
@@ -133,7 +133,7 @@ protected:
 
     /// Register an object into the cache
     virtual void Register(const T& object) {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
         object->SetIsRegistered(true);
         interval_cache.add({GetInterval(object), ObjectSet{object}});
@@ -143,7 +143,7 @@ protected:
 
     /// Unregisters an object from the cache
     virtual void Unregister(const T& object) {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
         object->SetIsRegistered(false);
         rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1);
@@ -153,14 +153,14 @@ protected:
 
     /// Returns a ticks counter used for tracking when cached objects were last modified
     u64 GetModifiedTicks() {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
         return ++modified_ticks;
     }
 
     /// Flushes the specified object, updating appropriate cache state as needed
     void FlushObject(const T& object) {
-        std::lock_guard<std::recursive_mutex> lock{mutex};
+        std::lock_guard lock{mutex};
 
         if (!object->IsDirty()) {
             return;
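The rasterizer-cache hunks also show the payoff described above: the lock sites guarding a std::recursive_mutex now read identically to the std::mutex ones, so swapping mutex types later would not touch them. A small sketch of that property, using a hypothetical Cache type:

    #include <mutex>

    struct Cache {
        // Changing this to std::mutex later would leave the lock site
        // below untouched, since the lock's type is deduced.
        std::recursive_mutex mutex;
        unsigned modified_ticks = 0;

        unsigned GetModifiedTicks() {
            std::lock_guard lock{mutex}; // deduces std::lock_guard<std::recursive_mutex>
            return ++modified_ticks;
        }
    };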