core: Fix clang build
Recent build-system changes that promote more warnings to errors broke the build with clang. Fixes #4795
parent ca416a0fb8
commit be1954e04c
105 changed files with 906 additions and 667 deletions
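The diff below applies one recurring pattern: with warnings promoted to errors, clang rejects the implicit sign and width conversions scattered through the nvdrv service code, so each conversion is spelled out with static_cast (noisy LOG_CRITICAL calls are also downgraded to LOG_ERROR, and a few locals gain const). A minimal, self-contained sketch of that cast pattern, using hypothetical names rather than the driver code itself:

    // Sketch only: the u32/s32 aliases and SyncpointDelta are illustrative
    // and not part of the changed code.
    #include <cstdint>

    using u32 = std::uint32_t;
    using s32 = std::int32_t;

    // Before: const s32 diff = current - threshold;  // implicit u32 -> s32,
    // rejected by clang once the warning is treated as an error.
    s32 SyncpointDelta(u32 current, u32 threshold) {
        return static_cast<s32>(current - threshold);
    }

The subtraction stays unsigned and only the result is cast, matching how the syncpoint difference is computed in nvhost_ctrl::IocCtrlEventWait below.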
@@ -139,7 +139,7 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
     if (!object) {
-        LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
+        LOG_ERROR(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
         std::memcpy(output.data(), &params, output.size());
         return NvErrCodes::InvalidInput;
     }
@@ -151,21 +151,24 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     auto& gpu = system.GPU();

     u64 page_size{params.page_size};
-    if (!page_size) {
+    if (page_size == 0) {
         page_size = object->align;
     }

     if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
-        if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
-            const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
+        const auto buffer_map = FindBufferMap(static_cast<GPUVAddr>(params.offset));
+
+        if (buffer_map) {
+            const auto cpu_addr{
+                static_cast<VAddr>(buffer_map->CpuAddr() + static_cast<u64>(params.buffer_offset))};
             const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};

             if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
-                LOG_CRITICAL(Service_NVDRV,
-                             "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
-                             "mapping_size = {}, offset={}",
-                             params.flags, params.nvmap_handle, params.buffer_offset,
-                             params.mapping_size, params.offset);
+                LOG_ERROR(Service_NVDRV,
+                          "Remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
+                          "mapping_size = {}, offset={}",
+                          params.flags, params.nvmap_handle, params.buffer_offset,
+                          params.mapping_size, params.offset);

                 std::memcpy(output.data(), &params, output.size());
                 return NvErrCodes::InvalidInput;
@@ -174,7 +177,7 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
             std::memcpy(output.data(), &params, output.size());
             return NvErrCodes::Success;
         } else {
-            LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);
+            LOG_ERROR(Service_NVDRV, "Address not mapped. offset={}", params.offset);

             std::memcpy(output.data(), &params, output.size());
             return NvErrCodes::InvalidInput;
@@ -184,25 +187,27 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     // We can only map objects that have already been assigned a CPU address.
     ASSERT(object->status == nvmap::Object::Status::Allocated);

-    const auto physical_address{object->addr + params.buffer_offset};
+    const auto physical_address{object->addr + static_cast<VAddr>(params.buffer_offset)};
     u64 size{params.mapping_size};
-    if (!size) {
+    if (size == 0) {
         size = object->size;
     }

     const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
     if (is_alloc) {
-        params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
+        params.offset =
+            static_cast<s64>(gpu.MemoryManager().MapAllocate(physical_address, size, page_size));
     } else {
-        params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
+        params.offset = static_cast<s64>(
+            gpu.MemoryManager().Map(physical_address, static_cast<GPUVAddr>(params.offset), size));
     }

     auto result{NvErrCodes::Success};
-    if (!params.offset) {
-        LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
+    if (params.offset == 0) {
+        LOG_ERROR(Service_NVDRV, "Failed to map size={}", size);
         result = NvErrCodes::InvalidInput;
     } else {
-        AddBufferMap(params.offset, size, physical_address, is_alloc);
+        AddBufferMap(static_cast<GPUVAddr>(params.offset), size, physical_address, is_alloc);
     }

     std::memcpy(output.data(), &params, output.size());
@@ -213,12 +218,13 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
     IoctlUnmapBuffer params{};
     std::memcpy(&params, input.data(), input.size());

-    LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
+    const auto offset = static_cast<GPUVAddr>(params.offset);
+    LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", offset);

-    if (const auto size{RemoveBufferMap(params.offset)}; size) {
-        system.GPU().MemoryManager().Unmap(params.offset, *size);
+    if (const auto size{RemoveBufferMap(offset)}; size) {
+        system.GPU().MemoryManager().Unmap(offset, *size);
     } else {
-        LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
+        LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", offset);
     }

     std::memcpy(output.data(), &params, output.size());
@@ -63,8 +63,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
         return NvResult::BadParameter;
     }

-    u32 event_id = params.value & 0x00FF;
-
+    const u32 event_id = params.value & 0x00FF;
     if (event_id >= MaxNvEvents) {
         std::memcpy(output.data(), &params, sizeof(params));
         return NvResult::BadParameter;
@@ -78,16 +77,17 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
         event.writable->Signal();
         return NvResult::Success;
     }

     auto lock = gpu.LockSync();
     const u32 current_syncpoint_value = gpu.GetSyncpointValue(params.syncpt_id);
-    const s32 diff = current_syncpoint_value - params.threshold;
+    const s32 diff = static_cast<s32>(current_syncpoint_value - params.threshold);
     if (diff >= 0) {
         event.writable->Signal();
         params.value = current_syncpoint_value;
         std::memcpy(output.data(), &params, sizeof(params));
         return NvResult::Success;
     }
-    const u32 target_value = current_syncpoint_value - diff;
+    const u32 target_value = current_syncpoint_value - static_cast<u32>(diff);
+
     if (!is_async) {
         params.value = 0;
@@ -98,7 +98,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
         return NvResult::Timeout;
     }

-    EventState status = events_interface.status[event_id];
+    const EventState status = events_interface.status[event_id];
     if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) {
         events_interface.SetEventStatus(event_id, EventState::Waiting);
         events_interface.assigned_syncpt[event_id] = params.syncpt_id;
@@ -114,7 +114,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
         if (!is_async && ctrl.fresh_call) {
             ctrl.must_delay = true;
             ctrl.timeout = params.timeout;
-            ctrl.event_id = event_id;
+            ctrl.event_id = static_cast<s32>(event_id);
             return NvResult::Timeout;
         }
         std::memcpy(output.data(), &params, sizeof(params));
@@ -127,7 +127,7 @@ u32 nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& ou
                 params.unk3);

     auto& gpu = system.GPU();
-    params.fence_out.id = assigned_syncpoints;
+    params.fence_out.id = static_cast<s32>(assigned_syncpoints);
     params.fence_out.value = gpu.GetSyncpointValue(assigned_syncpoints);
     assigned_syncpoints++;
     std::memcpy(output.data(), &params, output.size());
@@ -166,7 +166,8 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
     UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0);

     auto& gpu = system.GPU();
-    u32 current_syncpoint_value = gpu.GetSyncpointValue(params.fence_out.id);
+    const u32 current_syncpoint_value =
+        gpu.GetSyncpointValue(static_cast<u32>(params.fence_out.id));
     if (params.flags.increment.Value()) {
         params.fence_out.value += current_syncpoint_value;
     } else {
@@ -200,7 +201,8 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output,
     UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0);

     auto& gpu = system.GPU();
-    u32 current_syncpoint_value = gpu.GetSyncpointValue(params.fence_out.id);
+    const u32 current_syncpoint_value =
+        gpu.GetSyncpointValue(static_cast<u32>(params.fence_out.id));
     if (params.flags.increment.Value()) {
         params.fence_out.value += current_syncpoint_value;
     } else {
@@ -61,9 +61,9 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) {
     if (ctrl.must_delay) {
         ctrl.fresh_call = false;
         ctx.SleepClientThread(
-            "NVServices::DelayedResponse", ctrl.timeout,
-            [=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_,
-                      Kernel::ThreadWakeupReason reason) {
+            "NVServices::DelayedResponse", static_cast<u64>(ctrl.timeout),
+            [=, this](std::shared_ptr<Kernel::Thread>, Kernel::HLERequestContext& ctx_,
+                      Kernel::ThreadWakeupReason) {
                 IoctlCtrl ctrl2{ctrl};
                 std::vector<u8> tmp_output = output;
                 std::vector<u8> tmp_output2 = output2;
@@ -77,7 +77,7 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) {
                 rb.Push(RESULT_SUCCESS);
                 rb.Push(ioctl_result);
             },
-            nvdrv->GetEventWriteable(ctrl.event_id));
+            nvdrv->GetEventWriteable(static_cast<u32>(ctrl.event_id)));
     } else {
         ctx.WriteBuffer(output);
         if (version == IoctlVersion::Version3) {