core: Fix clang build
Recent build-system changes promoted more warnings to errors, which broke building with clang. Fixes #4795
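For context, the breakage comes from the kinds of diagnostics clang now treats as errors in this code: indexing containers with signed integers, passing unsigned sizes to signed parameters, and non-void functions whose only remaining exit is UNREACHABLE(). A minimal sketch of the before/after pattern, using hypothetical names rather than yuzu's own types (illustration only, not code from this commit):

#include <array>
#include <cstddef>
#include <cstdint>

std::array<std::uint64_t, 8> sizes{};

// Before: indexing with a signed index trips a sign-conversion diagnostic
// (e.g. -Wsign-conversion) once warnings are promoted to errors.
// std::uint64_t Get(std::int32_t index) { return sizes[index]; }

// After: the conversion is spelled out, mirroring the static_casts added below.
std::uint64_t Get(std::int32_t index) {
    return sizes[static_cast<std::size_t>(index)];
}

The same idea applies to the UNREACHABLE() paths: adding an explicit return 0; gives the function a value on every control path, so the "control reaches end of non-void function" style diagnostic no longer fires.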
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
     UNREACHABLE();
+    return 0;
 }
 
 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
     UNREACHABLE();
+    return 0;
 }
 
 } // namespace Kernel::Memory
@@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
     }
 
     // If we allocated more than we need, free some
-    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
+    const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<u32>(heap_index))};
     if (allocated_pages > num_pages) {
         chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
     }
@@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
 
     // Keep allocating until we've allocated all our pages
     for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
-        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
+        const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<u32>(index))};
 
         while (num_pages >= pages_per_alloc) {
             // Allocate a block
@@ -33,11 +33,12 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
 }
 
 VAddr PageHeap::AllocateBlock(s32 index) {
-    const std::size_t needed_size{blocks[index].GetSize()};
+    const auto u_index = static_cast<std::size_t>(index);
+    const auto needed_size{blocks[u_index].GetSize()};
 
-    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
-        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
-            if (const std::size_t allocated_size{blocks[i].GetSize()};
+    for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
+        if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
+            if (const std::size_t allocated_size = blocks[i].GetSize();
                 allocated_size > needed_size) {
                 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
@@ -50,7 +51,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {
 
 void PageHeap::FreeBlock(VAddr block, s32 index) {
     do {
-        block = blocks[index++].PushBlock(block);
+        block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
     } while (block != 0);
 }
 
@@ -69,7 +70,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
     VAddr after_start{end};
     VAddr after_end{end};
     while (big_index >= 0) {
-        const std::size_t block_size{blocks[big_index].GetSize()};
+        const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
         const VAddr big_start{Common::AlignUp((start), block_size)};
         const VAddr big_end{Common::AlignDown((end), block_size)};
         if (big_start < big_end) {
@@ -87,7 +88,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space before the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+        const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
         while (before_start + block_size <= before_end) {
             before_end -= block_size;
             FreeBlock(before_end, i);
@@ -96,7 +97,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space after the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+        const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
         while (after_start + block_size <= after_end) {
             FreeBlock(after_start, i);
             after_start += block_size;
@@ -34,7 +34,9 @@ public:
 
     static constexpr s32 GetBlockIndex(std::size_t num_pages) {
         for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            const auto shift_index = static_cast<std::size_t>(i);
+            if (num_pages >=
+                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
                 return i;
             }
         }
@@ -86,7 +88,7 @@ private:
 
             // Set the bitmap pointers
             for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
-                bit_storages[depth] = storage;
+                bit_storages[static_cast<std::size_t>(depth)] = storage;
                 size = Common::AlignUp(size, 64) / 64;
                 storage += size;
             }
@@ -99,7 +101,7 @@ private:
             s32 depth{};
 
             do {
-                const u64 v{bit_storages[depth][offset]};
+                const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
                 if (v == 0) {
                     // Non-zero depth indicates that a previous level had a free block
                     ASSERT(depth == 0);
@@ -125,7 +127,7 @@ private:
         constexpr bool ClearRange(std::size_t offset, std::size_t count) {
             const s32 depth{GetHighestDepthIndex()};
             const auto bit_ind{offset / 64};
-            u64* bits{bit_storages[depth]};
+            u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
             if (count < 64) {
                 const auto shift{offset % 64};
                 ASSERT(shift + count <= 64);
@@ -177,11 +179,11 @@ private:
                 const auto which{offset % 64};
                 const u64 mask{1ULL << which};
 
-                u64* bit{std::addressof(bit_storages[depth][ind])};
+                u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
                 const u64 v{*bit};
                 ASSERT((v & mask) == 0);
                 *bit = v | mask;
-                if (v) {
+                if (v != 0) {
                     break;
                 }
                 offset = ind;
@@ -195,12 +197,12 @@ private:
                 const auto which{offset % 64};
                 const u64 mask{1ULL << which};
 
-                u64* bit{std::addressof(bit_storages[depth][ind])};
+                u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
                 u64 v{*bit};
                 ASSERT((v & mask) != 0);
                 v &= ~mask;
                 *bit = v;
-                if (v) {
+                if (v != 0) {
                     break;
                 }
                 offset = ind;
@@ -414,7 +414,8 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     const std::size_t remaining_pages{remaining_size / PageSize};
 
     if (process->GetResourceLimit() &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+                                              static_cast<s64>(remaining_size))) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
@@ -778,7 +779,8 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
 
     auto process{system.Kernel().CurrentProcess()};
     if (process->GetResourceLimit() && delta != 0 &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+                                              static_cast<s64>(delta))) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 