Mirror of https://github.com/yuzu-emu/yuzu-android.git, synced 2025-06-18 15:04:09 -05:00
gpu: Make memory_manager private
Makes the class interface consistent and provides accessors for obtaining a reference to the memory manager instance. Since the accessors return references rather than exposing the pointer, our more flimsy uses of const become apparent: const doesn't propagate through pointers the way one would typically expect, so this change makes our mutable state more obvious in some places.
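For context, here is a minimal, self-contained sketch of the accessor pattern the message describes. It is an illustration under assumptions, not the actual video_core code: the class layout, placeholder bodies, and the main() usage are invented for the sketch, while the MemoryManager() accessor names simply mirror the diff below.

// gpu_accessor_sketch.cpp -- minimal sketch, not yuzu's actual gpu.h.
#include <cstdint>
#include <memory>

namespace Tegra {

class MemoryManager {
public:
    // Placeholder body for the sketch; the real AllocateSpace lives in
    // video_core and has different semantics.
    std::uint64_t AllocateSpace(std::uint64_t size, std::uint64_t align) {
        return size & ~(align - 1);
    }
};

class GPU {
public:
    GPU() : memory_manager{std::make_unique<Tegra::MemoryManager>()} {}

    // Non-const accessor: mutating the memory manager requires a non-const GPU.
    Tegra::MemoryManager& MemoryManager() {
        return *memory_manager;
    }

    // Const accessor: constness propagates to the returned reference. With a
    // public unique_ptr member, operator-> on a const GPU still yields a
    // mutable MemoryManager, which is the "flimsy const" the message refers to.
    const Tegra::MemoryManager& MemoryManager() const {
        return *memory_manager;
    }

private:
    std::unique_ptr<Tegra::MemoryManager> memory_manager;
};

} // namespace Tegra

int main() {
    Tegra::GPU gpu;
    gpu.MemoryManager().AllocateSpace(0x1000, 0x100); // OK: non-const GPU

    const Tegra::GPU& const_gpu = gpu;
    static_cast<void>(const_gpu);
    // const_gpu.MemoryManager().AllocateSpace(0x1000, 0x100); // would not
    // compile: AllocateSpace is non-const and the accessor returns a const&.
}

The design point the sketch demonstrates: hiding the pointer behind reference-returning accessors makes the const and non-const access paths explicit at every call site, instead of letting a const GPU silently hand out a mutable memory manager.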
@@ -56,9 +56,9 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>&
     auto& gpu = Core::System::GetInstance().GPU();
     const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
     if (params.flags & 1) {
-        params.offset = gpu.memory_manager->AllocateSpace(params.offset, size, 1);
+        params.offset = gpu.MemoryManager().AllocateSpace(params.offset, size, 1);
     } else {
-        params.offset = gpu.memory_manager->AllocateSpace(size, params.align);
+        params.offset = gpu.MemoryManager().AllocateSpace(size, params.align);
     }

     std::memcpy(output.data(), &params, output.size());
@@ -88,7 +88,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
         u64 size = static_cast<u64>(entry.pages) << 0x10;
         ASSERT(size <= object->size);

-        Tegra::GPUVAddr returned = gpu.memory_manager->MapBufferEx(object->addr, offset, size);
+        Tegra::GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size);
         ASSERT(returned == offset);
     }
     std::memcpy(output.data(), entries.data(), output.size());
@@ -125,9 +125,9 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     auto& gpu = Core::System::GetInstance().GPU();

     if (params.flags & 1) {
-        params.offset = gpu.memory_manager->MapBufferEx(object->addr, params.offset, object->size);
+        params.offset = gpu.MemoryManager().MapBufferEx(object->addr, params.offset, object->size);
     } else {
-        params.offset = gpu.memory_manager->MapBufferEx(object->addr, object->size);
+        params.offset = gpu.MemoryManager().MapBufferEx(object->addr, object->size);
     }

     // Create a new mapping entry for this operation.
@@ -161,7 +161,7 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
               itr->second.size);

     auto& gpu = system_instance.GPU();
-    params.offset = gpu.memory_manager->UnmapBuffer(params.offset, itr->second.size);
+    params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);

     buffer_mappings.erase(itr->second.offset);

@@ -264,7 +264,7 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
     u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
     for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
         boost::optional<VAddr> maybe_vaddr =
-            Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(gpu_addr);
+            Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
         // The GPU <-> CPU virtual memory mapping is not 1:1
         if (!maybe_vaddr) {
             LOG_ERROR(HW_Memory,
@@ -346,7 +346,7 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
         const VAddr overlap_end = std::min(end, region_end);

         const std::vector<Tegra::GPUVAddr> gpu_addresses =
-            system_instance.GPU().memory_manager->CpuToGpuAddress(overlap_start);
+            system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);

         if (gpu_addresses.empty()) {
             return;