Core: Clang format and other small issues.
@@ -27,13 +27,13 @@ struct DeviceMemoryManagerAllocator;
 template <typename Traits>
 class DeviceMemoryManager {
     using DeviceInterface = typename Traits::DeviceInterface;
-    using DeviceMethods = Traits::DeviceMethods;
+    using DeviceMethods = typename Traits::DeviceMethods;

 public:
     DeviceMemoryManager(const DeviceMemory& device_memory);
     ~DeviceMemoryManager();

-    void BindInterface(DeviceInterface* interface);
+    void BindInterface(DeviceInterface* device_inter);

     DAddr Allocate(size_t size);
     void AllocateFixed(DAddr start, size_t size);
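Note: the added typename above is not just formatting. Traits::DeviceMethods is a
dependent name, so older compilers require the typename disambiguator; C++20 (P0634)
relaxes this in type-only contexts such as alias declarations, but toolchains that
predate it (e.g. Clang before 16) reject the bare form. A minimal sketch of the rule,
with hypothetical names:

    template <typename Traits>
    class Example {
        // Dependent name: spelled with typename so pre-P0634 compilers
        // parse it as a type rather than a value.
        using DeviceMethods = typename Traits::DeviceMethods;
    };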
@@ -111,6 +111,7 @@ public:
 private:
     static constexpr size_t device_virtual_bits = Traits::device_virtual_bits;
     static constexpr size_t device_as_size = 1ULL << device_virtual_bits;
+    static constexpr size_t physical_min_bits = 32;
     static constexpr size_t physical_max_bits = 33;
     static constexpr size_t page_bits = 12;
     static constexpr size_t page_size = 1ULL << page_bits;
@@ -143,7 +144,7 @@ private:
     std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl;

     const uintptr_t physical_base;
-    DeviceInterface* interface;
+    DeviceInterface* device_inter;
     Common::VirtualBuffer<u32> compressed_physical_ptr;
     Common::VirtualBuffer<u32> compressed_device_addr;
     Common::VirtualBuffer<u32> continuity_tracker;
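Note: the interface -> device_inter rename in this hunk and throughout the file is
likely more than style: on Windows, combaseapi.h effectively defines interface as a
macro for struct, so an identifier with that name can break MSVC builds. A
hypothetical reproduction:

    #include <combaseapi.h>            // effectively: #define interface struct
    // DeviceInterface* interface;     // expands to "DeviceInterface* struct;" -> error
    DeviceInterface* device_inter;     // no macro collision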
@@ -12,6 +12,7 @@
 #include "common/assert.h"
 #include "common/div_ceil.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "core/device_memory.h"
 #include "core/device_memory_manager.h"
 #include "core/memory.h"
@@ -162,20 +163,39 @@ struct DeviceMemoryManagerAllocator {
 template <typename Traits>
 DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
     : physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())},
-      interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
-      compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)),
+      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
+      compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
+                                               Settings::MemoryLayout::Memory_4Gb
+                                           ? physical_min_bits
+                                           : physical_max_bits) -
+                                      Memory::YUZU_PAGEBITS)),
       continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
       cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
     impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
     cached_pages = std::make_unique<CachedPages>();
+
+    const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS;
+    for (size_t i = 0; i < total_virtual; i++) {
+        compressed_physical_ptr[i] = 0;
+        continuity_tracker[i] = 1;
+        cpu_backing_address[i] = 0;
+    }
+    const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
+                                                Settings::MemoryLayout::Memory_4Gb
+                                            ? physical_min_bits
+                                            : physical_max_bits) -
+                                       Memory::YUZU_PAGEBITS);
+    for (size_t i = 0; i < total_phys; i++) {
+        compressed_device_addr[i] = 0;
+    }
 }

 template <typename Traits>
 DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default;

 template <typename Traits>
-void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* interface_) {
-    interface = interface_;
+void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* device_inter_) {
+    device_inter = device_inter_;
 }

 template <typename Traits>
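Note: the constructor now sizes compressed_device_addr from the configured memory
layout instead of always assuming the largest one: the 4 GB layout needs
physical_min_bits (32) of physical address space, otherwise physical_max_bits (33)
is used. Each entry covers one 4 KiB page (YUZU_PAGEBITS = 12), so the table halves
in the 4 GB case. The arithmetic, assuming the constants declared earlier:

    constexpr size_t page_bits = 12;                          // 4 KiB pages
    constexpr size_t entries_4gb = 1ULL << (32 - page_bits);  // 2^20 entries
    constexpr size_t entries_8gb = 1ULL << (33 - page_bits);  // 2^21 entries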
@@ -232,7 +252,7 @@ template <typename Traits>
 void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
     size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
     size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
-    interface->InvalidateRegion(address, size);
+    device_inter->InvalidateRegion(address, size);
     std::scoped_lock lk(mapping_guard);
     for (size_t i = 0; i < num_pages; i++) {
         auto phys_addr = compressed_physical_ptr[start_page_d + i];
@@ -392,7 +412,7 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o

 template <typename Traits>
 void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
-    interface->FlushRegion(address, size);
+    device_inter->FlushRegion(address, size);
     WalkBlock(
         address, size,
         [&](size_t copy_amount, DAddr current_vaddr) {
@@ -426,7 +446,7 @@ void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_poin
         [&](const std::size_t copy_amount) {
             src_pointer = static_cast<const u8*>(src_pointer) + copy_amount;
         });
-    interface->InvalidateRegion(address, size);
+    device_inter->InvalidateRegion(address, size);
 }

 template <typename Traits>
@@ -468,14 +488,14 @@ void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* sr
 }

 template <typename Traits>
-size_t DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_interface) {
+size_t DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_device_inter) {
     size_t new_id;
     if (!id_pool.empty()) {
         new_id = id_pool.front();
         id_pool.pop_front();
-        registered_processes[new_id] = memory_interface;
+        registered_processes[new_id] = memory_device_inter;
     } else {
-        registered_processes.emplace_back(memory_interface);
+        registered_processes.emplace_back(memory_device_inter);
         new_id = registered_processes.size() - 1U;
     }
     return new_id;
@@ -512,7 +532,7 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
     size_t page = addr >> Memory::YUZU_PAGEBITS;
     auto [process_id, base_vaddress] = ExtractCPUBacking(page);
     size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS;
-    auto* memory_interface = registered_processes[process_id];
+    auto* memory_device_inter = registered_processes[process_id];
     for (; page != page_end; ++page) {
         std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page);

@@ -536,7 +556,7 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
             uncache_bytes += Memory::YUZU_PAGESIZE;
         } else if (uncache_bytes > 0) {
             lock();
-            MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS,
+            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
                               uncache_bytes, false);
             uncache_bytes = 0;
         }
@@ -547,7 +567,7 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
             cache_bytes += Memory::YUZU_PAGESIZE;
         } else if (cache_bytes > 0) {
             lock();
-            MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
+            MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                               true);
             cache_bytes = 0;
         }
@@ -555,12 +575,12 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
     }
     if (uncache_bytes > 0) {
         lock();
-        MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
+        MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
                           false);
     }
     if (cache_bytes > 0) {
         lock();
-        MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
+        MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                           true);
     }
 }
@@ -202,7 +202,8 @@ public:
         } else {
             this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes());
         }
-    } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || (FLAGS & GuestMemoryFlags::Cached)) {
+    } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) ||
+                         (FLAGS & GuestMemoryFlags::Cached)) {
         this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes());
     }
 }
@@ -215,4 +216,4 @@ using CpuGuestMemory = GuestMemory<Core::Memory::Memory, T, FLAGS>;
 template <typename T, GuestMemoryFlags FLAGS>
 using CpuGuestMemoryScoped = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>;

-} // namespace Tegra::Memory
+} // namespace Core::Memory
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/scratch_buffer.h"
+#include "core/guest_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_process.h"
@@ -22,7 +23,6 @@
 #include "core/hle/service/hle_ipc.h"
 #include "core/hle/service/ipc_helpers.h"
 #include "core/memory.h"
-#include "core/guest_memory.h"

 namespace Service {

@@ -16,6 +16,12 @@

 namespace Service::Nvidia::NvCore {

+Session::Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_)
+    : id{id_}, process{process_}, smmu_id{smmu_id_},
+      has_preallocated_area{}, mapper{}, is_active{} {}
+
+Session::~Session() = default;
+
 struct ContainerImpl {
     explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
         : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
@@ -54,8 +60,8 @@ size_t Container::OpenSession(Kernel::KProcess* process) {
         impl->id_pool.pop_front();
         impl->sessions[new_id] = Session{new_id, process, smmu_id};
     } else {
-        impl->sessions.emplace_back(new_id, process, smmu_id);
         new_id = impl->new_ids++;
+        impl->sessions.emplace_back(new_id, process, smmu_id);
     }
     auto& session = impl->sessions[new_id];
     session.is_active = true;
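Note: this hunk fixes an order-of-operations bug rather than formatting: the old
code passed new_id to emplace_back before new_id = impl->new_ids++ had assigned it
on this branch, so the Session was constructed with a stale id that disagreed with
the impl->sessions[new_id] lookup below. The corrected pattern in isolation:

    new_id = impl->new_ids++;                               // allocate the id first
    impl->sessions.emplace_back(new_id, process, smmu_id);  // then construct with it
    auto& session = impl->sessions[new_id];                 // lookup matches element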
@@ -27,6 +27,14 @@ class SyncpointManager;
 struct ContainerImpl;

 struct Session {
+    Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_);
+    ~Session();
+
+    Session(const Session&) = delete;
+    Session& operator=(const Session&) = delete;
+    Session(Session&&) = default;
+    Session& operator=(Session&&) = default;
+
     size_t id;
     Kernel::KProcess* process;
     size_t smmu_id;
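Note: spelling out the special members makes Session explicitly move-only: a session
owns per-process mapping state (e.g. its mapper), which must not be duplicated, while
the container holding sessions still needs to relocate elements. A sketch of the
effect (the container type here is an assumption):

    std::deque<Session> sessions;
    sessions.emplace_back(id, process, smmu_id);  // in-place construction: fine
    Session moved = std::move(sessions[0]);       // move: fine
    // Session copy = sessions[0];                // ill-formed: copy is deleted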
@@ -124,10 +124,11 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
     m_internal->base_set.clear();
     const IntervalType interval{start, start + size};
     m_internal->base_set.insert(interval);
-    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int) {
-        const IntervalType other{start_addr, end_addr};
-        m_internal->base_set.subtract(other);
-    });
+    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+                                        [this](VAddr start_addr, VAddr end_addr, int) {
+                                            const IntervalType other{start_addr, end_addr};
+                                            m_internal->base_set.subtract(other);
+                                        });
     if (!m_internal->base_set.empty()) {
         auto it = m_internal->base_set.begin();
         auto end_it = m_internal->base_set.end();
@@ -136,7 +137,8 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
             const VAddr inter_addr = it->lower();
             const size_t offset = inter_addr - m_vaddress;
             const size_t sub_size = inter_addr_end - inter_addr;
-            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_smmu_id);
+            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
+                                          m_smmu_id);
         }
     }
     m_internal->mapping_overlaps += std::make_pair(interval, 1);
@@ -147,12 +149,13 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
 void HeapMapper::Unmap(VAddr start, size_t size) {
     std::scoped_lock lk(m_internal->guard);
     m_internal->base_set.clear();
-    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int value) {
-        if (value <= 1) {
-            const IntervalType other{start_addr, end_addr};
-            m_internal->base_set.insert(other);
-        }
-    });
+    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+                                        [this](VAddr start_addr, VAddr end_addr, int value) {
+                                            if (value <= 1) {
+                                                const IntervalType other{start_addr, end_addr};
+                                                m_internal->base_set.insert(other);
+                                            }
+                                        });
     if (!m_internal->base_set.empty()) {
         auto it = m_internal->base_set.begin();
         auto end_it = m_internal->base_set.end();
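Note: both reflowed lambdas belong to a reference-counted interval-mapping scheme:
mapping_overlaps counts how many times each address range has been mapped, Map only
forwards ranges not already covered to device_memory.Map, and Unmap only tears down
ranges whose count is about to reach zero (value <= 1). A stripped-down sketch of the
counting idea, using a hypothetical per-page counter instead of an interval tree:

    std::unordered_map<VAddr, int> ref_count;  // hypothetical per-page map count

    void MapPage(VAddr page) {
        if (ref_count[page]++ == 0) {
            // first reference: establish the device mapping
        }
    }

    void UnmapPage(VAddr page) {
        if (--ref_count[page] == 0) {
            // last reference gone: remove the device mapping
        }
    }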
@@ -13,8 +13,8 @@
 #include "core/memory.h"
 #include "video_core/host1x/host1x.h"

-
 using Core::Memory::YUZU_PAGESIZE;
+constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;

 namespace Service::Nvidia::NvCore {
 NvMap::Handle::Handle(u64 size_, Id id_)
@@ -96,8 +96,9 @@ void NvMap::UnmapHandle(Handle& handle_description) {
     const size_t map_size = handle_description.aligned_size;
     if (!handle_description.in_heap) {
         auto& smmu = host1x.MemoryManager();
+        size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
         smmu.Unmap(handle_description.d_address, map_size);
-        smmu.Free(handle_description.d_address, static_cast<size_t>(map_size));
+        smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
         handle_description.d_address = 0;
         return;
     }
@@ -206,7 +207,8 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
             handle_description->d_address = session->mapper->Map(vaddress, map_size);
             handle_description->in_heap = true;
         } else {
-            while ((address = smmu.Allocate(map_size)) == 0) {
+            size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
+            while ((address = smmu.Allocate(aligned_up)) == 0) {
                 // Free handles until the allocation succeeds
                 std::scoped_lock queueLock(unmap_queue_lock);
                 if (auto freeHandleDesc{unmap_queue.front()}) {
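Note: the substantive fix across these two nvmap hunks is allocate/free symmetry:
PinHandle now rounds the pin size up to BIG_PAGE_SIZE (16 pages of 4 KiB) before
calling smmu.Allocate, so UnmapHandle must round the same way before smmu.Free, or
the allocator would leak the rounded-up tail of every pinned region. For example:

    constexpr size_t BIG_PAGE_SIZE = 0x1000 * 16;  // 64 KiB
    size_t map_size = 0x9000;                      // 36 KiB requested
    size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);  // -> 0x10000
    // smmu.Allocate(aligned_up) ... later ... smmu.Free(d_address, aligned_up)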
@@ -63,8 +63,8 @@ public:
     } flags{};
     static_assert(sizeof(Flags) == sizeof(u32));

-    VAddr address{};   //!< The memory location in the guest's AS that this handle corresponds to,
-                       //!< this can also be in the nvdrv tmem
+    VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
+                     //!< this can also be in the nvdrv tmem
     bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                  //!< call
@@ -73,8 +73,8 @@ public:
     bool in_heap{};
     size_t session_id{};

-    DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to,
-                       //!< this can also be in the nvdrv tmem
+    DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
+                       //!< to, this can also be in the nvdrv tmem

     Handle(u64 size, Id id);

@@ -82,7 +82,8 @@ public:
      * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
      * if a 0 address is passed
      */
-    [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId);
+    [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
+                                 size_t pSessionId);

     /**
      * @brief Increases the dupe counter of the handle for the given session
@@ -4,8 +4,8 @@
 #pragma once

 #include <deque>
-#include <vector>
 #include <unordered_map>
+#include <vector>

 #include "common/common_types.h"
 #include "common/swap.h"
@@ -69,7 +69,7 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
 }

 void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) {
-	sessions[fd] = session_id;
+    sessions[fd] = session_id;
 }

 void nvhost_vic::OnClose(DeviceFD fd) {
@@ -123,8 +123,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
         return NvResult::InsufficientMemory;
     }

-    const auto result =
-        handle_description->Alloc(params.flags, params.align, params.kind, params.address, sessions[fd]);
+    const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
+                                                  params.address, sessions[fd]);
     if (result != NvResult::Success) {
         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
         return result;
@@ -92,7 +92,8 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::Device
     Nvidia::Devices::nvmap::IocFreeParams free_params{
         .handle = handle,
     };
-    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
+             VI::ResultOperationFailed);

     // We succeeded.
     R_SUCCEED();
@@ -109,7 +110,8 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
         .kind = 0,
         .address = GetInteger(buffer),
     };
-    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
+             VI::ResultOperationFailed);

     // We succeeded.
     R_SUCCEED();
@@ -201,8 +203,8 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
     m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);

     // Create an nvmap handle for the buffer and assign the memory to it.
-    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, map_address,
-                                  SharedBufferSize));
+    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
+                                  map_address, SharedBufferSize));

     // Record the display id.
     m_display_id = display_id;
@@ -4,9 +4,9 @@
 #pragma once

 #include "common/math_util.h"
+#include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvnflinger/nvnflinger.h"
 #include "core/hle/service/nvnflinger/ui/fence.h"
-#include "core/hle/service/nvdrv/nvdata.h"

 namespace Kernel {
 class KPageGroup;
@@ -62,7 +62,6 @@ private:
     Core::System& m_system;
     Nvnflinger& m_flinger;
     std::shared_ptr<Nvidia::Module> m_nvdrv;
-
 };

 } // namespace Service::Nvnflinger
@@ -488,7 +488,7 @@ public:
     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);

     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);

-
+    bool InvalidateSeparateHeap(void* fault_address);

 private: