general: fix spelling mistakes
@@ -44,7 +44,7 @@ public:
 template <class P>
 class ChannelSetupCaches {
 public:
-    /// Operations for seting the channel of execution.
+    /// Operations for setting the channel of execution.
     virtual ~ChannelSetupCaches();
 
     /// Create channel state.
@@ -193,7 +193,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
         output_converter->ConvertFrom(impl->intermediate_dst, impl->dst_buffer);
     };
 
-    // Do actuall Blit
+    // Do actual Blit
     impl->dst_buffer.resize(dst_copy_size);
     if (src.linear == Fermi2D::MemoryLayout::BlockLinear) {
@@ -125,7 +125,7 @@ uvec4 local_buff;
 uvec4 color_endpoint_data;
 int color_bitsread = 0;
 
-// Four values, two endpoints, four maximum paritions
+// Four values, two endpoints, four maximum partitions
 uint color_values[32];
 int colvals_index = 0;
 
@@ -97,7 +97,7 @@
  * half-rate linear filtering on GCN.
  *
  * If SMAA is applied to 64-bit color buffers, switching to point filtering
- * when accesing them will increase the performance. Search for
+ * when accessing them will increase the performance. Search for
  * 'SMAASamplePoint' to see which textures may benefit from point
  * filtering, and where (which is basically the color input in the edge
  * detection and resolve passes).
@@ -103,8 +103,8 @@ public:
 
     /**
      * Returns a vector with all the subranges of cpu addresses mapped beneath.
-     * if the region is continous, a single pair will be returned. If it's unmapped, an empty vector
-     * will be returned;
+     * if the region is continuous, a single pair will be returned. If it's unmapped, an empty
+     * vector will be returned;
      */
     std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
                                                                     std::size_t size) const;
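The documented contract (continuous region gives one pair, unmapped gives an empty vector) means callers can iterate the result without special-casing either outcome. A minimal sketch, where the flush callback and the memory-manager type are stand-ins and only GetSubmappedRange's contract comes from the header:

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using GPUVAddr = std::uint64_t; // Matches the alias used by the header.

    // Visit only the mapped pieces of [gpu_addr, gpu_addr + size).
    template <typename MemoryManager, typename FlushFn>
    void ForEachMappedPiece(const MemoryManager& mm, GPUVAddr gpu_addr, std::size_t size,
                            FlushFn&& flush) {
        for (const auto& [sub_addr, sub_size] : mm.GetSubmappedRange(gpu_addr, size)) {
            flush(sub_addr, sub_size); // Each pair is one contiguous mapped subrange.
        }
    }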
@@ -341,7 +341,7 @@ public:
 
     /// Flushes the query to guest memory.
     virtual void Flush() {
-        // When counter is nullptr it means that it's just been reseted. We are supposed to write a
+        // When counter is nullptr it means that it's just been reset. We are supposed to write a
         // zero in these cases.
         const u64 value = counter ? counter->Query() : 0;
         std::memcpy(host_ptr, &value, sizeof(u64));
@@ -576,7 +576,7 @@ bool RasterizerOpenGL::AccelerateConditionalRendering() {
         // Reimplement Host conditional rendering.
         return false;
     }
-    // Medium / Low Hack: stub any checks on queries writen into the buffer cache.
+    // Medium / Low Hack: stub any checks on queries written into the buffer cache.
     const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
     Maxwell::ReportSemaphore::Compare cmp;
     if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
@@ -162,7 +162,7 @@ private:
     /// Syncs the cull mode to match the guest state
     void SyncCullMode();
 
-    /// Syncs the primitve restart to match the guest state
+    /// Syncs the primitive restart to match the guest state
     void SyncPrimitiveRestart();
 
     /// Syncs the depth test state to match the guest state
@@ -246,7 +246,7 @@ private:
     std::array<GLuint, MAX_TEXTURES> texture_handles{};
     std::array<GLuint, MAX_IMAGES> image_handles{};
 
-    /// Number of commands queued to the OpenGL driver. Resetted on flush.
+    /// Number of commands queued to the OpenGL driver. Reset on flush.
     size_t num_queued_commands = 0;
     bool has_written_global_memory = false;
 
@@ -271,7 +271,7 @@ bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcep
 
 u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
     // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8
-    // If we substract 0x200 to OpenGL enums and 1 to the others we get a 0-7 range.
+    // If we subtract 0x200 to OpenGL enums and 1 to the others we get a 0-7 range.
     // Perfect for a hash.
    const u32 value = static_cast<u32>(op);
    return value - (value >= 0x200 ? 0x200 : 1);
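The packing arithmetic the comment describes can be verified in isolation; a standalone check using only the boundary values the comment names:

    #include <cassert>
    #include <cstdint>

    // Same expression as PackComparisonOp: OpenGL-style enums (0x200..0x207)
    // and unit-based enums (1..8) both collapse onto the dense 0..7 range.
    constexpr std::uint32_t Pack(std::uint32_t value) {
        return value - (value >= 0x200 ? 0x200 : 1);
    }

    int main() {
        assert(Pack(0x200) == 0 && Pack(0x207) == 7); // OpenGL range.
        assert(Pack(1) == 0 && Pack(8) == 7);         // Unit-based range.
    }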
@@ -322,8 +322,8 @@ Maxwell::StencilOp::Op FixedPipelineState::UnpackStencilOp(u32 packed) noexcept
 }
 
 u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept {
-    // FrontAndBack is 0x408, by substracting 0x406 in it we get 2.
-    // Individual cull faces are in 0x404 and 0x405, substracting 0x404 we get 0 and 1.
+    // FrontAndBack is 0x408, by subtracting 0x406 in it we get 2.
+    // Individual cull faces are in 0x404 and 0x405, subtracting 0x404 we get 0 and 1.
     const u32 value = static_cast<u32>(cull);
     return value - (value == 0x408 ? 0x406 : 0x404);
 }
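The same style of check works for the cull-face packing; the values below are exactly the ones the comment cites:

    #include <cassert>
    #include <cstdint>

    // Same expression as PackCullFace: 0x404/0x405 (the individual faces)
    // map to 0/1, and 0x408 (FrontAndBack) maps to 2.
    constexpr std::uint32_t PackCull(std::uint32_t value) {
        return value - (value == 0x408 ? 0x406 : 0x404);
    }

    int main() {
        assert(PackCull(0x404) == 0);
        assert(PackCull(0x405) == 1);
        assert(PackCull(0x408) == 2);
    }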
@@ -22,8 +22,8 @@ CommandPool::CommandPool(MasterSemaphore& master_semaphore_, const Device& devic
 CommandPool::~CommandPool() = default;
 
 void CommandPool::Allocate(size_t begin, size_t end) {
-    // Command buffers are going to be commited, recorded, executed every single usage cycle.
-    // They are also going to be reseted when commited.
+    // Command buffers are going to be committed, recorded, executed every single usage cycle.
+    // They are also going to be reset when committed.
     Pool& pool = pools.emplace_back();
     pool.handle = device.GetLogical().CreateCommandPool({
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@@ -671,7 +671,7 @@ bool RasterizerVulkan::AccelerateConditionalRendering() {
         // TODO(Blinkhawk): Reimplement Host conditional rendering.
         return false;
     }
-    // Medium / Low Hack: stub any checks on queries writen into the buffer cache.
+    // Medium / Low Hack: stub any checks on queries written into the buffer cache.
     const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
     Maxwell::ReportSemaphore::Compare cmp;
     if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
@@ -37,7 +37,7 @@ size_t ResourcePool::CommitResource() {
             found = free_resource;
         }
     }
-    // Free iterator is hinted to the resource after the one that's been commited.
+    // Free iterator is hinted to the resource after the one that's been committed.
     hint_iterator = (*found + 1) % ticks.size();
     return *found;
 }
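The hint formula wraps at the end of the pool so the next search always starts just past the committed slot; a minimal check of the arithmetic (pool size 4 is an arbitrary illustration):

    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t pool_size = 4;
        assert((1 + 1) % pool_size == 2); // Hint is the slot after the committed one.
        assert((3 + 1) % pool_size == 0); // Committing the last slot wraps the hint to 0.
    }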
@@ -46,7 +46,7 @@ size_t ResourcePool::ManageOverflow() {
     const size_t old_capacity = ticks.size();
     Grow();
 
-    // The last entry is guaranted to be free, since it's the first element of the freshly
+    // The last entry is guaranteed to be free, since it's the first element of the freshly
     // allocated resources.
     return old_capacity;
 }
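The invariant the fixed comment states can be modelled with a plain vector, assuming Grow() appends a block of fresh (free) entries the way the real pool allocates new resources:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
        std::vector<int> ticks(8, 1);      // 1 = in use; every existing slot is busy.
        const std::size_t old_capacity = ticks.size();
        ticks.resize(old_capacity * 2, 0); // Stand-in for Grow(); 0 = free.
        assert(ticks[old_capacity] == 0);  // First freshly allocated entry is free.
    }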
@@ -159,7 +159,7 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, bo
     present_mode = ChooseSwapPresentMode(present_modes);
 
     u32 requested_image_count{capabilities.minImageCount + 1};
-    // Ensure Tripple buffering if possible.
+    // Ensure Triple buffering if possible.
     if (capabilities.maxImageCount > 0) {
         if (requested_image_count > capabilities.maxImageCount) {
             requested_image_count = capabilities.maxImageCount;
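The image-count selection can be factored into a pure function for illustration; in Vulkan, maxImageCount == 0 means the surface imposes no upper limit, which is why the clamp is guarded. The capability values in main are made up:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Request one image above the minimum (leaning toward triple buffering),
    // clamped to the surface's maximum when one is reported.
    std::uint32_t ChooseImageCount(std::uint32_t min_count, std::uint32_t max_count) {
        std::uint32_t requested = min_count + 1;
        if (max_count > 0) {
            requested = std::min(requested, max_count);
        }
        return requested;
    }

    int main() {
        assert(ChooseImageCount(2, 3) == 3); // Triple buffering fits.
        assert(ChooseImageCount(3, 3) == 3); // Clamped to the reported maximum.
        assert(ChooseImageCount(2, 0) == 3); // No maximum reported by the surface.
    }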
@@ -25,7 +25,7 @@ void UpdateDescriptorQueue::TickFrame() {
 
 void UpdateDescriptorQueue::Acquire() {
     // Minimum number of entries required.
-    // This is the maximum number of entries a single draw call migth use.
+    // This is the maximum number of entries a single draw call might use.
     static constexpr size_t MIN_ENTRIES = 0x400;
 
     if (std::distance(payload.data(), payload_cursor) + MIN_ENTRIES >= payload.max_size()) {
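The guard in Acquire() reserves headroom: a single draw may write up to MIN_ENTRIES updates, so the queue must recycle before the cursor gets that close to the end. A reduced model of the predicate (the flush path itself is elided, and the capacity in main is made up):

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t MIN_ENTRIES = 0x400; // Worst case for one draw call.

    // True when fewer than MIN_ENTRIES slots remain past the cursor.
    constexpr bool NeedsFlush(std::size_t used, std::size_t capacity) {
        return used + MIN_ENTRIES >= capacity;
    }

    int main() {
        assert(!NeedsFlush(0, 0x10000));              // Fresh queue: plenty of room.
        assert(NeedsFlush(0x10000 - 0x400, 0x10000)); // Exactly MIN_ENTRIES left: flush.
    }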
@@ -25,7 +25,7 @@ enum class ImageFlagBits : u32 {
     Registered = 1 << 6,  ///< True when the image is registered
     Picked = 1 << 7,      ///< Temporary flag to mark the image as picked
     Remapped = 1 << 8,    ///< Image has been remapped.
-    Sparse = 1 << 9,      ///< Image has non continous submemory.
+    Sparse = 1 << 9,      ///< Image has non continuous submemory.
 
     // Garbage Collection Flags
     BadOverlap = 1 << 10, ///< This image overlaps other but doesn't fit, has higher
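These are bit flags, so several states combine in one word and each is tested with a mask; a small sketch, assuming plain u32 storage rather than the project's flags-enum helper:

    #include <cstdint>

    enum class ImageFlagBits : std::uint32_t {
        Remapped = 1 << 8, // Image has been remapped.
        Sparse = 1 << 9,   // Image has non continuous submemory.
    };

    constexpr bool HasFlag(std::uint32_t flags, ImageFlagBits bit) {
        return (flags & static_cast<std::uint32_t>(bit)) != 0;
    }

    int main() {
        const std::uint32_t flags = static_cast<std::uint32_t>(ImageFlagBits::Sparse);
        return HasFlag(flags, ImageFlagBits::Sparse) ? 0 : 1; // Sparse bit is set.
    }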
@@ -1571,7 +1571,7 @@ static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
     assert(strm.GetBitsRead() + weightParams.GetPackedBitSize() == 128);
 
     // Decode both color data and texel weight data
-    u32 colorValues[32]; // Four values, two endpoints, four maximum paritions
+    u32 colorValues[32]; // Four values, two endpoints, four maximum partitions
     DecodeColorValues(colorValues, colorEndpointData, colorEndpointMode, nPartitions,
                       colorDataBits);
 
@@ -417,7 +417,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
 
     sets_per_pool = 64;
     if (is_amd_driver) {
-        // AMD drivers need a higher amount of Sets per Pool in certain circunstances like in XC2.
+        // AMD drivers need a higher amount of Sets per Pool in certain circumstances like in XC2.
         sets_per_pool = 96;
         // Disable VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT on AMD GCN4 and lower as it is broken.
         if (!features.shader_float16_int8.shaderFloat16) {
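The heuristic reduces to one driver-dependent constant; restated as a function using only the values the diff shows:

    #include <cstddef>

    // Baseline of 64 descriptor sets per pool; AMD drivers get 96 because
    // some titles (the comment cites XC2) exhaust pools faster there.
    constexpr std::size_t SetsPerPool(bool is_amd_driver) {
        return is_amd_driver ? 96 : 64;
    }

    static_assert(SetsPerPool(false) == 64);
    static_assert(SetsPerPool(true) == 96);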
@@ -180,7 +180,7 @@ public:
     ~Device();
 
     /**
-     * Returns a format supported by the device for the passed requeriments.
+     * Returns a format supported by the device for the passed requirements.
      * @param wanted_format The ideal format to be returned. It may not be the returned format.
      * @param wanted_usage The usage that must be fulfilled even if the format is not supported.
      * @param format_type Format type usage.
@@ -259,12 +259,12 @@ public:
 
     bool ShouldBoostClocks() const;
 
-    /// Returns uniform buffer alignment requeriment.
+    /// Returns uniform buffer alignment requirement.
     VkDeviceSize GetUniformBufferAlignment() const {
         return properties.properties.limits.minUniformBufferOffsetAlignment;
     }
 
-    /// Returns storage alignment requeriment.
+    /// Returns storage alignment requirement.
     VkDeviceSize GetStorageBufferAlignment() const {
         return properties.properties.limits.minStorageBufferOffsetAlignment;
    }
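Callers typically consume these limits by rounding buffer offsets up to the next multiple; a minimal sketch of that arithmetic, with 0x100 as a made-up alignment rather than a queried device limit:

    #include <cstdint>

    // Round value up to the next multiple of align (align > 0).
    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) / align * align;
    }

    static_assert(AlignUp(0x234, 0x100) == 0x300); // Unaligned offsets move up.
    static_assert(AlignUp(0x300, 0x100) == 0x300); // Aligned offsets are unchanged.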
@@ -656,7 +656,7 @@ private:
     bool is_integrated{};                 ///< Is GPU an iGPU.
     bool is_virtual{};                    ///< Is GPU a virtual GPU.
     bool is_non_gpu{};                    ///< Is SoftwareRasterizer, FPGA, non-GPU device.
-    bool has_broken_cube_compatibility{}; ///< Has broken cube compatiblity bit
+    bool has_broken_cube_compatibility{}; ///< Has broken cube compatibility bit
     bool has_renderdoc{};                 ///< Has RenderDoc attached
     bool has_nsight_graphics{};           ///< Has Nsight Graphics attached
     bool supports_d24_depth{};            ///< Supports D24 depth buffers.
@@ -68,7 +68,7 @@ public:
     constexpr Span(const Range& range) : ptr{std::data(range)}, num{std::size(range)} {}
 
     /// Construct a span from a pointer and a size.
-    /// This is inteded for subranges.
+    /// This is intended for subranges.
     constexpr Span(const T* ptr_, std::size_t num_) noexcept : ptr{ptr_}, num{num_} {}
 
     /// Returns the data pointer by the span.
@@ -390,11 +390,11 @@ public:
     Handle(const Handle&) = delete;
     Handle& operator=(const Handle&) = delete;
 
-    /// Construct a handle transfering the ownership from another handle.
+    /// Construct a handle transferring the ownership from another handle.
     Handle(Handle&& rhs) noexcept
         : handle{std::exchange(rhs.handle, nullptr)}, owner{rhs.owner}, dld{rhs.dld} {}
 
-    /// Assign the current handle transfering the ownership from another handle.
+    /// Assign the current handle transferring the ownership from another handle.
     /// Destroys any previously held object.
     Handle& operator=(Handle&& rhs) noexcept {
         Release();
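The move operations rely on std::exchange to null the source handle, so only one destructor ever destroys the underlying object; a reduced model of the pattern, with OwnedThing standing in for the wrapper and an int* for the Vulkan handle:

    #include <cassert>
    #include <utility>

    struct OwnedThing {
        int* handle = nullptr;
        explicit OwnedThing(int* h) : handle{h} {}
        // Take ownership and leave rhs null, like Handle's move constructor above.
        OwnedThing(OwnedThing&& rhs) noexcept : handle{std::exchange(rhs.handle, nullptr)} {}
    };

    int main() {
        int object = 42;
        OwnedThing a{&object};
        OwnedThing b{std::move(a)};
        assert(a.handle == nullptr); // Source relinquished ownership.
        assert(b.handle == &object); // Destination now owns the object.
    }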
@@ -463,10 +463,10 @@ public:
     Handle(const Handle&) = delete;
     Handle& operator=(const Handle&) = delete;
 
-    /// Construct a handle transfering ownership from another handle.
+    /// Construct a handle transferring ownership from another handle.
     Handle(Handle&& rhs) noexcept : handle{std::exchange(rhs.handle, nullptr)}, dld{rhs.dld} {}
 
-    /// Assign the current handle transfering the ownership from another handle.
+    /// Assign the current handle transferring the ownership from another handle.
     /// Destroys any previously held object.
     Handle& operator=(Handle&& rhs) noexcept {
         Release();
@@ -533,12 +533,12 @@ public:
     PoolAllocations(const PoolAllocations&) = delete;
     PoolAllocations& operator=(const PoolAllocations&) = delete;
 
-    /// Construct an allocation transfering ownership from another allocation.
+    /// Construct an allocation transferring ownership from another allocation.
     PoolAllocations(PoolAllocations&& rhs) noexcept
         : allocations{std::move(rhs.allocations)}, num{rhs.num}, device{rhs.device}, pool{rhs.pool},
           dld{rhs.dld} {}
 
-    /// Assign an allocation transfering ownership from another allocation.
+    /// Assign an allocation transferring ownership from another allocation.
     PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
         allocations = std::move(rhs.allocations);
         num = rhs.num;