Merge pull request #6585 from ameerj/hades

Shader Decompiler Rewrite

bunnei · 2021-07-25 11:39:04 -07:00 · committed by GitHub
428 changed files with 49376 additions and 27255 deletions

View File

@ -29,7 +29,6 @@ add_library(video_core STATIC
dirty_flags.h
dma_pusher.cpp
dma_pusher.h
engines/const_buffer_engine_interface.h
engines/const_buffer_info.h
engines/engine_interface.h
engines/engine_upload.cpp
@ -44,9 +43,6 @@ add_library(video_core STATIC
engines/maxwell_3d.h
engines/maxwell_dma.cpp
engines/maxwell_dma.h
engines/shader_bytecode.h
engines/shader_header.h
engines/shader_type.h
framebuffer_config.h
macro/macro.cpp
macro/macro.h
@ -61,8 +57,6 @@ add_library(video_core STATIC
gpu.h
gpu_thread.cpp
gpu_thread.h
guest_driver.cpp
guest_driver.h
memory_manager.cpp
memory_manager.h
query_cache.h
@ -71,26 +65,25 @@ add_library(video_core STATIC
rasterizer_interface.h
renderer_base.cpp
renderer_base.h
renderer_opengl/gl_arb_decompiler.cpp
renderer_opengl/gl_arb_decompiler.h
renderer_opengl/gl_buffer_cache.cpp
renderer_opengl/gl_buffer_cache.h
renderer_opengl/gl_compute_pipeline.cpp
renderer_opengl/gl_compute_pipeline.h
renderer_opengl/gl_device.cpp
renderer_opengl/gl_device.h
renderer_opengl/gl_fence_manager.cpp
renderer_opengl/gl_fence_manager.h
renderer_opengl/gl_graphics_pipeline.cpp
renderer_opengl/gl_graphics_pipeline.h
renderer_opengl/gl_rasterizer.cpp
renderer_opengl/gl_rasterizer.h
renderer_opengl/gl_resource_manager.cpp
renderer_opengl/gl_resource_manager.h
renderer_opengl/gl_shader_cache.cpp
renderer_opengl/gl_shader_cache.h
renderer_opengl/gl_shader_decompiler.cpp
renderer_opengl/gl_shader_decompiler.h
renderer_opengl/gl_shader_disk_cache.cpp
renderer_opengl/gl_shader_disk_cache.h
renderer_opengl/gl_shader_manager.cpp
renderer_opengl/gl_shader_manager.h
renderer_opengl/gl_shader_context.h
renderer_opengl/gl_shader_util.cpp
renderer_opengl/gl_shader_util.h
renderer_opengl/gl_state_tracker.cpp
@ -112,6 +105,7 @@ add_library(video_core STATIC
renderer_vulkan/fixed_pipeline_state.h
renderer_vulkan/maxwell_to_vk.cpp
renderer_vulkan/maxwell_to_vk.h
renderer_vulkan/pipeline_helper.h
renderer_vulkan/renderer_vulkan.h
renderer_vulkan/renderer_vulkan.cpp
renderer_vulkan/vk_blit_screen.cpp
@ -138,12 +132,12 @@ add_library(video_core STATIC
renderer_vulkan/vk_query_cache.h
renderer_vulkan/vk_rasterizer.cpp
renderer_vulkan/vk_rasterizer.h
renderer_vulkan/vk_render_pass_cache.cpp
renderer_vulkan/vk_render_pass_cache.h
renderer_vulkan/vk_resource_pool.cpp
renderer_vulkan/vk_resource_pool.h
renderer_vulkan/vk_scheduler.cpp
renderer_vulkan/vk_scheduler.h
renderer_vulkan/vk_shader_decompiler.cpp
renderer_vulkan/vk_shader_decompiler.h
renderer_vulkan/vk_shader_util.cpp
renderer_vulkan/vk_shader_util.h
renderer_vulkan/vk_staging_buffer_pool.cpp
@ -156,60 +150,12 @@ add_library(video_core STATIC
renderer_vulkan/vk_texture_cache.h
renderer_vulkan/vk_update_descriptor.cpp
renderer_vulkan/vk_update_descriptor.h
shader_cache.cpp
shader_cache.h
shader_environment.cpp
shader_environment.h
shader_notify.cpp
shader_notify.h
shader/decode/arithmetic.cpp
shader/decode/arithmetic_immediate.cpp
shader/decode/bfe.cpp
shader/decode/bfi.cpp
shader/decode/shift.cpp
shader/decode/arithmetic_integer.cpp
shader/decode/arithmetic_integer_immediate.cpp
shader/decode/arithmetic_half.cpp
shader/decode/arithmetic_half_immediate.cpp
shader/decode/ffma.cpp
shader/decode/hfma2.cpp
shader/decode/conversion.cpp
shader/decode/memory.cpp
shader/decode/texture.cpp
shader/decode/image.cpp
shader/decode/float_set_predicate.cpp
shader/decode/integer_set_predicate.cpp
shader/decode/half_set_predicate.cpp
shader/decode/predicate_set_register.cpp
shader/decode/predicate_set_predicate.cpp
shader/decode/register_set_predicate.cpp
shader/decode/float_set.cpp
shader/decode/integer_set.cpp
shader/decode/half_set.cpp
shader/decode/video.cpp
shader/decode/warp.cpp
shader/decode/xmad.cpp
shader/decode/other.cpp
shader/ast.cpp
shader/ast.h
shader/async_shaders.cpp
shader/async_shaders.h
shader/compiler_settings.cpp
shader/compiler_settings.h
shader/control_flow.cpp
shader/control_flow.h
shader/decode.cpp
shader/expr.cpp
shader/expr.h
shader/memory_util.cpp
shader/memory_util.h
shader/node_helper.cpp
shader/node_helper.h
shader/node.h
shader/registry.cpp
shader/registry.h
shader/shader_ir.cpp
shader/shader_ir.h
shader/track.cpp
shader/transform_feedback.cpp
shader/transform_feedback.h
surface.cpp
surface.h
texture_cache/accelerated_swizzle.cpp
@ -242,6 +188,8 @@ add_library(video_core STATIC
textures/decoders.h
textures/texture.cpp
textures/texture.h
transform_feedback.cpp
transform_feedback.h
video_core.cpp
video_core.h
vulkan_common/vulkan_debug_callback.cpp
@ -265,7 +213,7 @@ add_library(video_core STATIC
create_target_directory_groups(video_core)
target_link_libraries(video_core PUBLIC common core)
target_link_libraries(video_core PRIVATE glad xbyak)
target_link_libraries(video_core PUBLIC glad shader_recompiler xbyak)
if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32)
add_dependencies(video_core ffmpeg-build)

View File

@ -31,6 +31,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/slot_vector.h"
#include "video_core/texture_cache/types.h"
@ -42,14 +43,19 @@ MICROPROFILE_DECLARE(GPU_DownloadMemory);
using BufferId = SlotId;
using VideoCore::Surface::PixelFormat;
using namespace Common::Literals;
constexpr u32 NUM_VERTEX_BUFFERS = 32;
constexpr u32 NUM_TRANSFORM_FEEDBACK_BUFFERS = 4;
constexpr u32 NUM_GRAPHICS_UNIFORM_BUFFERS = 18;
constexpr u32 NUM_COMPUTE_UNIFORM_BUFFERS = 8;
constexpr u32 NUM_STORAGE_BUFFERS = 16;
constexpr u32 NUM_TEXTURE_BUFFERS = 16;
constexpr u32 NUM_STAGES = 5;
using namespace Common::Literals;
using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>;
using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
template <typename P>
class BufferCache {
@ -67,6 +73,7 @@ class BufferCache {
static constexpr bool NEEDS_BIND_UNIFORM_INDEX = P::NEEDS_BIND_UNIFORM_INDEX;
static constexpr bool NEEDS_BIND_STORAGE_INDEX = P::NEEDS_BIND_STORAGE_INDEX;
static constexpr bool USE_MEMORY_MAPS = P::USE_MEMORY_MAPS;
static constexpr bool SEPARATE_IMAGE_BUFFERS_BINDINGS = P::SEPARATE_IMAGE_BUFFER_BINDINGS;
static constexpr BufferId NULL_BUFFER_ID{0};
@ -96,6 +103,10 @@ class BufferCache {
BufferId buffer_id;
};
struct TextureBufferBinding : Binding {
PixelFormat format;
};
static constexpr Binding NULL_BINDING{
.cpu_addr = 0,
.size = 0,
@ -133,20 +144,31 @@ public:
void BindHostComputeBuffers();
void SetEnabledUniformBuffers(size_t stage, u32 enabled);
void SetUniformBuffersState(const std::array<u32, NUM_STAGES>& mask,
const UniformBufferSizes* sizes);
void SetEnabledComputeUniformBuffers(u32 enabled);
void SetComputeUniformBufferState(u32 mask, const ComputeUniformBufferSizes* sizes);
void UnbindGraphicsStorageBuffers(size_t stage);
void BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, u32 cbuf_index, u32 cbuf_offset,
bool is_written);
void UnbindGraphicsTextureBuffers(size_t stage);
void BindGraphicsTextureBuffer(size_t stage, size_t tbo_index, GPUVAddr gpu_addr, u32 size,
PixelFormat format, bool is_written, bool is_image);
void UnbindComputeStorageBuffers();
void BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, u32 cbuf_offset,
bool is_written);
void UnbindComputeTextureBuffers();
void BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_addr, u32 size, PixelFormat format,
bool is_written, bool is_image);
void FlushCachedWrites();
/// Return true when there are uncommitted buffers to be downloaded
@ -178,6 +200,7 @@ public:
[[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
std::mutex mutex;
Runtime& runtime;
private:
template <typename Func>
@ -254,12 +277,16 @@ private:
void BindHostGraphicsStorageBuffers(size_t stage);
void BindHostGraphicsTextureBuffers(size_t stage);
void BindHostTransformFeedbackBuffers();
void BindHostComputeUniformBuffers();
void BindHostComputeStorageBuffers();
void BindHostComputeTextureBuffers();
void DoUpdateGraphicsBuffers(bool is_indexed);
void DoUpdateComputeBuffers();
@ -274,6 +301,8 @@ private:
void UpdateStorageBuffers(size_t stage);
void UpdateTextureBuffers(size_t stage);
void UpdateTransformFeedbackBuffers();
void UpdateTransformFeedbackBuffer(u32 index);
@ -282,6 +311,8 @@ private:
void UpdateComputeStorageBuffers();
void UpdateComputeTextureBuffers();
void MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size);
[[nodiscard]] BufferId FindBuffer(VAddr cpu_addr, u32 size);
@ -323,6 +354,9 @@ private:
[[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const;
[[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
PixelFormat format);
[[nodiscard]] std::span<const u8> ImmediateBufferWithData(VAddr cpu_addr, size_t size);
[[nodiscard]] std::span<u8> ImmediateBuffer(size_t wanted_capacity);
@ -336,7 +370,6 @@ private:
Tegra::Engines::KeplerCompute& kepler_compute;
Tegra::MemoryManager& gpu_memory;
Core::Memory::Memory& cpu_memory;
Runtime& runtime;
SlotVector<Buffer> slot_buffers;
DelayedDestructionRing<Buffer, 8> delayed_destruction_ring;
@ -347,20 +380,30 @@ private:
std::array<Binding, NUM_VERTEX_BUFFERS> vertex_buffers;
std::array<std::array<Binding, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES> uniform_buffers;
std::array<std::array<Binding, NUM_STORAGE_BUFFERS>, NUM_STAGES> storage_buffers;
std::array<std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS>, NUM_STAGES> texture_buffers;
std::array<Binding, NUM_TRANSFORM_FEEDBACK_BUFFERS> transform_feedback_buffers;
std::array<Binding, NUM_COMPUTE_UNIFORM_BUFFERS> compute_uniform_buffers;
std::array<Binding, NUM_STORAGE_BUFFERS> compute_storage_buffers;
std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS> compute_texture_buffers;
std::array<u32, NUM_STAGES> enabled_uniform_buffers{};
u32 enabled_compute_uniform_buffers = 0;
std::array<u32, NUM_STAGES> enabled_uniform_buffer_masks{};
u32 enabled_compute_uniform_buffer_mask = 0;
const UniformBufferSizes* uniform_buffer_sizes{};
const ComputeUniformBufferSizes* compute_uniform_buffer_sizes{};
std::array<u32, NUM_STAGES> enabled_storage_buffers{};
std::array<u32, NUM_STAGES> written_storage_buffers{};
u32 enabled_compute_storage_buffers = 0;
u32 written_compute_storage_buffers = 0;
std::array<u32, NUM_STAGES> fast_bound_uniform_buffers{};
std::array<u32, NUM_STAGES> enabled_texture_buffers{};
std::array<u32, NUM_STAGES> written_texture_buffers{};
std::array<u32, NUM_STAGES> image_texture_buffers{};
u32 enabled_compute_texture_buffers = 0;
u32 written_compute_texture_buffers = 0;
u32 image_compute_texture_buffers = 0;
std::array<u32, 16> uniform_cache_hits{};
std::array<u32, 16> uniform_cache_shots{};
@ -371,6 +414,10 @@ private:
std::conditional_t<HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS, std::array<u32, NUM_STAGES>, Empty>
dirty_uniform_buffers{};
std::conditional_t<IS_OPENGL, std::array<u32, NUM_STAGES>, Empty> fast_bound_uniform_buffers{};
std::conditional_t<HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS,
std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
uniform_buffer_binding_sizes{};
std::vector<BufferId> cached_write_buffer_ids;
@ -394,8 +441,8 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
Tegra::Engines::KeplerCompute& kepler_compute_,
Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
Runtime& runtime_)
: rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_},
gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_}, runtime{runtime_} {
: runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
// Ensure the first slot is used for the null buffer
void(slot_buffers.insert(runtime, NullBufferParams{}));
deletion_iterator = slot_buffers.end();
@ -615,6 +662,7 @@ void BufferCache<P>::BindHostStageBuffers(size_t stage) {
MICROPROFILE_SCOPE(GPU_BindUploadBuffers);
BindHostGraphicsUniformBuffers(stage);
BindHostGraphicsStorageBuffers(stage);
BindHostGraphicsTextureBuffers(stage);
}
template <class P>
@ -622,21 +670,30 @@ void BufferCache<P>::BindHostComputeBuffers() {
MICROPROFILE_SCOPE(GPU_BindUploadBuffers);
BindHostComputeUniformBuffers();
BindHostComputeStorageBuffers();
BindHostComputeTextureBuffers();
}
template <class P>
void BufferCache<P>::SetEnabledUniformBuffers(size_t stage, u32 enabled) {
void BufferCache<P>::SetUniformBuffersState(const std::array<u32, NUM_STAGES>& mask,
const UniformBufferSizes* sizes) {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
if (enabled_uniform_buffers[stage] != enabled) {
dirty_uniform_buffers[stage] = ~u32{0};
if (enabled_uniform_buffer_masks != mask) {
if constexpr (IS_OPENGL) {
fast_bound_uniform_buffers.fill(0);
}
dirty_uniform_buffers.fill(~u32{0});
uniform_buffer_binding_sizes.fill({});
}
}
enabled_uniform_buffers[stage] = enabled;
enabled_uniform_buffer_masks = mask;
uniform_buffer_sizes = sizes;
}
template <class P>
void BufferCache<P>::SetEnabledComputeUniformBuffers(u32 enabled) {
enabled_compute_uniform_buffers = enabled;
void BufferCache<P>::SetComputeUniformBufferState(u32 mask,
const ComputeUniformBufferSizes* sizes) {
enabled_compute_uniform_buffer_mask = mask;
compute_uniform_buffer_sizes = sizes;
}
template <class P>
@ -656,10 +713,30 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index,
storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr);
}
template <class P>
void BufferCache<P>::UnbindGraphicsTextureBuffers(size_t stage) {
enabled_texture_buffers[stage] = 0;
written_texture_buffers[stage] = 0;
image_texture_buffers[stage] = 0;
}
template <class P>
void BufferCache<P>::BindGraphicsTextureBuffer(size_t stage, size_t tbo_index, GPUVAddr gpu_addr,
u32 size, PixelFormat format, bool is_written,
bool is_image) {
enabled_texture_buffers[stage] |= 1U << tbo_index;
written_texture_buffers[stage] |= (is_written ? 1U : 0U) << tbo_index;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
image_texture_buffers[stage] |= (is_image ? 1U : 0U) << tbo_index;
}
texture_buffers[stage][tbo_index] = GetTextureBufferBinding(gpu_addr, size, format);
}
template <class P>
void BufferCache<P>::UnbindComputeStorageBuffers() {
enabled_compute_storage_buffers = 0;
written_compute_storage_buffers = 0;
image_compute_texture_buffers = 0;
}
template <class P>
@ -676,6 +753,24 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index,
compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr);
}
template <class P>
void BufferCache<P>::UnbindComputeTextureBuffers() {
enabled_compute_texture_buffers = 0;
written_compute_texture_buffers = 0;
image_compute_texture_buffers = 0;
}
template <class P>
void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_addr, u32 size,
PixelFormat format, bool is_written, bool is_image) {
enabled_compute_texture_buffers |= 1U << tbo_index;
written_compute_texture_buffers |= (is_written ? 1U : 0U) << tbo_index;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
image_compute_texture_buffers |= (is_image ? 1U : 0U) << tbo_index;
}
compute_texture_buffers[tbo_index] = GetTextureBufferBinding(gpu_addr, size, format);
}
template <class P>
void BufferCache<P>::FlushCachedWrites() {
for (const BufferId buffer_id : cached_write_buffer_ids) {
@ -901,7 +996,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffers(size_t stage) {
dirty = std::exchange(dirty_uniform_buffers[stage], 0);
}
u32 binding_index = 0;
ForEachEnabledBit(enabled_uniform_buffers[stage], [&](u32 index) {
ForEachEnabledBit(enabled_uniform_buffer_masks[stage], [&](u32 index) {
const bool needs_bind = ((dirty >> index) & 1) != 0;
BindHostGraphicsUniformBuffer(stage, index, binding_index, needs_bind);
if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
@ -915,7 +1010,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
bool needs_bind) {
const Binding& binding = uniform_buffers[stage][index];
const VAddr cpu_addr = binding.cpu_addr;
const u32 size = binding.size;
const u32 size = std::min(binding.size, (*uniform_buffer_sizes)[stage][index]);
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer);
const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
@ -925,8 +1020,13 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
if constexpr (IS_OPENGL) {
if (runtime.HasFastBufferSubData()) {
// Fast path for Nvidia
if (!HasFastUniformBufferBound(stage, binding_index)) {
const bool should_fast_bind =
!HasFastUniformBufferBound(stage, binding_index) ||
uniform_buffer_binding_sizes[stage][binding_index] != size;
if (should_fast_bind) {
// We only have to bind when the currently bound buffer is not the fast version
fast_bound_uniform_buffers[stage] |= 1U << binding_index;
uniform_buffer_binding_sizes[stage][binding_index] = size;
runtime.BindFastUniformBuffer(stage, binding_index, size);
}
const auto span = ImmediateBufferWithData(cpu_addr, size);
@ -934,8 +1034,10 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
return;
}
}
fast_bound_uniform_buffers[stage] |= 1U << binding_index;
if constexpr (IS_OPENGL) {
fast_bound_uniform_buffers[stage] |= 1U << binding_index;
uniform_buffer_binding_sizes[stage][binding_index] = size;
}
// Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan
const std::span<u8> span = runtime.BindMappedUniformBuffer(stage, binding_index, size);
cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size);
@ -948,14 +1050,27 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
}
++uniform_cache_shots[0];
if (!needs_bind && !HasFastUniformBufferBound(stage, binding_index)) {
// Skip binding if it's not needed and if the bound buffer is not the fast version
// This exists to avoid instances where the fast buffer is bound and a GPU write happens
// Skip binding if it's not needed and if the bound buffer is not the fast version
// This exists to avoid instances where the fast buffer is bound and a GPU write happens
needs_bind |= HasFastUniformBufferBound(stage, binding_index);
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
needs_bind |= uniform_buffer_binding_sizes[stage][binding_index] != size;
}
if (!needs_bind) {
return;
}
fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
const u32 offset = buffer.Offset(cpu_addr);
if constexpr (IS_OPENGL) {
// Fast buffer will be unbound
fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
// Mark the index as dirty if offset doesn't match
const bool is_copy_bind = offset != 0 && !runtime.SupportsNonZeroUniformOffset();
dirty_uniform_buffers[stage] |= (is_copy_bind ? 1U : 0U) << index;
}
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
uniform_buffer_binding_sizes[stage][binding_index] = size;
}
if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
runtime.BindUniformBuffer(stage, binding_index, buffer, offset, size);
} else {
@ -984,6 +1099,28 @@ void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
});
}
template <class P>
void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
ForEachEnabledBit(enabled_texture_buffers[stage], [&](u32 index) {
const TextureBufferBinding& binding = texture_buffers[stage][index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
const PixelFormat format = binding.format;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
if (((image_texture_buffers[stage] >> index) & 1) != 0) {
runtime.BindImageBuffer(buffer, offset, size, format);
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
}
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
}
});
}
template <class P>
void BufferCache<P>::BindHostTransformFeedbackBuffers() {
if (maxwell3d.regs.tfb_enabled == 0) {
@ -1006,13 +1143,14 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
// Mark all uniform buffers as dirty
dirty_uniform_buffers.fill(~u32{0});
fast_bound_uniform_buffers.fill(0);
}
u32 binding_index = 0;
ForEachEnabledBit(enabled_compute_uniform_buffers, [&](u32 index) {
ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
const Binding& binding = compute_uniform_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer);
const u32 size = binding.size;
const u32 size = std::min(binding.size, (*compute_uniform_buffer_sizes)[index]);
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
@ -1046,6 +1184,28 @@ void BufferCache<P>::BindHostComputeStorageBuffers() {
});
}
template <class P>
void BufferCache<P>::BindHostComputeTextureBuffers() {
ForEachEnabledBit(enabled_compute_texture_buffers, [&](u32 index) {
const TextureBufferBinding& binding = compute_texture_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
const PixelFormat format = binding.format;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
if (((image_compute_texture_buffers >> index) & 1) != 0) {
runtime.BindImageBuffer(buffer, offset, size, format);
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
}
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
}
});
}
template <class P>
void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
if (is_indexed) {
@ -1056,6 +1216,7 @@ void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
UpdateUniformBuffers(stage);
UpdateStorageBuffers(stage);
UpdateTextureBuffers(stage);
}
}
@ -1063,6 +1224,7 @@ template <class P>
void BufferCache<P>::DoUpdateComputeBuffers() {
UpdateComputeUniformBuffers();
UpdateComputeStorageBuffers();
UpdateComputeTextureBuffers();
}
template <class P>
@ -1132,7 +1294,7 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
template <class P>
void BufferCache<P>::UpdateUniformBuffers(size_t stage) {
ForEachEnabledBit(enabled_uniform_buffers[stage], [&](u32 index) {
ForEachEnabledBit(enabled_uniform_buffer_masks[stage], [&](u32 index) {
Binding& binding = uniform_buffers[stage][index];
if (binding.buffer_id) {
// Already updated
@ -1162,6 +1324,18 @@ void BufferCache<P>::UpdateStorageBuffers(size_t stage) {
});
}
template <class P>
void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
ForEachEnabledBit(enabled_texture_buffers[stage], [&](u32 index) {
Binding& binding = texture_buffers[stage][index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark buffer as written if needed
if (((written_texture_buffers[stage] >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
}
template <class P>
void BufferCache<P>::UpdateTransformFeedbackBuffers() {
if (maxwell3d.regs.tfb_enabled == 0) {
@ -1193,7 +1367,7 @@ void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
template <class P>
void BufferCache<P>::UpdateComputeUniformBuffers() {
ForEachEnabledBit(enabled_compute_uniform_buffers, [&](u32 index) {
ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
Binding& binding = compute_uniform_buffers[index];
binding = NULL_BINDING;
const auto& launch_desc = kepler_compute.launch_description;
@ -1214,11 +1388,22 @@ void BufferCache<P>::UpdateComputeStorageBuffers() {
ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) {
// Resolve buffer
Binding& binding = compute_storage_buffers[index];
const BufferId buffer_id = FindBuffer(binding.cpu_addr, binding.size);
binding.buffer_id = buffer_id;
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark as written if needed
if (((written_compute_storage_buffers >> index) & 1) != 0) {
MarkWrittenBuffer(buffer_id, binding.cpu_addr, binding.size);
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
}
template <class P>
void BufferCache<P>::UpdateComputeTextureBuffers() {
ForEachEnabledBit(enabled_compute_texture_buffers, [&](u32 index) {
Binding& binding = compute_texture_buffers[index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark as written if needed
if (((written_compute_texture_buffers >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
}
@ -1551,6 +1736,7 @@ template <class P>
void BufferCache<P>::NotifyBufferDeletion() {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
dirty_uniform_buffers.fill(~u32{0});
uniform_buffer_binding_sizes.fill({});
}
auto& flags = maxwell3d.dirty.flags;
flags[Dirty::IndexBuffer] = true;
@ -1577,6 +1763,25 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
return binding;
}
template <class P>
typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
GPUVAddr gpu_addr, u32 size, PixelFormat format) {
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
TextureBufferBinding binding;
if (!cpu_addr || size == 0) {
binding.cpu_addr = 0;
binding.size = 0;
binding.buffer_id = NULL_BUFFER_ID;
binding.format = PixelFormat::Invalid;
} else {
binding.cpu_addr = *cpu_addr;
binding.size = size;
binding.buffer_id = BufferId{};
binding.format = format;
}
return binding;
}
template <class P>
std::span<const u8> BufferCache<P>::ImmediateBufferWithData(VAddr cpu_addr, size_t size) {
u8* const base_pointer = cpu_memory.GetPointer(cpu_addr);
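(The binding paths in this header iterate the per-stage enable masks through ForEachEnabledBit, whose definition is not part of the hunks shown here. A minimal sketch of such a helper, assuming C++20's <bit> header, could look like the following; it is illustrative only, not the project's actual implementation.)

    #include <bit>
    #include <cstdint>

    template <typename Func>
    void ForEachEnabledBit(std::uint32_t mask, Func&& func) {
        while (mask != 0) {
            const int index = std::countr_zero(mask); // position of the lowest set bit
            func(static_cast<std::uint32_t>(index));
            mask &= mask - 1;                         // clear that bit and continue
        }
    }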

View File

@ -58,6 +58,11 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) {
FillBlock(table, OFF(zeta), NUM(zeta), flag);
}
}
void SetupDirtyShaders(Maxwell3D::DirtyState::Tables& tables) {
FillBlock(tables[0], OFF(shader_config[0]),
NUM(shader_config[0]) * Maxwell3D::Regs::MaxShaderProgram, Shaders);
}
} // Anonymous namespace
void SetupDirtyFlags(Maxwell3D::DirtyState::Tables& tables) {
@ -65,6 +70,7 @@ void SetupDirtyFlags(Maxwell3D::DirtyState::Tables& tables) {
SetupIndexBuffer(tables);
SetupDirtyDescriptors(tables);
SetupDirtyRenderTargets(tables);
SetupDirtyShaders(tables);
}
} // namespace VideoCommon::Dirty

View File

@ -36,6 +36,8 @@ enum : u8 {
IndexBuffer,
Shaders,
LastCommonEntry,
};

View File

@ -1,103 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <type_traits>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/engines/shader_type.h"
#include "video_core/guest_driver.h"
#include "video_core/textures/texture.h"
namespace Tegra::Engines {
struct SamplerDescriptor {
union {
u32 raw = 0;
BitField<0, 2, Tegra::Shader::TextureType> texture_type;
BitField<2, 3, Tegra::Texture::ComponentType> r_type;
BitField<5, 1, u32> is_array;
BitField<6, 1, u32> is_buffer;
BitField<7, 1, u32> is_shadow;
BitField<8, 3, Tegra::Texture::ComponentType> g_type;
BitField<11, 3, Tegra::Texture::ComponentType> b_type;
BitField<14, 3, Tegra::Texture::ComponentType> a_type;
BitField<17, 7, Tegra::Texture::TextureFormat> format;
};
bool operator==(const SamplerDescriptor& rhs) const noexcept {
return raw == rhs.raw;
}
bool operator!=(const SamplerDescriptor& rhs) const noexcept {
return !operator==(rhs);
}
static SamplerDescriptor FromTIC(const Tegra::Texture::TICEntry& tic) {
using Tegra::Shader::TextureType;
SamplerDescriptor result;
result.format.Assign(tic.format.Value());
result.r_type.Assign(tic.r_type.Value());
result.g_type.Assign(tic.g_type.Value());
result.b_type.Assign(tic.b_type.Value());
result.a_type.Assign(tic.a_type.Value());
switch (tic.texture_type.Value()) {
case Tegra::Texture::TextureType::Texture1D:
result.texture_type.Assign(TextureType::Texture1D);
return result;
case Tegra::Texture::TextureType::Texture2D:
result.texture_type.Assign(TextureType::Texture2D);
return result;
case Tegra::Texture::TextureType::Texture3D:
result.texture_type.Assign(TextureType::Texture3D);
return result;
case Tegra::Texture::TextureType::TextureCubemap:
result.texture_type.Assign(TextureType::TextureCube);
return result;
case Tegra::Texture::TextureType::Texture1DArray:
result.texture_type.Assign(TextureType::Texture1D);
result.is_array.Assign(1);
return result;
case Tegra::Texture::TextureType::Texture2DArray:
result.texture_type.Assign(TextureType::Texture2D);
result.is_array.Assign(1);
return result;
case Tegra::Texture::TextureType::Texture1DBuffer:
result.texture_type.Assign(TextureType::Texture1D);
result.is_buffer.Assign(1);
return result;
case Tegra::Texture::TextureType::Texture2DNoMipmap:
result.texture_type.Assign(TextureType::Texture2D);
return result;
case Tegra::Texture::TextureType::TextureCubeArray:
result.texture_type.Assign(TextureType::TextureCube);
result.is_array.Assign(1);
return result;
default:
result.texture_type.Assign(TextureType::Texture2D);
return result;
}
}
};
static_assert(std::is_trivially_copyable_v<SamplerDescriptor>);
class ConstBufferEngineInterface {
public:
virtual ~ConstBufferEngineInterface() = default;
virtual u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const = 0;
virtual SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const = 0;
virtual SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer,
u64 offset) const = 0;
virtual SamplerDescriptor AccessSampler(u32 handle) const = 0;
virtual u32 GetBoundBuffer() const = 0;
virtual VideoCore::GuestDriverProfile& AccessGuestDriverProfile() = 0;
virtual const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const = 0;
};
} // namespace Tegra::Engines

View File

@ -8,7 +8,6 @@
#include "core/core.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
@ -57,53 +56,11 @@ void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amoun
}
}
u32 KeplerCompute::AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const {
ASSERT(stage == ShaderType::Compute);
const auto& buffer = launch_description.const_buffer_config[const_buffer];
u32 result;
std::memcpy(&result, memory_manager.GetPointer(buffer.Address() + offset), sizeof(u32));
return result;
}
SamplerDescriptor KeplerCompute::AccessBoundSampler(ShaderType stage, u64 offset) const {
return AccessBindlessSampler(stage, regs.tex_cb_index, offset * sizeof(Texture::TextureHandle));
}
SamplerDescriptor KeplerCompute::AccessBindlessSampler(ShaderType stage, u64 const_buffer,
u64 offset) const {
ASSERT(stage == ShaderType::Compute);
const auto& tex_info_buffer = launch_description.const_buffer_config[const_buffer];
const GPUVAddr tex_info_address = tex_info_buffer.Address() + offset;
return AccessSampler(memory_manager.Read<u32>(tex_info_address));
}
SamplerDescriptor KeplerCompute::AccessSampler(u32 handle) const {
const Texture::TextureHandle tex_handle{handle};
const Texture::TICEntry tic = GetTICEntry(tex_handle.tic_id);
const Texture::TSCEntry tsc = GetTSCEntry(tex_handle.tsc_id);
SamplerDescriptor result = SamplerDescriptor::FromTIC(tic);
result.is_shadow.Assign(tsc.depth_compare_enabled.Value());
return result;
}
VideoCore::GuestDriverProfile& KeplerCompute::AccessGuestDriverProfile() {
return rasterizer->AccessGuestDriverProfile();
}
const VideoCore::GuestDriverProfile& KeplerCompute::AccessGuestDriverProfile() const {
return rasterizer->AccessGuestDriverProfile();
}
void KeplerCompute::ProcessLaunch() {
const GPUVAddr launch_desc_loc = regs.launch_desc_loc.Address();
memory_manager.ReadBlockUnsafe(launch_desc_loc, &launch_description,
LaunchParams::NUM_LAUNCH_PARAMETERS * sizeof(u32));
const GPUVAddr code_addr = regs.code_loc.Address() + launch_description.program_start;
LOG_TRACE(HW_GPU, "Compute invocation launched at address 0x{:016x}", code_addr);
rasterizer->DispatchCompute(code_addr);
rasterizer->DispatchCompute();
}
Texture::TICEntry KeplerCompute::GetTICEntry(u32 tic_index) const {

View File

@ -10,10 +10,8 @@
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/engines/const_buffer_engine_interface.h"
#include "video_core/engines/engine_interface.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/engines/shader_type.h"
#include "video_core/gpu.h"
#include "video_core/textures/texture.h"
@ -40,7 +38,7 @@ namespace Tegra::Engines {
#define KEPLER_COMPUTE_REG_INDEX(field_name) \
(offsetof(Tegra::Engines::KeplerCompute::Regs, field_name) / sizeof(u32))
class KeplerCompute final : public ConstBufferEngineInterface, public EngineInterface {
class KeplerCompute final : public EngineInterface {
public:
explicit KeplerCompute(Core::System& system, MemoryManager& memory_manager);
~KeplerCompute();
@ -209,23 +207,6 @@ public:
void CallMultiMethod(u32 method, const u32* base_start, u32 amount,
u32 methods_pending) override;
u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const override;
SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const override;
SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer,
u64 offset) const override;
SamplerDescriptor AccessSampler(u32 handle) const override;
u32 GetBoundBuffer() const override {
return regs.tex_cb_index;
}
VideoCore::GuestDriverProfile& AccessGuestDriverProfile() override;
const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override;
private:
void ProcessLaunch();

View File

@ -8,7 +8,6 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
@ -670,42 +669,4 @@ void Maxwell3D::ProcessClearBuffers() {
rasterizer->Clear();
}
u32 Maxwell3D::AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const {
ASSERT(stage != ShaderType::Compute);
const auto& shader_stage = state.shader_stages[static_cast<std::size_t>(stage)];
const auto& buffer = shader_stage.const_buffers[const_buffer];
return memory_manager.Read<u32>(buffer.address + offset);
}
SamplerDescriptor Maxwell3D::AccessBoundSampler(ShaderType stage, u64 offset) const {
return AccessBindlessSampler(stage, regs.tex_cb_index, offset * sizeof(Texture::TextureHandle));
}
SamplerDescriptor Maxwell3D::AccessBindlessSampler(ShaderType stage, u64 const_buffer,
u64 offset) const {
ASSERT(stage != ShaderType::Compute);
const auto& shader = state.shader_stages[static_cast<std::size_t>(stage)];
const auto& tex_info_buffer = shader.const_buffers[const_buffer];
const GPUVAddr tex_info_address = tex_info_buffer.address + offset;
return AccessSampler(memory_manager.Read<u32>(tex_info_address));
}
SamplerDescriptor Maxwell3D::AccessSampler(u32 handle) const {
const Texture::TextureHandle tex_handle{handle};
const Texture::TICEntry tic = GetTICEntry(tex_handle.tic_id);
const Texture::TSCEntry tsc = GetTSCEntry(tex_handle.tsc_id);
SamplerDescriptor result = SamplerDescriptor::FromTIC(tic);
result.is_shadow.Assign(tsc.depth_compare_enabled.Value());
return result;
}
VideoCore::GuestDriverProfile& Maxwell3D::AccessGuestDriverProfile() {
return rasterizer->AccessGuestDriverProfile();
}
const VideoCore::GuestDriverProfile& Maxwell3D::AccessGuestDriverProfile() const {
return rasterizer->AccessGuestDriverProfile();
}
} // namespace Tegra::Engines

View File

@ -17,11 +17,9 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/math_util.h"
#include "video_core/engines/const_buffer_engine_interface.h"
#include "video_core/engines/const_buffer_info.h"
#include "video_core/engines/engine_interface.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/engines/shader_type.h"
#include "video_core/gpu.h"
#include "video_core/macro/macro.h"
#include "video_core/textures/texture.h"
@ -49,7 +47,7 @@ namespace Tegra::Engines {
#define MAXWELL3D_REG_INDEX(field_name) \
(offsetof(Tegra::Engines::Maxwell3D::Regs, field_name) / sizeof(u32))
class Maxwell3D final : public ConstBufferEngineInterface, public EngineInterface {
class Maxwell3D final : public EngineInterface {
public:
explicit Maxwell3D(Core::System& system, MemoryManager& memory_manager);
~Maxwell3D();
@ -307,10 +305,6 @@ public:
return (type == Type::SignedNorm) || (type == Type::UnsignedNorm);
}
bool IsConstant() const {
return constant;
}
bool IsValid() const {
return size != Size::Invalid;
}
@ -912,7 +906,11 @@ public:
u32 fill_rectangle;
INSERT_PADDING_WORDS_NOINIT(0x8);
INSERT_PADDING_WORDS_NOINIT(0x2);
u32 conservative_raster_enable;
INSERT_PADDING_WORDS_NOINIT(0x5);
std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format;
@ -959,7 +957,11 @@ public:
SamplerIndex sampler_index;
INSERT_PADDING_WORDS_NOINIT(0x25);
INSERT_PADDING_WORDS_NOINIT(0x2);
std::array<u32, 8> gp_passthrough_mask;
INSERT_PADDING_WORDS_NOINIT(0x1B);
u32 depth_test_enable;
@ -1152,7 +1154,11 @@ public:
u32 index;
} primitive_restart;
INSERT_PADDING_WORDS_NOINIT(0x5F);
INSERT_PADDING_WORDS_NOINIT(0xE);
u32 provoking_vertex_last;
INSERT_PADDING_WORDS_NOINIT(0x50);
struct {
u32 start_addr_high;
@ -1424,23 +1430,6 @@ public:
void FlushMMEInlineDraw();
u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const override;
SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const override;
SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer,
u64 offset) const override;
SamplerDescriptor AccessSampler(u32 handle) const override;
u32 GetBoundBuffer() const override {
return regs.tex_cb_index;
}
VideoCore::GuestDriverProfile& AccessGuestDriverProfile() override;
const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override;
bool ShouldExecute() const {
return execute_on;
}
@ -1630,6 +1619,7 @@ ASSERT_REG_POSITION(zeta, 0x3F8);
ASSERT_REG_POSITION(render_area, 0x3FD);
ASSERT_REG_POSITION(clear_flags, 0x43E);
ASSERT_REG_POSITION(fill_rectangle, 0x44F);
ASSERT_REG_POSITION(conservative_raster_enable, 0x452);
ASSERT_REG_POSITION(vertex_attrib_format, 0x458);
ASSERT_REG_POSITION(multisample_sample_locations, 0x478);
ASSERT_REG_POSITION(multisample_coverage_to_color, 0x47E);
@ -1638,6 +1628,7 @@ ASSERT_REG_POSITION(zeta_width, 0x48a);
ASSERT_REG_POSITION(zeta_height, 0x48b);
ASSERT_REG_POSITION(zeta_depth, 0x48c);
ASSERT_REG_POSITION(sampler_index, 0x48D);
ASSERT_REG_POSITION(gp_passthrough_mask, 0x490);
ASSERT_REG_POSITION(depth_test_enable, 0x4B3);
ASSERT_REG_POSITION(independent_blend_enable, 0x4B9);
ASSERT_REG_POSITION(depth_write_enabled, 0x4BA);
@ -1690,6 +1681,7 @@ ASSERT_REG_POSITION(point_coord_replace, 0x581);
ASSERT_REG_POSITION(code_address, 0x582);
ASSERT_REG_POSITION(draw, 0x585);
ASSERT_REG_POSITION(primitive_restart, 0x591);
ASSERT_REG_POSITION(provoking_vertex_last, 0x5A1);
ASSERT_REG_POSITION(index_array, 0x5F2);
ASSERT_REG_POSITION(polygon_offset_clamp, 0x61F);
ASSERT_REG_POSITION(instanced_arrays, 0x620);

View File

@ -127,7 +127,8 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
// Optimized path for micro copies.
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X) {
if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
regs.src_params.height > GOB_SIZE_Y) {
FastCopyBlockLinearToPitch();
return;
}

File diff suppressed because it is too large

View File

@ -1,158 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <optional>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Tegra::Shader {
enum class OutputTopology : u32 {
PointList = 1,
LineStrip = 6,
TriangleStrip = 7,
};
enum class PixelImap : u8 {
Unused = 0,
Constant = 1,
Perspective = 2,
ScreenLinear = 3,
};
// Documentation in:
// http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html
struct Header {
union {
BitField<0, 5, u32> sph_type;
BitField<5, 5, u32> version;
BitField<10, 4, u32> shader_type;
BitField<14, 1, u32> mrt_enable;
BitField<15, 1, u32> kills_pixels;
BitField<16, 1, u32> does_global_store;
BitField<17, 4, u32> sass_version;
BitField<21, 5, u32> reserved;
BitField<26, 1, u32> does_load_or_store;
BitField<27, 1, u32> does_fp64;
BitField<28, 4, u32> stream_out_mask;
} common0;
union {
BitField<0, 24, u32> shader_local_memory_low_size;
BitField<24, 8, u32> per_patch_attribute_count;
} common1;
union {
BitField<0, 24, u32> shader_local_memory_high_size;
BitField<24, 8, u32> threads_per_input_primitive;
} common2;
union {
BitField<0, 24, u32> shader_local_memory_crs_size;
BitField<24, 4, OutputTopology> output_topology;
BitField<28, 4, u32> reserved;
} common3;
union {
BitField<0, 12, u32> max_output_vertices;
BitField<12, 8, u32> store_req_start; // NOTE: not used by geometry shaders.
BitField<20, 4, u32> reserved;
BitField<24, 8, u32> store_req_end; // NOTE: not used by geometry shaders.
} common4;
union {
struct {
INSERT_PADDING_BYTES_NOINIT(3); // ImapSystemValuesA
INSERT_PADDING_BYTES_NOINIT(1); // ImapSystemValuesB
INSERT_PADDING_BYTES_NOINIT(16); // ImapGenericVector[32]
INSERT_PADDING_BYTES_NOINIT(2); // ImapColor
union {
BitField<0, 8, u16> clip_distances;
BitField<8, 1, u16> point_sprite_s;
BitField<9, 1, u16> point_sprite_t;
BitField<10, 1, u16> fog_coordinate;
BitField<12, 1, u16> tessellation_eval_point_u;
BitField<13, 1, u16> tessellation_eval_point_v;
BitField<14, 1, u16> instance_id;
BitField<15, 1, u16> vertex_id;
};
INSERT_PADDING_BYTES_NOINIT(5); // ImapFixedFncTexture[10]
INSERT_PADDING_BYTES_NOINIT(1); // ImapReserved
INSERT_PADDING_BYTES_NOINIT(3); // OmapSystemValuesA
INSERT_PADDING_BYTES_NOINIT(1); // OmapSystemValuesB
INSERT_PADDING_BYTES_NOINIT(16); // OmapGenericVector[32]
INSERT_PADDING_BYTES_NOINIT(2); // OmapColor
INSERT_PADDING_BYTES_NOINIT(2); // OmapSystemValuesC
INSERT_PADDING_BYTES_NOINIT(5); // OmapFixedFncTexture[10]
INSERT_PADDING_BYTES_NOINIT(1); // OmapReserved
} vtg;
struct {
INSERT_PADDING_BYTES_NOINIT(3); // ImapSystemValuesA
INSERT_PADDING_BYTES_NOINIT(1); // ImapSystemValuesB
union {
BitField<0, 2, PixelImap> x;
BitField<2, 2, PixelImap> y;
BitField<4, 2, PixelImap> z;
BitField<6, 2, PixelImap> w;
u8 raw;
} imap_generic_vector[32];
INSERT_PADDING_BYTES_NOINIT(2); // ImapColor
INSERT_PADDING_BYTES_NOINIT(2); // ImapSystemValuesC
INSERT_PADDING_BYTES_NOINIT(10); // ImapFixedFncTexture[10]
INSERT_PADDING_BYTES_NOINIT(2); // ImapReserved
struct {
u32 target;
union {
BitField<0, 1, u32> sample_mask;
BitField<1, 1, u32> depth;
BitField<2, 30, u32> reserved;
};
} omap;
bool IsColorComponentOutputEnabled(u32 render_target, u32 component) const {
const u32 bit = render_target * 4 + component;
return omap.target & (1 << bit);
}
PixelImap GetPixelImap(u32 attribute) const {
const auto get_index = [this, attribute](u32 index) {
return static_cast<PixelImap>(
(imap_generic_vector[attribute].raw >> (index * 2)) & 3);
};
std::optional<PixelImap> result;
for (u32 component = 0; component < 4; ++component) {
const PixelImap index = get_index(component);
if (index == PixelImap::Unused) {
continue;
}
if (result && result != index) {
LOG_CRITICAL(HW_GPU, "Generic attribute conflict in interpolation mode");
}
result = index;
}
return result.value_or(PixelImap::Unused);
}
} ps;
std::array<u32, 0xF> raw;
};
u64 GetLocalMemorySize() const {
return (common1.shader_local_memory_low_size |
(common2.shader_local_memory_high_size << 24));
}
};
static_assert(sizeof(Header) == 0x50, "Incorrect structure size");
} // namespace Tegra::Shader

View File

@ -1,21 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
namespace Tegra::Engines {
enum class ShaderType : u32 {
Vertex = 0,
TesselationControl = 1,
TesselationEval = 2,
Geometry = 3,
Fragment = 4,
Compute = 5,
};
static constexpr std::size_t MaxShaderTypes = 6;
} // namespace Tegra::Engines

View File

@ -1,37 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <limits>
#include <vector>
#include "common/common_types.h"
#include "video_core/guest_driver.h"
namespace VideoCore {
void GuestDriverProfile::DeduceTextureHandlerSize(std::vector<u32> bound_offsets) {
if (texture_handler_size) {
return;
}
const std::size_t size = bound_offsets.size();
if (size < 2) {
return;
}
std::sort(bound_offsets.begin(), bound_offsets.end(), std::less{});
u32 min_val = std::numeric_limits<u32>::max();
for (std::size_t i = 1; i < size; ++i) {
if (bound_offsets[i] == bound_offsets[i - 1]) {
continue;
}
const u32 new_min = bound_offsets[i] - bound_offsets[i - 1];
min_val = std::min(min_val, new_min);
}
if (min_val > 2) {
return;
}
texture_handler_size = min_texture_handler_size * min_val;
}
} // namespace VideoCore

View File

@ -1,46 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <optional>
#include <vector>
#include "common/common_types.h"
namespace VideoCore {
/**
* The GuestDriverProfile class is used to learn about the GPU driver's behavior and to collect
* information necessary for HLE methods that are impossible to avoid, such as shader tracking,
* since they are Entscheidungsprobleme (undecidable in the general case).
*/
class GuestDriverProfile {
public:
explicit GuestDriverProfile() = default;
explicit GuestDriverProfile(std::optional<u32> texture_handler_size_)
: texture_handler_size{texture_handler_size_} {}
void DeduceTextureHandlerSize(std::vector<u32> bound_offsets);
u32 GetTextureHandlerSize() const {
return texture_handler_size.value_or(default_texture_handler_size);
}
bool IsTextureHandlerSizeKnown() const {
return texture_handler_size.has_value();
}
private:
// Minimum size of texture handler any driver can use.
static constexpr u32 min_texture_handler_size = 4;
// This goes with Vulkan and OpenGL standards but Nvidia GPUs can easily use 4 bytes instead.
// Thus, certain drivers may squish the size.
static constexpr u32 default_texture_handler_size = 8;
std::optional<u32> texture_handler_size = default_texture_handler_size;
};
} // namespace VideoCore
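(For reference, the deduction removed above works on the sorted list of bound constant-buffer offsets: the smallest gap between adjacent distinct offsets, if it is 1 or 2, selects a 4- or 8-byte texture handler. A hedged usage sketch, assuming the removed header were still available; the caller name is hypothetical.)

    #include <cassert>
    #include <optional>

    #include "video_core/guest_driver.h"

    void DeduceExample() { // hypothetical caller, for illustration only
        // Start with no known handler size so the deduction is allowed to run.
        VideoCore::GuestDriverProfile profile{std::nullopt};
        // Adjacent deltas of the sorted offsets {8, 9, 12} are {1, 3}; the minimum is 1,
        // so the deduced handler size is min_texture_handler_size * 1 = 4 bytes.
        profile.DeduceTextureHandlerSize({8, 9, 12});
        assert(profile.GetTextureHandlerSize() == 4);
    }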

View File

@ -69,7 +69,6 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
} else {
UNREACHABLE_MSG("Unmapping non-existent GPU address=0x{:x}", gpu_addr);
}
const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
for (const auto& map : submapped_ranges) {

View File

@ -11,7 +11,6 @@
#include "common/common_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"
#include "video_core/guest_driver.h"
namespace Tegra {
class MemoryManager;
@ -45,7 +44,7 @@ public:
virtual void Clear() = 0;
/// Dispatches a compute shader invocation
virtual void DispatchCompute(GPUVAddr code_addr) = 0;
virtual void DispatchCompute() = 0;
/// Resets the counter of a query
virtual void ResetCounter(QueryType type) = 0;
@ -136,18 +135,5 @@ public:
/// Initialize disk cached resources for the game being emulated
virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const DiskResourceLoadCallback& callback) {}
/// Grant access to the Guest Driver Profile for recording/obtaining info on the guest driver.
[[nodiscard]] GuestDriverProfile& AccessGuestDriverProfile() {
return guest_driver_profile;
}
/// Grant access to the Guest Driver Profile for recording/obtaining info on the guest driver.
[[nodiscard]] const GuestDriverProfile& AccessGuestDriverProfile() const {
return guest_driver_profile;
}
private:
GuestDriverProfile guest_driver_profile{};
};
} // namespace VideoCore

File diff suppressed because it is too large

View File

@ -1,29 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <string_view>
#include "common/common_types.h"
namespace Tegra::Engines {
enum class ShaderType : u32;
}
namespace VideoCommon::Shader {
class ShaderIR;
class Registry;
} // namespace VideoCommon::Shader
namespace OpenGL {
class Device;
std::string DecompileAssemblyShader(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
const VideoCommon::Shader::Registry& registry,
Tegra::Engines::ShaderType stage, std::string_view identifier);
} // namespace OpenGL

View File

@ -2,14 +2,18 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <span>
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/maxwell_to_gl.h"
namespace OpenGL {
namespace {
using VideoCore::Surface::PixelFormat;
struct BindlessSSBO {
GLuint64EXT address;
GLsizei length;
@ -21,6 +25,25 @@ constexpr std::array PROGRAM_LUT{
GL_VERTEX_PROGRAM_NV, GL_TESS_CONTROL_PROGRAM_NV, GL_TESS_EVALUATION_PROGRAM_NV,
GL_GEOMETRY_PROGRAM_NV, GL_FRAGMENT_PROGRAM_NV,
};
[[nodiscard]] GLenum GetTextureBufferFormat(GLenum gl_format) {
switch (gl_format) {
case GL_RGBA8_SNORM:
return GL_RGBA8;
case GL_R8_SNORM:
return GL_R8;
case GL_RGBA16_SNORM:
return GL_RGBA16;
case GL_R16_SNORM:
return GL_R16;
case GL_RG16_SNORM:
return GL_RG16;
case GL_RG8_SNORM:
return GL_RG8;
default:
return gl_format;
}
}
} // Anonymous namespace
Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
@ -62,6 +85,30 @@ void Buffer::MakeResident(GLenum access) noexcept {
glMakeNamedBufferResidentNV(buffer.handle, access);
}
GLuint Buffer::View(u32 offset, u32 size, PixelFormat format) {
const auto it{std::ranges::find_if(views, [offset, size, format](const BufferView& view) {
return offset == view.offset && size == view.size && format == view.format;
})};
if (it != views.end()) {
return it->texture.handle;
}
OGLTexture texture;
texture.Create(GL_TEXTURE_BUFFER);
const GLenum gl_format{MaxwellToGL::GetFormatTuple(format).internal_format};
const GLenum texture_format{GetTextureBufferFormat(gl_format)};
if (texture_format != gl_format) {
LOG_WARNING(Render_OpenGL, "Emulating SNORM texture buffer with UNORM.");
}
glTextureBufferRange(texture.handle, texture_format, buffer.handle, offset, size);
views.push_back({
.offset = offset,
.size = size,
.format = format,
.texture = std::move(texture),
});
return views.back().texture.handle;
}
BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
: device{device_}, has_fast_buffer_sub_data{device.HasFastBufferSubData()},
use_assembly_shaders{device.UseAssemblyShaders()},
@ -144,7 +191,7 @@ void BufferCacheRuntime::BindUniformBuffer(size_t stage, u32 binding_index, Buff
glBindBufferRangeNV(PABO_LUT[stage], binding_index, handle, 0,
static_cast<GLsizeiptr>(size));
} else {
const GLuint base_binding = device.GetBaseBindings(stage).uniform_buffer;
const GLuint base_binding = graphics_base_uniform_bindings[stage];
const GLuint binding = base_binding + binding_index;
glBindBufferRange(GL_UNIFORM_BUFFER, binding, buffer.Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
@ -171,7 +218,12 @@ void BufferCacheRuntime::BindComputeUniformBuffer(u32 binding_index, Buffer& buf
void BufferCacheRuntime::BindStorageBuffer(size_t stage, u32 binding_index, Buffer& buffer,
u32 offset, u32 size, bool is_written) {
if (use_assembly_shaders) {
if (use_storage_buffers) {
const GLuint base_binding = graphics_base_storage_bindings[stage];
const GLuint binding = base_binding + binding_index;
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, buffer.Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
} else {
const BindlessSSBO ssbo{
.address = buffer.HostGpuAddr() + offset,
.length = static_cast<GLsizei>(size),
@ -180,17 +232,19 @@ void BufferCacheRuntime::BindStorageBuffer(size_t stage, u32 binding_index, Buff
buffer.MakeResident(is_written ? GL_READ_WRITE : GL_READ_ONLY);
glProgramLocalParametersI4uivNV(PROGRAM_LUT[stage], binding_index, 1,
reinterpret_cast<const GLuint*>(&ssbo));
} else {
const GLuint base_binding = device.GetBaseBindings(stage).shader_storage_buffer;
const GLuint binding = base_binding + binding_index;
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, buffer.Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
}
}
void BufferCacheRuntime::BindComputeStorageBuffer(u32 binding_index, Buffer& buffer, u32 offset,
u32 size, bool is_written) {
if (use_assembly_shaders) {
if (use_storage_buffers) {
if (size != 0) {
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding_index, buffer.Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
} else {
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding_index, 0, 0, 0);
}
} else {
const BindlessSSBO ssbo{
.address = buffer.HostGpuAddr() + offset,
.length = static_cast<GLsizei>(size),
@ -199,11 +253,6 @@ void BufferCacheRuntime::BindComputeStorageBuffer(u32 binding_index, Buffer& buf
buffer.MakeResident(is_written ? GL_READ_WRITE : GL_READ_ONLY);
glProgramLocalParametersI4uivNV(GL_COMPUTE_PROGRAM_NV, binding_index, 1,
reinterpret_cast<const GLuint*>(&ssbo));
} else if (size == 0) {
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding_index, 0, 0, 0);
} else {
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding_index, buffer.Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
}
}
@ -213,4 +262,13 @@ void BufferCacheRuntime::BindTransformFeedbackBuffer(u32 index, Buffer& buffer,
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
}
void BufferCacheRuntime::BindTextureBuffer(Buffer& buffer, u32 offset, u32 size,
PixelFormat format) {
*texture_handles++ = buffer.View(offset, size, format);
}
void BufferCacheRuntime::BindImageBuffer(Buffer& buffer, u32 offset, u32 size, PixelFormat format) {
*image_handles++ = buffer.View(offset, size, format);
}
} // namespace OpenGL
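(BindTextureBuffer and BindImageBuffer above do not issue GL binds themselves; they store buffer-view handles through the pointers installed with SetImagePointers, declared in the header that follows. A hedged sketch of the expected call pattern; the function and array names are illustrative, with the array sizes mirroring the MAX_TEXTURES/MAX_IMAGES constants used by the new pipeline code.)

    #include <array>

    #include <glad/glad.h>

    #include "video_core/renderer_opengl/gl_buffer_cache.h"

    void BindTextureBufferHandles(OpenGL::BufferCacheRuntime& runtime) { // hypothetical caller
        // Storage must outlive the subsequent bind calls; the runtime keeps raw pointers.
        static std::array<GLuint, 64> texture_handles{};
        static std::array<GLuint, 16> image_handles{};
        runtime.SetImagePointers(texture_handles.data(), image_handles.data());
        // Each following BindTextureBuffer/BindImageBuffer call stores one view handle and
        // advances the corresponding pointer; the pipeline can later bind the collected
        // handles in bulk (for example with glBindTextures/glBindImageTextures).
    }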

View File

@ -32,6 +32,8 @@ public:
void MakeResident(GLenum access) noexcept;
[[nodiscard]] GLuint View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format);
[[nodiscard]] GLuint64EXT HostGpuAddr() const noexcept {
return address;
}
@ -41,9 +43,17 @@ public:
}
private:
struct BufferView {
u32 offset;
u32 size;
VideoCore::Surface::PixelFormat format;
OGLTexture texture;
};
GLuint64EXT address = 0;
OGLBuffer buffer;
GLenum current_residency_access = GL_NONE;
std::vector<BufferView> views;
};
class BufferCacheRuntime {
@ -75,17 +85,21 @@ public:
void BindTransformFeedbackBuffer(u32 index, Buffer& buffer, u32 offset, u32 size);
void BindTextureBuffer(Buffer& buffer, u32 offset, u32 size,
VideoCore::Surface::PixelFormat format);
void BindImageBuffer(Buffer& buffer, u32 offset, u32 size,
VideoCore::Surface::PixelFormat format);
void BindFastUniformBuffer(size_t stage, u32 binding_index, u32 size) {
const GLuint handle = fast_uniforms[stage][binding_index].handle;
const GLsizeiptr gl_size = static_cast<GLsizeiptr>(size);
if (use_assembly_shaders) {
const GLuint handle = fast_uniforms[stage][binding_index].handle;
const GLsizeiptr gl_size = static_cast<GLsizeiptr>(size);
glBindBufferRangeNV(PABO_LUT[stage], binding_index, handle, 0, gl_size);
} else {
const GLuint base_binding = device.GetBaseBindings(stage).uniform_buffer;
const GLuint base_binding = graphics_base_uniform_bindings[stage];
const GLuint binding = base_binding + binding_index;
glBindBufferRange(GL_UNIFORM_BUFFER, binding,
fast_uniforms[stage][binding_index].handle, 0,
static_cast<GLsizeiptr>(size));
glBindBufferRange(GL_UNIFORM_BUFFER, binding, handle, 0, gl_size);
}
}
@ -103,7 +117,7 @@ public:
std::span<u8> BindMappedUniformBuffer(size_t stage, u32 binding_index, u32 size) noexcept {
const auto [mapped_span, offset] = stream_buffer->Request(static_cast<size_t>(size));
const GLuint base_binding = device.GetBaseBindings(stage).uniform_buffer;
const GLuint base_binding = graphics_base_uniform_bindings[stage];
const GLuint binding = base_binding + binding_index;
glBindBufferRange(GL_UNIFORM_BUFFER, binding, stream_buffer->Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
@ -118,6 +132,27 @@ public:
return has_fast_buffer_sub_data;
}
[[nodiscard]] bool SupportsNonZeroUniformOffset() const noexcept {
return !use_assembly_shaders;
}
void SetBaseUniformBindings(const std::array<GLuint, 5>& bindings) {
graphics_base_uniform_bindings = bindings;
}
void SetBaseStorageBindings(const std::array<GLuint, 5>& bindings) {
graphics_base_storage_bindings = bindings;
}
void SetImagePointers(GLuint* texture_handles_, GLuint* image_handles_) {
texture_handles = texture_handles_;
image_handles = image_handles_;
}
void SetEnableStorageBuffers(bool use_storage_buffers_) {
use_storage_buffers = use_storage_buffers_;
}
private:
static constexpr std::array PABO_LUT{
GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV, GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV,
@ -131,8 +166,15 @@ private:
bool use_assembly_shaders = false;
bool has_unified_vertex_buffers = false;
bool use_storage_buffers = false;
u32 max_attributes = 0;
std::array<GLuint, 5> graphics_base_uniform_bindings{};
std::array<GLuint, 5> graphics_base_storage_bindings{};
GLuint* texture_handles = nullptr;
GLuint* image_handles = nullptr;
std::optional<StreamBuffer> stream_buffer;
std::array<std::array<OGLBuffer, VideoCommon::NUM_GRAPHICS_UNIFORM_BUFFERS>,
@ -156,6 +198,7 @@ struct BufferCacheParams {
static constexpr bool NEEDS_BIND_UNIFORM_INDEX = true;
static constexpr bool NEEDS_BIND_STORAGE_INDEX = true;
static constexpr bool USE_MEMORY_MAPS = false;
static constexpr bool SEPARATE_IMAGE_BUFFER_BINDINGS = true;
};
using BufferCache = VideoCommon::BufferCache<BufferCacheParams>;

View File

@ -0,0 +1,209 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstring>
#include "common/cityhash.h"
#include "common/settings.h" // for enum class Settings::ShaderBackend
#include "video_core/renderer_opengl/gl_compute_pipeline.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_shader_util.h"
namespace OpenGL {
using Shader::ImageBufferDescriptor;
using Tegra::Texture::TexturePair;
using VideoCommon::ImageId;
constexpr u32 MAX_TEXTURES = 64;
constexpr u32 MAX_IMAGES = 16;
template <typename Range>
u32 AccumulateCount(const Range& range) {
u32 num{};
for (const auto& desc : range) {
num += desc.count;
}
return num;
}
size_t ComputePipelineKey::Hash() const noexcept {
return static_cast<size_t>(
Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this));
}
bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcept {
return std::memcmp(this, &rhs, sizeof *this) == 0;
}
ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_,
BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
Tegra::Engines::KeplerCompute& kepler_compute_,
ProgramManager& program_manager_, const Shader::Info& info_,
std::string code, std::vector<u32> code_v)
: texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_},
kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} {
switch (device.GetShaderBackend()) {
case Settings::ShaderBackend::GLSL:
source_program = CreateProgram(code, GL_COMPUTE_SHADER);
break;
case Settings::ShaderBackend::GLASM:
assembly_program = CompileProgram(code, GL_COMPUTE_PROGRAM_NV);
break;
case Settings::ShaderBackend::SPIRV:
source_program = CreateProgram(code_v, GL_COMPUTE_SHADER);
break;
}
std::copy_n(info.constant_buffer_used_sizes.begin(), uniform_buffer_sizes.size(),
uniform_buffer_sizes.begin());
num_texture_buffers = AccumulateCount(info.texture_buffer_descriptors);
num_image_buffers = AccumulateCount(info.image_buffer_descriptors);
const u32 num_textures{num_texture_buffers + AccumulateCount(info.texture_descriptors)};
ASSERT(num_textures <= MAX_TEXTURES);
const u32 num_images{num_image_buffers + AccumulateCount(info.image_descriptors)};
ASSERT(num_images <= MAX_IMAGES);
const bool is_glasm{assembly_program.handle != 0};
const u32 num_storage_buffers{AccumulateCount(info.storage_buffers_descriptors)};
use_storage_buffers =
!is_glasm || num_storage_buffers < device.GetMaxGLASMStorageBufferBlocks();
writes_global_memory = !use_storage_buffers &&
std::ranges::any_of(info.storage_buffers_descriptors,
[](const auto& desc) { return desc.is_written; });
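// If the GLASM storage block budget would be exceeded, storage buffers are disabled and the
// shader falls back to global memory access through raw pointers; writes_global_memory flags
// that case so the rasterizer can follow such dispatches with a buffer update barrier.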
}
void ComputePipeline::Configure() {
buffer_cache.SetComputeUniformBufferState(info.constant_buffer_mask, &uniform_buffer_sizes);
buffer_cache.UnbindComputeStorageBuffers();
size_t ssbo_index{};
for (const auto& desc : info.storage_buffers_descriptors) {
ASSERT(desc.count == 1);
buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
desc.is_written);
++ssbo_index;
}
texture_cache.SynchronizeComputeDescriptors();
std::array<ImageViewId, MAX_TEXTURES + MAX_IMAGES> image_view_ids;
boost::container::static_vector<u32, MAX_TEXTURES + MAX_IMAGES> image_view_indices;
std::array<GLuint, MAX_TEXTURES> samplers;
std::array<GLuint, MAX_TEXTURES> textures;
std::array<GLuint, MAX_IMAGES> images;
GLsizei sampler_binding{};
GLsizei texture_binding{};
GLsizei image_binding{};
const auto& qmd{kepler_compute.launch_description};
const auto& cbufs{qmd.const_buffer_config};
const bool via_header_index{qmd.linked_tsc != 0};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
const u32 index_offset{index << desc.size_shift};
const u32 offset{desc.cbuf_offset + index_offset};
const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + offset};
if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
if (desc.has_secondary) {
ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
secondary_offset};
const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
return TexturePair(lhs_raw | rhs_raw, via_header_index);
}
}
return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
}};
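// When has_secondary is set, the texture and sampler halves of the handle live in two
// different constant buffers; OR-ing the raw words rebuilds the combined handle before
// TexturePair splits it back into image and sampler indices.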
const auto add_image{[&](const auto& desc) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices.push_back(handle.first);
}
}};
for (const auto& desc : info.texture_buffer_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices.push_back(handle.first);
samplers[sampler_binding++] = 0;
}
}
std::ranges::for_each(info.image_buffer_descriptors, add_image);
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices.push_back(handle.first);
Sampler* const sampler = texture_cache.GetComputeSampler(handle.second);
samplers[sampler_binding++] = sampler->Handle();
}
}
std::ranges::for_each(info.image_descriptors, add_image);
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
if (assembly_program.handle != 0) {
program_manager.BindComputeAssemblyProgram(assembly_program.handle);
} else {
program_manager.BindComputeProgram(source_program.handle);
}
buffer_cache.UnbindComputeTextureBuffers();
size_t texbuf_index{};
const auto add_buffer{[&](const auto& desc) {
constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
for (u32 i = 0; i < desc.count; ++i) {
bool is_written{false};
if constexpr (is_image) {
is_written = desc.is_written;
}
ImageView& image_view{texture_cache.GetImageView(image_view_ids[texbuf_index])};
buffer_cache.BindComputeTextureBuffer(texbuf_index, image_view.GpuAddr(),
image_view.BufferSize(), image_view.format,
is_written, is_image);
++texbuf_index;
}
}};
std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
buffer_cache.UpdateComputeBuffers();
buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers);
buffer_cache.runtime.SetImagePointers(textures.data(), images.data());
buffer_cache.BindHostComputeBuffers();
const ImageId* views_it{image_view_ids.data() + num_texture_buffers + num_image_buffers};
texture_binding += num_texture_buffers;
image_binding += num_image_buffers;
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
ImageView& image_view{texture_cache.GetImageView(*(views_it++))};
textures[texture_binding++] = image_view.Handle(desc.type);
}
}
for (const auto& desc : info.image_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
ImageView& image_view{texture_cache.GetImageView(*(views_it++))};
if (desc.is_written) {
texture_cache.MarkModification(image_view.image_id);
}
images[image_binding++] = image_view.StorageView(desc.type, desc.format);
}
}
if (texture_binding != 0) {
ASSERT(texture_binding == sampler_binding);
glBindTextures(0, texture_binding, textures.data());
glBindSamplers(0, sampler_binding, samplers.data());
}
if (image_binding != 0) {
glBindImageTextures(0, image_binding, images.data());
}
}
} // namespace OpenGL

View File

@ -0,0 +1,93 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <type_traits>
#include <utility>
#include "common/common_types.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_texture_cache.h"
namespace Tegra {
class MemoryManager;
}
namespace Tegra::Engines {
class KeplerCompute;
}
namespace Shader {
struct Info;
}
namespace OpenGL {
class Device;
class ProgramManager;
struct ComputePipelineKey {
u64 unique_hash;
u32 shared_memory_size;
std::array<u32, 3> workgroup_size;
size_t Hash() const noexcept;
bool operator==(const ComputePipelineKey&) const noexcept;
bool operator!=(const ComputePipelineKey& rhs) const noexcept {
return !operator==(rhs);
}
};
static_assert(std::has_unique_object_representations_v<ComputePipelineKey>);
static_assert(std::is_trivially_copyable_v<ComputePipelineKey>);
static_assert(std::is_trivially_constructible_v<ComputePipelineKey>);
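// The traits above are what make the byte-wise Hash() and operator==() safe: unique object
// representations guarantee the key has no padding bytes, so CityHash64 and std::memcmp over
// the raw struct never observe indeterminate data.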
class ComputePipeline {
public:
explicit ComputePipeline(const Device& device, TextureCache& texture_cache_,
BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
Tegra::Engines::KeplerCompute& kepler_compute_,
ProgramManager& program_manager_, const Shader::Info& info_,
std::string code, std::vector<u32> code_v);
void Configure();
[[nodiscard]] bool WritesGlobalMemory() const noexcept {
return writes_global_memory;
}
private:
TextureCache& texture_cache;
BufferCache& buffer_cache;
Tegra::MemoryManager& gpu_memory;
Tegra::Engines::KeplerCompute& kepler_compute;
ProgramManager& program_manager;
Shader::Info info;
OGLProgram source_program;
OGLAssemblyProgram assembly_program;
VideoCommon::ComputeUniformBufferSizes uniform_buffer_sizes{};
u32 num_texture_buffers{};
u32 num_image_buffers{};
bool use_storage_buffers{};
bool writes_global_memory{};
};
} // namespace OpenGL
namespace std {
template <>
struct hash<OpenGL::ComputePipelineKey> {
size_t operator()(const OpenGL::ComputePipelineKey& k) const noexcept {
return k.Hash();
}
};
} // namespace std

View File

@ -17,39 +17,17 @@
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "shader_recompiler/stage.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
namespace OpenGL {
namespace {
// One uniform block is reserved for emulation purposes
constexpr u32 ReservedUniformBlocks = 1;
constexpr u32 NumStages = 5;
constexpr std::array LIMIT_UBOS = {
GL_MAX_VERTEX_UNIFORM_BLOCKS, GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS,
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS, GL_MAX_GEOMETRY_UNIFORM_BLOCKS,
GL_MAX_FRAGMENT_UNIFORM_BLOCKS, GL_MAX_COMPUTE_UNIFORM_BLOCKS,
};
constexpr std::array LIMIT_SSBOS = {
GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS,
GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS, GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS,
GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS,
};
constexpr std::array LIMIT_SAMPLERS = {
GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS,
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS,
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS,
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS,
GL_MAX_TEXTURE_IMAGE_UNITS,
GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS,
};
constexpr std::array LIMIT_IMAGES = {
GL_MAX_VERTEX_IMAGE_UNIFORMS, GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS,
GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS, GL_MAX_GEOMETRY_IMAGE_UNIFORMS,
GL_MAX_FRAGMENT_IMAGE_UNIFORMS, GL_MAX_COMPUTE_IMAGE_UNIFORMS,
};
template <typename T>
T GetInteger(GLenum pname) {
@ -82,81 +60,18 @@ bool HasExtension(std::span<const std::string_view> extensions, std::string_view
return std::ranges::find(extensions, extension) != extensions.end();
}
u32 Extract(u32& base, u32& num, u32 amount, std::optional<GLenum> limit = {}) {
ASSERT(num >= amount);
if (limit) {
amount = std::min(amount, GetInteger<u32>(*limit));
}
num -= amount;
return std::exchange(base, base + amount);
}
std::array<u32, Tegra::Engines::MaxShaderTypes> BuildMaxUniformBuffers() noexcept {
std::array<u32, Tegra::Engines::MaxShaderTypes> max;
std::ranges::transform(LIMIT_UBOS, max.begin(),
[](GLenum pname) { return GetInteger<u32>(pname); });
std::array<u32, Shader::MaxStageTypes> BuildMaxUniformBuffers() noexcept {
std::array<u32, Shader::MaxStageTypes> max;
std::ranges::transform(LIMIT_UBOS, max.begin(), &GetInteger<u32>);
return max;
}
std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept {
std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings;
static constexpr std::array<std::size_t, 5> stage_swizzle{0, 1, 2, 3, 4};
const u32 total_ubos = GetInteger<u32>(GL_MAX_UNIFORM_BUFFER_BINDINGS);
const u32 total_ssbos = GetInteger<u32>(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS);
const u32 total_samplers = GetInteger<u32>(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS);
u32 num_ubos = total_ubos - ReservedUniformBlocks;
u32 num_ssbos = total_ssbos;
u32 num_samplers = total_samplers;
u32 base_ubo = ReservedUniformBlocks;
u32 base_ssbo = 0;
u32 base_samplers = 0;
for (std::size_t i = 0; i < NumStages; ++i) {
const std::size_t stage = stage_swizzle[i];
bindings[stage] = {
Extract(base_ubo, num_ubos, total_ubos / NumStages, LIMIT_UBOS[stage]),
Extract(base_ssbo, num_ssbos, total_ssbos / NumStages, LIMIT_SSBOS[stage]),
Extract(base_samplers, num_samplers, total_samplers / NumStages,
LIMIT_SAMPLERS[stage])};
}
u32 num_images = GetInteger<u32>(GL_MAX_IMAGE_UNITS);
u32 base_images = 0;
// GL_MAX_IMAGE_UNITS is guaranteed by the spec to have a minimum value of 8.
// Due to the limitation of GL_MAX_IMAGE_UNITS, reserve at least 4 image bindings on the
// fragment stage, and at least 1 for the rest of the stages.
// So far games are observed to use 1 image binding on vertex and 4 on fragment stages.
// Reserve at least 4 image bindings on the fragment stage.
bindings[4].image =
Extract(base_images, num_images, std::max(4U, num_images / NumStages), LIMIT_IMAGES[4]);
// This is guaranteed to be at least 1.
const u32 total_extracted_images = num_images / (NumStages - 1);
// Reserve the other image bindings.
for (std::size_t i = 0; i < NumStages; ++i) {
const std::size_t stage = stage_swizzle[i];
if (stage == 4) {
continue;
}
bindings[stage].image =
Extract(base_images, num_images, total_extracted_images, LIMIT_IMAGES[stage]);
}
// Compute doesn't care about any of this.
bindings[5] = {0, 0, 0, 0};
return bindings;
}
bool IsASTCSupported() {
static constexpr std::array targets = {GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY};
static constexpr std::array formats = {
static constexpr std::array targets{
GL_TEXTURE_2D,
GL_TEXTURE_2D_ARRAY,
};
static constexpr std::array formats{
GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_COMPRESSED_RGBA_ASTC_5x4_KHR,
GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_COMPRESSED_RGBA_ASTC_6x5_KHR,
GL_COMPRESSED_RGBA_ASTC_6x6_KHR, GL_COMPRESSED_RGBA_ASTC_8x5_KHR,
@ -172,11 +87,10 @@ bool IsASTCSupported() {
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR,
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR,
};
static constexpr std::array required_support = {
static constexpr std::array required_support{
GL_VERTEX_TEXTURE, GL_TESS_CONTROL_TEXTURE, GL_TESS_EVALUATION_TEXTURE,
GL_GEOMETRY_TEXTURE, GL_FRAGMENT_TEXTURE, GL_COMPUTE_TEXTURE,
};
for (const GLenum target : targets) {
for (const GLenum format : formats) {
for (const GLenum support : required_support) {
@ -223,14 +137,13 @@ Device::Device() {
"Beta driver 443.24 is known to have issues. There might be performance issues.");
disable_fast_buffer_sub_data = true;
}
max_uniform_buffers = BuildMaxUniformBuffers();
base_bindings = BuildBaseBindings();
uniform_buffer_alignment = GetInteger<size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
shader_storage_alignment = GetInteger<size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT);
max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS);
max_varyings = GetInteger<u32>(GL_MAX_VARYING_VECTORS);
max_compute_shared_memory_size = GetInteger<u32>(GL_MAX_COMPUTE_SHARED_MEMORY_SIZE);
max_glasm_storage_buffer_blocks = GetInteger<u32>(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS);
has_warp_intrinsics = GLAD_GL_NV_gpu_shader5 && GLAD_GL_NV_shader_thread_group &&
GLAD_GL_NV_shader_thread_shuffle;
has_shader_ballot = GLAD_GL_ARB_shader_ballot;
@ -243,18 +156,30 @@ Device::Device() {
has_precise_bug = TestPreciseBug();
has_broken_texture_view_formats = is_amd || (!is_linux && is_intel);
has_nv_viewport_array2 = GLAD_GL_NV_viewport_array2;
has_derivative_control = GLAD_GL_ARB_derivative_control;
has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory;
has_debugging_tool_attached = IsDebugToolAttached(extensions);
has_depth_buffer_float = HasExtension(extensions, "GL_NV_depth_buffer_float");
has_geometry_shader_passthrough = GLAD_GL_NV_geometry_shader_passthrough;
has_nv_gpu_shader_5 = GLAD_GL_NV_gpu_shader5;
has_shader_int64 = HasExtension(extensions, "GL_ARB_gpu_shader_int64");
has_amd_shader_half_float = GLAD_GL_AMD_gpu_shader_half_float;
has_sparse_texture_2 = GLAD_GL_ARB_sparse_texture2;
warp_size_potentially_larger_than_guest = !is_nvidia && !is_intel;
need_fastmath_off = is_nvidia;
// At the time of writing, only Nvidia's driver optimizes BufferSubData on exclusive
// uniform buffers as "push constants".
has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
use_assembly_shaders = Settings::values.use_assembly_shaders.GetValue() &&
shader_backend = Settings::values.shader_backend.GetValue();
use_assembly_shaders = shader_backend == Settings::ShaderBackend::GLASM &&
GLAD_GL_NV_gpu_program5 && GLAD_GL_NV_compute_program5 &&
GLAD_GL_NV_transform_feedback && GLAD_GL_NV_transform_feedback2;
if (shader_backend == Settings::ShaderBackend::GLASM && !use_assembly_shaders) {
LOG_ERROR(Render_OpenGL, "Assembly shaders enabled but not supported");
shader_backend = Settings::ShaderBackend::GLSL;
}
// Blocks AMD and Intel OpenGL drivers on Windows from using asynchronous shader compilation.
use_asynchronous_shaders = Settings::values.use_asynchronous_shaders.GetValue() &&
!(is_amd || (is_intel && !is_linux));
@ -265,11 +190,6 @@ Device::Device() {
LOG_INFO(Render_OpenGL, "Renderer_PreciseBug: {}", has_precise_bug);
LOG_INFO(Render_OpenGL, "Renderer_BrokenTextureViewFormats: {}",
has_broken_texture_view_formats);
if (Settings::values.use_assembly_shaders.GetValue() && !use_assembly_shaders) {
LOG_ERROR(Render_OpenGL, "Assembly shaders enabled but not supported");
}
if (Settings::values.use_asynchronous_shaders.GetValue() && !use_asynchronous_shaders) {
LOG_WARNING(Render_OpenGL, "Asynchronous shader compilation enabled but not supported");
}
@ -325,22 +245,6 @@ std::string Device::GetVendorName() const {
return vendor_name;
}
Device::Device(std::nullptr_t) {
max_uniform_buffers.fill(std::numeric_limits<u32>::max());
uniform_buffer_alignment = 4;
shader_storage_alignment = 4;
max_vertex_attributes = 16;
max_varyings = 15;
max_compute_shared_memory_size = 0x10000;
has_warp_intrinsics = true;
has_shader_ballot = true;
has_vertex_viewport_layer = true;
has_image_load_formatted = true;
has_texture_shadow_lod = true;
has_variable_aoffi = true;
has_depth_buffer_float = true;
}
bool Device::TestVariableAoffi() {
return TestProgram(R"(#version 430 core
// This is a unit test, please ignore me on apitrace bug reports.

View File

@ -6,34 +6,22 @@
#include <cstddef>
#include "common/common_types.h"
#include "video_core/engines/shader_type.h"
#include "shader_recompiler/stage.h"
namespace Settings {
enum class ShaderBackend : u32;
};
namespace OpenGL {
class Device {
public:
struct BaseBindings {
u32 uniform_buffer{};
u32 shader_storage_buffer{};
u32 sampler{};
u32 image{};
};
explicit Device();
explicit Device(std::nullptr_t);
[[nodiscard]] std::string GetVendorName() const;
u32 GetMaxUniformBuffers(Tegra::Engines::ShaderType shader_type) const noexcept {
return max_uniform_buffers[static_cast<std::size_t>(shader_type)];
}
const BaseBindings& GetBaseBindings(std::size_t stage_index) const noexcept {
return base_bindings[stage_index];
}
const BaseBindings& GetBaseBindings(Tegra::Engines::ShaderType shader_type) const noexcept {
return GetBaseBindings(static_cast<std::size_t>(shader_type));
u32 GetMaxUniformBuffers(Shader::Stage stage) const noexcept {
return max_uniform_buffers[static_cast<size_t>(stage)];
}
size_t GetUniformBufferAlignment() const {
@ -56,6 +44,10 @@ public:
return max_compute_shared_memory_size;
}
u32 GetMaxGLASMStorageBufferBlocks() const {
return max_glasm_storage_buffer_blocks;
}
bool HasWarpIntrinsics() const {
return has_warp_intrinsics;
}
@ -108,6 +100,10 @@ public:
return has_nv_viewport_array2;
}
bool HasDerivativeControl() const {
return has_derivative_control;
}
bool HasDebuggingToolAttached() const {
return has_debugging_tool_attached;
}
@ -128,18 +124,52 @@ public:
return has_depth_buffer_float;
}
bool HasGeometryShaderPassthrough() const {
return has_geometry_shader_passthrough;
}
bool HasNvGpuShader5() const {
return has_nv_gpu_shader_5;
}
bool HasShaderInt64() const {
return has_shader_int64;
}
bool HasAmdShaderHalfFloat() const {
return has_amd_shader_half_float;
}
bool HasSparseTexture2() const {
return has_sparse_texture_2;
}
bool IsWarpSizePotentiallyLargerThanGuest() const {
return warp_size_potentially_larger_than_guest;
}
bool NeedsFastmathOff() const {
return need_fastmath_off;
}
Settings::ShaderBackend GetShaderBackend() const {
return shader_backend;
}
private:
static bool TestVariableAoffi();
static bool TestPreciseBug();
std::string vendor_name;
std::array<u32, Tegra::Engines::MaxShaderTypes> max_uniform_buffers{};
std::array<BaseBindings, Tegra::Engines::MaxShaderTypes> base_bindings{};
std::array<u32, Shader::MaxStageTypes> max_uniform_buffers{};
size_t uniform_buffer_alignment{};
size_t shader_storage_alignment{};
u32 max_vertex_attributes{};
u32 max_varyings{};
u32 max_compute_shared_memory_size{};
u32 max_glasm_storage_buffer_blocks{};
Settings::ShaderBackend shader_backend{};
bool has_warp_intrinsics{};
bool has_shader_ballot{};
bool has_vertex_viewport_layer{};
@ -153,11 +183,21 @@ private:
bool has_broken_texture_view_formats{};
bool has_fast_buffer_sub_data{};
bool has_nv_viewport_array2{};
bool has_derivative_control{};
bool has_debugging_tool_attached{};
bool use_assembly_shaders{};
bool use_asynchronous_shaders{};
bool use_driver_cache{};
bool has_depth_buffer_float{};
bool has_geometry_shader_passthrough{};
bool has_nv_gpu_shader_5{};
bool has_shader_int64{};
bool has_amd_shader_half_float{};
bool has_sparse_texture_2{};
bool warp_size_potentially_larger_than_guest{};
bool need_fastmath_off{};
std::string vendor_name;
};
} // namespace OpenGL

View File

@ -0,0 +1,572 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <array>
#include <string>
#include <vector>
#include "common/settings.h" // for enum class Settings::ShaderBackend
#include "common/thread_worker.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_opengl/gl_graphics_pipeline.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_shader_util.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
#include "video_core/shader_notify.h"
#include "video_core/texture_cache/texture_cache.h"
#if defined(_MSC_VER) && defined(NDEBUG)
#define LAMBDA_FORCEINLINE [[msvc::forceinline]]
#else
#define LAMBDA_FORCEINLINE
#endif
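// LAMBDA_FORCEINLINE is applied to the large per-stage lambdas below (config_stage and
// bind_stage_info), presumably so optimized MSVC builds still inline them at every
// constexpr-guarded call site.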
namespace OpenGL {
namespace {
using Shader::ImageBufferDescriptor;
using Shader::ImageDescriptor;
using Shader::TextureBufferDescriptor;
using Shader::TextureDescriptor;
using Tegra::Texture::TexturePair;
using VideoCommon::ImageId;
constexpr u32 MAX_TEXTURES = 64;
constexpr u32 MAX_IMAGES = 8;
template <typename Range>
u32 AccumulateCount(const Range& range) {
u32 num{};
for (const auto& desc : range) {
num += desc.count;
}
return num;
}
GLenum Stage(size_t stage_index) {
switch (stage_index) {
case 0:
return GL_VERTEX_SHADER;
case 1:
return GL_TESS_CONTROL_SHADER;
case 2:
return GL_TESS_EVALUATION_SHADER;
case 3:
return GL_GEOMETRY_SHADER;
case 4:
return GL_FRAGMENT_SHADER;
}
UNREACHABLE_MSG("{}", stage_index);
return GL_NONE;
}
GLenum AssemblyStage(size_t stage_index) {
switch (stage_index) {
case 0:
return GL_VERTEX_PROGRAM_NV;
case 1:
return GL_TESS_CONTROL_PROGRAM_NV;
case 2:
return GL_TESS_EVALUATION_PROGRAM_NV;
case 3:
return GL_GEOMETRY_PROGRAM_NV;
case 4:
return GL_FRAGMENT_PROGRAM_NV;
}
UNREACHABLE_MSG("{}", stage_index);
return GL_NONE;
}
/// Translates hardware transform feedback indices
/// @param location Hardware location
/// @return Pair of ARB_transform_feedback3 token stream first and third arguments
/// @note Read https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_transform_feedback3.txt
std::pair<GLint, GLint> TransformFeedbackEnum(u8 location) {
const u8 index = location / 4;
if (index >= 8 && index <= 39) {
return {GL_GENERIC_ATTRIB_NV, index - 8};
}
if (index >= 48 && index <= 55) {
return {GL_TEXTURE_COORD_NV, index - 48};
}
switch (index) {
case 7:
return {GL_POSITION, 0};
case 40:
return {GL_PRIMARY_COLOR_NV, 0};
case 41:
return {GL_SECONDARY_COLOR_NV, 0};
case 42:
return {GL_BACK_PRIMARY_COLOR_NV, 0};
case 43:
return {GL_BACK_SECONDARY_COLOR_NV, 0};
}
UNIMPLEMENTED_MSG("index={}", index);
return {GL_POSITION, 0};
}
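// For example, a varying at hardware location 28 falls in index 7 and maps to {GL_POSITION, 0},
// while location 41 (index 10, generic attribute 2) would map as follows:
//   const auto [token, index] = TransformFeedbackEnum(41); // token == GL_GENERIC_ATTRIB_NV, index == 2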
template <typename Spec>
bool Passes(const std::array<Shader::Info, 5>& stage_infos, u32 enabled_mask) {
for (size_t stage = 0; stage < stage_infos.size(); ++stage) {
if (!Spec::enabled_stages[stage] && ((enabled_mask >> stage) & 1) != 0) {
return false;
}
const auto& info{stage_infos[stage]};
if constexpr (!Spec::has_storage_buffers) {
if (!info.storage_buffers_descriptors.empty()) {
return false;
}
}
if constexpr (!Spec::has_texture_buffers) {
if (!info.texture_buffer_descriptors.empty()) {
return false;
}
}
if constexpr (!Spec::has_image_buffers) {
if (!info.image_buffer_descriptors.empty()) {
return false;
}
}
if constexpr (!Spec::has_images) {
if (!info.image_descriptors.empty()) {
return false;
}
}
}
return true;
}
using ConfigureFuncPtr = void (*)(GraphicsPipeline*, bool);
template <typename Spec, typename... Specs>
ConfigureFuncPtr FindSpec(const std::array<Shader::Info, 5>& stage_infos, u32 enabled_mask) {
if constexpr (sizeof...(Specs) > 0) {
if (!Passes<Spec>(stage_infos, enabled_mask)) {
return FindSpec<Specs...>(stage_infos, enabled_mask);
}
}
return GraphicsPipeline::MakeConfigureSpecFunc<Spec>();
}
struct SimpleVertexFragmentSpec {
static constexpr std::array<bool, 5> enabled_stages{true, false, false, false, true};
static constexpr bool has_storage_buffers = false;
static constexpr bool has_texture_buffers = false;
static constexpr bool has_image_buffers = false;
static constexpr bool has_images = false;
};
struct SimpleVertexSpec {
static constexpr std::array<bool, 5> enabled_stages{true, false, false, false, false};
static constexpr bool has_storage_buffers = false;
static constexpr bool has_texture_buffers = false;
static constexpr bool has_image_buffers = false;
static constexpr bool has_images = false;
};
struct DefaultSpec {
static constexpr std::array<bool, 5> enabled_stages{true, true, true, true, true};
static constexpr bool has_storage_buffers = true;
static constexpr bool has_texture_buffers = true;
static constexpr bool has_image_buffers = true;
static constexpr bool has_images = true;
};
ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 enabled_mask) {
return FindSpec<SimpleVertexSpec, SimpleVertexFragmentSpec, DefaultSpec>(infos, enabled_mask);
}
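// FindSpec tries the specs in the order listed, so they go from most to least restrictive;
// DefaultSpec accepts every stage combination and acts as the terminal fallback.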
} // Anonymous namespace
GraphicsPipeline::GraphicsPipeline(
const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_,
Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker,
VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources,
std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos,
const GraphicsPipelineKey& key_)
: texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_},
state_tracker{state_tracker_}, key{key_} {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
}
u32 num_textures{};
u32 num_images{};
u32 num_storage_buffers{};
for (size_t stage = 0; stage < base_uniform_bindings.size(); ++stage) {
auto& info{stage_infos[stage]};
if (infos[stage]) {
info = *infos[stage];
enabled_stages_mask |= 1u << stage;
}
if (stage < 4) {
base_uniform_bindings[stage + 1] = base_uniform_bindings[stage];
base_storage_bindings[stage + 1] = base_storage_bindings[stage];
base_uniform_bindings[stage + 1] += AccumulateCount(info.constant_buffer_descriptors);
base_storage_bindings[stage + 1] += AccumulateCount(info.storage_buffers_descriptors);
}
enabled_uniform_buffer_masks[stage] = info.constant_buffer_mask;
std::ranges::copy(info.constant_buffer_used_sizes, uniform_buffer_sizes[stage].begin());
const u32 num_tex_buffer_bindings{AccumulateCount(info.texture_buffer_descriptors)};
num_texture_buffers[stage] += num_tex_buffer_bindings;
num_textures += num_tex_buffer_bindings;
const u32 num_img_buffers_bindings{AccumulateCount(info.image_buffer_descriptors)};
num_image_buffers[stage] += num_img_buffers_bindings;
num_images += num_img_buffers_bindings;
num_textures += AccumulateCount(info.texture_descriptors);
num_images += AccumulateCount(info.image_descriptors);
num_storage_buffers += AccumulateCount(info.storage_buffers_descriptors);
writes_global_memory |= std::ranges::any_of(
info.storage_buffers_descriptors, [](const auto& desc) { return desc.is_written; });
}
ASSERT(num_textures <= MAX_TEXTURES);
ASSERT(num_images <= MAX_IMAGES);
const bool assembly_shaders{assembly_programs[0].handle != 0};
use_storage_buffers =
!assembly_shaders || num_storage_buffers <= device.GetMaxGLASMStorageBufferBlocks();
writes_global_memory &= !use_storage_buffers;
configure_func = ConfigureFunc(stage_infos, enabled_stages_mask);
if (key.xfb_enabled && device.UseAssemblyShaders()) {
GenerateTransformFeedbackState();
}
const bool in_parallel = thread_worker != nullptr;
const auto backend = device.GetShaderBackend();
auto func{[this, sources = std::move(sources), sources_spirv = std::move(sources_spirv),
shader_notify, backend, in_parallel](ShaderContext::Context*) mutable {
for (size_t stage = 0; stage < 5; ++stage) {
switch (backend) {
case Settings::ShaderBackend::GLSL:
if (!sources[stage].empty()) {
source_programs[stage] = CreateProgram(sources[stage], Stage(stage));
}
break;
case Settings::ShaderBackend::GLASM:
if (!sources[stage].empty()) {
assembly_programs[stage] = CompileProgram(sources[stage], AssemblyStage(stage));
if (in_parallel) {
// Make sure the program is built before continuing when building in parallel
glGetString(GL_PROGRAM_ERROR_STRING_NV);
}
}
break;
case Settings::ShaderBackend::SPIRV:
if (!sources_spirv[stage].empty()) {
source_programs[stage] = CreateProgram(sources_spirv[stage], Stage(stage));
}
break;
}
}
if (in_parallel && backend != Settings::ShaderBackend::GLASM) {
// Make sure the programs have been built if we are building shaders in parallel
for (OGLProgram& program : source_programs) {
if (program.handle != 0) {
GLint status{};
glGetProgramiv(program.handle, GL_LINK_STATUS, &status);
}
}
}
if (shader_notify) {
shader_notify->MarkShaderComplete();
}
is_built = true;
built_condvar.notify_one();
}};
if (thread_worker) {
thread_worker->QueueWork(std::move(func));
} else {
func(nullptr);
}
}
template <typename Spec>
void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
std::array<ImageId, MAX_TEXTURES + MAX_IMAGES> image_view_ids;
std::array<u32, MAX_TEXTURES + MAX_IMAGES> image_view_indices;
std::array<GLuint, MAX_TEXTURES> samplers;
size_t image_view_index{};
GLsizei sampler_binding{};
texture_cache.SynchronizeGraphicsDescriptors();
buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes);
buffer_cache.runtime.SetBaseUniformBindings(base_uniform_bindings);
buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings);
buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers);
const auto& regs{maxwell3d.regs};
const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
const Shader::Info& info{stage_infos[stage]};
buffer_cache.UnbindGraphicsStorageBuffers(stage);
if constexpr (Spec::has_storage_buffers) {
size_t ssbo_index{};
for (const auto& desc : info.storage_buffers_descriptors) {
ASSERT(desc.count == 1);
buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, desc.cbuf_index,
desc.cbuf_offset, desc.is_written);
++ssbo_index;
}
}
const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(cbufs[desc.cbuf_index].enabled);
const u32 index_offset{index << desc.size_shift};
const u32 offset{desc.cbuf_offset + index_offset};
const GPUVAddr addr{cbufs[desc.cbuf_index].address + offset};
if constexpr (std::is_same_v<decltype(desc), const TextureDescriptor&> ||
std::is_same_v<decltype(desc), const TextureBufferDescriptor&>) {
if (desc.has_secondary) {
ASSERT(cbufs[desc.secondary_cbuf_index].enabled);
const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
second_offset};
const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
const u32 raw{lhs_raw | rhs_raw};
return TexturePair(raw, via_header_index);
}
}
return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices[image_view_index++] = handle.first;
}
}};
if constexpr (Spec::has_texture_buffers) {
for (const auto& desc : info.texture_buffer_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices[image_view_index++] = handle.first;
samplers[sampler_binding++] = 0;
}
}
}
if constexpr (Spec::has_image_buffers) {
for (const auto& desc : info.image_buffer_descriptors) {
add_image(desc);
}
}
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices[image_view_index++] = handle.first;
Sampler* const sampler{texture_cache.GetGraphicsSampler(handle.second)};
samplers[sampler_binding++] = sampler->Handle();
}
}
if constexpr (Spec::has_images) {
for (const auto& desc : info.image_descriptors) {
add_image(desc);
}
}
}};
if constexpr (Spec::enabled_stages[0]) {
config_stage(0);
}
if constexpr (Spec::enabled_stages[1]) {
config_stage(1);
}
if constexpr (Spec::enabled_stages[2]) {
config_stage(2);
}
if constexpr (Spec::enabled_stages[3]) {
config_stage(3);
}
if constexpr (Spec::enabled_stages[4]) {
config_stage(4);
}
const std::span indices_span(image_view_indices.data(), image_view_index);
texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
texture_cache.UpdateRenderTargets(false);
state_tracker.BindFramebuffer(texture_cache.GetFramebuffer()->Handle());
ImageId* texture_buffer_index{image_view_ids.data()};
const auto bind_stage_info{[&](size_t stage) LAMBDA_FORCEINLINE {
size_t index{};
const auto add_buffer{[&](const auto& desc) {
constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
for (u32 i = 0; i < desc.count; ++i) {
bool is_written{false};
if constexpr (is_image) {
is_written = desc.is_written;
}
ImageView& image_view{texture_cache.GetImageView(*texture_buffer_index)};
buffer_cache.BindGraphicsTextureBuffer(stage, index, image_view.GpuAddr(),
image_view.BufferSize(), image_view.format,
is_written, is_image);
++index;
++texture_buffer_index;
}
}};
const Shader::Info& info{stage_infos[stage]};
buffer_cache.UnbindGraphicsTextureBuffers(stage);
if constexpr (Spec::has_texture_buffers) {
for (const auto& desc : info.texture_buffer_descriptors) {
add_buffer(desc);
}
}
if constexpr (Spec::has_image_buffers) {
for (const auto& desc : info.image_buffer_descriptors) {
add_buffer(desc);
}
}
for (const auto& desc : info.texture_descriptors) {
texture_buffer_index += desc.count;
}
if constexpr (Spec::has_images) {
for (const auto& desc : info.image_descriptors) {
texture_buffer_index += desc.count;
}
}
}};
if constexpr (Spec::enabled_stages[0]) {
bind_stage_info(0);
}
if constexpr (Spec::enabled_stages[1]) {
bind_stage_info(1);
}
if constexpr (Spec::enabled_stages[2]) {
bind_stage_info(2);
}
if constexpr (Spec::enabled_stages[3]) {
bind_stage_info(3);
}
if constexpr (Spec::enabled_stages[4]) {
bind_stage_info(4);
}
buffer_cache.UpdateGraphicsBuffers(is_indexed);
buffer_cache.BindHostGeometryBuffers(is_indexed);
if (!is_built.load(std::memory_order::relaxed)) {
WaitForBuild();
}
if (assembly_programs[0].handle != 0) {
program_manager.BindAssemblyPrograms(assembly_programs, enabled_stages_mask);
} else {
program_manager.BindSourcePrograms(source_programs);
}
const ImageId* views_it{image_view_ids.data()};
GLsizei texture_binding = 0;
GLsizei image_binding = 0;
std::array<GLuint, MAX_TEXTURES> textures;
std::array<GLuint, MAX_IMAGES> images;
const auto prepare_stage{[&](size_t stage) {
buffer_cache.runtime.SetImagePointers(&textures[texture_binding], &images[image_binding]);
buffer_cache.BindHostStageBuffers(stage);
texture_binding += num_texture_buffers[stage];
image_binding += num_image_buffers[stage];
views_it += num_texture_buffers[stage];
views_it += num_image_buffers[stage];
const auto& info{stage_infos[stage]};
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
ImageView& image_view{texture_cache.GetImageView(*(views_it++))};
textures[texture_binding++] = image_view.Handle(desc.type);
}
}
for (const auto& desc : info.image_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
ImageView& image_view{texture_cache.GetImageView(*(views_it++))};
if (desc.is_written) {
texture_cache.MarkModification(image_view.image_id);
}
images[image_binding++] = image_view.StorageView(desc.type, desc.format);
}
}
}};
if constexpr (Spec::enabled_stages[0]) {
prepare_stage(0);
}
if constexpr (Spec::enabled_stages[1]) {
prepare_stage(1);
}
if constexpr (Spec::enabled_stages[2]) {
prepare_stage(2);
}
if constexpr (Spec::enabled_stages[3]) {
prepare_stage(3);
}
if constexpr (Spec::enabled_stages[4]) {
prepare_stage(4);
}
if (texture_binding != 0) {
ASSERT(texture_binding == sampler_binding);
glBindTextures(0, texture_binding, textures.data());
glBindSamplers(0, sampler_binding, samplers.data());
}
if (image_binding != 0) {
glBindImageTextures(0, image_binding, images.data());
}
}
void GraphicsPipeline::ConfigureTransformFeedbackImpl() const {
glTransformFeedbackStreamAttribsNV(num_xfb_attribs, xfb_attribs.data(), num_xfb_strides,
xfb_streams.data(), GL_INTERLEAVED_ATTRIBS);
}
void GraphicsPipeline::GenerateTransformFeedbackState() {
// TODO(Rodrigo): Inject SKIP_COMPONENTS*_NV when required. An unimplemented message will signal
// when this is required.
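// Each attribute emitted below is a {token, component count, index} triple (XFB_ENTRY_STRIDE
// values), the layout consumed by glTransformFeedbackStreamAttribsNV; GL_NEXT_BUFFER_NV
// entries separate consecutive buffer streams.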
GLint* cursor{xfb_attribs.data()};
GLint* current_stream{xfb_streams.data()};
for (size_t feedback = 0; feedback < Maxwell::NumTransformFeedbackBuffers; ++feedback) {
const auto& layout = key.xfb_state.layouts[feedback];
UNIMPLEMENTED_IF_MSG(layout.stride != layout.varying_count * 4, "Stride padding");
if (layout.varying_count == 0) {
continue;
}
*current_stream = static_cast<GLint>(feedback);
if (current_stream != xfb_streams.data()) {
// When stepping to a new stream, push the expected GL_NEXT_BUFFER_NV token
cursor[0] = GL_NEXT_BUFFER_NV;
cursor[1] = 0;
cursor[2] = 0;
cursor += XFB_ENTRY_STRIDE;
}
++current_stream;
const auto& locations = key.xfb_state.varyings[feedback];
std::optional<u8> current_index;
for (u32 offset = 0; offset < layout.varying_count; ++offset) {
const u8 location = locations[offset];
const u8 index = location / 4;
if (current_index == index) {
// Increase number of components of the previous attachment
++cursor[-2];
continue;
}
current_index = index;
std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(location);
cursor[1] = 1;
cursor += XFB_ENTRY_STRIDE;
}
}
num_xfb_attribs = static_cast<GLsizei>((cursor - xfb_attribs.data()) / XFB_ENTRY_STRIDE);
num_xfb_strides = static_cast<GLsizei>(current_stream - xfb_streams.data());
}
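// Blocks the configuring thread until the asynchronous build worker queued in the constructor
// has set is_built and signalled built_condvar.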
void GraphicsPipeline::WaitForBuild() {
std::unique_lock lock{built_mutex};
built_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
}
} // namespace OpenGL

View File

@ -0,0 +1,169 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <cstring>
#include <type_traits>
#include <utility>
#include "common/bit_field.h"
#include "common/cityhash.h"
#include "common/common_types.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_texture_cache.h"
#include "video_core/transform_feedback.h"
namespace OpenGL {
namespace ShaderContext {
struct Context;
}
class Device;
class ProgramManager;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
struct GraphicsPipelineKey {
std::array<u64, 6> unique_hashes;
union {
u32 raw;
BitField<0, 1, u32> xfb_enabled;
BitField<1, 1, u32> early_z;
BitField<2, 4, Maxwell::PrimitiveTopology> gs_input_topology;
BitField<6, 2, Maxwell::TessellationPrimitive> tessellation_primitive;
BitField<8, 2, Maxwell::TessellationSpacing> tessellation_spacing;
BitField<10, 1, u32> tessellation_clockwise;
};
std::array<u32, 3> padding;
VideoCommon::TransformFeedbackState xfb_state;
size_t Hash() const noexcept {
return static_cast<size_t>(Common::CityHash64(reinterpret_cast<const char*>(this), Size()));
}
bool operator==(const GraphicsPipelineKey& rhs) const noexcept {
return std::memcmp(this, &rhs, Size()) == 0;
}
bool operator!=(const GraphicsPipelineKey& rhs) const noexcept {
return !operator==(rhs);
}
[[nodiscard]] size_t Size() const noexcept {
if (xfb_enabled != 0) {
return sizeof(GraphicsPipelineKey);
} else {
return offsetof(GraphicsPipelineKey, padding);
}
}
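// Hash() and operator==() both go through Size(), so the trailing xfb_state bytes are
// ignored whenever transform feedback is disabled.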
};
static_assert(std::has_unique_object_representations_v<GraphicsPipelineKey>);
static_assert(std::is_trivially_copyable_v<GraphicsPipelineKey>);
static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>);
class GraphicsPipeline {
public:
explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
Tegra::Engines::Maxwell3D& maxwell3d_,
ProgramManager& program_manager_, StateTracker& state_tracker_,
ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify,
std::array<std::string, 5> sources,
std::array<std::vector<u32>, 5> sources_spirv,
const std::array<const Shader::Info*, 5>& infos,
const GraphicsPipelineKey& key_);
void Configure(bool is_indexed) {
configure_func(this, is_indexed);
}
void ConfigureTransformFeedback() const {
if (num_xfb_attribs != 0) {
ConfigureTransformFeedbackImpl();
}
}
[[nodiscard]] const GraphicsPipelineKey& Key() const noexcept {
return key;
}
[[nodiscard]] bool WritesGlobalMemory() const noexcept {
return writes_global_memory;
}
[[nodiscard]] bool IsBuilt() const noexcept {
return is_built.load(std::memory_order::relaxed);
}
template <typename Spec>
static auto MakeConfigureSpecFunc() {
return [](GraphicsPipeline* pipeline, bool is_indexed) {
pipeline->ConfigureImpl<Spec>(is_indexed);
};
}
private:
template <typename Spec>
void ConfigureImpl(bool is_indexed);
void ConfigureTransformFeedbackImpl() const;
void GenerateTransformFeedbackState();
void WaitForBuild();
TextureCache& texture_cache;
BufferCache& buffer_cache;
Tegra::MemoryManager& gpu_memory;
Tegra::Engines::Maxwell3D& maxwell3d;
ProgramManager& program_manager;
StateTracker& state_tracker;
const GraphicsPipelineKey key;
void (*configure_func)(GraphicsPipeline*, bool){};
std::array<OGLProgram, 5> source_programs;
std::array<OGLAssemblyProgram, 5> assembly_programs;
u32 enabled_stages_mask{};
std::array<Shader::Info, 5> stage_infos{};
std::array<u32, 5> enabled_uniform_buffer_masks{};
VideoCommon::UniformBufferSizes uniform_buffer_sizes{};
std::array<u32, 5> base_uniform_bindings{};
std::array<u32, 5> base_storage_bindings{};
std::array<u32, 5> num_texture_buffers{};
std::array<u32, 5> num_image_buffers{};
bool use_storage_buffers{};
bool writes_global_memory{};
static constexpr std::size_t XFB_ENTRY_STRIDE = 3;
GLsizei num_xfb_attribs{};
GLsizei num_xfb_strides{};
std::array<GLint, 128 * XFB_ENTRY_STRIDE * Maxwell::NumTransformFeedbackBuffers> xfb_attribs{};
std::array<GLint, Maxwell::NumTransformFeedbackBuffers> xfb_streams{};
std::mutex built_mutex;
std::condition_variable built_condvar;
std::atomic_bool is_built{false};
};
} // namespace OpenGL
namespace std {
template <>
struct hash<OpenGL::GraphicsPipelineKey> {
size_t operator()(const OpenGL::GraphicsPipelineKey& k) const noexcept {
return k.Hash();
}
};
} // namespace std

View File

@ -23,7 +23,6 @@
#include "core/memory.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_query_cache.h"
@ -40,7 +39,6 @@ namespace OpenGL {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using GLvec4 = std::array<GLfloat, 4>;
using Tegra::Engines::ShaderType;
using VideoCore::Surface::PixelFormat;
using VideoCore::Surface::SurfaceTarget;
using VideoCore::Surface::SurfaceType;
@ -51,112 +49,11 @@ MICROPROFILE_DEFINE(OpenGL_Blits, "OpenGL", "Blits", MP_RGB(128, 128, 192));
MICROPROFILE_DEFINE(OpenGL_CacheManagement, "OpenGL", "Cache Management", MP_RGB(100, 255, 100));
namespace {
constexpr size_t NUM_SUPPORTED_VERTEX_ATTRIBUTES = 16;
struct TextureHandle {
constexpr TextureHandle(u32 data, bool via_header_index) {
const Tegra::Texture::TextureHandle handle{data};
image = handle.tic_id;
sampler = via_header_index ? image : handle.tsc_id.Value();
}
u32 image;
u32 sampler;
};
template <typename Engine, typename Entry>
TextureHandle GetTextureInfo(const Engine& engine, bool via_header_index, const Entry& entry,
ShaderType shader_type, size_t index = 0) {
if constexpr (std::is_same_v<Entry, SamplerEntry>) {
if (entry.is_separated) {
const u32 buffer_1 = entry.buffer;
const u32 buffer_2 = entry.secondary_buffer;
const u32 offset_1 = entry.offset;
const u32 offset_2 = entry.secondary_offset;
const u32 handle_1 = engine.AccessConstBuffer32(shader_type, buffer_1, offset_1);
const u32 handle_2 = engine.AccessConstBuffer32(shader_type, buffer_2, offset_2);
return TextureHandle(handle_1 | handle_2, via_header_index);
}
}
if (entry.is_bindless) {
const u32 raw = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
return TextureHandle(raw, via_header_index);
}
const u32 buffer = engine.GetBoundBuffer();
const u64 offset = (entry.offset + index) * sizeof(u32);
return TextureHandle(engine.AccessConstBuffer32(shader_type, buffer, offset), via_header_index);
}
/// Translates hardware transform feedback indices
/// @param location Hardware location
/// @return Pair of ARB_transform_feedback3 token stream first and third arguments
/// @note Read https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_transform_feedback3.txt
std::pair<GLint, GLint> TransformFeedbackEnum(u8 location) {
const u8 index = location / 4;
if (index >= 8 && index <= 39) {
return {GL_GENERIC_ATTRIB_NV, index - 8};
}
if (index >= 48 && index <= 55) {
return {GL_TEXTURE_COORD_NV, index - 48};
}
switch (index) {
case 7:
return {GL_POSITION, 0};
case 40:
return {GL_PRIMARY_COLOR_NV, 0};
case 41:
return {GL_SECONDARY_COLOR_NV, 0};
case 42:
return {GL_BACK_PRIMARY_COLOR_NV, 0};
case 43:
return {GL_BACK_SECONDARY_COLOR_NV, 0};
}
UNIMPLEMENTED_MSG("index={}", index);
return {GL_POSITION, 0};
}
void oglEnable(GLenum cap, bool state) {
(state ? glEnable : glDisable)(cap);
}
ImageViewType ImageViewTypeFromEntry(const SamplerEntry& entry) {
if (entry.is_buffer) {
return ImageViewType::Buffer;
}
switch (entry.type) {
case Tegra::Shader::TextureType::Texture1D:
return entry.is_array ? ImageViewType::e1DArray : ImageViewType::e1D;
case Tegra::Shader::TextureType::Texture2D:
return entry.is_array ? ImageViewType::e2DArray : ImageViewType::e2D;
case Tegra::Shader::TextureType::Texture3D:
return ImageViewType::e3D;
case Tegra::Shader::TextureType::TextureCube:
return entry.is_array ? ImageViewType::CubeArray : ImageViewType::Cube;
}
UNREACHABLE();
return ImageViewType::e2D;
}
ImageViewType ImageViewTypeFromEntry(const ImageEntry& entry) {
switch (entry.type) {
case Tegra::Shader::ImageType::Texture1D:
return ImageViewType::e1D;
case Tegra::Shader::ImageType::Texture1DArray:
return ImageViewType::e1DArray;
case Tegra::Shader::ImageType::Texture2D:
return ImageViewType::e2D;
case Tegra::Shader::ImageType::Texture2DArray:
return ImageViewType::e2DArray;
case Tegra::Shader::ImageType::Texture3D:
return ImageViewType::e3D;
case Tegra::Shader::ImageType::TextureBuffer:
return ImageViewType::Buffer;
}
UNREACHABLE();
return ImageViewType::e2D;
}
} // Anonymous namespace
RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
@ -170,14 +67,10 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
buffer_cache_runtime(device),
buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
shader_cache(*this, emu_window_, gpu, maxwell3d, kepler_compute, gpu_memory, device),
shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache,
buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()),
query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache),
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
async_shaders(emu_window_) {
if (device.UseAsynchronousShaders()) {
async_shaders.AllocateWorkers();
}
}
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {}
RasterizerOpenGL::~RasterizerOpenGL() = default;
@ -204,7 +97,7 @@ void RasterizerOpenGL::SyncVertexFormats() {
const auto gl_index = static_cast<GLuint>(index);
// Disable constant attributes.
if (attrib.IsConstant()) {
if (attrib.constant) {
glDisableVertexAttribArray(gl_index);
continue;
}
@ -244,116 +137,9 @@ void RasterizerOpenGL::SyncVertexInstances() {
}
}
void RasterizerOpenGL::SetupShaders(bool is_indexed) {
u32 clip_distances = 0;
std::array<Shader*, Maxwell::MaxShaderStage> shaders{};
image_view_indices.clear();
sampler_handles.clear();
texture_cache.SynchronizeGraphicsDescriptors();
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
const auto& shader_config = maxwell3d.regs.shader_config[index];
const auto program{static_cast<Maxwell::ShaderProgram>(index)};
// Skip stages that are not enabled
if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
switch (program) {
case Maxwell::ShaderProgram::Geometry:
program_manager.UseGeometryShader(0);
break;
case Maxwell::ShaderProgram::Fragment:
program_manager.UseFragmentShader(0);
break;
default:
break;
}
continue;
}
// Currently these stages are not supported in the OpenGL backend.
// TODO(Blinkhawk): Port tessellation shaders from Vulkan to OpenGL
if (program == Maxwell::ShaderProgram::TesselationControl ||
program == Maxwell::ShaderProgram::TesselationEval) {
continue;
}
Shader* const shader = shader_cache.GetStageProgram(program, async_shaders);
const GLuint program_handle = shader->IsBuilt() ? shader->GetHandle() : 0;
switch (program) {
case Maxwell::ShaderProgram::VertexA:
case Maxwell::ShaderProgram::VertexB:
program_manager.UseVertexShader(program_handle);
break;
case Maxwell::ShaderProgram::Geometry:
program_manager.UseGeometryShader(program_handle);
break;
case Maxwell::ShaderProgram::Fragment:
program_manager.UseFragmentShader(program_handle);
break;
default:
UNIMPLEMENTED_MSG("Unimplemented shader index={}, enable={}, offset=0x{:08X}", index,
shader_config.enable.Value(), shader_config.offset);
break;
}
// Stage indices are 0 - 5
const size_t stage = index == 0 ? 0 : index - 1;
shaders[stage] = shader;
SetupDrawTextures(shader, stage);
SetupDrawImages(shader, stage);
buffer_cache.SetEnabledUniformBuffers(stage, shader->GetEntries().enabled_uniform_buffers);
buffer_cache.UnbindGraphicsStorageBuffers(stage);
u32 ssbo_index = 0;
for (const auto& buffer : shader->GetEntries().global_memory_entries) {
buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, buffer.cbuf_index,
buffer.cbuf_offset, buffer.is_written);
++ssbo_index;
}
// Workaround for Intel drivers.
// When a clip distance is enabled but not set in the shader it crops parts of the screen
// (sometimes it's half the screen, sometimes three quarters). To avoid this, enable the
// clip distances only when it's written by a shader stage.
clip_distances |= shader->GetEntries().clip_distances;
// When VertexA is enabled, we have dual vertex shaders
if (program == Maxwell::ShaderProgram::VertexA) {
// VertexB was combined with VertexA, so we skip the VertexB iteration
++index;
}
}
SyncClipEnabled(clip_distances);
maxwell3d.dirty.flags[Dirty::Shaders] = false;
buffer_cache.UpdateGraphicsBuffers(is_indexed);
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
buffer_cache.BindHostGeometryBuffers(is_indexed);
size_t image_view_index = 0;
size_t texture_index = 0;
size_t image_index = 0;
for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
const Shader* const shader = shaders[stage];
if (!shader) {
continue;
}
buffer_cache.BindHostStageBuffers(stage);
const auto& base = device.GetBaseBindings(stage);
BindTextures(shader->GetEntries(), base.sampler, base.image, image_view_index,
texture_index, image_index);
}
}
void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) {
shader_cache.LoadDiskCache(title_id, stop_loading, callback);
shader_cache.LoadDiskResources(title_id, stop_loading, callback);
}
void RasterizerOpenGL::Clear() {
@ -432,16 +218,15 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
SyncState();
// Setup shaders and their used resources.
GraphicsPipeline* const pipeline{shader_cache.CurrentGraphicsPipeline()};
if (!pipeline) {
return;
}
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
SetupShaders(is_indexed);
texture_cache.UpdateRenderTargets(false);
state_tracker.BindFramebuffer(texture_cache.GetFramebuffer()->Handle());
program_manager.BindGraphicsPipeline();
pipeline->Configure(is_indexed);
const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology);
BeginTransformFeedback(primitive_mode);
BeginTransformFeedback(pipeline, primitive_mode);
const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance);
const GLsizei num_instances =
@ -480,35 +265,24 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
num_instances, base_instance);
}
}
EndTransformFeedback();
++num_queued_commands;
has_written_global_memory |= pipeline->WritesGlobalMemory();
gpu.TickWork();
}
void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
Shader* const kernel = shader_cache.GetComputeKernel(code_addr);
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
BindComputeTextures(kernel);
const auto& entries = kernel->GetEntries();
buffer_cache.SetEnabledComputeUniformBuffers(entries.enabled_uniform_buffers);
buffer_cache.UnbindComputeStorageBuffers();
u32 ssbo_index = 0;
for (const auto& buffer : entries.global_memory_entries) {
buffer_cache.BindComputeStorageBuffer(ssbo_index, buffer.cbuf_index, buffer.cbuf_offset,
buffer.is_written);
++ssbo_index;
void RasterizerOpenGL::DispatchCompute() {
ComputePipeline* const pipeline{shader_cache.CurrentComputePipeline()};
if (!pipeline) {
return;
}
buffer_cache.UpdateComputeBuffers();
buffer_cache.BindHostComputeBuffers();
const auto& launch_desc = kepler_compute.launch_description;
glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z);
pipeline->Configure();
const auto& qmd{kepler_compute.launch_description};
glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z);
++num_queued_commands;
has_written_global_memory |= pipeline->WritesGlobalMemory();
}
void RasterizerOpenGL::ResetCounter(VideoCore::QueryType type) {
@ -661,7 +435,7 @@ void RasterizerOpenGL::WaitForIdle() {
}
void RasterizerOpenGL::FragmentBarrier() {
glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT);
glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);
}
void RasterizerOpenGL::TiledCacheBarrier() {
@ -674,6 +448,13 @@ void RasterizerOpenGL::FlushCommands() {
return;
}
num_queued_commands = 0;
// Make sure memory stored from the previous GL command stream is visible
// This is only needed on assembly shaders where we write to GPU memory with raw pointers
if (has_written_global_memory) {
has_written_global_memory = false;
glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
}
glFlush();
}
@ -721,111 +502,11 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
// ASSERT_MSG(image_view->size.width == config.width, "Framebuffer width is different");
// ASSERT_MSG(image_view->size.height == config.height, "Framebuffer height is different");
screen_info.display_texture = image_view->Handle(ImageViewType::e2D);
screen_info.display_texture = image_view->Handle(Shader::TextureType::Color2D);
screen_info.display_srgb = VideoCore::Surface::IsPixelFormatSRGB(image_view->format);
return true;
}
void RasterizerOpenGL::BindComputeTextures(Shader* kernel) {
image_view_indices.clear();
sampler_handles.clear();
texture_cache.SynchronizeComputeDescriptors();
SetupComputeTextures(kernel);
SetupComputeImages(kernel);
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
program_manager.BindCompute(kernel->GetHandle());
size_t image_view_index = 0;
size_t texture_index = 0;
size_t image_index = 0;
BindTextures(kernel->GetEntries(), 0, 0, image_view_index, texture_index, image_index);
}
void RasterizerOpenGL::BindTextures(const ShaderEntries& entries, GLuint base_texture,
GLuint base_image, size_t& image_view_index,
size_t& texture_index, size_t& image_index) {
const GLuint* const samplers = sampler_handles.data() + texture_index;
const GLuint* const textures = texture_handles.data() + texture_index;
const GLuint* const images = image_handles.data() + image_index;
const size_t num_samplers = entries.samplers.size();
for (const auto& sampler : entries.samplers) {
for (size_t i = 0; i < sampler.size; ++i) {
const ImageViewId image_view_id = image_view_ids[image_view_index++];
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
const GLuint handle = image_view.Handle(ImageViewTypeFromEntry(sampler));
texture_handles[texture_index++] = handle;
}
}
const size_t num_images = entries.images.size();
for (size_t unit = 0; unit < num_images; ++unit) {
// TODO: Mark as modified
const ImageViewId image_view_id = image_view_ids[image_view_index++];
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
const GLuint handle = image_view.Handle(ImageViewTypeFromEntry(entries.images[unit]));
image_handles[image_index] = handle;
++image_index;
}
if (num_samplers > 0) {
glBindSamplers(base_texture, static_cast<GLsizei>(num_samplers), samplers);
glBindTextures(base_texture, static_cast<GLsizei>(num_samplers), textures);
}
if (num_images > 0) {
glBindImageTextures(base_image, static_cast<GLsizei>(num_images), images);
}
}
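// Illustrative note (sketch, not part of this commit): gathering sampler/texture/image
// handles into contiguous arrays lets each stage be bound with a single ARB_multi_bind
// call. A hypothetical per-unit fallback for glBindTextures would look roughly like:
//   for (GLsizei i = 0; i < num_samplers; ++i) {
//       glActiveTexture(GL_TEXTURE0 + base_texture + static_cast<GLenum>(i));
//       glBindTexture(GL_TEXTURE_2D, textures[i]); // real targets depend on each view
//   }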
void RasterizerOpenGL::SetupDrawTextures(const Shader* shader, size_t stage_index) {
const bool via_header_index =
maxwell3d.regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
for (const auto& entry : shader->GetEntries().samplers) {
const auto shader_type = static_cast<ShaderType>(stage_index);
for (size_t index = 0; index < entry.size; ++index) {
const auto handle =
GetTextureInfo(maxwell3d, via_header_index, entry, shader_type, index);
const Sampler* const sampler = texture_cache.GetGraphicsSampler(handle.sampler);
sampler_handles.push_back(sampler->Handle());
image_view_indices.push_back(handle.image);
}
}
}
void RasterizerOpenGL::SetupComputeTextures(const Shader* kernel) {
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
for (const auto& entry : kernel->GetEntries().samplers) {
for (size_t i = 0; i < entry.size; ++i) {
const auto handle =
GetTextureInfo(kepler_compute, via_header_index, entry, ShaderType::Compute, i);
const Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
sampler_handles.push_back(sampler->Handle());
image_view_indices.push_back(handle.image);
}
}
}
void RasterizerOpenGL::SetupDrawImages(const Shader* shader, size_t stage_index) {
const bool via_header_index =
maxwell3d.regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
for (const auto& entry : shader->GetEntries().images) {
const auto shader_type = static_cast<ShaderType>(stage_index);
const auto handle = GetTextureInfo(maxwell3d, via_header_index, entry, shader_type);
image_view_indices.push_back(handle.image);
}
}
void RasterizerOpenGL::SetupComputeImages(const Shader* shader) {
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
for (const auto& entry : shader->GetEntries().images) {
const auto handle =
GetTextureInfo(kepler_compute, via_header_index, entry, ShaderType::Compute);
image_view_indices.push_back(handle.image);
}
}
void RasterizerOpenGL::SyncState() {
SyncViewport();
SyncRasterizeEnable();
@ -941,7 +622,7 @@ void RasterizerOpenGL::SyncDepthClamp() {
void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
auto& flags = maxwell3d.dirty.flags;
if (!flags[Dirty::ClipDistances] && !flags[Dirty::Shaders]) {
if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) {
return;
}
flags[Dirty::ClipDistances] = false;
@ -1318,68 +999,13 @@ void RasterizerOpenGL::SyncFramebufferSRGB() {
oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb);
}
void RasterizerOpenGL::SyncTransformFeedback() {
// TODO(Rodrigo): Inject SKIP_COMPONENTS*_NV when required. An unimplemented message will signal
// when this is required.
const auto& regs = maxwell3d.regs;
static constexpr std::size_t STRIDE = 3;
std::array<GLint, 128 * STRIDE * Maxwell::NumTransformFeedbackBuffers> attribs;
std::array<GLint, Maxwell::NumTransformFeedbackBuffers> streams;
GLint* cursor = attribs.data();
GLint* current_stream = streams.data();
for (std::size_t feedback = 0; feedback < Maxwell::NumTransformFeedbackBuffers; ++feedback) {
const auto& layout = regs.tfb_layouts[feedback];
UNIMPLEMENTED_IF_MSG(layout.stride != layout.varying_count * 4, "Stride padding");
if (layout.varying_count == 0) {
continue;
}
*current_stream = static_cast<GLint>(feedback);
if (current_stream != streams.data()) {
// When stepping one stream, push the expected token
cursor[0] = GL_NEXT_BUFFER_NV;
cursor[1] = 0;
cursor[2] = 0;
cursor += STRIDE;
}
++current_stream;
const auto& locations = regs.tfb_varying_locs[feedback];
std::optional<u8> current_index;
for (u32 offset = 0; offset < layout.varying_count; ++offset) {
const u8 location = locations[offset];
const u8 index = location / 4;
if (current_index == index) {
// Increase number of components of the previous attachment
++cursor[-2];
continue;
}
current_index = index;
std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(location);
cursor[1] = 1;
cursor += STRIDE;
}
}
const GLsizei num_attribs = static_cast<GLsizei>((cursor - attribs.data()) / STRIDE);
const GLsizei num_strides = static_cast<GLsizei>(current_stream - streams.data());
glTransformFeedbackStreamAttribsNV(num_attribs, attribs.data(), num_strides, streams.data(),
GL_INTERLEAVED_ATTRIBS);
}
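// Worked example (assumption, for illustration only): a single feedback buffer writing a
// vec4 position followed by a vec4 generic varying would produce two triplets in 'attribs'
// of the form {attribute enum, component count, index}, e.g.
//   { GL_POSITION, 4, 0,   GL_GENERIC_ATTRIB_NV, 4, 0 }
// with 'streams' holding { 0 }, handed to glTransformFeedbackStreamAttribsNV above.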
void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) {
void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) {
const auto& regs = maxwell3d.regs;
if (regs.tfb_enabled == 0) {
return;
}
if (device.UseAssemblyShaders()) {
SyncTransformFeedback();
}
program->ConfigureTransformFeedback();
UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry));
@ -1393,11 +1019,9 @@ void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) {
}
void RasterizerOpenGL::EndTransformFeedback() {
const auto& regs = maxwell3d.regs;
if (regs.tfb_enabled == 0) {
return;
if (maxwell3d.regs.tfb_enabled != 0) {
glEndTransformFeedback();
}
glEndTransformFeedback();
}
AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}

View File

@ -28,11 +28,9 @@
#include "video_core/renderer_opengl/gl_query_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
#include "video_core/renderer_opengl/gl_texture_cache.h"
#include "video_core/shader/async_shaders.h"
#include "video_core/textures/texture.h"
namespace Core::Memory {
@ -81,7 +79,7 @@ public:
void Draw(bool is_indexed, bool is_instanced) override;
void Clear() override;
void DispatchCompute(GPUVAddr code_addr) override;
void DispatchCompute() override;
void ResetCounter(VideoCore::QueryType type) override;
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
@ -118,36 +116,11 @@ public:
return num_queued_commands > 0;
}
VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
return async_shaders;
}
const VideoCommon::Shader::AsyncShaders& GetAsyncShaders() const {
return async_shaders;
}
private:
static constexpr size_t MAX_TEXTURES = 192;
static constexpr size_t MAX_IMAGES = 48;
static constexpr size_t MAX_IMAGE_VIEWS = MAX_TEXTURES + MAX_IMAGES;
void BindComputeTextures(Shader* kernel);
void BindTextures(const ShaderEntries& entries, GLuint base_texture, GLuint base_image,
size_t& image_view_index, size_t& texture_index, size_t& image_index);
/// Configures the current textures to use for the draw command.
void SetupDrawTextures(const Shader* shader, size_t stage_index);
/// Configures the textures used in a compute shader.
void SetupComputeTextures(const Shader* kernel);
/// Configures images in a graphics shader.
void SetupDrawImages(const Shader* shader, size_t stage_index);
/// Configures images in a compute shader.
void SetupComputeImages(const Shader* shader);
/// Syncs state to match guest's
void SyncState();
@ -220,18 +193,12 @@ private:
/// Syncs vertex instances to match the guest state
void SyncVertexInstances();
/// Syncs transform feedback state to match guest state
/// @note Only valid on assembly shaders
void SyncTransformFeedback();
/// Begin a transform feedback
void BeginTransformFeedback(GLenum primitive_mode);
void BeginTransformFeedback(GraphicsPipeline* pipeline, GLenum primitive_mode);
/// End a transform feedback
void EndTransformFeedback();
void SetupShaders(bool is_indexed);
Tegra::GPU& gpu;
Tegra::Engines::Maxwell3D& maxwell3d;
Tegra::Engines::KeplerCompute& kepler_compute;
@ -246,13 +213,11 @@ private:
TextureCache texture_cache;
BufferCacheRuntime buffer_cache_runtime;
BufferCache buffer_cache;
ShaderCacheOpenGL shader_cache;
ShaderCache shader_cache;
QueryCache query_cache;
AccelerateDMA accelerate_dma;
FenceManagerOpenGL fence_manager;
VideoCommon::Shader::AsyncShaders async_shaders;
boost::container::static_vector<u32, MAX_IMAGE_VIEWS> image_view_indices;
std::array<ImageViewId, MAX_IMAGE_VIEWS> image_view_ids;
boost::container::static_vector<GLuint, MAX_TEXTURES> sampler_handles;
@ -260,7 +225,8 @@ private:
std::array<GLuint, MAX_IMAGES> image_handles{};
    /// Number of commands queued to the OpenGL driver. Reset on flush.
std::size_t num_queued_commands = 0;
size_t num_queued_commands = 0;
bool has_written_global_memory = false;
u32 last_clip_distance_mask = 0;
};

View File

@ -83,18 +83,6 @@ void OGLSampler::Release() {
handle = 0;
}
void OGLShader::Create(std::string_view source, GLenum type) {
if (handle != 0) {
return;
}
if (source.empty()) {
return;
}
MICROPROFILE_SCOPE(OpenGL_ResourceCreation);
handle = GLShader::LoadShader(source, type);
}
void OGLShader::Release() {
if (handle == 0)
return;
@ -104,21 +92,6 @@ void OGLShader::Release() {
handle = 0;
}
void OGLProgram::CreateFromSource(const char* vert_shader, const char* geo_shader,
const char* frag_shader, bool separable_program,
bool hint_retrievable) {
OGLShader vert, geo, frag;
if (vert_shader)
vert.Create(vert_shader, GL_VERTEX_SHADER);
if (geo_shader)
geo.Create(geo_shader, GL_GEOMETRY_SHADER);
if (frag_shader)
frag.Create(frag_shader, GL_FRAGMENT_SHADER);
MICROPROFILE_SCOPE(OpenGL_ResourceCreation);
Create(separable_program, hint_retrievable, vert.handle, geo.handle, frag.handle);
}
void OGLProgram::Release() {
if (handle == 0)
return;

View File

@ -8,7 +8,6 @@
#include <utility>
#include <glad/glad.h>
#include "common/common_types.h"
#include "video_core/renderer_opengl/gl_shader_util.h"
namespace OpenGL {
@ -128,8 +127,6 @@ public:
return *this;
}
void Create(std::string_view source, GLenum type);
void Release();
GLuint handle = 0;
@ -151,17 +148,6 @@ public:
return *this;
}
template <typename... T>
void Create(bool separable_program, bool hint_retrievable, T... shaders) {
if (handle != 0)
return;
handle = GLShader::LoadProgram(separable_program, hint_retrievable, shaders...);
}
/// Creates a new internal OpenGL resource and stores the handle
void CreateFromSource(const char* vert_shader, const char* geo_shader, const char* frag_shader,
bool separable_program = false, bool hint_retrievable = false);
/// Deletes the internal OpenGL resource
void Release();

File diff suppressed because it is too large

View File

@ -5,157 +5,93 @@
#pragma once
#include <array>
#include <atomic>
#include <bitset>
#include <memory>
#include <string>
#include <tuple>
#include <filesystem>
#include <stop_token>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <glad/glad.h>
#include "common/common_types.h"
#include "video_core/engines/shader_type.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
#include "common/thread_worker.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/host_translate_info.h"
#include "shader_recompiler/object_pool.h"
#include "shader_recompiler/profile.h"
#include "video_core/renderer_opengl/gl_compute_pipeline.h"
#include "video_core/renderer_opengl/gl_graphics_pipeline.h"
#include "video_core/renderer_opengl/gl_shader_context.h"
#include "video_core/shader_cache.h"
namespace Tegra {
class MemoryManager;
}
namespace Core::Frontend {
class EmuWindow;
}
namespace VideoCommon::Shader {
class AsyncShaders;
}
namespace OpenGL {
class Device;
class ProgramManager;
class RasterizerOpenGL;
using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct ProgramHandle {
OGLProgram source_program;
OGLAssemblyProgram assembly_program;
};
using ProgramSharedPtr = std::shared_ptr<ProgramHandle>;
struct PrecompiledShader {
ProgramSharedPtr program;
std::shared_ptr<VideoCommon::Shader::Registry> registry;
ShaderEntries entries;
};
struct ShaderParameters {
Tegra::GPU& gpu;
Tegra::Engines::ConstBufferEngineInterface& engine;
ShaderDiskCacheOpenGL& disk_cache;
const Device& device;
VAddr cpu_addr;
const u8* host_ptr;
u64 unique_identifier;
};
ProgramSharedPtr BuildShader(const Device& device, Tegra::Engines::ShaderType shader_type,
u64 unique_identifier, const VideoCommon::Shader::ShaderIR& ir,
const VideoCommon::Shader::Registry& registry,
bool hint_retrievable = false);
class Shader final {
class ShaderCache : public VideoCommon::ShaderCache {
public:
~Shader();
explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::Engines::KeplerCompute& kepler_compute_,
Tegra::MemoryManager& gpu_memory_, const Device& device_,
TextureCache& texture_cache_, BufferCache& buffer_cache_,
ProgramManager& program_manager_, StateTracker& state_tracker_,
VideoCore::ShaderNotify& shader_notify_);
~ShaderCache();
/// Gets the GL program handle for the shader
GLuint GetHandle() const;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback);
bool IsBuilt() const;
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipeline();
/// Gets the shader entries for the shader
const ShaderEntries& GetEntries() const {
return entries;
}
const VideoCommon::Shader::Registry& GetRegistry() const {
return *registry;
}
    /// Mark an OpenGL shader as built
void AsyncOpenGLBuilt(OGLProgram new_program);
/// Mark a GLASM shader as built
void AsyncGLASMBuilt(OGLAssemblyProgram new_program);
static std::unique_ptr<Shader> CreateStageFromMemory(
const ShaderParameters& params, Maxwell::ShaderProgram program_type,
ProgramCode program_code, ProgramCode program_code_b,
VideoCommon::Shader::AsyncShaders& async_shaders, VAddr cpu_addr);
static std::unique_ptr<Shader> CreateKernelFromMemory(const ShaderParameters& params,
ProgramCode code);
static std::unique_ptr<Shader> CreateFromCache(const ShaderParameters& params,
const PrecompiledShader& precompiled_shader);
[[nodiscard]] ComputePipeline* CurrentComputePipeline();
private:
explicit Shader(std::shared_ptr<VideoCommon::Shader::Registry> registry, ShaderEntries entries,
ProgramSharedPtr program, bool is_built_ = true);
GraphicsPipeline* CurrentGraphicsPipelineSlowPath();
std::shared_ptr<VideoCommon::Shader::Registry> registry;
ShaderEntries entries;
ProgramSharedPtr program;
GLuint handle = 0;
bool is_built{};
};
[[nodiscard]] GraphicsPipeline* BuiltPipeline(GraphicsPipeline* pipeline) const noexcept;
class ShaderCacheOpenGL final : public VideoCommon::ShaderCache<Shader> {
public:
explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer_,
Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu,
Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::Engines::KeplerCompute& kepler_compute_,
Tegra::MemoryManager& gpu_memory_, const Device& device_);
~ShaderCacheOpenGL() override;
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline();
/// Loads disk cache for the current game
void LoadDiskCache(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback);
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline(
ShaderContext::ShaderPools& pools, const GraphicsPipelineKey& key,
std::span<Shader::Environment* const> envs, bool build_in_parallel);
/// Gets the current specified shader stage program
Shader* GetStageProgram(Maxwell::ShaderProgram program,
VideoCommon::Shader::AsyncShaders& async_shaders);
std::unique_ptr<ComputePipeline> CreateComputePipeline(const ComputePipelineKey& key,
const VideoCommon::ShaderInfo* shader);
    /// Gets the compute kernel at the passed address
Shader* GetComputeKernel(GPUVAddr code_addr);
std::unique_ptr<ComputePipeline> CreateComputePipeline(ShaderContext::ShaderPools& pools,
const ComputePipelineKey& key,
Shader::Environment& env);
private:
ProgramSharedPtr GeneratePrecompiledProgram(
const ShaderDiskCacheEntry& entry, const ShaderDiskCachePrecompiled& precompiled_entry,
const std::unordered_set<GLenum>& supported_formats);
std::unique_ptr<ShaderWorker> CreateWorkers() const;
Core::Frontend::EmuWindow& emu_window;
Tegra::GPU& gpu;
Tegra::MemoryManager& gpu_memory;
Tegra::Engines::Maxwell3D& maxwell3d;
Tegra::Engines::KeplerCompute& kepler_compute;
const Device& device;
TextureCache& texture_cache;
BufferCache& buffer_cache;
ProgramManager& program_manager;
StateTracker& state_tracker;
VideoCore::ShaderNotify& shader_notify;
const bool use_asynchronous_shaders;
ShaderDiskCacheOpenGL disk_cache;
std::unordered_map<u64, PrecompiledShader> runtime_cache;
GraphicsPipelineKey graphics_key{};
GraphicsPipeline* current_pipeline{};
std::unique_ptr<Shader> null_shader;
std::unique_ptr<Shader> null_kernel;
ShaderContext::ShaderPools main_pools;
std::unordered_map<GraphicsPipelineKey, std::unique_ptr<GraphicsPipeline>> graphics_cache;
std::unordered_map<ComputePipelineKey, std::unique_ptr<ComputePipeline>> compute_cache;
std::array<Shader*, Maxwell::MaxShaderProgram> last_shaders{};
Shader::Profile profile;
Shader::HostTranslateInfo host_info;
std::filesystem::path shader_cache_filename;
std::unique_ptr<ShaderWorker> workers;
};
} // namespace OpenGL

View File

@ -0,0 +1,33 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/frontend/emu_window.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
namespace OpenGL::ShaderContext {
struct ShaderPools {
void ReleaseContents() {
flow_block.ReleaseContents();
block.ReleaseContents();
inst.ReleaseContents();
}
Shader::ObjectPool<Shader::IR::Inst> inst;
Shader::ObjectPool<Shader::IR::Block> block;
Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block;
};
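// Usage sketch (assumption, not part of this header): each shader worker thread keeps one
// Context and recycles its pools between compilations, e.g.
//   ctx.pools.ReleaseContents();  // drop the IR of the previously built pipeline
//   auto program = TranslateProgram(ctx.pools.inst, ctx.pools.block, env, cfg, host_info);
// where TranslateProgram stands in for the recompiler's frontend entry point.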
struct Context {
explicit Context(Core::Frontend::EmuWindow& emu_window)
: gl_context{emu_window.CreateSharedContext()}, scoped{*gl_context} {}
std::unique_ptr<Core::Frontend::GraphicsContext> gl_context;
Core::Frontend::GraphicsContext::Scoped scoped;
ShaderPools pools;
};
} // namespace OpenGL::ShaderContext

File diff suppressed because it is too large

View File

@ -1,69 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace OpenGL {
class Device;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using SamplerEntry = VideoCommon::Shader::SamplerEntry;
using ImageEntry = VideoCommon::Shader::ImageEntry;
class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
public:
explicit ConstBufferEntry(u32 max_offset_, bool is_indirect_, u32 index_)
: ConstBuffer{max_offset_, is_indirect_}, index{index_} {}
u32 GetIndex() const {
return index;
}
private:
u32 index = 0;
};
struct GlobalMemoryEntry {
constexpr explicit GlobalMemoryEntry(u32 cbuf_index_, u32 cbuf_offset_, bool is_read_,
bool is_written_)
: cbuf_index{cbuf_index_}, cbuf_offset{cbuf_offset_}, is_read{is_read_}, is_written{
is_written_} {}
u32 cbuf_index = 0;
u32 cbuf_offset = 0;
bool is_read = false;
bool is_written = false;
};
struct ShaderEntries {
std::vector<ConstBufferEntry> const_buffers;
std::vector<GlobalMemoryEntry> global_memory_entries;
std::vector<SamplerEntry> samplers;
std::vector<ImageEntry> images;
std::size_t shader_length{};
u32 clip_distances{};
u32 enabled_uniform_buffers{};
};
ShaderEntries MakeEntries(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
Tegra::Engines::ShaderType stage);
std::string DecompileShader(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
const VideoCommon::Shader::Registry& registry,
Tegra::Engines::ShaderType stage, std::string_view identifier,
std::string_view suffix = {});
} // namespace OpenGL

View File

@ -1,482 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstring>
#include <fmt/format.h>
#include "common/assert.h"
#include "common/common_types.h"
#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/scm_rev.h"
#include "common/settings.h"
#include "common/zstd_compression.h"
#include "core/core.h"
#include "core/hle/kernel/k_process.h"
#include "video_core/engines/shader_type.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
namespace OpenGL {
using Tegra::Engines::ShaderType;
using VideoCommon::Shader::BindlessSamplerMap;
using VideoCommon::Shader::BoundSamplerMap;
using VideoCommon::Shader::KeyMap;
using VideoCommon::Shader::SeparateSamplerKey;
using ShaderCacheVersionHash = std::array<u8, 64>;
struct ConstBufferKey {
u32 cbuf = 0;
u32 offset = 0;
u32 value = 0;
};
struct BoundSamplerEntry {
u32 offset = 0;
Tegra::Engines::SamplerDescriptor sampler;
};
struct SeparateSamplerEntry {
u32 cbuf1 = 0;
u32 cbuf2 = 0;
u32 offset1 = 0;
u32 offset2 = 0;
Tegra::Engines::SamplerDescriptor sampler;
};
struct BindlessSamplerEntry {
u32 cbuf = 0;
u32 offset = 0;
Tegra::Engines::SamplerDescriptor sampler;
};
namespace {
constexpr u32 NativeVersion = 21;
ShaderCacheVersionHash GetShaderCacheVersionHash() {
ShaderCacheVersionHash hash{};
const std::size_t length = std::min(std::strlen(Common::g_shader_cache_version), hash.size());
std::memcpy(hash.data(), Common::g_shader_cache_version, length);
return hash;
}
} // Anonymous namespace
ShaderDiskCacheEntry::ShaderDiskCacheEntry() = default;
ShaderDiskCacheEntry::~ShaderDiskCacheEntry() = default;
bool ShaderDiskCacheEntry::Load(Common::FS::IOFile& file) {
if (!file.ReadObject(type)) {
return false;
}
u32 code_size;
u32 code_size_b;
if (!file.ReadObject(code_size) || !file.ReadObject(code_size_b)) {
return false;
}
code.resize(code_size);
code_b.resize(code_size_b);
if (file.Read(code) != code_size) {
return false;
}
if (HasProgramA() && file.Read(code_b) != code_size_b) {
return false;
}
u8 is_texture_handler_size_known;
u32 texture_handler_size_value;
u32 num_keys;
u32 num_bound_samplers;
u32 num_separate_samplers;
u32 num_bindless_samplers;
if (!file.ReadObject(unique_identifier) || !file.ReadObject(bound_buffer) ||
!file.ReadObject(is_texture_handler_size_known) ||
!file.ReadObject(texture_handler_size_value) || !file.ReadObject(graphics_info) ||
!file.ReadObject(compute_info) || !file.ReadObject(num_keys) ||
!file.ReadObject(num_bound_samplers) || !file.ReadObject(num_separate_samplers) ||
!file.ReadObject(num_bindless_samplers)) {
return false;
}
if (is_texture_handler_size_known) {
texture_handler_size = texture_handler_size_value;
}
std::vector<ConstBufferKey> flat_keys(num_keys);
std::vector<BoundSamplerEntry> flat_bound_samplers(num_bound_samplers);
std::vector<SeparateSamplerEntry> flat_separate_samplers(num_separate_samplers);
std::vector<BindlessSamplerEntry> flat_bindless_samplers(num_bindless_samplers);
if (file.Read(flat_keys) != flat_keys.size() ||
file.Read(flat_bound_samplers) != flat_bound_samplers.size() ||
file.Read(flat_separate_samplers) != flat_separate_samplers.size() ||
file.Read(flat_bindless_samplers) != flat_bindless_samplers.size()) {
return false;
}
for (const auto& entry : flat_keys) {
keys.insert({{entry.cbuf, entry.offset}, entry.value});
}
for (const auto& entry : flat_bound_samplers) {
bound_samplers.emplace(entry.offset, entry.sampler);
}
for (const auto& entry : flat_separate_samplers) {
SeparateSamplerKey key;
key.buffers = {entry.cbuf1, entry.cbuf2};
key.offsets = {entry.offset1, entry.offset2};
separate_samplers.emplace(key, entry.sampler);
}
for (const auto& entry : flat_bindless_samplers) {
bindless_samplers.insert({{entry.cbuf, entry.offset}, entry.sampler});
}
return true;
}
bool ShaderDiskCacheEntry::Save(Common::FS::IOFile& file) const {
if (!file.WriteObject(static_cast<u32>(type)) ||
!file.WriteObject(static_cast<u32>(code.size())) ||
!file.WriteObject(static_cast<u32>(code_b.size()))) {
return false;
}
if (file.Write(code) != code.size()) {
return false;
}
if (HasProgramA() && file.Write(code_b) != code_b.size()) {
return false;
}
if (!file.WriteObject(unique_identifier) || !file.WriteObject(bound_buffer) ||
!file.WriteObject(static_cast<u8>(texture_handler_size.has_value())) ||
!file.WriteObject(texture_handler_size.value_or(0)) || !file.WriteObject(graphics_info) ||
!file.WriteObject(compute_info) || !file.WriteObject(static_cast<u32>(keys.size())) ||
!file.WriteObject(static_cast<u32>(bound_samplers.size())) ||
!file.WriteObject(static_cast<u32>(separate_samplers.size())) ||
!file.WriteObject(static_cast<u32>(bindless_samplers.size()))) {
return false;
}
std::vector<ConstBufferKey> flat_keys;
flat_keys.reserve(keys.size());
for (const auto& [address, value] : keys) {
flat_keys.push_back(ConstBufferKey{address.first, address.second, value});
}
std::vector<BoundSamplerEntry> flat_bound_samplers;
flat_bound_samplers.reserve(bound_samplers.size());
for (const auto& [address, sampler] : bound_samplers) {
flat_bound_samplers.push_back(BoundSamplerEntry{address, sampler});
}
std::vector<SeparateSamplerEntry> flat_separate_samplers;
flat_separate_samplers.reserve(separate_samplers.size());
for (const auto& [key, sampler] : separate_samplers) {
SeparateSamplerEntry entry;
std::tie(entry.cbuf1, entry.cbuf2) = key.buffers;
std::tie(entry.offset1, entry.offset2) = key.offsets;
entry.sampler = sampler;
flat_separate_samplers.push_back(entry);
}
std::vector<BindlessSamplerEntry> flat_bindless_samplers;
flat_bindless_samplers.reserve(bindless_samplers.size());
for (const auto& [address, sampler] : bindless_samplers) {
flat_bindless_samplers.push_back(
BindlessSamplerEntry{address.first, address.second, sampler});
}
return file.Write(flat_keys) == flat_keys.size() &&
file.Write(flat_bound_samplers) == flat_bound_samplers.size() &&
file.Write(flat_separate_samplers) == flat_separate_samplers.size() &&
file.Write(flat_bindless_samplers) == flat_bindless_samplers.size();
}
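// Record layout of a transferable entry (inferred from Load/Save above; an approximation):
//   u32 type | u32 code_size | u32 code_size_b | u64 code[code_size] | u64 code_b[code_size_b]
//   u64 unique_identifier | u32 bound_buffer | u8 texture_handler_size_known
//   u32 texture_handler_size | graphics_info | compute_info
//   u32 num_keys | u32 num_bound_samplers | u32 num_separate_samplers | u32 num_bindless_samplers
//   flat arrays of keys, bound samplers, separate samplers and bindless samplers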
ShaderDiskCacheOpenGL::ShaderDiskCacheOpenGL() = default;
ShaderDiskCacheOpenGL::~ShaderDiskCacheOpenGL() = default;
void ShaderDiskCacheOpenGL::BindTitleID(u64 title_id_) {
title_id = title_id_;
}
std::optional<std::vector<ShaderDiskCacheEntry>> ShaderDiskCacheOpenGL::LoadTransferable() {
// Skip games without title id
const bool has_title_id = title_id != 0;
if (!Settings::values.use_disk_shader_cache.GetValue() || !has_title_id) {
return std::nullopt;
}
Common::FS::IOFile file{GetTransferablePath(), Common::FS::FileAccessMode::Read,
Common::FS::FileType::BinaryFile};
if (!file.IsOpen()) {
LOG_INFO(Render_OpenGL, "No transferable shader cache found");
is_usable = true;
return std::nullopt;
}
u32 version{};
if (!file.ReadObject(version)) {
LOG_ERROR(Render_OpenGL, "Failed to get transferable cache version, skipping it");
return std::nullopt;
}
if (version < NativeVersion) {
LOG_INFO(Render_OpenGL, "Transferable shader cache is old, removing");
file.Close();
InvalidateTransferable();
is_usable = true;
return std::nullopt;
}
if (version > NativeVersion) {
LOG_WARNING(Render_OpenGL, "Transferable shader cache was generated with a newer version "
"of the emulator, skipping");
return std::nullopt;
}
// Version is valid, load the shaders
std::vector<ShaderDiskCacheEntry> entries;
while (static_cast<u64>(file.Tell()) < file.GetSize()) {
ShaderDiskCacheEntry& entry = entries.emplace_back();
if (!entry.Load(file)) {
LOG_ERROR(Render_OpenGL, "Failed to load transferable raw entry, skipping");
return std::nullopt;
}
}
is_usable = true;
return {std::move(entries)};
}
std::vector<ShaderDiskCachePrecompiled> ShaderDiskCacheOpenGL::LoadPrecompiled() {
if (!is_usable) {
return {};
}
Common::FS::IOFile file{GetPrecompiledPath(), Common::FS::FileAccessMode::Read,
Common::FS::FileType::BinaryFile};
if (!file.IsOpen()) {
LOG_INFO(Render_OpenGL, "No precompiled shader cache found");
return {};
}
if (const auto result = LoadPrecompiledFile(file)) {
return *result;
}
LOG_INFO(Render_OpenGL, "Failed to load precompiled cache");
file.Close();
InvalidatePrecompiled();
return {};
}
std::optional<std::vector<ShaderDiskCachePrecompiled>> ShaderDiskCacheOpenGL::LoadPrecompiledFile(
Common::FS::IOFile& file) {
// Read compressed file from disk and decompress to virtual precompiled cache file
std::vector<u8> compressed(file.GetSize());
if (file.Read(compressed) != file.GetSize()) {
return std::nullopt;
}
const std::vector<u8> decompressed = Common::Compression::DecompressDataZSTD(compressed);
SaveArrayToPrecompiled(decompressed.data(), decompressed.size());
precompiled_cache_virtual_file_offset = 0;
ShaderCacheVersionHash file_hash{};
if (!LoadArrayFromPrecompiled(file_hash.data(), file_hash.size())) {
precompiled_cache_virtual_file_offset = 0;
return std::nullopt;
}
if (GetShaderCacheVersionHash() != file_hash) {
LOG_INFO(Render_OpenGL, "Precompiled cache is from another version of the emulator");
precompiled_cache_virtual_file_offset = 0;
return std::nullopt;
}
std::vector<ShaderDiskCachePrecompiled> entries;
while (precompiled_cache_virtual_file_offset < precompiled_cache_virtual_file.GetSize()) {
u32 binary_size;
auto& entry = entries.emplace_back();
if (!LoadObjectFromPrecompiled(entry.unique_identifier) ||
!LoadObjectFromPrecompiled(entry.binary_format) ||
!LoadObjectFromPrecompiled(binary_size)) {
return std::nullopt;
}
entry.binary.resize(binary_size);
if (!LoadArrayFromPrecompiled(entry.binary.data(), entry.binary.size())) {
return std::nullopt;
}
}
return entries;
}
void ShaderDiskCacheOpenGL::InvalidateTransferable() {
if (!Common::FS::RemoveFile(GetTransferablePath())) {
LOG_ERROR(Render_OpenGL, "Failed to invalidate transferable file={}",
Common::FS::PathToUTF8String(GetTransferablePath()));
}
InvalidatePrecompiled();
}
void ShaderDiskCacheOpenGL::InvalidatePrecompiled() {
    // Clear the virtual precompiled cache file
precompiled_cache_virtual_file.Resize(0);
if (!Common::FS::RemoveFile(GetPrecompiledPath())) {
LOG_ERROR(Render_OpenGL, "Failed to invalidate precompiled file={}",
Common::FS::PathToUTF8String(GetPrecompiledPath()));
}
}
void ShaderDiskCacheOpenGL::SaveEntry(const ShaderDiskCacheEntry& entry) {
if (!is_usable) {
return;
}
const u64 id = entry.unique_identifier;
if (stored_transferable.contains(id)) {
// The shader already exists
return;
}
Common::FS::IOFile file = AppendTransferableFile();
if (!file.IsOpen()) {
return;
}
if (!entry.Save(file)) {
LOG_ERROR(Render_OpenGL, "Failed to save raw transferable cache entry, removing");
file.Close();
InvalidateTransferable();
return;
}
stored_transferable.insert(id);
}
void ShaderDiskCacheOpenGL::SavePrecompiled(u64 unique_identifier, GLuint program) {
if (!is_usable) {
return;
}
// TODO(Rodrigo): This is a design smell. I shouldn't be having to manually write the header
// when writing the dump. This should be done the moment I get access to write to the virtual
// file.
if (precompiled_cache_virtual_file.GetSize() == 0) {
SavePrecompiledHeaderToVirtualPrecompiledCache();
}
GLint binary_length;
glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &binary_length);
GLenum binary_format;
std::vector<u8> binary(binary_length);
glGetProgramBinary(program, binary_length, nullptr, &binary_format, binary.data());
if (!SaveObjectToPrecompiled(unique_identifier) || !SaveObjectToPrecompiled(binary_format) ||
!SaveObjectToPrecompiled(static_cast<u32>(binary.size())) ||
!SaveArrayToPrecompiled(binary.data(), binary.size())) {
LOG_ERROR(Render_OpenGL, "Failed to save binary program file in shader={:016X}, removing",
unique_identifier);
InvalidatePrecompiled();
}
}
Common::FS::IOFile ShaderDiskCacheOpenGL::AppendTransferableFile() const {
if (!EnsureDirectories()) {
return {};
}
const auto transferable_path{GetTransferablePath()};
const bool existed = Common::FS::Exists(transferable_path);
Common::FS::IOFile file{transferable_path, Common::FS::FileAccessMode::Append,
Common::FS::FileType::BinaryFile};
if (!file.IsOpen()) {
LOG_ERROR(Render_OpenGL, "Failed to open transferable cache in path={}",
Common::FS::PathToUTF8String(transferable_path));
return {};
}
if (!existed || file.GetSize() == 0) {
// If the file didn't exist, write its version
if (!file.WriteObject(NativeVersion)) {
LOG_ERROR(Render_OpenGL, "Failed to write transferable cache version in path={}",
Common::FS::PathToUTF8String(transferable_path));
return {};
}
}
return file;
}
void ShaderDiskCacheOpenGL::SavePrecompiledHeaderToVirtualPrecompiledCache() {
const auto hash{GetShaderCacheVersionHash()};
if (!SaveArrayToPrecompiled(hash.data(), hash.size())) {
LOG_ERROR(
Render_OpenGL,
"Failed to write precompiled cache version hash to virtual precompiled cache file");
}
}
void ShaderDiskCacheOpenGL::SaveVirtualPrecompiledFile() {
precompiled_cache_virtual_file_offset = 0;
const std::vector<u8> uncompressed = precompiled_cache_virtual_file.ReadAllBytes();
const std::vector<u8> compressed =
Common::Compression::CompressDataZSTDDefault(uncompressed.data(), uncompressed.size());
const auto precompiled_path = GetPrecompiledPath();
Common::FS::IOFile file{precompiled_path, Common::FS::FileAccessMode::Write,
Common::FS::FileType::BinaryFile};
if (!file.IsOpen()) {
LOG_ERROR(Render_OpenGL, "Failed to open precompiled cache in path={}",
Common::FS::PathToUTF8String(precompiled_path));
return;
}
if (file.Write(compressed) != compressed.size()) {
LOG_ERROR(Render_OpenGL, "Failed to write precompiled cache version in path={}",
Common::FS::PathToUTF8String(precompiled_path));
}
}
bool ShaderDiskCacheOpenGL::EnsureDirectories() const {
const auto CreateDir = [](const std::filesystem::path& dir) {
if (!Common::FS::CreateDir(dir)) {
LOG_ERROR(Render_OpenGL, "Failed to create directory={}",
Common::FS::PathToUTF8String(dir));
return false;
}
return true;
};
return CreateDir(Common::FS::GetYuzuPath(Common::FS::YuzuPath::ShaderDir)) &&
CreateDir(GetBaseDir()) && CreateDir(GetTransferableDir()) &&
CreateDir(GetPrecompiledDir());
}
std::filesystem::path ShaderDiskCacheOpenGL::GetTransferablePath() const {
return GetTransferableDir() / fmt::format("{}.bin", GetTitleID());
}
std::filesystem::path ShaderDiskCacheOpenGL::GetPrecompiledPath() const {
return GetPrecompiledDir() / fmt::format("{}.bin", GetTitleID());
}
std::filesystem::path ShaderDiskCacheOpenGL::GetTransferableDir() const {
return GetBaseDir() / "transferable";
}
std::filesystem::path ShaderDiskCacheOpenGL::GetPrecompiledDir() const {
return GetBaseDir() / "precompiled";
}
std::filesystem::path ShaderDiskCacheOpenGL::GetBaseDir() const {
return Common::FS::GetYuzuPath(Common::FS::YuzuPath::ShaderDir) / "opengl";
}
std::string ShaderDiskCacheOpenGL::GetTitleID() const {
return fmt::format("{:016X}", title_id);
}
} // namespace OpenGL

View File

@ -1,176 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <filesystem>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <glad/glad.h>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/file_sys/vfs_vector.h"
#include "video_core/engines/shader_type.h"
#include "video_core/shader/registry.h"
namespace Common::FS {
class IOFile;
}
namespace OpenGL {
using ProgramCode = std::vector<u64>;
/// Describes a shader and how it's used by the guest GPU
struct ShaderDiskCacheEntry {
ShaderDiskCacheEntry();
~ShaderDiskCacheEntry();
bool Load(Common::FS::IOFile& file);
bool Save(Common::FS::IOFile& file) const;
bool HasProgramA() const {
return !code.empty() && !code_b.empty();
}
Tegra::Engines::ShaderType type{};
ProgramCode code;
ProgramCode code_b;
u64 unique_identifier = 0;
std::optional<u32> texture_handler_size;
u32 bound_buffer = 0;
VideoCommon::Shader::GraphicsInfo graphics_info;
VideoCommon::Shader::ComputeInfo compute_info;
VideoCommon::Shader::KeyMap keys;
VideoCommon::Shader::BoundSamplerMap bound_samplers;
VideoCommon::Shader::SeparateSamplerMap separate_samplers;
VideoCommon::Shader::BindlessSamplerMap bindless_samplers;
};
/// Contains an OpenGL dumped binary program
struct ShaderDiskCachePrecompiled {
u64 unique_identifier = 0;
GLenum binary_format = 0;
std::vector<u8> binary;
};
class ShaderDiskCacheOpenGL {
public:
explicit ShaderDiskCacheOpenGL();
~ShaderDiskCacheOpenGL();
/// Binds a title ID for all future operations.
void BindTitleID(u64 title_id);
    /// Loads the transferable cache. If the file has an old version or fails to load, it is deleted.
std::optional<std::vector<ShaderDiskCacheEntry>> LoadTransferable();
/// Loads current game's precompiled cache. Invalidates on failure.
std::vector<ShaderDiskCachePrecompiled> LoadPrecompiled();
/// Removes the transferable (and precompiled) cache file.
void InvalidateTransferable();
/// Removes the precompiled cache file and clears virtual precompiled cache file.
void InvalidatePrecompiled();
/// Saves a raw dump to the transferable file. Checks for collisions.
void SaveEntry(const ShaderDiskCacheEntry& entry);
/// Saves a dump entry to the precompiled file. Does not check for collisions.
void SavePrecompiled(u64 unique_identifier, GLuint program);
/// Serializes virtual precompiled shader cache file to real file
void SaveVirtualPrecompiledFile();
private:
    /// Loads the precompiled cache file. Returns empty on failure.
std::optional<std::vector<ShaderDiskCachePrecompiled>> LoadPrecompiledFile(
Common::FS::IOFile& file);
    /// Opens the current game's transferable file and writes its header if it doesn't exist
Common::FS::IOFile AppendTransferableFile() const;
    /// Saves the precompiled header to the virtual precompiled cache file
void SavePrecompiledHeaderToVirtualPrecompiledCache();
    /// Creates the shader disk cache directories. Returns true on success.
bool EnsureDirectories() const;
/// Gets current game's transferable file path
std::filesystem::path GetTransferablePath() const;
/// Gets current game's precompiled file path
std::filesystem::path GetPrecompiledPath() const;
/// Get user's transferable directory path
std::filesystem::path GetTransferableDir() const;
/// Get user's precompiled directory path
std::filesystem::path GetPrecompiledDir() const;
/// Get user's shader directory path
std::filesystem::path GetBaseDir() const;
/// Get current game's title id
std::string GetTitleID() const;
template <typename T>
bool SaveArrayToPrecompiled(const T* data, std::size_t length) {
const std::size_t write_length = precompiled_cache_virtual_file.WriteArray(
data, length, precompiled_cache_virtual_file_offset);
precompiled_cache_virtual_file_offset += write_length;
return write_length == sizeof(T) * length;
}
template <typename T>
bool LoadArrayFromPrecompiled(T* data, std::size_t length) {
const std::size_t read_length = precompiled_cache_virtual_file.ReadArray(
data, length, precompiled_cache_virtual_file_offset);
precompiled_cache_virtual_file_offset += read_length;
return read_length == sizeof(T) * length;
}
template <typename T>
bool SaveObjectToPrecompiled(const T& object) {
return SaveArrayToPrecompiled(&object, 1);
}
bool SaveObjectToPrecompiled(bool object) {
const auto value = static_cast<u8>(object);
return SaveArrayToPrecompiled(&value, 1);
}
template <typename T>
bool LoadObjectFromPrecompiled(T& object) {
return LoadArrayFromPrecompiled(&object, 1);
}
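    // Round-trip sketch (assumption): objects are written back to back at the running
    // offset, so rewinding the offset and loading in the same order restores them, e.g.
    //   SaveObjectToPrecompiled(unique_identifier);
    //   precompiled_cache_virtual_file_offset = 0;
    //   u64 read_back{};
    //   LoadObjectFromPrecompiled(read_back);  // read_back == unique_identifier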
    // Stores the whole precompiled cache, which will be read from or saved to the precompiled
    // cache file
FileSys::VectorVfsFile precompiled_cache_virtual_file;
// Stores the current offset of the precompiled cache file for IO purposes
std::size_t precompiled_cache_virtual_file_offset = 0;
// Stored transferable shaders
std::unordered_set<u64> stored_transferable;
/// Title ID to operate on
u64 title_id = 0;
// The cache has been loaded at boot
bool is_usable = false;
};
} // namespace OpenGL

View File

@ -1,149 +1,3 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <glad/glad.h>
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
namespace OpenGL {
namespace {
void BindProgram(GLenum stage, GLuint current, GLuint old, bool& enabled) {
if (current == old) {
return;
}
if (current == 0) {
if (enabled) {
enabled = false;
glDisable(stage);
}
return;
}
if (!enabled) {
enabled = true;
glEnable(stage);
}
glBindProgramARB(stage, current);
}
} // Anonymous namespace
ProgramManager::ProgramManager(const Device& device)
: use_assembly_programs{device.UseAssemblyShaders()} {
if (use_assembly_programs) {
glEnable(GL_COMPUTE_PROGRAM_NV);
} else {
graphics_pipeline.Create();
glBindProgramPipeline(graphics_pipeline.handle);
}
}
ProgramManager::~ProgramManager() = default;
void ProgramManager::BindCompute(GLuint program) {
if (use_assembly_programs) {
glBindProgramARB(GL_COMPUTE_PROGRAM_NV, program);
} else {
is_graphics_bound = false;
glUseProgram(program);
}
}
void ProgramManager::BindGraphicsPipeline() {
if (!use_assembly_programs) {
UpdateSourcePrograms();
}
}
void ProgramManager::BindHostPipeline(GLuint pipeline) {
if (use_assembly_programs) {
if (geometry_enabled) {
geometry_enabled = false;
old_state.geometry = 0;
glDisable(GL_GEOMETRY_PROGRAM_NV);
}
} else {
if (!is_graphics_bound) {
glUseProgram(0);
}
}
glBindProgramPipeline(pipeline);
}
void ProgramManager::RestoreGuestPipeline() {
if (use_assembly_programs) {
glBindProgramPipeline(0);
} else {
glBindProgramPipeline(graphics_pipeline.handle);
}
}
void ProgramManager::BindHostCompute(GLuint program) {
if (use_assembly_programs) {
glDisable(GL_COMPUTE_PROGRAM_NV);
}
glUseProgram(program);
is_graphics_bound = false;
}
void ProgramManager::RestoreGuestCompute() {
if (use_assembly_programs) {
glEnable(GL_COMPUTE_PROGRAM_NV);
glUseProgram(0);
}
}
void ProgramManager::UseVertexShader(GLuint program) {
if (use_assembly_programs) {
BindProgram(GL_VERTEX_PROGRAM_NV, program, current_state.vertex, vertex_enabled);
}
current_state.vertex = program;
}
void ProgramManager::UseGeometryShader(GLuint program) {
if (use_assembly_programs) {
BindProgram(GL_GEOMETRY_PROGRAM_NV, program, current_state.vertex, geometry_enabled);
}
current_state.geometry = program;
}
void ProgramManager::UseFragmentShader(GLuint program) {
if (use_assembly_programs) {
BindProgram(GL_FRAGMENT_PROGRAM_NV, program, current_state.vertex, fragment_enabled);
}
current_state.fragment = program;
}
void ProgramManager::UpdateSourcePrograms() {
if (!is_graphics_bound) {
is_graphics_bound = true;
glUseProgram(0);
}
const GLuint handle = graphics_pipeline.handle;
const auto update_state = [handle](GLenum stage, GLuint current, GLuint old) {
if (current == old) {
return;
}
glUseProgramStages(handle, stage, current);
};
update_state(GL_VERTEX_SHADER_BIT, current_state.vertex, old_state.vertex);
update_state(GL_GEOMETRY_SHADER_BIT, current_state.geometry, old_state.geometry);
update_state(GL_FRAGMENT_SHADER_BIT, current_state.fragment, old_state.fragment);
old_state = current_state;
}
void MaxwellUniformData::SetFromRegs(const Tegra::Engines::Maxwell3D& maxwell) {
const auto& regs = maxwell.regs;
// Y_NEGATE controls what value S2R returns for the Y_DIRECTION system value.
y_direction = regs.screen_y_control.y_negate == 0 ? 1.0f : -1.0f;
}
} // namespace OpenGL

View File

@ -4,79 +4,142 @@
#pragma once
#include <cstddef>
#include <array>
#include <span>
#include <glad/glad.h>
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/maxwell_to_gl.h"
namespace OpenGL {
class Device;
/// Uniform structure for the Uniform Buffer Object; all vectors must be 16-byte aligned
/// @note Always keep a vec4 at the end. The GL spec is not clear whether the alignment at
/// the end of a uniform block is included in UNIFORM_BLOCK_DATA_SIZE or not.
/// Not following that rule will cause problems on some AMD drivers.
struct alignas(16) MaxwellUniformData {
void SetFromRegs(const Tegra::Engines::Maxwell3D& maxwell);
GLfloat y_direction;
};
static_assert(sizeof(MaxwellUniformData) == 16, "MaxwellUniformData structure size is incorrect");
static_assert(sizeof(MaxwellUniformData) < 16384,
"MaxwellUniformData structure must be less than 16kb as per the OpenGL spec");
class ProgramManager {
public:
explicit ProgramManager(const Device& device);
~ProgramManager();
static constexpr size_t NUM_STAGES = 5;
/// Binds a compute program
void BindCompute(GLuint program);
/// Updates bound programs.
void BindGraphicsPipeline();
/// Binds an OpenGL pipeline object unsynchronized with the guest state.
void BindHostPipeline(GLuint pipeline);
/// Rewinds BindHostPipeline state changes.
void RestoreGuestPipeline();
/// Binds an OpenGL GLSL program object unsynchronized with the guest state.
void BindHostCompute(GLuint program);
/// Rewinds BindHostCompute state changes.
void RestoreGuestCompute();
void UseVertexShader(GLuint program);
void UseGeometryShader(GLuint program);
void UseFragmentShader(GLuint program);
private:
struct PipelineState {
GLuint vertex = 0;
GLuint geometry = 0;
GLuint fragment = 0;
static constexpr std::array ASSEMBLY_PROGRAM_ENUMS{
GL_VERTEX_PROGRAM_NV, GL_TESS_CONTROL_PROGRAM_NV, GL_TESS_EVALUATION_PROGRAM_NV,
GL_GEOMETRY_PROGRAM_NV, GL_FRAGMENT_PROGRAM_NV,
};
/// Update GLSL programs.
void UpdateSourcePrograms();
public:
explicit ProgramManager(const Device& device) {
glCreateProgramPipelines(1, &pipeline.handle);
if (device.UseAssemblyShaders()) {
glEnable(GL_COMPUTE_PROGRAM_NV);
}
}
OGLPipeline graphics_pipeline;
void BindComputeProgram(GLuint program) {
glUseProgram(program);
is_compute_bound = true;
}
PipelineState current_state;
PipelineState old_state;
void BindComputeAssemblyProgram(GLuint program) {
if (current_assembly_compute_program != program) {
current_assembly_compute_program = program;
glBindProgramARB(GL_COMPUTE_PROGRAM_NV, program);
}
UnbindPipeline();
}
bool use_assembly_programs = false;
void BindSourcePrograms(std::span<const OGLProgram, NUM_STAGES> programs) {
static constexpr std::array<GLenum, 5> stage_enums{
GL_VERTEX_SHADER_BIT, GL_TESS_CONTROL_SHADER_BIT, GL_TESS_EVALUATION_SHADER_BIT,
GL_GEOMETRY_SHADER_BIT, GL_FRAGMENT_SHADER_BIT,
};
for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
if (current_programs[stage] != programs[stage].handle) {
current_programs[stage] = programs[stage].handle;
glUseProgramStages(pipeline.handle, stage_enums[stage], programs[stage].handle);
}
}
BindPipeline();
}
bool is_graphics_bound = true;
void BindPresentPrograms(GLuint vertex, GLuint fragment) {
if (current_programs[0] != vertex) {
current_programs[0] = vertex;
glUseProgramStages(pipeline.handle, GL_VERTEX_SHADER_BIT, vertex);
}
if (current_programs[4] != fragment) {
current_programs[4] = fragment;
glUseProgramStages(pipeline.handle, GL_FRAGMENT_SHADER_BIT, fragment);
}
glUseProgramStages(
pipeline.handle,
GL_TESS_CONTROL_SHADER_BIT | GL_TESS_EVALUATION_SHADER_BIT | GL_GEOMETRY_SHADER_BIT, 0);
current_programs[1] = 0;
current_programs[2] = 0;
current_programs[3] = 0;
bool vertex_enabled = false;
bool geometry_enabled = false;
bool fragment_enabled = false;
if (current_stage_mask != 0) {
current_stage_mask = 0;
for (const GLenum program_type : ASSEMBLY_PROGRAM_ENUMS) {
glDisable(program_type);
}
}
BindPipeline();
}
void BindAssemblyPrograms(std::span<const OGLAssemblyProgram, NUM_STAGES> programs,
u32 stage_mask) {
const u32 changed_mask = current_stage_mask ^ stage_mask;
current_stage_mask = stage_mask;
if (changed_mask != 0) {
for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
if (((changed_mask >> stage) & 1) != 0) {
if (((stage_mask >> stage) & 1) != 0) {
glEnable(ASSEMBLY_PROGRAM_ENUMS[stage]);
} else {
glDisable(ASSEMBLY_PROGRAM_ENUMS[stage]);
}
}
}
}
for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
if (current_programs[stage] != programs[stage].handle) {
current_programs[stage] = programs[stage].handle;
glBindProgramARB(ASSEMBLY_PROGRAM_ENUMS[stage], programs[stage].handle);
}
}
UnbindPipeline();
}
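    // Usage sketch (assumption): stage_mask packs one bit per pipeline stage in the order of
    // ASSEMBLY_PROGRAM_ENUMS, so a GLASM vertex + fragment pipeline would be bound with
    //   const u32 stage_mask = (1u << 0) | (1u << 4); // vertex = stage 0, fragment = stage 4
    //   program_manager.BindAssemblyPrograms(assembly_programs, stage_mask);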
void RestoreGuestCompute() {}
private:
void BindPipeline() {
if (!is_pipeline_bound) {
is_pipeline_bound = true;
glBindProgramPipeline(pipeline.handle);
}
UnbindCompute();
}
void UnbindPipeline() {
if (is_pipeline_bound) {
is_pipeline_bound = false;
glBindProgramPipeline(0);
}
UnbindCompute();
}
void UnbindCompute() {
if (is_compute_bound) {
is_compute_bound = false;
glUseProgram(0);
}
}
OGLPipeline pipeline;
bool is_pipeline_bound{};
bool is_compute_bound{};
u32 current_stage_mask = 0;
std::array<GLuint, NUM_STAGES> current_programs{};
GLuint current_assembly_compute_program = 0;
};
} // namespace OpenGL

View File

@ -5,57 +5,108 @@
#include <string_view>
#include <vector>
#include <glad/glad.h>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "video_core/renderer_opengl/gl_shader_util.h"
namespace OpenGL::GLShader {
namespace OpenGL {
namespace {
std::string_view StageDebugName(GLenum type) {
switch (type) {
case GL_VERTEX_SHADER:
return "vertex";
case GL_GEOMETRY_SHADER:
return "geometry";
case GL_FRAGMENT_SHADER:
return "fragment";
case GL_COMPUTE_SHADER:
return "compute";
static OGLProgram LinkSeparableProgram(GLuint shader) {
OGLProgram program;
program.handle = glCreateProgram();
glProgramParameteri(program.handle, GL_PROGRAM_SEPARABLE, GL_TRUE);
glAttachShader(program.handle, shader);
glLinkProgram(program.handle);
if (!Settings::values.renderer_debug) {
return program;
}
UNIMPLEMENTED();
return "unknown";
GLint link_status{};
glGetProgramiv(program.handle, GL_LINK_STATUS, &link_status);
GLint log_length{};
glGetProgramiv(program.handle, GL_INFO_LOG_LENGTH, &log_length);
if (log_length == 0) {
return program;
}
std::string log(log_length, 0);
glGetProgramInfoLog(program.handle, log_length, nullptr, log.data());
if (link_status == GL_FALSE) {
LOG_ERROR(Render_OpenGL, "{}", log);
} else {
LOG_WARNING(Render_OpenGL, "{}", log);
}
return program;
}
} // Anonymous namespace
static void LogShader(GLuint shader, std::string_view code = {}) {
GLint shader_status{};
glGetShaderiv(shader, GL_COMPILE_STATUS, &shader_status);
if (shader_status == GL_FALSE) {
LOG_ERROR(Render_OpenGL, "Failed to build shader");
}
GLint log_length{};
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);
if (log_length == 0) {
return;
}
std::string log(log_length, 0);
glGetShaderInfoLog(shader, log_length, nullptr, log.data());
if (shader_status == GL_FALSE) {
LOG_ERROR(Render_OpenGL, "{}", log);
if (!code.empty()) {
LOG_INFO(Render_OpenGL, "\n{}", code);
}
} else {
LOG_WARNING(Render_OpenGL, "{}", log);
}
}
GLuint LoadShader(std::string_view source, GLenum type) {
const std::string_view debug_type = StageDebugName(type);
const GLuint shader_id = glCreateShader(type);
OGLProgram CreateProgram(std::string_view code, GLenum stage) {
OGLShader shader;
shader.handle = glCreateShader(stage);
const GLchar* source_string = source.data();
const GLint source_length = static_cast<GLint>(source.size());
const GLint length = static_cast<GLint>(code.size());
const GLchar* const code_ptr = code.data();
glShaderSource(shader.handle, 1, &code_ptr, &length);
glCompileShader(shader.handle);
if (Settings::values.renderer_debug) {
LogShader(shader.handle, code);
}
return LinkSeparableProgram(shader.handle);
}
glShaderSource(shader_id, 1, &source_string, &source_length);
LOG_DEBUG(Render_OpenGL, "Compiling {} shader...", debug_type);
glCompileShader(shader_id);
OGLProgram CreateProgram(std::span<const u32> code, GLenum stage) {
OGLShader shader;
shader.handle = glCreateShader(stage);
GLint result = GL_FALSE;
GLint info_log_length;
glGetShaderiv(shader_id, GL_COMPILE_STATUS, &result);
glGetShaderiv(shader_id, GL_INFO_LOG_LENGTH, &info_log_length);
glShaderBinary(1, &shader.handle, GL_SHADER_BINARY_FORMAT_SPIR_V_ARB, code.data(),
static_cast<GLsizei>(code.size_bytes()));
glSpecializeShader(shader.handle, "main", 0, nullptr, nullptr);
if (Settings::values.renderer_debug) {
LogShader(shader.handle);
}
return LinkSeparableProgram(shader.handle);
}
if (info_log_length > 1) {
std::string shader_error(info_log_length, ' ');
glGetShaderInfoLog(shader_id, info_log_length, nullptr, &shader_error[0]);
if (result == GL_TRUE) {
LOG_DEBUG(Render_OpenGL, "{}", shader_error);
} else {
LOG_ERROR(Render_OpenGL, "Error compiling {} shader:\n{}", debug_type, shader_error);
OGLAssemblyProgram CompileProgram(std::string_view code, GLenum target) {
OGLAssemblyProgram program;
glGenProgramsARB(1, &program.handle);
glNamedProgramStringEXT(program.handle, target, GL_PROGRAM_FORMAT_ASCII_ARB,
static_cast<GLsizei>(code.size()), code.data());
if (Settings::values.renderer_debug) {
const auto err = reinterpret_cast<const char*>(glGetString(GL_PROGRAM_ERROR_STRING_NV));
if (err && *err) {
if (std::strstr(err, "error")) {
LOG_CRITICAL(Render_OpenGL, "\n{}", err);
LOG_INFO(Render_OpenGL, "\n{}", code);
} else {
LOG_WARNING(Render_OpenGL, "\n{}", err);
}
}
}
return shader_id;
return program;
}
} // namespace OpenGL::GLShader
} // namespace OpenGL
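Note: each helper above returns a ready-to-use separable program object. A minimal usage sketch, assuming an existing OGLPipeline named pipeline; the GLSL source is a placeholder, not code from the PR:

// Build a separable fragment program from GLSL and attach it to a program pipeline,
// mirroring how renderer_opengl.cpp consumes CreateProgram further down.
constexpr std::string_view frag_src = R"(#version 430 core
layout(location = 0) out vec4 color;
void main() { color = vec4(1.0); }
)";
OGLProgram frag = CreateProgram(frag_src, GL_FRAGMENT_SHADER);
glUseProgramStages(pipeline.handle, GL_FRAGMENT_SHADER_BIT, frag.handle);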

View File

@ -4,92 +4,23 @@
#pragma once
#include <span>
#include <string>
#include <string_view>
#include <vector>
#include <glad/glad.h>
#include "common/assert.h"
#include "common/logging/log.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
namespace OpenGL::GLShader {
namespace OpenGL {
/**
* Utility function to log the source code of a list of shaders.
* @param shaders The OpenGL shaders whose source we will print.
*/
template <typename... T>
void LogShaderSource(T... shaders) {
auto shader_list = {shaders...};
OGLProgram CreateProgram(std::string_view code, GLenum stage);
for (const auto& shader : shader_list) {
if (shader == 0)
continue;
OGLProgram CreateProgram(std::span<const u32> code, GLenum stage);
GLint source_length;
glGetShaderiv(shader, GL_SHADER_SOURCE_LENGTH, &source_length);
OGLAssemblyProgram CompileProgram(std::string_view code, GLenum target);
std::string source(source_length, ' ');
glGetShaderSource(shader, source_length, nullptr, &source[0]);
LOG_INFO(Render_OpenGL, "Shader source {}", source);
}
}
/**
* Utility function to create and compile an OpenGL GLSL shader
* @param source String of the GLSL shader program
* @param type Type of the shader (GL_VERTEX_SHADER, GL_GEOMETRY_SHADER or GL_FRAGMENT_SHADER)
*/
GLuint LoadShader(std::string_view source, GLenum type);
/**
* Utility function to create and compile an OpenGL GLSL shader program (vertex + fragment shader)
* @param separable_program whether to create a separable program
* @param shaders ID of shaders to attach to the program
* @returns Handle of the newly created OpenGL program object
*/
template <typename... T>
GLuint LoadProgram(bool separable_program, bool hint_retrievable, T... shaders) {
// Link the program
LOG_DEBUG(Render_OpenGL, "Linking program...");
GLuint program_id = glCreateProgram();
((shaders == 0 ? (void)0 : glAttachShader(program_id, shaders)), ...);
if (separable_program) {
glProgramParameteri(program_id, GL_PROGRAM_SEPARABLE, GL_TRUE);
}
if (hint_retrievable) {
glProgramParameteri(program_id, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);
}
glLinkProgram(program_id);
// Check the program
GLint result = GL_FALSE;
GLint info_log_length;
glGetProgramiv(program_id, GL_LINK_STATUS, &result);
glGetProgramiv(program_id, GL_INFO_LOG_LENGTH, &info_log_length);
if (info_log_length > 1) {
std::string program_error(info_log_length, ' ');
glGetProgramInfoLog(program_id, info_log_length, nullptr, &program_error[0]);
if (result == GL_TRUE) {
LOG_DEBUG(Render_OpenGL, "{}", program_error);
} else {
LOG_ERROR(Render_OpenGL, "Error linking shader:\n{}", program_error);
}
}
if (result == GL_FALSE) {
// There was a problem linking the shader, print the source for debugging purposes.
LogShaderSource(shaders...);
}
ASSERT_MSG(result == GL_TRUE, "Shader not linked");
((shaders == 0 ? (void)0 : glDetachShader(program_id, shaders)), ...);
return program_id;
}
} // namespace OpenGL::GLShader
} // namespace OpenGL

View File

@ -83,11 +83,6 @@ void SetupDirtyScissors(Tables& tables) {
FillBlock(tables[1], OFF(scissor_test), NUM(scissor_test), Scissors);
}
void SetupDirtyShaders(Tables& tables) {
FillBlock(tables[0], OFF(shader_config[0]), NUM(shader_config[0]) * Regs::MaxShaderProgram,
Shaders);
}
void SetupDirtyPolygonModes(Tables& tables) {
tables[0][OFF(polygon_mode_front)] = PolygonModeFront;
tables[0][OFF(polygon_mode_back)] = PolygonModeBack;
@ -217,7 +212,6 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags}
SetupDirtyScissors(tables);
SetupDirtyVertexInstances(tables);
SetupDirtyVertexFormat(tables);
SetupDirtyShaders(tables);
SetupDirtyPolygonModes(tables);
SetupDirtyDepthTest(tables);
SetupDirtyStencilTest(tables);

View File

@ -52,7 +52,6 @@ enum : u8 {
BlendState0,
BlendState7 = BlendState0 + 7,
Shaders,
ClipDistances,
PolygonModes,

View File

@ -24,9 +24,7 @@
#include "video_core/textures/decoders.h"
namespace OpenGL {
namespace {
using Tegra::Texture::SwizzleSource;
using Tegra::Texture::TextureMipmapFilter;
using Tegra::Texture::TextureType;
@ -59,107 +57,6 @@ struct CopyRegion {
GLsizei depth;
};
struct FormatTuple {
GLenum internal_format;
GLenum format = GL_NONE;
GLenum type = GL_NONE;
};
constexpr std::array<FormatTuple, MaxPixelFormat> FORMAT_TABLE = {{
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // A8B8G8R8_UNORM
{GL_RGBA8_SNORM, GL_RGBA, GL_BYTE}, // A8B8G8R8_SNORM
{GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE}, // A8B8G8R8_SINT
{GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE}, // A8B8G8R8_UINT
{GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // R5G6B5_UNORM
{GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV}, // B5G6R5_UNORM
{GL_RGB5_A1, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1R5G5B5_UNORM
{GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10_UNORM
{GL_RGB10_A2UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10_UINT
{GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1B5G5R5_UNORM
{GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R8_UNORM
{GL_R8_SNORM, GL_RED, GL_BYTE}, // R8_SNORM
{GL_R8I, GL_RED_INTEGER, GL_BYTE}, // R8_SINT
{GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE}, // R8_UINT
{GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT}, // R16G16B16A16_FLOAT
{GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT}, // R16G16B16A16_UNORM
{GL_RGBA16_SNORM, GL_RGBA, GL_SHORT}, // R16G16B16A16_SNORM
{GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT}, // R16G16B16A16_SINT
{GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT}, // R16G16B16A16_UINT
{GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV}, // B10G11R11_FLOAT
{GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT}, // R32G32B32A32_UINT
{GL_COMPRESSED_RGBA_S3TC_DXT1_EXT}, // BC1_RGBA_UNORM
{GL_COMPRESSED_RGBA_S3TC_DXT3_EXT}, // BC2_UNORM
{GL_COMPRESSED_RGBA_S3TC_DXT5_EXT}, // BC3_UNORM
{GL_COMPRESSED_RED_RGTC1}, // BC4_UNORM
{GL_COMPRESSED_SIGNED_RED_RGTC1}, // BC4_SNORM
{GL_COMPRESSED_RG_RGTC2}, // BC5_UNORM
{GL_COMPRESSED_SIGNED_RG_RGTC2}, // BC5_SNORM
{GL_COMPRESSED_RGBA_BPTC_UNORM}, // BC7_UNORM
{GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT}, // BC6H_UFLOAT
{GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT}, // BC6H_SFLOAT
{GL_COMPRESSED_RGBA_ASTC_4x4_KHR}, // ASTC_2D_4X4_UNORM
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE}, // B8G8R8A8_UNORM
{GL_RGBA32F, GL_RGBA, GL_FLOAT}, // R32G32B32A32_FLOAT
{GL_RGBA32I, GL_RGBA_INTEGER, GL_INT}, // R32G32B32A32_SINT
{GL_RG32F, GL_RG, GL_FLOAT}, // R32G32_FLOAT
{GL_RG32I, GL_RG_INTEGER, GL_INT}, // R32G32_SINT
{GL_R32F, GL_RED, GL_FLOAT}, // R32_FLOAT
{GL_R16F, GL_RED, GL_HALF_FLOAT}, // R16_FLOAT
{GL_R16, GL_RED, GL_UNSIGNED_SHORT}, // R16_UNORM
{GL_R16_SNORM, GL_RED, GL_SHORT}, // R16_SNORM
{GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT}, // R16_UINT
{GL_R16I, GL_RED_INTEGER, GL_SHORT}, // R16_SINT
{GL_RG16, GL_RG, GL_UNSIGNED_SHORT}, // R16G16_UNORM
{GL_RG16F, GL_RG, GL_HALF_FLOAT}, // R16G16_FLOAT
{GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT}, // R16G16_UINT
{GL_RG16I, GL_RG_INTEGER, GL_SHORT}, // R16G16_SINT
{GL_RG16_SNORM, GL_RG, GL_SHORT}, // R16G16_SNORM
{GL_RGB32F, GL_RGB, GL_FLOAT}, // R32G32B32_FLOAT
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // A8B8G8R8_SRGB
{GL_RG8, GL_RG, GL_UNSIGNED_BYTE}, // R8G8_UNORM
{GL_RG8_SNORM, GL_RG, GL_BYTE}, // R8G8_SNORM
{GL_RG8I, GL_RG_INTEGER, GL_BYTE}, // R8G8_SINT
{GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE}, // R8G8_UINT
{GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT}, // R32G32_UINT
{GL_RGB16F, GL_RGBA, GL_HALF_FLOAT}, // R16G16B16X16_FLOAT
{GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT}, // R32_UINT
{GL_R32I, GL_RED_INTEGER, GL_INT}, // R32_SINT
{GL_COMPRESSED_RGBA_ASTC_8x8_KHR}, // ASTC_2D_8X8_UNORM
{GL_COMPRESSED_RGBA_ASTC_8x5_KHR}, // ASTC_2D_8X5_UNORM
{GL_COMPRESSED_RGBA_ASTC_5x4_KHR}, // ASTC_2D_5X4_UNORM
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE}, // B8G8R8A8_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT}, // BC1_RGBA_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT}, // BC2_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB
{GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB
{GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR}, // ASTC_2D_5X4_SRGB
{GL_COMPRESSED_RGBA_ASTC_5x5_KHR}, // ASTC_2D_5X5_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR}, // ASTC_2D_5X5_SRGB
{GL_COMPRESSED_RGBA_ASTC_10x8_KHR}, // ASTC_2D_10X8_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR}, // ASTC_2D_10X8_SRGB
{GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB
{GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB
{GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR}, // ASTC_2D_12X12_SRGB
{GL_COMPRESSED_RGBA_ASTC_8x6_KHR}, // ASTC_2D_8X6_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR}, // ASTC_2D_8X6_SRGB
{GL_COMPRESSED_RGBA_ASTC_6x5_KHR}, // ASTC_2D_6X5_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR}, // ASTC_2D_6X5_SRGB
{GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV}, // E5B9G9R9_FLOAT
{GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT}, // D32_FLOAT
{GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // D16_UNORM
{GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // D24_UNORM_S8_UINT
{GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // S8_UINT_D24_UNORM
{GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL,
GL_FLOAT_32_UNSIGNED_INT_24_8_REV}, // D32_FLOAT_S8_UINT
}};
constexpr std::array ACCELERATED_FORMATS{
GL_RGBA32F, GL_RGBA16F, GL_RG32F, GL_RG16F, GL_R11F_G11F_B10F, GL_R32F,
GL_R16F, GL_RGBA32UI, GL_RGBA16UI, GL_RGB10_A2UI, GL_RGBA8UI, GL_RG32UI,
@ -170,11 +67,6 @@ constexpr std::array ACCELERATED_FORMATS{
GL_RG8_SNORM, GL_R16_SNORM, GL_R8_SNORM,
};
const FormatTuple& GetFormatTuple(PixelFormat pixel_format) {
ASSERT(static_cast<size_t>(pixel_format) < FORMAT_TABLE.size());
return FORMAT_TABLE[static_cast<size_t>(pixel_format)];
}
GLenum ImageTarget(const VideoCommon::ImageInfo& info) {
switch (info.type) {
case ImageType::e1D:
@ -195,26 +87,24 @@ GLenum ImageTarget(const VideoCommon::ImageInfo& info) {
return GL_NONE;
}
GLenum ImageTarget(ImageViewType type, int num_samples = 1) {
GLenum ImageTarget(Shader::TextureType type, int num_samples = 1) {
const bool is_multisampled = num_samples > 1;
switch (type) {
case ImageViewType::e1D:
case Shader::TextureType::Color1D:
return GL_TEXTURE_1D;
case ImageViewType::e2D:
case Shader::TextureType::Color2D:
return is_multisampled ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
case ImageViewType::Cube:
case Shader::TextureType::ColorCube:
return GL_TEXTURE_CUBE_MAP;
case ImageViewType::e3D:
case Shader::TextureType::Color3D:
return GL_TEXTURE_3D;
case ImageViewType::e1DArray:
case Shader::TextureType::ColorArray1D:
return GL_TEXTURE_1D_ARRAY;
case ImageViewType::e2DArray:
case Shader::TextureType::ColorArray2D:
return is_multisampled ? GL_TEXTURE_2D_MULTISAMPLE_ARRAY : GL_TEXTURE_2D_ARRAY;
case ImageViewType::CubeArray:
case Shader::TextureType::ColorArrayCube:
return GL_TEXTURE_CUBE_MAP_ARRAY;
case ImageViewType::Rect:
return GL_TEXTURE_RECTANGLE;
case ImageViewType::Buffer:
case Shader::TextureType::Buffer:
return GL_TEXTURE_BUFFER;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
@ -322,7 +212,7 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array<SwizzleSource, 4
default:
return false;
}
const GLenum internal_format = GetFormatTuple(info.format).internal_format;
const GLenum internal_format = MaxwellToGL::GetFormatTuple(info.format).internal_format;
const auto& format_info = runtime.FormatInfo(info.type, internal_format);
if (format_info.is_compressed) {
return false;
@ -414,11 +304,10 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array<SwizzleSource, 4
void AttachTexture(GLuint fbo, GLenum attachment, const ImageView* image_view) {
if (False(image_view->flags & VideoCommon::ImageViewFlagBits::Slice)) {
const GLuint texture = image_view->DefaultHandle();
glNamedFramebufferTexture(fbo, attachment, texture, 0);
glNamedFramebufferTexture(fbo, attachment, image_view->DefaultHandle(), 0);
return;
}
const GLuint texture = image_view->Handle(ImageViewType::e3D);
const GLuint texture = image_view->Handle(Shader::TextureType::Color3D);
if (image_view->range.extent.layers > 1) {
// TODO: OpenGL doesn't support rendering to a fixed number of slices
glNamedFramebufferTexture(fbo, attachment, texture, 0);
@ -439,6 +328,28 @@ void AttachTexture(GLuint fbo, GLenum attachment, const ImageView* image_view) {
}
}
[[nodiscard]] GLenum ShaderFormat(Shader::ImageFormat format) {
switch (format) {
case Shader::ImageFormat::Typeless:
break;
case Shader::ImageFormat::R8_SINT:
return GL_R8I;
case Shader::ImageFormat::R8_UINT:
return GL_R8UI;
case Shader::ImageFormat::R16_UINT:
return GL_R16UI;
case Shader::ImageFormat::R16_SINT:
return GL_R16I;
case Shader::ImageFormat::R32_UINT:
return GL_R32UI;
case Shader::ImageFormat::R32G32_UINT:
return GL_RG32UI;
case Shader::ImageFormat::R32G32B32A32_UINT:
return GL_RGBA32UI;
}
UNREACHABLE_MSG("Invalid image format={}", format);
return GL_R32UI;
}
} // Anonymous namespace
ImageBufferMap::~ImageBufferMap() {
@ -453,7 +364,7 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, ProgramManager&
static constexpr std::array TARGETS{GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D};
for (size_t i = 0; i < TARGETS.size(); ++i) {
const GLenum target = TARGETS[i];
for (const FormatTuple& tuple : FORMAT_TABLE) {
for (const MaxwellToGL::FormatTuple& tuple : MaxwellToGL::FORMAT_TABLE) {
const GLenum format = tuple.internal_format;
GLint compat_class;
GLint compat_type;
@ -475,11 +386,9 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, ProgramManager&
null_image_1d_array.Create(GL_TEXTURE_1D_ARRAY);
null_image_cube_array.Create(GL_TEXTURE_CUBE_MAP_ARRAY);
null_image_3d.Create(GL_TEXTURE_3D);
null_image_rect.Create(GL_TEXTURE_RECTANGLE);
glTextureStorage2D(null_image_1d_array.handle, 1, GL_R8, 1, 1);
glTextureStorage3D(null_image_cube_array.handle, 1, GL_R8, 1, 1, 6);
glTextureStorage3D(null_image_3d.handle, 1, GL_R8, 1, 1, 1);
glTextureStorage2D(null_image_rect.handle, 1, GL_R8, 1, 1);
std::array<GLuint, 4> new_handles;
glGenTextures(static_cast<GLsizei>(new_handles.size()), new_handles.data());
@ -496,29 +405,28 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, ProgramManager&
glTextureView(null_image_view_cube.handle, GL_TEXTURE_CUBE_MAP, null_image_cube_array.handle,
GL_R8, 0, 1, 0, 6);
const std::array texture_handles{
null_image_1d_array.handle, null_image_cube_array.handle, null_image_3d.handle,
null_image_rect.handle, null_image_view_1d.handle, null_image_view_2d.handle,
null_image_view_2d_array.handle, null_image_view_cube.handle,
null_image_1d_array.handle, null_image_cube_array.handle, null_image_3d.handle,
null_image_view_1d.handle, null_image_view_2d.handle, null_image_view_2d_array.handle,
null_image_view_cube.handle,
};
for (const GLuint handle : texture_handles) {
static constexpr std::array NULL_SWIZZLE{GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO};
glTextureParameteriv(handle, GL_TEXTURE_SWIZZLE_RGBA, NULL_SWIZZLE.data());
}
const auto set_view = [this](ImageViewType type, GLuint handle) {
const auto set_view = [this](Shader::TextureType type, GLuint handle) {
if (device.HasDebuggingToolAttached()) {
const std::string name = fmt::format("NullImage {}", type);
glObjectLabel(GL_TEXTURE, handle, static_cast<GLsizei>(name.size()), name.data());
}
null_image_views[static_cast<size_t>(type)] = handle;
};
set_view(ImageViewType::e1D, null_image_view_1d.handle);
set_view(ImageViewType::e2D, null_image_view_2d.handle);
set_view(ImageViewType::Cube, null_image_view_cube.handle);
set_view(ImageViewType::e3D, null_image_3d.handle);
set_view(ImageViewType::e1DArray, null_image_1d_array.handle);
set_view(ImageViewType::e2DArray, null_image_view_2d_array.handle);
set_view(ImageViewType::CubeArray, null_image_cube_array.handle);
set_view(ImageViewType::Rect, null_image_rect.handle);
set_view(Shader::TextureType::Color1D, null_image_view_1d.handle);
set_view(Shader::TextureType::Color2D, null_image_view_2d.handle);
set_view(Shader::TextureType::ColorCube, null_image_view_cube.handle);
set_view(Shader::TextureType::Color3D, null_image_3d.handle);
set_view(Shader::TextureType::ColorArray1D, null_image_1d_array.handle);
set_view(Shader::TextureType::ColorArray2D, null_image_view_2d_array.handle);
set_view(Shader::TextureType::ColorArrayCube, null_image_cube_array.handle);
}
TextureCacheRuntime::~TextureCacheRuntime() = default;
@ -710,7 +618,7 @@ Image::Image(TextureCacheRuntime& runtime, const VideoCommon::ImageInfo& info_,
gl_format = GL_RGBA;
gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
} else {
const auto& tuple = GetFormatTuple(info.format);
const auto& tuple = MaxwellToGL::GetFormatTuple(info.format);
gl_internal_format = tuple.internal_format;
gl_format = tuple.format;
gl_type = tuple.type;
@ -750,8 +658,7 @@ Image::Image(TextureCacheRuntime& runtime, const VideoCommon::ImageInfo& info_,
glTextureStorage3D(handle, num_levels, gl_internal_format, width, height, depth);
break;
case GL_TEXTURE_BUFFER:
buffer.Create();
glNamedBufferStorage(buffer.handle, guest_size_bytes, nullptr, 0);
UNREACHABLE();
break;
default:
UNREACHABLE_MSG("Invalid target=0x{:x}", target);
@ -789,14 +696,6 @@ void Image::UploadMemory(const ImageBufferMap& map,
}
}
void Image::UploadMemory(const ImageBufferMap& map,
std::span<const VideoCommon::BufferCopy> copies) {
for (const VideoCommon::BufferCopy& copy : copies) {
glCopyNamedBufferSubData(map.buffer, buffer.handle, copy.src_offset + map.offset,
copy.dst_offset, copy.size);
}
}
void Image::DownloadMemory(ImageBufferMap& map,
std::span<const VideoCommon::BufferImageCopy> copies) {
glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT); // TODO: Move this to its own API
@ -958,23 +857,30 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
if (True(image.flags & ImageFlagBits::Converted)) {
internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
} else {
internal_format = GetFormatTuple(format).internal_format;
internal_format = MaxwellToGL::GetFormatTuple(format).internal_format;
}
full_range = info.range;
flat_range = info.range;
set_object_label = device.HasDebuggingToolAttached();
is_render_target = info.IsRenderTarget();
original_texture = image.texture.handle;
num_samples = image.info.num_samples;
if (!is_render_target) {
swizzle[0] = info.x_source;
swizzle[1] = info.y_source;
swizzle[2] = info.z_source;
swizzle[3] = info.w_source;
}
VideoCommon::SubresourceRange flatten_range = info.range;
std::array<GLuint, 2> handles;
stored_views.reserve(2);
switch (info.type) {
case ImageViewType::e1DArray:
flatten_range.extent.layers = 1;
flat_range.extent.layers = 1;
[[fallthrough]];
case ImageViewType::e1D:
glGenTextures(2, handles.data());
SetupView(device, image, ImageViewType::e1D, handles[0], info, flatten_range);
SetupView(device, image, ImageViewType::e1DArray, handles[1], info, info.range);
SetupView(Shader::TextureType::Color1D);
SetupView(Shader::TextureType::ColorArray1D);
break;
case ImageViewType::e2DArray:
flatten_range.extent.layers = 1;
flat_range.extent.layers = 1;
[[fallthrough]];
case ImageViewType::e2D:
if (True(flags & VideoCommon::ImageViewFlagBits::Slice)) {
@ -984,63 +890,126 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
.base = {.level = info.range.base.level, .layer = 0},
.extent = {.levels = 1, .layers = 1},
};
glGenTextures(1, handles.data());
SetupView(device, image, ImageViewType::e3D, handles[0], info, slice_range);
break;
full_range = slice_range;
SetupView(Shader::TextureType::Color3D);
} else {
SetupView(Shader::TextureType::Color2D);
SetupView(Shader::TextureType::ColorArray2D);
}
glGenTextures(2, handles.data());
SetupView(device, image, ImageViewType::e2D, handles[0], info, flatten_range);
SetupView(device, image, ImageViewType::e2DArray, handles[1], info, info.range);
break;
case ImageViewType::e3D:
glGenTextures(1, handles.data());
SetupView(device, image, ImageViewType::e3D, handles[0], info, info.range);
SetupView(Shader::TextureType::Color3D);
break;
case ImageViewType::CubeArray:
flatten_range.extent.layers = 6;
flat_range.extent.layers = 6;
[[fallthrough]];
case ImageViewType::Cube:
glGenTextures(2, handles.data());
SetupView(device, image, ImageViewType::Cube, handles[0], info, flatten_range);
SetupView(device, image, ImageViewType::CubeArray, handles[1], info, info.range);
SetupView(Shader::TextureType::ColorCube);
SetupView(Shader::TextureType::ColorArrayCube);
break;
case ImageViewType::Rect:
glGenTextures(1, handles.data());
SetupView(device, image, ImageViewType::Rect, handles[0], info, info.range);
UNIMPLEMENTED();
break;
case ImageViewType::Buffer:
glCreateTextures(GL_TEXTURE_BUFFER, 1, handles.data());
SetupView(device, image, ImageViewType::Buffer, handles[0], info, info.range);
UNREACHABLE();
break;
}
switch (info.type) {
case ImageViewType::e1D:
default_handle = Handle(Shader::TextureType::Color1D);
break;
case ImageViewType::e1DArray:
default_handle = Handle(Shader::TextureType::ColorArray1D);
break;
case ImageViewType::e2D:
default_handle = Handle(Shader::TextureType::Color2D);
break;
case ImageViewType::e2DArray:
default_handle = Handle(Shader::TextureType::ColorArray2D);
break;
case ImageViewType::e3D:
default_handle = Handle(Shader::TextureType::Color3D);
break;
case ImageViewType::Cube:
default_handle = Handle(Shader::TextureType::ColorCube);
break;
case ImageViewType::CubeArray:
default_handle = Handle(Shader::TextureType::ColorArrayCube);
break;
default:
break;
}
default_handle = Handle(info.type);
}
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
: VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
buffer_size{VideoCommon::CalculateGuestSizeInBytes(info)} {}
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info)
: VideoCommon::ImageViewBase{info, view_info} {}
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::NullImageParams& params)
: VideoCommon::ImageViewBase{params}, views{runtime.null_image_views} {}
void ImageView::SetupView(const Device& device, Image& image, ImageViewType view_type,
GLuint handle, const VideoCommon::ImageViewInfo& info,
VideoCommon::SubresourceRange view_range) {
if (info.type == ImageViewType::Buffer) {
// TODO: Take offset from buffer cache
glTextureBufferRange(handle, internal_format, image.buffer.handle, 0,
image.guest_size_bytes);
} else {
const GLuint parent = image.texture.handle;
const GLenum target = ImageTarget(view_type, image.info.num_samples);
glTextureView(handle, target, parent, internal_format, view_range.base.level,
view_range.extent.levels, view_range.base.layer, view_range.extent.layers);
if (!info.IsRenderTarget()) {
ApplySwizzle(handle, format, info.Swizzle());
}
GLuint ImageView::StorageView(Shader::TextureType texture_type, Shader::ImageFormat image_format) {
if (image_format == Shader::ImageFormat::Typeless) {
return Handle(texture_type);
}
if (device.HasDebuggingToolAttached()) {
const std::string name = VideoCommon::Name(*this, view_type);
glObjectLabel(GL_TEXTURE, handle, static_cast<GLsizei>(name.size()), name.data());
const bool is_signed{image_format == Shader::ImageFormat::R8_SINT ||
image_format == Shader::ImageFormat::R16_SINT};
if (!storage_views) {
storage_views = std::make_unique<StorageViews>();
}
stored_views.emplace_back().handle = handle;
views[static_cast<size_t>(view_type)] = handle;
auto& type_views{is_signed ? storage_views->signeds : storage_views->unsigneds};
GLuint& view{type_views[static_cast<size_t>(texture_type)]};
if (view == 0) {
view = MakeView(texture_type, ShaderFormat(image_format));
}
return view;
}
void ImageView::SetupView(Shader::TextureType view_type) {
views[static_cast<size_t>(view_type)] = MakeView(view_type, internal_format);
}
GLuint ImageView::MakeView(Shader::TextureType view_type, GLenum view_format) {
VideoCommon::SubresourceRange view_range;
switch (view_type) {
case Shader::TextureType::Color1D:
case Shader::TextureType::Color2D:
case Shader::TextureType::ColorCube:
view_range = flat_range;
break;
case Shader::TextureType::ColorArray1D:
case Shader::TextureType::ColorArray2D:
case Shader::TextureType::Color3D:
case Shader::TextureType::ColorArrayCube:
view_range = full_range;
break;
default:
UNREACHABLE();
}
OGLTextureView& view = stored_views.emplace_back();
view.Create();
const GLenum target = ImageTarget(view_type, num_samples);
glTextureView(view.handle, target, original_texture, view_format, view_range.base.level,
view_range.extent.levels, view_range.base.layer, view_range.extent.layers);
if (!is_render_target) {
std::array<SwizzleSource, 4> casted_swizzle;
std::ranges::transform(swizzle, casted_swizzle.begin(), [](u8 component_swizzle) {
return static_cast<SwizzleSource>(component_swizzle);
});
ApplySwizzle(view.handle, format, casted_swizzle);
}
if (set_object_label) {
const std::string name = VideoCommon::Name(*this);
glObjectLabel(GL_TEXTURE, view.handle, static_cast<GLsizei>(name.size()), name.data());
}
return view.handle;
}
Sampler::Sampler(TextureCacheRuntime& runtime, const TSCEntry& config) {

View File

@ -9,6 +9,7 @@
#include <glad/glad.h>
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/util_shaders.h"
#include "video_core/texture_cache/texture_cache.h"
@ -127,13 +128,12 @@ private:
OGLTexture null_image_1d_array;
OGLTexture null_image_cube_array;
OGLTexture null_image_3d;
OGLTexture null_image_rect;
OGLTextureView null_image_view_1d;
OGLTextureView null_image_view_2d;
OGLTextureView null_image_view_2d_array;
OGLTextureView null_image_view_cube;
std::array<GLuint, VideoCommon::NUM_IMAGE_VIEW_TYPES> null_image_views;
std::array<GLuint, Shader::NUM_TEXTURE_TYPES> null_image_views{};
};
class Image : public VideoCommon::ImageBase {
@ -154,8 +154,6 @@ public:
void UploadMemory(const ImageBufferMap& map,
std::span<const VideoCommon::BufferImageCopy> copies);
void UploadMemory(const ImageBufferMap& map, std::span<const VideoCommon::BufferCopy> copies);
void DownloadMemory(ImageBufferMap& map, std::span<const VideoCommon::BufferImageCopy> copies);
GLuint StorageHandle() noexcept;
@ -170,7 +168,6 @@ private:
void CopyImageToBuffer(const VideoCommon::BufferImageCopy& copy, size_t buffer_offset);
OGLTexture texture;
OGLBuffer buffer;
OGLTextureView store_view;
GLenum gl_internal_format = GL_NONE;
GLenum gl_format = GL_NONE;
@ -182,10 +179,17 @@ class ImageView : public VideoCommon::ImageViewBase {
public:
explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&);
explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo&,
const VideoCommon::ImageViewInfo&, GPUVAddr);
explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info);
explicit ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams&);
[[nodiscard]] GLuint Handle(ImageViewType query_type) const noexcept {
return views[static_cast<size_t>(query_type)];
[[nodiscard]] GLuint StorageView(Shader::TextureType texture_type,
Shader::ImageFormat image_format);
[[nodiscard]] GLuint Handle(Shader::TextureType handle_type) const noexcept {
return views[static_cast<size_t>(handle_type)];
}
[[nodiscard]] GLuint DefaultHandle() const noexcept {
@ -196,15 +200,38 @@ public:
return internal_format;
}
private:
void SetupView(const Device& device, Image& image, ImageViewType view_type, GLuint handle,
const VideoCommon::ImageViewInfo& info,
VideoCommon::SubresourceRange view_range);
[[nodiscard]] GPUVAddr GpuAddr() const noexcept {
return gpu_addr;
}
std::array<GLuint, VideoCommon::NUM_IMAGE_VIEW_TYPES> views{};
[[nodiscard]] u32 BufferSize() const noexcept {
return buffer_size;
}
private:
struct StorageViews {
std::array<GLuint, Shader::NUM_TEXTURE_TYPES> signeds{};
std::array<GLuint, Shader::NUM_TEXTURE_TYPES> unsigneds{};
};
void SetupView(Shader::TextureType view_type);
GLuint MakeView(Shader::TextureType view_type, GLenum view_format);
std::array<GLuint, Shader::NUM_TEXTURE_TYPES> views{};
std::vector<OGLTextureView> stored_views;
GLuint default_handle = 0;
std::unique_ptr<StorageViews> storage_views;
GLenum internal_format = GL_NONE;
GLuint default_handle = 0;
GPUVAddr gpu_addr = 0;
u32 buffer_size = 0;
GLuint original_texture = 0;
int num_samples = 0;
VideoCommon::SubresourceRange flat_range;
VideoCommon::SubresourceRange full_range;
std::array<u8, 4> swizzle{};
bool set_object_label = false;
bool is_render_target = false;
};
class ImageAlloc : public VideoCommon::ImageAllocBase {};
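Note: a short sketch of how the two lookup paths on the new ImageView interface differ, assuming a caller that already holds an image_view (not code from the PR):

// Sampled textures take the plain per-type view.
const GLuint sampled = image_view.Handle(Shader::TextureType::Color2D);
// Storage images with a reinterpreted format take the lazily created signed/unsigned view.
const GLuint storage =
    image_view.StorageView(Shader::TextureType::Color2D, Shader::ImageFormat::R32_UINT);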

View File

@ -5,12 +5,120 @@
#pragma once
#include <glad/glad.h>
#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
namespace OpenGL::MaxwellToGL {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct FormatTuple {
GLenum internal_format;
GLenum format = GL_NONE;
GLenum type = GL_NONE;
};
constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TABLE = {{
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // A8B8G8R8_UNORM
{GL_RGBA8_SNORM, GL_RGBA, GL_BYTE}, // A8B8G8R8_SNORM
{GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE}, // A8B8G8R8_SINT
{GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE}, // A8B8G8R8_UINT
{GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // R5G6B5_UNORM
{GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV}, // B5G6R5_UNORM
{GL_RGB5_A1, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1R5G5B5_UNORM
{GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10_UNORM
{GL_RGB10_A2UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10_UINT
{GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1B5G5R5_UNORM
{GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R8_UNORM
{GL_R8_SNORM, GL_RED, GL_BYTE}, // R8_SNORM
{GL_R8I, GL_RED_INTEGER, GL_BYTE}, // R8_SINT
{GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE}, // R8_UINT
{GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT}, // R16G16B16A16_FLOAT
{GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT}, // R16G16B16A16_UNORM
{GL_RGBA16_SNORM, GL_RGBA, GL_SHORT}, // R16G16B16A16_SNORM
{GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT}, // R16G16B16A16_SINT
{GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT}, // R16G16B16A16_UINT
{GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV}, // B10G11R11_FLOAT
{GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT}, // R32G32B32A32_UINT
{GL_COMPRESSED_RGBA_S3TC_DXT1_EXT}, // BC1_RGBA_UNORM
{GL_COMPRESSED_RGBA_S3TC_DXT3_EXT}, // BC2_UNORM
{GL_COMPRESSED_RGBA_S3TC_DXT5_EXT}, // BC3_UNORM
{GL_COMPRESSED_RED_RGTC1}, // BC4_UNORM
{GL_COMPRESSED_SIGNED_RED_RGTC1}, // BC4_SNORM
{GL_COMPRESSED_RG_RGTC2}, // BC5_UNORM
{GL_COMPRESSED_SIGNED_RG_RGTC2}, // BC5_SNORM
{GL_COMPRESSED_RGBA_BPTC_UNORM}, // BC7_UNORM
{GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT}, // BC6H_UFLOAT
{GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT}, // BC6H_SFLOAT
{GL_COMPRESSED_RGBA_ASTC_4x4_KHR}, // ASTC_2D_4X4_UNORM
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE}, // B8G8R8A8_UNORM
{GL_RGBA32F, GL_RGBA, GL_FLOAT}, // R32G32B32A32_FLOAT
{GL_RGBA32I, GL_RGBA_INTEGER, GL_INT}, // R32G32B32A32_SINT
{GL_RG32F, GL_RG, GL_FLOAT}, // R32G32_FLOAT
{GL_RG32I, GL_RG_INTEGER, GL_INT}, // R32G32_SINT
{GL_R32F, GL_RED, GL_FLOAT}, // R32_FLOAT
{GL_R16F, GL_RED, GL_HALF_FLOAT}, // R16_FLOAT
{GL_R16, GL_RED, GL_UNSIGNED_SHORT}, // R16_UNORM
{GL_R16_SNORM, GL_RED, GL_SHORT}, // R16_SNORM
{GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT}, // R16_UINT
{GL_R16I, GL_RED_INTEGER, GL_SHORT}, // R16_SINT
{GL_RG16, GL_RG, GL_UNSIGNED_SHORT}, // R16G16_UNORM
{GL_RG16F, GL_RG, GL_HALF_FLOAT}, // R16G16_FLOAT
{GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT}, // R16G16_UINT
{GL_RG16I, GL_RG_INTEGER, GL_SHORT}, // R16G16_SINT
{GL_RG16_SNORM, GL_RG, GL_SHORT}, // R16G16_SNORM
{GL_RGB32F, GL_RGB, GL_FLOAT}, // R32G32B32_FLOAT
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // A8B8G8R8_SRGB
{GL_RG8, GL_RG, GL_UNSIGNED_BYTE}, // R8G8_UNORM
{GL_RG8_SNORM, GL_RG, GL_BYTE}, // R8G8_SNORM
{GL_RG8I, GL_RG_INTEGER, GL_BYTE}, // R8G8_SINT
{GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE}, // R8G8_UINT
{GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT}, // R32G32_UINT
{GL_RGB16F, GL_RGBA, GL_HALF_FLOAT}, // R16G16B16X16_FLOAT
{GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT}, // R32_UINT
{GL_R32I, GL_RED_INTEGER, GL_INT}, // R32_SINT
{GL_COMPRESSED_RGBA_ASTC_8x8_KHR}, // ASTC_2D_8X8_UNORM
{GL_COMPRESSED_RGBA_ASTC_8x5_KHR}, // ASTC_2D_8X5_UNORM
{GL_COMPRESSED_RGBA_ASTC_5x4_KHR}, // ASTC_2D_5X4_UNORM
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE}, // B8G8R8A8_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT}, // BC1_RGBA_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT}, // BC2_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB
{GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB
{GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR}, // ASTC_2D_5X4_SRGB
{GL_COMPRESSED_RGBA_ASTC_5x5_KHR}, // ASTC_2D_5X5_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR}, // ASTC_2D_5X5_SRGB
{GL_COMPRESSED_RGBA_ASTC_10x8_KHR}, // ASTC_2D_10X8_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR}, // ASTC_2D_10X8_SRGB
{GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB
{GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB
{GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR}, // ASTC_2D_12X12_SRGB
{GL_COMPRESSED_RGBA_ASTC_8x6_KHR}, // ASTC_2D_8X6_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR}, // ASTC_2D_8X6_SRGB
{GL_COMPRESSED_RGBA_ASTC_6x5_KHR}, // ASTC_2D_6X5_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR}, // ASTC_2D_6X5_SRGB
{GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV}, // E5B9G9R9_FLOAT
{GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT}, // D32_FLOAT
{GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // D16_UNORM
{GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // D24_UNORM_S8_UINT
{GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // S8_UINT_D24_UNORM
{GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL,
GL_FLOAT_32_UNSIGNED_INT_24_8_REV}, // D32_FLOAT_S8_UINT
}};
inline const FormatTuple& GetFormatTuple(VideoCore::Surface::PixelFormat pixel_format) {
ASSERT(static_cast<size_t>(pixel_format) < FORMAT_TABLE.size());
return FORMAT_TABLE[static_cast<size_t>(pixel_format)];
}
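Note: FORMAT_TABLE is indexed by VideoCore::Surface::PixelFormat in declaration order, so the lookup is a plain bounds-asserted array access. A trivial usage sketch, assuming a caller inside namespace OpenGL; the commented values correspond to the first table entry:

// Sketch: query the GL encoding for a guest pixel format.
const auto& tuple = MaxwellToGL::GetFormatTuple(VideoCore::Surface::PixelFormat::A8B8G8R8_UNORM);
// tuple.internal_format == GL_RGBA8
// tuple.format          == GL_RGBA
// tuple.type            == GL_UNSIGNED_INT_8_8_8_8_REV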
inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) {
switch (attrib.type) {
case Maxwell::VertexAttribute::Type::UnsignedNorm:

View File

@ -25,6 +25,7 @@
#include "video_core/host_shaders/opengl_present_vert.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_shader_util.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
#include "video_core/textures/decoders.h"
@ -139,6 +140,26 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
}
AddTelemetryFields();
InitOpenGLObjects();
// Initialize default attributes to match hardware's disabled attributes
GLint max_attribs{};
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &max_attribs);
for (GLint attrib = 0; attrib < max_attribs; ++attrib) {
glVertexAttrib4f(attrib, 0.0f, 0.0f, 0.0f, 1.0f);
}
// Enable seamless cubemaps when per texture parameters are not available
if (!GLAD_GL_ARB_seamless_cubemap_per_texture && !GLAD_GL_AMD_seamless_cubemap_per_texture) {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
}
// Enable unified vertex attributes and query vertex buffer address when the driver supports it
if (device.HasVertexBufferUnifiedMemory()) {
glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
glEnableClientState(GL_ELEMENT_ARRAY_UNIFIED_NV);
glMakeNamedBufferResidentNV(vertex_buffer.handle, GL_READ_ONLY);
glGetNamedBufferParameterui64vNV(vertex_buffer.handle, GL_BUFFER_GPU_ADDRESS_NV,
&vertex_buffer_address);
}
}
RendererOpenGL::~RendererOpenGL() = default;
@ -230,18 +251,8 @@ void RendererOpenGL::LoadColorToActiveGLTexture(u8 color_r, u8 color_g, u8 color
void RendererOpenGL::InitOpenGLObjects() {
// Create shader programs
OGLShader vertex_shader;
vertex_shader.Create(HostShaders::OPENGL_PRESENT_VERT, GL_VERTEX_SHADER);
OGLShader fragment_shader;
fragment_shader.Create(HostShaders::OPENGL_PRESENT_FRAG, GL_FRAGMENT_SHADER);
vertex_program.Create(true, false, vertex_shader.handle);
fragment_program.Create(true, false, fragment_shader.handle);
pipeline.Create();
glUseProgramStages(pipeline.handle, GL_VERTEX_SHADER_BIT, vertex_program.handle);
glUseProgramStages(pipeline.handle, GL_FRAGMENT_SHADER_BIT, fragment_program.handle);
present_vertex = CreateProgram(HostShaders::OPENGL_PRESENT_VERT, GL_VERTEX_SHADER);
present_fragment = CreateProgram(HostShaders::OPENGL_PRESENT_FRAG, GL_FRAGMENT_SHADER);
// Generate presentation sampler
present_sampler.Create();
@ -263,21 +274,6 @@ void RendererOpenGL::InitOpenGLObjects() {
// Clear screen to black
LoadColorToActiveGLTexture(0, 0, 0, 0, screen_info.texture);
// Enable seamless cubemaps when per texture parameters are not available
if (!GLAD_GL_ARB_seamless_cubemap_per_texture && !GLAD_GL_AMD_seamless_cubemap_per_texture) {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
}
// Enable unified vertex attributes and query vertex buffer address when the driver supports it
if (device.HasVertexBufferUnifiedMemory()) {
glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
glEnableClientState(GL_ELEMENT_ARRAY_UNIFIED_NV);
glMakeNamedBufferResidentNV(vertex_buffer.handle, GL_READ_ONLY);
glGetNamedBufferParameterui64vNV(vertex_buffer.handle, GL_BUFFER_GPU_ADDRESS_NV,
&vertex_buffer_address);
}
}
void RendererOpenGL::AddTelemetryFields() {
@ -342,8 +338,9 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) {
// Set projection matrix
const std::array ortho_matrix =
MakeOrthographicMatrix(static_cast<float>(layout.width), static_cast<float>(layout.height));
glProgramUniformMatrix3x2fv(vertex_program.handle, ModelViewMatrixLocation, 1, GL_FALSE,
std::data(ortho_matrix));
program_manager.BindPresentPrograms(present_vertex.handle, present_fragment.handle);
glProgramUniformMatrix3x2fv(present_vertex.handle, ModelViewMatrixLocation, 1, GL_FALSE,
ortho_matrix.data());
const auto& texcoords = screen_info.display_texcoords;
auto left = texcoords.left;
@ -404,8 +401,6 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) {
state_tracker.NotifyClipControl();
state_tracker.NotifyAlphaTest();
program_manager.BindHostPipeline(pipeline.handle);
state_tracker.ClipControl(GL_LOWER_LEFT, GL_ZERO_TO_ONE);
glEnable(GL_CULL_FACE);
if (screen_info.display_srgb) {
@ -453,7 +448,8 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) {
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
program_manager.RestoreGuestPipeline();
// TODO
// program_manager.RestoreGuestPipeline();
}
void RendererOpenGL::RenderScreenshot() {

View File

@ -12,7 +12,6 @@
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
namespace Core {
@ -111,9 +110,8 @@ private:
// OpenGL object IDs
OGLSampler present_sampler;
OGLBuffer vertex_buffer;
OGLProgram vertex_program;
OGLProgram fragment_program;
OGLPipeline pipeline;
OGLProgram present_vertex;
OGLProgram present_fragment;
OGLFramebuffer screenshot_framebuffer;
// GPU address of the vertex buffer

View File

@ -16,8 +16,8 @@
#include "video_core/host_shaders/opengl_copy_bc4_comp.h"
#include "video_core/host_shaders/opengl_copy_bgra_comp.h"
#include "video_core/host_shaders/pitch_unswizzle_comp.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_shader_util.h"
#include "video_core/renderer_opengl/gl_texture_cache.h"
#include "video_core/renderer_opengl/util_shaders.h"
#include "video_core/texture_cache/accelerated_swizzle.h"
@ -41,21 +41,14 @@ using VideoCommon::Accelerated::MakeBlockLinearSwizzle3DParams;
using VideoCore::Surface::BytesPerBlock;
namespace {
OGLProgram MakeProgram(std::string_view source) {
OGLShader shader;
shader.Create(source, GL_COMPUTE_SHADER);
OGLProgram program;
program.Create(true, false, shader.handle);
return program;
return CreateProgram(source, GL_COMPUTE_SHADER);
}
size_t NumPixelsInCopy(const VideoCommon::ImageCopy& copy) {
return static_cast<size_t>(copy.extent.width * copy.extent.height *
copy.src_subresource.num_layers);
}
} // Anonymous namespace
UtilShaders::UtilShaders(ProgramManager& program_manager_)
@ -86,7 +79,7 @@ void UtilShaders::ASTCDecode(Image& image, const ImageBufferMap& map,
.width = VideoCore::Surface::DefaultBlockWidth(image.info.format),
.height = VideoCore::Surface::DefaultBlockHeight(image.info.format),
};
program_manager.BindHostCompute(astc_decoder_program.handle);
program_manager.BindComputeProgram(astc_decoder_program.handle);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, BINDING_SWIZZLE_BUFFER, swizzle_table_buffer.handle);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, BINDING_ENC_BUFFER, astc_buffer.handle);
@ -134,7 +127,7 @@ void UtilShaders::BlockLinearUpload2D(Image& image, const ImageBufferMap& map,
static constexpr GLuint BINDING_INPUT_BUFFER = 1;
static constexpr GLuint BINDING_OUTPUT_IMAGE = 0;
program_manager.BindHostCompute(block_linear_unswizzle_2d_program.handle);
program_manager.BindComputeProgram(block_linear_unswizzle_2d_program.handle);
glFlushMappedNamedBufferRange(map.buffer, map.offset, image.guest_size_bytes);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, BINDING_SWIZZLE_BUFFER, swizzle_table_buffer.handle);
@ -173,7 +166,7 @@ void UtilShaders::BlockLinearUpload3D(Image& image, const ImageBufferMap& map,
static constexpr GLuint BINDING_OUTPUT_IMAGE = 0;
glFlushMappedNamedBufferRange(map.buffer, map.offset, image.guest_size_bytes);
program_manager.BindHostCompute(block_linear_unswizzle_3d_program.handle);
program_manager.BindComputeProgram(block_linear_unswizzle_3d_program.handle);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, BINDING_SWIZZLE_BUFFER, swizzle_table_buffer.handle);
const GLenum store_format = StoreFormat(BytesPerBlock(image.info.format));
@ -222,7 +215,7 @@ void UtilShaders::PitchUpload(Image& image, const ImageBufferMap& map,
UNIMPLEMENTED_IF_MSG(!std::has_single_bit(bytes_per_block),
"Non-power of two images are not implemented");
program_manager.BindHostCompute(pitch_unswizzle_program.handle);
program_manager.BindComputeProgram(pitch_unswizzle_program.handle);
glFlushMappedNamedBufferRange(map.buffer, map.offset, image.guest_size_bytes);
glUniform2ui(LOC_ORIGIN, 0, 0);
glUniform2i(LOC_DESTINATION, 0, 0);
@ -250,7 +243,7 @@ void UtilShaders::CopyBC4(Image& dst_image, Image& src_image, std::span<const Im
static constexpr GLuint LOC_SRC_OFFSET = 0;
static constexpr GLuint LOC_DST_OFFSET = 1;
program_manager.BindHostCompute(copy_bc4_program.handle);
program_manager.BindComputeProgram(copy_bc4_program.handle);
for (const ImageCopy& copy : copies) {
ASSERT(copy.src_subresource.base_layer == 0);
@ -286,7 +279,7 @@ void UtilShaders::CopyBGR(Image& dst_image, Image& src_image,
break;
case 4: {
// BGRA8 copy
program_manager.BindHostCompute(copy_bgra_program.handle);
program_manager.BindComputeProgram(copy_bgra_program.handle);
constexpr GLenum FORMAT = GL_RGBA8;
for (const ImageCopy& copy : copies) {
ASSERT(copy.src_offset == zero_offset);

View File

@ -49,6 +49,16 @@ constexpr VkDescriptorSetLayoutCreateInfo ONE_TEXTURE_DESCRIPTOR_SET_LAYOUT_CREA
.bindingCount = 1,
.pBindings = &TEXTURE_DESCRIPTOR_SET_LAYOUT_BINDING<0>,
};
template <u32 num_textures>
inline constexpr DescriptorBankInfo TEXTURE_DESCRIPTOR_BANK_INFO{
.uniform_buffers = 0,
.storage_buffers = 0,
.texture_buffers = 0,
.image_buffers = 0,
.textures = num_textures,
.images = 0,
.score = 2,
};
constexpr VkDescriptorSetLayoutCreateInfo TWO_TEXTURES_DESCRIPTOR_SET_LAYOUT_CREATE_INFO{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
@ -323,18 +333,19 @@ void BindBlitState(vk::CommandBuffer cmdbuf, VkPipelineLayout layout, const Regi
cmdbuf.SetScissor(0, scissor);
cmdbuf.PushConstants(layout, VK_SHADER_STAGE_VERTEX_BIT, push_constants);
}
} // Anonymous namespace
BlitImageHelper::BlitImageHelper(const Device& device_, VKScheduler& scheduler_,
StateTracker& state_tracker_, VKDescriptorPool& descriptor_pool)
StateTracker& state_tracker_, DescriptorPool& descriptor_pool)
: device{device_}, scheduler{scheduler_}, state_tracker{state_tracker_},
one_texture_set_layout(device.GetLogical().CreateDescriptorSetLayout(
ONE_TEXTURE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)),
two_textures_set_layout(device.GetLogical().CreateDescriptorSetLayout(
TWO_TEXTURES_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)),
one_texture_descriptor_allocator(descriptor_pool, *one_texture_set_layout),
two_textures_descriptor_allocator(descriptor_pool, *two_textures_set_layout),
one_texture_descriptor_allocator{
descriptor_pool.Allocator(*one_texture_set_layout, TEXTURE_DESCRIPTOR_BANK_INFO<1>)},
two_textures_descriptor_allocator{
descriptor_pool.Allocator(*two_textures_set_layout, TEXTURE_DESCRIPTOR_BANK_INFO<2>)},
one_texture_pipeline_layout(device.GetLogical().CreatePipelineLayout(
PipelineLayoutCreateInfo(one_texture_set_layout.address()))),
two_textures_pipeline_layout(device.GetLogical().CreatePipelineLayout(
@ -362,14 +373,14 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, const ImageV
.operation = operation,
};
const VkPipelineLayout layout = *one_texture_pipeline_layout;
const VkImageView src_view = src_image_view.Handle(ImageViewType::e2D);
const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
const VkSampler sampler = is_linear ? *linear_sampler : *nearest_sampler;
const VkPipeline pipeline = FindOrEmplacePipeline(key);
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([dst_region, src_region, pipeline, layout, sampler, src_view, descriptor_set,
&device = device](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, dst_region, src_region, pipeline, layout, sampler,
src_view](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
@ -391,12 +402,11 @@ void BlitImageHelper::BlitDepthStencil(const Framebuffer* dst_framebuffer,
const VkPipelineLayout layout = *two_textures_pipeline_layout;
const VkSampler sampler = *nearest_sampler;
const VkPipeline pipeline = BlitDepthStencilPipeline(dst_framebuffer->RenderPass());
const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([dst_region, src_region, pipeline, layout, sampler, src_depth_view,
src_stencil_view, descriptor_set,
&device = device](vk::CommandBuffer cmdbuf) {
src_stencil_view, this](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
src_stencil_view);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
@ -416,7 +426,6 @@ void BlitImageHelper::ConvertD32ToR32(const Framebuffer* dst_framebuffer,
void BlitImageHelper::ConvertR32ToD32(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
ConvertColorToDepthPipeline(convert_r32_to_d32_pipeline, dst_framebuffer->RenderPass());
Convert(*convert_r32_to_d32_pipeline, dst_framebuffer, src_image_view);
}
@ -436,16 +445,14 @@ void BlitImageHelper::ConvertR16ToD16(const Framebuffer* dst_framebuffer,
void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
const VkPipelineLayout layout = *one_texture_pipeline_layout;
const VkImageView src_view = src_image_view.Handle(ImageViewType::e2D);
const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
const VkSampler sampler = *nearest_sampler;
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
const VkExtent2D extent{
.width = src_image_view.size.width,
.height = src_image_view.size.height,
};
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([pipeline, layout, sampler, src_view, descriptor_set, extent,
&device = device](vk::CommandBuffer cmdbuf) {
scheduler.Record([pipeline, layout, sampler, src_view, extent, this](vk::CommandBuffer cmdbuf) {
const VkOffset2D offset{
.x = 0,
.y = 0,
@ -466,6 +473,7 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
.tex_scale = {viewport.width, viewport.height},
.tex_offset = {0.0f, 0.0f},
};
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
// TODO: Barriers

View File

@ -31,7 +31,7 @@ struct BlitImagePipelineKey {
class BlitImageHelper {
public:
explicit BlitImageHelper(const Device& device, VKScheduler& scheduler,
StateTracker& state_tracker, VKDescriptorPool& descriptor_pool);
StateTracker& state_tracker, DescriptorPool& descriptor_pool);
~BlitImageHelper();
void BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,

View File

@ -15,9 +15,7 @@
#include "video_core/renderer_vulkan/vk_state_tracker.h"
namespace Vulkan {
namespace {
constexpr size_t POINT = 0;
constexpr size_t LINE = 1;
constexpr size_t POLYGON = 2;
@ -39,10 +37,20 @@ constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
POLYGON, // Patches
};
void RefreshXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) {
std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) {
return VideoCommon::TransformFeedbackState::Layout{
.stream = layout.stream,
.varying_count = layout.varying_count,
.stride = layout.stride,
};
});
state.varyings = regs.tfb_varying_locs;
}
} // Anonymous namespace
void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
bool has_extended_dynamic_state) {
bool has_extended_dynamic_state, bool has_dynamic_vertex_input) {
const Maxwell& regs = maxwell3d.regs;
const std::array enabled_lut{
regs.polygon_offset_point_enable,
@ -52,6 +60,9 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
raw1 = 0;
extended_dynamic_state.Assign(has_extended_dynamic_state ? 1 : 0);
dynamic_vertex_input.Assign(has_dynamic_vertex_input ? 1 : 0);
xfb_enabled.Assign(regs.tfb_enabled != 0);
primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
@ -63,37 +74,66 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
logic_op.Assign(PackLogicOp(regs.logic_op.operation));
rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
topology.Assign(regs.draw.topology);
msaa_mode.Assign(regs.multisample_mode);
raw2 = 0;
rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
const auto test_func =
regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
alpha_test_func.Assign(PackComparisonOp(test_func));
early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
depth_enabled.Assign(regs.zeta_enable != 0 ? 1 : 0);
depth_format.Assign(static_cast<u32>(regs.zeta.format));
y_negate.Assign(regs.screen_y_control.y_negate != 0 ? 1 : 0);
provoking_vertex_last.Assign(regs.provoking_vertex_last != 0 ? 1 : 0);
conservative_raster_enable.Assign(regs.conservative_raster_enable != 0 ? 1 : 0);
smooth_lines.Assign(regs.line_smooth_enable != 0 ? 1 : 0);
for (size_t i = 0; i < regs.rt.size(); ++i) {
color_formats[i] = static_cast<u8>(regs.rt[i].format);
}
alpha_test_ref = Common::BitCast<u32>(regs.alpha_test_ref);
point_size = Common::BitCast<u32>(regs.point_size);
if (maxwell3d.dirty.flags[Dirty::InstanceDivisors]) {
maxwell3d.dirty.flags[Dirty::InstanceDivisors] = false;
for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
}
}
if (maxwell3d.dirty.flags[Dirty::VertexAttributes]) {
maxwell3d.dirty.flags[Dirty::VertexAttributes] = false;
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
const auto& input = regs.vertex_attrib_format[index];
auto& attribute = attributes[index];
attribute.raw = 0;
attribute.enabled.Assign(input.IsConstant() ? 0 : 1);
attribute.buffer.Assign(input.buffer);
attribute.offset.Assign(input.offset);
attribute.type.Assign(static_cast<u32>(input.type.Value()));
attribute.size.Assign(static_cast<u32>(input.size.Value()));
if (maxwell3d.dirty.flags[Dirty::VertexInput]) {
if (has_dynamic_vertex_input) {
// Dirty flag will be reset by the command buffer update
static constexpr std::array LUT{
0u, // Invalid
1u, // SignedNorm
1u, // UnsignedNorm
2u, // SignedInt
3u, // UnsignedInt
1u, // UnsignedScaled
1u, // SignedScaled
1u, // Float
};
const auto& attrs = regs.vertex_attrib_format;
attribute_types = 0;
for (size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
const u32 mask = attrs[i].constant != 0 ? 0 : 3;
const u32 type = LUT[static_cast<size_t>(attrs[i].type.Value())];
attribute_types |= static_cast<u64>(type & mask) << (i * 2);
}
} else {
maxwell3d.dirty.flags[Dirty::VertexInput] = false;
enabled_divisors = 0;
for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
enabled_divisors |= (is_enabled ? u64{1} : 0) << index;
}
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
const auto& input = regs.vertex_attrib_format[index];
auto& attribute = attributes[index];
attribute.raw = 0;
attribute.enabled.Assign(input.constant ? 0 : 1);
attribute.buffer.Assign(input.buffer);
attribute.offset.Assign(input.offset);
attribute.type.Assign(static_cast<u32>(input.type.Value()));
attribute.size.Assign(static_cast<u32>(input.size.Value()));
}
}
}
if (maxwell3d.dirty.flags[Dirty::Blending]) {
@ -109,10 +149,12 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
return static_cast<u16>(viewport.swizzle.raw);
});
}
if (!has_extended_dynamic_state) {
no_extended_dynamic_state.Assign(1);
if (!extended_dynamic_state) {
dynamic_state.Refresh(regs);
}
if (xfb_enabled) {
RefreshXfbState(xfb_state, regs);
}
}
void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t index) {

View File

@ -12,6 +12,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
#include "video_core/transform_feedback.h"
namespace Vulkan {
@ -60,7 +61,7 @@ struct FixedPipelineState {
void Refresh(const Maxwell& regs, size_t index);
constexpr std::array<bool, 4> Mask() const noexcept {
std::array<bool, 4> Mask() const noexcept {
return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
}
@ -97,11 +98,11 @@ struct FixedPipelineState {
BitField<20, 3, u32> type;
BitField<23, 6, u32> size;
constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
Maxwell::VertexAttribute::Type Type() const noexcept {
return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
}
constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
Maxwell::VertexAttribute::Size Size() const noexcept {
return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
}
};
@ -167,37 +168,53 @@ struct FixedPipelineState {
union {
u32 raw1;
BitField<0, 1, u32> no_extended_dynamic_state;
BitField<2, 1, u32> primitive_restart_enable;
BitField<3, 1, u32> depth_bias_enable;
BitField<4, 1, u32> depth_clamp_disabled;
BitField<5, 1, u32> ndc_minus_one_to_one;
BitField<6, 2, u32> polygon_mode;
BitField<8, 5, u32> patch_control_points_minus_one;
BitField<13, 2, u32> tessellation_primitive;
BitField<15, 2, u32> tessellation_spacing;
BitField<17, 1, u32> tessellation_clockwise;
BitField<18, 1, u32> logic_op_enable;
BitField<19, 4, u32> logic_op;
BitField<23, 1, u32> rasterize_enable;
BitField<0, 1, u32> extended_dynamic_state;
BitField<1, 1, u32> dynamic_vertex_input;
BitField<2, 1, u32> xfb_enabled;
BitField<3, 1, u32> primitive_restart_enable;
BitField<4, 1, u32> depth_bias_enable;
BitField<5, 1, u32> depth_clamp_disabled;
BitField<6, 1, u32> ndc_minus_one_to_one;
BitField<7, 2, u32> polygon_mode;
BitField<9, 5, u32> patch_control_points_minus_one;
BitField<14, 2, u32> tessellation_primitive;
BitField<16, 2, u32> tessellation_spacing;
BitField<18, 1, u32> tessellation_clockwise;
BitField<19, 1, u32> logic_op_enable;
BitField<20, 4, u32> logic_op;
BitField<24, 4, Maxwell::PrimitiveTopology> topology;
BitField<28, 4, Tegra::Texture::MsaaMode> msaa_mode;
};
union {
u32 raw2;
BitField<0, 3, u32> alpha_test_func;
BitField<3, 1, u32> early_z;
BitField<0, 1, u32> rasterize_enable;
BitField<1, 3, u32> alpha_test_func;
BitField<4, 1, u32> early_z;
BitField<5, 1, u32> depth_enabled;
BitField<6, 5, u32> depth_format;
BitField<11, 1, u32> y_negate;
BitField<12, 1, u32> provoking_vertex_last;
BitField<13, 1, u32> conservative_raster_enable;
BitField<14, 1, u32> smooth_lines;
};
std::array<u8, Maxwell::NumRenderTargets> color_formats;
u32 alpha_test_ref;
u32 point_size;
std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
std::array<u16, Maxwell::NumViewports> viewport_swizzles;
DynamicState dynamic_state;
union {
u64 attribute_types; // Used with VK_EXT_vertex_input_dynamic_state
u64 enabled_divisors;
};
std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state);
DynamicState dynamic_state;
VideoCommon::TransformFeedbackState xfb_state;
void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state,
bool has_dynamic_vertex_input);
size_t Hash() const noexcept;
@ -208,8 +225,24 @@ struct FixedPipelineState {
}
size_t Size() const noexcept {
const size_t total_size = sizeof *this;
return total_size - (no_extended_dynamic_state != 0 ? 0 : sizeof(DynamicState));
if (xfb_enabled) {
// When transform feedback is enabled, use the whole struct
return sizeof(*this);
}
if (dynamic_vertex_input) {
// Exclude dynamic state and attributes
return offsetof(FixedPipelineState, attributes);
}
if (extended_dynamic_state) {
// Exclude dynamic state
return offsetof(FixedPipelineState, dynamic_state);
}
// Default: include the dynamic state, exclude only the transform feedback state
return offsetof(FixedPipelineState, xfb_state);
}
u32 DynamicAttributeType(size_t index) const noexcept {
return (attribute_types >> (index * 2)) & 0b11;
}
};
static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
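As a side note on the encoding above: attribute_types stores one 2-bit type class per vertex attribute (packed by the LUT loop in Refresh), and DynamicAttributeType shifts and masks it back out. A minimal standalone sketch of that packing scheme follows, assuming the same 0/1/2/3 class encoding; the helper names are illustrative and not part of the codebase:

#include <cstddef>
#include <cstdint>

// Illustrative helpers only; they mirror the 2-bits-per-attribute packing shown above.
constexpr void PackAttributeType(std::uint64_t& packed, std::size_t index, std::uint32_t type_class) {
    packed &= ~(std::uint64_t{0b11} << (index * 2));           // clear the 2-bit slot
    packed |= std::uint64_t{type_class & 0b11} << (index * 2); // store the new class
}

constexpr std::uint32_t UnpackAttributeType(std::uint64_t packed, std::size_t index) {
    return static_cast<std::uint32_t>((packed >> (index * 2)) & 0b11);
}

static_assert([] {
    std::uint64_t packed{};
    PackAttributeType(packed, 3, 2); // attribute 3 holds a signed integer class
    return UnpackAttributeType(packed, 3) == 2 && UnpackAttributeType(packed, 0) == 0;
}());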

View File

@ -157,7 +157,7 @@ struct FormatTuple {
{VK_FORMAT_R32_SFLOAT, Attachable | Storage}, // R32_FLOAT
{VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16_FLOAT
{VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16_UNORM
{VK_FORMAT_UNDEFINED}, // R16_SNORM
{VK_FORMAT_R16_SNORM, Attachable | Storage}, // R16_SNORM
{VK_FORMAT_R16_UINT, Attachable | Storage}, // R16_UINT
{VK_FORMAT_UNDEFINED}, // R16_SINT
{VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // R16G16_UNORM
@ -266,19 +266,20 @@ FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with
return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage};
}
VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
VkShaderStageFlagBits ShaderStage(Shader::Stage stage) {
switch (stage) {
case Tegra::Engines::ShaderType::Vertex:
case Shader::Stage::VertexA:
case Shader::Stage::VertexB:
return VK_SHADER_STAGE_VERTEX_BIT;
case Tegra::Engines::ShaderType::TesselationControl:
case Shader::Stage::TessellationControl:
return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
case Tegra::Engines::ShaderType::TesselationEval:
case Shader::Stage::TessellationEval:
return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
case Tegra::Engines::ShaderType::Geometry:
case Shader::Stage::Geometry:
return VK_SHADER_STAGE_GEOMETRY_BIT;
case Tegra::Engines::ShaderType::Fragment:
case Shader::Stage::Fragment:
return VK_SHADER_STAGE_FRAGMENT_BIT;
case Tegra::Engines::ShaderType::Compute:
case Shader::Stage::Compute:
return VK_SHADER_STAGE_COMPUTE_BIT;
}
UNIMPLEMENTED_MSG("Unimplemented shader stage={}", stage);
@ -685,6 +686,19 @@ VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face) {
return {};
}
VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode) {
switch (polygon_mode) {
case Maxwell::PolygonMode::Point:
return VK_POLYGON_MODE_POINT;
case Maxwell::PolygonMode::Line:
return VK_POLYGON_MODE_LINE;
case Maxwell::PolygonMode::Fill:
return VK_POLYGON_MODE_FILL;
}
UNIMPLEMENTED_MSG("Unimplemented polygon mode={}", polygon_mode);
return {};
}
VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
switch (swizzle) {
case Tegra::Texture::SwizzleSource::Zero:
@ -741,4 +755,28 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
}
VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
switch (msaa_mode) {
case Tegra::Texture::MsaaMode::Msaa1x1:
return VK_SAMPLE_COUNT_1_BIT;
case Tegra::Texture::MsaaMode::Msaa2x1:
case Tegra::Texture::MsaaMode::Msaa2x1_D3D:
return VK_SAMPLE_COUNT_2_BIT;
case Tegra::Texture::MsaaMode::Msaa2x2:
case Tegra::Texture::MsaaMode::Msaa2x2_VC4:
case Tegra::Texture::MsaaMode::Msaa2x2_VC12:
return VK_SAMPLE_COUNT_4_BIT;
case Tegra::Texture::MsaaMode::Msaa4x2:
case Tegra::Texture::MsaaMode::Msaa4x2_D3D:
case Tegra::Texture::MsaaMode::Msaa4x2_VC8:
case Tegra::Texture::MsaaMode::Msaa4x2_VC24:
return VK_SAMPLE_COUNT_8_BIT;
case Tegra::Texture::MsaaMode::Msaa4x4:
return VK_SAMPLE_COUNT_16_BIT;
default:
UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
return VK_SAMPLE_COUNT_1_BIT;
}
}
} // namespace Vulkan::MaxwellToVK

View File

@ -5,6 +5,7 @@
#pragma once
#include "common/common_types.h"
#include "shader_recompiler/stage.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
#include "video_core/textures/texture.h"
@ -45,7 +46,7 @@ struct FormatInfo {
[[nodiscard]] FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with_srgb,
PixelFormat pixel_format);
VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
VkShaderStageFlagBits ShaderStage(Shader::Stage stage);
VkPrimitiveTopology PrimitiveTopology(const Device& device, Maxwell::PrimitiveTopology topology);
@ -65,10 +66,14 @@ VkFrontFace FrontFace(Maxwell::FrontFace front_face);
VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face);
VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode);
VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle);
VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reduction);
VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode);
} // namespace Vulkan::MaxwellToVK

View File

@ -0,0 +1,154 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <boost/container/small_vector.hpp>
#include "common/assert.h"
#include "common/common_types.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/texture_cache/types.h"
#include "video_core/textures/texture.h"
#include "video_core/vulkan_common/vulkan_device.h"
namespace Vulkan {
class DescriptorLayoutBuilder {
public:
DescriptorLayoutBuilder(const Device& device_) : device{&device_} {}
bool CanUsePushDescriptor() const noexcept {
return device->IsKhrPushDescriptorSupported() &&
num_descriptors <= device->MaxPushDescriptors();
}
vk::DescriptorSetLayout CreateDescriptorSetLayout(bool use_push_descriptor) const {
if (bindings.empty()) {
return nullptr;
}
const VkDescriptorSetLayoutCreateFlags flags =
use_push_descriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
return device->GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = flags,
.bindingCount = static_cast<u32>(bindings.size()),
.pBindings = bindings.data(),
});
}
vk::DescriptorUpdateTemplateKHR CreateTemplate(VkDescriptorSetLayout descriptor_set_layout,
VkPipelineLayout pipeline_layout,
bool use_push_descriptor) const {
if (entries.empty()) {
return nullptr;
}
const VkDescriptorUpdateTemplateType type =
use_push_descriptor ? VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR
: VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
return device->GetLogical().CreateDescriptorUpdateTemplateKHR({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
.pNext = nullptr,
.flags = 0,
.descriptorUpdateEntryCount = static_cast<u32>(entries.size()),
.pDescriptorUpdateEntries = entries.data(),
.templateType = type,
.descriptorSetLayout = descriptor_set_layout,
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.pipelineLayout = pipeline_layout,
.set = 0,
});
}
vk::PipelineLayout CreatePipelineLayout(VkDescriptorSetLayout descriptor_set_layout) const {
return device->GetLogical().CreatePipelineLayout({
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.setLayoutCount = descriptor_set_layout ? 1U : 0U,
.pSetLayouts = bindings.empty() ? nullptr : &descriptor_set_layout,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
});
}
void Add(const Shader::Info& info, VkShaderStageFlags stage) {
Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage, info.constant_buffer_descriptors);
Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage, info.storage_buffers_descriptors);
Add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, stage, info.texture_buffer_descriptors);
Add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, stage, info.image_buffer_descriptors);
Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage, info.texture_descriptors);
Add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, stage, info.image_descriptors);
}
private:
template <typename Descriptors>
void Add(VkDescriptorType type, VkShaderStageFlags stage, const Descriptors& descriptors) {
const size_t num{descriptors.size()};
for (size_t i = 0; i < num; ++i) {
bindings.push_back({
.binding = binding,
.descriptorType = type,
.descriptorCount = descriptors[i].count,
.stageFlags = stage,
.pImmutableSamplers = nullptr,
});
entries.push_back({
.dstBinding = binding,
.dstArrayElement = 0,
.descriptorCount = descriptors[i].count,
.descriptorType = type,
.offset = offset,
.stride = sizeof(DescriptorUpdateEntry),
});
++binding;
num_descriptors += descriptors[i].count;
offset += sizeof(DescriptorUpdateEntry);
}
}
const Device* device{};
boost::container::small_vector<VkDescriptorSetLayoutBinding, 32> bindings;
boost::container::small_vector<VkDescriptorUpdateTemplateEntryKHR, 32> entries;
u32 binding{};
u32 num_descriptors{};
size_t offset{};
};
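A rough usage sketch of this builder, roughly as the graphics and compute pipelines drive it; the function name and the stage_info parameter below are placeholders for illustration:

// Illustration only: create the layout objects for a single compute stage.
inline void BuildComputeLayouts(const Device& device, const Shader::Info& stage_info) {
    DescriptorLayoutBuilder builder{device};
    builder.Add(stage_info, VK_SHADER_STAGE_COMPUTE_BIT);

    const bool use_push = builder.CanUsePushDescriptor();
    vk::DescriptorSetLayout set_layout = builder.CreateDescriptorSetLayout(use_push);
    vk::PipelineLayout pipeline_layout = builder.CreatePipelineLayout(*set_layout);
    vk::DescriptorUpdateTemplateKHR update_template =
        builder.CreateTemplate(*set_layout, *pipeline_layout, use_push);
    // A real caller keeps these handles alive for the lifetime of the pipeline that uses them.
}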
inline void PushImageDescriptors(const Shader::Info& info, const VkSampler*& samplers,
const ImageId*& image_view_ids, TextureCache& texture_cache,
VKUpdateDescriptorQueue& update_descriptor_queue) {
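// Texture and image buffer descriptors are bound through the buffer cache, so
// skip their image view ids here and only push sampled and storage images.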
for (const auto& desc : info.texture_buffer_descriptors) {
image_view_ids += desc.count;
}
for (const auto& desc : info.image_buffer_descriptors) {
image_view_ids += desc.count;
}
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const VkSampler sampler{*(samplers++)};
ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
const VkImageView vk_image_view{image_view.Handle(desc.type)};
update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
}
}
for (const auto& desc : info.image_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
if (desc.is_written) {
texture_cache.MarkModification(image_view.image_id);
}
const VkImageView vk_image_view{image_view.StorageView(desc.type, desc.format)};
update_descriptor_queue.AddImage(vk_image_view);
}
}
}
} // namespace Vulkan

View File

@ -130,35 +130,45 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
if (!framebuffer) {
return;
}
const auto& layout = render_window.GetFramebufferLayout();
if (layout.width > 0 && layout.height > 0 && render_window.IsShown()) {
const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
const bool use_accelerated =
rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
const bool is_srgb = use_accelerated && screen_info.is_srgb;
if (swapchain.HasFramebufferChanged(layout) || swapchain.GetSrgbState() != is_srgb) {
swapchain.Create(layout.width, layout.height, is_srgb);
blit_screen.Recreate();
}
scheduler.WaitWorker();
while (!swapchain.AcquireNextImage()) {
swapchain.Create(layout.width, layout.height, is_srgb);
blit_screen.Recreate();
}
const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
scheduler.Flush(render_semaphore);
if (swapchain.Present(render_semaphore)) {
blit_screen.Recreate();
}
gpu.RendererFrameEndNotify();
rasterizer.TickFrame();
SCOPE_EXIT({ render_window.OnFrameDisplayed(); });
if (!render_window.IsShown()) {
return;
}
const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
const bool use_accelerated =
rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
const bool is_srgb = use_accelerated && screen_info.is_srgb;
render_window.OnFrameDisplayed();
bool has_been_recreated = false;
const auto recreate_swapchain = [&] {
if (!has_been_recreated) {
has_been_recreated = true;
scheduler.WaitWorker();
}
const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
swapchain.Create(layout.width, layout.height, is_srgb);
};
if (swapchain.IsSubOptimal() || swapchain.HasColorSpaceChanged(is_srgb)) {
recreate_swapchain();
}
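// Keep acquiring until the swapchain is no longer out of date, recreating it as
// needed; the blit screen is recreated once afterwards if anything changed.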
bool is_outdated;
do {
swapchain.AcquireNextImage();
is_outdated = swapchain.IsOutDated();
if (is_outdated) {
recreate_swapchain();
}
} while (is_outdated);
if (has_been_recreated) {
blit_screen.Recreate();
}
const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
scheduler.Flush(render_semaphore);
scheduler.WaitWorker();
swapchain.Present(render_semaphore);
gpu.RendererFrameEndNotify();
rasterizer.TickFrame();
}
void RendererVulkan::Report() const {

View File

@ -184,47 +184,43 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
.depth = 1,
},
};
scheduler.Record(
[buffer = *buffer, image = *raw_images[image_index], copy](vk::CommandBuffer cmdbuf) {
const VkImageMemoryBarrier base_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange =
{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
VkImageMemoryBarrier read_barrier = base_barrier;
read_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
read_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
read_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
scheduler.Record([this, copy, image_index](vk::CommandBuffer cmdbuf) {
const VkImage image = *raw_images[image_index];
const VkImageMemoryBarrier base_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
VkImageMemoryBarrier read_barrier = base_barrier;
read_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
read_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
read_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageMemoryBarrier write_barrier = base_barrier;
write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
VkImageMemoryBarrier write_barrier = base_barrier;
write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, read_barrier);
cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
});
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
read_barrier);
cmdbuf.CopyBufferToImage(*buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
});
}
scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
descriptor_set = descriptor_sets[image_index], buffer = *buffer,
size = swapchain.GetSize(), pipeline = *pipeline,
layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, image_index, size = swapchain.GetSize()](vk::CommandBuffer cmdbuf) {
const f32 bg_red = Settings::values.bg_red.GetValue() / 255.0f;
const f32 bg_green = Settings::values.bg_green.GetValue() / 255.0f;
const f32 bg_blue = Settings::values.bg_blue.GetValue() / 255.0f;
@ -234,8 +230,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
const VkRenderPassBeginInfo renderpass_bi{
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.pNext = nullptr,
.renderPass = renderpass,
.framebuffer = framebuffer,
.renderPass = *renderpass,
.framebuffer = *framebuffers[image_index],
.renderArea =
{
.offset = {0, 0},
@ -257,12 +253,13 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
.extent = size,
};
cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
cmdbuf.SetViewport(0, viewport);
cmdbuf.SetScissor(0, scissor);
cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
cmdbuf.BindVertexBuffer(0, *buffer, offsetof(BufferData, vertices));
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline_layout, 0,
descriptor_sets[image_index], {});
cmdbuf.Draw(4, 1, 0, 0);
cmdbuf.EndRenderPass();
});
@ -304,8 +301,7 @@ void VKBlitScreen::CreateShaders() {
void VKBlitScreen::CreateSemaphores() {
semaphores.resize(image_count);
std::generate(semaphores.begin(), semaphores.end(),
[this] { return device.GetLogical().CreateSemaphore(); });
std::ranges::generate(semaphores, [this] { return device.GetLogical().CreateSemaphore(); });
}
void VKBlitScreen::CreateDescriptorPool() {
@ -633,8 +629,8 @@ void VKBlitScreen::CreateFramebuffers() {
}
void VKBlitScreen::ReleaseRawImages() {
for (std::size_t i = 0; i < raw_images.size(); ++i) {
scheduler.Wait(resource_ticks.at(i));
for (const u64 tick : resource_ticks) {
scheduler.Wait(tick);
}
raw_images.clear();
raw_buffer_commits.clear();

View File

@ -60,6 +60,27 @@ std::array<T, 6> MakeQuadIndices(u32 quad, u32 first) {
}
return indices;
}
vk::Buffer CreateBuffer(const Device& device, u64 size) {
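// Generic usage flags so one buffer object can back vertex, index, uniform,
// storage and texel bindings, plus transform feedback when supported.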
VkBufferUsageFlags flags =
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
if (device.IsExtTransformFeedbackSupported()) {
flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
}
return device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
}
} // Anonymous namespace
Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
@ -67,31 +88,46 @@ Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
VAddr cpu_addr_, u64 size_bytes_)
: VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_) {
buffer = runtime.device.GetLogical().CreateBuffer(VkBufferCreateInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = SizeBytes(),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
: VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_),
device{&runtime.device}, buffer{CreateBuffer(*device, SizeBytes())},
commit{runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal)} {
if (runtime.device.HasDebuggingToolAttached()) {
buffer.SetObjectNameEXT(fmt::format("Buffer 0x{:x}", CpuAddr()).c_str());
}
commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
}
VkBufferView Buffer::View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format) {
if (!device) {
// Null buffer, return a null descriptor
return VK_NULL_HANDLE;
}
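// Reuse a previously created view when offset, size and format all match;
// otherwise create and cache a new one below.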
const auto it{std::ranges::find_if(views, [offset, size, format](const BufferView& view) {
return offset == view.offset && size == view.size && format == view.format;
})};
if (it != views.end()) {
return *it->handle;
}
views.push_back({
.offset = offset,
.size = size,
.format = format,
.handle = device->GetLogical().CreateBufferView({
.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.buffer = *buffer,
.format = MaxwellToVK::SurfaceFormat(*device, FormatType::Buffer, false, format).format,
.offset = offset,
.range = size,
}),
});
return *views.back().handle;
}
BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
VKDescriptorPool& descriptor_pool)
DescriptorPool& descriptor_pool)
: device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_},
uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),

View File

@ -9,13 +9,14 @@
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
class Device;
class VKDescriptorPool;
class DescriptorPool;
class VKScheduler;
class BufferCacheRuntime;
@ -26,6 +27,8 @@ public:
explicit Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
VAddr cpu_addr_, u64 size_bytes_);
[[nodiscard]] VkBufferView View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format);
[[nodiscard]] VkBuffer Handle() const noexcept {
return *buffer;
}
@ -35,8 +38,17 @@ public:
}
private:
struct BufferView {
u32 offset;
u32 size;
VideoCore::Surface::PixelFormat format;
vk::BufferView handle;
};
const Device* device{};
vk::Buffer buffer;
MemoryCommit commit;
std::vector<BufferView> views;
};
class BufferCacheRuntime {
@ -49,7 +61,7 @@ public:
explicit BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_manager_,
VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
VKDescriptorPool& descriptor_pool);
DescriptorPool& descriptor_pool);
void Finish();
@ -87,6 +99,11 @@ public:
BindBuffer(buffer, offset, size);
}
void BindTextureBuffer(Buffer& buffer, u32 offset, u32 size,
VideoCore::Surface::PixelFormat format) {
update_descriptor_queue.AddTexelBuffer(buffer.View(offset, size, format));
}
private:
void BindBuffer(VkBuffer buffer, u32 offset, u32 size) {
update_descriptor_queue.AddBuffer(buffer, offset, size);
@ -124,6 +141,7 @@ struct BufferCacheParams {
static constexpr bool NEEDS_BIND_UNIFORM_INDEX = false;
static constexpr bool NEEDS_BIND_STORAGE_INDEX = false;
static constexpr bool USE_MEMORY_MAPS = true;
static constexpr bool SEPARATE_IMAGE_BUFFER_BINDINGS = false;
};
using BufferCache = VideoCommon::BufferCache<BufferCacheParams>;

View File

@ -41,80 +41,92 @@ constexpr u32 ASTC_BINDING_SWIZZLE_BUFFER = 2;
constexpr u32 ASTC_BINDING_OUTPUT_IMAGE = 3;
constexpr size_t ASTC_NUM_BINDINGS = 4;
VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
return {
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.offset = 0,
.size = static_cast<u32>(size),
};
}
template <size_t size>
inline constexpr VkPushConstantRange COMPUTE_PUSH_CONSTANT_RANGE{
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.offset = 0,
.size = static_cast<u32>(size),
};
std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
return {{
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};
}
std::array<VkDescriptorSetLayoutBinding, ASTC_NUM_BINDINGS> BuildASTCDescriptorSetBindings() {
return {{
{
.binding = ASTC_BINDING_INPUT_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_ENC_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_SWIZZLE_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_OUTPUT_IMAGE,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};
}
VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
return {
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 2,
constexpr std::array<VkDescriptorSetLayoutBinding, 2> INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS{{
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.offset = 0,
.stride = sizeof(DescriptorUpdateEntry),
};
}
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};
std::array<VkDescriptorUpdateTemplateEntryKHR, ASTC_NUM_BINDINGS>
BuildASTCPassDescriptorUpdateTemplateEntry() {
return {{
constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{
.uniform_buffers = 0,
.storage_buffers = 2,
.texture_buffers = 0,
.image_buffers = 0,
.textures = 0,
.images = 0,
.score = 2,
};
constexpr std::array<VkDescriptorSetLayoutBinding, 4> ASTC_DESCRIPTOR_SET_BINDINGS{{
{
.binding = ASTC_BINDING_INPUT_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_ENC_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_SWIZZLE_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_OUTPUT_IMAGE,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};
constexpr DescriptorBankInfo ASTC_BANK_INFO{
.uniform_buffers = 0,
.storage_buffers = 3,
.texture_buffers = 0,
.image_buffers = 0,
.textures = 0,
.images = 1,
.score = 4,
};
constexpr VkDescriptorUpdateTemplateEntryKHR INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE{
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 2,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.offset = 0,
.stride = sizeof(DescriptorUpdateEntry),
};
constexpr std::array<VkDescriptorUpdateTemplateEntryKHR, ASTC_NUM_BINDINGS>
ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY{{
{
.dstBinding = ASTC_BINDING_INPUT_BUFFER,
.dstArrayElement = 0,
@ -148,7 +160,6 @@ BuildASTCPassDescriptorUpdateTemplateEntry() {
.stride = sizeof(DescriptorUpdateEntry),
},
}};
}
struct AstcPushConstants {
std::array<u32, 2> blocks_dims;
@ -159,14 +170,14 @@ struct AstcPushConstants {
u32 block_height;
u32 block_height_mask;
};
} // Anonymous namespace
VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
vk::Span<VkPushConstantRange> push_constants,
std::span<const u32> code) {
ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
const DescriptorBankInfo& bank_info,
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code)
: device{device_} {
descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
@ -196,8 +207,7 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
.pipelineLayout = *layout,
.set = 0,
});
descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, bank_info);
}
module = device.GetLogical().CreateShaderModule({
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
@ -206,43 +216,34 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
.codeSize = static_cast<u32>(code.size_bytes()),
.pCode = code.data(),
});
device.SaveShader(code);
pipeline = device.GetLogical().CreateComputePipeline({
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage =
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
.stage{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
.layout = *layout,
.basePipelineHandle = nullptr,
.basePipelineIndex = 0,
});
}
VKComputePass::~VKComputePass() = default;
ComputePass::~ComputePass() = default;
VkDescriptorSet VKComputePass::CommitDescriptorSet(
VKUpdateDescriptorQueue& update_descriptor_queue) {
if (!descriptor_template) {
return nullptr;
}
const VkDescriptorSet set = descriptor_allocator->Commit();
update_descriptor_queue.Send(*descriptor_template, set);
return set;
}
Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
Uint8Pass::Uint8Pass(const Device& device_, VKScheduler& scheduler_,
DescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
: VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),
BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV),
: ComputePass(device_, descriptor_pool, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, {},
VULKAN_UINT8_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_} {}
@ -256,11 +257,11 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
const VkBuffer buffer{staging.buffer};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set,
num_vertices](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, buffer, descriptor_data, num_vertices](vk::CommandBuffer cmdbuf) {
static constexpr u32 DISPATCH_SIZE = 1024;
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@ -268,8 +269,10 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
};
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
const VkDescriptorSet set = descriptor_allocator.Commit();
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.Dispatch(Common::DivCeil(num_vertices, DISPATCH_SIZE), 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, WRITE_BARRIER);
@ -278,12 +281,12 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
}
QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
: VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(),
BuildInputOutputDescriptorUpdateTemplate(),
BuildComputePushConstantRange(sizeof(u32) * 2), VULKAN_QUAD_INDEXED_COMP_SPV),
: ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 2>, VULKAN_QUAD_INDEXED_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_} {}
@ -313,11 +316,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set,
num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, buffer = staging.buffer, descriptor_data, num_tri_vertices, base_vertex,
index_shift](vk::CommandBuffer cmdbuf) {
static constexpr u32 DISPATCH_SIZE = 1024;
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@ -325,10 +328,12 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
};
const std::array push_constants = {base_vertex, index_shift};
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
const std::array push_constants{base_vertex, index_shift};
const VkDescriptorSet set = descriptor_allocator.Commit();
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
&push_constants);
cmdbuf.Dispatch(Common::DivCeil(num_tri_vertices, DISPATCH_SIZE), 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
@ -338,15 +343,14 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
}
ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
MemoryAllocator& memory_allocator_)
: VKComputePass(device_, descriptor_pool_, BuildASTCDescriptorSetBindings(),
BuildASTCPassDescriptorUpdateTemplateEntry(),
BuildComputePushConstantRange(sizeof(AstcPushConstants)),
ASTC_DECODER_COMP_SPV),
device{device_}, scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
: ComputePass(device_, descriptor_pool_, ASTC_DESCRIPTOR_SET_BINDINGS,
ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY, ASTC_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(AstcPushConstants)>, ASTC_DECODER_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
ASTCDecoderPass::~ASTCDecoderPass() = default;
@ -444,16 +448,14 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
update_descriptor_queue.AddBuffer(*data_buffer, sizeof(ASTC_ENCODINGS_VALUES),
sizeof(SWIZZLE_TABLE));
update_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
const VkPipelineLayout vk_layout = *layout;
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
// To unswizzle the ASTC data
const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
ASSERT(params.origin == (std::array<u32, 3>{0, 0, 0}));
ASSERT(params.destination == (std::array<s32, 3>{0, 0, 0}));
scheduler.Record([vk_layout, num_dispatches_x, num_dispatches_y, num_dispatches_z,
block_dims, params, set](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, num_dispatches_x, num_dispatches_y, num_dispatches_z, block_dims,
params, descriptor_data](vk::CommandBuffer cmdbuf) {
const AstcPushConstants uniforms{
.blocks_dims = block_dims,
.bytes_per_block_log2 = params.bytes_per_block_log2,
@ -463,8 +465,10 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
.block_height = params.block_height,
.block_height_mask = params.block_height_mask,
};
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, vk_layout, 0, set, {});
cmdbuf.PushConstants(vk_layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
const VkDescriptorSet set = descriptor_allocator.Commit();
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
cmdbuf.Dispatch(num_dispatches_x, num_dispatches_y, num_dispatches_z);
});
}

View File

@ -4,7 +4,6 @@
#pragma once
#include <optional>
#include <span>
#include <utility>
@ -27,31 +26,31 @@ class VKUpdateDescriptorQueue;
class Image;
struct StagingBufferRef;
class VKComputePass {
class ComputePass {
public:
explicit VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
~VKComputePass();
explicit ComputePass(const Device& device, DescriptorPool& descriptor_pool,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
const DescriptorBankInfo& bank_info,
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
~ComputePass();
protected:
VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue);
const Device& device;
vk::DescriptorUpdateTemplateKHR descriptor_template;
vk::PipelineLayout layout;
vk::Pipeline pipeline;
vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
private:
vk::DescriptorSetLayout descriptor_set_layout;
std::optional<DescriptorAllocator> descriptor_allocator;
vk::ShaderModule module;
};
class Uint8Pass final : public VKComputePass {
class Uint8Pass final : public ComputePass {
public:
explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_);
~Uint8Pass();
@ -66,10 +65,10 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
};
class QuadIndexedPass final : public VKComputePass {
class QuadIndexedPass final : public ComputePass {
public:
explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_);
~QuadIndexedPass();
@ -84,10 +83,10 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
};
class ASTCDecoderPass final : public VKComputePass {
class ASTCDecoderPass final : public ComputePass {
public:
explicit ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
MemoryAllocator& memory_allocator_);
@ -99,7 +98,6 @@ public:
private:
void MakeDataBuffer();
const Device& device;
VKScheduler& scheduler;
StagingBufferPool& staging_buffer_pool;
VKUpdateDescriptorQueue& update_descriptor_queue;

View File

@ -2,152 +2,198 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <vector>
#include <boost/container/small_vector.hpp>
#include "video_core/renderer_vulkan/pipeline_helper.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
VKComputePipeline::VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
const SPIRVShader& shader_)
: device{device_}, scheduler{scheduler_}, entries{shader_.entries},
descriptor_set_layout{CreateDescriptorSetLayout()},
descriptor_allocator{descriptor_pool_, *descriptor_set_layout},
update_descriptor_queue{update_descriptor_queue_}, layout{CreatePipelineLayout()},
descriptor_template{CreateDescriptorUpdateTemplate()},
shader_module{CreateShaderModule(shader_.code)}, pipeline{CreatePipeline()} {}
using Shader::ImageBufferDescriptor;
using Tegra::Texture::TexturePair;
VKComputePipeline::~VKComputePipeline() = default;
VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
if (!descriptor_template) {
return {};
ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue_,
Common::ThreadWorker* thread_worker,
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info_,
vk::ShaderModule spv_module_)
: device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
spv_module(std::move(spv_module_)) {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
}
const VkDescriptorSet set = descriptor_allocator.Commit();
update_descriptor_queue.Send(*descriptor_template, set);
return set;
}
std::copy_n(info.constant_buffer_used_sizes.begin(), uniform_buffer_sizes.size(),
uniform_buffer_sizes.begin());
vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
std::vector<VkDescriptorSetLayoutBinding> bindings;
u32 binding = 0;
const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
// TODO(Rodrigo): Maybe make individual bindings here?
for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
bindings.push_back({
.binding = binding++,
.descriptorType = descriptor_type,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
});
}
};
add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
auto func{[this, &descriptor_pool, shader_notify] {
DescriptorLayoutBuilder builder{device};
builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
return device.GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.bindingCount = static_cast<u32>(bindings.size()),
.pBindings = bindings.data(),
});
}
vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
return device.GetLogical().CreatePipelineLayout({
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.setLayoutCount = 1,
.pSetLayouts = descriptor_set_layout.address(),
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
});
}
vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
u32 binding = 0;
u32 offset = 0;
FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
if (template_entries.empty()) {
// If the shader doesn't use descriptor sets, skip template creation.
return {};
}
return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
.pNext = nullptr,
.flags = 0,
.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
.pDescriptorUpdateEntries = template_entries.data(),
.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
.descriptorSetLayout = *descriptor_set_layout,
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.pipelineLayout = *layout,
.set = DESCRIPTOR_SET,
});
}
vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
device.SaveShader(code);
return device.GetLogical().CreateShaderModule({
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.codeSize = code.size() * sizeof(u32),
.pCode = code.data(),
});
}
vk::Pipeline VKComputePipeline::CreatePipeline() const {
VkComputePipelineCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage =
{
descriptor_set_layout = builder.CreateDescriptorSetLayout(false);
pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
descriptor_update_template =
builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout, false);
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
.pNext = nullptr,
.requiredSubgroupSize = GuestWarpSize,
};
pipeline = device.GetLogical().CreateComputePipeline({
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.pNext = device.IsExtSubgroupSizeControlSupported() ? &subgroup_size_ci : nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *shader_module,
.module = *spv_module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
.layout = *layout,
.basePipelineHandle = nullptr,
.basePipelineIndex = 0,
};
.layout = *pipeline_layout,
.basePipelineHandle = 0,
.basePipelineIndex = 0,
});
std::lock_guard lock{build_mutex};
is_built = true;
build_condvar.notify_one();
if (shader_notify) {
shader_notify->MarkShaderComplete();
}
}};
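// Build asynchronously on the worker thread when one is provided; Configure
// waits on build_condvar if the pipeline is used before the build completes.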
if (thread_worker) {
thread_worker->QueueWork(std::move(func));
} else {
func();
}
}
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
.pNext = nullptr,
.requiredSubgroupSize = GuestWarpSize,
};
void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
BufferCache& buffer_cache, TextureCache& texture_cache) {
update_descriptor_queue.Acquire();
if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
ci.stage.pNext = &subgroup_size_ci;
buffer_cache.SetComputeUniformBufferState(info.constant_buffer_mask, &uniform_buffer_sizes);
buffer_cache.UnbindComputeStorageBuffers();
size_t ssbo_index{};
for (const auto& desc : info.storage_buffers_descriptors) {
ASSERT(desc.count == 1);
buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
desc.is_written);
++ssbo_index;
}
return device.GetLogical().CreateComputePipeline(ci);
texture_cache.SynchronizeComputeDescriptors();
static constexpr size_t max_elements = 64;
std::array<ImageId, max_elements> image_view_ids;
boost::container::static_vector<u32, max_elements> image_view_indices;
boost::container::static_vector<VkSampler, max_elements> samplers;
const auto& qmd{kepler_compute.launch_description};
const auto& cbufs{qmd.const_buffer_config};
const bool via_header_index{qmd.linked_tsc != 0};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
const u32 index_offset{index << desc.size_shift};
const u32 offset{desc.cbuf_offset + index_offset};
const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + offset};
if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
if (desc.has_secondary) {
ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
secondary_offset};
const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
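// The bindless handle is split across two constant buffers; OR the halves together.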
return TexturePair(lhs_raw | rhs_raw, via_header_index);
}
}
return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices.push_back(handle.first);
}
}};
std::ranges::for_each(info.texture_buffer_descriptors, add_image);
std::ranges::for_each(info.image_buffer_descriptors, add_image);
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const auto handle{read_handle(desc, index)};
image_view_indices.push_back(handle.first);
Sampler* const sampler = texture_cache.GetComputeSampler(handle.second);
samplers.push_back(sampler->Handle());
}
}
std::ranges::for_each(info.image_descriptors, add_image);
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
buffer_cache.UnbindComputeTextureBuffers();
ImageId* texture_buffer_ids{image_view_ids.data()};
size_t index{};
const auto add_buffer{[&](const auto& desc) {
constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
for (u32 i = 0; i < desc.count; ++i) {
bool is_written{false};
if constexpr (is_image) {
is_written = desc.is_written;
}
ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(),
image_view.BufferSize(), image_view.format,
is_written, is_image);
++texture_buffer_ids;
++index;
}
}};
std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
buffer_cache.UpdateComputeBuffers();
buffer_cache.BindHostComputeBuffers();
const VkSampler* samplers_it{samplers.data()};
const ImageId* views_it{image_view_ids.data()};
PushImageDescriptors(info, samplers_it, views_it, texture_cache, update_descriptor_queue);
if (!is_built.load(std::memory_order::relaxed)) {
// Wait for the pipeline to be built
scheduler.Record([this](vk::CommandBuffer) {
std::unique_lock lock{build_mutex};
build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
});
}
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
if (!descriptor_set_layout) {
return;
}
const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
const vk::Device& dev{device.GetLogical()};
dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
descriptor_set, nullptr);
});
}
} // namespace Vulkan
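
Configure() above resolves each texture or image descriptor to a raw handle read straight out of the guest constant buffers; when a descriptor carries a secondary constant buffer, the two 32-bit words are OR'd together before the result is split for the texture cache lookup. Below is a minimal stand-alone sketch of that handle assembly — the ConstBuffer type, the AssembleHandle helper and the 20/12 bit split are illustrative assumptions, not the engine's API (the real code reads through gpu_memory and TexturePair):

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Hypothetical stand-in for a guest constant buffer mapped into host memory.
struct ConstBuffer {
    std::vector<std::uint8_t> data;

    std::uint32_t Read(std::uint32_t offset) const {
        std::uint32_t value{};
        std::memcpy(&value, data.data() + offset, sizeof(value)); // caller guarantees bounds
        return value;
    }
};

// Combine the primary and optional secondary handle words, then split the
// result into (texture index, sampler index). The low-20/high-12 bit layout
// is the usual bindless handle packing and is assumed here for illustration.
std::pair<std::uint32_t, std::uint32_t> AssembleHandle(const ConstBuffer& primary,
                                                       std::uint32_t offset,
                                                       const ConstBuffer* secondary,
                                                       std::uint32_t secondary_offset) {
    std::uint32_t raw = primary.Read(offset);
    if (secondary != nullptr) {
        raw |= secondary->Read(secondary_offset); // matches the lhs_raw | rhs_raw combination above
    }
    return {raw & 0xfffffu, raw >> 20};
}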

View File

@ -4,61 +4,63 @@
#pragma once
#include <atomic>
#include <condition_variable>
#include <mutex>
#include "common/common_types.h"
#include "common/thread_worker.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace VideoCore {
class ShaderNotify;
}
namespace Vulkan {
class Device;
class VKScheduler;
class VKUpdateDescriptorQueue;
class VKComputePipeline final {
class ComputePipeline {
public:
explicit VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
const SPIRVShader& shader_);
~VKComputePipeline();
explicit ComputePipeline(const Device& device, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue,
Common::ThreadWorker* thread_worker,
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info,
vk::ShaderModule spv_module);
VkDescriptorSet CommitDescriptorSet();
ComputePipeline& operator=(ComputePipeline&&) noexcept = delete;
ComputePipeline(ComputePipeline&&) noexcept = delete;
VkPipeline GetHandle() const {
return *pipeline;
}
ComputePipeline& operator=(const ComputePipeline&) = delete;
ComputePipeline(const ComputePipeline&) = delete;
VkPipelineLayout GetLayout() const {
return *layout;
}
const ShaderEntries& GetEntries() const {
return entries;
}
void Configure(Tegra::Engines::KeplerCompute& kepler_compute, Tegra::MemoryManager& gpu_memory,
VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache);
private:
vk::DescriptorSetLayout CreateDescriptorSetLayout() const;
vk::PipelineLayout CreatePipelineLayout() const;
vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate() const;
vk::ShaderModule CreateShaderModule(const std::vector<u32>& code) const;
vk::Pipeline CreatePipeline() const;
const Device& device;
VKScheduler& scheduler;
ShaderEntries entries;
VKUpdateDescriptorQueue& update_descriptor_queue;
Shader::Info info;
VideoCommon::ComputeUniformBufferSizes uniform_buffer_sizes{};
vk::ShaderModule spv_module;
vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
VKUpdateDescriptorQueue& update_descriptor_queue;
vk::PipelineLayout layout;
vk::DescriptorUpdateTemplateKHR descriptor_template;
vk::ShaderModule shader_module;
vk::PipelineLayout pipeline_layout;
vk::DescriptorUpdateTemplateKHR descriptor_update_template;
vk::Pipeline pipeline;
std::condition_variable build_condvar;
std::mutex build_mutex;
std::atomic_bool is_built{false};
};
} // namespace Vulkan
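
The pipeline declared above is built asynchronously: a worker thread creates the VkPipeline while is_built, build_mutex and build_condvar publish completion, and Configure() records a wait into the scheduler when the handle is not ready yet. A self-contained sketch of that publish/wait pairing, assuming a hypothetical BuildSomething() payload in place of the actual pipeline creation:

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

class AsyncBuilt {
public:
    // Producer: run the expensive build off-thread, then publish completion.
    void StartBuild() {
        worker = std::thread([this] {
            BuildSomething(); // hypothetical expensive work (pipeline creation)
            std::scoped_lock lock{build_mutex};
            is_built.store(true, std::memory_order::relaxed);
            build_condvar.notify_all();
        });
    }

    // Consumer: cheap atomic check first, block on the condition variable only
    // while the build is still in flight.
    void WaitUntilBuilt() {
        if (is_built.load(std::memory_order::relaxed)) {
            return;
        }
        std::unique_lock lock{build_mutex};
        build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
    }

    ~AsyncBuilt() {
        if (worker.joinable()) {
            worker.join();
        }
    }

private:
    void BuildSomething() {} // placeholder

    std::thread worker;
    std::condition_variable build_condvar;
    std::mutex build_mutex;
    std::atomic_bool is_built{false};
};

The relaxed loads are sound in this sketch because the mutex held around the store and the predicate re-check inside wait() provide the ordering the waiter needs.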

View File

@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <mutex>
#include <span>
#include <vector>
#include "common/common_types.h"
@ -13,79 +15,149 @@
namespace Vulkan {
// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines.
constexpr std::size_t SETS_GROW_RATE = 0x20;
// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines
constexpr size_t SETS_GROW_RATE = 16;
constexpr s32 SCORE_THRESHOLD = 3;
constexpr u32 SETS_PER_POOL = 64;
DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool_,
VkDescriptorSetLayout layout_)
: ResourcePool(descriptor_pool_.master_semaphore, SETS_GROW_RATE),
descriptor_pool{descriptor_pool_}, layout{layout_} {}
struct DescriptorBank {
DescriptorBankInfo info;
std::vector<vk::DescriptorPool> pools;
};
DescriptorAllocator::~DescriptorAllocator() = default;
VkDescriptorSet DescriptorAllocator::Commit() {
const std::size_t index = CommitResource();
return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
bool DescriptorBankInfo::IsSuperset(const DescriptorBankInfo& subset) const noexcept {
return uniform_buffers >= subset.uniform_buffers && storage_buffers >= subset.storage_buffers &&
texture_buffers >= subset.texture_buffers && image_buffers >= subset.image_buffers &&
textures >= subset.textures && images >= subset.images;
}
void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
template <typename Descriptors>
static u32 Accumulate(const Descriptors& descriptors) {
u32 count = 0;
for (const auto& descriptor : descriptors) {
count += descriptor.count;
}
return count;
}
VKDescriptorPool::VKDescriptorPool(const Device& device_, VKScheduler& scheduler)
: device{device_}, master_semaphore{scheduler.GetMasterSemaphore()}, active_pool{
AllocateNewPool()} {}
static DescriptorBankInfo MakeBankInfo(std::span<const Shader::Info> infos) {
DescriptorBankInfo bank;
for (const Shader::Info& info : infos) {
bank.uniform_buffers += Accumulate(info.constant_buffer_descriptors);
bank.storage_buffers += Accumulate(info.storage_buffers_descriptors);
bank.texture_buffers += Accumulate(info.texture_buffer_descriptors);
bank.image_buffers += Accumulate(info.image_buffer_descriptors);
bank.textures += Accumulate(info.texture_descriptors);
bank.images += Accumulate(info.image_descriptors);
}
bank.score = bank.uniform_buffers + bank.storage_buffers + bank.texture_buffers +
bank.image_buffers + bank.textures + bank.images;
return bank;
}
VKDescriptorPool::~VKDescriptorPool() = default;
vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
static constexpr u32 num_sets = 0x20000;
static constexpr VkDescriptorPoolSize pool_sizes[] = {
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_sets * 90},
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
{VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
{VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40},
static void AllocatePool(const Device& device, DescriptorBank& bank) {
std::array<VkDescriptorPoolSize, 6> pool_sizes;
size_t pool_cursor{};
const auto add = [&](VkDescriptorType type, u32 count) {
if (count > 0) {
pool_sizes[pool_cursor++] = {
.type = type,
.descriptorCount = count * SETS_PER_POOL,
};
}
};
const VkDescriptorPoolCreateInfo ci{
const auto& info{bank.info};
add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, info.uniform_buffers);
add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, info.storage_buffers);
add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, info.texture_buffers);
add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, info.image_buffers);
add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, info.textures);
add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, info.images);
bank.pools.push_back(device.GetLogical().CreateDescriptorPool({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
.maxSets = num_sets,
.poolSizeCount = static_cast<u32>(std::size(pool_sizes)),
.maxSets = SETS_PER_POOL,
.poolSizeCount = static_cast<u32>(pool_cursor),
.pPoolSizes = std::data(pool_sizes),
};
return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
}));
}
vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
std::size_t count) {
const std::vector layout_copies(count, layout);
VkDescriptorSetAllocateInfo ai{
DescriptorAllocator::DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
DescriptorBank& bank_, VkDescriptorSetLayout layout_)
: ResourcePool(master_semaphore_, SETS_GROW_RATE), device{&device_}, bank{&bank_},
layout{layout_} {}
VkDescriptorSet DescriptorAllocator::Commit() {
const size_t index = CommitResource();
return sets[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
}
void DescriptorAllocator::Allocate(size_t begin, size_t end) {
sets.push_back(AllocateDescriptors(end - begin));
}
vk::DescriptorSets DescriptorAllocator::AllocateDescriptors(size_t count) {
const std::vector<VkDescriptorSetLayout> layouts(count, layout);
VkDescriptorSetAllocateInfo allocate_info{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = **active_pool,
.descriptorPool = *bank->pools.back(),
.descriptorSetCount = static_cast<u32>(count),
.pSetLayouts = layout_copies.data(),
.pSetLayouts = layouts.data(),
};
vk::DescriptorSets sets = active_pool->Allocate(ai);
if (!sets.IsOutOfPoolMemory()) {
return sets;
vk::DescriptorSets new_sets = bank->pools.back().Allocate(allocate_info);
if (!new_sets.IsOutOfPoolMemory()) {
return new_sets;
}
// Our current pool is out of memory. Allocate a new one and retry
active_pool = AllocateNewPool();
ai.descriptorPool = **active_pool;
sets = active_pool->Allocate(ai);
if (!sets.IsOutOfPoolMemory()) {
return sets;
AllocatePool(*device, *bank);
allocate_info.descriptorPool = *bank->pools.back();
new_sets = bank->pools.back().Allocate(allocate_info);
if (!new_sets.IsOutOfPoolMemory()) {
return new_sets;
}
// After allocating a new pool, we are out of memory again. We can't handle this from here.
throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
}
DescriptorPool::DescriptorPool(const Device& device_, VKScheduler& scheduler)
: device{device_}, master_semaphore{scheduler.GetMasterSemaphore()} {}
DescriptorPool::~DescriptorPool() = default;
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
std::span<const Shader::Info> infos) {
return Allocator(layout, MakeBankInfo(infos));
}
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
const Shader::Info& info) {
return Allocator(layout, MakeBankInfo(std::array{info}));
}
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
const DescriptorBankInfo& info) {
return DescriptorAllocator(device, master_semaphore, Bank(info), layout);
}
DescriptorBank& DescriptorPool::Bank(const DescriptorBankInfo& reqs) {
std::shared_lock read_lock{banks_mutex};
const auto it = std::ranges::find_if(bank_infos, [&reqs](const DescriptorBankInfo& bank) {
return std::abs(bank.score - reqs.score) < SCORE_THRESHOLD && bank.IsSuperset(reqs);
});
if (it != bank_infos.end()) {
return *banks[std::distance(bank_infos.begin(), it)].get();
}
read_lock.unlock();
std::unique_lock write_lock{banks_mutex};
bank_infos.push_back(reqs);
auto& bank = *banks.emplace_back(std::make_unique<DescriptorBank>());
bank.info = reqs;
AllocatePool(device, bank);
return bank;
}
} // namespace Vulkan
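
Bank() above reuses an existing descriptor bank only when its per-type descriptor counts cover the request (IsSuperset) and its total score is within SCORE_THRESHOLD, so similar pipelines share VkDescriptorPool sizing without tiny layouts being served from oversized pools. A reduced sketch of just that selection rule on plain structs, detached from Vulkan; the names mirror the code above but the FindBank helper is illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <vector>

struct BankInfo {
    std::uint32_t uniform_buffers{}, storage_buffers{}, texture_buffers{};
    std::uint32_t image_buffers{}, textures{}, images{};
    std::int32_t score{}; // sum of all descriptor counts

    bool IsSuperset(const BankInfo& subset) const noexcept {
        return uniform_buffers >= subset.uniform_buffers &&
               storage_buffers >= subset.storage_buffers &&
               texture_buffers >= subset.texture_buffers &&
               image_buffers >= subset.image_buffers && textures >= subset.textures &&
               images >= subset.images;
    }
};

// Return the index of a compatible bank, or nullopt if a new one must be created.
std::optional<std::size_t> FindBank(const std::vector<BankInfo>& banks, const BankInfo& reqs,
                                    std::int32_t score_threshold = 3) {
    for (std::size_t i = 0; i < banks.size(); ++i) {
        // Close in total size (avoids wasting big pools on small layouts) and
        // large enough in every individual descriptor type.
        if (std::abs(banks[i].score - reqs.score) < score_threshold && banks[i].IsSuperset(reqs)) {
            return i;
        }
    }
    return std::nullopt;
}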

View File

@ -4,57 +4,85 @@
#pragma once
#include <shared_mutex>
#include <span>
#include <vector>
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
class Device;
class VKDescriptorPool;
class VKScheduler;
struct DescriptorBank;
struct DescriptorBankInfo {
[[nodiscard]] bool IsSuperset(const DescriptorBankInfo& subset) const noexcept;
u32 uniform_buffers{}; ///< Number of uniform buffer descriptors
u32 storage_buffers{}; ///< Number of storage buffer descriptors
u32 texture_buffers{}; ///< Number of texture buffer descriptors
u32 image_buffers{}; ///< Number of image buffer descriptors
u32 textures{}; ///< Number of texture descriptors
u32 images{}; ///< Number of image descriptors
s32 score{}; ///< Number of descriptors in total
};
class DescriptorAllocator final : public ResourcePool {
friend class DescriptorPool;
public:
explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
~DescriptorAllocator() override;
explicit DescriptorAllocator() = default;
~DescriptorAllocator() override = default;
DescriptorAllocator& operator=(DescriptorAllocator&&) noexcept = default;
DescriptorAllocator(DescriptorAllocator&&) noexcept = default;
DescriptorAllocator& operator=(const DescriptorAllocator&) = delete;
DescriptorAllocator(const DescriptorAllocator&) = delete;
VkDescriptorSet Commit();
protected:
void Allocate(std::size_t begin, std::size_t end) override;
private:
VKDescriptorPool& descriptor_pool;
const VkDescriptorSetLayout layout;
explicit DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
DescriptorBank& bank_, VkDescriptorSetLayout layout_);
std::vector<vk::DescriptorSets> descriptors_allocations;
void Allocate(size_t begin, size_t end) override;
vk::DescriptorSets AllocateDescriptors(size_t count);
const Device* device{};
DescriptorBank* bank{};
VkDescriptorSetLayout layout{};
std::vector<vk::DescriptorSets> sets;
};
class VKDescriptorPool final {
friend DescriptorAllocator;
class DescriptorPool {
public:
explicit VKDescriptorPool(const Device& device, VKScheduler& scheduler);
~VKDescriptorPool();
explicit DescriptorPool(const Device& device, VKScheduler& scheduler);
~DescriptorPool();
VKDescriptorPool(const VKDescriptorPool&) = delete;
VKDescriptorPool& operator=(const VKDescriptorPool&) = delete;
DescriptorPool& operator=(const DescriptorPool&) = delete;
DescriptorPool(const DescriptorPool&) = delete;
DescriptorAllocator Allocator(VkDescriptorSetLayout layout,
std::span<const Shader::Info> infos);
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const Shader::Info& info);
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const DescriptorBankInfo& info);
private:
vk::DescriptorPool* AllocateNewPool();
vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
DescriptorBank& Bank(const DescriptorBankInfo& reqs);
const Device& device;
MasterSemaphore& master_semaphore;
std::vector<vk::DescriptorPool> pools;
vk::DescriptorPool* active_pool;
std::shared_mutex banks_mutex;
std::vector<DescriptorBankInfo> bank_infos;
std::vector<std::unique_ptr<DescriptorBank>> banks;
};
} // namespace Vulkan
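
DescriptorAllocator grows in fixed chunks: each Allocate() call reserves SETS_GROW_RATE descriptor sets in one vk::DescriptorSets object, and Commit() turns the flat index handed out by the ResourcePool into a chunk plus an offset inside it. A tiny stand-alone illustration of that index math, assuming the grow rate of 16 used above:

#include <cstddef>
#include <utility>

constexpr std::size_t SETS_GROW_RATE = 16;

// Map a flat resource-pool index onto chunked storage: one inner allocation of
// SETS_GROW_RATE descriptor sets per Allocate() call.
constexpr std::pair<std::size_t, std::size_t> SplitIndex(std::size_t flat_index) {
    return {flat_index / SETS_GROW_RATE, flat_index % SETS_GROW_RATE};
}

static_assert(SplitIndex(0) == std::pair<std::size_t, std::size_t>{0, 0});
static_assert(SplitIndex(16) == std::pair<std::size_t, std::size_t>{1, 0});
static_assert(SplitIndex(37) == std::pair<std::size_t, std::size_t>{2, 5});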

File diff suppressed because it is too large

View File

@ -1,30 +1,36 @@
// Copyright 2019 yuzu Emulator Project
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <algorithm>
#include <array>
#include <optional>
#include <vector>
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <type_traits>
#include "common/common_types.h"
#include "common/thread_worker.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace VideoCore {
class ShaderNotify;
}
namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct GraphicsPipelineCacheKey {
VkRenderPass renderpass;
std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
FixedPipelineState fixed_state;
std::array<u64, 6> unique_hashes;
FixedPipelineState state;
std::size_t Hash() const noexcept;
size_t Hash() const noexcept;
bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
@ -32,72 +38,115 @@ struct GraphicsPipelineCacheKey {
return !operator==(rhs);
}
std::size_t Size() const noexcept {
return sizeof(renderpass) + sizeof(shaders) + fixed_state.Size();
size_t Size() const noexcept {
return sizeof(unique_hashes) + state.Size();
}
};
static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
} // namespace Vulkan
namespace std {
template <>
struct hash<Vulkan::GraphicsPipelineCacheKey> {
size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
return k.Hash();
}
};
} // namespace std
namespace Vulkan {
class Device;
class VKDescriptorPool;
class RenderPassCache;
class VKScheduler;
class VKUpdateDescriptorQueue;
using SPIRVProgram = std::array<std::optional<SPIRVShader>, Maxwell::MaxShaderStage>;
class GraphicsPipeline {
static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
class VKGraphicsPipeline final {
public:
explicit VKGraphicsPipeline(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue_,
const GraphicsPipelineCacheKey& key,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
const SPIRVProgram& program, u32 num_color_buffers);
~VKGraphicsPipeline();
explicit GraphicsPipeline(
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
VideoCore::ShaderNotify* shader_notify, const Device& device,
DescriptorPool& descriptor_pool, VKUpdateDescriptorQueue& update_descriptor_queue,
Common::ThreadWorker* worker_thread, RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos);
VkDescriptorSet CommitDescriptorSet();
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
VkPipeline GetHandle() const {
return *pipeline;
GraphicsPipeline& operator=(const GraphicsPipeline&) = delete;
GraphicsPipeline(const GraphicsPipeline&) = delete;
void AddTransition(GraphicsPipeline* transition);
void Configure(bool is_indexed) {
configure_func(this, is_indexed);
}
VkPipelineLayout GetLayout() const {
return *layout;
[[nodiscard]] GraphicsPipeline* Next(const GraphicsPipelineCacheKey& current_key) noexcept {
if (key == current_key) {
return this;
}
const auto it{std::find(transition_keys.begin(), transition_keys.end(), current_key)};
return it != transition_keys.end() ? transitions[std::distance(transition_keys.begin(), it)]
: nullptr;
}
GraphicsPipelineCacheKey GetCacheKey() const {
return cache_key;
[[nodiscard]] bool IsBuilt() const noexcept {
return is_built.load(std::memory_order::relaxed);
}
template <typename Spec>
static auto MakeConfigureSpecFunc() {
return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
}
private:
vk::DescriptorSetLayout CreateDescriptorSetLayout(
vk::Span<VkDescriptorSetLayoutBinding> bindings) const;
template <typename Spec>
void ConfigureImpl(bool is_indexed);
vk::PipelineLayout CreatePipelineLayout() const;
void ConfigureDraw();
vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
const SPIRVProgram& program) const;
void MakePipeline(VkRenderPass render_pass);
std::vector<vk::ShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
vk::Pipeline CreatePipeline(const SPIRVProgram& program, VkRenderPass renderpass,
u32 num_color_buffers) const;
void Validate();
const GraphicsPipelineCacheKey key;
Tegra::Engines::Maxwell3D& maxwell3d;
Tegra::MemoryManager& gpu_memory;
const Device& device;
TextureCache& texture_cache;
BufferCache& buffer_cache;
VKScheduler& scheduler;
const GraphicsPipelineCacheKey cache_key;
const u64 hash;
VKUpdateDescriptorQueue& update_descriptor_queue;
void (*configure_func)(GraphicsPipeline*, bool){};
std::vector<GraphicsPipelineCacheKey> transition_keys;
std::vector<GraphicsPipeline*> transitions;
std::array<vk::ShaderModule, NUM_STAGES> spv_modules;
std::array<Shader::Info, NUM_STAGES> stage_infos;
std::array<u32, 5> enabled_uniform_buffer_masks{};
VideoCommon::UniformBufferSizes uniform_buffer_sizes{};
vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
VKUpdateDescriptorQueue& update_descriptor_queue;
vk::PipelineLayout layout;
vk::DescriptorUpdateTemplateKHR descriptor_template;
std::vector<vk::ShaderModule> modules;
vk::PipelineLayout pipeline_layout;
vk::DescriptorUpdateTemplateKHR descriptor_update_template;
vk::Pipeline pipeline;
std::condition_variable build_condvar;
std::mutex build_mutex;
std::atomic_bool is_built{false};
bool uses_push_descriptor{false};
};
} // namespace Vulkan
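
Next() above gives each pipeline its own small transition cache: a pipeline remembers the keys it has previously switched to, so flipping between a handful of states resolves without touching the global hash map, and only a miss falls back to the slow path. A minimal sketch of the idea with an integer key standing in for the hashed GraphicsPipelineCacheKey (AddTransition() is filled in by the cache on the slow path):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Pipeline {
    int key{};                          // identity of this pipeline's state
    std::vector<int> transition_keys;   // states previously switched to
    std::vector<Pipeline*> transitions; // pipelines for those states

    void AddTransition(Pipeline* next) {
        transition_keys.push_back(next->key);
        transitions.push_back(next);
    }

    // Resolve the pipeline for current_key without a global map lookup, or
    // return nullptr to signal that the slow path must run.
    Pipeline* Next(int current_key) {
        if (key == current_key) {
            return this;
        }
        const auto it = std::find(transition_keys.begin(), transition_keys.end(), current_key);
        if (it == transition_keys.end()) {
            return nullptr;
        }
        return transitions[static_cast<std::size_t>(std::distance(transition_keys.begin(), it))];
    }
};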

View File

@ -39,9 +39,9 @@ public:
return KnownGpuTick() >= tick;
}
/// Advance to the logical tick.
void NextTick() noexcept {
++current_tick;
/// Advance to the logical tick and return the old one
[[nodiscard]] u64 NextTick() noexcept {
return current_tick.fetch_add(1, std::memory_order::relaxed);
}
/// Refresh the known GPU tick

View File

@ -4,444 +4,613 @@
#include <algorithm>
#include <cstddef>
#include <fstream>
#include <memory>
#include <thread>
#include <vector>
#include "common/bit_cast.h"
#include "common/cityhash.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/microprofile.h"
#include "common/thread_worker.h"
#include "core/core.h"
#include "core/memory.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/environment.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/frontend/maxwell/translate_program.h"
#include "shader_recompiler/program_header.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/pipeline_helper.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/shader/compiler_settings.h"
#include "video_core/shader/memory_util.h"
#include "video_core/shader_cache.h"
#include "video_core/shader_environment.h"
#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
MICROPROFILE_DECLARE(Vulkan_PipelineCache);
using Tegra::Engines::ShaderType;
using VideoCommon::Shader::GetShaderAddress;
using VideoCommon::Shader::GetShaderCode;
using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
using VideoCommon::Shader::ProgramCode;
using VideoCommon::Shader::STAGE_MAIN_OFFSET;
namespace {
using Shader::Backend::SPIRV::EmitSPIRV;
using Shader::Maxwell::MergeDualVertexPrograms;
using Shader::Maxwell::TranslateProgram;
using VideoCommon::ComputeEnvironment;
using VideoCommon::FileEnvironment;
using VideoCommon::GenericEnvironment;
using VideoCommon::GraphicsEnvironment;
constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
constexpr u32 CACHE_VERSION = 5;
constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
.depth = VideoCommon::Shader::CompileDepth::FullDecompile,
.disable_else_derivation = true,
};
constexpr std::size_t GetStageFromProgram(std::size_t program) {
return program == 0 ? 0 : program - 1;
template <typename Container>
auto MakeSpan(Container& container) {
return std::span(container.data(), container.size());
}
constexpr ShaderType GetStageFromProgram(Maxwell::ShaderProgram program) {
return static_cast<ShaderType>(GetStageFromProgram(static_cast<std::size_t>(program)));
}
ShaderType GetShaderType(Maxwell::ShaderProgram program) {
switch (program) {
case Maxwell::ShaderProgram::VertexB:
return ShaderType::Vertex;
case Maxwell::ShaderProgram::TesselationControl:
return ShaderType::TesselationControl;
case Maxwell::ShaderProgram::TesselationEval:
return ShaderType::TesselationEval;
case Maxwell::ShaderProgram::Geometry:
return ShaderType::Geometry;
case Maxwell::ShaderProgram::Fragment:
return ShaderType::Fragment;
default:
UNIMPLEMENTED_MSG("program={}", program);
return ShaderType::Vertex;
Shader::CompareFunction MaxwellToCompareFunction(Maxwell::ComparisonOp comparison) {
switch (comparison) {
case Maxwell::ComparisonOp::Never:
case Maxwell::ComparisonOp::NeverOld:
return Shader::CompareFunction::Never;
case Maxwell::ComparisonOp::Less:
case Maxwell::ComparisonOp::LessOld:
return Shader::CompareFunction::Less;
case Maxwell::ComparisonOp::Equal:
case Maxwell::ComparisonOp::EqualOld:
return Shader::CompareFunction::Equal;
case Maxwell::ComparisonOp::LessEqual:
case Maxwell::ComparisonOp::LessEqualOld:
return Shader::CompareFunction::LessThanEqual;
case Maxwell::ComparisonOp::Greater:
case Maxwell::ComparisonOp::GreaterOld:
return Shader::CompareFunction::Greater;
case Maxwell::ComparisonOp::NotEqual:
case Maxwell::ComparisonOp::NotEqualOld:
return Shader::CompareFunction::NotEqual;
case Maxwell::ComparisonOp::GreaterEqual:
case Maxwell::ComparisonOp::GreaterEqualOld:
return Shader::CompareFunction::GreaterThanEqual;
case Maxwell::ComparisonOp::Always:
case Maxwell::ComparisonOp::AlwaysOld:
return Shader::CompareFunction::Always;
}
UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison);
return {};
}
template <VkDescriptorType descriptor_type, class Container>
void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& binding,
VkShaderStageFlags stage_flags, const Container& container) {
const u32 num_entries = static_cast<u32>(std::size(container));
for (std::size_t i = 0; i < num_entries; ++i) {
u32 count = 1;
if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
// Combined image samplers can be arrayed.
count = container[i].size;
Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribute& attr) {
if (attr.enabled == 0) {
return Shader::AttributeType::Disabled;
}
switch (attr.Type()) {
case Maxwell::VertexAttribute::Type::SignedNorm:
case Maxwell::VertexAttribute::Type::UnsignedNorm:
case Maxwell::VertexAttribute::Type::UnsignedScaled:
case Maxwell::VertexAttribute::Type::SignedScaled:
case Maxwell::VertexAttribute::Type::Float:
return Shader::AttributeType::Float;
case Maxwell::VertexAttribute::Type::SignedInt:
return Shader::AttributeType::SignedInt;
case Maxwell::VertexAttribute::Type::UnsignedInt:
return Shader::AttributeType::UnsignedInt;
}
return Shader::AttributeType::Float;
}
Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t index) {
switch (state.DynamicAttributeType(index)) {
case 0:
return Shader::AttributeType::Disabled;
case 1:
return Shader::AttributeType::Float;
case 2:
return Shader::AttributeType::SignedInt;
case 3:
return Shader::AttributeType::UnsignedInt;
}
return Shader::AttributeType::Disabled;
}
Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
const GraphicsPipelineCacheKey& key,
const Shader::IR::Program& program,
const Shader::IR::Program* previous_program) {
Shader::RuntimeInfo info;
if (previous_program) {
info.previous_stage_stores = previous_program->info.stores;
if (previous_program->is_geometry_passthrough) {
info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
}
bindings.push_back({
.binding = binding++,
.descriptorType = descriptor_type,
.descriptorCount = count,
.stageFlags = stage_flags,
.pImmutableSamplers = nullptr,
});
} else {
info.previous_stage_stores.mask.set();
}
const Shader::Stage stage{program.stage};
const bool has_geometry{key.unique_hashes[4] != 0 && !programs[4].is_geometry_passthrough};
const bool gl_ndc{key.state.ndc_minus_one_to_one != 0};
const float point_size{Common::BitCast<float>(key.state.point_size)};
switch (stage) {
case Shader::Stage::VertexB:
if (!has_geometry) {
if (key.state.topology == Maxwell::PrimitiveTopology::Points) {
info.fixed_state_point_size = point_size;
}
if (key.state.xfb_enabled) {
info.xfb_varyings = VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
}
info.convert_depth_mode = gl_ndc;
}
if (key.state.dynamic_vertex_input) {
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
info.generic_input_types[index] = AttributeType(key.state, index);
}
} else {
std::ranges::transform(key.state.attributes, info.generic_input_types.begin(),
&CastAttributeType);
}
break;
case Shader::Stage::TessellationEval:
// We have to flip tessellation clockwise for some reason...
info.tess_clockwise = key.state.tessellation_clockwise == 0;
info.tess_primitive = [&key] {
const u32 raw{key.state.tessellation_primitive.Value()};
switch (static_cast<Maxwell::TessellationPrimitive>(raw)) {
case Maxwell::TessellationPrimitive::Isolines:
return Shader::TessPrimitive::Isolines;
case Maxwell::TessellationPrimitive::Triangles:
return Shader::TessPrimitive::Triangles;
case Maxwell::TessellationPrimitive::Quads:
return Shader::TessPrimitive::Quads;
}
UNREACHABLE();
return Shader::TessPrimitive::Triangles;
}();
info.tess_spacing = [&] {
const u32 raw{key.state.tessellation_spacing};
switch (static_cast<Maxwell::TessellationSpacing>(raw)) {
case Maxwell::TessellationSpacing::Equal:
return Shader::TessSpacing::Equal;
case Maxwell::TessellationSpacing::FractionalOdd:
return Shader::TessSpacing::FractionalOdd;
case Maxwell::TessellationSpacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
UNREACHABLE();
return Shader::TessSpacing::Equal;
}();
break;
case Shader::Stage::Geometry:
if (program.output_topology == Shader::OutputTopology::PointList) {
info.fixed_state_point_size = point_size;
}
if (key.state.xfb_enabled != 0) {
info.xfb_varyings = VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
}
info.convert_depth_mode = gl_ndc;
break;
case Shader::Stage::Fragment:
info.alpha_test_func = MaxwellToCompareFunction(
key.state.UnpackComparisonOp(key.state.alpha_test_func.Value()));
info.alpha_test_reference = Common::BitCast<float>(key.state.alpha_test_ref);
break;
default:
break;
}
switch (key.state.topology) {
case Maxwell::PrimitiveTopology::Points:
info.input_topology = Shader::InputTopology::Points;
break;
case Maxwell::PrimitiveTopology::Lines:
case Maxwell::PrimitiveTopology::LineLoop:
case Maxwell::PrimitiveTopology::LineStrip:
info.input_topology = Shader::InputTopology::Lines;
break;
case Maxwell::PrimitiveTopology::Triangles:
case Maxwell::PrimitiveTopology::TriangleStrip:
case Maxwell::PrimitiveTopology::TriangleFan:
case Maxwell::PrimitiveTopology::Quads:
case Maxwell::PrimitiveTopology::QuadStrip:
case Maxwell::PrimitiveTopology::Polygon:
case Maxwell::PrimitiveTopology::Patches:
info.input_topology = Shader::InputTopology::Triangles;
break;
case Maxwell::PrimitiveTopology::LinesAdjacency:
case Maxwell::PrimitiveTopology::LineStripAdjacency:
info.input_topology = Shader::InputTopology::LinesAdjacency;
break;
case Maxwell::PrimitiveTopology::TrianglesAdjacency:
case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
info.input_topology = Shader::InputTopology::TrianglesAdjacency;
break;
}
info.force_early_z = key.state.early_z != 0;
info.y_negate = key.state.y_negate != 0;
return info;
}
u32 FillDescriptorLayout(const ShaderEntries& entries,
std::vector<VkDescriptorSetLayoutBinding>& bindings,
Maxwell::ShaderProgram program_type, u32 base_binding) {
const ShaderType stage = GetStageFromProgram(program_type);
const VkShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
u32 binding = base_binding;
AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
return binding;
}
} // Anonymous namespace
std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
return static_cast<std::size_t>(hash);
}
bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
return std::memcmp(&rhs, this, Size()) == 0;
}
std::size_t ComputePipelineCacheKey::Hash() const noexcept {
size_t ComputePipelineCacheKey::Hash() const noexcept {
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
return static_cast<std::size_t>(hash);
return static_cast<size_t>(hash);
}
bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
return std::memcmp(&rhs, this, sizeof *this) == 0;
}
Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine_, ShaderType stage_,
GPUVAddr gpu_addr_, VAddr cpu_addr_, ProgramCode program_code_, u32 main_offset_)
: gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage_, engine_),
shader_ir(program_code, main_offset_, compiler_settings, registry),
entries(GenerateShaderEntries(shader_ir)) {}
Shader::~Shader() = default;
VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::Engines::KeplerCompute& kepler_compute_,
Tegra::MemoryManager& gpu_memory_, const Device& device_,
VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
: VideoCommon::ShaderCache<Shader>{rasterizer_}, gpu{gpu_}, maxwell3d{maxwell3d_},
kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_},
scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, update_descriptor_queue{
update_descriptor_queue_} {}
VKPipelineCache::~VKPipelineCache() = default;
std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
std::array<Shader*, Maxwell::MaxShaderProgram> shaders{};
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
const auto program{static_cast<Maxwell::ShaderProgram>(index)};
// Skip stages that are not enabled
if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
continue;
}
const GPUVAddr gpu_addr{GetShaderAddress(maxwell3d, program)};
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
if (!result) {
const u8* const host_ptr{gpu_memory.GetPointer(gpu_addr)};
// No shader found - create a new one
static constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
const auto stage = static_cast<ShaderType>(index == 0 ? 0 : index - 1);
ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, false);
const std::size_t size_in_bytes = code.size() * sizeof(u64);
auto shader = std::make_unique<Shader>(maxwell3d, stage, gpu_addr, *cpu_addr,
std::move(code), stage_offset);
result = shader.get();
if (cpu_addr) {
Register(std::move(shader), *cpu_addr, size_in_bytes);
} else {
null_shader = std::move(shader);
}
}
shaders[index] = result;
}
return last_shaders = shaders;
size_t GraphicsPipelineCacheKey::Hash() const noexcept {
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
return static_cast<size_t>(hash);
}
VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
const GraphicsPipelineCacheKey& key, u32 num_color_buffers,
VideoCommon::Shader::AsyncShaders& async_shaders) {
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
if (last_graphics_pipeline && last_graphics_key == key) {
return last_graphics_pipeline;
}
last_graphics_key = key;
if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(gpu)) {
std::unique_lock lock{pipeline_cache};
const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
if (is_cache_miss) {
gpu.ShaderNotify().MarkSharderBuilding();
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
const auto [program, bindings] = DecompileShaders(key.fixed_state);
async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool,
update_descriptor_queue, bindings, program, key,
num_color_buffers);
}
last_graphics_pipeline = pair->second.get();
return last_graphics_pipeline;
}
const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
auto& entry = pair->second;
if (is_cache_miss) {
gpu.ShaderNotify().MarkSharderBuilding();
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
const auto [program, bindings] = DecompileShaders(key.fixed_state);
entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool,
update_descriptor_queue, key, bindings,
program, num_color_buffers);
gpu.ShaderNotify().MarkShaderComplete();
}
last_graphics_pipeline = entry.get();
return last_graphics_pipeline;
bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
return std::memcmp(&rhs, this, Size()) == 0;
}
VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCacheKey& key) {
PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::Engines::KeplerCompute& kepler_compute_,
Tegra::MemoryManager& gpu_memory_, const Device& device_,
VKScheduler& scheduler_, DescriptorPool& descriptor_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
: VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"),
serialization_thread(1, "yuzu:PipelineSerialization") {
const auto& float_control{device.FloatControlProperties()};
const VkDriverIdKHR driver_id{device.GetDriverID()};
profile = Shader::Profile{
.supported_spirv = device.IsKhrSpirv1_4Supported() ? 0x00010400U : 0x00010000U,
.unified_descriptor_binding = true,
.support_descriptor_aliasing = true,
.support_int8 = true,
.support_int16 = device.IsShaderInt16Supported(),
.support_int64 = device.IsShaderInt64Supported(),
.support_vertex_instance_id = false,
.support_float_controls = true,
.support_separate_denorm_behavior = float_control.denormBehaviorIndependence ==
VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
.support_separate_rounding_mode =
float_control.roundingModeIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
.support_fp16_denorm_preserve = float_control.shaderDenormPreserveFloat16 != VK_FALSE,
.support_fp32_denorm_preserve = float_control.shaderDenormPreserveFloat32 != VK_FALSE,
.support_fp16_denorm_flush = float_control.shaderDenormFlushToZeroFloat16 != VK_FALSE,
.support_fp32_denorm_flush = float_control.shaderDenormFlushToZeroFloat32 != VK_FALSE,
.support_fp16_signed_zero_nan_preserve =
float_control.shaderSignedZeroInfNanPreserveFloat16 != VK_FALSE,
.support_fp32_signed_zero_nan_preserve =
float_control.shaderSignedZeroInfNanPreserveFloat32 != VK_FALSE,
.support_fp64_signed_zero_nan_preserve =
float_control.shaderSignedZeroInfNanPreserveFloat64 != VK_FALSE,
.support_explicit_workgroup_layout = device.IsKhrWorkgroupMemoryExplicitLayoutSupported(),
.support_vote = true,
.support_viewport_index_layer_non_geometry =
device.IsExtShaderViewportIndexLayerSupported(),
.support_viewport_mask = device.IsNvViewportArray2Supported(),
.support_typeless_image_loads = device.IsFormatlessImageLoadSupported(),
.support_demote_to_helper_invocation = true,
.support_int64_atomics = device.IsExtShaderAtomicInt64Supported(),
.support_derivative_control = true,
.support_geometry_shader_passthrough = device.IsNvGeometryShaderPassthroughSupported(),
.warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
.lower_left_origin_mode = false,
.need_declared_frag_colors = false,
.has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR,
.has_broken_unsigned_image_offsets = false,
.has_broken_signed_operations = false,
.has_broken_fp16_float_controls = driver_id == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR,
.ignore_nan_fp_comparisons = false,
};
host_info = Shader::HostTranslateInfo{
.support_float16 = device.IsFloat16Supported(),
.support_int64 = device.IsShaderInt64Supported(),
};
}
PipelineCache::~PipelineCache() = default;
GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
const auto [pair, is_cache_miss] = compute_cache.try_emplace(key);
auto& entry = pair->second;
if (!is_cache_miss) {
return *entry;
if (!RefreshStages(graphics_key.unique_hashes)) {
current_pipeline = nullptr;
return nullptr;
}
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(),
device.IsExtVertexInputDynamicStateSupported());
const GPUVAddr gpu_addr = key.shader;
if (current_pipeline) {
GraphicsPipeline* const next{current_pipeline->Next(graphics_key)};
if (next) {
current_pipeline = next;
return BuiltPipeline(current_pipeline);
}
}
return CurrentGraphicsPipelineSlowPath();
}
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
ComputePipeline* PipelineCache::CurrentComputePipeline() {
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
Shader* shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get();
const ShaderInfo* const shader{ComputeShader()};
if (!shader) {
// No shader found - create a new one
const auto host_ptr = gpu_memory.GetPointer(gpu_addr);
ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, true);
const std::size_t size_in_bytes = code.size() * sizeof(u64);
auto shader_info = std::make_unique<Shader>(kepler_compute, ShaderType::Compute, gpu_addr,
*cpu_addr, std::move(code), KERNEL_MAIN_OFFSET);
shader = shader_info.get();
if (cpu_addr) {
Register(std::move(shader_info), *cpu_addr, size_in_bytes);
} else {
null_kernel = std::move(shader_info);
}
return nullptr;
}
const Specialization specialization{
.base_binding = 0,
.workgroup_size = key.workgroup_size,
.shared_memory_size = key.shared_memory_size,
.point_size = std::nullopt,
.enabled_attributes = {},
.attribute_types = {},
.ndc_minus_one_to_one = false,
const auto& qmd{kepler_compute.launch_description};
const ComputePipelineCacheKey key{
.unique_hash = shader->unique_hash,
.shared_memory_size = qmd.shared_alloc,
.workgroup_size{qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z},
};
const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
shader->GetRegistry(), specialization),
shader->GetEntries()};
entry = std::make_unique<VKComputePipeline>(device, scheduler, descriptor_pool,
update_descriptor_queue, spirv_shader);
return *entry;
const auto [pair, is_new]{compute_cache.try_emplace(key)};
auto& pipeline{pair->second};
if (!is_new) {
return pipeline.get();
}
pipeline = CreateComputePipeline(key, shader);
return pipeline.get();
}
void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) {
gpu.ShaderNotify().MarkShaderComplete();
std::unique_lock lock{pipeline_cache};
graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline);
}
void VKPipelineCache::OnShaderRemoval(Shader* shader) {
bool finished = false;
const auto Finish = [&] {
// TODO(Rodrigo): Instead of finishing here, wait for the fences that use this pipeline and
// flush.
if (finished) {
return;
}
finished = true;
scheduler.Finish();
};
const GPUVAddr invalidated_addr = shader->GetGpuAddr();
for (auto it = graphics_cache.begin(); it != graphics_cache.end();) {
auto& entry = it->first;
if (std::find(entry.shaders.begin(), entry.shaders.end(), invalidated_addr) ==
entry.shaders.end()) {
++it;
continue;
}
Finish();
it = graphics_cache.erase(it);
}
for (auto it = compute_cache.begin(); it != compute_cache.end();) {
auto& entry = it->first;
if (entry.shader != invalidated_addr) {
++it;
continue;
}
Finish();
it = compute_cache.erase(it);
}
}
std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
Specialization specialization;
if (fixed_state.topology == Maxwell::PrimitiveTopology::Points) {
float point_size;
std::memcpy(&point_size, &fixed_state.point_size, sizeof(float));
specialization.point_size = point_size;
ASSERT(point_size != 0.0f);
}
for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
const auto& attribute = fixed_state.attributes[i];
specialization.enabled_attributes[i] = attribute.enabled.Value() != 0;
specialization.attribute_types[i] = attribute.Type();
}
specialization.ndc_minus_one_to_one = fixed_state.ndc_minus_one_to_one;
specialization.early_fragment_tests = fixed_state.early_z;
// Alpha test
specialization.alpha_test_func =
FixedPipelineState::UnpackComparisonOp(fixed_state.alpha_test_func.Value());
specialization.alpha_test_ref = Common::BitCast<float>(fixed_state.alpha_test_ref);
SPIRVProgram program;
std::vector<VkDescriptorSetLayoutBinding> bindings;
for (std::size_t index = 1; index < Maxwell::MaxShaderProgram; ++index) {
const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
// Skip stages that are not enabled
if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
continue;
}
const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum);
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
const ShaderType program_type = GetShaderType(program_enum);
const auto& entries = shader->GetEntries();
program[stage] = {
Decompile(device, shader->GetIR(), program_type, shader->GetRegistry(), specialization),
entries,
};
const u32 old_binding = specialization.base_binding;
specialization.base_binding =
FillDescriptorLayout(entries, bindings, program_enum, specialization.base_binding);
ASSERT(old_binding + entries.NumBindings() == specialization.base_binding);
}
return {std::move(program), std::move(bindings)};
}
template <VkDescriptorType descriptor_type, class Container>
void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32& binding,
u32& offset, const Container& container) {
static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
const u32 count = static_cast<u32>(std::size(container));
if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
for (u32 i = 0; i < count; ++i) {
const u32 num_samplers = container[i].size;
template_entries.push_back({
.dstBinding = binding,
.dstArrayElement = 0,
.descriptorCount = num_samplers,
.descriptorType = descriptor_type,
.offset = offset,
.stride = entry_size,
});
++binding;
offset += num_samplers * entry_size;
}
void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) {
if (title_id == 0) {
return;
}
if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
descriptor_type == STORAGE_TEXEL_BUFFER) {
// Nvidia has a bug where updating multiple texels at once causes the driver to crash.
// Note: Fixed in driver Windows 443.24, Linux 440.66.15
for (u32 i = 0; i < count; ++i) {
template_entries.push_back({
.dstBinding = binding + i,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = descriptor_type,
.offset = static_cast<std::size_t>(offset + i * entry_size),
.stride = entry_size,
});
}
} else if (count > 0) {
template_entries.push_back({
.dstBinding = binding,
.dstArrayElement = 0,
.descriptorCount = count,
.descriptorType = descriptor_type,
.offset = offset,
.stride = entry_size,
});
const auto shader_dir{Common::FS::GetYuzuPath(Common::FS::YuzuPath::ShaderDir)};
const auto base_dir{shader_dir / fmt::format("{:016x}", title_id)};
if (!Common::FS::CreateDir(shader_dir) || !Common::FS::CreateDir(base_dir)) {
LOG_ERROR(Common_Filesystem, "Failed to create pipeline cache directories");
return;
}
offset += count * entry_size;
binding += count;
pipeline_cache_filename = base_dir / "vulkan.bin";
struct {
std::mutex mutex;
size_t total{};
size_t built{};
bool has_loaded{};
} state;
const auto load_compute{[&](std::ifstream& file, FileEnvironment env) {
ComputePipelineCacheKey key;
file.read(reinterpret_cast<char*>(&key), sizeof(key));
workers.QueueWork([this, key, env = std::move(env), &state, &callback]() mutable {
ShaderPools pools;
auto pipeline{CreateComputePipeline(pools, key, env, false)};
std::lock_guard lock{state.mutex};
if (pipeline) {
compute_cache.emplace(key, std::move(pipeline));
}
++state.built;
if (state.has_loaded) {
callback(VideoCore::LoadCallbackStage::Build, state.built, state.total);
}
});
++state.total;
}};
const bool extended_dynamic_state = device.IsExtExtendedDynamicStateSupported();
const bool dynamic_vertex_input = device.IsExtVertexInputDynamicStateSupported();
const auto load_graphics{[&](std::ifstream& file, std::vector<FileEnvironment> envs) {
GraphicsPipelineCacheKey key;
file.read(reinterpret_cast<char*>(&key), sizeof(key));
if ((key.state.extended_dynamic_state != 0) != extended_dynamic_state ||
(key.state.dynamic_vertex_input != 0) != dynamic_vertex_input) {
return;
}
workers.QueueWork([this, key, envs = std::move(envs), &state, &callback]() mutable {
ShaderPools pools;
boost::container::static_vector<Shader::Environment*, 5> env_ptrs;
for (auto& env : envs) {
env_ptrs.push_back(&env);
}
auto pipeline{CreateGraphicsPipeline(pools, key, MakeSpan(env_ptrs), false)};
std::lock_guard lock{state.mutex};
graphics_cache.emplace(key, std::move(pipeline));
++state.built;
if (state.has_loaded) {
callback(VideoCore::LoadCallbackStage::Build, state.built, state.total);
}
});
++state.total;
}};
VideoCommon::LoadPipelines(stop_loading, pipeline_cache_filename, CACHE_VERSION, load_compute,
load_graphics);
std::unique_lock lock{state.mutex};
callback(VideoCore::LoadCallbackStage::Build, 0, state.total);
state.has_loaded = true;
lock.unlock();
workers.WaitForRequests();
}
void FillDescriptorUpdateTemplateEntries(
const ShaderEntries& entries, u32& binding, u32& offset,
std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
GraphicsPipeline* PipelineCache::CurrentGraphicsPipelineSlowPath() {
const auto [pair, is_new]{graphics_cache.try_emplace(graphics_key)};
auto& pipeline{pair->second};
if (is_new) {
pipeline = CreateGraphicsPipeline();
}
if (!pipeline) {
return nullptr;
}
if (current_pipeline) {
current_pipeline->AddTransition(pipeline.get());
}
current_pipeline = pipeline.get();
return BuiltPipeline(current_pipeline);
}
GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const noexcept {
if (pipeline->IsBuilt()) {
return pipeline;
}
if (!use_asynchronous_shaders) {
return pipeline;
}
// If depth is in use, assume the game is not rendering something that is only used once,
// so this draw can be skipped while the pipeline builds asynchronously.
if (maxwell3d.regs.zeta_enable) {
return nullptr;
}
// If the game is using a small index count, assume it is drawing full-screen quads.
// These shaders are usually used only once to build a texture, so assume they
// cannot be built asynchronously.
if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
return pipeline;
}
return nullptr;
}
std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
ShaderPools& pools, const GraphicsPipelineCacheKey& key,
std::span<Shader::Environment* const> envs, bool build_in_parallel) try {
LOG_INFO(Render_Vulkan, "0x{:016x}", key.Hash());
size_t env_index{0};
std::array<Shader::IR::Program, Maxwell::MaxShaderProgram> programs;
const bool uses_vertex_a{key.unique_hashes[0] != 0};
const bool uses_vertex_b{key.unique_hashes[1] != 0};
for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
if (key.unique_hashes[index] == 0) {
continue;
}
Shader::Environment& env{*envs[env_index]};
++env_index;
const u32 cfg_offset{static_cast<u32>(env.StartAddress() + sizeof(Shader::ProgramHeader))};
Shader::Maxwell::Flow::CFG cfg(env, pools.flow_block, cfg_offset, index == 0);
if (!uses_vertex_a || index != 1) {
// Normal path
programs[index] = TranslateProgram(pools.inst, pools.block, env, cfg, host_info);
} else {
// VertexB path when VertexA is present.
auto& program_va{programs[0]};
auto program_vb{TranslateProgram(pools.inst, pools.block, env, cfg, host_info)};
programs[index] = MergeDualVertexPrograms(program_va, program_vb, env);
}
}
std::array<const Shader::Info*, Maxwell::MaxShaderStage> infos{};
std::array<vk::ShaderModule, Maxwell::MaxShaderStage> modules;
const Shader::IR::Program* previous_stage{};
Shader::Backend::Bindings binding;
for (size_t index = uses_vertex_a && uses_vertex_b ? 1 : 0; index < Maxwell::MaxShaderProgram;
++index) {
if (key.unique_hashes[index] == 0) {
continue;
}
UNIMPLEMENTED_IF(index == 0);
Shader::IR::Program& program{programs[index]};
const size_t stage_index{index - 1};
infos[stage_index] = &program.info;
const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage)};
const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)};
device.SaveShader(code);
modules[stage_index] = BuildShader(device, code);
if (device.HasDebuggingToolAttached()) {
const std::string name{fmt::format("Shader {:016x}", key.unique_hashes[index])};
modules[stage_index].SetObjectNameEXT(name.c_str());
}
previous_stage = &program;
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
return std::make_unique<GraphicsPipeline>(
maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device,
descriptor_pool, update_descriptor_queue, thread_worker, render_pass_cache, key,
std::move(modules), infos);
} catch (const Shader::Exception& exception) {
LOG_ERROR(Render_Vulkan, "{}", exception.what());
return nullptr;
}
std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
GraphicsEnvironments environments;
GetGraphicsEnvironments(environments, graphics_key.unique_hashes);
main_pools.ReleaseContents();
auto pipeline{CreateGraphicsPipeline(main_pools, graphics_key, environments.Span(), true)};
if (!pipeline || pipeline_cache_filename.empty()) {
return pipeline;
}
serialization_thread.QueueWork([this, key = graphics_key, envs = std::move(environments.envs)] {
boost::container::static_vector<const GenericEnvironment*, Maxwell::MaxShaderProgram>
env_ptrs;
for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
if (key.unique_hashes[index] != 0) {
env_ptrs.push_back(&envs[index]);
}
}
SerializePipeline(key, env_ptrs, pipeline_cache_filename, CACHE_VERSION);
});
return pipeline;
}
std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
const auto& qmd{kepler_compute.launch_description};
ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
env.SetCachedSize(shader->size_bytes);
main_pools.ReleaseContents();
auto pipeline{CreateComputePipeline(main_pools, key, env, true)};
if (!pipeline || pipeline_cache_filename.empty()) {
return pipeline;
}
serialization_thread.QueueWork([this, key, env = std::move(env)] {
SerializePipeline(key, std::array<const GenericEnvironment*, 1>{&env},
pipeline_cache_filename, CACHE_VERSION);
});
return pipeline;
}
std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
ShaderPools& pools, const ComputePipelineCacheKey& key, Shader::Environment& env,
bool build_in_parallel) try {
LOG_INFO(Render_Vulkan, "0x{:016x}", key.Hash());
Shader::Maxwell::Flow::CFG cfg{env, pools.flow_block, env.StartAddress()};
auto program{TranslateProgram(pools.inst, pools.block, env, cfg, host_info)};
const std::vector<u32> code{EmitSPIRV(profile, program)};
device.SaveShader(code);
vk::ShaderModule spv_module{BuildShader(device, code)};
if (device.HasDebuggingToolAttached()) {
const auto name{fmt::format("Shader {:016x}", key.unique_hash)};
spv_module.SetObjectNameEXT(name.c_str());
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
return std::make_unique<ComputePipeline>(device, descriptor_pool, update_descriptor_queue,
thread_worker, &shader_notify, program.info,
std::move(spv_module));
} catch (const Shader::Exception& exception) {
LOG_ERROR(Render_Vulkan, "{}", exception.what());
return nullptr;
}
} // namespace Vulkan

View File

@ -6,24 +6,28 @@
#include <array>
#include <cstddef>
#include <filesystem>
#include <iosfwd>
#include <memory>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include <boost/functional/hash.hpp>
#include "common/common_types.h"
#include "video_core/engines/const_buffer_engine_interface.h"
#include "common/thread_worker.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/host_translate_info.h"
#include "shader_recompiler/object_pool.h"
#include "shader_recompiler/profile.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/shader/async_shaders.h"
#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/shader_cache.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@ -31,23 +35,24 @@ namespace Core {
class System;
}
namespace Vulkan {
namespace Shader::IR {
struct Program;
}
class Device;
class RasterizerVulkan;
class VKComputePipeline;
class VKDescriptorPool;
class VKScheduler;
class VKUpdateDescriptorQueue;
namespace VideoCore {
class ShaderNotify;
}
namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct ComputePipelineCacheKey {
GPUVAddr shader;
u64 unique_hash;
u32 shared_memory_size;
std::array<u32, 3> workgroup_size;
std::size_t Hash() const noexcept;
size_t Hash() const noexcept;
bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
@ -63,16 +68,9 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
namespace std {
template <>
struct hash<Vulkan::GraphicsPipelineCacheKey> {
std::size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
return k.Hash();
}
};
template <>
struct hash<Vulkan::ComputePipelineCacheKey> {
std::size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
return k.Hash();
}
};
@ -81,94 +79,90 @@ struct hash<Vulkan::ComputePipelineCacheKey> {
namespace Vulkan {
class Shader {
public:
explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine_,
Tegra::Engines::ShaderType stage_, GPUVAddr gpu_addr, VAddr cpu_addr_,
VideoCommon::Shader::ProgramCode program_code, u32 main_offset_);
~Shader();
class ComputePipeline;
class Device;
class DescriptorPool;
class RasterizerVulkan;
class RenderPassCache;
class VKScheduler;
class VKUpdateDescriptorQueue;
GPUVAddr GetGpuAddr() const {
return gpu_addr;
using VideoCommon::ShaderInfo;
struct ShaderPools {
void ReleaseContents() {
flow_block.ReleaseContents();
block.ReleaseContents();
inst.ReleaseContents();
}
VideoCommon::Shader::ShaderIR& GetIR() {
return shader_ir;
}
const VideoCommon::Shader::ShaderIR& GetIR() const {
return shader_ir;
}
const VideoCommon::Shader::Registry& GetRegistry() const {
return registry;
}
const ShaderEntries& GetEntries() const {
return entries;
}
private:
GPUVAddr gpu_addr{};
VideoCommon::Shader::ProgramCode program_code;
VideoCommon::Shader::Registry registry;
VideoCommon::Shader::ShaderIR shader_ir;
ShaderEntries entries;
Shader::ObjectPool<Shader::IR::Inst> inst;
Shader::ObjectPool<Shader::IR::Block> block;
Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block;
};
class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> {
class PipelineCache : public VideoCommon::ShaderCache {
public:
explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu,
Tegra::Engines::Maxwell3D& maxwell3d,
Tegra::Engines::KeplerCompute& kepler_compute,
Tegra::MemoryManager& gpu_memory, const Device& device,
VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue);
~VKPipelineCache() override;
explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
Tegra::Engines::KeplerCompute& kepler_compute,
Tegra::MemoryManager& gpu_memory, const Device& device,
VKScheduler& scheduler, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue,
RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
~PipelineCache();
std::array<Shader*, Maxwell::MaxShaderProgram> GetShaders();
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipeline();
VKGraphicsPipeline* GetGraphicsPipeline(const GraphicsPipelineCacheKey& key,
u32 num_color_buffers,
VideoCommon::Shader::AsyncShaders& async_shaders);
[[nodiscard]] ComputePipeline* CurrentComputePipeline();
VKComputePipeline& GetComputePipeline(const ComputePipelineCacheKey& key);
void EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline);
protected:
void OnShaderRemoval(Shader* shader) final;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback);
private:
std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
const FixedPipelineState& fixed_state);
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipelineSlowPath();
Tegra::GPU& gpu;
Tegra::Engines::Maxwell3D& maxwell3d;
Tegra::Engines::KeplerCompute& kepler_compute;
Tegra::MemoryManager& gpu_memory;
[[nodiscard]] GraphicsPipeline* BuiltPipeline(GraphicsPipeline* pipeline) const noexcept;
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline();
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline(
ShaderPools& pools, const GraphicsPipelineCacheKey& key,
std::span<Shader::Environment* const> envs, bool build_in_parallel);
std::unique_ptr<ComputePipeline> CreateComputePipeline(const ComputePipelineCacheKey& key,
const ShaderInfo* shader);
std::unique_ptr<ComputePipeline> CreateComputePipeline(ShaderPools& pools,
const ComputePipelineCacheKey& key,
Shader::Environment& env,
bool build_in_parallel);
const Device& device;
VKScheduler& scheduler;
VKDescriptorPool& descriptor_pool;
DescriptorPool& descriptor_pool;
VKUpdateDescriptorQueue& update_descriptor_queue;
RenderPassCache& render_pass_cache;
BufferCache& buffer_cache;
TextureCache& texture_cache;
VideoCore::ShaderNotify& shader_notify;
bool use_asynchronous_shaders{};
std::unique_ptr<Shader> null_shader;
std::unique_ptr<Shader> null_kernel;
GraphicsPipelineCacheKey graphics_key{};
GraphicsPipeline* current_pipeline{};
std::array<Shader*, Maxwell::MaxShaderProgram> last_shaders{};
std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<ComputePipeline>> compute_cache;
std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<GraphicsPipeline>> graphics_cache;
GraphicsPipelineCacheKey last_graphics_key;
VKGraphicsPipeline* last_graphics_pipeline = nullptr;
ShaderPools main_pools;
std::mutex pipeline_cache;
std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<VKGraphicsPipeline>>
graphics_cache;
std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<VKComputePipeline>> compute_cache;
Shader::Profile profile;
Shader::HostTranslateInfo host_info;
std::filesystem::path pipeline_cache_filename;
Common::ThreadWorker workers;
Common::ThreadWorker serialization_thread;
};
void FillDescriptorUpdateTemplateEntries(
const ShaderEntries& entries, u32& binding, u32& offset,
std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries);
} // namespace Vulkan

View File

@ -114,14 +114,10 @@ void HostCounter::EndQuery() {
}
u64 HostCounter::BlockingQuery() const {
if (tick >= cache.GetScheduler().CurrentTick()) {
cache.GetScheduler().Flush();
}
cache.GetScheduler().Wait(tick);
u64 data;
const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
query.first, query.second, 1, sizeof(data), &data, sizeof(data),
VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
query.first, query.second, 1, sizeof(data), &data, sizeof(data), VK_QUERY_RESULT_64_BIT);
switch (query_result) {
case VK_SUCCESS:

View File

@ -24,7 +24,6 @@
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
@ -55,11 +54,10 @@ struct DrawParams {
u32 num_instances;
u32 base_vertex;
u32 num_vertices;
u32 first_index;
bool is_indexed;
};
constexpr auto COMPUTE_SHADER_INDEX = static_cast<size_t>(Tegra::Engines::ShaderType::Compute);
VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t index) {
const auto& src = regs.viewport_transform[index];
const float width = src.scale_x * 2.0f;
@ -97,118 +95,6 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index) {
return scissor;
}
std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders) {
std::array<GPUVAddr, Maxwell::MaxShaderProgram> addresses;
for (size_t i = 0; i < std::size(addresses); ++i) {
addresses[i] = shaders[i] ? shaders[i]->GetGpuAddr() : 0;
}
return addresses;
}
struct TextureHandle {
constexpr TextureHandle(u32 data, bool via_header_index) {
const Tegra::Texture::TextureHandle handle{data};
image = handle.tic_id;
sampler = via_header_index ? image : handle.tsc_id.Value();
}
u32 image;
u32 sampler;
};
template <typename Engine, typename Entry>
TextureHandle GetTextureInfo(const Engine& engine, bool via_header_index, const Entry& entry,
size_t stage, size_t index = 0) {
const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage);
if constexpr (std::is_same_v<Entry, SamplerEntry>) {
if (entry.is_separated) {
const u32 buffer_1 = entry.buffer;
const u32 buffer_2 = entry.secondary_buffer;
const u32 offset_1 = entry.offset;
const u32 offset_2 = entry.secondary_offset;
const u32 handle_1 = engine.AccessConstBuffer32(shader_type, buffer_1, offset_1);
const u32 handle_2 = engine.AccessConstBuffer32(shader_type, buffer_2, offset_2);
return TextureHandle(handle_1 | handle_2, via_header_index);
}
}
if (entry.is_bindless) {
const u32 raw = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
return TextureHandle(raw, via_header_index);
}
const u32 buffer = engine.GetBoundBuffer();
const u64 offset = (entry.offset + index) * sizeof(u32);
return TextureHandle(engine.AccessConstBuffer32(shader_type, buffer, offset), via_header_index);
}
ImageViewType ImageViewTypeFromEntry(const SamplerEntry& entry) {
if (entry.is_buffer) {
return ImageViewType::e2D;
}
switch (entry.type) {
case Tegra::Shader::TextureType::Texture1D:
return entry.is_array ? ImageViewType::e1DArray : ImageViewType::e1D;
case Tegra::Shader::TextureType::Texture2D:
return entry.is_array ? ImageViewType::e2DArray : ImageViewType::e2D;
case Tegra::Shader::TextureType::Texture3D:
return ImageViewType::e3D;
case Tegra::Shader::TextureType::TextureCube:
return entry.is_array ? ImageViewType::CubeArray : ImageViewType::Cube;
}
UNREACHABLE();
return ImageViewType::e2D;
}
ImageViewType ImageViewTypeFromEntry(const ImageEntry& entry) {
switch (entry.type) {
case Tegra::Shader::ImageType::Texture1D:
return ImageViewType::e1D;
case Tegra::Shader::ImageType::Texture1DArray:
return ImageViewType::e1DArray;
case Tegra::Shader::ImageType::Texture2D:
return ImageViewType::e2D;
case Tegra::Shader::ImageType::Texture2DArray:
return ImageViewType::e2DArray;
case Tegra::Shader::ImageType::Texture3D:
return ImageViewType::e3D;
case Tegra::Shader::ImageType::TextureBuffer:
return ImageViewType::Buffer;
}
UNREACHABLE();
return ImageViewType::e2D;
}
void PushImageDescriptors(const ShaderEntries& entries, TextureCache& texture_cache,
VKUpdateDescriptorQueue& update_descriptor_queue,
ImageViewId*& image_view_id_ptr, VkSampler*& sampler_ptr) {
for ([[maybe_unused]] const auto& entry : entries.uniform_texels) {
const ImageViewId image_view_id = *image_view_id_ptr++;
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
update_descriptor_queue.AddTexelBuffer(image_view.BufferView());
}
for (const auto& entry : entries.samplers) {
for (size_t i = 0; i < entry.size; ++i) {
const VkSampler sampler = *sampler_ptr++;
const ImageViewId image_view_id = *image_view_id_ptr++;
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
const VkImageView handle = image_view.Handle(ImageViewTypeFromEntry(entry));
update_descriptor_queue.AddSampledImage(handle, sampler);
}
}
for ([[maybe_unused]] const auto& entry : entries.storage_texels) {
const ImageViewId image_view_id = *image_view_id_ptr++;
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
update_descriptor_queue.AddTexelBuffer(image_view.BufferView());
}
for (const auto& entry : entries.images) {
// TODO: Mark as modified
const ImageViewId image_view_id = *image_view_id_ptr++;
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
const VkImageView handle = image_view.Handle(ImageViewTypeFromEntry(entry));
update_descriptor_queue.AddImage(handle);
}
}
DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instanced,
bool is_indexed) {
DrawParams params{
@ -216,6 +102,7 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
.num_instances = is_instanced ? num_instances : 1,
.base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first,
.num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count,
.first_index = is_indexed ? regs.index_array.first : 0,
.is_indexed = is_indexed,
};
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
@ -243,21 +130,21 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
blit_image(device, scheduler, state_tracker, descriptor_pool),
astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
memory_allocator),
texture_cache_runtime{device, scheduler, memory_allocator,
staging_pool, blit_image, astc_decoder_pass},
render_pass_cache(device), texture_cache_runtime{device, scheduler,
memory_allocator, staging_pool,
blit_image, astc_decoder_pass,
render_pass_cache},
texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
update_descriptor_queue, descriptor_pool),
buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
descriptor_pool, update_descriptor_queue),
pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
texture_cache, gpu.ShaderNotify()),
query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) {
wfi_event(device.GetLogical().CreateEvent()) {
scheduler.SetQueryCache(query_cache);
if (device.UseAsynchronousShaders()) {
async_shaders.AllocateWorkers();
}
}
RasterizerVulkan::~RasterizerVulkan() = default;
@ -270,53 +157,30 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
query_cache.UpdateCounters();
graphics_key.fixed_state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported());
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
texture_cache.SynchronizeGraphicsDescriptors();
texture_cache.UpdateRenderTargets(false);
const auto shaders = pipeline_cache.GetShaders();
graphics_key.shaders = GetShaderAddresses(shaders);
SetupShaderDescriptors(shaders, is_indexed);
const Framebuffer* const framebuffer = texture_cache.GetFramebuffer();
graphics_key.renderpass = framebuffer->RenderPass();
VKGraphicsPipeline* const pipeline = pipeline_cache.GetGraphicsPipeline(
graphics_key, framebuffer->NumColorBuffers(), async_shaders);
if (pipeline == nullptr || pipeline->GetHandle() == VK_NULL_HANDLE) {
// Async graphics pipeline was not ready.
GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()};
if (!pipeline) {
return;
}
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
pipeline->Configure(is_indexed);
BeginTransformFeedback();
scheduler.RequestRenderpass(framebuffer);
scheduler.BindGraphicsPipeline(pipeline->GetHandle());
UpdateDynamicStates();
const auto& regs = maxwell3d.regs;
const u32 num_instances = maxwell3d.mme_draw.instance_count;
const DrawParams draw_params = MakeDrawParams(regs, num_instances, is_instanced, is_indexed);
const VkPipelineLayout pipeline_layout = pipeline->GetLayout();
const VkDescriptorSet descriptor_set = pipeline->CommitDescriptorSet();
scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) {
if (descriptor_set) {
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
DESCRIPTOR_SET, descriptor_set, nullptr);
}
const auto& regs{maxwell3d.regs};
const u32 num_instances{maxwell3d.mme_draw.instance_count};
const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, 0,
draw_params.base_vertex, draw_params.base_instance);
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
draw_params.first_index, draw_params.base_vertex,
draw_params.base_instance);
} else {
cmdbuf.Draw(draw_params.num_vertices, draw_params.num_instances,
draw_params.base_vertex, draw_params.base_instance);
}
});
EndTransformFeedback();
}
@ -326,6 +190,7 @@ void RasterizerVulkan::Clear() {
if (!maxwell3d.ShouldExecute()) {
return;
}
FlushWork();
query_cache.UpdateCounters();
@ -395,73 +260,20 @@ void RasterizerVulkan::Clear() {
});
}
void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
MICROPROFILE_SCOPE(Vulkan_Compute);
void RasterizerVulkan::DispatchCompute() {
FlushWork();
query_cache.UpdateCounters();
const auto& launch_desc = kepler_compute.launch_description;
auto& pipeline = pipeline_cache.GetComputePipeline({
.shader = code_addr,
.shared_memory_size = launch_desc.shared_alloc,
.workgroup_size{
launch_desc.block_dim_x,
launch_desc.block_dim_y,
launch_desc.block_dim_z,
},
});
// Compute dispatches can't be executed inside a renderpass
scheduler.RequestOutsideRenderPassOperationContext();
image_view_indices.clear();
sampler_handles.clear();
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
const auto& entries = pipeline.GetEntries();
buffer_cache.SetEnabledComputeUniformBuffers(entries.enabled_uniform_buffers);
buffer_cache.UnbindComputeStorageBuffers();
u32 ssbo_index = 0;
for (const auto& buffer : entries.global_buffers) {
buffer_cache.BindComputeStorageBuffer(ssbo_index, buffer.cbuf_index, buffer.cbuf_offset,
buffer.is_written);
++ssbo_index;
ComputePipeline* const pipeline{pipeline_cache.CurrentComputePipeline()};
if (!pipeline) {
return;
}
buffer_cache.UpdateComputeBuffers();
std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache);
texture_cache.SynchronizeComputeDescriptors();
SetupComputeUniformTexels(entries);
SetupComputeTextures(entries);
SetupComputeStorageTexels(entries);
SetupComputeImages(entries);
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
update_descriptor_queue.Acquire();
buffer_cache.BindHostComputeBuffers();
ImageViewId* image_view_id_ptr = image_view_ids.data();
VkSampler* sampler_ptr = sampler_handles.data();
PushImageDescriptors(entries, texture_cache, update_descriptor_queue, image_view_id_ptr,
sampler_ptr);
const VkPipeline pipeline_handle = pipeline.GetHandle();
const VkPipelineLayout pipeline_layout = pipeline.GetLayout();
const VkDescriptorSet descriptor_set = pipeline.CommitDescriptorSet();
scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
grid_z = launch_desc.grid_dim_z, pipeline_handle, pipeline_layout,
descriptor_set](vk::CommandBuffer cmdbuf) {
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_handle);
if (descriptor_set) {
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout,
DESCRIPTOR_SET, descriptor_set, nullptr);
}
cmdbuf.Dispatch(grid_x, grid_y, grid_z);
});
const auto& qmd{kepler_compute.launch_description};
const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
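// Vulkan forbids vkCmdDispatch while a render pass instance is active, so make
// sure the recorded dispatch lands outside of any render pass.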
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
}
void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) {
@ -626,6 +438,7 @@ void RasterizerVulkan::WaitForIdle() {
void RasterizerVulkan::FragmentBarrier() {
// We already put barriers when a render pass finishes
scheduler.RequestOutsideRenderPassOperationContext();
}
void RasterizerVulkan::TiledCacheBarrier() {
@ -633,10 +446,11 @@ void RasterizerVulkan::TiledCacheBarrier() {
}
void RasterizerVulkan::FlushCommands() {
if (draw_counter > 0) {
draw_counter = 0;
scheduler.Flush();
if (draw_counter == 0) {
return;
}
draw_counter = 0;
scheduler.Flush();
}
void RasterizerVulkan::TickFrame() {
@ -676,13 +490,18 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
if (!image_view) {
return false;
}
screen_info.image_view = image_view->Handle(VideoCommon::ImageViewType::e2D);
screen_info.image_view = image_view->Handle(Shader::TextureType::Color2D);
screen_info.width = image_view->size.width;
screen_info.height = image_view->size.height;
screen_info.is_srgb = VideoCore::Surface::IsPixelFormatSRGB(image_view->format);
return true;
}
void RasterizerVulkan::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) {
pipeline_cache.LoadDiskResources(title_id, stop_loading, callback);
}
void RasterizerVulkan::FlushWork() {
static constexpr u32 DRAWS_TO_DISPATCH = 4096;
@ -691,13 +510,11 @@ void RasterizerVulkan::FlushWork() {
if ((++draw_counter & 7) != 7) {
return;
}
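// Only one in every eight draws reaches this point, keeping the dispatch/flush
// bookkeeping off the hot path.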
if (draw_counter < DRAWS_TO_DISPATCH) {
// Send recorded tasks to the worker thread
scheduler.DispatchWork();
return;
}
// Otherwise (every DRAWS_TO_DISPATCH draws) flush execution.
// This submits commands to the Vulkan driver.
scheduler.Flush();
@ -716,52 +533,6 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
return buffer_cache.DMACopy(src_address, dest_address, amount);
}
void RasterizerVulkan::SetupShaderDescriptors(
const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders, bool is_indexed) {
image_view_indices.clear();
sampler_handles.clear();
for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
Shader* const shader = shaders[stage + 1];
if (!shader) {
continue;
}
const ShaderEntries& entries = shader->GetEntries();
SetupGraphicsUniformTexels(entries, stage);
SetupGraphicsTextures(entries, stage);
SetupGraphicsStorageTexels(entries, stage);
SetupGraphicsImages(entries, stage);
buffer_cache.SetEnabledUniformBuffers(stage, entries.enabled_uniform_buffers);
buffer_cache.UnbindGraphicsStorageBuffers(stage);
u32 ssbo_index = 0;
for (const auto& buffer : entries.global_buffers) {
buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, buffer.cbuf_index,
buffer.cbuf_offset, buffer.is_written);
++ssbo_index;
}
}
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
buffer_cache.UpdateGraphicsBuffers(is_indexed);
texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
buffer_cache.BindHostGeometryBuffers(is_indexed);
update_descriptor_queue.Acquire();
ImageViewId* image_view_id_ptr = image_view_ids.data();
VkSampler* sampler_ptr = sampler_handles.data();
for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
// Skip VertexA stage
Shader* const shader = shaders[stage + 1];
if (!shader) {
continue;
}
buffer_cache.BindHostStageBuffers(stage);
PushImageDescriptors(shader->GetEntries(), texture_cache, update_descriptor_queue,
image_view_id_ptr, sampler_ptr);
}
}
void RasterizerVulkan::UpdateDynamicStates() {
auto& regs = maxwell3d.regs;
UpdateViewportsState(regs);
@ -770,6 +541,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateBlendConstants(regs);
UpdateDepthBounds(regs);
UpdateStencilFaces(regs);
UpdateLineWidth(regs);
if (device.IsExtExtendedDynamicStateSupported()) {
UpdateCullMode(regs);
UpdateDepthBoundsTestEnable(regs);
@ -779,6 +551,9 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateFrontFace(regs);
UpdateStencilOp(regs);
UpdateStencilTestEnable(regs);
if (device.IsExtVertexInputDynamicStateSupported()) {
UpdateVertexInput(regs);
}
}
}
@ -810,89 +585,6 @@ void RasterizerVulkan::EndTransformFeedback() {
[](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
}
void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, size_t stage) {
const auto& regs = maxwell3d.regs;
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
for (const auto& entry : entries.uniform_texels) {
const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
image_view_indices.push_back(handle.image);
}
}
void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, size_t stage) {
const auto& regs = maxwell3d.regs;
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
for (const auto& entry : entries.samplers) {
for (size_t index = 0; index < entry.size; ++index) {
const TextureHandle handle =
GetTextureInfo(maxwell3d, via_header_index, entry, stage, index);
image_view_indices.push_back(handle.image);
Sampler* const sampler = texture_cache.GetGraphicsSampler(handle.sampler);
sampler_handles.push_back(sampler->Handle());
}
}
}
void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, size_t stage) {
const auto& regs = maxwell3d.regs;
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
for (const auto& entry : entries.storage_texels) {
const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
image_view_indices.push_back(handle.image);
}
}
void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, size_t stage) {
const auto& regs = maxwell3d.regs;
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
for (const auto& entry : entries.images) {
const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
image_view_indices.push_back(handle.image);
}
}
void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
for (const auto& entry : entries.uniform_texels) {
const TextureHandle handle =
GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
image_view_indices.push_back(handle.image);
}
}
void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
for (const auto& entry : entries.samplers) {
for (size_t index = 0; index < entry.size; ++index) {
const TextureHandle handle = GetTextureInfo(kepler_compute, via_header_index, entry,
COMPUTE_SHADER_INDEX, index);
image_view_indices.push_back(handle.image);
Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
sampler_handles.push_back(sampler->Handle());
}
}
}
void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
for (const auto& entry : entries.storage_texels) {
const TextureHandle handle =
GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
image_view_indices.push_back(handle.image);
}
}
void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
for (const auto& entry : entries.images) {
const TextureHandle handle =
GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
image_view_indices.push_back(handle.image);
}
}
void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchViewports()) {
return;
@ -985,6 +677,14 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
}
}
void RasterizerVulkan::UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchLineWidth()) {
return;
}
const float width = regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased;
scheduler.Record([width](vk::CommandBuffer cmdbuf) { cmdbuf.SetLineWidth(width); });
}
void RasterizerVulkan::UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchCullMode()) {
return;
@ -999,6 +699,11 @@ void RasterizerVulkan::UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Re
if (!state_tracker.TouchDepthBoundsTestEnable()) {
return;
}
bool enabled = regs.depth_bounds_enable;
if (enabled && !device.IsDepthBoundsSupported()) {
LOG_WARNING(Render_Vulkan, "Depth bounds is enabled but not supported");
enabled = false;
}
scheduler.Record([enable = enabled](vk::CommandBuffer cmdbuf) {
cmdbuf.SetDepthBoundsTestEnableEXT(enable);
});
@ -1086,4 +791,62 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
});
}
void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
auto& dirty{maxwell3d.dirty.flags};
if (!dirty[Dirty::VertexInput]) {
return;
}
dirty[Dirty::VertexInput] = false;
boost::container::static_vector<VkVertexInputBindingDescription2EXT, 32> bindings;
boost::container::static_vector<VkVertexInputAttributeDescription2EXT, 32> attributes;
// There seems to be a bug on Nvidia's driver where updating only higher attributes ends up
// generating dirty state. Track the highest dirty attribute and update all attributes until
// that one.
size_t highest_dirty_attr{};
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
if (dirty[Dirty::VertexAttribute0 + index]) {
highest_dirty_attr = index;
}
}
for (size_t index = 0; index < highest_dirty_attr; ++index) {
const Maxwell::VertexAttribute attribute{regs.vertex_attrib_format[index]};
const u32 binding{attribute.buffer};
dirty[Dirty::VertexAttribute0 + index] = false;
dirty[Dirty::VertexBinding0 + static_cast<size_t>(binding)] = true;
if (!attribute.constant) {
attributes.push_back({
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT,
.pNext = nullptr,
.location = static_cast<u32>(index),
.binding = binding,
.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size),
.offset = attribute.offset,
});
}
}
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
if (!dirty[Dirty::VertexBinding0 + index]) {
continue;
}
dirty[Dirty::VertexBinding0 + index] = false;
const u32 binding{static_cast<u32>(index)};
const auto& input_binding{regs.vertex_array[binding]};
const bool is_instanced{regs.instanced_arrays.IsInstancingEnabled(binding)};
bindings.push_back({
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT,
.pNext = nullptr,
.binding = binding,
.stride = input_binding.stride,
.inputRate = is_instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX,
.divisor = is_instanced ? input_binding.divisor : 1,
});
}
scheduler.Record([bindings, attributes](vk::CommandBuffer cmdbuf) {
cmdbuf.SetVertexInputEXT(bindings, attributes);
});
}
} // namespace Vulkan
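UpdateVertexInput above assumes the device layer has already enabled VK_EXT_vertex_input_dynamic_state (surfaced here through Device::IsExtVertexInputDynamicStateSupported). A minimal, codebase-independent sketch of how that feature is requested at device creation time:
#include <vulkan/vulkan.h>
// Sketch only: chain the EXT feature struct into VkDeviceCreateInfo::pNext so
// that vkCmdSetVertexInputEXT becomes legal to record. Queue and extension
// setup is omitted for brevity.
VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT vertex_input_features{
    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT,
    .pNext = nullptr,
    .vertexInputDynamicState = VK_TRUE,
};
VkDeviceCreateInfo device_ci{
    .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
    .pNext = &vertex_input_features,
    // queues, enabled extensions ("VK_EXT_vertex_input_dynamic_state"), etc.
};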

View File

@ -21,14 +21,13 @@
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_fence_manager.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/shader/async_shaders.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@ -73,7 +72,7 @@ public:
void Draw(bool is_indexed, bool is_instanced) override;
void Clear() override;
void DispatchCompute(GPUVAddr code_addr) override;
void DispatchCompute() override;
void ResetCounter(VideoCore::QueryType type) override;
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
@ -102,19 +101,8 @@ public:
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
return async_shaders;
}
const VideoCommon::Shader::AsyncShaders& GetAsyncShaders() const {
return async_shaders;
}
/// Maximum supported size that a constbuffer can have in bytes.
static constexpr size_t MaxConstbufferSize = 0x10000;
static_assert(MaxConstbufferSize % (4 * sizeof(float)) == 0,
"The maximum size of a constbuffer must be a multiple of the size of GLvec4");
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) override;
private:
static constexpr size_t MAX_TEXTURES = 192;
@ -125,46 +113,19 @@ private:
void FlushWork();
/// Setup descriptors in the graphics pipeline.
void SetupShaderDescriptors(const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders,
bool is_indexed);
void UpdateDynamicStates();
void BeginTransformFeedback();
void EndTransformFeedback();
/// Setup uniform texels in the graphics pipeline.
void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);
/// Setup textures in the graphics pipeline.
void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);
/// Setup storage texels in the graphics pipeline.
void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
/// Setup images in the graphics pipeline.
void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);
/// Setup texel buffers in the compute pipeline.
void SetupComputeUniformTexels(const ShaderEntries& entries);
/// Setup textures in the compute pipeline.
void SetupComputeTextures(const ShaderEntries& entries);
/// Setup storage texels in the compute pipeline.
void SetupComputeStorageTexels(const ShaderEntries& entries);
/// Setup images in the compute pipeline.
void SetupComputeImages(const ShaderEntries& entries);
void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
@ -175,6 +136,8 @@ private:
void UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
Tegra::GPU& gpu;
Tegra::MemoryManager& gpu_memory;
Tegra::Engines::Maxwell3D& maxwell3d;
@ -187,24 +150,22 @@ private:
VKScheduler& scheduler;
StagingBufferPool staging_pool;
VKDescriptorPool descriptor_pool;
DescriptorPool descriptor_pool;
VKUpdateDescriptorQueue update_descriptor_queue;
BlitImageHelper blit_image;
ASTCDecoderPass astc_decoder_pass;
GraphicsPipelineCacheKey graphics_key;
RenderPassCache render_pass_cache;
TextureCacheRuntime texture_cache_runtime;
TextureCache texture_cache;
BufferCacheRuntime buffer_cache_runtime;
BufferCache buffer_cache;
VKPipelineCache pipeline_cache;
PipelineCache pipeline_cache;
VKQueryCache query_cache;
AccelerateDMA accelerate_dma;
VKFenceManager fence_manager;
vk::Event wfi_event;
VideoCommon::Shader::AsyncShaders async_shaders;
boost::container::static_vector<u32, MAX_IMAGE_VIEWS> image_view_indices;
std::array<VideoCommon::ImageViewId, MAX_IMAGE_VIEWS> image_view_ids;

View File

@ -0,0 +1,96 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <unordered_map>
#include <boost/container/static_vector.hpp>
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
namespace {
using VideoCore::Surface::PixelFormat;
VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat format,
VkSampleCountFlagBits samples) {
using MaxwellToVK::SurfaceFormat;
return {
.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
.format = SurfaceFormat(device, FormatType::Optimal, true, format).format,
.samples = samples,
.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
.initialLayout = VK_IMAGE_LAYOUT_GENERAL,
.finalLayout = VK_IMAGE_LAYOUT_GENERAL,
};
}
} // Anonymous namespace
RenderPassCache::RenderPassCache(const Device& device_) : device{&device_} {}
VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
std::lock_guard lock{mutex};
const auto [pair, is_new] = cache.try_emplace(key);
if (!is_new) {
return *pair->second;
}
boost::container::static_vector<VkAttachmentDescription, 9> descriptions;
std::array<VkAttachmentReference, 8> references{};
u32 num_attachments{};
u32 num_colors{};
for (size_t index = 0; index < key.color_formats.size(); ++index) {
const PixelFormat format{key.color_formats[index]};
const bool is_valid{format != PixelFormat::Invalid};
references[index] = VkAttachmentReference{
.attachment = is_valid ? num_colors : VK_ATTACHMENT_UNUSED,
.layout = VK_IMAGE_LAYOUT_GENERAL,
};
if (is_valid) {
descriptions.push_back(AttachmentDescription(*device, format, key.samples));
num_attachments = static_cast<u32>(index + 1);
++num_colors;
}
}
const bool has_depth{key.depth_format != PixelFormat::Invalid};
VkAttachmentReference depth_reference{};
if (has_depth) {

depth_reference = VkAttachmentReference{
.attachment = num_colors,
.layout = VK_IMAGE_LAYOUT_GENERAL,
};
descriptions.push_back(AttachmentDescription(*device, key.depth_format, key.samples));
}
const VkSubpassDescription subpass{
.flags = 0,
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.inputAttachmentCount = 0,
.pInputAttachments = nullptr,
.colorAttachmentCount = num_attachments,
.pColorAttachments = references.data(),
.pResolveAttachments = nullptr,
.pDepthStencilAttachment = has_depth ? &depth_reference : nullptr,
.preserveAttachmentCount = 0,
.pPreserveAttachments = nullptr,
};
pair->second = device->GetLogical().CreateRenderPass({
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.attachmentCount = static_cast<u32>(descriptions.size()),
.pAttachments = descriptions.empty() ? nullptr : descriptions.data(),
.subpassCount = 1,
.pSubpasses = &subpass,
.dependencyCount = 0,
.pDependencies = nullptr,
});
return *pair->second;
}
} // namespace Vulkan

View File

@ -0,0 +1,55 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <mutex>
#include <unordered_map>
#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
struct RenderPassKey {
auto operator<=>(const RenderPassKey&) const noexcept = default;
std::array<VideoCore::Surface::PixelFormat, 8> color_formats;
VideoCore::Surface::PixelFormat depth_format;
VkSampleCountFlagBits samples;
};
} // namespace Vulkan
namespace std {
template <>
struct hash<Vulkan::RenderPassKey> {
[[nodiscard]] size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
size_t value = static_cast<size_t>(key.depth_format) << 48;
value ^= static_cast<size_t>(key.samples) << 52;
for (size_t i = 0; i < key.color_formats.size(); ++i) {
value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
}
return value;
}
};
} // namespace std
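// Usage sketch (illustrative, not part of the change): the specialization above
// is what lets RenderPassCache key an std::unordered_map on RenderPassKey. The
// narrow 6-bit shifts can collide across color targets, which is fine because
// the map still compares whole keys through the operator== generated from the
// defaulted operator<=>.
//
//   Vulkan::RenderPassKey key{};
//   key.color_formats.fill(VideoCore::Surface::PixelFormat::Invalid);
//   key.color_formats[0] = VideoCore::Surface::PixelFormat::B8G8R8A8_UNORM;
//   key.depth_format = VideoCore::Surface::PixelFormat::D32_FLOAT;
//   key.samples = VK_SAMPLE_COUNT_1_BIT;
//   const VkRenderPass handle = render_pass_cache.Get(key);  // hypothetical instance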
namespace Vulkan {
class Device;
class RenderPassCache {
public:
explicit RenderPassCache(const Device& device_);
VkRenderPass Get(const RenderPassKey& key);
private:
const Device* device{};
std::unordered_map<RenderPassKey, vk::RenderPass> cache;
std::mutex mutex;
};
} // namespace Vulkan

View File

@ -10,18 +10,16 @@
namespace Vulkan {
ResourcePool::ResourcePool(MasterSemaphore& master_semaphore_, size_t grow_step_)
: master_semaphore{master_semaphore_}, grow_step{grow_step_} {}
ResourcePool::~ResourcePool() = default;
: master_semaphore{&master_semaphore_}, grow_step{grow_step_} {}
size_t ResourcePool::CommitResource() {
// Refresh semaphore to query updated results
master_semaphore.Refresh();
const u64 gpu_tick = master_semaphore.KnownGpuTick();
master_semaphore->Refresh();
const u64 gpu_tick = master_semaphore->KnownGpuTick();
const auto search = [this, gpu_tick](size_t begin, size_t end) -> std::optional<size_t> {
for (size_t iterator = begin; iterator < end; ++iterator) {
if (gpu_tick >= ticks[iterator]) {
ticks[iterator] = master_semaphore.CurrentTick();
ticks[iterator] = master_semaphore->CurrentTick();
return iterator;
}
}
@ -36,7 +34,7 @@ size_t ResourcePool::CommitResource() {
// Both searches failed, the pool is full; handle it.
const size_t free_resource = ManageOverflow();
ticks[free_resource] = master_semaphore.CurrentTick();
ticks[free_resource] = master_semaphore->CurrentTick();
found = free_resource;
}
}
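The CommitResource scan above is a generic pattern: every pool slot remembers the timeline tick at which it was last handed out, and a slot becomes reusable once the GPU has signalled a tick at or past that value. A standalone sketch of the same idea, decoupled from the Vulkan wrappers (gpu_tick stands in for MasterSemaphore::KnownGpuTick, current_tick for the value the next submission will signal):
#include <cstdint>
#include <optional>
#include <vector>

class TickPool {
public:
    // Returns a free slot index, or nullopt when the caller must grow the pool,
    // mirroring the ResourcePool::Grow() path above.
    std::optional<std::size_t> Commit(std::uint64_t gpu_tick, std::uint64_t current_tick) {
        for (std::size_t index = 0; index < ticks.size(); ++index) {
            if (gpu_tick >= ticks[index]) {
                ticks[index] = current_tick; // Reserve the slot for the upcoming submission
                return index;
            }
        }
        return std::nullopt;
    }

    void Grow(std::size_t count) {
        ticks.resize(ticks.size() + count, 0);
    }

private:
    std::vector<std::uint64_t> ticks;
};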

View File

@ -18,8 +18,16 @@ class MasterSemaphore;
*/
class ResourcePool {
public:
explicit ResourcePool() = default;
explicit ResourcePool(MasterSemaphore& master_semaphore, size_t grow_step);
virtual ~ResourcePool();
virtual ~ResourcePool() = default;
ResourcePool& operator=(ResourcePool&&) noexcept = default;
ResourcePool(ResourcePool&&) noexcept = default;
ResourcePool& operator=(const ResourcePool&) = default;
ResourcePool(const ResourcePool&) = default;
protected:
size_t CommitResource();
@ -34,7 +42,7 @@ private:
/// Allocates a new page of resources.
void Grow();
MasterSemaphore& master_semaphore;
MasterSemaphore* master_semaphore{};
size_t grow_step = 0; ///< Number of new resources created after an overflow
size_t hint_iterator = 0; ///< Hint to where the next free resources is likely to be found
std::vector<u64> ticks; ///< Ticks for each resource

View File

@ -31,7 +31,7 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
command->~Command();
command = next;
}
submit = false;
command_offset = 0;
first = nullptr;
last = nullptr;
@ -42,13 +42,16 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
master_semaphore{std::make_unique<MasterSemaphore>(device)},
command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
AcquireNewChunk();
AllocateNewContext();
AllocateWorkerCommandBuffer();
worker_thread = std::thread(&VKScheduler::WorkerThread, this);
}
VKScheduler::~VKScheduler() {
quit = true;
cv.notify_all();
{
std::lock_guard lock{work_mutex};
quit = true;
}
work_cv.notify_all();
worker_thread.join();
}
@ -60,6 +63,7 @@ void VKScheduler::Flush(VkSemaphore semaphore) {
void VKScheduler::Finish(VkSemaphore semaphore) {
const u64 presubmit_tick = CurrentTick();
SubmitExecution(semaphore);
WaitWorker();
Wait(presubmit_tick);
AllocateNewContext();
}
@ -68,20 +72,19 @@ void VKScheduler::WaitWorker() {
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
DispatchWork();
bool finished = false;
do {
cv.notify_all();
std::unique_lock lock{mutex};
finished = chunk_queue.Empty();
} while (!finished);
std::unique_lock lock{work_mutex};
wait_cv.wait(lock, [this] { return work_queue.empty(); });
}
void VKScheduler::DispatchWork() {
if (chunk->Empty()) {
return;
}
chunk_queue.Push(std::move(chunk));
cv.notify_all();
{
std::lock_guard lock{work_mutex};
work_queue.push(std::move(chunk));
}
work_cv.notify_one();
AcquireNewChunk();
}
@ -124,85 +127,41 @@ void VKScheduler::RequestOutsideRenderPassOperationContext() {
EndRenderPass();
}
void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
if (state.graphics_pipeline == pipeline) {
return;
return false;
}
state.graphics_pipeline = pipeline;
Record([pipeline](vk::CommandBuffer cmdbuf) {
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
});
return true;
}
void VKScheduler::WorkerThread() {
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
std::unique_lock lock{mutex};
Common::SetCurrentThreadName("yuzu:VulkanWorker");
do {
cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; });
if (quit) {
continue;
if (work_queue.empty()) {
wait_cv.notify_all();
}
auto extracted_chunk = std::move(chunk_queue.Front());
chunk_queue.Pop();
extracted_chunk->ExecuteAll(current_cmdbuf);
chunk_reserve.Push(std::move(extracted_chunk));
std::unique_ptr<CommandChunk> work;
{
std::unique_lock lock{work_mutex};
work_cv.wait(lock, [this] { return !work_queue.empty() || quit; });
if (quit) {
continue;
}
work = std::move(work_queue.front());
work_queue.pop();
}
const bool has_submit = work->HasSubmit();
work->ExecuteAll(current_cmdbuf);
if (has_submit) {
AllocateWorkerCommandBuffer();
}
std::lock_guard reserve_lock{reserve_mutex};
chunk_reserve.push_back(std::move(work));
} while (!quit);
}
void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
EndPendingOperations();
InvalidateState();
WaitWorker();
std::unique_lock lock{mutex};
current_cmdbuf.End();
const VkSemaphore timeline_semaphore = master_semaphore->Handle();
const u32 num_signal_semaphores = semaphore ? 2U : 1U;
const u64 signal_value = master_semaphore->CurrentTick();
const u64 wait_value = signal_value - 1;
const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
master_semaphore->NextTick();
const std::array signal_values{signal_value, u64(0)};
const std::array signal_semaphores{timeline_semaphore, semaphore};
const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
.pNext = nullptr,
.waitSemaphoreValueCount = 1,
.pWaitSemaphoreValues = &wait_value,
.signalSemaphoreValueCount = num_signal_semaphores,
.pSignalSemaphoreValues = signal_values.data(),
};
const VkSubmitInfo submit_info{
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = &timeline_si,
.waitSemaphoreCount = 1,
.pWaitSemaphores = &timeline_semaphore,
.pWaitDstStageMask = &wait_stage_mask,
.commandBufferCount = 1,
.pCommandBuffers = current_cmdbuf.address(),
.signalSemaphoreCount = num_signal_semaphores,
.pSignalSemaphores = signal_semaphores.data(),
};
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
case VK_SUCCESS:
break;
case VK_ERROR_DEVICE_LOST:
device.ReportLoss();
[[fallthrough]];
default:
vk::Check(result);
}
}
void VKScheduler::AllocateNewContext() {
std::unique_lock lock{mutex};
void VKScheduler::AllocateWorkerCommandBuffer() {
current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
current_cmdbuf.Begin({
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
@ -210,7 +169,59 @@ void VKScheduler::AllocateNewContext() {
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
});
}
void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
EndPendingOperations();
InvalidateState();
const u64 signal_value = master_semaphore->NextTick();
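// Chain submissions on the timeline semaphore: this submission waits for the
// previous tick (signal_value - 1) and signals its own tick when it completes.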
Record([semaphore, signal_value, this](vk::CommandBuffer cmdbuf) {
cmdbuf.End();
const u32 num_signal_semaphores = semaphore ? 2U : 1U;
const u64 wait_value = signal_value - 1;
const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
const VkSemaphore timeline_semaphore = master_semaphore->Handle();
const std::array signal_values{signal_value, u64(0)};
const std::array signal_semaphores{timeline_semaphore, semaphore};
const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
.pNext = nullptr,
.waitSemaphoreValueCount = 1,
.pWaitSemaphoreValues = &wait_value,
.signalSemaphoreValueCount = num_signal_semaphores,
.pSignalSemaphoreValues = signal_values.data(),
};
const VkSubmitInfo submit_info{
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = &timeline_si,
.waitSemaphoreCount = 1,
.pWaitSemaphores = &timeline_semaphore,
.pWaitDstStageMask = &wait_stage_mask,
.commandBufferCount = 1,
.pCommandBuffers = cmdbuf.address(),
.signalSemaphoreCount = num_signal_semaphores,
.pSignalSemaphores = signal_semaphores.data(),
};
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
case VK_SUCCESS:
break;
case VK_ERROR_DEVICE_LOST:
device.ReportLoss();
[[fallthrough]];
default:
vk::Check(result);
}
});
chunk->MarkSubmit();
DispatchWork();
}
void VKScheduler::AllocateNewContext() {
// Enable counters once again. These are disabled when a command buffer is finished.
if (query_cache) {
query_cache->UpdateCounters();
@ -265,12 +276,13 @@ void VKScheduler::EndRenderPass() {
}
void VKScheduler::AcquireNewChunk() {
if (chunk_reserve.Empty()) {
std::lock_guard lock{reserve_mutex};
if (chunk_reserve.empty()) {
chunk = std::make_unique<CommandChunk>();
return;
}
chunk = std::move(chunk_reserve.Front());
chunk_reserve.Pop();
chunk = std::move(chunk_reserve.back());
chunk_reserve.pop_back();
}
} // namespace Vulkan
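The new scheduler replaces the lock-free SPSC queues with a mutex/condition-variable pair plus a recycled chunk reserve. A reduced sketch of that producer/consumer shape, independent of the vk:: wrappers (Work stands in for CommandChunk, names simplified):
#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct Work {
    void Execute() {}
};

class WorkerQueue {
public:
    WorkerQueue() : thread{&WorkerQueue::Loop, this} {}

    ~WorkerQueue() {
        {
            std::lock_guard lock{mutex};
            quit = true;
        }
        cv.notify_all();
        thread.join();
    }

    // Called from the render thread: hand over recorded work and wake the worker.
    void Push(std::unique_ptr<Work> work) {
        {
            std::lock_guard lock{mutex};
            queue.push(std::move(work));
        }
        cv.notify_one();
    }

private:
    void Loop() {
        while (true) {
            std::unique_ptr<Work> work;
            {
                std::unique_lock lock{mutex};
                cv.wait(lock, [this] { return !queue.empty() || quit; });
                if (quit) {
                    return;
                }
                work = std::move(queue.front());
                queue.pop();
            }
            work->Execute();
            std::lock_guard lock{reserve_mutex};
            reserve.push_back(std::move(work)); // Recycled instead of destroyed
        }
    }

    std::queue<std::unique_ptr<Work>> queue;
    std::vector<std::unique_ptr<Work>> reserve;
    std::mutex mutex;
    std::mutex reserve_mutex;
    std::condition_variable cv;
    bool quit = false;
    std::thread thread;
};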

View File

@ -8,12 +8,12 @@
#include <condition_variable>
#include <cstddef>
#include <memory>
#include <stack>
#include <thread>
#include <utility>
#include <queue>
#include "common/alignment.h"
#include "common/common_types.h"
#include "common/threadsafe_queue.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@ -22,6 +22,7 @@ namespace Vulkan {
class CommandPool;
class Device;
class Framebuffer;
class GraphicsPipeline;
class StateTracker;
class VKQueryCache;
@ -52,8 +53,8 @@ public:
/// of a renderpass.
void RequestOutsideRenderPassOperationContext();
/// Binds a pipeline to the current execution context.
void BindGraphicsPipeline(VkPipeline pipeline);
/// Update the pipeline to the current execution context.
bool UpdateGraphicsPipeline(GraphicsPipeline* pipeline);
/// Invalidates current command buffer state except for render passes
void InvalidateState();
@ -85,6 +86,10 @@ public:
/// Waits for the given tick to trigger on the GPU.
void Wait(u64 tick) {
if (tick >= master_semaphore->CurrentTick()) {
// Make sure we are not waiting for the current tick without signalling
Flush();
}
master_semaphore->Wait(tick);
}
@ -154,15 +159,24 @@ private:
return true;
}
void MarkSubmit() {
submit = true;
}
bool Empty() const {
return command_offset == 0;
}
bool HasSubmit() const {
return submit;
}
private:
Command* first = nullptr;
Command* last = nullptr;
size_t command_offset = 0;
bool submit = false;
alignas(std::max_align_t) std::array<u8, 0x8000> data{};
};
@ -170,11 +184,13 @@ private:
VkRenderPass renderpass = nullptr;
VkFramebuffer framebuffer = nullptr;
VkExtent2D render_area = {0, 0};
VkPipeline graphics_pipeline = nullptr;
GraphicsPipeline* graphics_pipeline = nullptr;
};
void WorkerThread();
void AllocateWorkerCommandBuffer();
void SubmitExecution(VkSemaphore semaphore);
void AllocateNewContext();
@ -204,11 +220,13 @@ private:
std::array<VkImage, 9> renderpass_images{};
std::array<VkImageSubresourceRange, 9> renderpass_image_ranges{};
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
std::mutex mutex;
std::condition_variable cv;
bool quit = false;
std::queue<std::unique_ptr<CommandChunk>> work_queue;
std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
std::mutex reserve_mutex;
std::mutex work_mutex;
std::condition_variable work_cv;
std::condition_variable wait_cv;
std::atomic_bool quit{};
};
} // namespace Vulkan

File diff suppressed because it is too large

View File

@ -1,99 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <set>
#include <vector>
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace Vulkan {
class Device;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using UniformTexelEntry = VideoCommon::Shader::SamplerEntry;
using SamplerEntry = VideoCommon::Shader::SamplerEntry;
using StorageTexelEntry = VideoCommon::Shader::ImageEntry;
using ImageEntry = VideoCommon::Shader::ImageEntry;
constexpr u32 DESCRIPTOR_SET = 0;
class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
public:
explicit constexpr ConstBufferEntry(const ConstBuffer& entry_, u32 index_)
: ConstBuffer{entry_}, index{index_} {}
constexpr u32 GetIndex() const {
return index;
}
private:
u32 index{};
};
struct GlobalBufferEntry {
u32 cbuf_index{};
u32 cbuf_offset{};
bool is_written{};
};
struct ShaderEntries {
u32 NumBindings() const {
return static_cast<u32>(const_buffers.size() + global_buffers.size() +
uniform_texels.size() + samplers.size() + storage_texels.size() +
images.size());
}
std::vector<ConstBufferEntry> const_buffers;
std::vector<GlobalBufferEntry> global_buffers;
std::vector<UniformTexelEntry> uniform_texels;
std::vector<SamplerEntry> samplers;
std::vector<StorageTexelEntry> storage_texels;
std::vector<ImageEntry> images;
std::set<u32> attributes;
std::array<bool, Maxwell::NumClipDistances> clip_distances{};
std::size_t shader_length{};
u32 enabled_uniform_buffers{};
bool uses_warps{};
};
struct Specialization final {
u32 base_binding{};
// Compute specific
std::array<u32, 3> workgroup_size{};
u32 shared_memory_size{};
// Graphics specific
std::optional<float> point_size;
std::bitset<Maxwell::NumVertexAttributes> enabled_attributes;
std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
bool ndc_minus_one_to_one{};
bool early_fragment_tests{};
float alpha_test_ref{};
Maxwell::ComparisonOp alpha_test_func{};
};
// Old gcc versions don't consider this trivially copyable.
// static_assert(std::is_trivially_copyable_v<Specialization>);
struct SPIRVShader {
std::vector<u32> code;
ShaderEntries entries;
};
ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir);
std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
Tegra::Engines::ShaderType stage,
const VideoCommon::Shader::Registry& registry,
const Specialization& specialization);
} // namespace Vulkan

View File

@ -91,7 +91,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
.flags = 0,
.size = STREAM_BUFFER_SIZE,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,

View File

@ -29,9 +29,10 @@ using Flags = Maxwell3D::DirtyState::Flags;
Flags MakeInvalidationFlags() {
static constexpr int INVALIDATION_FLAGS[]{
Viewports, Scissors, DepthBias, BlendConstants, DepthBounds,
StencilProperties, CullMode, DepthBoundsEnable, DepthTestEnable, DepthWriteEnable,
DepthCompareOp, FrontFace, StencilOp, StencilTestEnable, VertexBuffers,
Viewports, Scissors, DepthBias, BlendConstants, DepthBounds,
StencilProperties, LineWidth, CullMode, DepthBoundsEnable, DepthTestEnable,
DepthWriteEnable, DepthCompareOp, FrontFace, StencilOp, StencilTestEnable,
VertexBuffers, VertexInput,
};
Flags flags{};
for (const int flag : INVALIDATION_FLAGS) {
@ -40,6 +41,12 @@ Flags MakeInvalidationFlags() {
for (int index = VertexBuffer0; index <= VertexBuffer31; ++index) {
flags[index] = true;
}
for (int index = VertexAttribute0; index <= VertexAttribute31; ++index) {
flags[index] = true;
}
for (int index = VertexBinding0; index <= VertexBinding31; ++index) {
flags[index] = true;
}
return flags;
}
@ -79,6 +86,11 @@ void SetupDirtyStencilProperties(Tables& tables) {
table[OFF(stencil_back_func_mask)] = StencilProperties;
}
void SetupDirtyLineWidth(Tables& tables) {
tables[0][OFF(line_width_smooth)] = LineWidth;
tables[0][OFF(line_width_aliased)] = LineWidth;
}
void SetupDirtyCullMode(Tables& tables) {
auto& table = tables[0];
table[OFF(cull_face)] = CullMode;
@ -134,19 +146,6 @@ void SetupDirtyBlending(Tables& tables) {
FillBlock(tables[0], OFF(independent_blend), NUM(independent_blend), Blending);
}
void SetupDirtyInstanceDivisors(Tables& tables) {
static constexpr size_t divisor_offset = 3;
for (size_t index = 0; index < Regs::NumVertexArrays; ++index) {
tables[0][OFF(instanced_arrays) + index] = InstanceDivisors;
tables[0][OFF(vertex_array) + index * NUM(vertex_array[0]) + divisor_offset] =
InstanceDivisors;
}
}
void SetupDirtyVertexAttributes(Tables& tables) {
FillBlock(tables[0], OFF(vertex_attrib_format), NUM(vertex_attrib_format), VertexAttributes);
}
void SetupDirtyViewportSwizzles(Tables& tables) {
static constexpr size_t swizzle_offset = 6;
for (size_t index = 0; index < Regs::NumViewports; ++index) {
@ -154,11 +153,31 @@ void SetupDirtyViewportSwizzles(Tables& tables) {
ViewportSwizzles;
}
}
void SetupDirtyVertexAttributes(Tables& tables) {
for (size_t i = 0; i < Regs::NumVertexAttributes; ++i) {
const size_t offset = OFF(vertex_attrib_format) + i * NUM(vertex_attrib_format[0]);
FillBlock(tables[0], offset, NUM(vertex_attrib_format[0]), VertexAttribute0 + i);
}
FillBlock(tables[1], OFF(vertex_attrib_format), Regs::NumVertexAttributes, VertexInput);
}
void SetupDirtyVertexBindings(Tables& tables) {
// Do NOT include stride here, it's implicit in VertexBuffer
static constexpr size_t divisor_offset = 3;
for (size_t i = 0; i < Regs::NumVertexArrays; ++i) {
const u8 flag = static_cast<u8>(VertexBinding0 + i);
tables[0][OFF(instanced_arrays) + i] = VertexInput;
tables[1][OFF(instanced_arrays) + i] = flag;
tables[0][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = VertexInput;
tables[1][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = flag;
}
}
} // Anonymous namespace
StateTracker::StateTracker(Tegra::GPU& gpu)
: flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
auto& tables = gpu.Maxwell3D().dirty.tables;
auto& tables{gpu.Maxwell3D().dirty.tables};
SetupDirtyFlags(tables);
SetupDirtyViewports(tables);
SetupDirtyScissors(tables);
@ -166,6 +185,7 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyBlendConstants(tables);
SetupDirtyDepthBounds(tables);
SetupDirtyStencilProperties(tables);
SetupDirtyLineWidth(tables);
SetupDirtyCullMode(tables);
SetupDirtyDepthBoundsEnable(tables);
SetupDirtyDepthTestEnable(tables);
@ -175,9 +195,9 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyStencilOp(tables);
SetupDirtyStencilTestEnable(tables);
SetupDirtyBlending(tables);
SetupDirtyInstanceDivisors(tables);
SetupDirtyVertexAttributes(tables);
SetupDirtyViewportSwizzles(tables);
SetupDirtyVertexAttributes(tables);
SetupDirtyVertexBindings(tables);
}
} // namespace Vulkan
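A hedged sketch of how these per-register dirty flags are typically consumed; the actual consumer (the rasterizer/pipeline code) is not part of the excerpt shown here, so the names below are illustrative:

// Refresh only the vertex bindings whose registers were written since the last draw.
auto& flags = maxwell3d.dirty.flags;
for (size_t index = 0; index < Regs::NumVertexArrays; ++index) {
    if (!flags[Dirty::VertexBinding0 + index]) {
        continue;
    }
    flags[Dirty::VertexBinding0 + index] = false;
    // ... refresh the divisor/binding state for this slot ...
}
if (flags[Dirty::VertexInput]) {
    flags[Dirty::VertexInput] = false;
    // ... rebuild the full dynamic vertex input state (attributes + bindings) ...
}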

View File

@ -19,12 +19,19 @@ namespace Dirty {
enum : u8 {
First = VideoCommon::Dirty::LastCommonEntry,
VertexInput,
VertexAttribute0,
VertexAttribute31 = VertexAttribute0 + 31,
VertexBinding0,
VertexBinding31 = VertexBinding0 + 31,
Viewports,
Scissors,
DepthBias,
BlendConstants,
DepthBounds,
StencilProperties,
LineWidth,
CullMode,
DepthBoundsEnable,
@ -36,11 +43,9 @@ enum : u8 {
StencilTestEnable,
Blending,
InstanceDivisors,
VertexAttributes,
ViewportSwizzles,
Last
Last,
};
static_assert(Last <= std::numeric_limits<u8>::max());
@ -89,6 +94,10 @@ public:
return Exchange(Dirty::StencilProperties, false);
}
bool TouchLineWidth() const {
return Exchange(Dirty::LineWidth, false);
}
bool TouchCullMode() {
return Exchange(Dirty::CullMode, false);
}

View File

@ -65,6 +65,9 @@ VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, VKSchedul
VKSwapchain::~VKSwapchain() = default;
void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
is_outdated = false;
is_suboptimal = false;
const auto physical_device = device.GetPhysical();
const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
@ -82,21 +85,31 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
resource_ticks.resize(image_count);
}
bool VKSwapchain::AcquireNextImage() {
const VkResult result =
device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
*present_semaphores[frame_index], {}, &image_index);
void VKSwapchain::AcquireNextImage() {
const VkResult result = device.GetLogical().AcquireNextImageKHR(
*swapchain, std::numeric_limits<u64>::max(), *present_semaphores[frame_index],
VK_NULL_HANDLE, &image_index);
switch (result) {
case VK_SUCCESS:
break;
case VK_SUBOPTIMAL_KHR:
is_suboptimal = true;
break;
case VK_ERROR_OUT_OF_DATE_KHR:
is_outdated = true;
break;
default:
LOG_ERROR(Render_Vulkan, "vkAcquireNextImageKHR returned {}", vk::ToString(result));
break;
}
scheduler.Wait(resource_ticks[image_index]);
return result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR;
resource_ticks[image_index] = scheduler.CurrentTick();
}
bool VKSwapchain::Present(VkSemaphore render_semaphore) {
void VKSwapchain::Present(VkSemaphore render_semaphore) {
const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
const auto present_queue{device.GetPresentQueue()};
bool recreated = false;
const VkPresentInfoKHR present_info{
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
.pNext = nullptr,
@ -107,7 +120,6 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
.pImageIndices = &image_index,
.pResults = nullptr,
};
switch (const VkResult result = present_queue.Present(present_info)) {
case VK_SUCCESS:
break;
@ -115,24 +127,16 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
break;
case VK_ERROR_OUT_OF_DATE_KHR:
if (current_width > 0 && current_height > 0) {
Create(current_width, current_height, current_srgb);
recreated = true;
}
is_outdated = true;
break;
default:
LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
break;
}
resource_ticks[image_index] = scheduler.CurrentTick();
frame_index = (frame_index + 1) % static_cast<u32>(image_count);
return recreated;
}
bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const {
// TODO(Rodrigo): Handle framebuffer pixel format changes
return framebuffer.width != current_width || framebuffer.height != current_height;
++frame_index;
if (frame_index >= image_count) {
frame_index = 0;
}
}
void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
@ -148,7 +152,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
requested_image_count = capabilities.maxImageCount;
}
VkSwapchainCreateInfoKHR swapchain_ci{
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = nullptr,
@ -169,7 +172,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
.clipped = VK_FALSE,
.oldSwapchain = nullptr,
};
const u32 graphics_family{device.GetGraphicsFamily()};
const u32 present_family{device.GetPresentFamily()};
const std::array<u32, 2> queue_indices{graphics_family, present_family};
@ -178,7 +180,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
}
// Request the size again to reduce the possibility of a TOCTOU race condition.
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
@ -186,8 +187,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
extent = swapchain_ci.imageExtent;
current_width = extent.width;
current_height = extent.height;
current_srgb = srgb;
images = swapchain.GetImages();
@ -197,8 +196,8 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
void VKSwapchain::CreateSemaphores() {
present_semaphores.resize(image_count);
std::generate(present_semaphores.begin(), present_semaphores.end(),
[this] { return device.GetLogical().CreateSemaphore(); });
std::ranges::generate(present_semaphores,
[this] { return device.GetLogical().CreateSemaphore(); });
}
void VKSwapchain::CreateImageViews() {

View File

@ -28,14 +28,25 @@ public:
void Create(u32 width, u32 height, bool srgb);
/// Acquires the next image in the swapchain, waits as needed.
bool AcquireNextImage();
void AcquireNextImage();
/// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
/// recreated. Takes responsibility for the ownership of the fence.
bool Present(VkSemaphore render_semaphore);
/// Presents the rendered image to the swapchain.
void Present(VkSemaphore render_semaphore);
/// Returns true when the framebuffer layout has changed.
bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
/// Returns true when the color space has changed.
bool HasColorSpaceChanged(bool is_srgb) const {
return current_srgb != is_srgb;
}
/// Returns true when the swapchain is outdated.
bool IsOutDated() const {
return is_outdated;
}
/// Returns true when the swapchain is suboptimal.
bool IsSubOptimal() const {
return is_suboptimal;
}
VkExtent2D GetSize() const {
return extent;
@ -61,10 +72,6 @@ public:
return image_format;
}
bool GetSrgbState() const {
return current_srgb;
}
private:
void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
bool srgb);
@ -92,9 +99,9 @@ private:
VkFormat image_format{};
VkExtent2D extent{};
u32 current_width{};
u32 current_height{};
bool current_srgb{};
bool is_outdated{};
bool is_suboptimal{};
};
} // namespace Vulkan
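The swapchain no longer recreates itself inside Present(); recreation moves to the caller via the new flags. A minimal caller-side sketch (illustrative; assumes the renderer tracks is_srgb and the framebuffer layout):

if (swapchain.IsOutDated() || swapchain.IsSubOptimal() ||
    swapchain.HasColorSpaceChanged(is_srgb)) {
    swapchain.Create(layout.width, layout.height, is_srgb);
}
swapchain.AcquireNextImage();
// ... record and submit the frame, signalling render_semaphore ...
swapchain.Present(render_semaphore);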

View File

@ -15,6 +15,7 @@
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
@ -34,19 +35,6 @@ using VideoCommon::SubresourceRange;
using VideoCore::Surface::IsPixelFormatASTC;
namespace {
constexpr std::array ATTACHMENT_REFERENCES{
VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{2, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{3, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{4, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{5, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{6, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{7, VK_IMAGE_LAYOUT_GENERAL},
VkAttachmentReference{8, VK_IMAGE_LAYOUT_GENERAL},
};
constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
if (color == std::array<float, 4>{0, 0, 0, 0}) {
return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
@ -174,25 +162,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
return device.GetLogical().CreateImage(MakeImageCreateInfo(device, info));
}
[[nodiscard]] vk::Buffer MakeBuffer(const Device& device, const ImageInfo& info) {
if (info.type != ImageType::Buffer) {
return vk::Buffer{};
}
const size_t bytes_per_block = VideoCore::Surface::BytesPerBlock(info.format);
return device.GetLogical().CreateBuffer(VkBufferCreateInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = info.size.width * bytes_per_block,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
}
[[nodiscard]] VkImageAspectFlags ImageAspectMask(PixelFormat format) {
switch (VideoCore::Surface::GetFormatType(format)) {
case VideoCore::Surface::SurfaceType::ColorTexture:
@ -226,23 +195,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
}
}
[[nodiscard]] VkAttachmentDescription AttachmentDescription(const Device& device,
const ImageView* image_view) {
using MaxwellToVK::SurfaceFormat;
const PixelFormat pixel_format = image_view->format;
return VkAttachmentDescription{
.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
.format = SurfaceFormat(device, FormatType::Optimal, true, pixel_format).format,
.samples = image_view->Samples(),
.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
.initialLayout = VK_IMAGE_LAYOUT_GENERAL,
.finalLayout = VK_IMAGE_LAYOUT_GENERAL,
};
}
[[nodiscard]] VkComponentSwizzle ComponentSwizzle(SwizzleSource swizzle) {
switch (swizzle) {
case SwizzleSource::Zero:
@ -263,6 +215,30 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
return VK_COMPONENT_SWIZZLE_ZERO;
}
[[nodiscard]] VkImageViewType ImageViewType(Shader::TextureType type) {
switch (type) {
case Shader::TextureType::Color1D:
return VK_IMAGE_VIEW_TYPE_1D;
case Shader::TextureType::Color2D:
return VK_IMAGE_VIEW_TYPE_2D;
case Shader::TextureType::ColorCube:
return VK_IMAGE_VIEW_TYPE_CUBE;
case Shader::TextureType::Color3D:
return VK_IMAGE_VIEW_TYPE_3D;
case Shader::TextureType::ColorArray1D:
return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
case Shader::TextureType::ColorArray2D:
return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
case Shader::TextureType::ColorArrayCube:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case Shader::TextureType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
return VK_IMAGE_VIEW_TYPE_1D;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
return VK_IMAGE_VIEW_TYPE_2D;
}
[[nodiscard]] VkImageViewType ImageViewType(VideoCommon::ImageViewType type) {
switch (type) {
case VideoCommon::ImageViewType::e1D:
@ -280,7 +256,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case VideoCommon::ImageViewType::CubeArray:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case VideoCommon::ImageViewType::Rect:
LOG_WARNING(Render_Vulkan, "Unnormalized image view type not supported");
UNIMPLEMENTED_MSG("Rect image view");
return VK_IMAGE_VIEW_TYPE_2D;
case VideoCommon::ImageViewType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
@ -327,7 +303,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
};
}
[[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
[[maybe_unused]] [[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
std::span<const VideoCommon::BufferCopy> copies, size_t buffer_offset) {
std::vector<VkBufferCopy> result(copies.size());
std::ranges::transform(
@ -587,6 +563,28 @@ struct RangedBarrierRange {
}
};
[[nodiscard]] VkFormat Format(Shader::ImageFormat format) {
switch (format) {
case Shader::ImageFormat::Typeless:
break;
case Shader::ImageFormat::R8_SINT:
return VK_FORMAT_R8_SINT;
case Shader::ImageFormat::R8_UINT:
return VK_FORMAT_R8_UINT;
case Shader::ImageFormat::R16_UINT:
return VK_FORMAT_R16_UINT;
case Shader::ImageFormat::R16_SINT:
return VK_FORMAT_R16_SINT;
case Shader::ImageFormat::R32_UINT:
return VK_FORMAT_R32_UINT;
case Shader::ImageFormat::R32G32_UINT:
return VK_FORMAT_R32G32_UINT;
case Shader::ImageFormat::R32G32B32A32_UINT:
return VK_FORMAT_R32G32B32A32_UINT;
}
UNREACHABLE_MSG("Invalid image format={}", format);
return VK_FORMAT_R32_UINT;
}
} // Anonymous namespace
void TextureCacheRuntime::Finish() {
@ -625,7 +623,7 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
return;
}
}
ASSERT(src.ImageFormat() == dst.ImageFormat());
ASSERT(src.format == dst.format);
ASSERT(!(is_dst_msaa && !is_src_msaa));
ASSERT(operation == Fermi2D::Operation::SrcCopy);
@ -842,13 +840,9 @@ u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_addr_,
VAddr cpu_addr_)
: VideoCommon::ImageBase(info_, gpu_addr_, cpu_addr_), scheduler{&runtime.scheduler},
image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)),
image(MakeImage(runtime.device, info)),
commit(runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal)),
aspect_mask(ImageAspectMask(info.format)) {
if (image) {
commit = runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal);
} else {
commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
}
if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
if (Settings::values.accelerate_astc.GetValue()) {
flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
@ -857,11 +851,7 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
}
}
if (runtime.device.HasDebuggingToolAttached()) {
if (image) {
image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
} else {
buffer.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
}
image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
}
static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
@ -913,19 +903,6 @@ void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImag
});
}
void Image::UploadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferCopy> copies) {
// TODO: Move this to another API
scheduler->RequestOutsideRenderPassOperationContext();
std::vector vk_copies = TransformBufferCopies(copies, map.offset);
const VkBuffer src_buffer = map.buffer;
const VkBuffer dst_buffer = *buffer;
scheduler->Record([src_buffer, dst_buffer, vk_copies](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
cmdbuf.CopyBuffer(src_buffer, dst_buffer, vk_copies);
});
}
void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask);
scheduler->RequestOutsideRenderPassOperationContext();
@ -984,8 +961,9 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
ImageId image_id_, Image& image)
: VideoCommon::ImageViewBase{info, image.info, image_id_}, device{&runtime.device},
image_handle{image.Handle()}, image_format{image.info.format}, samples{ConvertSampleCount(
image.info.num_samples)} {
image_handle{image.Handle()}, samples{ConvertSampleCount(image.info.num_samples)} {
using Shader::TextureType;
const VkImageAspectFlags aspect_mask = ImageViewAspectMask(info);
std::array<SwizzleSource, 4> swizzle{
SwizzleSource::R,
@ -1023,57 +1001,54 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
},
.subresourceRange = MakeSubresourceRange(aspect_mask, info.range),
};
const auto create = [&](VideoCommon::ImageViewType view_type, std::optional<u32> num_layers) {
const auto create = [&](TextureType tex_type, std::optional<u32> num_layers) {
VkImageViewCreateInfo ci{create_info};
ci.viewType = ImageViewType(view_type);
ci.viewType = ImageViewType(tex_type);
if (num_layers) {
ci.subresourceRange.layerCount = *num_layers;
}
vk::ImageView handle = device->GetLogical().CreateImageView(ci);
if (device->HasDebuggingToolAttached()) {
handle.SetObjectNameEXT(VideoCommon::Name(*this, view_type).c_str());
handle.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
}
image_views[static_cast<size_t>(view_type)] = std::move(handle);
image_views[static_cast<size_t>(tex_type)] = std::move(handle);
};
switch (info.type) {
case VideoCommon::ImageViewType::e1D:
case VideoCommon::ImageViewType::e1DArray:
create(VideoCommon::ImageViewType::e1D, 1);
create(VideoCommon::ImageViewType::e1DArray, std::nullopt);
render_target = Handle(VideoCommon::ImageViewType::e1DArray);
create(TextureType::Color1D, 1);
create(TextureType::ColorArray1D, std::nullopt);
render_target = Handle(TextureType::ColorArray1D);
break;
case VideoCommon::ImageViewType::e2D:
case VideoCommon::ImageViewType::e2DArray:
create(VideoCommon::ImageViewType::e2D, 1);
create(VideoCommon::ImageViewType::e2DArray, std::nullopt);
render_target = Handle(VideoCommon::ImageViewType::e2DArray);
create(TextureType::Color2D, 1);
create(TextureType::ColorArray2D, std::nullopt);
render_target = Handle(Shader::TextureType::ColorArray2D);
break;
case VideoCommon::ImageViewType::e3D:
create(VideoCommon::ImageViewType::e3D, std::nullopt);
render_target = Handle(VideoCommon::ImageViewType::e3D);
create(TextureType::Color3D, std::nullopt);
render_target = Handle(Shader::TextureType::Color3D);
break;
case VideoCommon::ImageViewType::Cube:
case VideoCommon::ImageViewType::CubeArray:
create(VideoCommon::ImageViewType::Cube, 6);
create(VideoCommon::ImageViewType::CubeArray, std::nullopt);
create(TextureType::ColorCube, 6);
create(TextureType::ColorArrayCube, std::nullopt);
break;
case VideoCommon::ImageViewType::Rect:
UNIMPLEMENTED();
break;
case VideoCommon::ImageViewType::Buffer:
buffer_view = device->GetLogical().CreateBufferView(VkBufferViewCreateInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.buffer = image.Buffer(),
.format = format_info.format,
.offset = 0, // TODO: Redesign buffer cache to support this
.range = image.guest_size_bytes,
});
UNREACHABLE();
break;
}
}
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
: VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
buffer_size{VideoCommon::CalculateGuestSizeInBytes(info)} {}
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams& params)
: VideoCommon::ImageViewBase{params} {}
@ -1081,7 +1056,8 @@ VkImageView ImageView::DepthView() {
if (depth_view) {
return *depth_view;
}
depth_view = MakeDepthStencilView(VK_IMAGE_ASPECT_DEPTH_BIT);
const auto& info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
depth_view = MakeView(info.format, VK_IMAGE_ASPECT_DEPTH_BIT);
return *depth_view;
}
@ -1089,18 +1065,38 @@ VkImageView ImageView::StencilView() {
if (stencil_view) {
return *stencil_view;
}
stencil_view = MakeDepthStencilView(VK_IMAGE_ASPECT_STENCIL_BIT);
const auto& info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
stencil_view = MakeView(info.format, VK_IMAGE_ASPECT_STENCIL_BIT);
return *stencil_view;
}
vk::ImageView ImageView::MakeDepthStencilView(VkImageAspectFlags aspect_mask) {
VkImageView ImageView::StorageView(Shader::TextureType texture_type,
Shader::ImageFormat image_format) {
if (image_format == Shader::ImageFormat::Typeless) {
return Handle(texture_type);
}
const bool is_signed{image_format == Shader::ImageFormat::R8_SINT ||
image_format == Shader::ImageFormat::R16_SINT};
if (!storage_views) {
storage_views = std::make_unique<StorageViews>();
}
auto& views{is_signed ? storage_views->signeds : storage_views->unsigneds};
auto& view{views[static_cast<size_t>(texture_type)]};
if (view) {
return *view;
}
view = MakeView(Format(image_format), VK_IMAGE_ASPECT_COLOR_BIT);
return *view;
}
vk::ImageView ImageView::MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask) {
return device->GetLogical().CreateImageView({
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = image_handle,
.viewType = ImageViewType(type),
.format = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format).format,
.format = vk_format,
.components{
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
@ -1164,7 +1160,6 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM_RT> color_buffers,
ImageView* depth_buffer, const VideoCommon::RenderTargets& key) {
std::vector<VkAttachmentDescription> descriptions;
std::vector<VkImageView> attachments;
RenderPassKey renderpass_key{};
s32 num_layers = 1;
@ -1175,7 +1170,6 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
renderpass_key.color_formats[index] = PixelFormat::Invalid;
continue;
}
descriptions.push_back(AttachmentDescription(runtime.device, color_buffer));
attachments.push_back(color_buffer->RenderTarget());
renderpass_key.color_formats[index] = color_buffer->format;
num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@ -1185,10 +1179,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
++num_images;
}
const size_t num_colors = attachments.size();
const VkAttachmentReference* depth_attachment =
depth_buffer ? &ATTACHMENT_REFERENCES[num_colors] : nullptr;
if (depth_buffer) {
descriptions.push_back(AttachmentDescription(runtime.device, depth_buffer));
attachments.push_back(depth_buffer->RenderTarget());
renderpass_key.depth_format = depth_buffer->format;
num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@ -1201,40 +1192,14 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
}
renderpass_key.samples = samples;
const auto& device = runtime.device.GetLogical();
const auto [cache_pair, is_new] = runtime.renderpass_cache.try_emplace(renderpass_key);
if (is_new) {
const VkSubpassDescription subpass{
.flags = 0,
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.inputAttachmentCount = 0,
.pInputAttachments = nullptr,
.colorAttachmentCount = static_cast<u32>(num_colors),
.pColorAttachments = num_colors != 0 ? ATTACHMENT_REFERENCES.data() : nullptr,
.pResolveAttachments = nullptr,
.pDepthStencilAttachment = depth_attachment,
.preserveAttachmentCount = 0,
.pPreserveAttachments = nullptr,
};
cache_pair->second = device.CreateRenderPass(VkRenderPassCreateInfo{
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.attachmentCount = static_cast<u32>(descriptions.size()),
.pAttachments = descriptions.data(),
.subpassCount = 1,
.pSubpasses = &subpass,
.dependencyCount = 0,
.pDependencies = nullptr,
});
}
renderpass = *cache_pair->second;
renderpass = runtime.render_pass_cache.Get(renderpass_key);
render_area = VkExtent2D{
.width = key.size.width,
.height = key.size.height,
};
num_color_buffers = static_cast<u32>(num_colors);
framebuffer = device.CreateFramebuffer(VkFramebufferCreateInfo{
framebuffer = runtime.device.GetLogical().CreateFramebuffer({
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,

View File

@ -7,6 +7,7 @@
#include <compare>
#include <span>
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
@ -26,35 +27,10 @@ class Device;
class Image;
class ImageView;
class Framebuffer;
class RenderPassCache;
class StagingBufferPool;
class VKScheduler;
struct RenderPassKey {
constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;
std::array<PixelFormat, NUM_RT> color_formats;
PixelFormat depth_format;
VkSampleCountFlagBits samples;
};
} // namespace Vulkan
namespace std {
template <>
struct hash<Vulkan::RenderPassKey> {
[[nodiscard]] constexpr size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
size_t value = static_cast<size_t>(key.depth_format) << 48;
value ^= static_cast<size_t>(key.samples) << 52;
for (size_t i = 0; i < key.color_formats.size(); ++i) {
value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
}
return value;
}
};
} // namespace std
namespace Vulkan {
struct TextureCacheRuntime {
const Device& device;
VKScheduler& scheduler;
@ -62,13 +38,13 @@ struct TextureCacheRuntime {
StagingBufferPool& staging_buffer_pool;
BlitImageHelper& blit_image_helper;
ASTCDecoderPass& astc_decoder_pass;
std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache{};
RenderPassCache& render_pass_cache;
void Finish();
[[nodiscard]] StagingBufferRef UploadStagingBuffer(size_t size);
StagingBufferRef UploadStagingBuffer(size_t size);
[[nodiscard]] StagingBufferRef DownloadStagingBuffer(size_t size);
StagingBufferRef DownloadStagingBuffer(size_t size);
void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
const Region2D& dst_region, const Region2D& src_region,
@ -79,7 +55,7 @@ struct TextureCacheRuntime {
void ConvertImage(Framebuffer* dst, ImageView& dst_view, ImageView& src_view);
[[nodiscard]] bool CanAccelerateImageUpload(Image&) const noexcept {
bool CanAccelerateImageUpload(Image&) const noexcept {
return false;
}
@ -117,8 +93,6 @@ public:
void UploadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferImageCopy> copies);
void UploadMemory(const StagingBufferRef& map, std::span<const VideoCommon::BufferCopy> copies);
void DownloadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferImageCopy> copies);
@ -126,10 +100,6 @@ public:
return *image;
}
[[nodiscard]] VkBuffer Buffer() const noexcept {
return *buffer;
}
[[nodiscard]] VkImageAspectFlags AspectMask() const noexcept {
return aspect_mask;
}
@ -146,7 +116,6 @@ public:
private:
VKScheduler* scheduler;
vk::Image image;
vk::Buffer buffer;
MemoryCommit commit;
vk::ImageView image_view;
std::vector<vk::ImageView> storage_image_views;
@ -157,18 +126,19 @@ private:
class ImageView : public VideoCommon::ImageViewBase {
public:
explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&);
explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo&,
const VideoCommon::ImageViewInfo&, GPUVAddr);
explicit ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams&);
[[nodiscard]] VkImageView DepthView();
[[nodiscard]] VkImageView StencilView();
[[nodiscard]] VkImageView Handle(VideoCommon::ImageViewType query_type) const noexcept {
return *image_views[static_cast<size_t>(query_type)];
}
[[nodiscard]] VkImageView StorageView(Shader::TextureType texture_type,
Shader::ImageFormat image_format);
[[nodiscard]] VkBufferView BufferView() const noexcept {
return *buffer_view;
[[nodiscard]] VkImageView Handle(Shader::TextureType texture_type) const noexcept {
return *image_views[static_cast<size_t>(texture_type)];
}
[[nodiscard]] VkImage ImageHandle() const noexcept {
@ -179,26 +149,36 @@ public:
return render_target;
}
[[nodiscard]] PixelFormat ImageFormat() const noexcept {
return image_format;
}
[[nodiscard]] VkSampleCountFlagBits Samples() const noexcept {
return samples;
}
[[nodiscard]] GPUVAddr GpuAddr() const noexcept {
return gpu_addr;
}
[[nodiscard]] u32 BufferSize() const noexcept {
return buffer_size;
}
private:
[[nodiscard]] vk::ImageView MakeDepthStencilView(VkImageAspectFlags aspect_mask);
struct StorageViews {
std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> signeds;
std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> unsigneds;
};
[[nodiscard]] vk::ImageView MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask);
const Device* device = nullptr;
std::array<vk::ImageView, VideoCommon::NUM_IMAGE_VIEW_TYPES> image_views;
std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> image_views;
std::unique_ptr<StorageViews> storage_views;
vk::ImageView depth_view;
vk::ImageView stencil_view;
vk::BufferView buffer_view;
VkImage image_handle = VK_NULL_HANDLE;
VkImageView render_target = VK_NULL_HANDLE;
PixelFormat image_format = PixelFormat::Invalid;
VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT;
GPUVAddr gpu_addr = 0;
u32 buffer_size = 0;
};
class ImageAlloc : public VideoCommon::ImageAllocBase {};
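With image views now indexed by Shader::TextureType, a descriptor writer would query handles roughly as follows (a sketch; `view` and the chosen enum values are placeholders, not code from this change):

// Sampled access uses the plain per-type handle; storage access may need a reinterpreted format.
const VkImageView sampled = view.Handle(Shader::TextureType::ColorArray2D);
const VkImageView storage =
    view.StorageView(Shader::TextureType::Color2D, Shader::ImageFormat::R32_UINT);
// Typeless image descriptors fall back to the sampled handle inside StorageView().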

View File

@ -15,7 +15,9 @@
namespace Vulkan {
VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_)
: device{device_}, scheduler{scheduler_} {}
: device{device_}, scheduler{scheduler_} {
payload_cursor = payload.data();
}
VKUpdateDescriptorQueue::~VKUpdateDescriptorQueue() = default;
@ -36,13 +38,4 @@ void VKUpdateDescriptorQueue::Acquire() {
upload_start = payload_cursor;
}
void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
VkDescriptorSet set) {
const void* const data = upload_start;
const vk::Device* const logical = &device.GetLogical();
scheduler.Record([data, logical, set, update_template](vk::CommandBuffer) {
logical->UpdateDescriptorSet(set, update_template, data);
});
}
} // namespace Vulkan
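With Send() removed, callers presumably consume UpdateData() themselves while recording; a hedged sketch assuming the pipeline owns `descriptor_set` and `update_template` (hypothetical names):

update_descriptor_queue.Acquire();
update_descriptor_queue.AddSampledImage(image_view, sampler);
const void* const data = update_descriptor_queue.UpdateData();
scheduler.Record([&device, descriptor_set, update_template, data](vk::CommandBuffer) {
    device.GetLogical().UpdateDescriptorSet(descriptor_set, update_template, data);
});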

View File

@ -39,7 +39,9 @@ public:
void Acquire();
void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
const DescriptorUpdateEntry* UpdateData() const noexcept {
return upload_start;
}
void AddSampledImage(VkImageView image_view, VkSampler sampler) {
*(payload_cursor++) = VkDescriptorImageInfo{

View File

@ -1,752 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string>
#include <string_view>
#include <fmt/format.h>
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/expr.h"
namespace VideoCommon::Shader {
ASTZipper::ASTZipper() = default;
void ASTZipper::Init(const ASTNode new_first, const ASTNode parent) {
ASSERT(new_first->manager == nullptr);
first = new_first;
last = new_first;
ASTNode current = first;
while (current) {
current->manager = this;
current->parent = parent;
last = current;
current = current->next;
}
}
void ASTZipper::PushBack(const ASTNode new_node) {
ASSERT(new_node->manager == nullptr);
new_node->previous = last;
if (last) {
last->next = new_node;
}
new_node->next.reset();
last = new_node;
if (!first) {
first = new_node;
}
new_node->manager = this;
}
void ASTZipper::PushFront(const ASTNode new_node) {
ASSERT(new_node->manager == nullptr);
new_node->previous.reset();
new_node->next = first;
if (first) {
first->previous = new_node;
}
if (last == first) {
last = new_node;
}
first = new_node;
new_node->manager = this;
}
void ASTZipper::InsertAfter(const ASTNode new_node, const ASTNode at_node) {
ASSERT(new_node->manager == nullptr);
if (!at_node) {
PushFront(new_node);
return;
}
const ASTNode next = at_node->next;
if (next) {
next->previous = new_node;
}
new_node->previous = at_node;
if (at_node == last) {
last = new_node;
}
new_node->next = next;
at_node->next = new_node;
new_node->manager = this;
}
void ASTZipper::InsertBefore(const ASTNode new_node, const ASTNode at_node) {
ASSERT(new_node->manager == nullptr);
if (!at_node) {
PushBack(new_node);
return;
}
const ASTNode previous = at_node->previous;
if (previous) {
previous->next = new_node;
}
new_node->next = at_node;
if (at_node == first) {
first = new_node;
}
new_node->previous = previous;
at_node->previous = new_node;
new_node->manager = this;
}
void ASTZipper::DetachTail(ASTNode node) {
ASSERT(node->manager == this);
if (node == first) {
first.reset();
last.reset();
return;
}
last = node->previous;
last->next.reset();
node->previous.reset();
ASTNode current = std::move(node);
while (current) {
current->manager = nullptr;
current->parent.reset();
current = current->next;
}
}
void ASTZipper::DetachSegment(const ASTNode start, const ASTNode end) {
ASSERT(start->manager == this && end->manager == this);
if (start == end) {
DetachSingle(start);
return;
}
const ASTNode prev = start->previous;
const ASTNode post = end->next;
if (!prev) {
first = post;
} else {
prev->next = post;
}
if (!post) {
last = prev;
} else {
post->previous = prev;
}
start->previous.reset();
end->next.reset();
ASTNode current = start;
bool found = false;
while (current) {
current->manager = nullptr;
current->parent.reset();
found |= current == end;
current = current->next;
}
ASSERT(found);
}
void ASTZipper::DetachSingle(const ASTNode node) {
ASSERT(node->manager == this);
const ASTNode prev = node->previous;
const ASTNode post = node->next;
node->previous.reset();
node->next.reset();
if (!prev) {
first = post;
} else {
prev->next = post;
}
if (!post) {
last = prev;
} else {
post->previous = prev;
}
node->manager = nullptr;
node->parent.reset();
}
void ASTZipper::Remove(const ASTNode node) {
ASSERT(node->manager == this);
const ASTNode next = node->next;
const ASTNode previous = node->previous;
if (previous) {
previous->next = next;
}
if (next) {
next->previous = previous;
}
node->parent.reset();
node->manager = nullptr;
if (node == last) {
last = previous;
}
if (node == first) {
first = next;
}
}
class ExprPrinter final {
public:
void operator()(const ExprAnd& expr) {
inner += "( ";
std::visit(*this, *expr.operand1);
inner += " && ";
std::visit(*this, *expr.operand2);
inner += ')';
}
void operator()(const ExprOr& expr) {
inner += "( ";
std::visit(*this, *expr.operand1);
inner += " || ";
std::visit(*this, *expr.operand2);
inner += ')';
}
void operator()(const ExprNot& expr) {
inner += "!";
std::visit(*this, *expr.operand1);
}
void operator()(const ExprPredicate& expr) {
inner += fmt::format("P{}", expr.predicate);
}
void operator()(const ExprCondCode& expr) {
inner += fmt::format("CC{}", expr.cc);
}
void operator()(const ExprVar& expr) {
inner += fmt::format("V{}", expr.var_index);
}
void operator()(const ExprBoolean& expr) {
inner += expr.value ? "true" : "false";
}
void operator()(const ExprGprEqual& expr) {
inner += fmt::format("(gpr_{} == {})", expr.gpr, expr.value);
}
const std::string& GetResult() const {
return inner;
}
private:
std::string inner;
};
class ASTPrinter {
public:
void operator()(const ASTProgram& ast) {
scope++;
inner += "program {\n";
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
inner += "}\n";
scope--;
}
void operator()(const ASTIfThen& ast) {
ExprPrinter expr_parser{};
std::visit(expr_parser, *ast.condition);
inner += fmt::format("{}if ({}) {{\n", Indent(), expr_parser.GetResult());
scope++;
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
scope--;
inner += fmt::format("{}}}\n", Indent());
}
void operator()(const ASTIfElse& ast) {
inner += Indent();
inner += "else {\n";
scope++;
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
scope--;
inner += Indent();
inner += "}\n";
}
void operator()(const ASTBlockEncoded& ast) {
inner += fmt::format("{}Block({}, {});\n", Indent(), ast.start, ast.end);
}
void operator()([[maybe_unused]] const ASTBlockDecoded& ast) {
inner += Indent();
inner += "Block;\n";
}
void operator()(const ASTVarSet& ast) {
ExprPrinter expr_parser{};
std::visit(expr_parser, *ast.condition);
inner += fmt::format("{}V{} := {};\n", Indent(), ast.index, expr_parser.GetResult());
}
void operator()(const ASTLabel& ast) {
inner += fmt::format("Label_{}:\n", ast.index);
}
void operator()(const ASTGoto& ast) {
ExprPrinter expr_parser{};
std::visit(expr_parser, *ast.condition);
inner +=
fmt::format("{}({}) -> goto Label_{};\n", Indent(), expr_parser.GetResult(), ast.label);
}
void operator()(const ASTDoWhile& ast) {
ExprPrinter expr_parser{};
std::visit(expr_parser, *ast.condition);
inner += fmt::format("{}do {{\n", Indent());
scope++;
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
scope--;
inner += fmt::format("{}}} while ({});\n", Indent(), expr_parser.GetResult());
}
void operator()(const ASTReturn& ast) {
ExprPrinter expr_parser{};
std::visit(expr_parser, *ast.condition);
inner += fmt::format("{}({}) -> {};\n", Indent(), expr_parser.GetResult(),
ast.kills ? "discard" : "exit");
}
void operator()(const ASTBreak& ast) {
ExprPrinter expr_parser{};
std::visit(expr_parser, *ast.condition);
inner += fmt::format("{}({}) -> break;\n", Indent(), expr_parser.GetResult());
}
void Visit(const ASTNode& node) {
std::visit(*this, *node->GetInnerData());
}
const std::string& GetResult() const {
return inner;
}
private:
std::string_view Indent() {
if (space_segment_scope == scope) {
return space_segment;
}
// Ensure that we don't exceed our view.
ASSERT(scope * 2 < spaces.size());
space_segment = spaces.substr(0, scope * 2);
space_segment_scope = scope;
return space_segment;
}
std::string inner{};
std::string_view space_segment;
u32 scope{};
u32 space_segment_scope{};
static constexpr std::string_view spaces{" "};
};
std::string ASTManager::Print() const {
ASTPrinter printer{};
printer.Visit(main_node);
return printer.GetResult();
}
ASTManager::ASTManager(bool do_full_decompile, bool disable_else_derivation_)
: full_decompile{do_full_decompile}, disable_else_derivation{disable_else_derivation_} {}
ASTManager::~ASTManager() {
Clear();
}
void ASTManager::Init() {
main_node = ASTBase::Make<ASTProgram>(ASTNode{});
program = std::get_if<ASTProgram>(main_node->GetInnerData());
false_condition = MakeExpr<ExprBoolean>(false);
}
void ASTManager::DeclareLabel(u32 address) {
const auto pair = labels_map.emplace(address, labels_count);
if (pair.second) {
labels_count++;
labels.resize(labels_count);
}
}
void ASTManager::InsertLabel(u32 address) {
const u32 index = labels_map[address];
const ASTNode label = ASTBase::Make<ASTLabel>(main_node, index);
labels[index] = label;
program->nodes.PushBack(label);
}
void ASTManager::InsertGoto(Expr condition, u32 address) {
const u32 index = labels_map[address];
const ASTNode goto_node = ASTBase::Make<ASTGoto>(main_node, std::move(condition), index);
gotos.push_back(goto_node);
program->nodes.PushBack(goto_node);
}
void ASTManager::InsertBlock(u32 start_address, u32 end_address) {
ASTNode block = ASTBase::Make<ASTBlockEncoded>(main_node, start_address, end_address);
program->nodes.PushBack(std::move(block));
}
void ASTManager::InsertReturn(Expr condition, bool kills) {
ASTNode node = ASTBase::Make<ASTReturn>(main_node, std::move(condition), kills);
program->nodes.PushBack(std::move(node));
}
// The decompile algorithm is based on
// "Taming control flow: A structured approach to eliminating goto statements"
// by AM Erosa, LJ Hendren 1994. In general, the idea is to get gotos to be
// on the same structured level as the label which they jump to. This is done
// through outward/inward movements and lifting. Once they are at the same
// level, you can enclose them in an "if" structure or a "do-while" structure.
void ASTManager::Decompile() {
auto it = gotos.begin();
while (it != gotos.end()) {
const ASTNode goto_node = *it;
const auto label_index = goto_node->GetGotoLabel();
if (!label_index) {
return;
}
const ASTNode label = labels[*label_index];
if (!full_decompile) {
// We only decompile backward jumps
if (!IsBackwardsJump(goto_node, label)) {
it++;
continue;
}
}
if (IndirectlyRelated(goto_node, label)) {
while (!DirectlyRelated(goto_node, label)) {
MoveOutward(goto_node);
}
}
if (DirectlyRelated(goto_node, label)) {
u32 goto_level = goto_node->GetLevel();
const u32 label_level = label->GetLevel();
while (label_level < goto_level) {
MoveOutward(goto_node);
goto_level--;
}
// TODO(Blinkhawk): Implement Lifting and Inward Movements
}
if (label->GetParent() == goto_node->GetParent()) {
bool is_loop = false;
ASTNode current = goto_node->GetPrevious();
while (current) {
if (current == label) {
is_loop = true;
break;
}
current = current->GetPrevious();
}
if (is_loop) {
EncloseDoWhile(goto_node, label);
} else {
EncloseIfThen(goto_node, label);
}
it = gotos.erase(it);
continue;
}
it++;
}
if (full_decompile) {
for (const ASTNode& label : labels) {
auto& manager = label->GetManager();
manager.Remove(label);
}
labels.clear();
} else {
auto label_it = labels.begin();
while (label_it != labels.end()) {
bool can_remove = true;
ASTNode label = *label_it;
for (const ASTNode& goto_node : gotos) {
const auto label_index = goto_node->GetGotoLabel();
if (!label_index) {
return;
}
ASTNode& glabel = labels[*label_index];
if (glabel == label) {
can_remove = false;
break;
}
}
if (can_remove) {
label->MarkLabelUnused();
}
}
}
}
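A hedged illustration of the rewrite performed above, written in the textual form ASTPrinter emits (addresses and predicates are made up): a backward goto is enclosed in a do-while, a forward goto in an if with the negated condition, and the label is dropped once no goto references it.

Backward jump, before:
    Label_0:
    Block(0, 64);
    (P0) -> goto Label_0;
After EncloseDoWhile():
    do {
        Block(0, 64);
    } while (P0);

Forward jump, before:
    (P1) -> goto Label_1;
    Block(64, 128);
    Label_1:
After EncloseIfThen():
    if (!P1) {
        Block(64, 128);
    }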
bool ASTManager::IsBackwardsJump(ASTNode goto_node, ASTNode label_node) const {
u32 goto_level = goto_node->GetLevel();
u32 label_level = label_node->GetLevel();
while (goto_level > label_level) {
goto_level--;
goto_node = goto_node->GetParent();
}
while (label_level > goto_level) {
label_level--;
label_node = label_node->GetParent();
}
while (goto_node->GetParent() != label_node->GetParent()) {
goto_node = goto_node->GetParent();
label_node = label_node->GetParent();
}
ASTNode current = goto_node->GetPrevious();
while (current) {
if (current == label_node) {
return true;
}
current = current->GetPrevious();
}
return false;
}
bool ASTManager::IndirectlyRelated(const ASTNode& first, const ASTNode& second) const {
return !(first->GetParent() == second->GetParent() || DirectlyRelated(first, second));
}
bool ASTManager::DirectlyRelated(const ASTNode& first, const ASTNode& second) const {
if (first->GetParent() == second->GetParent()) {
return false;
}
const u32 first_level = first->GetLevel();
const u32 second_level = second->GetLevel();
u32 min_level;
u32 max_level;
ASTNode max;
ASTNode min;
if (first_level > second_level) {
min_level = second_level;
min = second;
max_level = first_level;
max = first;
} else {
min_level = first_level;
min = first;
max_level = second_level;
max = second;
}
while (max_level > min_level) {
max_level--;
max = max->GetParent();
}
return min->GetParent() == max->GetParent();
}
void ASTManager::ShowCurrentState(std::string_view state) const {
LOG_CRITICAL(HW_GPU, "\nState {}:\n\n{}\n", state, Print());
SanityCheck();
}
void ASTManager::SanityCheck() const {
for (const auto& label : labels) {
if (!label->GetParent()) {
LOG_CRITICAL(HW_GPU, "Sanity Check Failed");
}
}
}
void ASTManager::EncloseDoWhile(ASTNode goto_node, ASTNode label) {
ASTZipper& zipper = goto_node->GetManager();
const ASTNode loop_start = label->GetNext();
if (loop_start == goto_node) {
zipper.Remove(goto_node);
return;
}
const ASTNode parent = label->GetParent();
const Expr condition = goto_node->GetGotoCondition();
zipper.DetachSegment(loop_start, goto_node);
const ASTNode do_while_node = ASTBase::Make<ASTDoWhile>(parent, condition);
ASTZipper* sub_zipper = do_while_node->GetSubNodes();
sub_zipper->Init(loop_start, do_while_node);
zipper.InsertAfter(do_while_node, label);
sub_zipper->Remove(goto_node);
}
void ASTManager::EncloseIfThen(ASTNode goto_node, ASTNode label) {
ASTZipper& zipper = goto_node->GetManager();
const ASTNode if_end = label->GetPrevious();
if (if_end == goto_node) {
zipper.Remove(goto_node);
return;
}
const ASTNode prev = goto_node->GetPrevious();
const Expr condition = goto_node->GetGotoCondition();
bool do_else = false;
if (!disable_else_derivation && prev->IsIfThen()) {
const Expr if_condition = prev->GetIfCondition();
do_else = ExprAreEqual(if_condition, condition);
}
const ASTNode parent = label->GetParent();
zipper.DetachSegment(goto_node, if_end);
ASTNode if_node;
if (do_else) {
if_node = ASTBase::Make<ASTIfElse>(parent);
} else {
Expr neg_condition = MakeExprNot(condition);
if_node = ASTBase::Make<ASTIfThen>(parent, neg_condition);
}
ASTZipper* sub_zipper = if_node->GetSubNodes();
sub_zipper->Init(goto_node, if_node);
zipper.InsertAfter(if_node, prev);
sub_zipper->Remove(goto_node);
}
void ASTManager::MoveOutward(ASTNode goto_node) {
ASTZipper& zipper = goto_node->GetManager();
const ASTNode parent = goto_node->GetParent();
ASTZipper& zipper2 = parent->GetManager();
const ASTNode grandpa = parent->GetParent();
const bool is_loop = parent->IsLoop();
const bool is_else = parent->IsIfElse();
const bool is_if = parent->IsIfThen();
const ASTNode prev = goto_node->GetPrevious();
const ASTNode post = goto_node->GetNext();
const Expr condition = goto_node->GetGotoCondition();
zipper.DetachSingle(goto_node);
if (is_loop) {
const u32 var_index = NewVariable();
const Expr var_condition = MakeExpr<ExprVar>(var_index);
const ASTNode var_node = ASTBase::Make<ASTVarSet>(parent, var_index, condition);
const ASTNode var_node_init = ASTBase::Make<ASTVarSet>(parent, var_index, false_condition);
zipper2.InsertBefore(var_node_init, parent);
zipper.InsertAfter(var_node, prev);
goto_node->SetGotoCondition(var_condition);
const ASTNode break_node = ASTBase::Make<ASTBreak>(parent, var_condition);
zipper.InsertAfter(break_node, var_node);
} else if (is_if || is_else) {
const u32 var_index = NewVariable();
const Expr var_condition = MakeExpr<ExprVar>(var_index);
const ASTNode var_node = ASTBase::Make<ASTVarSet>(parent, var_index, condition);
const ASTNode var_node_init = ASTBase::Make<ASTVarSet>(parent, var_index, false_condition);
if (is_if) {
zipper2.InsertBefore(var_node_init, parent);
} else {
zipper2.InsertBefore(var_node_init, parent->GetPrevious());
}
zipper.InsertAfter(var_node, prev);
goto_node->SetGotoCondition(var_condition);
if (post) {
zipper.DetachTail(post);
const ASTNode if_node = ASTBase::Make<ASTIfThen>(parent, MakeExprNot(var_condition));
ASTZipper* sub_zipper = if_node->GetSubNodes();
sub_zipper->Init(post, if_node);
zipper.InsertAfter(if_node, var_node);
}
} else {
UNREACHABLE();
}
const ASTNode next = parent->GetNext();
if (is_if && next && next->IsIfElse()) {
zipper2.InsertAfter(goto_node, next);
goto_node->SetParent(grandpa);
return;
}
zipper2.InsertAfter(goto_node, parent);
goto_node->SetParent(grandpa);
}
class ASTClearer {
public:
ASTClearer() = default;
void operator()(const ASTProgram& ast) {
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
}
void operator()(const ASTIfThen& ast) {
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
}
void operator()(const ASTIfElse& ast) {
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
}
void operator()([[maybe_unused]] const ASTBlockEncoded& ast) {}
void operator()(ASTBlockDecoded& ast) {
ast.nodes.clear();
}
void operator()([[maybe_unused]] const ASTVarSet& ast) {}
void operator()([[maybe_unused]] const ASTLabel& ast) {}
void operator()([[maybe_unused]] const ASTGoto& ast) {}
void operator()(const ASTDoWhile& ast) {
ASTNode current = ast.nodes.GetFirst();
while (current) {
Visit(current);
current = current->GetNext();
}
}
void operator()([[maybe_unused]] const ASTReturn& ast) {}
void operator()([[maybe_unused]] const ASTBreak& ast) {}
void Visit(const ASTNode& node) {
std::visit(*this, *node->GetInnerData());
node->Clear();
}
};
void ASTManager::Clear() {
if (!main_node) {
return;
}
ASTClearer clearer{};
clearer.Visit(main_node);
main_node.reset();
program = nullptr;
labels_map.clear();
labels.clear();
gotos.clear();
}
} // namespace VideoCommon::Shader

View File

@ -1,398 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>
#include "video_core/shader/expr.h"
#include "video_core/shader/node.h"
namespace VideoCommon::Shader {
class ASTBase;
class ASTBlockDecoded;
class ASTBlockEncoded;
class ASTBreak;
class ASTDoWhile;
class ASTGoto;
class ASTIfElse;
class ASTIfThen;
class ASTLabel;
class ASTProgram;
class ASTReturn;
class ASTVarSet;
using ASTData = std::variant<ASTProgram, ASTIfThen, ASTIfElse, ASTBlockEncoded, ASTBlockDecoded,
ASTVarSet, ASTGoto, ASTLabel, ASTDoWhile, ASTReturn, ASTBreak>;
using ASTNode = std::shared_ptr<ASTBase>;
enum class ASTZipperType : u32 {
Program,
IfThen,
IfElse,
Loop,
};
class ASTZipper final {
public:
explicit ASTZipper();
void Init(ASTNode first, ASTNode parent);
ASTNode GetFirst() const {
return first;
}
ASTNode GetLast() const {
return last;
}
void PushBack(ASTNode new_node);
void PushFront(ASTNode new_node);
void InsertAfter(ASTNode new_node, ASTNode at_node);
void InsertBefore(ASTNode new_node, ASTNode at_node);
void DetachTail(ASTNode node);
void DetachSingle(ASTNode node);
void DetachSegment(ASTNode start, ASTNode end);
void Remove(ASTNode node);
ASTNode first;
ASTNode last;
};
class ASTProgram {
public:
ASTZipper nodes{};
};
class ASTIfThen {
public:
explicit ASTIfThen(Expr condition_) : condition{std::move(condition_)} {}
Expr condition;
ASTZipper nodes{};
};
class ASTIfElse {
public:
ASTZipper nodes{};
};
class ASTBlockEncoded {
public:
explicit ASTBlockEncoded(u32 start_, u32 end_) : start{start_}, end{end_} {}
u32 start;
u32 end;
};
class ASTBlockDecoded {
public:
explicit ASTBlockDecoded(NodeBlock&& new_nodes_) : nodes(std::move(new_nodes_)) {}
NodeBlock nodes;
};
class ASTVarSet {
public:
explicit ASTVarSet(u32 index_, Expr condition_)
: index{index_}, condition{std::move(condition_)} {}
u32 index;
Expr condition;
};
class ASTLabel {
public:
explicit ASTLabel(u32 index_) : index{index_} {}
u32 index;
bool unused{};
};
class ASTGoto {
public:
explicit ASTGoto(Expr condition_, u32 label_)
: condition{std::move(condition_)}, label{label_} {}
Expr condition;
u32 label;
};
class ASTDoWhile {
public:
explicit ASTDoWhile(Expr condition_) : condition{std::move(condition_)} {}
Expr condition;
ASTZipper nodes{};
};
class ASTReturn {
public:
explicit ASTReturn(Expr condition_, bool kills_)
: condition{std::move(condition_)}, kills{kills_} {}
Expr condition;
bool kills;
};
class ASTBreak {
public:
explicit ASTBreak(Expr condition_) : condition{std::move(condition_)} {}
Expr condition;
};
class ASTBase {
public:
explicit ASTBase(ASTNode parent_, ASTData data_)
: data{std::move(data_)}, parent{std::move(parent_)} {}
template <class U, class... Args>
static ASTNode Make(ASTNode parent, Args&&... args) {
return std::make_shared<ASTBase>(std::move(parent),
ASTData(U(std::forward<Args>(args)...)));
}
void SetParent(ASTNode new_parent) {
parent = std::move(new_parent);
}
ASTNode& GetParent() {
return parent;
}
const ASTNode& GetParent() const {
return parent;
}
u32 GetLevel() const {
u32 level = 0;
auto next_parent = parent;
while (next_parent) {
next_parent = next_parent->GetParent();
level++;
}
return level;
}
ASTData* GetInnerData() {
return &data;
}
const ASTData* GetInnerData() const {
return &data;
}
ASTNode GetNext() const {
return next;
}
ASTNode GetPrevious() const {
return previous;
}
ASTZipper& GetManager() {
return *manager;
}
const ASTZipper& GetManager() const {
return *manager;
}
std::optional<u32> GetGotoLabel() const {
if (const auto* inner = std::get_if<ASTGoto>(&data)) {
return {inner->label};
}
return std::nullopt;
}
Expr GetGotoCondition() const {
if (const auto* inner = std::get_if<ASTGoto>(&data)) {
return inner->condition;
}
return nullptr;
}
void MarkLabelUnused() {
if (auto* inner = std::get_if<ASTLabel>(&data)) {
inner->unused = true;
}
}
bool IsLabelUnused() const {
if (const auto* inner = std::get_if<ASTLabel>(&data)) {
return inner->unused;
}
return true;
}
std::optional<u32> GetLabelIndex() const {
if (const auto* inner = std::get_if<ASTLabel>(&data)) {
return {inner->index};
}
return std::nullopt;
}
Expr GetIfCondition() const {
if (const auto* inner = std::get_if<ASTIfThen>(&data)) {
return inner->condition;
}
return nullptr;
}
void SetGotoCondition(Expr new_condition) {
if (auto* inner = std::get_if<ASTGoto>(&data)) {
inner->condition = std::move(new_condition);
}
}
bool IsIfThen() const {
return std::holds_alternative<ASTIfThen>(data);
}
bool IsIfElse() const {
return std::holds_alternative<ASTIfElse>(data);
}
bool IsBlockEncoded() const {
return std::holds_alternative<ASTBlockEncoded>(data);
}
void TransformBlockEncoded(NodeBlock&& nodes) {
data = ASTBlockDecoded(std::move(nodes));
}
bool IsLoop() const {
return std::holds_alternative<ASTDoWhile>(data);
}
ASTZipper* GetSubNodes() {
if (std::holds_alternative<ASTProgram>(data)) {
return &std::get_if<ASTProgram>(&data)->nodes;
}
if (std::holds_alternative<ASTIfThen>(data)) {
return &std::get_if<ASTIfThen>(&data)->nodes;
}
if (std::holds_alternative<ASTIfElse>(data)) {
return &std::get_if<ASTIfElse>(&data)->nodes;
}
if (std::holds_alternative<ASTDoWhile>(data)) {
return &std::get_if<ASTDoWhile>(&data)->nodes;
}
return nullptr;
}
void Clear() {
next.reset();
previous.reset();
parent.reset();
manager = nullptr;
}
private:
friend class ASTZipper;
ASTData data;
ASTNode parent;
ASTNode next;
ASTNode previous;
ASTZipper* manager{};
};
class ASTManager final {
public:
explicit ASTManager(bool do_full_decompile, bool disable_else_derivation_);
~ASTManager();
ASTManager(const ASTManager& o) = delete;
ASTManager& operator=(const ASTManager& other) = delete;
ASTManager(ASTManager&& other) noexcept = default;
ASTManager& operator=(ASTManager&& other) noexcept = default;
void Init();
void DeclareLabel(u32 address);
void InsertLabel(u32 address);
void InsertGoto(Expr condition, u32 address);
void InsertBlock(u32 start_address, u32 end_address);
void InsertReturn(Expr condition, bool kills);
std::string Print() const;
void Decompile();
void ShowCurrentState(std::string_view state) const;
void SanityCheck() const;
void Clear();
bool IsFullyDecompiled() const {
if (full_decompile) {
return gotos.empty();
}
for (ASTNode goto_node : gotos) {
auto label_index = goto_node->GetGotoLabel();
if (!label_index) {
return false;
}
ASTNode glabel = labels[*label_index];
if (IsBackwardsJump(goto_node, glabel)) {
return false;
}
}
return true;
}
ASTNode GetProgram() const {
return main_node;
}
u32 GetVariables() const {
return variables;
}
const std::vector<ASTNode>& GetLabels() const {
return labels;
}
private:
bool IsBackwardsJump(ASTNode goto_node, ASTNode label_node) const;
bool IndirectlyRelated(const ASTNode& first, const ASTNode& second) const;
bool DirectlyRelated(const ASTNode& first, const ASTNode& second) const;
void EncloseDoWhile(ASTNode goto_node, ASTNode label);
void EncloseIfThen(ASTNode goto_node, ASTNode label);
void MoveOutward(ASTNode goto_node);
u32 NewVariable() {
return variables++;
}
bool full_decompile{};
bool disable_else_derivation{};
std::unordered_map<u32, u32> labels_map{};
u32 labels_count{};
std::vector<ASTNode> labels{};
std::list<ASTNode> gotos{};
u32 variables{};
ASTProgram* program{};
ASTNode main_node{};
Expr false_condition{};
};
} // namespace VideoCommon::Shader
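For reference, a minimal usage sketch (not part of the original sources) of how the ASTManager declared above is driven. It mirrors the sequence used by DecompileShader() in control_flow.cpp further down in this diff; the addresses are made up and the always-true condition is built with the MakeExpr/ExprBoolean helpers from expr.h:

ASTManager manager{/*do_full_decompile=*/true, /*disable_else_derivation_=*/true};
manager.Init();
manager.DeclareLabel(0x30);                             // label addresses are declared up front
manager.InsertBlock(0x00, 0x2f);                        // first basic block
manager.InsertGoto(MakeExpr<ExprBoolean>(true), 0x30);  // unconditional jump to the label
manager.InsertLabel(0x30);
manager.InsertBlock(0x30, 0x4f);                        // second basic block
manager.InsertReturn(MakeExpr<ExprBoolean>(true), /*kills=*/false);
manager.Decompile();                                    // attempts to structure the gotos away
const std::string structured = manager.IsFullyDecompiled() ? manager.Print() : "";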

View File

@ -1,234 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <condition_variable>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <thread>
#include <vector>
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_base.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/shader/async_shaders.h"
namespace VideoCommon::Shader {
AsyncShaders::AsyncShaders(Core::Frontend::EmuWindow& emu_window_) : emu_window(emu_window_) {}
AsyncShaders::~AsyncShaders() {
KillWorkers();
}
void AsyncShaders::AllocateWorkers() {
// Use at least one thread
u32 num_workers = 1;
// Deduce how many more threads we can use
const u32 thread_count = std::thread::hardware_concurrency();
if (thread_count >= 8) {
// Increase async workers by 1 for every 2 threads >= 8
num_workers += 1 + (thread_count - 8) / 2;
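// e.g. 8 hardware threads -> 2 workers, 12 -> 4 workers, 16 -> 6 workers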
}
// If we already have the desired number of workers, there is nothing to do
if (num_workers == worker_threads.size()) {
return;
}
// If workers already exist, clear them
if (!worker_threads.empty()) {
FreeWorkers();
}
// Create workers
for (std::size_t i = 0; i < num_workers; i++) {
context_list.push_back(emu_window.CreateSharedContext());
worker_threads.emplace_back(&AsyncShaders::ShaderCompilerThread, this,
context_list[i].get());
}
}
void AsyncShaders::FreeWorkers() {
// Mark all threads to quit
is_thread_exiting.store(true);
cv.notify_all();
for (auto& thread : worker_threads) {
thread.join();
}
// Clear our shared contexts
context_list.clear();
// Clear our worker threads
worker_threads.clear();
}
void AsyncShaders::KillWorkers() {
is_thread_exiting.store(true);
cv.notify_all();
for (auto& thread : worker_threads) {
thread.detach();
}
// Clear our shared contexts
context_list.clear();
// Clear our worker threads
worker_threads.clear();
}
bool AsyncShaders::HasWorkQueued() const {
return !pending_queue.empty();
}
bool AsyncShaders::HasCompletedWork() const {
std::shared_lock lock{completed_mutex};
return !finished_work.empty();
}
bool AsyncShaders::IsShaderAsync(const Tegra::GPU& gpu) const {
const auto& regs = gpu.Maxwell3D().regs;
// If something is using depth, we can assume the game is not rendering anything that will
// only be used once.
if (regs.zeta_enable) {
return true;
}
// If the game is using a small index count, we can assume these are full-screen quads.
// Usually these shaders are only used once for building textures, so we can assume they
// can't be built async.
if (regs.index_array.count <= 6 || regs.vertex_buffer.count <= 6) {
return false;
}
return true;
}
std::vector<AsyncShaders::Result> AsyncShaders::GetCompletedWork() {
std::vector<Result> results;
{
std::unique_lock lock{completed_mutex};
results = std::move(finished_work);
finished_work.clear();
}
return results;
}
void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
Tegra::Engines::ShaderType shader_type, u64 uid,
std::vector<u64> code, std::vector<u64> code_b,
u32 main_offset, CompilerSettings compiler_settings,
const Registry& registry, VAddr cpu_addr) {
std::unique_lock lock(queue_mutex);
pending_queue.push({
.backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL,
.device = &device,
.shader_type = shader_type,
.uid = uid,
.code = std::move(code),
.code_b = std::move(code_b),
.main_offset = main_offset,
.compiler_settings = compiler_settings,
.registry = registry,
.cpu_address = cpu_addr,
.pp_cache = nullptr,
.vk_device = nullptr,
.scheduler = nullptr,
.descriptor_pool = nullptr,
.update_descriptor_queue = nullptr,
.bindings{},
.program{},
.key{},
.num_color_buffers = 0,
});
cv.notify_one();
}
void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache,
const Vulkan::Device& device, Vulkan::VKScheduler& scheduler,
Vulkan::VKDescriptorPool& descriptor_pool,
Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program,
Vulkan::GraphicsPipelineCacheKey key, u32 num_color_buffers) {
std::unique_lock lock(queue_mutex);
pending_queue.push({
.backend = Backend::Vulkan,
.device = nullptr,
.shader_type{},
.uid = 0,
.code{},
.code_b{},
.main_offset = 0,
.compiler_settings{},
.registry{},
.cpu_address = 0,
.pp_cache = pp_cache,
.vk_device = &device,
.scheduler = &scheduler,
.descriptor_pool = &descriptor_pool,
.update_descriptor_queue = &update_descriptor_queue,
.bindings = std::move(bindings),
.program = std::move(program),
.key = key,
.num_color_buffers = num_color_buffers,
});
cv.notify_one();
}
void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) {
while (!is_thread_exiting.load(std::memory_order_relaxed)) {
std::unique_lock lock{queue_mutex};
cv.wait(lock, [this] { return HasWorkQueued() || is_thread_exiting; });
if (is_thread_exiting) {
return;
}
// Re-check the queue after waking up; the wait can end spuriously
if (!HasWorkQueued()) {
continue;
}
// Another thread beat us, just unlock and wait for the next load
if (pending_queue.empty()) {
continue;
}
// Pull work from queue
WorkerParams work = std::move(pending_queue.front());
pending_queue.pop();
lock.unlock();
if (work.backend == Backend::OpenGL || work.backend == Backend::GLASM) {
const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, *work.registry);
const auto scope = context->Acquire();
auto program =
OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, *work.registry);
Result result{};
result.backend = work.backend;
result.cpu_address = work.cpu_address;
result.uid = work.uid;
result.code = std::move(work.code);
result.code_b = std::move(work.code_b);
result.shader_type = work.shader_type;
if (work.backend == Backend::OpenGL) {
result.program.opengl = std::move(program->source_program);
} else if (work.backend == Backend::GLASM) {
result.program.glasm = std::move(program->assembly_program);
}
{
std::unique_lock complete_lock(completed_mutex);
finished_work.push_back(std::move(result));
}
} else if (work.backend == Backend::Vulkan) {
auto pipeline = std::make_unique<Vulkan::VKGraphicsPipeline>(
*work.vk_device, *work.scheduler, *work.descriptor_pool,
*work.update_descriptor_queue, work.key, work.bindings, work.program,
work.num_color_buffers);
work.pp_cache->EmplacePipeline(std::move(pipeline));
}
}
}
} // namespace VideoCommon::Shader

View File

@ -1,138 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>
#include <shared_mutex>
#include <thread>
#include <vector>
#include <glad/glad.h>
#include "common/common_types.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/vulkan_common/vulkan_device.h"
namespace Core::Frontend {
class EmuWindow;
class GraphicsContext;
} // namespace Core::Frontend
namespace Tegra {
class GPU;
}
namespace Vulkan {
class VKPipelineCache;
}
namespace VideoCommon::Shader {
class AsyncShaders {
public:
enum class Backend {
OpenGL,
GLASM,
Vulkan,
};
struct ResultPrograms {
OpenGL::OGLProgram opengl;
OpenGL::OGLAssemblyProgram glasm;
};
struct Result {
u64 uid;
VAddr cpu_address;
Backend backend;
ResultPrograms program;
std::vector<u64> code;
std::vector<u64> code_b;
Tegra::Engines::ShaderType shader_type;
};
explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window_);
~AsyncShaders();
/// Start up shader worker threads
void AllocateWorkers();
/// Clear the shader queue and kill all worker threads
void FreeWorkers();
/// Force-end all worker threads (detaches them instead of joining)
void KillWorkers();
/// Check to see if any shaders have actually been compiled
[[nodiscard]] bool HasCompletedWork() const;
/// Deduce if a shader can be built on another thread or MUST be built in sync. We cannot build
/// every shader async, as some shaders are only built and executed once. We try to "guess"
/// which shaders will only be used once.
[[nodiscard]] bool IsShaderAsync(const Tegra::GPU& gpu) const;
/// Pulls completed compiled shaders
[[nodiscard]] std::vector<Result> GetCompletedWork();
void QueueOpenGLShader(const OpenGL::Device& device, Tegra::Engines::ShaderType shader_type,
u64 uid, std::vector<u64> code, std::vector<u64> code_b, u32 main_offset,
CompilerSettings compiler_settings, const Registry& registry,
VAddr cpu_addr);
void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::Device& device,
Vulkan::VKScheduler& scheduler,
Vulkan::VKDescriptorPool& descriptor_pool,
Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
std::vector<VkDescriptorSetLayoutBinding> bindings,
Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key,
u32 num_color_buffers);
private:
void ShaderCompilerThread(Core::Frontend::GraphicsContext* context);
/// Check our worker queue to see if we have any work queued already
[[nodiscard]] bool HasWorkQueued() const;
struct WorkerParams {
Backend backend;
// For OGL
const OpenGL::Device* device;
Tegra::Engines::ShaderType shader_type;
u64 uid;
std::vector<u64> code;
std::vector<u64> code_b;
u32 main_offset;
CompilerSettings compiler_settings;
std::optional<Registry> registry;
VAddr cpu_address;
// For Vulkan
Vulkan::VKPipelineCache* pp_cache;
const Vulkan::Device* vk_device;
Vulkan::VKScheduler* scheduler;
Vulkan::VKDescriptorPool* descriptor_pool;
Vulkan::VKUpdateDescriptorQueue* update_descriptor_queue;
std::vector<VkDescriptorSetLayoutBinding> bindings;
Vulkan::SPIRVProgram program;
Vulkan::GraphicsPipelineCacheKey key;
u32 num_color_buffers;
};
std::condition_variable cv;
mutable std::mutex queue_mutex;
mutable std::shared_mutex completed_mutex;
std::atomic<bool> is_thread_exiting{};
std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list;
std::vector<std::thread> worker_threads;
std::queue<WorkerParams> pending_queue;
std::vector<Result> finished_work;
Core::Frontend::EmuWindow& emu_window;
};
} // namespace VideoCommon::Shader
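A hedged usage sketch (not from this PR's sources) of how a renderer-side shader cache would typically drive AsyncShaders; emu_window, gpu, device and the other queue arguments are placeholders for the caller's real objects:

VideoCommon::Shader::AsyncShaders async_shaders{emu_window};
async_shaders.AllocateWorkers();
// When a new shader is seen, decide whether it is safe to compile it asynchronously.
if (async_shaders.IsShaderAsync(gpu)) {
    async_shaders.QueueOpenGLShader(device, shader_type, uid, std::move(code), std::move(code_b),
                                    main_offset, compiler_settings, registry, cpu_addr);
}
// At a drain point (e.g. once per frame), install whatever finished compiling.
if (async_shaders.HasCompletedWork()) {
    for (auto& result : async_shaders.GetCompletedWork()) {
        // Install result.program.opengl or result.program.glasm into the shader cache here.
    }
}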

View File

@ -1,26 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "video_core/shader/compiler_settings.h"
namespace VideoCommon::Shader {
std::string CompileDepthAsString(const CompileDepth cd) {
switch (cd) {
case CompileDepth::BruteForce:
return "Brute Force Compile";
case CompileDepth::FlowStack:
return "Simple Flow Stack Mode";
case CompileDepth::NoFlowStack:
return "Remove Flow Stack";
case CompileDepth::DecompileBackwards:
return "Decompile Backward Jumps";
case CompileDepth::FullDecompile:
return "Full Decompilation";
default:
return "Unknown Compiler Process";
}
}
} // namespace VideoCommon::Shader

View File

@ -1,26 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "video_core/engines/shader_bytecode.h"
namespace VideoCommon::Shader {
enum class CompileDepth : u32 {
BruteForce = 0,
FlowStack = 1,
NoFlowStack = 2,
DecompileBackwards = 3,
FullDecompile = 4,
};
std::string CompileDepthAsString(CompileDepth cd);
struct CompilerSettings {
CompileDepth depth{CompileDepth::NoFlowStack};
bool disable_else_derivation{true};
};
} // namespace VideoCommon::Shader

View File

@ -1,751 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <stack>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/control_flow.h"
#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace VideoCommon::Shader {
namespace {
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
constexpr s32 unassigned_branch = -2;
struct Query {
u32 address{};
std::stack<u32> ssy_stack{};
std::stack<u32> pbk_stack{};
};
struct BlockStack {
BlockStack() = default;
explicit BlockStack(const Query& q) : ssy_stack{q.ssy_stack}, pbk_stack{q.pbk_stack} {}
std::stack<u32> ssy_stack{};
std::stack<u32> pbk_stack{};
};
template <typename T, typename... Args>
BlockBranchInfo MakeBranchInfo(Args&&... args) {
static_assert(std::is_convertible_v<T, BranchData>);
return std::make_shared<BranchData>(T(std::forward<Args>(args)...));
}
bool BlockBranchIsIgnored(BlockBranchInfo first) {
bool ignore = false;
if (std::holds_alternative<SingleBranch>(*first)) {
const auto branch = std::get_if<SingleBranch>(first.get());
ignore = branch->ignore;
}
return ignore;
}
struct BlockInfo {
u32 start{};
u32 end{};
bool visited{};
BlockBranchInfo branch{};
bool IsInside(const u32 address) const {
return start <= address && address <= end;
}
};
struct CFGRebuildState {
explicit CFGRebuildState(const ProgramCode& program_code_, u32 start_, Registry& registry_)
: program_code{program_code_}, registry{registry_}, start{start_} {}
const ProgramCode& program_code;
Registry& registry;
u32 start{};
std::vector<BlockInfo> block_info;
std::list<u32> inspect_queries;
std::list<Query> queries;
std::unordered_map<u32, u32> registered;
std::set<u32> labels;
std::map<u32, u32> ssy_labels;
std::map<u32, u32> pbk_labels;
std::unordered_map<u32, BlockStack> stacks;
ASTManager* manager{};
};
enum class BlockCollision : u32 { None, Found, Inside };
std::pair<BlockCollision, u32> TryGetBlock(CFGRebuildState& state, u32 address) {
const auto& blocks = state.block_info;
for (u32 index = 0; index < blocks.size(); index++) {
if (blocks[index].start == address) {
return {BlockCollision::Found, index};
}
if (blocks[index].IsInside(address)) {
return {BlockCollision::Inside, index};
}
}
return {BlockCollision::None, 0xFFFFFFFF};
}
struct ParseInfo {
BlockBranchInfo branch_info{};
u32 end_address{};
};
BlockInfo& CreateBlockInfo(CFGRebuildState& state, u32 start, u32 end) {
auto& it = state.block_info.emplace_back();
it.start = start;
it.end = end;
const u32 index = static_cast<u32>(state.block_info.size() - 1);
state.registered.insert({start, index});
return it;
}
Pred GetPredicate(u32 index, bool negated) {
return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL));
}
enum class ParseResult : u32 {
ControlCaught,
BlockEnd,
AbnormalFlow,
};
struct BranchIndirectInfo {
u32 buffer{};
u32 offset{};
u32 entries{};
s32 relative_position{};
};
struct BufferInfo {
u32 index;
u32 offset;
};
std::optional<std::pair<s32, u64>> GetBRXInfo(const CFGRebuildState& state, u32& pos) {
const Instruction instr = state.program_code[pos];
const auto opcode = OpCode::Decode(instr);
if (opcode->get().GetId() != OpCode::Id::BRX) {
return std::nullopt;
}
if (instr.brx.constant_buffer != 0) {
return std::nullopt;
}
--pos;
return std::make_pair(instr.brx.GetBranchExtend(), instr.gpr8.Value());
}
template <typename Result, typename TestCallable, typename PackCallable>
// requires std::predicate<TestCallable, Instruction, const OpCode::Matcher&>
// requires std::invocable<PackCallable, Instruction, const OpCode::Matcher&>
std::optional<Result> TrackInstruction(const CFGRebuildState& state, u32& pos, TestCallable test,
PackCallable pack) {
for (; pos >= state.start; --pos) {
if (IsSchedInstruction(pos, state.start)) {
continue;
}
const Instruction instr = state.program_code[pos];
const auto opcode = OpCode::Decode(instr);
if (!opcode) {
continue;
}
if (test(instr, opcode->get())) {
--pos;
return std::make_optional(pack(instr, opcode->get()));
}
}
return std::nullopt;
}
std::optional<std::pair<BufferInfo, u64>> TrackLDC(const CFGRebuildState& state, u32& pos,
u64 brx_tracked_register) {
return TrackInstruction<std::pair<BufferInfo, u64>>(
state, pos,
[brx_tracked_register](auto instr, const auto& opcode) {
return opcode.GetId() == OpCode::Id::LD_C &&
instr.gpr0.Value() == brx_tracked_register &&
instr.ld_c.type.Value() == Tegra::Shader::UniformType::Single;
},
[](auto instr, const auto& opcode) {
const BufferInfo info = {static_cast<u32>(instr.cbuf36.index.Value()),
static_cast<u32>(instr.cbuf36.GetOffset())};
return std::make_pair(info, instr.gpr8.Value());
});
}
std::optional<u64> TrackSHLRegister(const CFGRebuildState& state, u32& pos,
u64 ldc_tracked_register) {
return TrackInstruction<u64>(
state, pos,
[ldc_tracked_register](auto instr, const auto& opcode) {
return opcode.GetId() == OpCode::Id::SHL_IMM &&
instr.gpr0.Value() == ldc_tracked_register;
},
[](auto instr, const auto&) { return instr.gpr8.Value(); });
}
std::optional<u32> TrackIMNMXValue(const CFGRebuildState& state, u32& pos,
u64 shl_tracked_register) {
return TrackInstruction<u32>(
state, pos,
[shl_tracked_register](auto instr, const auto& opcode) {
return opcode.GetId() == OpCode::Id::IMNMX_IMM &&
instr.gpr0.Value() == shl_tracked_register;
},
[](auto instr, const auto&) {
return static_cast<u32>(instr.alu.GetSignedImm20_20() + 1);
});
}
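// Walks backwards from a BRX to recover the indirect branch table: the LD_C that loaded the
// table entry yields the constant buffer index and offset, the SHL_IMM identifies the index
// register, and the IMNMX_IMM clamp yields the number of entries.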
std::optional<BranchIndirectInfo> TrackBranchIndirectInfo(const CFGRebuildState& state, u32 pos) {
const auto brx_info = GetBRXInfo(state, pos);
if (!brx_info) {
return std::nullopt;
}
const auto [relative_position, brx_tracked_register] = *brx_info;
const auto ldc_info = TrackLDC(state, pos, brx_tracked_register);
if (!ldc_info) {
return std::nullopt;
}
const auto [buffer_info, ldc_tracked_register] = *ldc_info;
const auto shl_tracked_register = TrackSHLRegister(state, pos, ldc_tracked_register);
if (!shl_tracked_register) {
return std::nullopt;
}
const auto entries = TrackIMNMXValue(state, pos, *shl_tracked_register);
if (!entries) {
return std::nullopt;
}
return BranchIndirectInfo{buffer_info.index, buffer_info.offset, *entries, relative_position};
}
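// Scans forward from 'address', recording SSY/PBK targets along the way, until a branching
// instruction (EXIT, BRA, SYNC, BRK, KIL, BRX) or an already-registered block is reached,
// returning the block's end address and its branch information.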
std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) {
u32 offset = static_cast<u32>(address);
const u32 end_address = static_cast<u32>(state.program_code.size());
ParseInfo parse_info{};
SingleBranch single_branch{};
const auto insert_label = [](CFGRebuildState& rebuild_state, u32 label_address) {
const auto pair = rebuild_state.labels.emplace(label_address);
if (pair.second) {
rebuild_state.inspect_queries.push_back(label_address);
}
};
while (true) {
if (offset >= end_address) {
// ASSERT_OR_EXECUTE can't be used, as it ignores the break
ASSERT_MSG(false, "Shader passed the current limit!");
single_branch.address = exit_branch;
single_branch.ignore = false;
break;
}
if (state.registered.contains(offset)) {
single_branch.address = offset;
single_branch.ignore = true;
break;
}
if (IsSchedInstruction(offset, state.start)) {
offset++;
continue;
}
const Instruction instr = {state.program_code[offset]};
const auto opcode = OpCode::Decode(instr);
if (!opcode || opcode->get().GetType() != OpCode::Type::Flow) {
offset++;
continue;
}
switch (opcode->get().GetId()) {
case OpCode::Id::EXIT: {
const auto pred_index = static_cast<u32>(instr.pred.pred_index);
single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0);
if (single_branch.condition.predicate == Pred::NeverExecute) {
offset++;
continue;
}
const ConditionCode cc = instr.flow_condition_code;
single_branch.condition.cc = cc;
if (cc == ConditionCode::F) {
offset++;
continue;
}
single_branch.address = exit_branch;
single_branch.kill = false;
single_branch.is_sync = false;
single_branch.is_brk = false;
single_branch.ignore = false;
parse_info.end_address = offset;
parse_info.branch_info = MakeBranchInfo<SingleBranch>(
single_branch.condition, single_branch.address, single_branch.kill,
single_branch.is_sync, single_branch.is_brk, single_branch.ignore);
return {ParseResult::ControlCaught, parse_info};
}
case OpCode::Id::BRA: {
if (instr.bra.constant_buffer != 0) {
return {ParseResult::AbnormalFlow, parse_info};
}
const auto pred_index = static_cast<u32>(instr.pred.pred_index);
single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0);
if (single_branch.condition.predicate == Pred::NeverExecute) {
offset++;
continue;
}
const ConditionCode cc = instr.flow_condition_code;
single_branch.condition.cc = cc;
if (cc == ConditionCode::F) {
offset++;
continue;
}
const u32 branch_offset = offset + instr.bra.GetBranchTarget();
if (branch_offset == 0) {
single_branch.address = exit_branch;
} else {
single_branch.address = branch_offset;
}
insert_label(state, branch_offset);
single_branch.kill = false;
single_branch.is_sync = false;
single_branch.is_brk = false;
single_branch.ignore = false;
parse_info.end_address = offset;
parse_info.branch_info = MakeBranchInfo<SingleBranch>(
single_branch.condition, single_branch.address, single_branch.kill,
single_branch.is_sync, single_branch.is_brk, single_branch.ignore);
return {ParseResult::ControlCaught, parse_info};
}
case OpCode::Id::SYNC: {
const auto pred_index = static_cast<u32>(instr.pred.pred_index);
single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0);
if (single_branch.condition.predicate == Pred::NeverExecute) {
offset++;
continue;
}
const ConditionCode cc = instr.flow_condition_code;
single_branch.condition.cc = cc;
if (cc == ConditionCode::F) {
offset++;
continue;
}
single_branch.address = unassigned_branch;
single_branch.kill = false;
single_branch.is_sync = true;
single_branch.is_brk = false;
single_branch.ignore = false;
parse_info.end_address = offset;
parse_info.branch_info = MakeBranchInfo<SingleBranch>(
single_branch.condition, single_branch.address, single_branch.kill,
single_branch.is_sync, single_branch.is_brk, single_branch.ignore);
return {ParseResult::ControlCaught, parse_info};
}
case OpCode::Id::BRK: {
const auto pred_index = static_cast<u32>(instr.pred.pred_index);
single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0);
if (single_branch.condition.predicate == Pred::NeverExecute) {
offset++;
continue;
}
const ConditionCode cc = instr.flow_condition_code;
single_branch.condition.cc = cc;
if (cc == ConditionCode::F) {
offset++;
continue;
}
single_branch.address = unassigned_branch;
single_branch.kill = false;
single_branch.is_sync = false;
single_branch.is_brk = true;
single_branch.ignore = false;
parse_info.end_address = offset;
parse_info.branch_info = MakeBranchInfo<SingleBranch>(
single_branch.condition, single_branch.address, single_branch.kill,
single_branch.is_sync, single_branch.is_brk, single_branch.ignore);
return {ParseResult::ControlCaught, parse_info};
}
case OpCode::Id::KIL: {
const auto pred_index = static_cast<u32>(instr.pred.pred_index);
single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0);
if (single_branch.condition.predicate == Pred::NeverExecute) {
offset++;
continue;
}
const ConditionCode cc = instr.flow_condition_code;
single_branch.condition.cc = cc;
if (cc == ConditionCode::F) {
offset++;
continue;
}
single_branch.address = exit_branch;
single_branch.kill = true;
single_branch.is_sync = false;
single_branch.is_brk = false;
single_branch.ignore = false;
parse_info.end_address = offset;
parse_info.branch_info = MakeBranchInfo<SingleBranch>(
single_branch.condition, single_branch.address, single_branch.kill,
single_branch.is_sync, single_branch.is_brk, single_branch.ignore);
return {ParseResult::ControlCaught, parse_info};
}
case OpCode::Id::SSY: {
const u32 target = offset + instr.bra.GetBranchTarget();
insert_label(state, target);
state.ssy_labels.emplace(offset, target);
break;
}
case OpCode::Id::PBK: {
const u32 target = offset + instr.bra.GetBranchTarget();
insert_label(state, target);
state.pbk_labels.emplace(offset, target);
break;
}
case OpCode::Id::BRX: {
const auto tmp = TrackBranchIndirectInfo(state, offset);
if (!tmp) {
LOG_WARNING(HW_GPU, "BRX Track Unsuccesful");
return {ParseResult::AbnormalFlow, parse_info};
}
const auto result = *tmp;
const s32 pc_target = offset + result.relative_position;
std::vector<CaseBranch> branches;
for (u32 i = 0; i < result.entries; i++) {
auto key = state.registry.ObtainKey(result.buffer, result.offset + i * 4);
if (!key) {
return {ParseResult::AbnormalFlow, parse_info};
}
u32 value = *key;
u32 target = static_cast<u32>((value >> 3) + pc_target);
insert_label(state, target);
branches.emplace_back(value, target);
}
parse_info.end_address = offset;
parse_info.branch_info = MakeBranchInfo<MultiBranch>(
static_cast<u32>(instr.gpr8.Value()), std::move(branches));
return {ParseResult::ControlCaught, parse_info};
}
default:
break;
}
offset++;
}
single_branch.kill = false;
single_branch.is_sync = false;
single_branch.is_brk = false;
parse_info.end_address = offset - 1;
parse_info.branch_info = MakeBranchInfo<SingleBranch>(
single_branch.condition, single_branch.address, single_branch.kill, single_branch.is_sync,
single_branch.is_brk, single_branch.ignore);
return {ParseResult::BlockEnd, parse_info};
}
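// Pops one address from the inspect queue and either reuses the existing block at that address,
// splits the block that contains it, or parses a brand new block starting there.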
bool TryInspectAddress(CFGRebuildState& state) {
if (state.inspect_queries.empty()) {
return false;
}
const u32 address = state.inspect_queries.front();
state.inspect_queries.pop_front();
const auto [result, block_index] = TryGetBlock(state, address);
switch (result) {
case BlockCollision::Found: {
return true;
}
case BlockCollision::Inside: {
// This case is the tricky one:
// We need to split the block into 2 separate blocks
const u32 end = state.block_info[block_index].end;
BlockInfo& new_block = CreateBlockInfo(state, address, end);
BlockInfo& current_block = state.block_info[block_index];
current_block.end = address - 1;
new_block.branch = std::move(current_block.branch);
BlockBranchInfo forward_branch = MakeBranchInfo<SingleBranch>();
const auto branch = std::get_if<SingleBranch>(forward_branch.get());
branch->address = address;
branch->ignore = true;
current_block.branch = std::move(forward_branch);
return true;
}
default:
break;
}
const auto [parse_result, parse_info] = ParseCode(state, address);
if (parse_result == ParseResult::AbnormalFlow) {
// On AbnormalFlow, return false to abort the CFG reconstruction
return false;
}
BlockInfo& block_info = CreateBlockInfo(state, address, parse_info.end_address);
block_info.branch = parse_info.branch_info;
if (std::holds_alternative<SingleBranch>(*block_info.branch)) {
const auto branch = std::get_if<SingleBranch>(block_info.branch.get());
if (branch->condition.IsUnconditional()) {
return true;
}
const u32 fallthrough_address = parse_info.end_address + 1;
state.inspect_queries.push_front(fallthrough_address);
return true;
}
return true;
}
bool TryQuery(CFGRebuildState& state) {
const auto gather_labels = [](std::stack<u32>& cc, std::map<u32, u32>& labels,
BlockInfo& block) {
auto gather_start = labels.lower_bound(block.start);
const auto gather_end = labels.upper_bound(block.end);
while (gather_start != gather_end) {
cc.push(gather_start->second);
++gather_start;
}
};
if (state.queries.empty()) {
return false;
}
Query& q = state.queries.front();
const u32 block_index = state.registered[q.address];
BlockInfo& block = state.block_info[block_index];
// If the block is visited, check if the stacks match, else gather the ssy/pbk
// labels into the current stack and look if the branch at the end of the block
// consumes a label. Schedule new queries accordingly
if (block.visited) {
BlockStack& stack = state.stacks[q.address];
const bool all_okay = (stack.ssy_stack.empty() || q.ssy_stack == stack.ssy_stack) &&
(stack.pbk_stack.empty() || q.pbk_stack == stack.pbk_stack);
state.queries.pop_front();
return all_okay;
}
block.visited = true;
state.stacks.insert_or_assign(q.address, BlockStack{q});
Query q2(q);
state.queries.pop_front();
gather_labels(q2.ssy_stack, state.ssy_labels, block);
gather_labels(q2.pbk_stack, state.pbk_labels, block);
if (std::holds_alternative<SingleBranch>(*block.branch)) {
auto* branch = std::get_if<SingleBranch>(block.branch.get());
if (!branch->condition.IsUnconditional()) {
q2.address = block.end + 1;
state.queries.push_back(q2);
}
auto& conditional_query = state.queries.emplace_back(q2);
if (branch->is_sync) {
if (branch->address == unassigned_branch) {
branch->address = conditional_query.ssy_stack.top();
}
conditional_query.ssy_stack.pop();
}
if (branch->is_brk) {
if (branch->address == unassigned_branch) {
branch->address = conditional_query.pbk_stack.top();
}
conditional_query.pbk_stack.pop();
}
conditional_query.address = branch->address;
return true;
}
const auto* multi_branch = std::get_if<MultiBranch>(block.branch.get());
for (const auto& branch_case : multi_branch->branches) {
auto& conditional_query = state.queries.emplace_back(q2);
conditional_query.address = branch_case.address;
}
return true;
}
void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch_info) {
const auto get_expr = [](const Condition& cond) -> Expr {
Expr result;
if (cond.cc != ConditionCode::T) {
result = MakeExpr<ExprCondCode>(cond.cc);
}
if (cond.predicate != Pred::UnusedIndex) {
u32 pred = static_cast<u32>(cond.predicate);
bool negate = false;
if (pred > 7) {
negate = true;
pred -= 8;
}
Expr extra = MakeExpr<ExprPredicate>(pred);
if (negate) {
extra = MakeExpr<ExprNot>(std::move(extra));
}
if (result) {
return MakeExpr<ExprAnd>(std::move(extra), std::move(result));
}
return extra;
}
if (result) {
return result;
}
return MakeExpr<ExprBoolean>(true);
};
if (std::holds_alternative<SingleBranch>(*branch_info)) {
const auto* branch = std::get_if<SingleBranch>(branch_info.get());
if (branch->address < 0) {
if (branch->kill) {
mm.InsertReturn(get_expr(branch->condition), true);
return;
}
mm.InsertReturn(get_expr(branch->condition), false);
return;
}
mm.InsertGoto(get_expr(branch->condition), branch->address);
return;
}
const auto* multi_branch = std::get_if<MultiBranch>(branch_info.get());
for (const auto& branch_case : multi_branch->branches) {
mm.InsertGoto(MakeExpr<ExprGprEqual>(multi_branch->gpr, branch_case.cmp_value),
branch_case.address);
}
}
void DecompileShader(CFGRebuildState& state) {
state.manager->Init();
for (auto label : state.labels) {
state.manager->DeclareLabel(label);
}
for (const auto& block : state.block_info) {
if (state.labels.contains(block.start)) {
state.manager->InsertLabel(block.start);
}
const bool ignore = BlockBranchIsIgnored(block.branch);
const u32 end = ignore ? block.end + 1 : block.end;
state.manager->InsertBlock(block.start, end);
if (!ignore) {
InsertBranch(*state.manager, block.branch);
}
}
state.manager->Decompile();
}
} // Anonymous namespace
std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address,
const CompilerSettings& settings,
Registry& registry) {
auto result_out = std::make_unique<ShaderCharacteristics>();
if (settings.depth == CompileDepth::BruteForce) {
result_out->settings.depth = CompileDepth::BruteForce;
return result_out;
}
CFGRebuildState state{program_code, start_address, registry};
// Inspect Code and generate blocks
state.labels.clear();
state.labels.emplace(start_address);
state.inspect_queries.push_back(state.start);
while (!state.inspect_queries.empty()) {
if (!TryInspectAddress(state)) {
result_out->settings.depth = CompileDepth::BruteForce;
return result_out;
}
}
bool use_flow_stack = true;
bool decompiled = false;
if (settings.depth != CompileDepth::FlowStack) {
// Decompile Stacks
state.queries.push_back(Query{state.start, {}, {}});
decompiled = true;
while (!state.queries.empty()) {
if (!TryQuery(state)) {
decompiled = false;
break;
}
}
}
use_flow_stack = !decompiled;
// Sort and organize results
std::sort(state.block_info.begin(), state.block_info.end(),
[](const BlockInfo& a, const BlockInfo& b) -> bool { return a.start < b.start; });
if (decompiled && settings.depth != CompileDepth::NoFlowStack) {
ASTManager manager{settings.depth != CompileDepth::DecompileBackwards,
settings.disable_else_derivation};
state.manager = &manager;
DecompileShader(state);
decompiled = state.manager->IsFullyDecompiled();
if (!decompiled) {
if (settings.depth == CompileDepth::FullDecompile) {
LOG_CRITICAL(HW_GPU, "Failed to remove all the gotos!:");
} else {
LOG_CRITICAL(HW_GPU, "Failed to remove all backward gotos!:");
}
state.manager->ShowCurrentState("Of Shader");
state.manager->Clear();
} else {
auto characteristics = std::make_unique<ShaderCharacteristics>();
characteristics->start = start_address;
characteristics->settings.depth = settings.depth;
characteristics->manager = std::move(manager);
characteristics->end = state.block_info.back().end + 1;
return characteristics;
}
}
result_out->start = start_address;
result_out->settings.depth =
use_flow_stack ? CompileDepth::FlowStack : CompileDepth::NoFlowStack;
result_out->blocks.clear();
for (auto& block : state.block_info) {
ShaderBlock new_block{};
new_block.start = block.start;
new_block.end = block.end;
new_block.ignore_branch = BlockBranchIsIgnored(block.branch);
if (!new_block.ignore_branch) {
new_block.branch = block.branch;
}
result_out->end = std::max(result_out->end, block.end);
result_out->blocks.push_back(new_block);
}
if (!use_flow_stack) {
result_out->labels = std::move(state.labels);
return result_out;
}
auto back = result_out->blocks.begin();
auto next = std::next(back);
while (next != result_out->blocks.end()) {
if (!state.labels.contains(next->start) && next->start == back->end + 1) {
back->end = next->end;
next = result_out->blocks.erase(next);
continue;
}
back = next;
++next;
}
return result_out;
}
} // namespace VideoCommon::Shader

View File

@ -1,117 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <list>
#include <memory>
#include <optional>
#include <set>
#include <tuple>
#include <variant>
#include <vector>
#include "video_core/engines/shader_bytecode.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/compiler_settings.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace VideoCommon::Shader {
using Tegra::Shader::ConditionCode;
using Tegra::Shader::Pred;
constexpr s32 exit_branch = -1;
struct Condition {
Pred predicate{Pred::UnusedIndex};
ConditionCode cc{ConditionCode::T};
bool IsUnconditional() const {
return predicate == Pred::UnusedIndex && cc == ConditionCode::T;
}
bool operator==(const Condition& other) const {
return std::tie(predicate, cc) == std::tie(other.predicate, other.cc);
}
bool operator!=(const Condition& other) const {
return !operator==(other);
}
};
class SingleBranch {
public:
SingleBranch() = default;
explicit SingleBranch(Condition condition_, s32 address_, bool kill_, bool is_sync_,
bool is_brk_, bool ignore_)
: condition{condition_}, address{address_}, kill{kill_}, is_sync{is_sync_}, is_brk{is_brk_},
ignore{ignore_} {}
bool operator==(const SingleBranch& b) const {
return std::tie(condition, address, kill, is_sync, is_brk, ignore) ==
std::tie(b.condition, b.address, b.kill, b.is_sync, b.is_brk, b.ignore);
}
bool operator!=(const SingleBranch& b) const {
return !operator==(b);
}
Condition condition{};
s32 address{exit_branch};
bool kill{};
bool is_sync{};
bool is_brk{};
bool ignore{};
};
struct CaseBranch {
explicit CaseBranch(u32 cmp_value_, u32 address_) : cmp_value{cmp_value_}, address{address_} {}
u32 cmp_value;
u32 address;
};
class MultiBranch {
public:
explicit MultiBranch(u32 gpr_, std::vector<CaseBranch>&& branches_)
: gpr{gpr_}, branches{std::move(branches_)} {}
u32 gpr{};
std::vector<CaseBranch> branches{};
};
using BranchData = std::variant<SingleBranch, MultiBranch>;
using BlockBranchInfo = std::shared_ptr<BranchData>;
bool BlockBranchInfoAreEqual(BlockBranchInfo first, BlockBranchInfo second);
struct ShaderBlock {
u32 start{};
u32 end{};
bool ignore_branch{};
BlockBranchInfo branch{};
bool operator==(const ShaderBlock& sb) const {
return std::tie(start, end, ignore_branch) ==
std::tie(sb.start, sb.end, sb.ignore_branch) &&
BlockBranchInfoAreEqual(branch, sb.branch);
}
bool operator!=(const ShaderBlock& sb) const {
return !operator==(sb);
}
};
struct ShaderCharacteristics {
std::list<ShaderBlock> blocks{};
std::set<u32> labels{};
u32 start{};
u32 end{};
ASTManager manager{true, true};
CompilerSettings settings{};
};
std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address,
const CompilerSettings& settings,
Registry& registry);
} // namespace VideoCommon::Shader
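A minimal usage sketch (not part of these headers) of how a caller such as the shader IR front-end would invoke ScanFlow and branch on the resulting compile depth; program_code, start_address and registry are placeholders for the caller's real objects:

CompilerSettings settings{CompileDepth::FullDecompile, /*disable_else_derivation=*/false};
const auto characteristics = ScanFlow(program_code, start_address, settings, registry);
switch (characteristics->settings.depth) {
case CompileDepth::FullDecompile:
case CompileDepth::DecompileBackwards:
    // characteristics->manager holds the structured program produced by the AST pass.
    break;
case CompileDepth::FlowStack:
case CompileDepth::NoFlowStack:
    // characteristics->blocks holds the linear block list; SSY/PBK must be emulated with a
    // flow stack when the depth is FlowStack.
    break;
default:
    // CompileDepth::BruteForce: decode instruction by instruction without CFG information.
    break;
}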

Some files were not shown because too many files have changed in this diff.