gl_shader_decompiler: Use uint for images and fix SUATOM

In the process, remove the implementations of SUATOM.MIN and SUATOM.MAX, as these require a distinction between U32 and S32 that is lost now that images are always treated as uint. They have to be implemented with an imageAtomicCompSwap loop instead.
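
For context, a plain imageAtomicMin on an r32ui image always compares unsigned, so the signed comparison has to be done manually and committed with a compare-and-swap. Such a loop would look roughly like the following emitted GLSL. This is a hypothetical sketch of a signed atomic min on a uint image, not code generated by this commit; the `img` binding and the `ImageAtomicSMin` helper name are made up for illustration.

    #version 450
    layout(binding = 0, r32ui) coherent uniform uimage2D img;

    // Hypothetical sketch: emulate a signed atomic min on a uint image
    // with an imageAtomicCompSwap retry loop.
    uint ImageAtomicSMin(ivec2 coord, int value) {
        uint old = imageLoad(img, coord).x;
        for (;;) {
            // Compare as signed, store the raw uint bit pattern back.
            uint desired = uint(min(int(old), value));
            uint seen = imageAtomicCompSwap(img, coord, old, desired);
            if (seen == old) {
                return old; // Swap succeeded; 'old' is the previous value.
            }
            old = seen; // Another invocation raced us; retry with its value.
        }
    }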
commit 44000971e2 (parent 675f23aedc)
Author: ReinUsesLisp
Date: 2019-09-18 01:50:40 -03:00
7 changed files with 93 additions and 188 deletions


@@ -276,16 +276,13 @@ private:
                        bool is_shadow);

     /// Accesses an image.
-    Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type,
-                    std::optional<Tegra::Shader::ImageAtomicSize> size = {});
+    Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);

     /// Accesses a bindless image sampler.
-    Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type,
-                            std::optional<Tegra::Shader::ImageAtomicSize> size = {});
+    Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type);

     /// Tries to access an existing image, updating its state as needed
-    Image* TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type,
-                               std::optional<Tegra::Shader::ImageAtomicSize> size);
+    Image* TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type);

     /// Extracts a sequence of bits from a node
     Node BitfieldExtract(Node value, u32 offset, u32 bits);