Mirror of https://github.com/yuzu-emu/yuzu-mainline (synced 2024-11-22 14:23:35 +00:00)
gl_shader_cache: Rework shader cache and remove post-specializations
Instead of pre-specializing shaders and then post-specializing them, drop the latter and only "specialize" the shader while decoding it.
Parent: 22e825a3bc
Commit: bd8b9bbcee
19 changed files with 548 additions and 1100 deletions
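
At a glance, the reworked cache builds the GL program while the shader is decoded, so callers no longer pass a ProgramVariant when fetching a handle. A minimal sketch of that caller-visible change, assuming simplified stand-in types rather than the real yuzu classes (only the GetHandle/ProgramVariant names come from the diff below):

// Illustration only: stand-in structs, not the actual CachedShader.
#include <cstdint>

using GLuint = std::uint32_t;

struct ProgramVariant {
    GLuint primitive_mode = 0;
};

// Before: one GL program per specialization variant, resolved at draw time.
struct CachedShaderOld {
    GLuint GetHandle(const ProgramVariant& variant) {
        // Previously this looked up (or lazily built) a program for the given variant.
        (void)variant;
        return dummy_handle;
    }
    GLuint dummy_handle = 1;
};

// After: the program is built once while decoding, so draws use the single handle.
struct CachedShaderNew {
    GLuint handle = 1; // set when the shader is created
    GLuint GetHandle() const {
        return handle;
    }
};

int main() {
    CachedShaderOld old_shader;
    CachedShaderNew new_shader;
    const GLuint before = old_shader.GetHandle(ProgramVariant{4}); // e.g. GL_TRIANGLES
    const GLuint after = new_shader.GetHandle();
    return static_cast<int>(before - after);
}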
@@ -57,8 +57,6 @@ set(HASH_FILES
    "${VIDEO_CORE}/renderer_opengl/gl_shader_decompiler.h"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.cpp"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.h"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_gen.cpp"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_gen.h"
    "${VIDEO_CORE}/shader/decode/arithmetic.cpp"
    "${VIDEO_CORE}/shader/decode/arithmetic_half.cpp"
    "${VIDEO_CORE}/shader/decode/arithmetic_half_immediate.cpp"

@@ -38,8 +38,6 @@ add_custom_command(OUTPUT scm_rev.cpp
    "${VIDEO_CORE}/renderer_opengl/gl_shader_decompiler.h"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.cpp"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.h"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_gen.cpp"
    "${VIDEO_CORE}/renderer_opengl/gl_shader_gen.h"
    "${VIDEO_CORE}/shader/decode/arithmetic.cpp"
    "${VIDEO_CORE}/shader/decode/arithmetic_half.cpp"
    "${VIDEO_CORE}/shader/decode/arithmetic_half_immediate.cpp"

@@ -65,8 +65,6 @@ add_library(video_core STATIC
    renderer_opengl/gl_shader_decompiler.h
    renderer_opengl/gl_shader_disk_cache.cpp
    renderer_opengl/gl_shader_disk_cache.h
    renderer_opengl/gl_shader_gen.cpp
    renderer_opengl/gl_shader_gen.h
    renderer_opengl/gl_shader_manager.cpp
    renderer_opengl/gl_shader_manager.h
    renderer_opengl/gl_shader_util.cpp

@@ -4,13 +4,15 @@

#include <algorithm>
#include <limits>
#include <vector>

#include "common/common_types.h"
#include "video_core/guest_driver.h"

namespace VideoCore {

void GuestDriverProfile::DeduceTextureHandlerSize(std::vector<u32>&& bound_offsets) {
    if (texture_handler_size_deduced) {
void GuestDriverProfile::DeduceTextureHandlerSize(std::vector<u32> bound_offsets) {
    if (texture_handler_size) {
        return;
    }
    const std::size_t size = bound_offsets.size();

@@ -29,7 +31,6 @@ void GuestDriverProfile::DeduceTextureHandlerSize(std::vector<u32>&& bound_offse
    if (min_val > 2) {
        return;
    }
    texture_handler_size_deduced = true;
    texture_handler_size = min_texture_handler_size * min_val;
}

@@ -4,6 +4,7 @@

#pragma once

#include <optional>
#include <vector>

#include "common/common_types.h"

@@ -17,25 +18,29 @@ namespace VideoCore {
 */
class GuestDriverProfile {
public:
    void DeduceTextureHandlerSize(std::vector<u32>&& bound_offsets);
    explicit GuestDriverProfile() = default;
    explicit GuestDriverProfile(std::optional<u32> texture_handler_size)
        : texture_handler_size{texture_handler_size} {}

    void DeduceTextureHandlerSize(std::vector<u32> bound_offsets);

    u32 GetTextureHandlerSize() const {
        return texture_handler_size;
        return texture_handler_size.value_or(default_texture_handler_size);
    }

    bool TextureHandlerSizeKnown() const {
        return texture_handler_size_deduced;
    bool IsTextureHandlerSizeKnown() const {
        return texture_handler_size.has_value();
    }

private:
    // Minimum size of texture handler any driver can use.
    static constexpr u32 min_texture_handler_size = 4;
    // This goes with Vulkan and OpenGL standards but Nvidia GPUs can easily
    // use 4 bytes instead. Thus, certain drivers may squish the size.

    // This goes with Vulkan and OpenGL standards but Nvidia GPUs can easily use 4 bytes instead.
    // Thus, certain drivers may squish the size.
    static constexpr u32 default_texture_handler_size = 8;

    u32 texture_handler_size = default_texture_handler_size;
    bool texture_handler_size_deduced = false;
    std::optional<u32> texture_handler_size = default_texture_handler_size;
};

} // namespace VideoCore

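The diff above replaces the deduced-size flag pair with a single std::optional<u32>. A minimal usage sketch of the reworked interface, assuming it is consumed the way MakeLocker does further down in gl_shader_cache.cpp; the function and variable names here are illustrative only:

// Sketch only: exercises the GuestDriverProfile members shown in the diff above.
#include <optional>
#include <vector>

#include "common/common_types.h"
#include "video_core/guest_driver.h"

void TextureHandlerSizeExample() {
    // Seeded from a disk-cache entry that never recorded a size: the optional is
    // empty, IsTextureHandlerSizeKnown() reports false, and the getter falls back
    // to default_texture_handler_size (8) through value_or().
    VideoCore::GuestDriverProfile restored{std::nullopt};
    const bool known = restored.IsTextureHandlerSizeKnown(); // false
    const u32 fallback = restored.GetTextureHandlerSize();   // 8

    // Seeded from an entry that recorded a 4-byte handler size, as MakeLocker does
    // with entry.texture_handler_size below.
    VideoCore::GuestDriverProfile recorded{std::optional<u32>{4}};
    const u32 size = recorded.GetTextureHandlerSize();       // 4

    // A profile without a recorded size can still deduce one at runtime from the
    // bound constant-buffer offsets.
    restored.DeduceTextureHandlerSize({0, 4, 8});

    (void)known;
    (void)fallback;
    (void)size;
}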
@@ -28,7 +28,6 @@
#include "video_core/renderer_opengl/gl_query_cache.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/renderer_opengl/gl_shader_gen.h"
#include "video_core/renderer_opengl/maxwell_to_gl.h"
#include "video_core/renderer_opengl/renderer_opengl.h"

@@ -76,7 +75,7 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
}

std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer,
                               const GLShader::ConstBufferEntry& entry) {
                               const ConstBufferEntry& entry) {
    if (!entry.IsIndirect()) {
        return entry.GetSize();
    }

@@ -272,9 +271,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
        SetupDrawTextures(stage, shader);
        SetupDrawImages(stage, shader);

        const ProgramVariant variant(primitive_mode);
        const auto program_handle = shader->GetHandle(variant);

        const GLuint program_handle = shader->GetHandle();
        switch (program) {
        case Maxwell::ShaderProgram::VertexA:
        case Maxwell::ShaderProgram::VertexB:

@@ -295,7 +292,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
        // When a clip distance is enabled but not set in the shader it crops parts of the screen
        // (sometimes it's half the screen, sometimes three quarters). To avoid this, enable the
        // clip distances only when it's written by a shader stage.
        clip_distances |= shader->GetShaderEntries().clip_distances;
        clip_distances |= shader->GetEntries().clip_distances;

        // When VertexA is enabled, we have dual vertex shaders
        if (program == Maxwell::ShaderProgram::VertexA) {

@@ -622,13 +619,7 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
    auto kernel = shader_cache.GetComputeKernel(code_addr);
    SetupComputeTextures(kernel);
    SetupComputeImages(kernel);

    const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
    const ProgramVariant variant(launch_desc.block_dim_x, launch_desc.block_dim_y,
                                 launch_desc.block_dim_z, launch_desc.shared_alloc,
                                 launch_desc.local_pos_alloc);
    glUseProgramStages(program_manager.GetHandle(), GL_COMPUTE_SHADER_BIT,
                       kernel->GetHandle(variant));
    glUseProgramStages(program_manager.GetHandle(), GL_COMPUTE_SHADER_BIT, kernel->GetHandle());

    const std::size_t buffer_size =
        Tegra::Engines::KeplerCompute::NumConstBuffers *

@@ -646,6 +637,7 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
    bind_ubo_pushbuffer.Bind();
    bind_ssbo_pushbuffer.Bind();

    const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
    glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z);
    ++num_queued_commands;
}

@@ -750,7 +742,7 @@ void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, const Shad
    const auto& shader_stage = stages[stage_index];

    u32 binding = device.GetBaseBindings(stage_index).uniform_buffer;
    for (const auto& entry : shader->GetShaderEntries().const_buffers) {
    for (const auto& entry : shader->GetEntries().const_buffers) {
        const auto& buffer = shader_stage.const_buffers[entry.GetIndex()];
        SetupConstBuffer(binding++, buffer, entry);
    }

@@ -761,7 +753,7 @@ void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) {
    const auto& launch_desc = system.GPU().KeplerCompute().launch_description;

    u32 binding = 0;
    for (const auto& entry : kernel->GetShaderEntries().const_buffers) {
    for (const auto& entry : kernel->GetEntries().const_buffers) {
        const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
        const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
        Tegra::Engines::ConstBufferInfo buffer;

@@ -773,7 +765,7 @@ void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) {
}

void RasterizerOpenGL::SetupConstBuffer(u32 binding, const Tegra::Engines::ConstBufferInfo& buffer,
                                        const GLShader::ConstBufferEntry& entry) {
                                        const ConstBufferEntry& entry) {
    if (!buffer.enabled) {
        // Set values to zero to unbind buffers
        bind_ubo_pushbuffer.Push(binding, buffer_cache.GetEmptyBuffer(sizeof(float)), 0,

@@ -797,7 +789,7 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad
    const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage_index]};

    u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer;
    for (const auto& entry : shader->GetShaderEntries().global_memory_entries) {
    for (const auto& entry : shader->GetEntries().global_memory_entries) {
        const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()};
        const auto gpu_addr{memory_manager.Read<u64>(addr)};
        const auto size{memory_manager.Read<u32>(addr + 8)};

@@ -811,7 +803,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
    const auto cbufs{gpu.KeplerCompute().launch_description.const_buffer_config};

    u32 binding = 0;
    for (const auto& entry : kernel->GetShaderEntries().global_memory_entries) {
    for (const auto& entry : kernel->GetEntries().global_memory_entries) {
        const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
        const auto gpu_addr{memory_manager.Read<u64>(addr)};
        const auto size{memory_manager.Read<u32>(addr + 8)};

@@ -819,7 +811,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
    }
}

void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GLShader::GlobalMemoryEntry& entry,
void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& entry,
                                         GPUVAddr gpu_addr, std::size_t size) {
    const auto alignment{device.GetShaderStorageBufferAlignment()};
    const auto [ssbo, buffer_offset] =

@@ -831,7 +823,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
    MICROPROFILE_SCOPE(OpenGL_Texture);
    const auto& maxwell3d = system.GPU().Maxwell3D();
    u32 binding = device.GetBaseBindings(stage_index).sampler;
    for (const auto& entry : shader->GetShaderEntries().samplers) {
    for (const auto& entry : shader->GetEntries().samplers) {
        const auto shader_type = static_cast<ShaderType>(stage_index);
        for (std::size_t i = 0; i < entry.Size(); ++i) {
            const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i);

@@ -844,7 +836,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
    MICROPROFILE_SCOPE(OpenGL_Texture);
    const auto& compute = system.GPU().KeplerCompute();
    u32 binding = 0;
    for (const auto& entry : kernel->GetShaderEntries().samplers) {
    for (const auto& entry : kernel->GetEntries().samplers) {
        for (std::size_t i = 0; i < entry.Size(); ++i) {
            const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i);
            SetupTexture(binding++, texture, entry);

@@ -853,7 +845,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
}

void RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture,
                                    const GLShader::SamplerEntry& entry) {
                                    const SamplerEntry& entry) {
    const auto view = texture_cache.GetTextureSurface(texture.tic, entry);
    if (!view) {
        // Can occur when texture addr is null or its memory is unmapped/invalid

@@ -876,7 +868,7 @@ void RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextu
void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& shader) {
    const auto& maxwell3d = system.GPU().Maxwell3D();
    u32 binding = device.GetBaseBindings(stage_index).image;
    for (const auto& entry : shader->GetShaderEntries().images) {
    for (const auto& entry : shader->GetEntries().images) {
        const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage_index);
        const auto tic = GetTextureInfo(maxwell3d, entry, shader_type).tic;
        SetupImage(binding++, tic, entry);

@@ -886,14 +878,14 @@ void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& sh
void RasterizerOpenGL::SetupComputeImages(const Shader& shader) {
    const auto& compute = system.GPU().KeplerCompute();
    u32 binding = 0;
    for (const auto& entry : shader->GetShaderEntries().images) {
    for (const auto& entry : shader->GetEntries().images) {
        const auto tic = GetTextureInfo(compute, entry, Tegra::Engines::ShaderType::Compute).tic;
        SetupImage(binding++, tic, entry);
    }
}

void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic,
                                  const GLShader::ImageEntry& entry) {
                                  const ImageEntry& entry) {
    const auto view = texture_cache.GetImageSurface(tic, entry);
    if (!view) {
        glBindImageTexture(binding, 0, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R8);

@@ -98,7 +98,7 @@ private:

    /// Configures a constant buffer.
    void SetupConstBuffer(u32 binding, const Tegra::Engines::ConstBufferInfo& buffer,
                          const GLShader::ConstBufferEntry& entry);
                          const ConstBufferEntry& entry);

    /// Configures the current global memory entries to use for the draw command.
    void SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader);

@@ -107,7 +107,7 @@ private:
    void SetupComputeGlobalMemory(const Shader& kernel);

    /// Configures a constant buffer.
    void SetupGlobalMemory(u32 binding, const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr,
    void SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& entry, GPUVAddr gpu_addr,
                           std::size_t size);

    /// Configures the current textures to use for the draw command.

@@ -118,7 +118,7 @@ private:

    /// Configures a texture.
    void SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture,
                      const GLShader::SamplerEntry& entry);
                      const SamplerEntry& entry);

    /// Configures images in a graphics shader.
    void SetupDrawImages(std::size_t stage_index, const Shader& shader);

@@ -127,8 +127,7 @@ private:
    void SetupComputeImages(const Shader& shader);

    /// Configures an image.
    void SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic,
                    const GLShader::ImageEntry& entry);
    void SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic, const ImageEntry& entry);

    /// Syncs the viewport and depth range to match the guest state
    void SyncViewport();

@ -2,12 +2,16 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <atomic>
|
||||
#include <functional>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <unordered_set>
|
||||
|
||||
#include <boost/functional/hash.hpp>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
|
@ -56,7 +60,7 @@ constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
|
|||
}
|
||||
|
||||
/// Calculates the size of a program stream
|
||||
std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) {
|
||||
std::size_t CalculateProgramSize(const ProgramCode& program) {
|
||||
constexpr std::size_t start_offset = 10;
|
||||
// This is the encoded version of BRA that jumps to itself. All Nvidia
|
||||
// shaders end with one.
|
||||
|
@ -109,32 +113,9 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) {
|
|||
}
|
||||
}
|
||||
|
||||
/// Describes primitive behavior on geometry shaders
|
||||
constexpr std::pair<const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) {
|
||||
switch (primitive_mode) {
|
||||
case GL_POINTS:
|
||||
return {"points", 1};
|
||||
case GL_LINES:
|
||||
case GL_LINE_STRIP:
|
||||
return {"lines", 2};
|
||||
case GL_LINES_ADJACENCY:
|
||||
case GL_LINE_STRIP_ADJACENCY:
|
||||
return {"lines_adjacency", 4};
|
||||
case GL_TRIANGLES:
|
||||
case GL_TRIANGLE_STRIP:
|
||||
case GL_TRIANGLE_FAN:
|
||||
return {"triangles", 3};
|
||||
case GL_TRIANGLES_ADJACENCY:
|
||||
case GL_TRIANGLE_STRIP_ADJACENCY:
|
||||
return {"triangles_adjacency", 6};
|
||||
default:
|
||||
return {"points", 1};
|
||||
}
|
||||
}
|
||||
|
||||
/// Hashes one (or two) program streams
|
||||
u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code,
|
||||
const ProgramCode& code_b) {
|
||||
const ProgramCode& code_b = {}) {
|
||||
u64 unique_identifier = boost::hash_value(code);
|
||||
if (is_a) {
|
||||
// VertexA programs include two programs
|
||||
|
@ -143,24 +124,6 @@ u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& co
|
|||
return unique_identifier;
|
||||
}
|
||||
|
||||
/// Creates an unspecialized program from code streams
|
||||
std::string GenerateGLSL(const Device& device, ShaderType shader_type, const ShaderIR& ir,
|
||||
const std::optional<ShaderIR>& ir_b) {
|
||||
switch (shader_type) {
|
||||
case ShaderType::Vertex:
|
||||
return GLShader::GenerateVertexShader(device, ir, ir_b ? &*ir_b : nullptr);
|
||||
case ShaderType::Geometry:
|
||||
return GLShader::GenerateGeometryShader(device, ir);
|
||||
case ShaderType::Fragment:
|
||||
return GLShader::GenerateFragmentShader(device, ir);
|
||||
case ShaderType::Compute:
|
||||
return GLShader::GenerateComputeShader(device, ir);
|
||||
default:
|
||||
UNIMPLEMENTED_MSG("Unimplemented shader_type={}", static_cast<u32>(shader_type));
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
constexpr const char* GetShaderTypeName(ShaderType shader_type) {
|
||||
switch (shader_type) {
|
||||
case ShaderType::Vertex:
|
||||
|
@ -196,102 +159,35 @@ constexpr ShaderType GetShaderType(Maxwell::ShaderProgram program_type) {
|
|||
return {};
|
||||
}
|
||||
|
||||
std::string GetShaderId(u64 unique_identifier, ShaderType shader_type) {
|
||||
std::string MakeShaderID(u64 unique_identifier, ShaderType shader_type) {
|
||||
return fmt::format("{}{:016X}", GetShaderTypeName(shader_type), unique_identifier);
|
||||
}
|
||||
|
||||
Tegra::Engines::ConstBufferEngineInterface& GetConstBufferEngineInterface(Core::System& system,
|
||||
ShaderType shader_type) {
|
||||
if (shader_type == ShaderType::Compute) {
|
||||
return system.GPU().KeplerCompute();
|
||||
} else {
|
||||
return system.GPU().Maxwell3D();
|
||||
std::shared_ptr<ConstBufferLocker> MakeLocker(const ShaderDiskCacheEntry& entry) {
|
||||
const VideoCore::GuestDriverProfile guest_profile{entry.texture_handler_size};
|
||||
auto locker = std::make_shared<ConstBufferLocker>(entry.type, guest_profile);
|
||||
locker->SetBoundBuffer(entry.bound_buffer);
|
||||
for (const auto& [address, value] : entry.keys) {
|
||||
const auto [buffer, offset] = address;
|
||||
locker->InsertKey(buffer, offset, value);
|
||||
}
|
||||
}
|
||||
|
||||
std::unique_ptr<ConstBufferLocker> MakeLocker(Core::System& system, ShaderType shader_type) {
|
||||
return std::make_unique<ConstBufferLocker>(shader_type,
|
||||
GetConstBufferEngineInterface(system, shader_type));
|
||||
}
|
||||
|
||||
void FillLocker(ConstBufferLocker& locker, const ShaderDiskCacheUsage& usage) {
|
||||
locker.SetBoundBuffer(usage.bound_buffer);
|
||||
for (const auto& key : usage.keys) {
|
||||
const auto [buffer, offset] = key.first;
|
||||
locker.InsertKey(buffer, offset, key.second);
|
||||
for (const auto& [offset, sampler] : entry.bound_samplers) {
|
||||
locker->InsertBoundSampler(offset, sampler);
|
||||
}
|
||||
for (const auto& [offset, sampler] : usage.bound_samplers) {
|
||||
locker.InsertBoundSampler(offset, sampler);
|
||||
}
|
||||
for (const auto& [key, sampler] : usage.bindless_samplers) {
|
||||
for (const auto& [key, sampler] : entry.bindless_samplers) {
|
||||
const auto [buffer, offset] = key;
|
||||
locker.InsertBindlessSampler(buffer, offset, sampler);
|
||||
locker->InsertBindlessSampler(buffer, offset, sampler);
|
||||
}
|
||||
return locker;
|
||||
}
|
||||
|
||||
CachedProgram BuildShader(const Device& device, u64 unique_identifier, ShaderType shader_type,
|
||||
const ProgramCode& code, const ProgramCode& code_b,
|
||||
ConstBufferLocker& locker, const ProgramVariant& variant,
|
||||
bool hint_retrievable = false) {
|
||||
LOG_INFO(Render_OpenGL, "called. {}", GetShaderId(unique_identifier, shader_type));
|
||||
|
||||
const bool is_compute = shader_type == ShaderType::Compute;
|
||||
const u32 main_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
|
||||
const ShaderIR ir(code, main_offset, COMPILER_SETTINGS, locker);
|
||||
std::optional<ShaderIR> ir_b;
|
||||
if (!code_b.empty()) {
|
||||
ir_b.emplace(code_b, main_offset, COMPILER_SETTINGS, locker);
|
||||
}
|
||||
|
||||
std::string source = fmt::format(R"(// {}
|
||||
#version 430 core
|
||||
#extension GL_ARB_separate_shader_objects : enable
|
||||
)",
|
||||
GetShaderId(unique_identifier, shader_type));
|
||||
if (device.HasShaderBallot()) {
|
||||
source += "#extension GL_ARB_shader_ballot : require\n";
|
||||
}
|
||||
if (device.HasVertexViewportLayer()) {
|
||||
source += "#extension GL_ARB_shader_viewport_layer_array : require\n";
|
||||
}
|
||||
if (device.HasImageLoadFormatted()) {
|
||||
source += "#extension GL_EXT_shader_image_load_formatted : require\n";
|
||||
}
|
||||
if (device.HasWarpIntrinsics()) {
|
||||
source += "#extension GL_NV_gpu_shader5 : require\n"
|
||||
"#extension GL_NV_shader_thread_group : require\n"
|
||||
"#extension GL_NV_shader_thread_shuffle : require\n";
|
||||
}
|
||||
// This pragma stops Nvidia's driver from over optimizing math (probably using fp16 operations)
|
||||
// on places where we don't want to.
|
||||
// Thanks to Ryujinx for finding this workaround.
|
||||
source += "#pragma optionNV(fastmath off)\n";
|
||||
|
||||
if (shader_type == ShaderType::Geometry) {
|
||||
const auto [glsl_topology, max_vertices] = GetPrimitiveDescription(variant.primitive_mode);
|
||||
source += fmt::format("#define MAX_VERTEX_INPUT {}\n", max_vertices);
|
||||
source += fmt::format("layout ({}) in;\n", glsl_topology);
|
||||
}
|
||||
if (shader_type == ShaderType::Compute) {
|
||||
if (variant.local_memory_size > 0) {
|
||||
source += fmt::format("#define LOCAL_MEMORY_SIZE {}\n",
|
||||
Common::AlignUp(variant.local_memory_size, 4) / 4);
|
||||
}
|
||||
source +=
|
||||
fmt::format("layout (local_size_x = {}, local_size_y = {}, local_size_z = {}) in;\n",
|
||||
variant.block_x, variant.block_y, variant.block_z);
|
||||
|
||||
if (variant.shared_memory_size > 0) {
|
||||
// shared_memory_size is described in number of words
|
||||
source += fmt::format("shared uint smem[{}];\n", variant.shared_memory_size);
|
||||
}
|
||||
}
|
||||
|
||||
source += '\n';
|
||||
source += GenerateGLSL(device, shader_type, ir, ir_b);
|
||||
|
||||
std::shared_ptr<OGLProgram> BuildShader(const Device& device, ShaderType shader_type,
|
||||
u64 unique_identifier, const ShaderIR& ir,
|
||||
bool hint_retrievable = false) {
|
||||
LOG_INFO(Render_OpenGL, "{}", MakeShaderID(unique_identifier, shader_type));
|
||||
const std::string glsl = DecompileShader(device, ir, shader_type);
|
||||
OGLShader shader;
|
||||
shader.Create(source.c_str(), GetGLShaderType(shader_type));
|
||||
shader.Create(glsl.c_str(), GetGLShaderType(shader_type));
|
||||
|
||||
auto program = std::make_shared<OGLProgram>();
|
||||
program->Create(true, hint_retrievable, shader.handle);
|
||||
|
@ -299,7 +195,7 @@ CachedProgram BuildShader(const Device& device, u64 unique_identifier, ShaderTyp
|
|||
}
|
||||
|
||||
std::unordered_set<GLenum> GetSupportedFormats() {
|
||||
GLint num_formats{};
|
||||
GLint num_formats;
|
||||
glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats);
|
||||
|
||||
std::vector<GLint> formats(num_formats);
|
||||
|
@ -314,115 +210,81 @@ std::unordered_set<GLenum> GetSupportedFormats() {
|
|||
|
||||
} // Anonymous namespace
|
||||
|
||||
CachedShader::CachedShader(const ShaderParameters& params, ShaderType shader_type,
|
||||
GLShader::ShaderEntries entries, ProgramCode code, ProgramCode code_b)
|
||||
: RasterizerCacheObject{params.host_ptr}, system{params.system},
|
||||
disk_cache{params.disk_cache}, device{params.device}, cpu_addr{params.cpu_addr},
|
||||
unique_identifier{params.unique_identifier}, shader_type{shader_type},
|
||||
entries{std::move(entries)}, code{std::move(code)}, code_b{std::move(code_b)} {
|
||||
if (!params.precompiled_variants) {
|
||||
return;
|
||||
}
|
||||
for (const auto& pair : *params.precompiled_variants) {
|
||||
auto locker = MakeLocker(system, shader_type);
|
||||
const auto& usage = pair->first;
|
||||
FillLocker(*locker, usage);
|
||||
CachedShader::CachedShader(const u8* host_ptr, VAddr cpu_addr, std::size_t size_in_bytes,
|
||||
std::shared_ptr<VideoCommon::Shader::ConstBufferLocker> locker,
|
||||
ShaderEntries entries, std::shared_ptr<OGLProgram> program)
|
||||
: RasterizerCacheObject{host_ptr}, locker{std::move(locker)}, entries{std::move(entries)},
|
||||
cpu_addr{cpu_addr}, size_in_bytes{size_in_bytes}, program{std::move(program)} {}
|
||||
|
||||
std::unique_ptr<LockerVariant>* locker_variant = nullptr;
|
||||
const auto it =
|
||||
std::find_if(locker_variants.begin(), locker_variants.end(), [&](const auto& variant) {
|
||||
return variant->locker->HasEqualKeys(*locker);
|
||||
});
|
||||
if (it == locker_variants.end()) {
|
||||
locker_variant = &locker_variants.emplace_back();
|
||||
*locker_variant = std::make_unique<LockerVariant>();
|
||||
locker_variant->get()->locker = std::move(locker);
|
||||
} else {
|
||||
locker_variant = &*it;
|
||||
}
|
||||
locker_variant->get()->programs.emplace(usage.variant, pair->second);
|
||||
CachedShader::~CachedShader() = default;
|
||||
|
||||
GLuint CachedShader::GetHandle() const {
|
||||
if (!locker->IsConsistent()) {
|
||||
std::abort();
|
||||
}
|
||||
return program->handle;
|
||||
}
|
||||
|
||||
Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
|
||||
Maxwell::ShaderProgram program_type, ProgramCode code,
|
||||
ProgramCode code_b) {
|
||||
const auto shader_type = GetShaderType(program_type);
|
||||
params.disk_cache.SaveRaw(
|
||||
ShaderDiskCacheRaw(params.unique_identifier, shader_type, code, code_b));
|
||||
const std::size_t size_in_bytes = code.size() * sizeof(u64);
|
||||
|
||||
ConstBufferLocker locker(shader_type, params.system.GPU().Maxwell3D());
|
||||
const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, locker);
|
||||
auto locker = std::make_shared<ConstBufferLocker>(shader_type, params.system.GPU().Maxwell3D());
|
||||
const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, *locker);
|
||||
// TODO(Rodrigo): Handle VertexA shaders
|
||||
// std::optional<ShaderIR> ir_b;
|
||||
// if (!code_b.empty()) {
|
||||
// ir_b.emplace(code_b, STAGE_MAIN_OFFSET);
|
||||
// }
|
||||
return std::shared_ptr<CachedShader>(new CachedShader(
|
||||
params, shader_type, GLShader::GetEntries(ir), std::move(code), std::move(code_b)));
|
||||
auto program = BuildShader(params.device, shader_type, params.unique_identifier, ir);
|
||||
|
||||
ShaderDiskCacheEntry entry;
|
||||
entry.type = shader_type;
|
||||
entry.code = std::move(code);
|
||||
entry.code_b = std::move(code_b);
|
||||
entry.unique_identifier = params.unique_identifier;
|
||||
entry.bound_buffer = locker->GetBoundBuffer();
|
||||
entry.keys = locker->GetKeys();
|
||||
entry.bound_samplers = locker->GetBoundSamplers();
|
||||
entry.bindless_samplers = locker->GetBindlessSamplers();
|
||||
params.disk_cache.SaveEntry(std::move(entry));
|
||||
|
||||
return std::shared_ptr<CachedShader>(new CachedShader(params.host_ptr, params.cpu_addr,
|
||||
size_in_bytes, std::move(locker),
|
||||
MakeEntries(ir), std::move(program)));
|
||||
}
|
||||
|
||||
Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) {
|
||||
params.disk_cache.SaveRaw(
|
||||
ShaderDiskCacheRaw(params.unique_identifier, ShaderType::Compute, code));
|
||||
const std::size_t size_in_bytes = code.size() * sizeof(u64);
|
||||
|
||||
ConstBufferLocker locker(Tegra::Engines::ShaderType::Compute,
|
||||
params.system.GPU().KeplerCompute());
|
||||
const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, locker);
|
||||
return std::shared_ptr<CachedShader>(new CachedShader(
|
||||
params, ShaderType::Compute, GLShader::GetEntries(ir), std::move(code), {}));
|
||||
auto locker = std::make_shared<ConstBufferLocker>(Tegra::Engines::ShaderType::Compute,
|
||||
params.system.GPU().KeplerCompute());
|
||||
const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, *locker);
|
||||
auto program = BuildShader(params.device, ShaderType::Compute, params.unique_identifier, ir);
|
||||
|
||||
ShaderDiskCacheEntry entry;
|
||||
entry.type = ShaderType::Compute;
|
||||
entry.code = std::move(code);
|
||||
entry.unique_identifier = params.unique_identifier;
|
||||
entry.bound_buffer = locker->GetBoundBuffer();
|
||||
entry.keys = locker->GetKeys();
|
||||
entry.bound_samplers = locker->GetBoundSamplers();
|
||||
entry.bindless_samplers = locker->GetBindlessSamplers();
|
||||
params.disk_cache.SaveEntry(std::move(entry));
|
||||
|
||||
return std::shared_ptr<CachedShader>(new CachedShader(params.host_ptr, params.cpu_addr,
|
||||
size_in_bytes, std::move(locker),
|
||||
MakeEntries(ir), std::move(program)));
|
||||
}
|
||||
|
||||
Shader CachedShader::CreateFromCache(const ShaderParameters& params,
|
||||
const UnspecializedShader& unspecialized) {
|
||||
return std::shared_ptr<CachedShader>(new CachedShader(params, unspecialized.type,
|
||||
unspecialized.entries, unspecialized.code,
|
||||
unspecialized.code_b));
|
||||
}
|
||||
|
||||
GLuint CachedShader::GetHandle(const ProgramVariant& variant) {
|
||||
EnsureValidLockerVariant();
|
||||
|
||||
const auto [entry, is_cache_miss] = curr_locker_variant->programs.try_emplace(variant);
|
||||
auto& program = entry->second;
|
||||
if (!is_cache_miss) {
|
||||
return program->handle;
|
||||
}
|
||||
|
||||
program = BuildShader(device, unique_identifier, shader_type, code, code_b,
|
||||
*curr_locker_variant->locker, variant);
|
||||
disk_cache.SaveUsage(GetUsage(variant, *curr_locker_variant->locker));
|
||||
|
||||
LabelGLObject(GL_PROGRAM, program->handle, cpu_addr);
|
||||
return program->handle;
|
||||
}
|
||||
|
||||
bool CachedShader::EnsureValidLockerVariant() {
|
||||
const auto previous_variant = curr_locker_variant;
|
||||
if (curr_locker_variant && !curr_locker_variant->locker->IsConsistent()) {
|
||||
curr_locker_variant = nullptr;
|
||||
}
|
||||
if (!curr_locker_variant) {
|
||||
for (auto& variant : locker_variants) {
|
||||
if (variant->locker->IsConsistent()) {
|
||||
curr_locker_variant = variant.get();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!curr_locker_variant) {
|
||||
auto& new_variant = locker_variants.emplace_back();
|
||||
new_variant = std::make_unique<LockerVariant>();
|
||||
new_variant->locker = MakeLocker(system, shader_type);
|
||||
curr_locker_variant = new_variant.get();
|
||||
}
|
||||
return previous_variant == curr_locker_variant;
|
||||
}
|
||||
|
||||
ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant,
|
||||
const ConstBufferLocker& locker) const {
|
||||
return ShaderDiskCacheUsage{unique_identifier, variant,
|
||||
locker.GetBoundBuffer(), locker.GetKeys(),
|
||||
locker.GetBoundSamplers(), locker.GetBindlessSamplers()};
|
||||
const PrecompiledShader& precompiled_shader,
|
||||
std::size_t size_in_bytes) {
|
||||
return std::shared_ptr<CachedShader>(
|
||||
new CachedShader(params.host_ptr, params.cpu_addr, size_in_bytes, precompiled_shader.locker,
|
||||
precompiled_shader.entries, precompiled_shader.program));
|
||||
}
|
||||
|
||||
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
|
||||
|
@ -432,16 +294,12 @@ ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System&
|
|||
|
||||
void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
|
||||
const VideoCore::DiskResourceLoadCallback& callback) {
|
||||
const auto transferable = disk_cache.LoadTransferable();
|
||||
const std::optional transferable = disk_cache.LoadTransferable();
|
||||
if (!transferable) {
|
||||
return;
|
||||
}
|
||||
const auto [raws, shader_usages] = *transferable;
|
||||
if (!GenerateUnspecializedShaders(stop_loading, callback, raws) || stop_loading) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto dumps = disk_cache.LoadPrecompiled();
|
||||
const std::vector gl_cache = disk_cache.LoadPrecompiled();
|
||||
const auto supported_formats = GetSupportedFormats();
|
||||
|
||||
// Track if precompiled cache was altered during loading to know if we have to
|
||||
|
@ -450,77 +308,82 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
|
|||
|
||||
// Inform the frontend about shader build initialization
|
||||
if (callback) {
|
||||
callback(VideoCore::LoadCallbackStage::Build, 0, shader_usages.size());
|
||||
callback(VideoCore::LoadCallbackStage::Build, 0, transferable->size());
|
||||
}
|
||||
|
||||
std::mutex mutex;
|
||||
std::size_t built_shaders = 0; // It doesn't have be atomic since it's used behind a mutex
|
||||
std::atomic_bool compilation_failed = false;
|
||||
std::atomic_bool gl_cache_failed = false;
|
||||
|
||||
const auto Worker = [&](Core::Frontend::GraphicsContext* context, std::size_t begin,
|
||||
std::size_t end, const std::vector<ShaderDiskCacheUsage>& shader_usages,
|
||||
const ShaderDumpsMap& dumps) {
|
||||
const auto find_precompiled = [&gl_cache](u64 id) {
|
||||
return std::find_if(gl_cache.begin(), gl_cache.end(),
|
||||
[id](const auto& entry) { return entry.unique_identifier == id; });
|
||||
};
|
||||
|
||||
const auto worker = [&](Core::Frontend::GraphicsContext* context, std::size_t begin,
|
||||
std::size_t end) {
|
||||
context->MakeCurrent();
|
||||
SCOPE_EXIT({ return context->DoneCurrent(); });
|
||||
|
||||
for (std::size_t i = begin; i < end; ++i) {
|
||||
if (stop_loading || compilation_failed) {
|
||||
if (stop_loading) {
|
||||
return;
|
||||
}
|
||||
const auto& usage{shader_usages[i]};
|
||||
const auto& unspecialized{unspecialized_shaders.at(usage.unique_identifier)};
|
||||
const auto dump{dumps.find(usage)};
|
||||
const auto& entry = (*transferable)[i];
|
||||
const u64 unique_identifier = entry.unique_identifier;
|
||||
const auto it = find_precompiled(unique_identifier);
|
||||
const auto precompiled_entry = it != gl_cache.end() ? &*it : nullptr;
|
||||
|
||||
CachedProgram shader;
|
||||
if (dump != dumps.end()) {
|
||||
// If the shader is dumped, attempt to load it with
|
||||
shader = GeneratePrecompiledProgram(dump->second, supported_formats);
|
||||
if (!shader) {
|
||||
compilation_failed = true;
|
||||
return;
|
||||
const bool is_compute = entry.type == ShaderType::Compute;
|
||||
const u32 main_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
|
||||
auto locker = MakeLocker(entry);
|
||||
const ShaderIR ir(entry.code, main_offset, COMPILER_SETTINGS, *locker);
|
||||
|
||||
std::shared_ptr<OGLProgram> program;
|
||||
if (precompiled_entry) {
|
||||
// If the shader is precompiled, attempt to load it with
|
||||
program = GeneratePrecompiledProgram(entry, *precompiled_entry, supported_formats);
|
||||
if (!program) {
|
||||
gl_cache_failed = true;
|
||||
}
|
||||
}
|
||||
if (!shader) {
|
||||
auto locker{MakeLocker(system, unspecialized.type)};
|
||||
FillLocker(*locker, usage);
|
||||
|
||||
shader = BuildShader(device, usage.unique_identifier, unspecialized.type,
|
||||
unspecialized.code, unspecialized.code_b, *locker,
|
||||
usage.variant, true);
|
||||
if (!program) {
|
||||
// Otherwise compile it from GLSL
|
||||
program = BuildShader(device, entry.type, unique_identifier, ir, true);
|
||||
}
|
||||
|
||||
PrecompiledShader shader;
|
||||
shader.program = std::move(program);
|
||||
shader.locker = std::move(locker);
|
||||
shader.entries = MakeEntries(ir);
|
||||
|
||||
std::scoped_lock lock{mutex};
|
||||
if (callback) {
|
||||
callback(VideoCore::LoadCallbackStage::Build, ++built_shaders,
|
||||
shader_usages.size());
|
||||
transferable->size());
|
||||
}
|
||||
|
||||
precompiled_programs.emplace(usage, std::move(shader));
|
||||
|
||||
// TODO(Rodrigo): Is there a better way to do this?
|
||||
precompiled_variants[usage.unique_identifier].push_back(
|
||||
precompiled_programs.find(usage));
|
||||
runtime_cache.emplace(entry.unique_identifier, std::move(shader));
|
||||
}
|
||||
};
|
||||
|
||||
const auto num_workers{static_cast<std::size_t>(std::thread::hardware_concurrency() + 1ULL)};
|
||||
const std::size_t bucket_size{shader_usages.size() / num_workers};
|
||||
const std::size_t bucket_size{transferable->size() / num_workers};
|
||||
std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> contexts(num_workers);
|
||||
std::vector<std::thread> threads(num_workers);
|
||||
for (std::size_t i = 0; i < num_workers; ++i) {
|
||||
const bool is_last_worker = i + 1 == num_workers;
|
||||
const std::size_t start{bucket_size * i};
|
||||
const std::size_t end{is_last_worker ? shader_usages.size() : start + bucket_size};
|
||||
const std::size_t end{is_last_worker ? transferable->size() : start + bucket_size};
|
||||
|
||||
// On some platforms the shared context has to be created from the GUI thread
|
||||
contexts[i] = emu_window.CreateSharedContext();
|
||||
threads[i] = std::thread(Worker, contexts[i].get(), start, end, shader_usages, dumps);
|
||||
threads[i] = std::thread(worker, contexts[i].get(), start, end);
|
||||
}
|
||||
for (auto& thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
|
||||
if (compilation_failed) {
|
||||
if (gl_cache_failed) {
|
||||
// Invalidate the precompiled cache if a shader dumped shader was rejected
|
||||
disk_cache.InvalidatePrecompiled();
|
||||
precompiled_cache_altered = true;
|
||||
|
@ -533,11 +396,12 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
|
|||
// TODO(Rodrigo): Do state tracking for transferable shaders and do a dummy draw
|
||||
// before precompiling them
|
||||
|
||||
for (std::size_t i = 0; i < shader_usages.size(); ++i) {
|
||||
const auto& usage{shader_usages[i]};
|
||||
if (dumps.find(usage) == dumps.end()) {
|
||||
const auto& program{precompiled_programs.at(usage)};
|
||||
disk_cache.SaveDump(usage, program->handle);
|
||||
for (std::size_t i = 0; i < transferable->size(); ++i) {
|
||||
const u64 id = (*transferable)[i].unique_identifier;
|
||||
const auto it = find_precompiled(id);
|
||||
if (it == gl_cache.end()) {
|
||||
const GLuint program = runtime_cache.at(id).program->handle;
|
||||
disk_cache.SavePrecompiled(id, program);
|
||||
precompiled_cache_altered = true;
|
||||
}
|
||||
}
|
||||
|
@ -547,80 +411,29 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
|
|||
}
|
||||
}
|
||||
|
||||
const PrecompiledVariants* ShaderCacheOpenGL::GetPrecompiledVariants(u64 unique_identifier) const {
|
||||
const auto it = precompiled_variants.find(unique_identifier);
|
||||
return it == precompiled_variants.end() ? nullptr : &it->second;
|
||||
}
|
||||
|
||||
CachedProgram ShaderCacheOpenGL::GeneratePrecompiledProgram(
|
||||
const ShaderDiskCacheDump& dump, const std::unordered_set<GLenum>& supported_formats) {
|
||||
if (supported_formats.find(dump.binary_format) == supported_formats.end()) {
|
||||
LOG_INFO(Render_OpenGL, "Precompiled cache entry with unsupported format - removing");
|
||||
std::shared_ptr<OGLProgram> ShaderCacheOpenGL::GeneratePrecompiledProgram(
|
||||
const ShaderDiskCacheEntry& entry, const ShaderDiskCachePrecompiled& precompiled_entry,
|
||||
const std::unordered_set<GLenum>& supported_formats) {
|
||||
if (supported_formats.find(precompiled_entry.binary_format) == supported_formats.end()) {
|
||||
LOG_INFO(Render_OpenGL, "Precompiled cache entry with unsupported format, removing");
|
||||
return {};
|
||||
}
|
||||
|
||||
CachedProgram shader = std::make_shared<OGLProgram>();
|
||||
shader->handle = glCreateProgram();
|
||||
glProgramParameteri(shader->handle, GL_PROGRAM_SEPARABLE, GL_TRUE);
|
||||
glProgramBinary(shader->handle, dump.binary_format, dump.binary.data(),
|
||||
static_cast<GLsizei>(dump.binary.size()));
|
||||
auto program = std::make_shared<OGLProgram>();
|
||||
program->handle = glCreateProgram();
|
||||
glProgramParameteri(program->handle, GL_PROGRAM_SEPARABLE, GL_TRUE);
|
||||
glProgramBinary(program->handle, precompiled_entry.binary_format,
|
||||
precompiled_entry.binary.data(),
|
||||
static_cast<GLsizei>(precompiled_entry.binary.size()));
|
||||
|
||||
GLint link_status{};
|
||||
glGetProgramiv(shader->handle, GL_LINK_STATUS, &link_status);
|
||||
GLint link_status;
|
||||
glGetProgramiv(program->handle, GL_LINK_STATUS, &link_status);
|
||||
if (link_status == GL_FALSE) {
|
||||
LOG_INFO(Render_OpenGL, "Precompiled cache rejected by the driver - removing");
|
||||
LOG_INFO(Render_OpenGL, "Precompiled cache rejected by the driver, removing");
|
||||
return {};
|
||||
}
|
||||
|
||||
return shader;
|
||||
}
|
||||
|
||||
bool ShaderCacheOpenGL::GenerateUnspecializedShaders(
|
||||
const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback,
|
||||
const std::vector<ShaderDiskCacheRaw>& raws) {
|
||||
if (callback) {
|
||||
callback(VideoCore::LoadCallbackStage::Decompile, 0, raws.size());
|
||||
}
|
||||
|
||||
for (std::size_t i = 0; i < raws.size(); ++i) {
|
||||
if (stop_loading) {
|
||||
return false;
|
||||
}
|
||||
const auto& raw{raws[i]};
|
||||
const u64 unique_identifier{raw.GetUniqueIdentifier()};
|
||||
const u64 calculated_hash{
|
||||
GetUniqueIdentifier(raw.GetType(), raw.HasProgramA(), raw.GetCode(), raw.GetCodeB())};
|
||||
if (unique_identifier != calculated_hash) {
|
||||
LOG_ERROR(Render_OpenGL,
|
||||
"Invalid hash in entry={:016x} (obtained hash={:016x}) - "
|
||||
"removing shader cache",
|
||||
raw.GetUniqueIdentifier(), calculated_hash);
|
||||
disk_cache.InvalidateTransferable();
|
||||
return false;
|
||||
}
|
||||
|
||||
const u32 main_offset =
|
||||
raw.GetType() == ShaderType::Compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
|
||||
ConstBufferLocker locker(raw.GetType());
|
||||
const ShaderIR ir(raw.GetCode(), main_offset, COMPILER_SETTINGS, locker);
|
||||
// TODO(Rodrigo): Handle VertexA shaders
|
||||
// std::optional<ShaderIR> ir_b;
|
||||
// if (raw.HasProgramA()) {
|
||||
// ir_b.emplace(raw.GetProgramCodeB(), main_offset);
|
||||
// }
|
||||
|
||||
UnspecializedShader unspecialized;
|
||||
unspecialized.entries = GLShader::GetEntries(ir);
|
||||
unspecialized.type = raw.GetType();
|
||||
unspecialized.code = raw.GetCode();
|
||||
unspecialized.code_b = raw.GetCodeB();
|
||||
unspecialized_shaders.emplace(raw.GetUniqueIdentifier(), unspecialized);
|
||||
|
||||
if (callback) {
|
||||
callback(VideoCore::LoadCallbackStage::Decompile, i, raws.size());
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return program;
|
||||
}
|
||||
|
||||
Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
|
||||
|
@ -648,17 +461,17 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
|
|||
|
||||
const auto unique_identifier = GetUniqueIdentifier(
|
||||
GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b);
|
||||
const auto precompiled_variants = GetPrecompiledVariants(unique_identifier);
|
||||
const auto cpu_addr{*memory_manager.GpuToCpuAddress(address)};
|
||||
const ShaderParameters params{system, disk_cache, precompiled_variants, device,
|
||||
const ShaderParameters params{system, disk_cache, device,
|
||||
cpu_addr, host_ptr, unique_identifier};
|
||||
|
||||
const auto found = unspecialized_shaders.find(unique_identifier);
|
||||
if (found == unspecialized_shaders.end()) {
|
||||
const auto found = runtime_cache.find(unique_identifier);
|
||||
if (found == runtime_cache.end()) {
|
||||
shader = CachedShader::CreateStageFromMemory(params, program, std::move(code),
|
||||
std::move(code_b));
|
||||
} else {
|
||||
shader = CachedShader::CreateFromCache(params, found->second);
|
||||
const std::size_t size_in_bytes = code.size() * sizeof(u64);
|
||||
shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
|
||||
}
|
||||
Register(shader);
|
||||
|
||||
|
@ -673,19 +486,19 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
|
|||
return kernel;
|
||||
}
|
||||
|
||||
// No kernel found - create a new one
|
||||
// No kernel found, create a new one
|
||||
auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
|
||||
const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code, {})};
|
||||
const auto precompiled_variants = GetPrecompiledVariants(unique_identifier);
|
||||
const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
|
||||
const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)};
|
||||
const ShaderParameters params{system, disk_cache, precompiled_variants, device,
|
||||
const ShaderParameters params{system, disk_cache, device,
|
||||
cpu_addr, host_ptr, unique_identifier};
|
||||
|
||||
const auto found = unspecialized_shaders.find(unique_identifier);
|
||||
if (found == unspecialized_shaders.end()) {
|
||||
const auto found = runtime_cache.find(unique_identifier);
|
||||
if (found == runtime_cache.end()) {
|
||||
kernel = CachedShader::CreateKernelFromMemory(params, std::move(code));
|
||||
} else {
|
||||
kernel = CachedShader::CreateFromCache(params, found->second);
|
||||
const std::size_t size_in_bytes = code.size() * sizeof(u64);
|
||||
kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
|
||||
}
|
||||
|
||||
Register(kernel);
|
||||
|
|
|
@ -41,22 +41,17 @@ class RasterizerOpenGL;
|
|||
struct UnspecializedShader;
|
||||
|
||||
using Shader = std::shared_ptr<CachedShader>;
|
||||
using CachedProgram = std::shared_ptr<OGLProgram>;
|
||||
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
|
||||
using PrecompiledPrograms = std::unordered_map<ShaderDiskCacheUsage, CachedProgram>;
|
||||
using PrecompiledVariants = std::vector<PrecompiledPrograms::iterator>;
|
||||
|
||||
struct UnspecializedShader {
|
||||
GLShader::ShaderEntries entries;
|
||||
Tegra::Engines::ShaderType type;
|
||||
ProgramCode code;
|
||||
ProgramCode code_b;
|
||||
struct PrecompiledShader {
|
||||
std::shared_ptr<OGLProgram> program;
|
||||
std::shared_ptr<VideoCommon::Shader::ConstBufferLocker> locker;
|
||||
ShaderEntries entries;
|
||||
};
|
||||
|
||||
struct ShaderParameters {
|
||||
Core::System& system;
|
||||
ShaderDiskCacheOpenGL& disk_cache;
|
||||
const PrecompiledVariants* precompiled_variants;
|
||||
const Device& device;
|
||||
VAddr cpu_addr;
|
||||
u8* host_ptr;
|
||||
|
@ -65,61 +60,45 @@ struct ShaderParameters {
|
|||
|
||||
class CachedShader final : public RasterizerCacheObject {
|
||||
public:
|
||||
~CachedShader();
|
||||
|
||||
/// Gets the GL program handle for the shader
|
||||
GLuint GetHandle() const;
|
||||
|
||||
/// Returns the guest CPU address of the shader
|
||||
VAddr GetCpuAddr() const override {
|
||||
return cpu_addr;
|
||||
}
|
||||
|
||||
/// Returns the size in bytes of the shader
|
||||
std::size_t GetSizeInBytes() const override {
|
||||
return size_in_bytes;
|
||||
}
|
||||
|
||||
/// Gets the shader entries for the shader
|
||||
const ShaderEntries& GetEntries() const {
|
||||
return entries;
|
||||
}
|
||||
|
||||
static Shader CreateStageFromMemory(const ShaderParameters& params,
|
||||
Maxwell::ShaderProgram program_type,
|
||||
ProgramCode program_code, ProgramCode program_code_b);
|
||||
static Shader CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code);
|
||||
|
||||
static Shader CreateFromCache(const ShaderParameters& params,
|
||||
const UnspecializedShader& unspecialized);
|
||||
|
||||
VAddr GetCpuAddr() const override {
|
||||
return cpu_addr;
|
||||
}
|
||||
|
||||
std::size_t GetSizeInBytes() const override {
|
||||
return code.size() * sizeof(u64);
|
||||
}
|
||||
|
||||
/// Gets the shader entries for the shader
|
||||
const GLShader::ShaderEntries& GetShaderEntries() const {
|
||||
return entries;
|
||||
}
|
||||
|
||||
/// Gets the GL program handle for the shader
|
||||
GLuint GetHandle(const ProgramVariant& variant);
|
||||
const PrecompiledShader& precompiled_shader,
|
||||
std::size_t size_in_bytes);
|
||||
|
||||
private:
|
||||
struct LockerVariant {
|
||||
std::unique_ptr<VideoCommon::Shader::ConstBufferLocker> locker;
|
||||
std::unordered_map<ProgramVariant, CachedProgram> programs;
|
||||
};
|
||||
explicit CachedShader(const u8* host_ptr, VAddr cpu_addr, std::size_t size_in_bytes,
|
||||
std::shared_ptr<VideoCommon::Shader::ConstBufferLocker> locker,
|
||||
ShaderEntries entries, std::shared_ptr<OGLProgram> program);
|
||||
|
||||
explicit CachedShader(const ShaderParameters& params, Tegra::Engines::ShaderType shader_type,
|
||||
GLShader::ShaderEntries entries, ProgramCode program_code,
|
||||
ProgramCode program_code_b);
|
||||
|
||||
bool EnsureValidLockerVariant();
|
||||
|
||||
ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant,
|
||||
const VideoCommon::Shader::ConstBufferLocker& locker) const;
|
||||
|
||||
Core::System& system;
|
||||
ShaderDiskCacheOpenGL& disk_cache;
|
||||
const Device& device;
|
||||
|
||||
VAddr cpu_addr{};
|
||||
|
||||
u64 unique_identifier{};
|
||||
Tegra::Engines::ShaderType shader_type{};
|
||||
|
||||
GLShader::ShaderEntries entries;
|
||||
|
||||
ProgramCode code;
|
||||
ProgramCode code_b;
|
||||
|
||||
LockerVariant* curr_locker_variant = nullptr;
|
||||
std::vector<std::unique_ptr<LockerVariant>> locker_variants;
|
||||
std::shared_ptr<VideoCommon::Shader::ConstBufferLocker> locker;
|
||||
ShaderEntries entries;
|
||||
VAddr cpu_addr = 0;
|
||||
std::size_t size_in_bytes = 0;
|
||||
std::shared_ptr<OGLProgram> program;
|
||||
};
|
||||
|
||||
class ShaderCacheOpenGL final : public RasterizerCache<Shader> {
|
||||
|
@ -142,25 +121,15 @@ protected:
|
|||
void FlushObjectInner(const Shader& object) override {}
|
||||
|
||||
private:
|
||||
bool GenerateUnspecializedShaders(const std::atomic_bool& stop_loading,
|
||||
const VideoCore::DiskResourceLoadCallback& callback,
|
||||
const std::vector<ShaderDiskCacheRaw>& raws);
|
||||
|
||||
CachedProgram GeneratePrecompiledProgram(const ShaderDiskCacheDump& dump,
|
||||
const std::unordered_set<GLenum>& supported_formats);
|
||||
|
||||
const PrecompiledVariants* GetPrecompiledVariants(u64 unique_identifier) const;
|
||||
std::shared_ptr<OGLProgram> GeneratePrecompiledProgram(
|
||||
const ShaderDiskCacheEntry& entry, const ShaderDiskCachePrecompiled& precompiled_entry,
|
||||
const std::unordered_set<GLenum>& supported_formats);
|
||||
|
||||
Core::System& system;
|
||||
Core::Frontend::EmuWindow& emu_window;
|
||||
const Device& device;
|
||||
|
||||
ShaderDiskCacheOpenGL disk_cache;
|
||||
|
||||
PrecompiledPrograms precompiled_programs;
|
||||
std::unordered_map<u64, PrecompiledVariants> precompiled_variants;
|
||||
|
||||
std::unordered_map<u64, UnspecializedShader> unspecialized_shaders;
|
||||
std::unordered_map<u64, PrecompiledShader> runtime_cache;
|
||||
|
||||
std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
|
||||
};
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#include "video_core/shader/node.h"
|
||||
#include "video_core/shader/shader_ir.h"
|
||||
|
||||
namespace OpenGL::GLShader {
|
||||
namespace OpenGL {
|
||||
|
||||
namespace {
|
||||
|
||||
|
@ -56,6 +56,25 @@ using TextureIR = std::variant<TextureOffset, TextureDerivates, TextureArgument>
|
|||
constexpr u32 MAX_CONSTBUFFER_ELEMENTS =
|
||||
static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float));
|
||||
|
||||
std::string_view CommonDeclarations = R"(#define ftoi floatBitsToInt
|
||||
#define ftou floatBitsToUint
|
||||
#define itof intBitsToFloat
|
||||
#define utof uintBitsToFloat
|
||||
|
||||
bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {{
|
||||
bvec2 is_nan1 = isnan(pair1);
|
||||
bvec2 is_nan2 = isnan(pair2);
|
||||
return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || is_nan2.y);
|
||||
}}
|
||||
|
||||
const float fswzadd_modifiers_a[] = float[4](-1.0f, 1.0f, -1.0f, 0.0f );
|
||||
const float fswzadd_modifiers_b[] = float[4](-1.0f, -1.0f, 1.0f, -1.0f );
|
||||
|
||||
layout (std140, binding = {}) uniform vs_config {{
|
||||
float y_direction;
|
||||
}};
|
||||
)";
|
||||
|
||||
class ShaderWriter final {
|
||||
public:
|
||||
void AddExpression(std::string_view text) {
|
||||
|
@ -270,11 +289,16 @@ const char* GetImageTypeDeclaration(Tegra::Shader::ImageType image_type) {
|
|||
}
|
||||
|
||||
/// Generates code to use for a swizzle operation.
|
||||
constexpr const char* GetSwizzle(u32 element) {
|
||||
constexpr const char* GetSwizzle(std::size_t element) {
|
||||
constexpr std::array swizzle = {".x", ".y", ".z", ".w"};
|
||||
return swizzle.at(element);
|
||||
}
|
||||
|
||||
constexpr const char* GetColorSwizzle(std::size_t element) {
|
||||
constexpr std::array swizzle = {".r", ".g", ".b", ".a"};
|
||||
return swizzle.at(element);
|
||||
}
|
||||
|
||||
/// Translate topology
|
||||
std::string GetTopologyName(Tegra::Shader::OutputTopology topology) {
|
||||
switch (topology) {
|
||||
|
@ -344,9 +368,48 @@ std::string FlowStackTopName(MetaStackClass stack) {
|
|||
class GLSLDecompiler final {
|
||||
public:
|
||||
explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ShaderType stage,
|
||||
std::string suffix)
|
||||
std::string_view suffix)
|
||||
: device{device}, ir{ir}, stage{stage}, suffix{suffix}, header{ir.GetHeader()} {}
|
||||
|
||||
void Decompile() {
|
||||
DeclareHeader();
|
||||
DeclareVertex();
|
||||
DeclareGeometry();
|
||||
DeclareFragment();
|
||||
DeclareRegisters();
|
||||
DeclareCustomVariables();
|
||||
DeclarePredicates();
|
||||
DeclareLocalMemory();
|
||||
DeclareInternalFlags();
|
||||
DeclareInputAttributes();
|
||||
DeclareOutputAttributes();
|
||||
DeclareConstantBuffers();
|
||||
DeclareGlobalMemory();
|
||||
DeclareSamplers();
|
||||
DeclareImages();
|
||||
DeclarePhysicalAttributeReader();
|
||||
|
||||
code.AddLine("void main() {{");
|
||||
++code.scope;
|
||||
|
||||
if (ir.IsDecompiled()) {
|
||||
DecompileAST();
|
||||
} else {
|
||||
DecompileBranchMode();
|
||||
}
|
||||
|
||||
--code.scope;
|
||||
code.AddLine("}}");
|
||||
}
|
||||
|
||||
std::string GetResult() {
|
||||
return code.GetResult();
|
||||
}
|
||||
|
||||
private:
|
||||
friend class ASTDecompiler;
|
||||
friend class ExprDecompiler;
|
||||
|
||||
void DecompileBranchMode() {
|
||||
// VM's program counter
|
||||
const auto first_address = ir.GetBasicBlocks().begin()->first;
|
||||
|
@@ -387,43 +450,33 @@ public:

    void DecompileAST();

    void Decompile() {
        DeclareVertex();
        DeclareGeometry();
        DeclareRegisters();
        DeclareCustomVariables();
        DeclarePredicates();
        DeclareLocalMemory();
        DeclareInternalFlags();
        DeclareInputAttributes();
        DeclareOutputAttributes();
        DeclareConstantBuffers();
        DeclareGlobalMemory();
        DeclareSamplers();
        DeclareImages();
        DeclarePhysicalAttributeReader();

        code.AddLine("void execute_{}() {{", suffix);
        ++code.scope;

        if (ir.IsDecompiled()) {
            DecompileAST();
        } else {
            DecompileBranchMode();
    void DeclareHeader() {
        code.AddLine("#version 450 compatibility");
        code.AddLine("#extension GL_ARB_separate_shader_objects : enable");
        if (device.HasShaderBallot()) {
            code.AddLine("#extension GL_ARB_shader_ballot : require");
        }
        if (device.HasVertexViewportLayer()) {
            code.AddLine("#extension GL_ARB_shader_viewport_layer_array : require");
        }
        if (device.HasImageLoadFormatted()) {
            code.AddLine("#extension GL_EXT_shader_image_load_formatted : require");
        }
        if (device.HasWarpIntrinsics()) {
            code.AddLine("#extension GL_NV_gpu_shader5 : require");
            code.AddLine("#extension GL_NV_shader_thread_group : require");
            code.AddLine("#extension GL_NV_shader_thread_shuffle : require");
        }
        // This pragma stops Nvidia's driver from over optimizing math (probably using fp16
        // operations) on places where we don't want to.
        // Thanks to Ryujinx for finding this workaround.
        code.AddLine("#pragma optionNV(fastmath off)");

        --code.scope;
        code.AddLine("}}");
        code.AddNewLine();

        code.AddLine(CommonDeclarations, EmulationUniformBlockBinding);
    }

    std::string GetResult() {
        return code.GetResult();
    }

private:
    friend class ASTDecompiler;
    friend class ExprDecompiler;

    void DeclareVertex() {
        if (!IsVertexShader(stage))
            return;
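A minimal standalone sketch of the DeclareHeader() approach above: extension directives are appended only when the device reports support for them. FakeDevice and its booleans are stand-ins, not the project's Device interface.

#include <cstdio>
#include <string>

struct FakeDevice {
    bool has_shader_ballot = true;
    bool has_warp_intrinsics = false;
};

std::string BuildHeader(const FakeDevice& device) {
    std::string header = "#version 450 compatibility\n"
                         "#extension GL_ARB_separate_shader_objects : enable\n";
    if (device.has_shader_ballot) {
        header += "#extension GL_ARB_shader_ballot : require\n";
    }
    if (device.has_warp_intrinsics) {
        header += "#extension GL_NV_shader_thread_shuffle : require\n";
    }
    // Keep the driver from fusing math the emulated shader did not ask for.
    header += "#pragma optionNV(fastmath off)\n";
    return header;
}

int main() {
    std::printf("%s", BuildHeader(FakeDevice{}).c_str());
}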
@@ -450,6 +503,24 @@ private:
        DeclareVertexRedeclarations();
    }

    void DeclareFragment() {
        if (stage != ShaderType::Fragment) {
            return;
        }

        bool any = false;
        for (u32 render_target = 0; render_target < Maxwell::NumRenderTargets; ++render_target) {
            if (!IsRenderTargetEnabled(render_target)) {
                continue;
            }
            code.AddLine("layout (location = {}) out vec4 frag_color{};", render_target, render_target);
            any = true;
        }
        if (any) {
            code.AddNewLine();
        }
    }

    void DeclareVertexRedeclarations() {
        code.AddLine("out gl_PerVertex {{");
        ++code.scope;
@@ -1945,7 +2016,7 @@ private:
        // TODO(Subv): Figure out how dual-source blending is configured in the Switch.
        for (u32 component = 0; component < 4; ++component) {
            if (header.ps.IsColorComponentOutputEnabled(render_target, component)) {
                code.AddLine("FragColor{}[{}] = {};", render_target, component,
                code.AddLine("frag_color{}{} = {};", render_target, GetColorSwizzle(component),
                             SafeGetRegister(current_reg).AsFloat());
                ++current_reg;
            }
@@ -2298,7 +2369,11 @@ private:
    }

    std::string GetLocalMemory() const {
        return "lmem_" + suffix;
        if (suffix.empty()) {
            return "lmem";
        } else {
            return "lmem_" + std::string{suffix};
        }
    }

    std::string GetInternalFlag(InternalFlag flag) const {
@@ -2307,7 +2382,11 @@ private:
        const auto index = static_cast<u32>(flag);
        ASSERT(index < static_cast<u32>(InternalFlag::Amount));

        return fmt::format("{}_{}", InternalFlagNames[index], suffix);
        if (suffix.empty()) {
            return InternalFlagNames[index];
        } else {
            return fmt::format("{}_{}", InternalFlagNames[index], suffix);
        }
    }

    std::string GetSampler(const Sampler& sampler) const {
@@ -2319,7 +2398,11 @@ private:
    }

    std::string GetDeclarationWithSuffix(u32 index, std::string_view name) const {
        return fmt::format("{}_{}_{}", name, index, suffix);
        if (suffix.empty()) {
            return fmt::format("{}{}", name, index);
        } else {
            return fmt::format("{}{}_{}", name, index, suffix);
        }
    }

    u32 GetNumPhysicalInputAttributes() const {
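A standalone sketch of the suffix scheme above, assuming the fmt library the project already uses. With the reworked cache the suffix is normally empty, so a declaration collapses to e.g. "cbuf0" instead of the old "cbuf_0_vertex" ("cbuf" is only an illustrative name):

#include <fmt/format.h>
#include <string>
#include <string_view>

std::string DeclarationWithSuffix(unsigned index, std::string_view name,
                                  std::string_view suffix) {
    if (suffix.empty()) {
        return fmt::format("{}{}", name, index);
    }
    return fmt::format("{}{}_{}", name, index, suffix);
}

int main() {
    fmt::print("{}\n", DeclarationWithSuffix(0, "cbuf", ""));       // cbuf0
    fmt::print("{}\n", DeclarationWithSuffix(0, "cbuf", "vertex")); // cbuf0_vertex
}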
@@ -2334,17 +2417,26 @@ private:
        return std::min<u32>(device.GetMaxVaryings(), Maxwell::NumVaryings);
    }

    bool IsRenderTargetEnabled(u32 render_target) const {
        for (u32 component = 0; component < 4; ++component) {
            if (header.ps.IsColorComponentOutputEnabled(render_target, component)) {
                return true;
            }
        }
        return false;
    }

    const Device& device;
    const ShaderIR& ir;
    const ShaderType stage;
    const std::string suffix;
    const std::string_view suffix;
    const Header header;

    ShaderWriter code;
};

std::string GetFlowVariable(u32 i) {
    return fmt::format("flow_var_{}", i);
std::string GetFlowVariable(u32 index) {
    return fmt::format("flow_var{}", index);
}

class ExprDecompiler {
@@ -2531,7 +2623,7 @@ void GLSLDecompiler::DecompileAST() {

} // Anonymous namespace

ShaderEntries GetEntries(const VideoCommon::Shader::ShaderIR& ir) {
ShaderEntries MakeEntries(const VideoCommon::Shader::ShaderIR& ir) {
    ShaderEntries entries;
    for (const auto& cbuf : ir.GetConstantBuffers()) {
        entries.const_buffers.emplace_back(cbuf.second.GetMaxOffset(), cbuf.second.IsIndirect(),
@@ -2555,28 +2647,11 @@ ShaderEntries GetEntries(const VideoCommon::Shader::ShaderIR& ir) {
    return entries;
}

std::string GetCommonDeclarations() {
    return R"(#define ftoi floatBitsToInt
#define ftou floatBitsToUint
#define itof intBitsToFloat
#define utof uintBitsToFloat

bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {
    bvec2 is_nan1 = isnan(pair1);
    bvec2 is_nan2 = isnan(pair2);
    return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || is_nan2.y);
}

const float fswzadd_modifiers_a[] = float[4](-1.0f, 1.0f, -1.0f, 0.0f );
const float fswzadd_modifiers_b[] = float[4](-1.0f, -1.0f, 1.0f, -1.0f );
)";
}

std::string Decompile(const Device& device, const ShaderIR& ir, ShaderType stage,
                      const std::string& suffix) {
std::string DecompileShader(const Device& device, const ShaderIR& ir, ShaderType stage,
                            std::string_view suffix) {
    GLSLDecompiler decompiler(device, ir, stage, suffix);
    decompiler.Decompile();
    return decompiler.GetResult();
}

} // namespace OpenGL::GLShader
} // namespace OpenGL

@@ -6,6 +6,7 @@

#include <array>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "common/common_types.h"
@@ -18,10 +19,8 @@ class ShaderIR;
}

namespace OpenGL {
class Device;
}

namespace OpenGL::GLShader {
class Device;

using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using SamplerEntry = VideoCommon::Shader::Sampler;
@@ -78,11 +77,9 @@ struct ShaderEntries {
    std::size_t shader_length{};
};

ShaderEntries GetEntries(const VideoCommon::Shader::ShaderIR& ir);
ShaderEntries MakeEntries(const VideoCommon::Shader::ShaderIR& ir);

std::string GetCommonDeclarations();
std::string DecompileShader(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
                            Tegra::Engines::ShaderType stage, std::string_view suffix = {});

std::string Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
                      Tegra::Engines::ShaderType stage, const std::string& suffix);

} // namespace OpenGL::GLShader
} // namespace OpenGL

@@ -31,32 +31,24 @@ namespace {

using ShaderCacheVersionHash = std::array<u8, 64>;

enum class TransferableEntryKind : u32 {
    Raw,
    Usage,
};

struct ConstBufferKey {
    u32 cbuf{};
    u32 offset{};
    u32 value{};
    u32 cbuf = 0;
    u32 offset = 0;
    u32 value = 0;
};

struct BoundSamplerKey {
    u32 offset{};
    Tegra::Engines::SamplerDescriptor sampler{};
    u32 offset = 0;
    Tegra::Engines::SamplerDescriptor sampler;
};

struct BindlessSamplerKey {
    u32 cbuf{};
    u32 offset{};
    Tegra::Engines::SamplerDescriptor sampler{};
    u32 cbuf = 0;
    u32 offset = 0;
    Tegra::Engines::SamplerDescriptor sampler;
};

constexpr u32 NativeVersion = 12;

// Making sure sizes doesn't change by accident
static_assert(sizeof(ProgramVariant) == 20);
constexpr u32 NativeVersion = 16;

ShaderCacheVersionHash GetShaderCacheVersionHash() {
    ShaderCacheVersionHash hash{};
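A standalone sketch of why the flat POD records above exist: the in-memory key map is keyed by (cbuf, offset) pairs, which cannot be written to disk directly, so saving flattens it into trivially copyable records and loading rebuilds the map. The types here are stand-ins (the project keeps an unordered KeyMap, a plain std::map is used only for brevity):

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct ConstBufferKey {
    std::uint32_t cbuf = 0;
    std::uint32_t offset = 0;
    std::uint32_t value = 0;
};

int main() {
    std::map<std::pair<std::uint32_t, std::uint32_t>, std::uint32_t> keys{
        {{0, 0x10}, 1}, {{1, 0x20}, 7}};

    // Flatten for writing (mirrors the loop in the entry's Save below).
    std::vector<ConstBufferKey> flat_keys;
    flat_keys.reserve(keys.size());
    for (const auto& [address, value] : keys) {
        flat_keys.push_back(ConstBufferKey{address.first, address.second, value});
    }

    // Rebuild after reading (mirrors the entry's Load).
    std::map<std::pair<std::uint32_t, std::uint32_t>, std::uint32_t> rebuilt;
    for (const auto& key : flat_keys) {
        rebuilt.insert({{key.cbuf, key.offset}, key.value});
    }
    return rebuilt == keys ? 0 : 1;
}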
@ -67,61 +59,122 @@ ShaderCacheVersionHash GetShaderCacheVersionHash() {
|
|||
|
||||
} // Anonymous namespace
|
||||
|
||||
ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, ShaderType type, ProgramCode code,
|
||||
ProgramCode code_b)
|
||||
: unique_identifier{unique_identifier}, type{type}, code{std::move(code)}, code_b{std::move(
|
||||
code_b)} {}
|
||||
ShaderDiskCacheEntry::ShaderDiskCacheEntry() = default;
|
||||
|
||||
ShaderDiskCacheRaw::ShaderDiskCacheRaw() = default;
|
||||
ShaderDiskCacheEntry::~ShaderDiskCacheEntry() = default;
|
||||
|
||||
ShaderDiskCacheRaw::~ShaderDiskCacheRaw() = default;
|
||||
|
||||
bool ShaderDiskCacheRaw::Load(FileUtil::IOFile& file) {
|
||||
if (file.ReadBytes(&unique_identifier, sizeof(u64)) != sizeof(u64) ||
|
||||
file.ReadBytes(&type, sizeof(u32)) != sizeof(u32)) {
|
||||
bool ShaderDiskCacheEntry::Load(FileUtil::IOFile& file) {
|
||||
if (file.ReadBytes(&type, sizeof(u32)) != sizeof(u32)) {
|
||||
return false;
|
||||
}
|
||||
u32 code_size{};
|
||||
u32 code_size_b{};
|
||||
u32 code_size;
|
||||
u32 code_size_b;
|
||||
if (file.ReadBytes(&code_size, sizeof(u32)) != sizeof(u32) ||
|
||||
file.ReadBytes(&code_size_b, sizeof(u32)) != sizeof(u32)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
code.resize(code_size);
|
||||
code_b.resize(code_size_b);
|
||||
|
||||
if (file.ReadArray(code.data(), code_size) != code_size)
|
||||
if (file.ReadArray(code.data(), code_size) != code_size) {
|
||||
return false;
|
||||
|
||||
}
|
||||
if (HasProgramA() && file.ReadArray(code_b.data(), code_size_b) != code_size_b) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool is_texture_handler_size_known;
|
||||
u32 texture_handler_size_value;
|
||||
u32 num_keys;
|
||||
u32 num_bound_samplers;
|
||||
u32 num_bindless_samplers;
|
||||
if (file.ReadArray(&unique_identifier, 1) != 1 || file.ReadArray(&bound_buffer, 1) != 1 ||
|
||||
file.ReadArray(&is_texture_handler_size_known, 1) != 1 ||
|
||||
file.ReadArray(&texture_handler_size_value, 1) != 1 || file.ReadArray(&num_keys, 1) != 1 ||
|
||||
file.ReadArray(&num_bound_samplers, 1) != 1 ||
|
||||
file.ReadArray(&num_bindless_samplers, 1) != 1) {
|
||||
return false;
|
||||
}
|
||||
if (is_texture_handler_size_known) {
|
||||
texture_handler_size = texture_handler_size_value;
|
||||
}
|
||||
|
||||
std::vector<ConstBufferKey> flat_keys(num_keys);
|
||||
std::vector<BoundSamplerKey> flat_bound_samplers(num_bound_samplers);
|
||||
std::vector<BindlessSamplerKey> flat_bindless_samplers(num_bindless_samplers);
|
||||
if (file.ReadArray(flat_keys.data(), flat_keys.size()) != flat_keys.size() ||
|
||||
file.ReadArray(flat_bound_samplers.data(), flat_bound_samplers.size()) !=
|
||||
flat_bound_samplers.size() ||
|
||||
file.ReadArray(flat_bindless_samplers.data(), flat_bindless_samplers.size()) !=
|
||||
flat_bindless_samplers.size()) {
|
||||
return false;
|
||||
}
|
||||
for (const auto& key : flat_keys) {
|
||||
keys.insert({{key.cbuf, key.offset}, key.value});
|
||||
}
|
||||
for (const auto& key : flat_bound_samplers) {
|
||||
bound_samplers.emplace(key.offset, key.sampler);
|
||||
}
|
||||
for (const auto& key : flat_bindless_samplers) {
|
||||
bindless_samplers.insert({{key.cbuf, key.offset}, key.sampler});
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
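For reference, a sketch of the fixed-size trailer that the Load() above and the Save() below agree on after the two program code blobs. It is only a reading aid; the real code reads and writes each field individually through IOFile, so no packed struct like this exists on disk.

#include <cstdint>

struct TransferableTrailer {
    std::uint64_t unique_identifier;
    std::uint32_t bound_buffer;
    bool has_texture_handler_size;       // written from texture_handler_size.has_value()
    std::uint32_t texture_handler_size;  // value_or(0) when the size was never deduced
    std::uint32_t num_keys;              // ConstBufferKey records follow
    std::uint32_t num_bound_samplers;    // BoundSamplerKey records follow
    std::uint32_t num_bindless_samplers; // BindlessSamplerKey records follow
};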
||||
bool ShaderDiskCacheRaw::Save(FileUtil::IOFile& file) const {
|
||||
if (file.WriteObject(unique_identifier) != 1 || file.WriteObject(static_cast<u32>(type)) != 1 ||
|
||||
bool ShaderDiskCacheEntry::Save(FileUtil::IOFile& file) const {
|
||||
if (file.WriteObject(static_cast<u32>(type)) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(code.size())) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(code_b.size())) != 1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (file.WriteArray(code.data(), code.size()) != code.size())
|
||||
if (file.WriteArray(code.data(), code.size()) != code.size()) {
|
||||
return false;
|
||||
|
||||
}
|
||||
if (HasProgramA() && file.WriteArray(code_b.data(), code_b.size()) != code_b.size()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
||||
if (file.WriteObject(unique_identifier) != 1 || file.WriteObject(bound_buffer) != 1 ||
|
||||
file.WriteObject(texture_handler_size.has_value()) != 1 ||
|
||||
file.WriteObject(texture_handler_size.value_or(0)) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(keys.size())) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(bound_samplers.size())) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(bindless_samplers.size())) != 1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::vector<ConstBufferKey> flat_keys;
|
||||
flat_keys.reserve(keys.size());
|
||||
for (const auto& [address, value] : keys) {
|
||||
flat_keys.push_back(ConstBufferKey{address.first, address.second, value});
|
||||
}
|
||||
|
||||
std::vector<BoundSamplerKey> flat_bound_samplers;
|
||||
flat_bound_samplers.reserve(bound_samplers.size());
|
||||
for (const auto& [address, sampler] : bound_samplers) {
|
||||
flat_bound_samplers.push_back(BoundSamplerKey{address, sampler});
|
||||
}
|
||||
|
||||
std::vector<BindlessSamplerKey> flat_bindless_samplers;
|
||||
flat_bindless_samplers.reserve(bindless_samplers.size());
|
||||
for (const auto& [address, sampler] : bindless_samplers) {
|
||||
flat_bindless_samplers.push_back(
|
||||
BindlessSamplerKey{address.first, address.second, sampler});
|
||||
}
|
||||
|
||||
return file.WriteArray(flat_keys.data(), flat_keys.size()) == flat_keys.size() &&
|
||||
file.WriteArray(flat_bound_samplers.data(), flat_bound_samplers.size()) ==
|
||||
flat_bound_samplers.size() &&
|
||||
file.WriteArray(flat_bindless_samplers.data(), flat_bindless_samplers.size()) ==
|
||||
flat_bindless_samplers.size();
|
||||
}
|
||||
|
||||
ShaderDiskCacheOpenGL::ShaderDiskCacheOpenGL(Core::System& system) : system{system} {}
|
||||
|
||||
ShaderDiskCacheOpenGL::~ShaderDiskCacheOpenGL() = default;
|
||||
|
||||
std::optional<std::pair<std::vector<ShaderDiskCacheRaw>, std::vector<ShaderDiskCacheUsage>>>
|
||||
ShaderDiskCacheOpenGL::LoadTransferable() {
|
||||
std::optional<std::vector<ShaderDiskCacheEntry>> ShaderDiskCacheOpenGL::LoadTransferable() {
|
||||
// Skip games without title id
|
||||
const bool has_title_id = system.CurrentProcess()->GetTitleID() != 0;
|
||||
if (!Settings::values.use_disk_shader_cache || !has_title_id) {
|
||||
|
@ -130,17 +183,14 @@ ShaderDiskCacheOpenGL::LoadTransferable() {
|
|||
|
||||
FileUtil::IOFile file(GetTransferablePath(), "rb");
|
||||
if (!file.IsOpen()) {
|
||||
LOG_INFO(Render_OpenGL, "No transferable shader cache found for game with title id={}",
|
||||
GetTitleID());
|
||||
LOG_INFO(Render_OpenGL, "No transferable shader cache found");
|
||||
is_usable = true;
|
||||
return {};
|
||||
}
|
||||
|
||||
u32 version{};
|
||||
if (file.ReadBytes(&version, sizeof(version)) != sizeof(version)) {
|
||||
LOG_ERROR(Render_OpenGL,
|
||||
"Failed to get transferable cache version for title id={}, skipping",
|
||||
GetTitleID());
|
||||
LOG_ERROR(Render_OpenGL, "Failed to get transferable cache version, skipping it");
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -158,105 +208,42 @@ ShaderDiskCacheOpenGL::LoadTransferable() {
|
|||
}
|
||||
|
||||
// Version is valid, load the shaders
|
||||
constexpr const char error_loading[] = "Failed to load transferable raw entry, skipping";
|
||||
std::vector<ShaderDiskCacheRaw> raws;
|
||||
std::vector<ShaderDiskCacheUsage> usages;
|
||||
std::vector<ShaderDiskCacheEntry> entries;
|
||||
while (file.Tell() < file.GetSize()) {
|
||||
TransferableEntryKind kind{};
|
||||
if (file.ReadBytes(&kind, sizeof(u32)) != sizeof(u32)) {
|
||||
LOG_ERROR(Render_OpenGL, "Failed to read transferable file, skipping");
|
||||
return {};
|
||||
}
|
||||
|
||||
switch (kind) {
|
||||
case TransferableEntryKind::Raw: {
|
||||
ShaderDiskCacheRaw entry;
|
||||
if (!entry.Load(file)) {
|
||||
LOG_ERROR(Render_OpenGL, error_loading);
|
||||
return {};
|
||||
}
|
||||
transferable.insert({entry.GetUniqueIdentifier(), {}});
|
||||
raws.push_back(std::move(entry));
|
||||
break;
|
||||
}
|
||||
case TransferableEntryKind::Usage: {
|
||||
ShaderDiskCacheUsage usage;
|
||||
|
||||
u32 num_keys{};
|
||||
u32 num_bound_samplers{};
|
||||
u32 num_bindless_samplers{};
|
||||
if (file.ReadArray(&usage.unique_identifier, 1) != 1 ||
|
||||
file.ReadArray(&usage.variant, 1) != 1 ||
|
||||
file.ReadArray(&usage.bound_buffer, 1) != 1 || file.ReadArray(&num_keys, 1) != 1 ||
|
||||
file.ReadArray(&num_bound_samplers, 1) != 1 ||
|
||||
file.ReadArray(&num_bindless_samplers, 1) != 1) {
|
||||
LOG_ERROR(Render_OpenGL, error_loading);
|
||||
return {};
|
||||
}
|
||||
|
||||
std::vector<ConstBufferKey> keys(num_keys);
|
||||
std::vector<BoundSamplerKey> bound_samplers(num_bound_samplers);
|
||||
std::vector<BindlessSamplerKey> bindless_samplers(num_bindless_samplers);
|
||||
if (file.ReadArray(keys.data(), keys.size()) != keys.size() ||
|
||||
file.ReadArray(bound_samplers.data(), bound_samplers.size()) !=
|
||||
bound_samplers.size() ||
|
||||
file.ReadArray(bindless_samplers.data(), bindless_samplers.size()) !=
|
||||
bindless_samplers.size()) {
|
||||
LOG_ERROR(Render_OpenGL, error_loading);
|
||||
return {};
|
||||
}
|
||||
for (const auto& key : keys) {
|
||||
usage.keys.insert({{key.cbuf, key.offset}, key.value});
|
||||
}
|
||||
for (const auto& key : bound_samplers) {
|
||||
usage.bound_samplers.emplace(key.offset, key.sampler);
|
||||
}
|
||||
for (const auto& key : bindless_samplers) {
|
||||
usage.bindless_samplers.insert({{key.cbuf, key.offset}, key.sampler});
|
||||
}
|
||||
|
||||
usages.push_back(std::move(usage));
|
||||
break;
|
||||
}
|
||||
default:
|
||||
LOG_ERROR(Render_OpenGL, "Unknown transferable shader cache entry kind={}, skipping",
|
||||
static_cast<u32>(kind));
|
||||
ShaderDiskCacheEntry& entry = entries.emplace_back();
|
||||
if (!entry.Load(file)) {
|
||||
LOG_ERROR(Render_OpenGL, "Failed to load transferable raw entry, skipping");
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
is_usable = true;
|
||||
return {{std::move(raws), std::move(usages)}};
|
||||
return {std::move(entries)};
|
||||
}
|
||||
|
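A standalone sketch of how a caller can consume the new LoadTransferable() shape: std::nullopt means there was nothing usable to preload (missing file, wrong version, or a broken entry), while a populated vector holds one ShaderDiskCacheEntry per shader. The types below are stand-ins, not the project's.

#include <cstdio>
#include <optional>
#include <vector>

struct ShaderDiskCacheEntry {
    unsigned long long unique_identifier = 0;
};

std::optional<std::vector<ShaderDiskCacheEntry>> LoadTransferable() {
    // Stand-in: pretend the transferable file held two entries.
    return std::vector<ShaderDiskCacheEntry>{{0xCAFE}, {0xBEEF}};
}

int main() {
    const auto entries = LoadTransferable();
    if (!entries) {
        std::puts("no usable transferable cache");
        return 0;
    }
    for (const auto& entry : *entries) {
        std::printf("rebuilding shader %016llx\n", entry.unique_identifier);
    }
}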
||||
std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>
|
||||
ShaderDiskCacheOpenGL::LoadPrecompiled() {
|
||||
std::vector<ShaderDiskCachePrecompiled> ShaderDiskCacheOpenGL::LoadPrecompiled() {
|
||||
if (!is_usable) {
|
||||
return {};
|
||||
}
|
||||
|
||||
std::string path = GetPrecompiledPath();
|
||||
FileUtil::IOFile file(path, "rb");
|
||||
FileUtil::IOFile file(GetPrecompiledPath(), "rb");
|
||||
if (!file.IsOpen()) {
|
||||
LOG_INFO(Render_OpenGL, "No precompiled shader cache found for game with title id={}",
|
||||
GetTitleID());
|
||||
LOG_INFO(Render_OpenGL, "No precompiled shader cache found");
|
||||
return {};
|
||||
}
|
||||
|
||||
const auto result = LoadPrecompiledFile(file);
|
||||
if (!result) {
|
||||
LOG_INFO(Render_OpenGL,
|
||||
"Failed to load precompiled cache for game with title id={}, removing",
|
||||
GetTitleID());
|
||||
file.Close();
|
||||
InvalidatePrecompiled();
|
||||
return {};
|
||||
if (const auto result = LoadPrecompiledFile(file)) {
|
||||
return *result;
|
||||
}
|
||||
return *result;
|
||||
|
||||
LOG_INFO(Render_OpenGL, "Failed to load precompiled cache");
|
||||
file.Close();
|
||||
InvalidatePrecompiled();
|
||||
return {};
|
||||
}
|
||||
|
||||
std::optional<std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>>
|
||||
ShaderDiskCacheOpenGL::LoadPrecompiledFile(FileUtil::IOFile& file) {
|
||||
std::optional<std::vector<ShaderDiskCachePrecompiled>> ShaderDiskCacheOpenGL::LoadPrecompiledFile(
|
||||
FileUtil::IOFile& file) {
|
||||
// Read compressed file from disk and decompress to virtual precompiled cache file
|
||||
std::vector<u8> compressed(file.GetSize());
|
||||
file.ReadBytes(compressed.data(), compressed.size());
|
||||
|
@ -275,58 +262,22 @@ ShaderDiskCacheOpenGL::LoadPrecompiledFile(FileUtil::IOFile& file) {
|
|||
return {};
|
||||
}
|
||||
|
||||
ShaderDumpsMap dumps;
|
||||
std::vector<ShaderDiskCachePrecompiled> entries;
|
||||
while (precompiled_cache_virtual_file_offset < precompiled_cache_virtual_file.GetSize()) {
|
||||
u32 num_keys{};
|
||||
u32 num_bound_samplers{};
|
||||
u32 num_bindless_samplers{};
|
||||
ShaderDiskCacheUsage usage;
|
||||
if (!LoadObjectFromPrecompiled(usage.unique_identifier) ||
|
||||
!LoadObjectFromPrecompiled(usage.variant) ||
|
||||
!LoadObjectFromPrecompiled(usage.bound_buffer) ||
|
||||
!LoadObjectFromPrecompiled(num_keys) ||
|
||||
!LoadObjectFromPrecompiled(num_bound_samplers) ||
|
||||
!LoadObjectFromPrecompiled(num_bindless_samplers)) {
|
||||
return {};
|
||||
}
|
||||
std::vector<ConstBufferKey> keys(num_keys);
|
||||
std::vector<BoundSamplerKey> bound_samplers(num_bound_samplers);
|
||||
std::vector<BindlessSamplerKey> bindless_samplers(num_bindless_samplers);
|
||||
if (!LoadArrayFromPrecompiled(keys.data(), keys.size()) ||
|
||||
!LoadArrayFromPrecompiled(bound_samplers.data(), bound_samplers.size()) !=
|
||||
bound_samplers.size() ||
|
||||
!LoadArrayFromPrecompiled(bindless_samplers.data(), bindless_samplers.size()) !=
|
||||
bindless_samplers.size()) {
|
||||
return {};
|
||||
}
|
||||
for (const auto& key : keys) {
|
||||
usage.keys.insert({{key.cbuf, key.offset}, key.value});
|
||||
}
|
||||
for (const auto& key : bound_samplers) {
|
||||
usage.bound_samplers.emplace(key.offset, key.sampler);
|
||||
}
|
||||
for (const auto& key : bindless_samplers) {
|
||||
usage.bindless_samplers.insert({{key.cbuf, key.offset}, key.sampler});
|
||||
}
|
||||
|
||||
ShaderDiskCacheDump dump;
|
||||
if (!LoadObjectFromPrecompiled(dump.binary_format)) {
|
||||
u32 binary_size;
|
||||
auto& entry = entries.emplace_back();
|
||||
if (!LoadObjectFromPrecompiled(entry.unique_identifier) ||
|
||||
!LoadObjectFromPrecompiled(entry.binary_format) ||
|
||||
!LoadObjectFromPrecompiled(binary_size)) {
|
||||
return {};
|
||||
}
|
||||
|
||||
u32 binary_length{};
|
||||
if (!LoadObjectFromPrecompiled(binary_length)) {
|
||||
entry.binary.resize(binary_size);
|
||||
if (!LoadArrayFromPrecompiled(entry.binary.data(), entry.binary.size())) {
|
||||
return {};
|
||||
}
|
||||
|
||||
dump.binary.resize(binary_length);
|
||||
if (!LoadArrayFromPrecompiled(dump.binary.data(), dump.binary.size())) {
|
||||
return {};
|
||||
}
|
||||
|
||||
dumps.emplace(std::move(usage), dump);
|
||||
}
|
||||
return dumps;
|
||||
return entries;
|
||||
}
|
||||
|
||||
void ShaderDiskCacheOpenGL::InvalidateTransferable() {
|
||||
|
@ -346,13 +297,13 @@ void ShaderDiskCacheOpenGL::InvalidatePrecompiled() {
|
|||
}
|
||||
}
|
||||
|
||||
void ShaderDiskCacheOpenGL::SaveRaw(const ShaderDiskCacheRaw& entry) {
|
||||
void ShaderDiskCacheOpenGL::SaveEntry(const ShaderDiskCacheEntry& entry) {
|
||||
if (!is_usable) {
|
||||
return;
|
||||
}
|
||||
|
||||
const u64 id = entry.GetUniqueIdentifier();
|
||||
if (transferable.find(id) != transferable.end()) {
|
||||
const u64 id = entry.unique_identifier;
|
||||
if (stored_transferable.find(id) != stored_transferable.end()) {
|
||||
// The shader already exists
|
||||
return;
|
||||
}
|
||||
|
@ -361,71 +312,17 @@ void ShaderDiskCacheOpenGL::SaveRaw(const ShaderDiskCacheRaw& entry) {
|
|||
if (!file.IsOpen()) {
|
||||
return;
|
||||
}
|
||||
if (file.WriteObject(TransferableEntryKind::Raw) != 1 || !entry.Save(file)) {
|
||||
if (!entry.Save(file)) {
|
||||
LOG_ERROR(Render_OpenGL, "Failed to save raw transferable cache entry, removing");
|
||||
file.Close();
|
||||
InvalidateTransferable();
|
||||
return;
|
||||
}
|
||||
transferable.insert({id, {}});
|
||||
|
||||
stored_transferable.insert(id);
|
||||
}
|
||||
|
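A standalone sketch of the collision check in SaveEntry() above: with per-usage records gone, a single unordered_set of unique identifiers is enough to know whether a shader has already been written to the transferable file.

#include <cstdint>
#include <unordered_set>

int main() {
    std::unordered_set<std::uint64_t> stored_transferable;

    const std::uint64_t id = 0x1234'5678'9abc'def0;
    if (stored_transferable.find(id) != stored_transferable.end()) {
        return 0; // already on disk, skip the write
    }
    // ... write the entry to the transferable file here ...
    stored_transferable.insert(id);
}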
||||
void ShaderDiskCacheOpenGL::SaveUsage(const ShaderDiskCacheUsage& usage) {
|
||||
if (!is_usable) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto it = transferable.find(usage.unique_identifier);
|
||||
ASSERT_MSG(it != transferable.end(), "Saving shader usage without storing raw previously");
|
||||
|
||||
auto& usages{it->second};
|
||||
if (usages.find(usage) != usages.end()) {
|
||||
// Skip this variant since the shader is already stored.
|
||||
return;
|
||||
}
|
||||
usages.insert(usage);
|
||||
|
||||
FileUtil::IOFile file = AppendTransferableFile();
|
||||
if (!file.IsOpen())
|
||||
return;
|
||||
const auto Close = [&] {
|
||||
LOG_ERROR(Render_OpenGL, "Failed to save usage transferable cache entry, removing");
|
||||
file.Close();
|
||||
InvalidateTransferable();
|
||||
};
|
||||
|
||||
if (file.WriteObject(TransferableEntryKind::Usage) != 1 ||
|
||||
file.WriteObject(usage.unique_identifier) != 1 || file.WriteObject(usage.variant) != 1 ||
|
||||
file.WriteObject(usage.bound_buffer) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(usage.keys.size())) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(usage.bound_samplers.size())) != 1 ||
|
||||
file.WriteObject(static_cast<u32>(usage.bindless_samplers.size())) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
for (const auto& [pair, value] : usage.keys) {
|
||||
const auto [cbuf, offset] = pair;
|
||||
if (file.WriteObject(ConstBufferKey{cbuf, offset, value}) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (const auto& [offset, sampler] : usage.bound_samplers) {
|
||||
if (file.WriteObject(BoundSamplerKey{offset, sampler}) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (const auto& [pair, sampler] : usage.bindless_samplers) {
|
||||
const auto [cbuf, offset] = pair;
|
||||
if (file.WriteObject(BindlessSamplerKey{cbuf, offset, sampler}) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ShaderDiskCacheOpenGL::SaveDump(const ShaderDiskCacheUsage& usage, GLuint program) {
|
||||
void ShaderDiskCacheOpenGL::SavePrecompiled(u64 unique_identifier, GLuint program) {
|
||||
if (!is_usable) {
|
||||
return;
|
||||
}
|
||||
|
@ -437,51 +334,19 @@ void ShaderDiskCacheOpenGL::SaveDump(const ShaderDiskCacheUsage& usage, GLuint p
|
|||
SavePrecompiledHeaderToVirtualPrecompiledCache();
|
||||
}
|
||||
|
||||
GLint binary_length{};
|
||||
GLint binary_length;
|
||||
glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &binary_length);
|
||||
|
||||
GLenum binary_format{};
|
||||
GLenum binary_format;
|
||||
std::vector<u8> binary(binary_length);
|
||||
glGetProgramBinary(program, binary_length, nullptr, &binary_format, binary.data());
|
||||
|
||||
const auto Close = [&] {
|
||||
LOG_ERROR(Render_OpenGL, "Failed to save binary program file in shader={:016X}, removing",
|
||||
usage.unique_identifier);
|
||||
InvalidatePrecompiled();
|
||||
};
|
||||
|
||||
if (!SaveObjectToPrecompiled(usage.unique_identifier) ||
|
||||
!SaveObjectToPrecompiled(usage.variant) || !SaveObjectToPrecompiled(usage.bound_buffer) ||
|
||||
!SaveObjectToPrecompiled(static_cast<u32>(usage.keys.size())) ||
|
||||
!SaveObjectToPrecompiled(static_cast<u32>(usage.bound_samplers.size())) ||
|
||||
!SaveObjectToPrecompiled(static_cast<u32>(usage.bindless_samplers.size()))) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
for (const auto& [pair, value] : usage.keys) {
|
||||
const auto [cbuf, offset] = pair;
|
||||
if (SaveObjectToPrecompiled(ConstBufferKey{cbuf, offset, value}) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (const auto& [offset, sampler] : usage.bound_samplers) {
|
||||
if (SaveObjectToPrecompiled(BoundSamplerKey{offset, sampler}) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (const auto& [pair, sampler] : usage.bindless_samplers) {
|
||||
const auto [cbuf, offset] = pair;
|
||||
if (SaveObjectToPrecompiled(BindlessSamplerKey{cbuf, offset, sampler}) != 1) {
|
||||
Close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!SaveObjectToPrecompiled(static_cast<u32>(binary_format)) ||
|
||||
!SaveObjectToPrecompiled(static_cast<u32>(binary_length)) ||
|
||||
if (!SaveObjectToPrecompiled(unique_identifier) || !SaveObjectToPrecompiled(binary_format) ||
|
||||
!SaveObjectToPrecompiled(static_cast<u32>(binary.size())) ||
|
||||
!SaveArrayToPrecompiled(binary.data(), binary.size())) {
|
||||
Close();
|
||||
LOG_ERROR(Render_OpenGL, "Failed to save binary program file in shader={:016X}, removing",
|
||||
unique_identifier);
|
||||
InvalidatePrecompiled();
|
||||
}
|
||||
}
|
||||
|
||||
|
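For context, a sketch of the GL calls SavePrecompiled() builds on; it assumes a current OpenGL context and a glad-style loader header, and omits error handling. The returned blob plus its GLenum format are what the precompiled cache stores for each unique identifier.

#include <vector>

#include <glad/glad.h>

std::vector<GLubyte> DumpProgramBinary(GLuint program, GLenum& binary_format) {
    GLint binary_length = 0;
    glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &binary_length);

    std::vector<GLubyte> binary(binary_length);
    glGetProgramBinary(program, binary_length, nullptr, &binary_format, binary.data());
    return binary;
}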
@ -534,7 +399,6 @@ void ShaderDiskCacheOpenGL::SaveVirtualPrecompiledFile() {
|
|||
if (file.WriteBytes(compressed.data(), compressed.size()) != compressed.size()) {
|
||||
LOG_ERROR(Render_OpenGL, "Failed to write precompiled cache version in path={}",
|
||||
precompiled_path);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#include "common/common_types.h"
|
||||
#include "core/file_sys/vfs_vector.h"
|
||||
#include "video_core/engines/shader_type.h"
|
||||
#include "video_core/renderer_opengl/gl_shader_gen.h"
|
||||
#include "video_core/shader/const_buffer_locker.h"
|
||||
|
||||
namespace Core {
|
||||
|
@ -32,139 +31,37 @@ class IOFile;
|
|||
|
||||
namespace OpenGL {
|
||||
|
||||
struct ShaderDiskCacheUsage;
|
||||
struct ShaderDiskCacheDump;
|
||||
|
||||
using ProgramCode = std::vector<u64>;
|
||||
using ShaderDumpsMap = std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>;
|
||||
|
||||
/// Describes the different variants a program can be compiled with.
|
||||
struct ProgramVariant final {
|
||||
ProgramVariant() = default;
|
||||
|
||||
/// Graphics constructor.
|
||||
explicit constexpr ProgramVariant(GLenum primitive_mode) noexcept
|
||||
: primitive_mode{primitive_mode} {}
|
||||
|
||||
/// Compute constructor.
|
||||
explicit constexpr ProgramVariant(u32 block_x, u32 block_y, u32 block_z, u32 shared_memory_size,
|
||||
u32 local_memory_size) noexcept
|
||||
: block_x{block_x}, block_y{static_cast<u16>(block_y)}, block_z{static_cast<u16>(block_z)},
|
||||
shared_memory_size{shared_memory_size}, local_memory_size{local_memory_size} {}
|
||||
|
||||
// Graphics specific parameters.
|
||||
GLenum primitive_mode{};
|
||||
|
||||
// Compute specific parameters.
|
||||
u32 block_x{};
|
||||
u16 block_y{};
|
||||
u16 block_z{};
|
||||
u32 shared_memory_size{};
|
||||
u32 local_memory_size{};
|
||||
|
||||
bool operator==(const ProgramVariant& rhs) const noexcept {
|
||||
return std::tie(primitive_mode, block_x, block_y, block_z, shared_memory_size,
|
||||
local_memory_size) == std::tie(rhs.primitive_mode, rhs.block_x, rhs.block_y,
|
||||
rhs.block_z, rhs.shared_memory_size,
|
||||
rhs.local_memory_size);
|
||||
}
|
||||
|
||||
bool operator!=(const ProgramVariant& rhs) const noexcept {
|
||||
return !operator==(rhs);
|
||||
}
|
||||
};
|
||||
static_assert(std::is_trivially_copyable_v<ProgramVariant>);
|
||||
|
||||
/// Describes how a shader is used.
|
||||
struct ShaderDiskCacheUsage {
|
||||
u64 unique_identifier{};
|
||||
ProgramVariant variant;
|
||||
u32 bound_buffer{};
|
||||
VideoCommon::Shader::KeyMap keys;
|
||||
VideoCommon::Shader::BoundSamplerMap bound_samplers;
|
||||
VideoCommon::Shader::BindlessSamplerMap bindless_samplers;
|
||||
|
||||
bool operator==(const ShaderDiskCacheUsage& rhs) const {
|
||||
return std::tie(unique_identifier, variant, keys, bound_samplers, bindless_samplers) ==
|
||||
std::tie(rhs.unique_identifier, rhs.variant, rhs.keys, rhs.bound_samplers,
|
||||
rhs.bindless_samplers);
|
||||
}
|
||||
|
||||
bool operator!=(const ShaderDiskCacheUsage& rhs) const {
|
||||
return !operator==(rhs);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace OpenGL
|
||||
|
||||
namespace std {
|
||||
|
||||
template <>
|
||||
struct hash<OpenGL::ProgramVariant> {
|
||||
std::size_t operator()(const OpenGL::ProgramVariant& variant) const noexcept {
|
||||
return (static_cast<std::size_t>(variant.primitive_mode) << 6) ^
|
||||
static_cast<std::size_t>(variant.block_x) ^
|
||||
(static_cast<std::size_t>(variant.block_y) << 32) ^
|
||||
(static_cast<std::size_t>(variant.block_z) << 48) ^
|
||||
(static_cast<std::size_t>(variant.shared_memory_size) << 16) ^
|
||||
(static_cast<std::size_t>(variant.local_memory_size) << 36);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct hash<OpenGL::ShaderDiskCacheUsage> {
|
||||
std::size_t operator()(const OpenGL::ShaderDiskCacheUsage& usage) const noexcept {
|
||||
return static_cast<std::size_t>(usage.unique_identifier) ^
|
||||
std::hash<OpenGL::ProgramVariant>{}(usage.variant);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace std
|
||||
|
||||
namespace OpenGL {
|
||||
|
||||
/// Describes a shader how it's used by the guest GPU
|
||||
class ShaderDiskCacheRaw {
|
||||
public:
|
||||
explicit ShaderDiskCacheRaw(u64 unique_identifier, Tegra::Engines::ShaderType type,
|
||||
ProgramCode code, ProgramCode code_b = {});
|
||||
ShaderDiskCacheRaw();
|
||||
~ShaderDiskCacheRaw();
|
||||
/// Describes a shader and how it's used by the guest GPU
|
||||
struct ShaderDiskCacheEntry {
|
||||
ShaderDiskCacheEntry();
|
||||
~ShaderDiskCacheEntry();
|
||||
|
||||
bool Load(FileUtil::IOFile& file);
|
||||
|
||||
bool Save(FileUtil::IOFile& file) const;
|
||||
|
||||
u64 GetUniqueIdentifier() const {
|
||||
return unique_identifier;
|
||||
}
|
||||
|
||||
bool HasProgramA() const {
|
||||
return !code.empty() && !code_b.empty();
|
||||
}
|
||||
|
||||
Tegra::Engines::ShaderType GetType() const {
|
||||
return type;
|
||||
}
|
||||
|
||||
const ProgramCode& GetCode() const {
|
||||
return code;
|
||||
}
|
||||
|
||||
const ProgramCode& GetCodeB() const {
|
||||
return code_b;
|
||||
}
|
||||
|
||||
private:
|
||||
u64 unique_identifier{};
|
||||
Tegra::Engines::ShaderType type{};
|
||||
ProgramCode code;
|
||||
ProgramCode code_b;
|
||||
|
||||
u64 unique_identifier = 0;
|
||||
u32 bound_buffer = 0;
|
||||
std::optional<u32> texture_handler_size;
|
||||
VideoCommon::Shader::KeyMap keys;
|
||||
VideoCommon::Shader::BoundSamplerMap bound_samplers;
|
||||
VideoCommon::Shader::BindlessSamplerMap bindless_samplers;
|
||||
};
|
||||
|
||||
/// Contains an OpenGL dumped binary program
|
||||
struct ShaderDiskCacheDump {
|
||||
GLenum binary_format{};
|
||||
struct ShaderDiskCachePrecompiled {
|
||||
u64 unique_identifier = 0;
|
||||
GLenum binary_format = 0;
|
||||
std::vector<u8> binary;
|
||||
};
|
||||
|
||||
|
@ -174,11 +71,10 @@ public:
|
|||
~ShaderDiskCacheOpenGL();
|
||||
|
||||
/// Loads transferable cache. If file has a old version or on failure, it deletes the file.
|
||||
std::optional<std::pair<std::vector<ShaderDiskCacheRaw>, std::vector<ShaderDiskCacheUsage>>>
|
||||
LoadTransferable();
|
||||
std::optional<std::vector<ShaderDiskCacheEntry>> LoadTransferable();
|
||||
|
||||
/// Loads current game's precompiled cache. Invalidates on failure.
|
||||
std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump> LoadPrecompiled();
|
||||
std::vector<ShaderDiskCachePrecompiled> LoadPrecompiled();
|
||||
|
||||
/// Removes the transferable (and precompiled) cache file.
|
||||
void InvalidateTransferable();
|
||||
|
@ -187,21 +83,18 @@ public:
|
|||
void InvalidatePrecompiled();
|
||||
|
||||
/// Saves a raw dump to the transferable file. Checks for collisions.
|
||||
void SaveRaw(const ShaderDiskCacheRaw& entry);
|
||||
|
||||
/// Saves shader usage to the transferable file. Does not check for collisions.
|
||||
void SaveUsage(const ShaderDiskCacheUsage& usage);
|
||||
void SaveEntry(const ShaderDiskCacheEntry& entry);
|
||||
|
||||
/// Saves a dump entry to the precompiled file. Does not check for collisions.
|
||||
void SaveDump(const ShaderDiskCacheUsage& usage, GLuint program);
|
||||
void SavePrecompiled(u64 unique_identifier, GLuint program);
|
||||
|
||||
/// Serializes virtual precompiled shader cache file to real file
|
||||
void SaveVirtualPrecompiledFile();
|
||||
|
||||
private:
|
||||
/// Loads the transferable cache. Returns empty on failure.
|
||||
std::optional<std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>>
|
||||
LoadPrecompiledFile(FileUtil::IOFile& file);
|
||||
std::optional<std::vector<ShaderDiskCachePrecompiled>> LoadPrecompiledFile(
|
||||
FileUtil::IOFile& file);
|
||||
|
||||
/// Opens current game's transferable file and write it's header if it doesn't exist
|
||||
FileUtil::IOFile AppendTransferableFile() const;
|
||||
|
@ -270,7 +163,7 @@ private:
|
|||
std::size_t precompiled_cache_virtual_file_offset = 0;
|
||||
|
||||
// Stored transferable shaders
|
||||
std::unordered_map<u64, std::unordered_set<ShaderDiskCacheUsage>> transferable;
|
||||
std::unordered_set<u64> stored_transferable;
|
||||
|
||||
// The cache has been loaded at boot
|
||||
bool is_usable{};
|
||||
|
|
|
@ -1,109 +0,0 @@
|
|||
// Copyright 2018 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <string>
|
||||
|
||||
#include <fmt/format.h>
|
||||
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/engines/shader_type.h"
|
||||
#include "video_core/renderer_opengl/gl_device.h"
|
||||
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
|
||||
#include "video_core/renderer_opengl/gl_shader_gen.h"
|
||||
#include "video_core/shader/shader_ir.h"
|
||||
|
||||
namespace OpenGL::GLShader {
|
||||
|
||||
using Tegra::Engines::Maxwell3D;
|
||||
using Tegra::Engines::ShaderType;
|
||||
using VideoCommon::Shader::CompileDepth;
|
||||
using VideoCommon::Shader::CompilerSettings;
|
||||
using VideoCommon::Shader::ProgramCode;
|
||||
using VideoCommon::Shader::ShaderIR;
|
||||
|
||||
std::string GenerateVertexShader(const Device& device, const ShaderIR& ir, const ShaderIR* ir_b) {
|
||||
std::string out = GetCommonDeclarations();
|
||||
out += fmt::format(R"(
|
||||
layout (std140, binding = {}) uniform vs_config {{
|
||||
float y_direction;
|
||||
}};
|
||||
|
||||
)",
|
||||
EmulationUniformBlockBinding);
|
||||
out += Decompile(device, ir, ShaderType::Vertex, "vertex");
|
||||
if (ir_b) {
|
||||
out += Decompile(device, *ir_b, ShaderType::Vertex, "vertex_b");
|
||||
}
|
||||
|
||||
out += R"(
|
||||
void main() {
|
||||
gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
execute_vertex();
|
||||
)";
|
||||
if (ir_b) {
|
||||
out += " execute_vertex_b();";
|
||||
}
|
||||
out += "}\n";
|
||||
return out;
|
||||
}
|
||||
|
||||
std::string GenerateGeometryShader(const Device& device, const ShaderIR& ir) {
|
||||
std::string out = GetCommonDeclarations();
|
||||
out += fmt::format(R"(
|
||||
layout (std140, binding = {}) uniform gs_config {{
|
||||
float y_direction;
|
||||
}};
|
||||
|
||||
)",
|
||||
EmulationUniformBlockBinding);
|
||||
out += Decompile(device, ir, ShaderType::Geometry, "geometry");
|
||||
|
||||
out += R"(
|
||||
void main() {
|
||||
execute_geometry();
|
||||
}
|
||||
)";
|
||||
return out;
|
||||
}
|
||||
|
||||
std::string GenerateFragmentShader(const Device& device, const ShaderIR& ir) {
|
||||
std::string out = GetCommonDeclarations();
|
||||
out += fmt::format(R"(
|
||||
layout (location = 0) out vec4 FragColor0;
|
||||
layout (location = 1) out vec4 FragColor1;
|
||||
layout (location = 2) out vec4 FragColor2;
|
||||
layout (location = 3) out vec4 FragColor3;
|
||||
layout (location = 4) out vec4 FragColor4;
|
||||
layout (location = 5) out vec4 FragColor5;
|
||||
layout (location = 6) out vec4 FragColor6;
|
||||
layout (location = 7) out vec4 FragColor7;
|
||||
|
||||
layout (std140, binding = {}) uniform fs_config {{
|
||||
float y_direction;
|
||||
}};
|
||||
|
||||
)",
|
||||
EmulationUniformBlockBinding);
|
||||
out += Decompile(device, ir, ShaderType::Fragment, "fragment");
|
||||
|
||||
out += R"(
|
||||
void main() {
|
||||
execute_fragment();
|
||||
}
|
||||
)";
|
||||
return out;
|
||||
}
|
||||
|
||||
std::string GenerateComputeShader(const Device& device, const ShaderIR& ir) {
|
||||
std::string out = GetCommonDeclarations();
|
||||
out += Decompile(device, ir, ShaderType::Compute, "compute");
|
||||
out += R"(
|
||||
void main() {
|
||||
execute_compute();
|
||||
}
|
||||
)";
|
||||
return out;
|
||||
}
|
||||
|
||||
} // namespace OpenGL::GLShader
|
|
@ -1,34 +0,0 @@
|
|||
// Copyright 2018 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
|
||||
#include "video_core/shader/shader_ir.h"
|
||||
|
||||
namespace OpenGL {
|
||||
class Device;
|
||||
}
|
||||
|
||||
namespace OpenGL::GLShader {
|
||||
|
||||
using VideoCommon::Shader::ProgramCode;
|
||||
using VideoCommon::Shader::ShaderIR;
|
||||
|
||||
/// Generates the GLSL vertex shader program source code for the given VS program
|
||||
std::string GenerateVertexShader(const Device& device, const ShaderIR& ir, const ShaderIR* ir_b);
|
||||
|
||||
/// Generates the GLSL geometry shader program source code for the given GS program
|
||||
std::string GenerateGeometryShader(const Device& device, const ShaderIR& ir);
|
||||
|
||||
/// Generates the GLSL fragment shader program source code for the given FS program
|
||||
std::string GenerateFragmentShader(const Device& device, const ShaderIR& ir);
|
||||
|
||||
/// Generates the GLSL compute shader program source code for the given CS program
|
||||
std::string GenerateComputeShader(const Device& device, const ShaderIR& ir);
|
||||
|
||||
} // namespace OpenGL::GLShader
|
|
@ -14,8 +14,9 @@ namespace VideoCommon::Shader {
|
|||
|
||||
using Tegra::Engines::SamplerDescriptor;
|
||||
|
||||
ConstBufferLocker::ConstBufferLocker(Tegra::Engines::ShaderType shader_stage)
|
||||
: stage{shader_stage} {}
|
||||
ConstBufferLocker::ConstBufferLocker(Tegra::Engines::ShaderType shader_stage,
|
||||
VideoCore::GuestDriverProfile stored_guest_driver_profile)
|
||||
: stage{shader_stage}, stored_guest_driver_profile{stored_guest_driver_profile} {}
|
||||
|
||||
ConstBufferLocker::ConstBufferLocker(Tegra::Engines::ShaderType shader_stage,
|
||||
Tegra::Engines::ConstBufferEngineInterface& engine)
|
||||
|
@ -97,7 +98,7 @@ void ConstBufferLocker::SetBoundBuffer(u32 buffer) {
|
|||
|
||||
bool ConstBufferLocker::IsConsistent() const {
|
||||
if (!engine) {
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
return std::all_of(keys.begin(), keys.end(),
|
||||
[this](const auto& pair) {
|
||||
|
|
|
@ -26,7 +26,8 @@ using BindlessSamplerMap =
|
|||
*/
|
||||
class ConstBufferLocker {
|
||||
public:
|
||||
explicit ConstBufferLocker(Tegra::Engines::ShaderType shader_stage);
|
||||
explicit ConstBufferLocker(Tegra::Engines::ShaderType shader_stage,
|
||||
VideoCore::GuestDriverProfile stored_guest_driver_profile);
|
||||
|
||||
explicit ConstBufferLocker(Tegra::Engines::ShaderType shader_stage,
|
||||
Tegra::Engines::ConstBufferEngineInterface& engine);
|
||||
|
@ -83,15 +84,13 @@ public:
|
|||
}
|
||||
|
||||
/// Obtains access to the guest driver's profile.
|
||||
VideoCore::GuestDriverProfile* AccessGuestDriverProfile() const {
|
||||
if (engine) {
|
||||
return &engine->AccessGuestDriverProfile();
|
||||
}
|
||||
return nullptr;
|
||||
VideoCore::GuestDriverProfile& AccessGuestDriverProfile() {
|
||||
return engine ? engine->AccessGuestDriverProfile() : stored_guest_driver_profile;
|
||||
}
|
||||
|
||||
private:
|
||||
const Tegra::Engines::ShaderType stage;
|
||||
VideoCore::GuestDriverProfile stored_guest_driver_profile;
|
||||
Tegra::Engines::ConstBufferEngineInterface* engine = nullptr;
|
||||
KeyMap keys;
|
||||
BoundSamplerMap bound_samplers;
|
||||
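A standalone sketch of the accessor change above, with all types reduced to stand-ins: the locker now always has a profile to hand out, either the engine's live one or the copy restored from the disk cache, so callers no longer need a null check.

#include <cstdio>

struct GuestDriverProfile {
    unsigned texture_handler_size = 4; // illustrative value only
};

struct Engine {
    GuestDriverProfile profile;
    GuestDriverProfile& AccessGuestDriverProfile() { return profile; }
};

class ConstBufferLocker {
public:
    explicit ConstBufferLocker(Engine* engine) : engine{engine} {}

    GuestDriverProfile& AccessGuestDriverProfile() {
        return engine ? engine->AccessGuestDriverProfile() : stored_guest_driver_profile;
    }

private:
    GuestDriverProfile stored_guest_driver_profile;
    Engine* engine = nullptr;
};

int main() {
    ConstBufferLocker offline{nullptr}; // e.g. rebuilding shaders from the disk cache
    std::printf("%u\n", offline.AccessGuestDriverProfile().texture_handler_size);
}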
|
|
|
@ -34,13 +34,9 @@ constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
|
|||
return (absolute_offset % SchedPeriod) == 0;
|
||||
}
|
||||
|
||||
void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile* gpu_driver,
|
||||
void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
|
||||
const std::list<Sampler>& used_samplers) {
|
||||
if (gpu_driver == nullptr) {
|
||||
LOG_CRITICAL(HW_GPU, "GPU driver profile has not been created yet");
|
||||
return;
|
||||
}
|
||||
if (gpu_driver->TextureHandlerSizeKnown() || used_samplers.size() <= 1) {
|
||||
if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
|
||||
return;
|
||||
}
|
||||
u32 count{};
|
||||
|
@ -53,17 +49,13 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile* gpu_driver,
|
|||
bound_offsets.emplace_back(sampler.GetOffset());
|
||||
}
|
||||
if (count > 1) {
|
||||
gpu_driver->DeduceTextureHandlerSize(std::move(bound_offsets));
|
||||
gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
|
||||
VideoCore::GuestDriverProfile* gpu_driver,
|
||||
VideoCore::GuestDriverProfile& gpu_driver,
|
||||
const std::list<Sampler>& used_samplers) {
|
||||
if (gpu_driver == nullptr) {
|
||||
LOG_CRITICAL(HW_GPU, "GPU Driver profile has not been created yet");
|
||||
return std::nullopt;
|
||||
}
|
||||
const u32 base_offset = sampler_to_deduce.GetOffset();
|
||||
u32 max_offset{std::numeric_limits<u32>::max()};
|
||||
for (const auto& sampler : used_samplers) {
|
||||
|
@ -77,7 +69,7 @@ std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
|
|||
if (max_offset == std::numeric_limits<u32>::max()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return ((max_offset - base_offset) * 4) / gpu_driver->GetTextureHandlerSize();
|
||||
return ((max_offset - base_offset) * 4) / gpu_driver.GetTextureHandlerSize();
|
||||
}
|
||||
|
||||
} // Anonymous namespace
|
||||
|
|
|
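A worked example of the deduction above with made-up numbers: sampler offsets are 32-bit constant-buffer word offsets, the texture handler size is in bytes, and the result is how many handlers fit between this sampler and the closest one bound after it.

#include <cstdio>

int main() {
    const unsigned base_offset = 48;         // offset of the sampler being deduced
    const unsigned max_offset = 56;          // closest larger offset among used samplers
    const unsigned texture_handler_size = 8; // illustrative; comes from GuestDriverProfile
    const unsigned deduced_size = ((max_offset - base_offset) * 4) / texture_handler_size;
    std::printf("%u\n", deduced_size);       // (8 words * 4 bytes) / 8 bytes = 4
}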
@ -94,13 +94,10 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons
|
|||
}
|
||||
auto [gpr, base_offset] = *pair;
|
||||
const auto offset_inm = std::get_if<ImmediateNode>(&*base_offset);
|
||||
auto gpu_driver = locker.AccessGuestDriverProfile();
|
||||
if (gpu_driver == nullptr) {
|
||||
return {};
|
||||
}
|
||||
const auto& gpu_driver = locker.AccessGuestDriverProfile();
|
||||
const u32 bindless_cv = NewCustomVariable();
|
||||
const Node op = Operation(OperationCode::UDiv, NO_PRECISE, gpr,
|
||||
Immediate(gpu_driver->GetTextureHandlerSize()));
|
||||
const Node op =
|
||||
Operation(OperationCode::UDiv, gpr, Immediate(gpu_driver.GetTextureHandlerSize()));
|
||||
|
||||
const Node cv_node = GetCustomVariable(bindless_cv);
|
||||
Node amend_op = Operation(OperationCode::Assign, cv_node, std::move(op));
|
||||