Mirror of https://github.com/yuzu-emu/yuzu-android, synced 2024-11-22 04:03:35 +00:00
Merge pull request #12608 from szepeviktor/typos
Fix typos in video_core
Commit 82b58668ed
20 changed files with 56 additions and 56 deletions
@@ -59,8 +59,8 @@ public:
         return start_address;
     }

-    [[nodiscard]] bool IsPropietaryDriver() const noexcept {
-        return is_propietary_driver;
+    [[nodiscard]] bool IsProprietaryDriver() const noexcept {
+        return is_proprietary_driver;
     }

 protected:

@@ -68,7 +68,7 @@ protected:
     std::array<u32, 8> gp_passthrough_mask{};
     Stage stage{};
     u32 start_address{};
-    bool is_propietary_driver{};
+    bool is_proprietary_driver{};
 };

 } // namespace Shader

@@ -1084,7 +1084,7 @@ void ConstantPropagation(Environment& env, IR::Block& block, IR::Inst& inst) {
         if (env.HasHLEMacroState()) {
             FoldConstBuffer(env, block, inst);
         }
-        if (env.IsPropietaryDriver()) {
+        if (env.IsProprietaryDriver()) {
             FoldDriverConstBuffer(env, block, inst, 1);
         }
         break;
@@ -267,10 +267,10 @@ private:
         top_tier[page_index] = GetNewManager(base_cpu_addr);
     }

-    Manager* GetNewManager(VAddr base_cpu_addess) {
+    Manager* GetNewManager(VAddr base_cpu_address) {
         const auto on_return = [&] {
             auto* new_manager = free_managers.front();
-            new_manager->SetCpuAddress(base_cpu_addess);
+            new_manager->SetCpuAddress(base_cpu_address);
             free_managers.pop_front();
             return new_manager;
         };

@@ -85,12 +85,12 @@ protected:
     std::deque<size_t> free_channel_ids;
     std::unordered_map<s32, size_t> channel_map;
     std::vector<size_t> active_channel_ids;
-    struct AddresSpaceRef {
+    struct AddressSpaceRef {
         size_t ref_count;
         size_t storage_id;
         Tegra::MemoryManager* gpu_memory;
     };
-    std::unordered_map<size_t, AddresSpaceRef> address_spaces;
+    std::unordered_map<size_t, AddressSpaceRef> address_spaces;
     mutable std::mutex config_mutex;

     virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {}
@@ -38,7 +38,7 @@ void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& c
         as_it->second.ref_count++;
         return;
     }
-    AddresSpaceRef new_gpu_mem_ref{
+    AddressSpaceRef new_gpu_mem_ref{
         .ref_count = 1,
         .storage_id = address_spaces.size(),
         .gpu_memory = channel.memory_manager.get(),
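
The two hunks above rename the per-address-space bookkeeping record (AddressSpaceRef) and the site that registers a channel against it. As a rough, self-contained sketch of that pattern, using stand-in types and a hypothetical helper name rather than the actual video_core code:

    #include <cstddef>
    #include <unordered_map>

    // Stand-ins for illustration only; the real types live in video_core.
    struct MemoryManager {};
    struct AddressSpaceRef {
        std::size_t ref_count;
        std::size_t storage_id;
        MemoryManager* gpu_memory;
    };

    // Hypothetical registration flow: bump the ref count of a known address
    // space, or create a fresh AddressSpaceRef for a newly seen one.
    void RegisterAddressSpace(std::unordered_map<std::size_t, AddressSpaceRef>& address_spaces,
                              std::size_t gpu_as_id, MemoryManager* memory_manager) {
        const auto as_it = address_spaces.find(gpu_as_id);
        if (as_it != address_spaces.end()) {
            as_it->second.ref_count++;
            return;
        }
        address_spaces.emplace(gpu_as_id, AddressSpaceRef{
                                              .ref_count = 1,
                                              .storage_id = address_spaces.size(),
                                              .gpu_memory = memory_manager,
                                          });
    }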
@@ -958,7 +958,7 @@ public:
     enum class ClearReport : u32 {
         ZPassPixelCount = 0x01,
         ZCullStats = 0x02,
-        StreamingPrimitvesNeededMinusSucceeded = 0x03,
+        StreamingPrimitivesNeededMinusSucceeded = 0x03,
         AlphaBetaClocks = 0x04,
         StreamingPrimitivesSucceeded = 0x10,
         StreamingPrimitivesNeeded = 0x11,

@@ -2383,8 +2383,8 @@ public:
     };

     enum class Release : u32 {
-        AfterAllPreceedingReads = 0,
-        AfterAllPreceedingWrites = 1,
+        AfterAllPrecedingReads = 0,
+        AfterAllPrecedingWrites = 1,
     };

     enum class Acquire : u32 {

@@ -2869,7 +2869,7 @@ public:
         u32 global_base_instance_index; ///< 0x1438
         INSERT_PADDING_BYTES_NOINIT(0x14);
         RegisterWatermarks ps_warp_watermarks; ///< 0x1450
-        RegisterWatermarks ps_regster_watermarks; ///< 0x1454
+        RegisterWatermarks ps_register_watermarks; ///< 0x1454
         INSERT_PADDING_BYTES_NOINIT(0xC);
         u32 store_zcull; ///< 0x1464
         INSERT_PADDING_BYTES_NOINIT(0x18);

@@ -3444,7 +3444,7 @@ ASSERT_REG_POSITION(invalidate_texture_header_cache_no_wfi, 0x1428);
 ASSERT_REG_POSITION(global_base_vertex_index, 0x1434);
 ASSERT_REG_POSITION(global_base_instance_index, 0x1438);
 ASSERT_REG_POSITION(ps_warp_watermarks, 0x1450);
-ASSERT_REG_POSITION(ps_regster_watermarks, 0x1454);
+ASSERT_REG_POSITION(ps_register_watermarks, 0x1454);
 ASSERT_REG_POSITION(store_zcull, 0x1464);
 ASSERT_REG_POSITION(iterated_blend_constants, 0x1480);
 ASSERT_REG_POSITION(load_zcull, 0x1500);
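
The last two hunks have to change together: the field in the register block and the ASSERT_REG_POSITION line that pins it to its documented offset use the same identifier, so renaming one without the other would not compile. A simplified illustration of the idea, with a toy two-register struct and a byte-offset check (the actual macro in the Maxwell register definitions differs in detail):

    #include <cstddef>
    #include <cstdint>

    // Toy register block: two 32-bit registers at fixed byte offsets.
    struct Regs {
        std::uint32_t ps_warp_watermarks;     // byte offset 0x0
        std::uint32_t ps_register_watermarks; // byte offset 0x4
    };

    // Same idea as ASSERT_REG_POSITION: break the build if a renamed or
    // reordered field no longer sits at its documented offset.
    #define ASSERT_REG_POSITION(field_name, position) \
        static_assert(offsetof(Regs, field_name) == (position), "wrong offset for " #field_name)

    ASSERT_REG_POSITION(ps_warp_watermarks, 0x0);
    ASSERT_REG_POSITION(ps_register_watermarks, 0x4);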
@@ -171,12 +171,12 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
     const bool no_passthrough =
         src.format != dst.format || src_extent_x != dst_extent_x || src_extent_y != dst_extent_y;

-    const auto convertion_phase_same_format = [&]() {
+    const auto conversion_phase_same_format = [&]() {
         NearestNeighbor(impl->src_buffer, impl->dst_buffer, src_extent_x, src_extent_y,
                         dst_extent_x, dst_extent_y, dst_bytes_per_pixel);
     };

-    const auto convertion_phase_ir = [&]() {
+    const auto conversion_phase_ir = [&]() {
         auto* input_converter = impl->converter_factory.GetFormatConverter(src.format);
         impl->intermediate_src.resize_destructive((src_copy_size / src_bytes_per_pixel) *
                                                   ir_components);

@@ -211,9 +211,9 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
     // Conversion Phase
     if (no_passthrough) {
         if (src.format != dst.format || config.filter == Fermi2D::Filter::Bilinear) {
-            convertion_phase_ir();
+            conversion_phase_ir();
         } else {
-            convertion_phase_same_format();
+            conversion_phase_same_format();
         }
     } else {
         impl->dst_buffer.swap(impl->src_buffer);

@@ -42,7 +42,7 @@ private:
         u8 raw;
         BitField<0, 2, u8> tile_format;
         BitField<2, 3, u8> gob_height;
-        BitField<5, 3, u8> reserverd_surface_format;
+        BitField<5, 3, u8> reserved_surface_format;
     };
     u8 error_conceal_on; // 1: error conceal on; 0: off
     u32 first_part_size; // the size of first partition(frame header and mb header partition)
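
The renamed member above lives in a BitField-packed descriptor: the three fields carve one byte into bits 0-1 (tile format), 2-4 (GOB height) and 5-7 (reserved surface format), all aliasing the same raw value. A plain C++ equivalent of that layout, written out only to make the bit positions concrete (yuzu's BitField template is more general than this):

    #include <cstdint>

    // Manual equivalent of the packed descriptor: each accessor extracts
    // <position, bits> from the shared byte, mirroring BitField<pos, bits, u8>.
    struct SurfaceDescriptor {
        std::uint8_t raw;

        std::uint8_t TileFormat() const { return raw & 0x03; }                   // bits 0-1
        std::uint8_t GobHeight() const { return (raw >> 2) & 0x07; }             // bits 2-4
        std::uint8_t ReservedSurfaceFormat() const { return (raw >> 5) & 0x07; } // bits 5-7
    };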
@@ -803,7 +803,7 @@ void UnquantizeTexelWeights(uvec2 size, bool is_dual_plane) {
     }
 }

-uint GetUnquantizedTexelWieght(uint offset_base, uint plane, bool is_dual_plane) {
+uint GetUnquantizedTexelWeight(uint offset_base, uint plane, bool is_dual_plane) {
     const uint offset = is_dual_plane ? 2 * offset_base + plane : offset_base;
     return result_vector[offset];
 }

@@ -833,23 +833,23 @@ uvec4 GetUnquantizedWeightVector(uint t, uint s, uvec2 size, uint plane_index, b

     if (v0 < area) {
         const uint offset_base = v0;
-        p0.x = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
-        p1.x = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+        p0.x = GetUnquantizedTexelWeight(offset_base, 0, is_dual_plane);
+        p1.x = GetUnquantizedTexelWeight(offset_base, 1, is_dual_plane);
     }
     if ((v0 + 1) < (area)) {
         const uint offset_base = v0 + 1;
-        p0.y = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
-        p1.y = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+        p0.y = GetUnquantizedTexelWeight(offset_base, 0, is_dual_plane);
+        p1.y = GetUnquantizedTexelWeight(offset_base, 1, is_dual_plane);
     }
     if ((v0 + size.x) < (area)) {
         const uint offset_base = v0 + size.x;
-        p0.z = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
-        p1.z = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+        p0.z = GetUnquantizedTexelWeight(offset_base, 0, is_dual_plane);
+        p1.z = GetUnquantizedTexelWeight(offset_base, 1, is_dual_plane);
     }
     if ((v0 + size.x + 1) < (area)) {
         const uint offset_base = v0 + size.x + 1;
-        p0.w = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
-        p1.w = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+        p0.w = GetUnquantizedTexelWeight(offset_base, 0, is_dual_plane);
+        p1.w = GetUnquantizedTexelWeight(offset_base, 1, is_dual_plane);
     }

     const uint primary_weight = (uint(dot(p0, w)) + 8) >> 4;
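
The last context line of this hunk is the fixed-point blend that consumes the renamed helper: assuming the usual ASTC convention that the four bilinear infill factors in w sum to 16, adding 8 before the right shift by 4 makes the division by 16 round to nearest. A small C++ check of that arithmetic (the GLSL above remains the authoritative version):

    #include <cassert>
    #include <cstdint>

    // (dot + 8) >> 4: divide the weighted sum by 16 with rounding to nearest,
    // matching `(uint(dot(p0, w)) + 8) >> 4` in the decoder.
    std::uint32_t BlendWeights(const std::uint32_t p[4], const std::uint32_t w[4]) {
        std::uint32_t dot = 0;
        for (int i = 0; i < 4; ++i) {
            dot += p[i] * w[i];
        }
        return (dot + 8) >> 4;
    }

    int main() {
        const std::uint32_t p[4] = {10, 20, 30, 40};
        const std::uint32_t w[4] = {4, 4, 4, 4}; // factors sum to 16, so this is a plain average
        assert(BlendWeights(p, w) == 25);        // (400 + 8) >> 4 == 25
        return 0;
    }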
@@ -269,7 +269,7 @@ void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type
         ASSERT(false);
         return;
     }
-    query_base->value += streamer->GetAmmendValue();
+    query_base->value += streamer->GetAmendValue();
     streamer->SetAccumulationValue(query_base->value);
     if (True(query_base->flags & QueryFlagBits::HasTimestamp)) {
         u64 timestamp = impl->gpu.GetTicks();

@@ -78,12 +78,12 @@ public:
         return dependence_mask;
     }

-    u64 GetAmmendValue() const {
-        return ammend_value;
+    u64 GetAmendValue() const {
+        return amend_value;
     }

     void SetAccumulationValue(u64 new_value) {
-        acumulation_value = new_value;
+        accumulation_value = new_value;
     }

 protected:

@@ -95,8 +95,8 @@ protected:
     const size_t id;
     u64 dependence_mask;
     u64 dependent_mask;
-    u64 ammend_value{};
-    u64 acumulation_value{};
+    u64 amend_value{};
+    u64 accumulation_value{};
 };

 template <typename QueryType>
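
The amend/accumulation pair renamed in these hunks follows the convention visible in the CounterReport hunk above: a reported counter is first bumped by the pending amend value, and the running total is then written back as the accumulation value. A compact sketch of that pairing, with a hypothetical StreamerSketch class standing in for the real streamer interface:

    #include <cstdint>

    // Hypothetical stand-in for the renamed members and accessors.
    class StreamerSketch {
    public:
        std::uint64_t GetAmendValue() const {
            return amend_value;
        }
        void SetAccumulationValue(std::uint64_t new_value) {
            accumulation_value = new_value;
        }

        // Mirrors the usage in CounterReport: fold the pending amend value into
        // a query result, then record the running total.
        std::uint64_t Report(std::uint64_t query_value) {
            query_value += GetAmendValue();
            SetAccumulationValue(query_value);
            return query_value;
        }

    private:
        std::uint64_t amend_value{};
        std::uint64_t accumulation_value{};
    };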
@@ -231,10 +231,10 @@ void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) {

 void FixedPipelineState::DynamicState::Refresh2(const Maxwell& regs,
                                                 Maxwell::PrimitiveTopology topology_,
-                                                bool base_feautures_supported) {
+                                                bool base_features_supported) {
     logic_op.Assign(PackLogicOp(regs.logic_op.op));

-    if (base_feautures_supported) {
+    if (base_features_supported) {
         return;
     }

@@ -165,7 +165,7 @@ struct FixedPipelineState {

     void Refresh(const Maxwell& regs);
     void Refresh2(const Maxwell& regs, Maxwell::PrimitiveTopology topology,
-                  bool base_feautures_supported);
+                  bool base_features_supported);
     void Refresh3(const Maxwell& regs);

     Maxwell::ComparisonOp DepthTestFunc() const noexcept {

@@ -130,7 +130,7 @@ private:
     vk::DescriptorPool descriptor_pool;
     vk::DescriptorSetLayout descriptor_set_layout;
     vk::PipelineLayout pipeline_layout;
-    vk::Pipeline nearest_neightbor_pipeline;
+    vk::Pipeline nearest_neighbor_pipeline;
     vk::Pipeline bilinear_pipeline;
     vk::Pipeline bicubic_pipeline;
     vk::Pipeline gaussian_pipeline;
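
The renamed nearest_neighbor_pipeline is one of four per-filter pipelines held side by side; how the owning class chooses between them is outside this diff. Purely as a hypothetical illustration of the shape such a selector usually takes (the Filter enum, struct and function below are invented, not yuzu code):

    // Hypothetical example only: pick a pre-built pipeline handle by scaling filter.
    enum class Filter { NearestNeighbor, Bilinear, Bicubic, Gaussian };

    struct FilterPipelines {
        int nearest_neighbor_pipeline; // ints stand in for vk::Pipeline handles
        int bilinear_pipeline;
        int bicubic_pipeline;
        int gaussian_pipeline;
    };

    int SelectPipeline(const FilterPipelines& p, Filter filter) {
        switch (filter) {
        case Filter::NearestNeighbor:
            return p.nearest_neighbor_pipeline;
        case Filter::Bicubic:
            return p.bicubic_pipeline;
        case Filter::Gaussian:
            return p.gaussian_pipeline;
        case Filter::Bilinear:
        default:
            return p.bilinear_pipeline;
        }
    }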
@@ -120,8 +120,8 @@ public:
           scheduler{scheduler_}, memory_allocator{memory_allocator_} {
         current_bank = nullptr;
         current_query = nullptr;
-        ammend_value = 0;
-        acumulation_value = 0;
+        amend_value = 0;
+        accumulation_value = 0;
         queries_prefix_scan_pass = std::make_unique<QueriesPrefixScanPass>(
             device, scheduler, descriptor_pool, compute_pass_descriptor_queue);

@@ -176,8 +176,8 @@ public:
         }
         AbandonCurrentQuery();
         std::function<void()> func([this, counts = pending_flush_queries.size()] {
-            ammend_value = 0;
-            acumulation_value = 0;
+            amend_value = 0;
+            accumulation_value = 0;
         });
         rasterizer->SyncOperation(std::move(func));
         accumulation_since_last_sync = false;

@@ -307,7 +307,7 @@ public:
         }

         ReplicateCurrentQueryIfNeeded();
-        std::function<void()> func([this] { ammend_value = acumulation_value; });
+        std::function<void()> func([this] { amend_value = accumulation_value; });
         rasterizer->SyncOperation(std::move(func));
         AbandonCurrentQuery();
         num_slots_used = 0;

@@ -512,7 +512,7 @@ private:
         pending_flush_queries.push_back(index);
         std::function<void()> func([this, index] {
             auto* query = GetQuery(index);
-            query->value += GetAmmendValue();
+            query->value += GetAmendValue();
             SetAccumulationValue(query->value);
             Free(index);
         });
@@ -1169,7 +1169,7 @@ struct QueryCacheRuntimeImpl {
           primitives_succeeded_streamer(
               static_cast<size_t>(QueryType::StreamingPrimitivesSucceeded), runtime, tfb_streamer,
               cpu_memory_),
-          primitives_needed_minus_suceeded_streamer(
+          primitives_needed_minus_succeeded_streamer(
               static_cast<size_t>(QueryType::StreamingPrimitivesNeededMinusSucceeded), runtime, 0u),
           hcr_setup{}, hcr_is_set{}, is_hcr_running{}, maxwell3d{} {

@@ -1208,7 +1208,7 @@ struct QueryCacheRuntimeImpl {
     SamplesStreamer sample_streamer;
     TFBCounterStreamer tfb_streamer;
     PrimitivesSucceededStreamer primitives_succeeded_streamer;
-    VideoCommon::StubStreamer<QueryCacheParams> primitives_needed_minus_suceeded_streamer;
+    VideoCommon::StubStreamer<QueryCacheParams> primitives_needed_minus_succeeded_streamer;

     std::vector<std::pair<VAddr, VAddr>> little_cache;
     std::vector<std::pair<VkBuffer, VkDeviceSize>> buffers_to_upload_to;

@@ -1433,7 +1433,7 @@ VideoCommon::StreamerInterface* QueryCacheRuntime::GetStreamerInterface(QueryTyp
     case QueryType::StreamingPrimitivesSucceeded:
         return &impl->primitives_succeeded_streamer;
     case QueryType::StreamingPrimitivesNeededMinusSucceeded:
-        return &impl->primitives_needed_minus_suceeded_streamer;
+        return &impl->primitives_needed_minus_succeeded_streamer;
     default:
         return nullptr;
     }
@@ -236,14 +236,14 @@ void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) {
     auto& entries = staging.entries;
     const size_t old_size = entries.size();

-    const auto is_deleteable = [this](const StagingBuffer& entry) {
+    const auto is_deletable = [this](const StagingBuffer& entry) {
         return scheduler.IsFree(entry.tick);
     };
     const size_t begin_offset = staging.delete_index;
     const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
     const auto begin = entries.begin() + begin_offset;
     const auto end = entries.begin() + end_offset;
-    entries.erase(std::remove_if(begin, end, is_deleteable), end);
+    entries.erase(std::remove_if(begin, end, is_deletable), end);

     const size_t new_size = entries.size();
     staging.delete_index += deletions_per_tick;

@@ -125,7 +125,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
         MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, false, info.format);
     VkImageCreateFlags flags{};
     if (info.type == ImageType::e2D && info.resources.layers >= 6 &&
-        info.size.width == info.size.height && !device.HasBrokenCubeImageCompability()) {
+        info.size.width == info.size.height && !device.HasBrokenCubeImageCompatibility()) {
         flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
     }
     if (info.type == ImageType::e3D) {
@@ -322,7 +322,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
     ASSERT(local_size <= std::numeric_limits<u32>::max());
     local_memory_size = static_cast<u32>(local_size) + sph.common3.shader_local_memory_crs_size;
     texture_bound = maxwell3d->regs.bindless_texture_const_buffer_slot;
-    is_propietary_driver = texture_bound == 2;
+    is_proprietary_driver = texture_bound == 2;
     has_hle_engine_state =
         maxwell3d->engine_state == Tegra::Engines::Maxwell3D::EngineHint::OnHLEMacro;
 }

@@ -404,7 +404,7 @@ ComputeEnvironment::ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_com
     stage = Shader::Stage::Compute;
     local_memory_size = qmd.local_pos_alloc + qmd.local_crs_alloc;
     texture_bound = kepler_compute->regs.tex_cb_index;
-    is_propietary_driver = texture_bound == 2;
+    is_proprietary_driver = texture_bound == 2;
     shared_memory_size = qmd.shared_alloc;
     workgroup_size = {qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z};
 }

@@ -509,7 +509,7 @@ void FileEnvironment::Deserialize(std::ifstream& file) {
             file.read(reinterpret_cast<char*>(&gp_passthrough_mask), sizeof(gp_passthrough_mask));
         }
     }
-    is_propietary_driver = texture_bound == 2;
+    is_proprietary_driver = texture_bound == 2;
 }

 void FileEnvironment::Dump(u64 pipeline_hash, u64 shader_hash) {
@@ -596,7 +596,7 @@ public:
     }

     /// Returns true when the device does not properly support cube compatibility.
-    bool HasBrokenCubeImageCompability() const {
+    bool HasBrokenCubeImageCompatibility() const {
         return has_broken_cube_compatibility;
     }

@@ -57,7 +57,7 @@ struct Range {
     return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 }

-[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePreferedVmaFlags(MemoryUsage usage) {
+[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePreferredVmaFlags(MemoryUsage usage) {
     return usage != MemoryUsage::DeviceLocal ? VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                                              : VkMemoryPropertyFlagBits{};
 }

@@ -256,7 +256,7 @@ vk::Buffer MemoryAllocator::CreateBuffer(const VkBufferCreateInfo& ci, MemoryUsa
         .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | MemoryUsageVmaFlags(usage),
         .usage = MemoryUsageVma(usage),
         .requiredFlags = 0,
-        .preferredFlags = MemoryUsagePreferedVmaFlags(usage),
+        .preferredFlags = MemoryUsagePreferredVmaFlags(usage),
         .memoryTypeBits = usage == MemoryUsage::Stream ? 0u : valid_memory_types,
         .pool = VK_NULL_HANDLE,
         .pUserData = nullptr,