rsx: Use strict bounds testing when replacing memory via blit engine

Authored by kd-11 on 2024-11-03 04:20:26 +03:00; committed by kd-11
parent 9afebfdd72
commit 3e427c57f0


@@ -137,7 +137,7 @@ namespace rsx
 	struct intersecting_set
 	{
-		std::vector<section_storage_type*> sections = {};
+		rsx::simple_array<section_storage_type*> sections = {};
 		address_range invalidate_range = {};
 		bool has_flushables = false;
 	};
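The container swap above matters mainly because the code added below leans on rsx::simple_array helpers that std::vector lacks. A minimal sketch, assuming only the semantics the diff relies on (erase_if() reporting whether anything was removed, any() testing a predicate); the real rsx::simple_array in RPCS3 is a separate, more featureful container:

// Sketch only: assumed semantics of the helpers used by the new code path.
#include <algorithm>
#include <utility>
#include <vector>

template <typename T>
struct simple_array_sketch
{
	std::vector<T> data;

	bool empty() const { return data.empty(); }

	// Remove every element matching the predicate; report whether any were removed.
	template <typename Pred>
	bool erase_if(Pred&& pred)
	{
		return std::erase_if(data, std::forward<Pred>(pred)) != 0;
	}

	// True if at least one element matches the predicate.
	template <typename Pred>
	bool any(Pred&& pred) const
	{
		return std::any_of(data.begin(), data.end(), pred);
	}
};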
@@ -925,13 +925,28 @@ namespace rsx
 		AUDIT(fault_range_in.valid());
 		address_range fault_range = fault_range_in.to_page_range();

-		const intersecting_set trampled_set = get_intersecting_set(fault_range);
+		intersecting_set trampled_set = get_intersecting_set(fault_range);

 		thrashed_set result = {};
 		result.cause = cause;
 		result.fault_range = fault_range;
 		result.invalidate_range = trampled_set.invalidate_range;

+		if (cause.use_strict_data_bounds())
+		{
+			// Drop all sections outside the actual target range. This is useful when we simply need to tag that we'll be updating some memory content on the CPU.
+			// But we don't really care about writeback or invalidation of anything outside the update range.
+			if (trampled_set.sections.erase_if(FN(!x->overlaps(fault_range_in, section_bounds::full_range))))
+			{
+				trampled_set.has_flushables = trampled_set.sections.any(FN(x->is_flushable()));
+			}
+		}
+
+		if (trampled_set.sections.empty())
+		{
+			return {};
+		}
+
 		// Fast code-path for keeping the fault range protection when not flushing anything
 		if (cause.keep_fault_range_protection() && cause.skip_flush() && !trampled_set.sections.empty())
 		{
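Read in isolation, the strict-bounds path does three things: drop every cached section that does not overlap the faulting write at full-range bounds, recompute has_flushables only if something was dropped, and return an empty result when nothing survives. A self-contained sketch under those assumptions, with a hypothetical section type standing in for section_storage_type and a plain byte-range test standing in for the overlaps(..., section_bounds::full_range) check:

// Sketch only: mirrors the control flow of the added block, not RPCS3's types.
#include <algorithm>
#include <cstdint>
#include <vector>

struct section
{
	std::uint32_t start;   // first byte covered by the cached section
	std::uint32_t length;  // size of the cached section in bytes
	bool flushable;

	bool overlaps(std::uint32_t fault_start, std::uint32_t fault_length) const
	{
		return start < fault_start + fault_length && fault_start < start + length;
	}
	bool is_flushable() const { return flushable; }
};

// Keep only sections that intersect the faulting write, refresh the flushable
// flag when something was dropped, and tell the caller whether anything is left.
bool filter_to_strict_bounds(std::vector<section*>& sections, bool& has_flushables,
                             std::uint32_t fault_start, std::uint32_t fault_length)
{
	const bool dropped = std::erase_if(sections, [&](const section* s)
	{
		return !s->overlaps(fault_start, fault_length);
	}) != 0;

	if (dropped)
	{
		has_flushables = std::any_of(sections.begin(), sections.end(),
		                             [](const section* s) { return s->is_flushable(); });
	}

	return !sections.empty(); // an empty set means the caller returns {} early
}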
@@ -998,8 +1013,6 @@ namespace rsx
 		// Decide which sections to flush, unprotect, and exclude
-		if (!trampled_set.sections.empty())
-		{
 			update_cache_tag();

 			for (auto &obj : trampled_set.sections)
@@ -1136,9 +1149,6 @@ namespace rsx
 			return result;
 		}

-		return {};
-	}
-
 	public:

 		texture_cache() : m_storage(this), m_predictor(this) {}
@@ -2709,9 +2719,12 @@ namespace rsx
 			}
 			else
 			{
-				// Surface exists in local memory.
+				// Surface exists in main memory.
 				use_null_region = (is_copy_op && !is_format_convert);

+				// Now we have a blit write into main memory. This really could be anything, so we need to be careful here.
+				// If we have a pitched write, or a suspiciously large transfer, we likely have a valid write.
+
 				// Invalidate surfaces in range. Sample tests should catch overlaps in theory.
 				m_rtts.invalidate_range(utils::address_range::start_length(dst_address, dst.pitch * dst_h));
 			}
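The range handed to m_rtts.invalidate_range() above is simply the pitched footprint of the destination: dst.pitch * dst_h bytes starting at dst_address. A tiny illustration of that arithmetic; range_sketch is a hypothetical stand-in for utils::address_range, with only the start_length(start, length) shape taken from the diff:

// Sketch only: the byte span a pitched blit destination occupies.
#include <cstdint>

struct range_sketch
{
	std::uint32_t start;
	std::uint32_t length;

	static range_sketch start_length(std::uint32_t start, std::uint32_t length)
	{
		return { start, length };
	}
};

// A blit writing dst_h rows spaced dst_pitch bytes apart touches
// dst_pitch * dst_h bytes starting at the destination address.
inline range_sketch blit_destination_range(std::uint32_t dst_address,
                                           std::uint32_t dst_pitch,
                                           std::uint32_t dst_h)
{
	return range_sketch::start_length(dst_address, dst_pitch * dst_h);
}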
@@ -3227,7 +3240,7 @@ namespace rsx
 			// NOTE: Write flag set to remove all other overlapping regions (e.g shader_read or blit_src)
 			// NOTE: This step can potentially invalidate the newly created src image as well.
-			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::write, {}, std::forward<Args>(extras)...);
+			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::cause_is_write | invalidation_cause::cause_uses_strict_data_bounds, {}, std::forward<Args>(extras)...);

 			if (use_null_region) [[likely]]
 			{
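The call site now passes a combination of flags, cause_is_write | cause_uses_strict_data_bounds, and the invalidation handler earlier in this commit queries cause.use_strict_data_bounds() to enter the new path. A hedged sketch of how such a flags-based cause could be modelled; only the identifiers visible in the diff are taken from the source, the rest is illustrative and not RPCS3's actual invalidation_cause type:

// Sketch only: a flags-style cause with the two queries the diff exercises.
#include <cstdint>

struct cause_sketch
{
	enum : std::uint32_t
	{
		cause_is_write                = 1u << 0,
		cause_uses_strict_data_bounds = 1u << 1,
	};

	std::uint32_t flags = 0;

	constexpr cause_sketch(std::uint32_t f) : flags(f) {}

	constexpr bool is_write() const { return (flags & cause_is_write) != 0; }
	constexpr bool use_strict_data_bounds() const { return (flags & cause_uses_strict_data_bounds) != 0; }
};

// Matching the call site above, a blit-engine write into main memory would be tagged as
//   cause_sketch cause{ cause_sketch::cause_is_write | cause_sketch::cause_uses_strict_data_bounds };
// so the invalidation handler can restrict writeback/invalidation to the written range.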