diff --git a/3rdparty/curl/curl b/3rdparty/curl/curl
index 1c31498817..4dacb79fcd 160000
--- a/3rdparty/curl/curl
+++ b/3rdparty/curl/curl
@@ -1 +1 @@
-Subproject commit 1c3149881769e7bd79b072e48374e4c2b3678b2f
+Subproject commit 4dacb79fcdd9364c1083e06f6a011d797a344f47
diff --git a/3rdparty/curl/libcurl.vcxproj b/3rdparty/curl/libcurl.vcxproj
index 6c8cd23ffb..c4a96abdc4 100644
--- a/3rdparty/curl/libcurl.vcxproj
+++ b/3rdparty/curl/libcurl.vcxproj
diff --git a/3rdparty/curl/libcurl.vcxproj.filters b/3rdparty/curl/libcurl.vcxproj.filters
index 15fe93ce39..32eb05f40e 100644
--- a/3rdparty/curl/libcurl.vcxproj.filters
+++ b/3rdparty/curl/libcurl.vcxproj.filters
[hunks omitted: "Source Files" / "Header Files" filter entries removed and re-added for the updated curl sources; the Include paths did not survive extraction]
diff --git a/Utilities/address_range.h b/Utilities/address_range.h
index 71fbd4a382..8da0279ef1 100644
--- a/Utilities/address_range.h
+++ b/Utilities/address_range.h
@@ -8,125 +8,134 @@
namespace utils
{
+ template <typename T>
class address_range_vector;
/**
* Helpers
*/
- static inline u32 page_start(u32 addr)
+ template <typename T>
+ T page_start(T addr)
{
- return addr & ~(get_page_size() - 1);
+ return addr & ~static_cast<T>(get_page_size() - 1);
}
- static inline u32 next_page(u32 addr)
+ template <typename T>
+ static inline T next_page(T addr)
{
- return page_start(addr) + get_page_size();
+ return page_start(addr) + static_cast<T>(get_page_size());
}
- static inline u32 page_end(u32 addr)
+ template <typename T>
+ static inline T page_end(T addr)
{
return next_page(addr) - 1;
}
- static inline u32 is_page_aligned(u32 val)
+ template <typename T>
+ static inline T is_page_aligned(T val)
{
- return (val & (get_page_size() - 1)) == 0;
+ return (val & static_cast<T>(get_page_size() - 1)) == 0;
}
/**
* Address Range utility class
*/
+ template <typename T>
class address_range
{
public:
- u32 start = umax; // First address in range
- u32 end = 0; // Last address
+ T start = umax; // First address in range
+ T end = 0; // Last address
+
+ using signed_type_t = std::make_signed<T>::type;
private:
// Helper constexprs
- static constexpr inline bool range_overlaps(u32 start1, u32 end1, u32 start2, u32 end2)
+ static constexpr inline bool range_overlaps(T start1, T end1, T start2, T end2)
{
return (start1 <= end2 && start2 <= end1);
}
- static constexpr inline bool address_overlaps(u32 address, u32 start, u32 end)
+ static constexpr inline bool address_overlaps(T address, T start, T end)
{
return (start <= address && address <= end);
}
- static constexpr inline bool range_inside_range(u32 start1, u32 end1, u32 start2, u32 end2)
+ static constexpr inline bool range_inside_range(T start1, T end1, T start2, T end2)
{
return (start1 >= start2 && end1 <= end2);
}
- constexpr address_range(u32 _start, u32 _end) : start(_start), end(_end) {}
+ constexpr address_range(T _start, T _end) : start(_start), end(_end) {}
public:
// Constructors
constexpr address_range() = default;
- static constexpr address_range start_length(u32 _start, u32 _length)
+ static constexpr address_range start_length(T _start, T _length)
{
if (!_length)
{
return {};
}
- return {_start, _start + (_length - 1)};
+ const T _end = static_cast<T>(_start + _length - 1);
+ return {_start, _end};
}
- static constexpr address_range start_end(u32 _start, u32 _end)
+ static constexpr address_range start_end(T _start, T _end)
{
return {_start, _end};
}
// Length
- u32 length() const
+ T length() const
{
AUDIT(valid());
return end - start + 1;
}
- void set_length(const u32 new_length)
+ void set_length(const T new_length)
{
end = start + new_length - 1;
ensure(valid());
}
- u32 next_address() const
+ T next_address() const
{
return end + 1;
}
- u32 prev_address() const
+ T prev_address() const
{
return start - 1;
}
// Overlapping checks
- bool overlaps(const address_range &other) const
+ bool overlaps(const address_range& other) const
{
AUDIT(valid() && other.valid());
return range_overlaps(start, end, other.start, other.end);
}
- bool overlaps(const u32 addr) const
+ bool overlaps(const T addr) const
{
AUDIT(valid());
return address_overlaps(addr, start, end);
}
- bool inside(const address_range &other) const
+ bool inside(const address_range& other) const
{
AUDIT(valid() && other.valid());
return range_inside_range(start, end, other.start, other.end);
}
- inline bool inside(const address_range_vector &vec) const;
- inline bool overlaps(const address_range_vector &vec) const;
+ inline bool inside(const address_range_vector<T>& vec) const;
+ inline bool overlaps(const address_range_vector<T>& vec) const;
- bool touches(const address_range &other) const
+ bool touches(const address_range& other) const
{
AUDIT(valid() && other.valid());
// returns true if there is overlap, or if sections are side-by-side
@@ -134,7 +143,7 @@ namespace utils
}
// Utilities
- s32 signed_distance(const address_range &other) const
+ signed_type_t signed_distance(const address_range& other) const
{
if (touches(other))
{
@@ -144,15 +153,15 @@ namespace utils
// other after this
if (other.start > end)
{
- return static_cast<s32>(other.start - end - 1);
+ return static_cast<signed_type_t>(other.start - end - 1);
}
// this after other
AUDIT(start > other.end);
- return -static_cast<s32>(start - other.end - 1);
+ return -static_cast<signed_type_t>(start - other.end - 1);
}
- u32 distance(const address_range &other) const
+ T distance(const address_range& other) const
{
if (touches(other))
{
@@ -170,7 +179,7 @@ namespace utils
return (start - other.end - 1);
}
- address_range get_min_max(const address_range &other) const
+ address_range get_min_max(const address_range& other) const
{
return {
std::min(valid() ? start : umax, other.valid() ? other.start : umax),
@@ -178,7 +187,7 @@ namespace utils
};
}
- void set_min_max(const address_range &other)
+ void set_min_max(const address_range& other)
{
*this = get_min_max(other);
}
@@ -188,7 +197,7 @@ namespace utils
return (valid() && is_page_aligned(start) && is_page_aligned(length()));
}
- address_range to_page_range() const
+ address_range<T> to_page_range() const
{
AUDIT(valid());
return { page_start(start), page_end(end) };
@@ -202,7 +211,7 @@ namespace utils
AUDIT(is_page_range());
}
- address_range get_intersect(const address_range &clamp) const
+ address_range get_intersect(const address_range& clamp) const
{
if (!valid() || !clamp.valid())
{
@@ -212,7 +221,7 @@ namespace utils
return { std::max(start, clamp.start), std::min(end, clamp.end) };
}
- void intersect(const address_range &clamp)
+ void intersect(const address_range& clamp)
{
if (!clamp.valid())
{
@@ -238,7 +247,7 @@ namespace utils
}
// Comparison Operators
- bool operator ==(const address_range& other) const
+ bool operator ==(const address_range<T>& other) const
{
return (start == other.start && end == other.end);
}
@@ -252,21 +261,27 @@ namespace utils
}
};
- static inline address_range page_for(u32 addr)
+ using address_range16 = address_range<u16>;
+ using address_range32 = address_range<u32>;
+ using address_range64 = address_range<u64>;
+
+ template <typename T>
+ static inline address_range<T> page_for(T addr)
{
- return address_range::start_end(page_start(addr), page_end(addr));
+ return address_range<T>::start_end(page_start(addr), page_end(addr));
}
/**
* Address Range Vector utility class
*
- * Collection of address_range objects. Allows for merging and removing ranges from the set.
+ * Collection of address_range<T> objects. Allows for merging and removing ranges from the set.
*/
+ template <typename T>
class address_range_vector
{
public:
- using vector_type = std::vector<address_range>;
+ using vector_type = std::vector<address_range<T>>;
using iterator = vector_type::iterator;
using const_iterator = vector_type::const_iterator;
using size_type = vector_type::size_type;
@@ -280,8 +295,8 @@ namespace utils
inline void clear() { data.clear(); }
inline size_type size() const { return data.size(); }
inline bool empty() const { return data.empty(); }
- inline address_range& operator[](size_type n) { return data[n]; }
- inline const address_range& operator[](size_type n) const { return data[n]; }
+ inline address_range<T>& operator[](size_type n) { return data[n]; }
+ inline const address_range<T>& operator[](size_type n) const { return data[n]; }
inline iterator begin() { return data.begin(); }
inline const_iterator begin() const { return data.begin(); }
inline iterator end() { return data.end(); }
@@ -289,7 +304,7 @@ namespace utils
// Search for ranges that touch new_range. If found, merge instead of adding new_range.
// When adding a new range, re-use invalid ranges whenever possible
- void merge(const address_range &new_range)
+ void merge(const address_range<T>& new_range)
{
// Note the case where we have
// AAAA BBBB
@@ -301,8 +316,8 @@ namespace utils
return;
}
- address_range *found = nullptr;
- address_range *invalid = nullptr;
+ address_range<T> *found = nullptr;
+ address_range<T> *invalid = nullptr;
for (auto &existing : data)
{
@@ -347,22 +362,22 @@ namespace utils
AUDIT(check_consistency());
}
- void merge(const address_range_vector &other)
+ void merge(const address_range_vector& other)
{
- for (const address_range &new_range : other)
+ for (const address_range<T>& new_range : other)
{
merge(new_range);
}
}
// Exclude a given range from data
- void exclude(const address_range &exclusion)
+ void exclude(const address_range<T>& exclusion)
{
// Note the case where we have
// AAAAAAA
// EEE
// where data={A} and exclusion=E.
- // In this case, we need to reduce A to the head (before E starts), and then create a new address_range B for the tail (after E ends), i.e.
+ // In this case, we need to reduce A to the head (before E starts), and then create a new address_range<T> B for the tail (after E ends), i.e.
// AA BB
// EEE
@@ -371,13 +386,13 @@ namespace utils
return;
}
- address_range *invalid = nullptr; // try to re-use an invalid range instead of calling push_back
+ address_range<T> *invalid = nullptr; // try to re-use an invalid range instead of calling push_back
// We use index access because we might have to push_back within the loop, which could invalidate the iterators
size_type _size = data.size();
for (size_type n = 0; n < _size; ++n)
{
- address_range &existing = data[n];
+ address_range<T>& existing = data[n];
if (!existing.valid())
{
@@ -430,7 +445,7 @@ namespace utils
else
{
// IMPORTANT: adding to data invalidates "existing". This must be done last!
- data.push_back(address_range::start_end(exclusion.next_address(), tail_end));
+ data.push_back(address_range<T>::start_end(exclusion.next_address(), tail_end));
}
}
}
@@ -438,9 +453,9 @@ namespace utils
AUDIT(!overlaps(exclusion));
}
- void exclude(const address_range_vector &other)
+ void exclude(const address_range_vector& other)
{
- for (const address_range &exclusion : other)
+ for (const address_range<T>& exclusion : other)
{
exclude(exclusion);
}
@@ -478,25 +493,25 @@ namespace utils
}
// Test for overlap with a given range
- bool overlaps(const address_range &range) const
+ bool overlaps(const address_range<T>& range) const
{
- return std::any_of(data.cbegin(), data.cend(), [&range](const address_range& cur)
+ return std::any_of(data.cbegin(), data.cend(), [&range](const address_range<T>& cur)
{
return cur.valid() && cur.overlaps(range);
});
}
- // Test for overlap with a given address_range vector
- bool overlaps(const address_range_vector &other) const
+ // Test for overlap with a given address_range<T> vector
+ bool overlaps(const address_range_vector& other) const
{
- for (const address_range &rng1 : data)
+ for (const address_range<T>& rng1 : data)
{
if (!rng1.valid())
{
continue;
}
- for (const address_range &rng2 : other.data)
+ for (const address_range<T>& rng2 : other.data)
{
if (!rng2.valid())
{
@@ -513,36 +528,56 @@ namespace utils
}
// Test if a given range is fully contained inside this vector
- bool contains(const address_range &range) const
+ bool contains(const address_range<T>& range) const
{
- return std::any_of(this->begin(), this->end(), [&range](const address_range& cur)
+ return std::any_of(this->begin(), this->end(), [&range](const address_range<T>& cur)
{
return cur.valid() && cur.inside(range);
});
}
// Test if all ranges in this vector are fully contained inside a specific range
- bool inside(const address_range &range) const
+ bool inside(const address_range<T>& range) const
{
- return std::all_of(this->begin(), this->end(), [&range](const address_range& cur)
+ return std::all_of(this->begin(), this->end(), [&range](const address_range<T>& cur)
{
return !cur.valid() || cur.inside(range);
});
}
+
+ // Count valid entries
+ usz valid_count() const
+ {
+ usz count = 0;
+ for (const auto& e : data)
+ {
+ if (e.valid())
+ {
+ count++;
+ }
+ }
+ return count;
+ }
};
// These declarations must be done after address_range_vector has been defined
- bool address_range::inside(const address_range_vector &vec) const
+ template <typename T>
+ bool address_range<T>::inside(const address_range_vector<T>& vec) const
{
return vec.contains(*this);
}
- bool address_range::overlaps(const address_range_vector &vec) const
+ template <typename T>
+ bool address_range<T>::overlaps(const address_range_vector<T>& vec) const
{
return vec.overlaps(*this);
}
+ using address_range_vector16 = address_range_vector<u16>;
+ using address_range_vector32 = address_range_vector<u32>;
+ using address_range_vector64 = address_range_vector<u64>;
+
} // namespace utils
@@ -551,9 +586,9 @@ namespace std
static_assert(sizeof(usz) >= 2 * sizeof(u32), "usz must be at least twice the size of u32");
template <>
- struct hash<utils::address_range>
+ struct hash<utils::address_range32>
{
- usz operator()(const utils::address_range& k) const
+ usz operator()(const utils::address_range32& k) const
{
// we can guarantee a unique hash since our type is 64 bits and usz as well
return (usz{ k.start } << 32) | usz{ k.end };
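
Aside: the behavioural point of this templating is easiest to see in isolation. The range type below is a simplified, self-contained stand-in for utils::address_range<T> (the real header depends on rpcs3's u32/umax/get_page_size() helpers, so it is not reproduced verbatim); it sketches why signed_distance now derives its return type from T via std::make_signed instead of hard-coding s32:

#include <cassert>
#include <cstdint>
#include <type_traits>

// Simplified stand-in for utils::address_range<T>: an inclusive [start, end] range.
template <typename T>
struct range
{
	T start, end;

	using signed_type_t = std::make_signed_t<T>;

	static constexpr range start_length(T s, T len) { return { s, static_cast<T>(s + len - 1) }; }

	// Overlapping or directly adjacent ranges have distance zero.
	bool touches(const range& o) const { return start <= o.end + 1 && o.start <= end + 1; }

	// Gap between two ranges, negative when 'o' lies below *this.
	signed_type_t signed_distance(const range& o) const
	{
		if (touches(o)) return 0;
		if (o.start > end) return static_cast<signed_type_t>(o.start - end - 1);
		return -static_cast<signed_type_t>(start - o.end - 1);
	}
};

int main()
{
	const auto a = range<std::uint32_t>::start_length(0x1000, 0x100); // [0x1000, 0x10FF]
	const auto b = range<std::uint32_t>::start_length(0x2000, 0x100); // [0x2000, 0x20FF]
	assert(a.signed_distance(b) == 0xF00);
	assert(b.signed_distance(a) == -0xF00);

	// With T = u64 a fixed s32 return type would truncate gaps over 2 GiB;
	// deriving the signed type from T keeps the full width.
	const auto c = range<std::uint64_t>::start_length(0x1'0000'0000, 0x100);
	const auto d = range<std::uint64_t>::start_length(0x9'0000'0000, 0x100);
	assert(d.signed_distance(c) < 0 && c.signed_distance(d) == -d.signed_distance(c));
}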
diff --git a/rpcs3/CMakeLists.txt b/rpcs3/CMakeLists.txt
index 727bee8cae..395ea52e87 100644
--- a/rpcs3/CMakeLists.txt
+++ b/rpcs3/CMakeLists.txt
@@ -200,6 +200,7 @@ if(BUILD_RPCS3_TESTS)
tests/test.cpp
tests/test_fmt.cpp
tests/test_simple_array.cpp
+ tests/test_address_range.cpp
)
target_link_libraries(rpcs3_test
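
tests/test_address_range.cpp itself is not part of this diff. Assuming it follows the gtest conventions of the neighbouring test_fmt.cpp and test_simple_array.cpp suites, a plausible shape would be something like the following; the cases and the include path are illustrative guesses, not the actual file contents:

#include <gtest/gtest.h>
#include "Utilities/address_range.h"

using namespace utils;

namespace
{
	TEST(AddressRange, StartLengthIsInclusive)
	{
		const auto r = address_range32::start_length(0x1000, 0x1000);
		EXPECT_EQ(r.start, 0x1000u);
		EXPECT_EQ(r.end, 0x1FFFu); // 'end' is the last address, not one-past-the-end
		EXPECT_EQ(r.length(), 0x1000u);
	}

	TEST(AddressRangeVector, ExcludeSplitsIntoHeadAndTail)
	{
		address_range_vector32 vec;
		vec.merge(address_range32::start_end(0x0000, 0x2FFF));
		vec.exclude(address_range32::start_end(0x1000, 0x1FFF));

		EXPECT_EQ(vec.valid_count(), 2u); // head + tail, per the AAAA/EEE comment in exclude()
		EXPECT_TRUE(vec.contains(address_range32::start_end(0x0000, 0x0FFF)));
		EXPECT_TRUE(vec.contains(address_range32::start_end(0x2000, 0x2FFF)));
		EXPECT_FALSE(vec.overlaps(address_range32::start_end(0x1000, 0x1FFF)));
	}
}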
diff --git a/rpcs3/Emu/Cell/PPUModule.cpp b/rpcs3/Emu/Cell/PPUModule.cpp
index eca1177768..65ab0fe18d 100644
--- a/rpcs3/Emu/Cell/PPUModule.cpp
+++ b/rpcs3/Emu/Cell/PPUModule.cpp
@@ -2088,7 +2088,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
{
if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz)
{
- using addr_range = utils::address_range;
+ using addr_range = utils::address_range32;
const addr_range r = addr_range::start_length(static_cast<u32>(prog.p_vaddr), static_cast<u32>(prog.p_memsz));
@@ -2852,7 +2852,7 @@ std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_ob
{
if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz)
{
- using addr_range = utils::address_range;
+ using addr_range = utils::address_range32;
const addr_range r = addr_range::start_length(::narrow<u32>(prog.p_vaddr), ::narrow<u32>(prog.p_memsz));
diff --git a/rpcs3/Emu/Cell/SPUCommonRecompiler.cpp b/rpcs3/Emu/Cell/SPUCommonRecompiler.cpp
index d73efc8e84..41e81ac4e2 100644
--- a/rpcs3/Emu/Cell/SPUCommonRecompiler.cpp
+++ b/rpcs3/Emu/Cell/SPUCommonRecompiler.cpp
@@ -8314,7 +8314,7 @@ void spu_recompiler_base::add_pattern(bool fill_all, inst_attr attr, u32 start,
end = start;
}
- m_patterns[start] = pattern_info{utils::address_range::start_end(start, end)};
+ m_patterns[start] = pattern_info{utils::address_range32::start_end(start, end)};
for (u32 i = start; i <= (fill_all ? end : start); i += 4)
{
diff --git a/rpcs3/Emu/Cell/SPULLVMRecompiler.cpp b/rpcs3/Emu/Cell/SPULLVMRecompiler.cpp
index 0df3e3a854..cc2fea2d38 100644
--- a/rpcs3/Emu/Cell/SPULLVMRecompiler.cpp
+++ b/rpcs3/Emu/Cell/SPULLVMRecompiler.cpp
@@ -1083,7 +1083,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
m_ir->SetInsertPoint(_body);
}
- void putllc16_pattern(const spu_program& /*prog*/, utils::address_range range)
+ void putllc16_pattern(const spu_program& /*prog*/, utils::address_range32 range)
{
// Prevent store elimination
m_block->store_context_ctr[s_reg_mfc_eal]++;
@@ -1376,7 +1376,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
m_ir->SetInsertPoint(_final);
}
- void putllc0_pattern(const spu_program& /*prog*/, utils::address_range /*range*/)
+ void putllc0_pattern(const spu_program& /*prog*/, utils::address_range32 /*range*/)
{
// Prevent store elimination
m_block->store_context_ctr[s_reg_mfc_eal]++;
diff --git a/rpcs3/Emu/Cell/SPURecompiler.h b/rpcs3/Emu/Cell/SPURecompiler.h
index 03b69583d8..ddee888b1e 100644
--- a/rpcs3/Emu/Cell/SPURecompiler.h
+++ b/rpcs3/Emu/Cell/SPURecompiler.h
@@ -397,7 +397,7 @@ protected:
struct pattern_info
{
- utils::address_range range;
+ utils::address_range32 range;
};
std::unordered_map<u32, pattern_info> m_patterns;
diff --git a/rpcs3/Emu/Cell/lv2/sys_rsx.cpp b/rpcs3/Emu/Cell/lv2/sys_rsx.cpp
index 0c912490bd..4ef4e04d85 100644
--- a/rpcs3/Emu/Cell/lv2/sys_rsx.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_rsx.cpp
@@ -716,7 +716,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
//const u32 bank = (((a4 >> 32) & 0xFFFFFFFF) >> 4) & 0xF;
const bool bound = ((a4 >> 32) & 0x3) != 0;
- const auto range = utils::address_range::start_length(offset, size);
+ const auto range = utils::address_range32::start_length(offset, size);
if (bound)
{
@@ -800,7 +800,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
if (bound)
{
- const auto cull_range = utils::address_range::start_length(cullStart, width * height);
+ const auto cull_range = utils::address_range32::start_length(cullStart, width * height);
// cullStart is an offset inside ZCULL RAM which is 3MB long, check bounds
// width and height are not allowed to be zero (checked by range.valid())
diff --git a/rpcs3/Emu/Memory/vm.cpp b/rpcs3/Emu/Memory/vm.cpp
index 891f0910c1..d7109bf78a 100644
--- a/rpcs3/Emu/Memory/vm.cpp
+++ b/rpcs3/Emu/Memory/vm.cpp
@@ -342,7 +342,7 @@ namespace vm
utils::prefetch_read(g_range_lock_set + 2);
utils::prefetch_read(g_range_lock_set + 4);
- const auto range = utils::address_range::start_length(addr, size);
+ const auto range = utils::address_range32::start_length(addr, size);
u64 to_clear = get_range_lock_bits(false).load();
@@ -350,7 +350,7 @@ namespace vm
{
to_clear = for_all_range_locks(to_clear, [&](u32 addr2, u32 size2)
{
- if (range.overlaps(utils::address_range::start_length(addr2, size2))) [[unlikely]]
+ if (range.overlaps(utils::address_range32::start_length(addr2, size2))) [[unlikely]]
{
return 1;
}
@@ -1816,7 +1816,7 @@ namespace vm
static bool _test_map(u32 addr, u32 size)
{
- const auto range = utils::address_range::start_length(addr, size);
+ const auto range = utils::address_range32::start_length(addr, size);
if (!range.valid())
{
@@ -1830,7 +1830,7 @@ namespace vm
continue;
}
- if (range.overlaps(utils::address_range::start_length(block->addr, block->size)))
+ if (range.overlaps(utils::address_range32::start_length(block->addr, block->size)))
{
return false;
}
diff --git a/rpcs3/Emu/Memory/vm.h b/rpcs3/Emu/Memory/vm.h
index 2b2eccc24d..3c39d0aa4a 100644
--- a/rpcs3/Emu/Memory/vm.h
+++ b/rpcs3/Emu/Memory/vm.h
@@ -22,7 +22,9 @@ void ppubreak(ppu_thread& ppu);
namespace utils
{
class shm;
- class address_range;
+
+ template <typename T> class address_range;
+ using address_range32 = address_range<u32>;
}
namespace vm
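
This hunk shows the one wrinkle the templating introduces for consumers: a class template cannot be forward-declared as a plain class, so the declaration must restate the template-parameter list, after which an alias to a still-incomplete specialization is enough for headers that only form pointers or references. A minimal self-contained illustration of the pattern (names are illustrative, not taken from the PR):

#include <cstdint>

// Forward declaration: the template-parameter list must be restated...
template <typename T>
class address_range;

// ...and an alias to a specialization may be formed while the type is incomplete.
using address_range32 = address_range<std::uint32_t>;

// Code that only stores pointers or references compiles against the incomplete type.
struct range_user
{
	const address_range32* range = nullptr;
};

// The full definition can appear later without breaking the earlier users.
template <typename T>
class address_range
{
public:
	T start{};
	T end{};
};

int main()
{
	address_range32 r{0x1000, 0x1FFF};
	range_user u{&r};
	return u.range->end == 0x1FFF ? 0 : 1;
}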
diff --git a/rpcs3/Emu/RSX/Common/ranged_map.hpp b/rpcs3/Emu/RSX/Common/ranged_map.hpp
index 8103e36972..cd728ad48a 100644
--- a/rpcs3/Emu/RSX/Common/ranged_map.hpp
+++ b/rpcs3/Emu/RSX/Common/ranged_map.hpp
@@ -36,7 +36,7 @@ namespace rsx
return block_id * BlockSize;
}
- void broadcast_insert(const utils::address_range& range)
+ void broadcast_insert(const utils::address_range32& range)
{
const auto head_block = block_for(range.start);
for (auto meta = &m_metadata[head_block]; meta <= &m_metadata[block_for(range.end)]; ++meta)
@@ -98,7 +98,7 @@ namespace rsx
m_it = where;
}
- void begin_range(const utils::address_range& range)
+ void begin_range(const utils::address_range32& range)
{
const auto start_block_id = range.start / BlockSize;
const auto& metadata = m_metadata_ptr[start_block_id];
@@ -177,7 +177,7 @@ namespace rsx
std::for_each(m_metadata.begin(), m_metadata.end(), [&](auto& meta) { meta.id = static_cast<u32>(&meta - m_metadata.data()); });
}
- void emplace(const utils::address_range& range, T&& value)
+ void emplace(const utils::address_range32& range, T&& value)
{
broadcast_insert(range);
m_data[block_for(range.start)].insert_or_assign(range.start, std::forward<T>(value));
@@ -220,7 +220,7 @@ namespace rsx
m_data[block_for(address)].erase(address);
}
- iterator begin_range(const utils::address_range& range)
+ iterator begin_range(const utils::address_range32& range)
{
iterator ret = { this };
ret.begin_range(range);
diff --git a/rpcs3/Emu/RSX/Common/surface_cache_dma.hpp b/rpcs3/Emu/RSX/Common/surface_cache_dma.hpp
index a4aa58b3e3..02c507fde2 100644
--- a/rpcs3/Emu/RSX/Common/surface_cache_dma.hpp
+++ b/rpcs3/Emu/RSX/Common/surface_cache_dma.hpp
@@ -49,7 +49,7 @@ namespace rsx
}
}
- surface_cache_dma& with_range(command_list_type cmd, const utils::address_range& range)
+ surface_cache_dma& with_range(command_list_type cmd, const utils::address_range32& range)
{
// Prepare underlying memory so that the range specified is provisioned and contiguous
// 1. Check if we have a pre-existing bo layer
@@ -57,7 +57,7 @@ namespace rsx
if (this_entry)
{
const auto bo = this_entry.get();
- const auto buffer_range = utils::address_range::start_length(bo.base_address, ::size32(*bo));
+ const auto buffer_range = utils::address_range32::start_length(bo.base_address, ::size32(*bo));
if (range.inside(buffer_range))
{
@@ -94,11 +94,11 @@ namespace rsx
return *this;
}
- utils::address_range to_block_range(const utils::address_range& range)
+ utils::address_range32 to_block_range(const utils::address_range32& range)
{
u32 start = block_address(block_for(range.start));
u32 end = block_address(block_for(range.end + BlockSize - 1));
- return utils::address_range::start_end(start, end - 1);
+ return utils::address_range32::start_end(start, end - 1);
}
std::tuple get(u32 address)
@@ -107,7 +107,7 @@ namespace rsx
return { block.get(), block.base_address - address };
}
- void touch(const utils::address_range& range)
+ void touch(const utils::address_range32& range)
{
const u64 stamp = rsx::get_shared_tag();
for (usz i = block_for(range.start); i <= block_for(range.end); i++)
diff --git a/rpcs3/Emu/RSX/Common/surface_store.h b/rpcs3/Emu/RSX/Common/surface_store.h
index 9b266a54ed..ce1e9083e9 100644
--- a/rpcs3/Emu/RSX/Common/surface_store.h
+++ b/rpcs3/Emu/RSX/Common/surface_store.h
@@ -52,8 +52,8 @@ namespace rsx
surface_ranged_map m_render_targets_storage = {};
surface_ranged_map m_depth_stencil_storage = {};
- rsx::address_range m_render_targets_memory_range;
- rsx::address_range m_depth_stencil_memory_range;
+ rsx::address_range32 m_render_targets_memory_range;
+ rsx::address_range32 m_depth_stencil_memory_range;
surface_cache_dma_map m_dma_block;
@@ -244,7 +244,7 @@ namespace rsx
template
void intersect_surface_region(command_list_type cmd, u32 address, surface_type new_surface, surface_type prev_surface)
{
- auto scan_list = [&new_surface, address](const rsx::address_range& mem_range,
+ auto scan_list = [&new_surface, address](const rsx::address_range32& mem_range,
surface_ranged_map& data) -> std::vector<std::pair<u32, surface_type>>
{
std::vector<std::pair<u32, surface_type>> result;
@@ -277,7 +277,7 @@ namespace rsx
}
// Range check
- const rsx::address_range this_range = surface->get_memory_range();
+ const rsx::address_range32 this_range = surface->get_memory_range();
if (!this_range.overlaps(mem_range))
{
continue;
@@ -290,7 +290,7 @@ namespace rsx
return result;
};
- const rsx::address_range mem_range = new_surface->get_memory_range();
+ const rsx::address_range32 mem_range = new_surface->get_memory_range();
auto list1 = scan_list(mem_range, m_render_targets_storage);
auto list2 = scan_list(mem_range, m_depth_stencil_storage);
@@ -410,7 +410,7 @@ namespace rsx
// Workaround. Preserve new surface tag value because pitch convert is unimplemented
u64 new_content_tag = 0;
- address_range* storage_bounds;
+ address_range32* storage_bounds;
surface_ranged_map* primary_storage;
surface_ranged_map* secondary_storage;
if constexpr (depth)
@@ -488,7 +488,7 @@ namespace rsx
{
// Range test
const auto aa_factor_v = get_aa_factor_v(antialias);
- rsx::address_range range = rsx::address_range::start_length(address, static_cast(pitch * height * aa_factor_v));
+ rsx::address_range32 range = rsx::address_range32::start_length(address, static_cast(pitch * height * aa_factor_v));
*storage_bounds = range.get_min_max(*storage_bounds);
// Search invalidated resources for a suitable surface
@@ -629,10 +629,10 @@ namespace rsx
invalidated_resources.push_back(std::move(storage));
}
- int remove_duplicates_fast_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range& range)
+ int remove_duplicates_fast_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range32& range)
{
// Range tests to check for gaps
- std::list<utils::address_range> m_ranges;
+ std::list<utils::address_range32> m_ranges;
bool invalidate_sections = false;
int removed_count = 0;
@@ -696,7 +696,7 @@ namespace rsx
return removed_count;
}
- void remove_duplicates_fallback_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range& range)
+ void remove_duplicates_fallback_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range32& range)
{
// Originally used to debug crashes but this function breaks often enough that I'll leave the checks in for now.
// Safe to remove after some time if no asserts are reported.
@@ -866,10 +866,10 @@ namespace rsx
}
std::tuple<std::vector<surface_type>, std::vector<surface_type>>
- find_overlapping_set(const utils::address_range& range) const
+ find_overlapping_set(const utils::address_range32& range) const
{
std::vector<surface_type> color_result, depth_result;
- utils::address_range result_range;
+ utils::address_range32 result_range;
if (m_render_targets_memory_range.valid() &&
range.overlaps(m_render_targets_memory_range))
@@ -904,7 +904,7 @@ namespace rsx
void write_to_dma_buffers(
command_list_type command_list,
- const utils::address_range& range)
+ const utils::address_range32& range)
{
auto block_range = m_dma_block.to_block_range(range);
auto [color_data, depth_stencil_data] = find_overlapping_set(block_range);
@@ -1102,7 +1102,7 @@ namespace rsx
return {};
}
- const auto test_range = utils::address_range::start_length(texaddr, (required_pitch * required_height) - (required_pitch - surface_internal_pitch));
+ const auto test_range = utils::address_range32::start_length(texaddr, (required_pitch * required_height) - (required_pitch - surface_internal_pitch));
auto process_list_function = [&](surface_ranged_map& data, bool is_depth)
{
@@ -1237,7 +1237,7 @@ namespace rsx
void check_for_duplicates(std::vector<surface_overlap_info>& sections)
{
- utils::address_range test_range;
+ utils::address_range32 test_range;
for (const auto& section : sections)
{
const auto range = section.surface->get_memory_range();
@@ -1294,7 +1294,7 @@ namespace rsx
void invalidate_all()
{
// Unbind and invalidate all resources
- auto free_resource_list = [&](auto &data, const utils::address_range& range)
+ auto free_resource_list = [&](auto &data, const utils::address_range32& range)
{
for (auto it = data.begin_range(range); it != data.end(); ++it)
{
@@ -1317,7 +1317,7 @@ namespace rsx
}
}
- void invalidate_range(const rsx::address_range& range)
+ void invalidate_range(const rsx::address_range32& range)
{
for (auto it = m_render_targets_storage.begin_range(range); it != m_render_targets_storage.end(); ++it)
{
@@ -1383,7 +1383,7 @@ namespace rsx
void collapse_dirty_surfaces(command_list_type cmd, problem_severity severity)
{
- auto process_list_function = [&](surface_ranged_map& data, const utils::address_range& range)
+ auto process_list_function = [&](surface_ranged_map& data, const utils::address_range32& range)
{
for (auto It = data.begin_range(range); It != data.end();)
{
diff --git a/rpcs3/Emu/RSX/Common/surface_utils.h b/rpcs3/Emu/RSX/Common/surface_utils.h
index 03c5b5e6f2..bf7dee2db3 100644
--- a/rpcs3/Emu/RSX/Common/surface_utils.h
+++ b/rpcs3/Emu/RSX/Common/surface_utils.h
@@ -146,7 +146,7 @@ namespace rsx
u8 samples_x = 1;
u8 samples_y = 1;
- rsx::address_range memory_range;
+ rsx::address_range32 memory_range;
std::unique_ptr> resolve_surface;
surface_sample_layout sample_layout = surface_sample_layout::null;
@@ -367,7 +367,7 @@ namespace rsx
const u32 internal_height = get_surface_height();
const u32 excess = (rsx_pitch - native_pitch);
- memory_range = rsx::address_range::start_length(base_addr, internal_height * rsx_pitch - excess);
+ memory_range = rsx::address_range32::start_length(base_addr, internal_height * rsx_pitch - excess);
}
void sync_tag()
@@ -419,7 +419,7 @@ namespace rsx
const u32 internal_height = get_surface_height();
const u32 excess = (rsx_pitch - native_pitch);
- memory_range = rsx::address_range::start_length(base_addr, internal_height * rsx_pitch - excess);
+ memory_range = rsx::address_range32::start_length(base_addr, internal_height * rsx_pitch - excess);
}
void sync_tag()
@@ -658,7 +658,7 @@ namespace rsx
return { 0, 0, internal_width, internal_height };
}
- inline rsx::address_range get_memory_range() const
+ inline rsx::address_range32 get_memory_range() const
{
return memory_range;
}
diff --git a/rpcs3/Emu/RSX/Common/texture_cache.cpp b/rpcs3/Emu/RSX/Common/texture_cache.cpp
index d1cdd25a34..ab884c407b 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache.cpp
+++ b/rpcs3/Emu/RSX/Common/texture_cache.cpp
@@ -7,7 +7,7 @@ namespace rsx
{
constexpr u32 min_lockable_data_size = 4096; // Increasing this value has worse results even on systems with pages > 4k
- void buffered_section::init_lockable_range(const address_range& range)
+ void buffered_section::init_lockable_range(const address_range32& range)
{
locked_range = range.to_page_range();
AUDIT((locked_range.start == page_start(range.start)) || (locked_range.start == next_page(range.start)));
@@ -15,11 +15,11 @@ namespace rsx
ensure(locked_range.is_page_range());
}
- void buffered_section::reset(const address_range& memory_range)
+ void buffered_section::reset(const address_range32& memory_range)
{
ensure(memory_range.valid() && locked == false);
- cpu_range = address_range(memory_range);
+ cpu_range = address_range32(memory_range);
confirmed_range.invalidate();
locked_range.invalidate();
@@ -110,7 +110,7 @@ namespace rsx
}
else
{
- confirmed_range = address_range::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
+ confirmed_range = address_range32::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
ensure(!locked || locked_range.inside(confirmed_range.to_page_range()));
}
@@ -139,7 +139,7 @@ namespace rsx
locked = false;
}
- const address_range& buffered_section::get_bounds(section_bounds bounds) const
+ const address_range32& buffered_section::get_bounds(section_bounds bounds) const
{
switch (bounds)
{
diff --git a/rpcs3/Emu/RSX/Common/texture_cache.h b/rpcs3/Emu/RSX/Common/texture_cache.h
index a5ed283534..91e41e6179 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache.h
+++ b/rpcs3/Emu/RSX/Common/texture_cache.h
@@ -65,8 +65,8 @@ namespace rsx
u32 num_discarded = 0;
u64 cache_tag = 0;
- address_range fault_range;
- address_range invalidate_range;
+ address_range32 fault_range;
+ address_range32 invalidate_range;
void clear_sections()
{
@@ -136,7 +136,7 @@ namespace rsx
struct intersecting_set
{
rsx::simple_array<section_storage_type*> sections = {};
- address_range invalidate_range = {};
+ address_range32 invalidate_range = {};
bool has_flushables = false;
};
@@ -150,7 +150,7 @@ namespace rsx
u16 x = 0;
u16 y = 0;
- utils::address_range cache_range;
+ utils::address_range32 cache_range;
bool do_not_cache = false;
deferred_subresource() = default;
@@ -445,8 +445,8 @@ namespace rsx
atomic_t<u64> m_cache_update_tag = {0};
- address_range read_only_range;
- address_range no_access_range;
+ address_range32 read_only_range;
+ address_range32 no_access_range;
//Map of messages to only emit once
std::unordered_set<u64> m_once_only_messages_set;
@@ -455,7 +455,7 @@ namespace rsx
bool read_only_tex_invalidate = false;
//Store of all objects in a flush_always state. A lazy readback is attempted every draw call
- std::unordered_map<address_range, section_storage_type*> m_flush_always_cache;
+ std::unordered_map<address_range32, section_storage_type*> m_flush_always_cache;
u64 m_flush_always_update_timestamp = 0;
//Memory usage
@@ -484,11 +484,11 @@ namespace rsx
virtual image_view_type create_temporary_subresource_view(commandbuffer_type&, image_resource_type* src, u32 gcm_format, u16 x, u16 y, u16 w, u16 h, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type create_temporary_subresource_view(commandbuffer_type&, image_storage_type* src, u32 gcm_format, u16 x, u16 y, u16 w, u16 h, const texture_channel_remap_t& remap_vector) = 0;
virtual void release_temporary_subresource(image_view_type rsc) = 0;
- virtual section_storage_type* create_new_texture(commandbuffer_type&, const address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
+ virtual section_storage_type* create_new_texture(commandbuffer_type&, const address_range32 &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, component_order swizzle_flags, rsx::flags32_t flags) = 0;
- virtual section_storage_type* upload_image_from_cpu(commandbuffer_type&, const address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format, texture_upload_context context,
+ virtual section_storage_type* upload_image_from_cpu(commandbuffer_type&, const address_range32 &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format, texture_upload_context context,
const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) = 0;
- virtual section_storage_type* create_nul_section(commandbuffer_type&, const address_range &rsx_range, const image_section_attributes_t& attrs, const GCM_tile_reference& tile, bool memory_load) = 0;
+ virtual section_storage_type* create_nul_section(commandbuffer_type&, const address_range32 &rsx_range, const image_section_attributes_t& attrs, const GCM_tile_reference& tile, bool memory_load) = 0;
virtual void set_component_order(section_storage_type& section, u32 gcm_format, component_order expected) = 0;
virtual void insert_texture_barrier(commandbuffer_type&, image_storage_type* tex, bool strong_ordering = true) = 0;
virtual image_view_type generate_cubemap_from_images(commandbuffer_type&, u32 gcm_format, u16 size, const std::vector<copy_region_descriptor>& sources, const texture_channel_remap_t& remap_vector) = 0;
@@ -545,7 +545,7 @@ namespace rsx
* Internal implementation methods and helpers
*/
- inline bool region_intersects_cache(const address_range &test_range, bool is_writing)
+ inline bool region_intersects_cache(const address_range32 &test_range, bool is_writing)
{
AUDIT(test_range.valid());
@@ -685,7 +685,7 @@ namespace rsx
// Merges the protected ranges of the sections in "sections" into "result"
- void merge_protected_ranges(address_range_vector &result, const std::vector<section_storage_type*> &sections)
+ void merge_protected_ranges(address_range_vector32 &result, const std::vector<section_storage_type*> &sections)
{
result.reserve(result.size() + sections.size());
@@ -704,7 +704,7 @@ namespace rsx
// Otherwise the page protections will end up incorrect and things will break!
void unprotect_set(thrashed_set& data)
{
- auto protect_ranges = [](address_range_vector& _set, utils::protection _prot)
+ auto protect_ranges = [](address_range_vector32& _set, utils::protection _prot)
{
//u32 count = 0;
for (auto &range : _set)
@@ -734,8 +734,8 @@ namespace rsx
AUDIT(data.is_flushed());
// Merge ranges to unprotect
- address_range_vector ranges_to_unprotect;
- address_range_vector ranges_to_protect_ro;
+ address_range_vector32 ranges_to_unprotect;
+ address_range_vector32 ranges_to_protect_ro;
ranges_to_unprotect.reserve(data.sections_to_unprotect.size() + data.sections_to_flush.size() + data.sections_to_exclude.size());
merge_protected_ranges(ranges_to_unprotect, data.sections_to_unprotect);
@@ -751,7 +751,7 @@ namespace rsx
for (const auto &excluded : data.sections_to_exclude)
{
ensure(excluded->is_locked(true));
- address_range exclusion_range = excluded->get_locked_range();
+ address_range32 exclusion_range = excluded->get_locked_range();
// We need to make sure that the exclusion range is *inside* invalidate range
exclusion_range.intersect(data.invalidate_range);
@@ -824,14 +824,14 @@ namespace rsx
// Return a set containing all sections that should be flushed/unprotected/reprotected
atomic_t<u64> m_last_section_cache_tag = 0;
- intersecting_set get_intersecting_set(const address_range &fault_range)
+ intersecting_set get_intersecting_set(const address_range32 &fault_range)
{
AUDIT(fault_range.is_page_range());
const u64 cache_tag = ++m_last_section_cache_tag;
intersecting_set result = {};
- address_range &invalidate_range = result.invalidate_range;
+ address_range32 &invalidate_range = result.invalidate_range;
invalidate_range = fault_range; // Sections fully inside this range will be invalidated, others will be deemed false positives
// Loop through cache and find pages that overlap the invalidate_range
@@ -920,7 +920,7 @@ namespace rsx
template <typename... Args>
thrashed_set invalidate_range_impl_base(
commandbuffer_type& cmd,
- const address_range &fault_range_in,
+ const address_range32 &fault_range_in,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
@@ -932,7 +932,7 @@ namespace rsx
AUDIT(cause.valid());
AUDIT(fault_range_in.valid());
- address_range fault_range = fault_range_in.to_page_range();
+ address_range32 fault_range = fault_range_in.to_page_range();
intersecting_set trampled_set = get_intersecting_set(fault_range);
@@ -1005,7 +1005,7 @@ namespace rsx
#endif
// If invalidate_range is fault_range, we can stop now
- const address_range invalidate_range = trampled_set.invalidate_range;
+ const address_range32 invalidate_range = trampled_set.invalidate_range;
if (invalidate_range == fault_range)
{
result.violation_handled = true;
@@ -1187,7 +1187,7 @@ namespace rsx
}
template
- std::vector<section_storage_type*> find_texture_from_range(const address_range &test_range, u32 required_pitch = 0, u32 context_mask = 0xFF)
+ std::vector<section_storage_type*> find_texture_from_range(const address_range32 &test_range, u32 required_pitch = 0, u32 context_mask = 0xFF)
{
std::vector<section_storage_type*> results;
@@ -1239,7 +1239,7 @@ namespace rsx
return nullptr;
}
- section_storage_type* find_cached_texture(const address_range &range, const image_section_attributes_t& attr, bool create_if_not_found, bool confirm_dimensions, bool allow_dirty)
+ section_storage_type* find_cached_texture(const address_range32 &range, const image_section_attributes_t& attr, bool create_if_not_found, bool confirm_dimensions, bool allow_dirty)
{
auto &block = m_storage.block_for(range);
@@ -1329,7 +1329,7 @@ namespace rsx
return tex;
}
- section_storage_type* find_flushable_section(const address_range &memory_range)
+ section_storage_type* find_flushable_section(const address_range32 &memory_range)
{
auto &block = m_storage.block_for(memory_range);
for (auto &tex : block)
@@ -1345,7 +1345,7 @@ namespace rsx
}
template <typename... Args>
- void lock_memory_region(commandbuffer_type& cmd, image_storage_type* image, const address_range &rsx_range, bool is_active_surface, u16 width, u16 height, u32 pitch, Args&&... extras)
+ void lock_memory_region(commandbuffer_type& cmd, image_storage_type* image, const address_range32 &rsx_range, bool is_active_surface, u16 width, u16 height, u32 pitch, Args&&... extras)
{
AUDIT(g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer); // this method is only called when either WCB or WDB are enabled
@@ -1414,7 +1414,7 @@ namespace rsx
}
template <typename... Args>
- void commit_framebuffer_memory_region(commandbuffer_type& cmd, const address_range &rsx_range, Args&&... extras)
+ void commit_framebuffer_memory_region(commandbuffer_type& cmd, const address_range32 &rsx_range, Args&&... extras)
{
AUDIT(!g_cfg.video.write_color_buffers || !g_cfg.video.write_depth_buffer);
@@ -1426,7 +1426,7 @@ namespace rsx
}
template <typename... Args>
- void discard_framebuffer_memory_region(commandbuffer_type& /*cmd*/, const address_range& rsx_range, Args&&... /*extras*/)
+ void discard_framebuffer_memory_region(commandbuffer_type& /*cmd*/, const address_range32& rsx_range, Args&&... /*extras*/)
{
if (g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer)
{
@@ -1439,7 +1439,7 @@ namespace rsx
}
}
- void set_memory_read_flags(const address_range &memory_range, memory_read_flags flags)
+ void set_memory_read_flags(const address_range32 &memory_range, memory_read_flags flags)
{
std::lock_guard lock(m_cache_mutex);
@@ -1492,7 +1492,7 @@ namespace rsx
private:
inline void update_flush_always_cache(section_storage_type §ion, bool add)
{
- const address_range& range = section.get_section_range();
+ const address_range32& range = section.get_section_range();
if (add)
{
// Add to m_flush_always_cache
@@ -1529,7 +1529,7 @@ namespace rsx
template <typename... Args>
thrashed_set invalidate_range(
commandbuffer_type& cmd,
- const address_range &range,
+ const address_range32 &range,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
@@ -1568,7 +1568,7 @@ namespace rsx
}
template <typename... Args>
- bool flush_if_cache_miss_likely(commandbuffer_type& cmd, const address_range &range, Args&&... extras)
+ bool flush_if_cache_miss_likely(commandbuffer_type& cmd, const address_range32 &range, Args&&... extras)
{
u32 cur_flushes_this_frame = (m_flushes_this_frame + m_speculations_this_frame);
@@ -1834,7 +1834,7 @@ namespace rsx
m_uncached_subresources.clear();
}
- void notify_surface_changed(const utils::address_range& range)
+ void notify_surface_changed(const utils::address_range32& range)
{
for (auto It = m_temporary_subresource_cache.begin(); It != m_temporary_subresource_cache.end();)
{
@@ -1858,7 +1858,7 @@ namespace rsx
const size3f& scale,
const texture_channel_remap_t& remap,
const texture_cache_search_options& options,
- const utils::address_range& memory_range,
+ const utils::address_range32& memory_range,
rsx::texture_dimension_extended extended_dimension,
SurfaceStoreType& m_rtts, Args&&... /*extras*/)
{
@@ -2362,7 +2362,7 @@ namespace rsx
extended_dimension = std::max(extended_dimension, rsx::texture_dimension_extended::texture_dimension_2d);
}
- const auto lookup_range = utils::address_range::start_length(attributes.address, attributes.pitch * required_surface_height);
+ const auto lookup_range = utils::address_range32::start_length(attributes.address, attributes.pitch * required_surface_height);
reader_lock lock(m_cache_mutex);
auto result = fast_texture_search(cmd, attributes, scale, tex.decoded_remap(),
@@ -2439,7 +2439,7 @@ namespace rsx
attr2.pitch = attr2.width * attr2.bpp;
}
- const auto range = utils::address_range::start_length(attr2.address, attr2.pitch * attr2.height);
+ const auto range = utils::address_range32::start_length(attr2.address, attr2.pitch * attr2.height);
auto ret = fast_texture_search(cmd, attr2, scale, tex.decoded_remap(),
options, range, extended_dimension, m_rtts, std::forward<Args>(extras)...);
@@ -2477,7 +2477,7 @@ namespace rsx
}
const u32 cache_end = attr2.address + (attr2.pitch * attr2.height);
- result.external_subresource_desc.cache_range = utils::address_range::start_end(attributes.address, cache_end);
+ result.external_subresource_desc.cache_range = utils::address_range32::start_end(attributes.address, cache_end);
result.external_subresource_desc.sections_to_copy = std::move(sections);
return result;
@@ -2498,7 +2498,7 @@ namespace rsx
lock.upgrade();
// Invalidate
- const address_range tex_range = address_range::start_length(attributes.address, tex_size);
+ const address_range32 tex_range = address_range32::start_length(attributes.address, tex_size);
invalidate_range_impl_base(cmd, tex_range, invalidation_cause::read, {}, std::forward<Args>(extras)...);
// Upload from CPU. Note that sRGB conversion is handled in the FS
@@ -2595,7 +2595,7 @@ namespace rsx
src_address += (src.width - src_w) * src_bpp;
}
- const auto get_tiled_region = [&](const utils::address_range& range)
+ const auto get_tiled_region = [&](const utils::address_range32& range)
{
auto rsxthr = rsx::get_current_renderer();
return rsxthr->get_tiled_memory_region(range);
@@ -2683,7 +2683,7 @@ namespace rsx
return true;
};
- auto validate_fbo_integrity = [&](const utils::address_range& range, bool is_depth_texture)
+ auto validate_fbo_integrity = [&](const utils::address_range32& range, bool is_depth_texture)
{
const bool will_upload = is_depth_texture ? !!g_cfg.video.read_depth_buffer : !!g_cfg.video.read_color_buffers;
if (!will_upload)
@@ -2705,8 +2705,8 @@ namespace rsx
};
// Check tiled mem
- const auto dst_tile = get_tiled_region(utils::address_range::start_length(dst_address, dst.pitch * dst.clip_height));
- const auto src_tile = get_tiled_region(utils::address_range::start_length(src_address, src.pitch * src.height));
+ const auto dst_tile = get_tiled_region(utils::address_range32::start_length(dst_address, dst.pitch * dst.clip_height));
+ const auto src_tile = get_tiled_region(utils::address_range32::start_length(src_address, src.pitch * src.height));
const auto dst_is_tiled = !!dst_tile;
const auto src_is_tiled = !!src_tile;
@@ -2735,7 +2735,7 @@ namespace rsx
// If we have a pitched write, or a suspiciously large transfer, we likely have a valid write.
// Invalidate surfaces in range. Sample tests should catch overlaps in theory.
- m_rtts.invalidate_range(utils::address_range::start_length(dst_address, dst.pitch * dst_h));
+ m_rtts.invalidate_range(utils::address_range32::start_length(dst_address, dst.pitch * dst_h));
}
// FBO re-validation. It is common for GPU and CPU data to desync as we do not have a way to share memory pages directly between the two (in most setups)
@@ -2863,7 +2863,7 @@ namespace rsx
const auto src_payload_length = (src.pitch * (src_h - 1) + (src_w * src_bpp));
const auto dst_payload_length = (dst.pitch * (dst_h - 1) + (dst_w * dst_bpp));
- const auto dst_range = address_range::start_length(dst_address, dst_payload_length);
+ const auto dst_range = address_range32::start_length(dst_address, dst_payload_length);
if (!use_null_region && !dst_is_render_target)
{
@@ -3035,7 +3035,7 @@ namespace rsx
{
// NOTE: Src address already takes into account the flipped nature of the overlap!
const u32 lookup_mask = rsx::texture_upload_context::blit_engine_src | rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::shader_read;
- auto overlapping_surfaces = find_texture_from_range(address_range::start_length(src_address, src_payload_length), src.pitch, lookup_mask);
+ auto overlapping_surfaces = find_texture_from_range(address_range32::start_length(src_address, src_payload_length), src.pitch, lookup_mask);
auto old_src_area = src_area;
for (const auto &surface : overlapping_surfaces)
@@ -3168,7 +3168,7 @@ namespace rsx
subresource_layout.push_back(subres);
const u32 gcm_format = helpers::get_sized_blit_format(src_is_argb8, dst_is_depth_surface, is_format_convert);
- const auto rsx_range = address_range::start_length(image_base, src.pitch * image_height);
+ const auto rsx_range = address_range32::start_length(image_base, src.pitch * image_height);
lock.upgrade();
@@ -3243,7 +3243,7 @@ namespace rsx
dst_dimensions.height = align2(usable_section_length, dst.pitch) / dst.pitch;
const u32 full_section_length = ((dst_dimensions.height - 1) * dst.pitch) + (dst_dimensions.width * dst_bpp);
- const auto rsx_range = address_range::start_length(dst_base_address, full_section_length);
+ const auto rsx_range = address_range32::start_length(dst_base_address, full_section_length);
lock.upgrade();
@@ -3502,7 +3502,7 @@ namespace rsx
return m_predictor;
}
- bool is_protected(u32 section_base_address, const address_range& test_range, rsx::texture_upload_context context)
+ bool is_protected(u32 section_base_address, const address_range32& test_range, rsx::texture_upload_context context)
{
reader_lock lock(m_cache_mutex);
diff --git a/rpcs3/Emu/RSX/Common/texture_cache_checker.h b/rpcs3/Emu/RSX/Common/texture_cache_checker.h
index 094220ac33..cb283cc96a 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache_checker.h
+++ b/rpcs3/Emu/RSX/Common/texture_cache_checker.h
@@ -112,7 +112,7 @@ namespace rsx {
}
public:
- void set_protection(const address_range& range, utils::protection prot)
+ void set_protection(const address_range32& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro || prot == utils::protection::rw);
@@ -123,7 +123,7 @@ namespace rsx {
}
}
- void discard(const address_range& range)
+ void discard(const address_range32& range)
{
set_protection(range, utils::protection::rw);
}
@@ -136,7 +136,7 @@ namespace rsx {
}
}
- void add(const address_range& range, utils::protection prot)
+ void add(const address_range32& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro);
@@ -147,7 +147,7 @@ namespace rsx {
}
}
- void remove(const address_range& range, utils::protection prot)
+ void remove(const address_range32& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro);
@@ -160,7 +160,7 @@ namespace rsx {
// Returns a lower bound as to how many locked sections are known to be within the given range with each protection {NA,RO}
// The assumption here is that the page in the given range with the largest number of refcounted sections represents the lower bound to how many there must be
- std::pair<u32, u32> get_minimum_number_of_sections(const address_range& range) const
+ std::pair<u32, u32> get_minimum_number_of_sections(const address_range32& range) const
{
AUDIT(range.is_page_range());
@@ -175,7 +175,7 @@ namespace rsx {
return { no,ro };
}
- void check_unprotected(const address_range& range, bool allow_ro = false, bool must_be_empty = true) const
+ void check_unprotected(const address_range32& range, bool allow_ro = false, bool must_be_empty = true) const
{
AUDIT(range.is_page_range());
for (const per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)
diff --git a/rpcs3/Emu/RSX/Common/texture_cache_helpers.h b/rpcs3/Emu/RSX/Common/texture_cache_helpers.h
index 617f3b75db..dfe4cb5ee9 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache_helpers.h
+++ b/rpcs3/Emu/RSX/Common/texture_cache_helpers.h
@@ -69,9 +69,9 @@ namespace rsx
blit_op_result(bool success) : succeeded(success)
{}
- inline address_range to_address_range() const
+ inline address_range32 to_address_range() const
{
- return address_range::start_length(real_dst_address, real_dst_size);
+ return address_range32::start_length(real_dst_address, real_dst_size);
}
};
@@ -182,7 +182,7 @@ namespace rsx
static inline blit_target_properties get_optimal_blit_target_properties(
bool src_is_render_target,
- address_range dst_range,
+ address_range32 dst_range,
u32 dst_pitch,
const sizeu src_dimensions,
const sizeu dst_dimensions)
@@ -209,7 +209,7 @@ namespace rsx
continue;
}
- const auto buffer_range = address_range::start_length(rsx::get_address(buffer.offset, CELL_GCM_LOCATION_LOCAL), pitch * (buffer.height - 1) + (buffer.width * bpp));
+ const auto buffer_range = address_range32::start_length(rsx::get_address(buffer.offset, CELL_GCM_LOCATION_LOCAL), pitch * (buffer.height - 1) + (buffer.width * bpp));
if (dst_range.inside(buffer_range))
{
// Match found
diff --git a/rpcs3/Emu/RSX/Common/texture_cache_predictor.h b/rpcs3/Emu/RSX/Common/texture_cache_predictor.h
index a18fc93c76..bfa500ef1a 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache_predictor.h
+++ b/rpcs3/Emu/RSX/Common/texture_cache_predictor.h
@@ -76,14 +76,14 @@ namespace rsx
using texture_format = typename traits::texture_format;
using section_storage_type = typename traits::section_storage_type;
- address_range cpu_range;
+ address_range32 cpu_range;
texture_format format;
texture_upload_context context;
// Constructors
texture_cache_predictor_key() = default;
- texture_cache_predictor_key(const address_range& _cpu_range, texture_format _format, texture_upload_context _context)
+ texture_cache_predictor_key(const address_range32& _cpu_range, texture_format _format, texture_upload_context _context)
: cpu_range(_cpu_range)
, format(_format)
, context(_context)
@@ -398,7 +398,7 @@ struct std::hash<rsx::texture_cache_predictor_key<traits>>
{
usz operator()(const rsx::texture_cache_predictor_key<traits>& k) const
{
- usz result = std::hash<utils::address_range>{}(k.cpu_range);
+ usz result = std::hash<utils::address_range32>{}(k.cpu_range);
result ^= static_cast<usz>(k.format);
result ^= (static_cast<usz>(k.context) << 16);
return result;
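
This specialization composes the std::hash<utils::address_range32> instance added in address_range.h with the key's remaining fields by XOR. A self-contained analogue of that composition pattern (the key type is simplified, and a 64-bit size_t is assumed, mirroring the static_assert the PR keeps in address_range.h):

#include <cstdint>
#include <functional>
#include <unordered_map>

struct range_key
{
	std::uint32_t start, end; // inclusive range bounds, as in address_range32
	std::uint32_t format;

	bool operator==(const range_key&) const = default; // required by unordered_map
};

template <>
struct std::hash<range_key>
{
	std::size_t operator()(const range_key& k) const noexcept
	{
		// Pack the two 32-bit bounds into one 64-bit value (unique per range)...
		std::size_t result = (std::size_t{k.start} << 32) | std::size_t{k.end};
		// ...then fold the remaining field in by XOR, as the predictor key does.
		result ^= static_cast<std::size_t>(k.format);
		return result;
	}
};

int main()
{
	std::unordered_map<range_key, int> cache;
	cache[{0x1000, 0x1FFF, 5}] = 42;
	return cache.at({0x1000, 0x1FFF, 5}) == 42 ? 0 : 1;
}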
diff --git a/rpcs3/Emu/RSX/Common/texture_cache_utils.h b/rpcs3/Emu/RSX/Common/texture_cache_utils.h
index 3a87deceb7..8c5defdd0b 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache_utils.h
+++ b/rpcs3/Emu/RSX/Common/texture_cache_utils.h
@@ -27,7 +27,7 @@ namespace rsx
hash
};
- static inline void memory_protect(const address_range& range, utils::protection prot)
+ static inline void memory_protect(const address_range32& range, utils::protection prot)
{
ensure(range.is_page_range());
@@ -232,7 +232,7 @@ namespace rsx
private:
u32 index = 0;
- address_range range = {};
+ address_range32 range = {};
block_container_type sections = {};
unowned_container_type unowned; // pointers to sections from other blocks that overlap this block
atomic_t<u32> exists_count = 0;
@@ -269,7 +269,7 @@ namespace rsx
m_storage = storage;
index = _index;
- range = address_range::start_length(index * block_size, block_size);
+ range = address_range32::start_length(index * block_size, block_size);
AUDIT(range.is_page_range() && get_start() / block_size == index);
}
@@ -346,12 +346,12 @@ namespace rsx
}
// Address range
- inline const address_range& get_range() const { return range; }
+ inline const address_range32& get_range() const { return range; }
inline u32 get_start() const { return range.start; }
inline u32 get_end() const { return range.end; }
inline u32 get_index() const { return index; }
inline bool overlaps(const section_storage_type& section, section_bounds bounds = full_range) const { return section.overlaps(range, bounds); }
- inline bool overlaps(const address_range& _range) const { return range.overlaps(_range); }
+ inline bool overlaps(const address_range32& _range) const { return range.overlaps(_range); }
/**
* Section callbacks
@@ -511,7 +511,7 @@ namespace rsx
return blocks[address / block_size];
}
- inline block_type& block_for(const address_range &range)
+ inline block_type& block_for(const address_range32 &range)
{
AUDIT(range.valid());
return block_for(range.start);
@@ -689,7 +689,7 @@ namespace rsx
// Constructors
range_iterator_tmpl() = default; // end iterator
- explicit range_iterator_tmpl(parent_type &storage, const address_range &_range, section_bounds _bounds, bool _locked_only)
+ explicit range_iterator_tmpl(parent_type &storage, const address_range32 &_range, section_bounds _bounds, bool _locked_only)
: range(_range)
, bounds(_bounds)
, block(&storage.block_for(range.start))
@@ -704,7 +704,7 @@ namespace rsx
private:
// Members
- address_range range;
+ address_range32 range;
section_bounds bounds;
block_type *block = nullptr;
@@ -825,16 +825,16 @@ namespace rsx
using range_iterator = range_iterator_tmpl;
using range_const_iterator = range_iterator_tmpl;
- inline range_iterator range_begin(const address_range &range, section_bounds bounds, bool locked_only = false) {
+ inline range_iterator range_begin(const address_range32 &range, section_bounds bounds, bool locked_only = false) {
return range_iterator(*this, range, bounds, locked_only);
}
- inline range_const_iterator range_begin(const address_range &range, section_bounds bounds, bool locked_only = false) const {
+ inline range_const_iterator range_begin(const address_range32 &range, section_bounds bounds, bool locked_only = false) const {
return range_const_iterator(*this, range, bounds, locked_only);
}
inline range_const_iterator range_begin(u32 address, section_bounds bounds, bool locked_only = false) const {
- return range_const_iterator(*this, address_range::start_length(address, 1), bounds, locked_only);
+ return range_const_iterator(*this, address_range32::start_length(address, 1), bounds, locked_only);
}
constexpr range_iterator range_end()
@@ -881,9 +881,9 @@ namespace rsx
class buffered_section
{
private:
- address_range locked_range;
- address_range cpu_range = {};
- address_range confirmed_range;
+ address_range32 locked_range;
+ address_range32 cpu_range = {};
+ address_range32 confirmed_range;
utils::protection protection = utils::protection::rw;
@@ -891,7 +891,7 @@ namespace rsx
u64 mem_hash = 0;
bool locked = false;
- void init_lockable_range(const address_range& range);
+ void init_lockable_range(const address_range32& range);
u64 fast_hash_internal() const;
public:
@@ -899,7 +899,7 @@ namespace rsx
buffered_section() = default;
~buffered_section() = default;
- void reset(const address_range& memory_range);
+ void reset(const address_range32& memory_range);
protected:
void invalidate_range();
@@ -911,7 +911,7 @@ namespace rsx
bool sync() const;
void discard();
- const address_range& get_bounds(section_bounds bounds) const;
+ const address_range32& get_bounds(section_bounds bounds) const;
bool is_locked(bool actual_page_flags = false) const;
@@ -923,12 +923,12 @@ namespace rsx
return get_bounds(bounds).overlaps(address);
}
- inline bool overlaps(const address_range& other, section_bounds bounds) const
+ inline bool overlaps(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
- inline bool overlaps(const address_range_vector& other, section_bounds bounds) const
+ inline bool overlaps(const address_range_vector32& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
@@ -938,12 +938,12 @@ namespace rsx
return get_bounds(bounds).overlaps(other.get_bounds(bounds));
}
- inline bool inside(const address_range& other, section_bounds bounds) const
+ inline bool inside(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
- inline bool inside(const address_range_vector& other, section_bounds bounds) const
+ inline bool inside(const address_range_vector32& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
@@ -953,12 +953,12 @@ namespace rsx
return get_bounds(bounds).inside(other.get_bounds(bounds));
}
- inline s32 signed_distance(const address_range& other, section_bounds bounds) const
+ inline s32 signed_distance(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).signed_distance(other);
}
- inline u32 distance(const address_range& other, section_bounds bounds) const
+ inline u32 distance(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).distance(other);
}
@@ -981,18 +981,18 @@ namespace rsx
return cpu_range.valid() ? cpu_range.length() : 0;
}
- inline const address_range& get_locked_range() const
+ inline const address_range32& get_locked_range() const
{
AUDIT(locked);
return locked_range;
}
- inline const address_range& get_section_range() const
+ inline const address_range32& get_section_range() const
{
return cpu_range;
}
- const address_range& get_confirmed_range() const
+ const address_range32& get_confirmed_range() const
{
return confirmed_range.valid() ? confirmed_range : cpu_range;
}
@@ -1005,7 +1005,7 @@ namespace rsx
return { confirmed_range.start - cpu_range.start, confirmed_range.length() };
}
- inline bool matches(const address_range& range) const
+ inline bool matches(const address_range32& range) const
{
return cpu_range.valid() && cpu_range == range;
}
@@ -1015,7 +1015,7 @@ namespace rsx
return protection;
}
- inline address_range get_min_max(const address_range& current_min_max, section_bounds bounds) const
+ inline address_range32 get_min_max(const address_range32& current_min_max, section_bounds bounds) const
{
return get_bounds(bounds).get_min_max(current_min_max);
}
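Three distinct ranges now live on every buffered_section, so the bounds parameter threaded through the predicates above decides which one a query sees; get_confirmed_range() already shows the fallback rule. A minimal stand-in for that dispatch (member names simplified, AUDIT checks omitted; the real accessor is buffered_section::get_bounds):

#include <cstdint>

enum section_bounds { full_range, locked_range, confirmed_range };

struct range32
{
    uint32_t start = UINT32_MAX, end = 0;
    bool valid() const { return start <= end; }
};

struct section_stub
{
    range32 cpu, locked, confirmed;

    // confirmed falls back to the full cpu range when it was never
    // narrowed, matching get_confirmed_range() in the hunk above
    const range32& get_bounds(section_bounds b) const
    {
        switch (b)
        {
        case locked_range:    return locked;
        case confirmed_range: return confirmed.valid() ? confirmed : cpu;
        case full_range:
        default:              return cpu;
        }
    }
};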
@@ -1088,7 +1088,7 @@ namespace rsx
rsx::texture_upload_context context = rsx::texture_upload_context::shader_read;
rsx::texture_dimension_extended image_type = rsx::texture_dimension_extended::texture_dimension_2d;
- address_range_vector flush_exclusions; // Address ranges that will be skipped during flush
+ address_range_vector32 flush_exclusions; // Address ranges that will be skipped during flush
predictor_type *m_predictor = nullptr;
usz m_predictor_key_hash = 0;
@@ -1124,7 +1124,7 @@ namespace rsx
/**
* Reset
*/
- void reset(const address_range &memory_range)
+ void reset(const address_range32 &memory_range)
{
AUDIT(memory_range.valid());
AUDIT(!is_locked());
@@ -1537,7 +1537,7 @@ namespace rsx
void imp_flush_memcpy(u32 vm_dst, u8* src, u32 len) const
{
u8 *dst = get_ptr<u8>(vm_dst);
- address_range copy_range = address_range::start_length(vm_dst, len);
+ address_range32 copy_range = address_range32::start_length(vm_dst, len);
if (flush_exclusions.empty() || !copy_range.overlaps(flush_exclusions))
{
@@ -1553,7 +1553,7 @@ namespace rsx
// Otherwise, we need to filter the memcpy with our flush exclusions
// Should be relatively rare
- address_range_vector vec;
+ address_range_vector32 vec;
vec.merge(copy_range);
vec.exclude(flush_exclusions);
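The merge/exclude pair is the whole trick: after excluding, the vector holds only the disjoint pieces of copy_range that still need copying, and the flush walks those survivors. A sketch of that follow-up loop under simplified types (the real code copies through vm pointers):

#include <cstdint>
#include <cstring>

template <typename RangeVec, typename Range>
void filtered_copy(uint8_t* dst, const uint8_t* src,
                   const Range& copy_range, const RangeVec& vec)
{
    for (const auto& sub : vec)
    {
        if (!sub.valid())
            continue; // exclusion may leave invalidated slots in place

        const uint32_t offset = sub.start - copy_range.start;
        std::memcpy(dst + offset, src + offset, sub.length());
    }
}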
@@ -1673,7 +1673,7 @@ namespace rsx
cleanup_flush();
}
- void add_flush_exclusion(const address_range& rng)
+ void add_flush_exclusion(const address_range32& rng)
{
AUDIT(is_locked() && is_flushable());
const auto _rng = rng.get_intersect(get_section_range());
@@ -1804,7 +1804,7 @@ namespace rsx
/**
* Comparison
*/
- inline bool matches(const address_range &memory_range) const
+ inline bool matches(const address_range32 &memory_range) const
{
return valid_range() && rsx::buffered_section::matches(memory_range);
}
@@ -1846,7 +1846,7 @@ namespace rsx
return matches(format, width, height, depth, mipmaps);
}
- bool matches(const address_range& memory_range, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
+ bool matches(const address_range32& memory_range, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
{
if (!valid_range())
return false;
diff --git a/rpcs3/Emu/RSX/Core/RSXContext.cpp b/rpcs3/Emu/RSX/Core/RSXContext.cpp
index 3d770b8ce2..aa5962c745 100644
--- a/rpcs3/Emu/RSX/Core/RSXContext.cpp
+++ b/rpcs3/Emu/RSX/Core/RSXContext.cpp
@@ -4,7 +4,7 @@
namespace rsx
{
- GCM_tile_reference GCM_context::get_tiled_memory_region(const utils::address_range& range) const
+ GCM_tile_reference GCM_context::get_tiled_memory_region(const utils::address_range32& range) const
{
if (rsx::get_location(range.start) != CELL_GCM_LOCATION_MAIN)
{
@@ -27,7 +27,7 @@ namespace rsx
}
const auto tile_base_address = iomap_table.get_addr(tile.offset);
- const auto tile_range = utils::address_range::start_length(tile_base_address, tile.size);
+ const auto tile_range = utils::address_range32::start_length(tile_base_address, tile.size);
if (range.inside(tile_range))
{
@@ -39,12 +39,12 @@ namespace rsx
return {};
}
- utils::address_range GCM_tile_reference::tile_align(const utils::address_range& range) const
+ utils::address_range32 GCM_tile_reference::tile_align(const utils::address_range32& range) const
{
const auto alignment = 64 * tile->pitch;
const u32 start_offset = rsx::align_down2(range.start - base_address, alignment);
const u32 end_offset = rsx::align2(range.end - base_address + 1, alignment);
- return utils::address_range::start_length(start_offset + base_address, end_offset - start_offset);
+ return utils::address_range32::start_length(start_offset + base_address, end_offset - start_offset);
}
}
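tile_align rounds a request outward to whole 64-line tile bands. Worked numbers, taking base_address as 0 and a hypothetical pitch of 0x200 (so one band covers 64 * 0x200 = 0x8000 bytes):

#include <cstdint>

constexpr uint32_t align_down(uint32_t v, uint32_t a) { return v & ~(a - 1); }
constexpr uint32_t align_up(uint32_t v, uint32_t a)   { return (v + a - 1) & ~(a - 1); }

constexpr uint32_t alignment = 64 * 0x200; // 0x8000

// request [0x9000, 0x9FFF] relative to the tile base
constexpr uint32_t start = align_down(0x9000, alignment);   // 0x8000
constexpr uint32_t end   = align_up(0x9FFF + 1, alignment); // 0x10000 (one past)

static_assert(start == 0x8000 && end - start == 0x8000);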
diff --git a/rpcs3/Emu/RSX/Core/RSXContext.h b/rpcs3/Emu/RSX/Core/RSXContext.h
index 3b69d569aa..dd18747012 100644
--- a/rpcs3/Emu/RSX/Core/RSXContext.h
+++ b/rpcs3/Emu/RSX/Core/RSXContext.h
@@ -27,7 +27,7 @@ namespace rsx
return !!tile;
}
- utils::address_range tile_align(const rsx::address_range& range) const;
+ utils::address_range32 tile_align(const rsx::address_range32& range) const;
};
struct GCM_context
@@ -53,6 +53,6 @@ namespace rsx
atomic_t<u64> unsent_gcm_events = 0; // Unsent event bits when aborting RSX/VBLANK thread (will be sent on savestate load)
- GCM_tile_reference get_tiled_memory_region(const utils::address_range& range) const;
+ GCM_tile_reference get_tiled_memory_region(const utils::address_range32& range) const;
};
}
diff --git a/rpcs3/Emu/RSX/Core/RSXReservationLock.hpp b/rpcs3/Emu/RSX/Core/RSXReservationLock.hpp
index 06ae47796e..7a4ab76624 100644
--- a/rpcs3/Emu/RSX/Core/RSXReservationLock.hpp
+++ b/rpcs3/Emu/RSX/Core/RSXReservationLock.hpp
@@ -49,9 +49,9 @@ namespace rsx
return;
}
- const auto range1 = utils::address_range::start_length(dst_addr, dst_length);
- const auto range2 = utils::address_range::start_length(src_addr, src_length);
- utils::address_range target_range;
+ const auto range1 = utils::address_range32::start_length(dst_addr, dst_length);
+ const auto range2 = utils::address_range32::start_length(src_addr, src_length);
+ utils::address_range32 target_range;
if (!range1.overlaps(range2)) [[likely]]
{
diff --git a/rpcs3/Emu/RSX/GL/GLDMA.cpp b/rpcs3/Emu/RSX/GL/GLDMA.cpp
index 758205214e..3c12009f67 100644
--- a/rpcs3/Emu/RSX/GL/GLDMA.cpp
+++ b/rpcs3/Emu/RSX/GL/GLDMA.cpp
@@ -29,7 +29,7 @@ namespace gl
gl::check_state();
}
- void* dma_block::map(const utils::address_range& range) const
+ void* dma_block::map(const utils::address_range32& range) const
{
ensure(range.inside(this->range()));
return vm::get_super_ptr(range.start);
@@ -58,7 +58,7 @@ namespace gl
}
}
- bool dma_block::can_map(const utils::address_range& range) const
+ bool dma_block::can_map(const utils::address_range32& range) const
{
if (m_parent)
{
@@ -73,11 +73,11 @@ namespace gl
g_dma_pool.clear();
}
- utils::address_range to_dma_block_range(u32 start, u32 length)
+ utils::address_range32 to_dma_block_range(u32 start, u32 length)
{
const auto start_block_address = start & s_dma_block_mask;
const auto end_block_address = (start + length + s_dma_block_size - 1) & s_dma_block_mask;
- return utils::address_range::start_end(start_block_address, end_block_address);
+ return utils::address_range32::start_end(start_block_address, end_block_address);
}
const dma_block& get_block(u32 start, u32 length)
@@ -91,7 +91,7 @@ namespace gl
return *block;
}
- const auto range = utils::address_range::start_length(start, length);
+ const auto range = utils::address_range32::start_length(start, length);
if (block->can_map(range)) [[ likely ]]
{
return *block;
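to_dma_block_range rounds both endpoints outward to block granularity, so a request straddling a boundary resolves to every block it touches. A worked example with assumed constants (the real s_dma_block_size/s_dma_block_mask live in GLDMA.cpp):

#include <cstdint>

constexpr uint32_t block_size = 0x10000;           // assumed for illustration
constexpr uint32_t block_mask = ~(block_size - 1);

// a 3-byte request straddling a block boundary: bytes 0x1FFFE..0x20000
constexpr uint32_t start  = 0x1FFFE;
constexpr uint32_t length = 3;

constexpr uint32_t first_block = start & block_mask;                             // 0x10000
constexpr uint32_t last_block  = (start + length + block_size - 1) & block_mask; // 0x30000

static_assert(first_block == 0x10000 && last_block == 0x30000);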
diff --git a/rpcs3/Emu/RSX/GL/GLDMA.h b/rpcs3/Emu/RSX/GL/GLDMA.h
index 1e4b31bae0..c5862f5166 100644
--- a/rpcs3/Emu/RSX/GL/GLDMA.h
+++ b/rpcs3/Emu/RSX/GL/GLDMA.h
@@ -21,17 +21,17 @@ namespace gl
void allocate(u32 base_address, u32 block_size);
void resize(u32 new_length);
- void* map(const utils::address_range& range) const;
+ void* map(const utils::address_range32& range) const;
void set_parent(const dma_block* other);
const dma_block* head() const { return m_parent ? m_parent : this; }
- bool can_map(const utils::address_range& range) const;
+ bool can_map(const utils::address_range32& range) const;
u32 base_addr() const { return m_base_address; }
u32 length() const { return m_data ? static_cast<u32>(m_data->size()) : 0; }
bool empty() const { return length() == 0; }
buffer* get() const { return m_data.get(); }
- utils::address_range range() const { return utils::address_range::start_length(m_base_address, length()); }
+ utils::address_range32 range() const { return utils::address_range32::start_length(m_base_address, length()); }
protected:
u32 m_base_address = 0;
diff --git a/rpcs3/Emu/RSX/GL/GLGSRender.cpp b/rpcs3/Emu/RSX/GL/GLGSRender.cpp
index b6c27c4dd8..1470f8fead 100644
--- a/rpcs3/Emu/RSX/GL/GLGSRender.cpp
+++ b/rpcs3/Emu/RSX/GL/GLGSRender.cpp
@@ -1240,7 +1240,7 @@ bool GLGSRender::on_access_violation(u32 address, bool is_writing)
return true;
}
-void GLGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
+void GLGSRender::on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause)
{
gl::command_context cmd{ gl_state };
auto data = m_gl_texture_cache.invalidate_range(cmd, range, cause);
diff --git a/rpcs3/Emu/RSX/GL/GLGSRender.h b/rpcs3/Emu/RSX/GL/GLGSRender.h
index 59a7e075b9..51b126cc73 100644
--- a/rpcs3/Emu/RSX/GL/GLGSRender.h
+++ b/rpcs3/Emu/RSX/GL/GLGSRender.h
@@ -223,7 +223,7 @@ protected:
void do_local_task(rsx::FIFO::state state) override;
bool on_access_violation(u32 address, bool is_writing) override;
- void on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause) override;
+ void on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause) override;
void notify_tile_unbound(u32 tile) override;
void on_semaphore_acquire_wait() override;
};
diff --git a/rpcs3/Emu/RSX/GL/GLPresent.cpp b/rpcs3/Emu/RSX/GL/GLPresent.cpp
index c73415edb2..ed71652063 100644
--- a/rpcs3/Emu/RSX/GL/GLPresent.cpp
+++ b/rpcs3/Emu/RSX/GL/GLPresent.cpp
@@ -129,7 +129,7 @@ gl::texture* GLGSRender::get_present_source(gl::present_surface_info* info, cons
initialize_scratch_image();
gl::command_context cmd{ gl_state };
- const auto range = utils::address_range::start_length(info->address, info->pitch * info->height);
+ const auto range = utils::address_range32::start_length(info->address, info->pitch * info->height);
m_gl_texture_cache.invalidate_range(cmd, range, rsx::invalidation_cause::read);
flip_image->copy_from(vm::base(info->address), static_cast<gl::texture::format>(expected_format), gl::texture::type::uint_8_8_8_8, unpack_settings);
diff --git a/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp b/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp
index 87e5ee5645..c222262699 100644
--- a/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp
+++ b/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp
@@ -153,7 +153,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool /*
{
if (m_surface_info[i].pitch && g_cfg.video.write_color_buffers)
{
- const utils::address_range surface_range = m_surface_info[i].get_memory_range();
+ const utils::address_range32 surface_range = m_surface_info[i].get_memory_range();
m_gl_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_gl_texture_cache.flush_if_cache_miss_likely(cmd, surface_range);
}
@@ -182,7 +182,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool /*
if (m_depth_surface_info.pitch && g_cfg.video.write_depth_buffer)
{
- const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
+ const utils::address_range32 surface_range = m_depth_surface_info.get_memory_range();
m_gl_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_gl_texture_cache.flush_if_cache_miss_likely(cmd, surface_range);
}
diff --git a/rpcs3/Emu/RSX/GL/GLTextureCache.h b/rpcs3/Emu/RSX/GL/GLTextureCache.h
index 45d787c184..f3b37e7f27 100644
--- a/rpcs3/Emu/RSX/GL/GLTextureCache.h
+++ b/rpcs3/Emu/RSX/GL/GLTextureCache.h
@@ -148,7 +148,7 @@ namespace gl
}
}
- void dma_transfer(gl::command_context& cmd, gl::texture* src, const areai& /*src_area*/, const utils::address_range& /*valid_range*/, u32 pitch)
+ void dma_transfer(gl::command_context& cmd, gl::texture* src, const areai& /*src_area*/, const utils::address_range32& /*valid_range*/, u32 pitch)
{
init_buffer(src);
glGetError();
@@ -600,7 +600,7 @@ namespace gl
copy_transfer_regions_impl(cmd, dst->image(), region);
}
- cached_texture_section* create_new_texture(gl::command_context& cmd, const utils::address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
+ cached_texture_section* create_new_texture(gl::command_context& cmd, const utils::address_range32 &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t /*flags*/) override
{
const rsx::image_section_attributes_t search_desc = { .gcm_format = gcm_format, .width = width, .height = height, .depth = depth, .mipmaps = mipmaps };
@@ -708,7 +708,7 @@ namespace gl
cached_texture_section* create_nul_section(
gl::command_context& /*cmd*/,
- const utils::address_range& rsx_range,
+ const utils::address_range32& rsx_range,
const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& /*tile*/,
bool /*memory_load*/) override
@@ -726,7 +726,7 @@ namespace gl
return &cached;
}
- cached_texture_section* upload_image_from_cpu(gl::command_context& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
+ cached_texture_section* upload_image_from_cpu(gl::command_context& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool input_swizzled) override
{
auto section = create_new_texture(cmd, rsx_range, width, height, depth, mipmaps, pitch, gcm_format, context, type, input_swizzled,
diff --git a/rpcs3/Emu/RSX/GL/glutils/ring_buffer.cpp b/rpcs3/Emu/RSX/GL/glutils/ring_buffer.cpp
index da77b50cbe..146136e4b4 100644
--- a/rpcs3/Emu/RSX/GL/glutils/ring_buffer.cpp
+++ b/rpcs3/Emu/RSX/GL/glutils/ring_buffer.cpp
@@ -280,7 +280,7 @@ namespace gl
void scratch_ring_buffer::pop_barrier(u32 start, u32 length)
{
- const auto range = utils::address_range::start_length(start, length);
+ const auto range = utils::address_range32::start_length(start, length);
m_barriers.erase(std::remove_if(m_barriers.begin(), m_barriers.end(), [&range](auto& barrier_)
{
if (barrier_.range.overlaps(range))
@@ -302,7 +302,7 @@ namespace gl
}
barrier barrier_;
- barrier_.range = utils::address_range::start_length(start, length);
+ barrier_.range = utils::address_range32::start_length(start, length);
barrier_.signal.create();
m_barriers.emplace_back(barrier_);
}
diff --git a/rpcs3/Emu/RSX/GL/glutils/ring_buffer.h b/rpcs3/Emu/RSX/GL/glutils/ring_buffer.h
index 97f802ddf2..37ba0e4bdf 100644
--- a/rpcs3/Emu/RSX/GL/glutils/ring_buffer.h
+++ b/rpcs3/Emu/RSX/GL/glutils/ring_buffer.h
@@ -88,7 +88,7 @@ namespace gl
struct barrier
{
fence signal;
- utils::address_range range;
+ utils::address_range32 range;
};
buffer m_storage;
diff --git a/rpcs3/Emu/RSX/Host/MM.cpp b/rpcs3/Emu/RSX/Host/MM.cpp
index cf21b6e046..e1313cc13a 100644
--- a/rpcs3/Emu/RSX/Host/MM.cpp
+++ b/rpcs3/Emu/RSX/Host/MM.cpp
@@ -18,7 +18,7 @@ namespace rsx
{
for (const auto& block : g_deferred_mprotect_queue)
{
- utils::memory_protect(reinterpret_cast<void*>(block.start), block.length, block.prot);
+ utils::memory_protect(reinterpret_cast<void*>(block.range.start), block.range.length(), block.prot);
}
g_deferred_mprotect_queue.clear();
@@ -28,7 +28,7 @@ namespace rsx
{
// We could stack and merge requests here, but that is more trouble than it is truly worth.
// A fresh call to memory_protect only takes a few nanoseconds of setup overhead, it is not worth the risk of hanging because of conflicts.
- g_deferred_mprotect_queue.push_back({ start, length, prot });
+ g_deferred_mprotect_queue.push_back({ utils::address_range64::start_length(start, length), prot });
}
void mm_protect(void* ptr, u64 length, utils::protection prot)
@@ -41,7 +41,7 @@ namespace rsx
// Naive merge. Eventually it makes more sense to do conflict resolution, but it's not as important.
const auto start = reinterpret_cast<u64>(ptr);
- const auto end = start + length;
+ const auto range = utils::address_range64::start_length(start, length);
std::lock_guard lock(g_mprotect_queue_lock);
@@ -50,7 +50,7 @@ namespace rsx
// Basically an unlock op. Flush if any overlap is detected
for (const auto& block : g_deferred_mprotect_queue)
{
- if (block.overlaps(start, end))
+ if (block.overlaps(range))
{
mm_flush_mprotect_queue_internal();
break;
@@ -90,6 +90,24 @@ namespace rsx
}
}
+ void mm_flush(const rsx::simple_array<utils::address_range64>& ranges)
+ {
+ std::lock_guard lock(g_mprotect_queue_lock);
+ if (g_deferred_mprotect_queue.empty())
+ {
+ return;
+ }
+
+ for (const auto& block : g_deferred_mprotect_queue)
+ {
+ if (ranges.any(FN(block.overlaps(x))))
+ {
+ mm_flush_mprotect_queue_internal();
+ return;
+ }
+ }
+ }
+
void mm_flush_lazy()
{
if (!g_cfg.video.multithreaded_rsx)
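The new overload lets one caller probe several host ranges against the deferred-mprotect queue in a single pass; FN is rpcs3's shorthand for a one-argument lambda, so ranges.any(FN(block.overlaps(x))) reads as "any requested range overlapping this queued block". A hedged usage sketch (the address variables are hypothetical):

// collect both sides of a host-memory copy and issue one flush query
// instead of two separate mm_flush(u32) calls
rsx::simple_array<utils::address_range64> ranges =
{
    utils::address_range64::start_length(dst_host_addr, dst_len),
    utils::address_range64::start_length(src_host_addr, src_len)
};
rsx::mm_flush(ranges);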
diff --git a/rpcs3/Emu/RSX/Host/MM.h b/rpcs3/Emu/RSX/Host/MM.h
index e9415a685f..43053cdd17 100644
--- a/rpcs3/Emu/RSX/Host/MM.h
+++ b/rpcs3/Emu/RSX/Host/MM.h
@@ -3,28 +3,24 @@
#include <util/types.hpp>
#include <util/vm.hpp>
+#include "Emu/RSX/Common/simple_array.hpp"
+#include "Utilities/address_range.h"
+
namespace rsx
{
struct MM_block
{
- u64 start;
- u64 length;
+ utils::address_range64 range;
utils::protection prot;
- inline bool overlaps(u64 start, u64 end) const
+ inline bool overlaps(const utils::address_range64& test) const
{
- // [Start, End] is not a proper closed range, there is an off-by-one by design.
- // FIXME: Use address_range64
- const u64 this_end = this->start + this->length;
- return (this->start < end && start < this_end);
+ return range.overlaps(test);
}
inline bool overlaps(u64 addr) const
{
- // [Start, End] is not a proper closed range, there is an off-by-one by design.
- // FIXME: Use address_range64
- const u64 this_end = this->start + this->length;
- return (addr >= start && addr < this_end);
+ return range.overlaps(addr);
}
};
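This removes the hand-rolled half-open arithmetic the deleted comments apologised for: address_range64 stores an inclusive end, so end = start + length - 1 and the overlap test becomes symmetric. An equivalent standalone check (illustrative only):

#include <cstdint>

struct closed_range { uint64_t start, end; }; // end is the last byte, inclusive

// equivalent to the removed half-open test once end = start + length - 1
constexpr bool overlaps(const closed_range& a, const closed_range& b)
{
    return a.start <= b.end && b.start <= a.end;
}

static_assert(overlaps({0x1000, 0x1FFF}, {0x1FFF, 0x2FFE}));  // share one byte
static_assert(!overlaps({0x1000, 0x1FFF}, {0x2000, 0x2FFF})); // merely adjacent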
@@ -36,5 +32,6 @@ namespace rsx
void mm_protect(void* start, u64 length, utils::protection prot);
void mm_flush_lazy();
void mm_flush(u32 vm_address);
+ void mm_flush(const rsx::simple_array<utils::address_range64>& ranges);
void mm_flush();
}
diff --git a/rpcs3/Emu/RSX/NV47/HW/nv0039.cpp b/rpcs3/Emu/RSX/NV47/HW/nv0039.cpp
index fb20d93a69..a41fae11dc 100644
--- a/rpcs3/Emu/RSX/NV47/HW/nv0039.cpp
+++ b/rpcs3/Emu/RSX/NV47/HW/nv0039.cpp
@@ -3,6 +3,7 @@
#include "Emu/RSX/RSXThread.h"
#include "Emu/RSX/Core/RSXReservationLock.hpp"
+#include "Emu/RSX/Host/MM.h"
#include "context_accessors.define.h"
@@ -57,8 +58,15 @@ namespace rsx
auto res = ::rsx::reservation_lock(write_address, write_length, read_address, read_length);
- u8 *dst = vm::_ptr<u8>(write_address);
- const u8 *src = vm::_ptr<u8>(read_address);
+ u8* dst = vm::_ptr<u8>(write_address);
+ const u8* src = vm::_ptr<u8>(read_address);
+
+ rsx::simple_array<utils::address_range64> flush_mm_ranges =
+ {
+ utils::address_range64::start_length(reinterpret_cast<u64>(dst), write_length),
+ utils::address_range64::start_length(reinterpret_cast<u64>(src), read_length)
+ };
+ rsx::mm_flush(flush_mm_ranges);
const bool is_overlapping = dst_dma == src_dma && [&]() -> bool
{
diff --git a/rpcs3/Emu/RSX/NV47/HW/nv3089.cpp b/rpcs3/Emu/RSX/NV47/HW/nv3089.cpp
index 1082ae41b9..cbc2e54296 100644
--- a/rpcs3/Emu/RSX/NV47/HW/nv3089.cpp
+++ b/rpcs3/Emu/RSX/NV47/HW/nv3089.cpp
@@ -328,8 +328,8 @@ namespace rsx
{
const bool is_overlapping = !src_is_modified && dst.dma == src.dma && [&]() -> bool
{
- const auto src_range = utils::address_range::start_length(src.rsx_address, src.pitch * (src.height - 1) + (src.bpp * src.width));
- const auto dst_range = utils::address_range::start_length(dst.rsx_address, dst.pitch * (dst.clip_height - 1) + (dst.bpp * dst.clip_width));
+ const auto src_range = utils::address_range32::start_length(src.rsx_address, src.pitch * (src.height - 1) + (src.bpp * src.width));
+ const auto dst_range = utils::address_range32::start_length(dst.rsx_address, dst.pitch * (dst.clip_height - 1) + (dst.bpp * dst.clip_width));
return src_range.overlaps(dst_range);
}();
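The size formula on both sides of this check (and in the blit-target hunk earlier) deliberately counts only width * bpp for the final row: bytes between the last row's pixel payload and the next pitch stride belong to no surface. Worked numbers:

#include <cstdint>

constexpr uint32_t pitch = 0x100, bpp = 4, width = 0x20, height = 4;
constexpr uint32_t surface_bytes = pitch * (height - 1) + bpp * width;

static_assert(surface_bytes == 0x380);  // rows 0..2 at full pitch, row 3 payload only
static_assert(pitch * height == 0x400); // the naive size would overclaim 0x80 bytes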
@@ -612,7 +612,7 @@ namespace rsx
const bool interpolate = in_inter == blit_engine::transfer_interpolator::foh;
auto real_dst = dst.pixels;
- const auto tiled_region = RSX(ctx)->get_tiled_memory_region(utils::address_range::start_length(dst.rsx_address, dst.pitch * dst.clip_height));
+ const auto tiled_region = RSX(ctx)->get_tiled_memory_region(utils::address_range32::start_length(dst.rsx_address, dst.pitch * dst.clip_height));
std::vector<u8> tmp;
if (tiled_region)
diff --git a/rpcs3/Emu/RSX/RSXOffload.cpp b/rpcs3/Emu/RSX/RSXOffload.cpp
index 5f8737543e..f306784d59 100644
--- a/rpcs3/Emu/RSX/RSXOffload.cpp
+++ b/rpcs3/Emu/RSX/RSXOffload.cpp
@@ -212,7 +212,7 @@ namespace rsx
}
// Fault recovery
- utils::address_range dma_manager::get_fault_range(bool writing) const
+ utils::address_range32 dma_manager::get_fault_range(bool writing) const
{
const auto m_current_job = ensure(m_thread->m_current_job);
@@ -237,6 +237,6 @@ namespace rsx
fmt::throw_exception("Unreachable");
}
- return utils::address_range::start_length(vm::get_addr(address), range);
+ return utils::address_range32::start_length(vm::get_addr(address), range);
}
}
diff --git a/rpcs3/Emu/RSX/RSXOffload.h b/rpcs3/Emu/RSX/RSXOffload.h
index 8622134962..6e1e5d7476 100644
--- a/rpcs3/Emu/RSX/RSXOffload.h
+++ b/rpcs3/Emu/RSX/RSXOffload.h
@@ -83,6 +83,6 @@ namespace rsx
void clear_mem_fault_flag();
// Fault recovery
- utils::address_range get_fault_range(bool writing) const;
+ utils::address_range32 get_fault_range(bool writing) const;
};
}
diff --git a/rpcs3/Emu/RSX/RSXThread.cpp b/rpcs3/Emu/RSX/RSXThread.cpp
index ffd09a742f..f0a72c05fc 100644
--- a/rpcs3/Emu/RSX/RSXThread.cpp
+++ b/rpcs3/Emu/RSX/RSXThread.cpp
@@ -1233,7 +1233,7 @@ namespace rsx
{
std::lock_guard lock(m_mtx_task);
- m_invalidated_memory_range = utils::address_range::start_end(0x2 << 28, constants::local_mem_base + local_mem_size - 1);
+ m_invalidated_memory_range = utils::address_range32::start_end(0x2 << 28, constants::local_mem_base + local_mem_size - 1);
handle_invalidated_memory_range();
}
}
@@ -2299,8 +2299,8 @@ namespace rsx
return false;
}
- const auto current_fragment_shader_range = address_range::start_length(shader_offset, current_fragment_program.total_length);
- if (!current_fragment_shader_range.overlaps(address_range::start_length(dst_offset, size)))
+ const auto current_fragment_shader_range = address_range32::start_length(shader_offset, current_fragment_program.total_length);
+ if (!current_fragment_shader_range.overlaps(address_range32::start_length(dst_offset, size)))
{
// No range overlap
return false;
@@ -2832,7 +2832,7 @@ namespace rsx
reader_lock lock(m_mtx_task);
- const auto map_range = address_range::start_length(address, size);
+ const auto map_range = address_range32::start_length(address, size);
if (!m_invalidated_memory_range.valid())
return;
@@ -2918,7 +2918,7 @@ namespace rsx
// Queue up memory invalidation
std::lock_guard lock(m_mtx_task);
const bool existing_range_valid = m_invalidated_memory_range.valid();
- const auto unmap_range = address_range::start_length(address, size);
+ const auto unmap_range = address_range32::start_length(address, size);
if (existing_range_valid && m_invalidated_memory_range.touches(unmap_range))
{
diff --git a/rpcs3/Emu/RSX/RSXThread.h b/rpcs3/Emu/RSX/RSXThread.h
index 6b078a522b..73aacf360a 100644
--- a/rpcs3/Emu/RSX/RSXThread.h
+++ b/rpcs3/Emu/RSX/RSXThread.h
@@ -149,7 +149,7 @@ namespace rsx
virtual f64 get_display_refresh_rate() const = 0;
// Invalidated memory range
- address_range m_invalidated_memory_range;
+ address_range32 m_invalidated_memory_range;
// Profiler
rsx::profiling_timer m_profiler;
@@ -353,7 +353,7 @@ namespace rsx
virtual void flip(const display_flip_info_t& info) = 0;
virtual u64 timestamp();
virtual bool on_access_violation(u32 /*address*/, bool /*is_writing*/) { return false; }
- virtual void on_invalidate_memory_range(const address_range & /*range*/, rsx::invalidation_cause) {}
+ virtual void on_invalidate_memory_range(const address_range32 & /*range*/, rsx::invalidation_cause) {}
virtual void notify_tile_unbound(u32 /*tile*/) {}
// control
diff --git a/rpcs3/Emu/RSX/RSXZCULL.cpp b/rpcs3/Emu/RSX/RSXZCULL.cpp
index 117caa7b4e..3dd9d1a776 100644
--- a/rpcs3/Emu/RSX/RSXZCULL.cpp
+++ b/rpcs3/Emu/RSX/RSXZCULL.cpp
@@ -788,7 +788,7 @@ namespace rsx
u32 ZCULL_control::copy_reports_to(u32 start, u32 range, u32 dest)
{
u32 bytes_to_write = 0;
- const auto memory_range = utils::address_range::start_length(start, range);
+ const auto memory_range = utils::address_range32::start_length(start, range);
for (auto& writer : m_pending_writes)
{
if (!writer.sink)
diff --git a/rpcs3/Emu/RSX/VK/VKDMA.cpp b/rpcs3/Emu/RSX/VK/VKDMA.cpp
index 0aa7d864b7..09834d6f99 100644
--- a/rpcs3/Emu/RSX/VK/VKDMA.cpp
+++ b/rpcs3/Emu/RSX/VK/VKDMA.cpp
@@ -27,7 +27,7 @@ namespace vk
free();
}
- void* dma_block::map_range(const utils::address_range& range)
+ void* dma_block::map_range(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@@ -142,7 +142,7 @@ namespace vk
inheritance_info.block_offset = (addr - parent->base_address);
}
- void dma_block::flush(const utils::address_range& range)
+ void dma_block::flush(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@@ -158,7 +158,7 @@ namespace vk
// NOTE: Do not unmap. This can be extremely slow on some platforms.
}
- void dma_block::load(const utils::address_range& range)
+ void dma_block::load(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@@ -174,7 +174,7 @@ namespace vk
// NOTE: Do not unmap. This can be extremely slow on some platforms.
}
- dma_mapping_handle dma_block::get(const utils::address_range& range)
+ dma_mapping_handle dma_block::get(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@@ -264,7 +264,7 @@ namespace vk
s_allocated_dma_pool_size += allocated_memory->size();
}
- void* dma_block_EXT::map_range(const utils::address_range& range)
+ void* dma_block_EXT::map_range(const utils::address_range32& range)
{
return vm::get_super_ptr(range.start);
}
@@ -274,12 +274,12 @@ namespace vk
// NOP
}
- void dma_block_EXT::flush(const utils::address_range&)
+ void dma_block_EXT::flush(const utils::address_range32&)
{
// NOP
}
- void dma_block_EXT::load(const utils::address_range&)
+ void dma_block_EXT::load(const utils::address_range32&)
{
// NOP
}
@@ -336,7 +336,7 @@ namespace vk
// Not much contention expected here, avoid searching twice
std::lock_guard lock(g_dma_mutex);
- const auto map_range = utils::address_range::start_length(local_address, length);
+ const auto map_range = utils::address_range32::start_length(local_address, length);
auto first_block = (local_address & s_dma_block_mask);
if (auto found = g_dma_pool.find(first_block); found != g_dma_pool.end())
@@ -454,7 +454,7 @@ namespace vk
if (auto found = g_dma_pool.find(block); found != g_dma_pool.end())
{
const auto sync_end = std::min(limit, found->second->end());
- const auto range = utils::address_range::start_end(local_address, sync_end);
+ const auto range = utils::address_range32::start_end(local_address, sync_end);
if constexpr (load)
{
diff --git a/rpcs3/Emu/RSX/VK/VKDMA.h b/rpcs3/Emu/RSX/VK/VKDMA.h
index e718733649..3cc0855f57 100644
--- a/rpcs3/Emu/RSX/VK/VKDMA.h
+++ b/rpcs3/Emu/RSX/VK/VKDMA.h
@@ -28,7 +28,7 @@ namespace vk
virtual void allocate(const render_device& dev, usz size);
virtual void free();
- virtual void* map_range(const utils::address_range& range);
+ virtual void* map_range(const utils::address_range32& range);
virtual void unmap();
public:
@@ -38,9 +38,9 @@ namespace vk
virtual void init(const render_device& dev, u32 addr, usz size);
virtual void init(dma_block* parent, u32 addr, usz size);
- virtual void flush(const utils::address_range& range);
- virtual void load(const utils::address_range& range);
- std::pair<u32, buffer*> get(const utils::address_range& range);
+ virtual void flush(const utils::address_range32& range);
+ virtual void load(const utils::address_range32& range);
+ std::pair<u32, buffer*> get(const utils::address_range32& range);
u32 start() const;
u32 end() const;
@@ -56,11 +56,11 @@ namespace vk
{
private:
void allocate(const render_device& dev, usz size) override;
- void* map_range(const utils::address_range& range) override;
+ void* map_range(const utils::address_range32& range) override;
void unmap() override;
public:
- void flush(const utils::address_range& range) override;
- void load(const utils::address_range& range) override;
+ void flush(const utils::address_range32& range) override;
+ void load(const utils::address_range32& range) override;
};
}
diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.cpp b/rpcs3/Emu/RSX/VK/VKGSRender.cpp
index 9ac52774ea..107cd7b399 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRender.cpp
+++ b/rpcs3/Emu/RSX/VK/VKGSRender.cpp
@@ -976,7 +976,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
return true;
}
-void VKGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
+void VKGSRender::on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause)
{
std::lock_guard lock(m_secondary_cb_guard);
@@ -2438,7 +2438,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
// Flush old address if we keep missing it
if (m_surface_info[i].pitch && g_cfg.video.write_color_buffers)
{
- const utils::address_range rsx_range = m_surface_info[i].get_memory_range();
+ const utils::address_range32 rsx_range = m_surface_info[i].get_memory_range();
m_texture_cache.set_memory_read_flags(rsx_range, rsx::memory_read_flags::flush_once);
m_texture_cache.flush_if_cache_miss_likely(*m_current_command_buffer, rsx_range);
}
@@ -2455,7 +2455,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
{
if (m_depth_surface_info.pitch && g_cfg.video.write_depth_buffer)
{
- const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
+ const utils::address_range32 surface_range = m_depth_surface_info.get_memory_range();
m_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_texture_cache.flush_if_cache_miss_likely(*m_current_command_buffer, surface_range);
}
@@ -2572,7 +2572,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
{
if (!m_surface_info[index].address || !m_surface_info[index].pitch) continue;
- const utils::address_range surface_range = m_surface_info[index].get_memory_range();
+ const utils::address_range32 surface_range = m_surface_info[index].get_memory_range();
if (g_cfg.video.write_color_buffers)
{
m_texture_cache.lock_memory_region(
@@ -2588,7 +2588,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
if (m_depth_surface_info.address && m_depth_surface_info.pitch)
{
- const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
+ const utils::address_range32 surface_range = m_depth_surface_info.get_memory_range();
if (g_cfg.video.write_depth_buffer)
{
const u32 gcm_format = (m_depth_surface_info.depth_format == rsx::surface_depth_format::z16) ? CELL_GCM_TEXTURE_DEPTH16 : CELL_GCM_TEXTURE_DEPTH24_D8;
diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.h b/rpcs3/Emu/RSX/VK/VKGSRender.h
index f37c0d411b..e16d8d1afa 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRender.h
+++ b/rpcs3/Emu/RSX/VK/VKGSRender.h
@@ -170,7 +170,7 @@ private:
// Offloader thread deadlock recovery
rsx::atomic_bitmask_t m_queue_status;
- utils::address_range m_offloader_fault_range;
+ utils::address_range32 m_offloader_fault_range;
rsx::invalidation_cause m_offloader_fault_cause;
vk::draw_call_t m_current_draw {};
@@ -289,6 +289,6 @@ protected:
void notify_tile_unbound(u32 tile) override;
bool on_access_violation(u32 address, bool is_writing) override;
- void on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause) override;
+ void on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause) override;
void on_semaphore_acquire_wait() override;
};
diff --git a/rpcs3/Emu/RSX/VK/VKHelpers.h b/rpcs3/Emu/RSX/VK/VKHelpers.h
index 302abfb805..9d07a4581e 100644
--- a/rpcs3/Emu/RSX/VK/VKHelpers.h
+++ b/rpcs3/Emu/RSX/VK/VKHelpers.h
@@ -86,7 +86,7 @@ namespace vk
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags);
std::pair detile_memory_block(
- const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region, const utils::address_range& range,
+ const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region, const utils::address_range32& range,
u16 width, u16 height, u8 bpp);
// Other texture management helpers
diff --git a/rpcs3/Emu/RSX/VK/VKPresent.cpp b/rpcs3/Emu/RSX/VK/VKPresent.cpp
index 0d2912df89..a685dbabaa 100644
--- a/rpcs3/Emu/RSX/VK/VKPresent.cpp
+++ b/rpcs3/Emu/RSX/VK/VKPresent.cpp
@@ -325,7 +325,7 @@ vk::viewable_image* VKGSRender::get_present_source(/* inout */ vk::present_surfa
if (!image_to_flip) [[ unlikely ]]
{
// Read from cell
- const auto range = utils::address_range::start_length(info->address, info->pitch * info->height);
+ const auto range = utils::address_range32::start_length(info->address, info->pitch * info->height);
const u32 lookup_mask = rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::framebuffer_storage;
const auto overlap = m_texture_cache.find_texture_from_range(range, 0, lookup_mask);
diff --git a/rpcs3/Emu/RSX/VK/VKRenderTargets.cpp b/rpcs3/Emu/RSX/VK/VKRenderTargets.cpp
index 11e7747f2f..0356a1c7d8 100644
--- a/rpcs3/Emu/RSX/VK/VKRenderTargets.cpp
+++ b/rpcs3/Emu/RSX/VK/VKRenderTargets.cpp
@@ -93,7 +93,7 @@ namespace vk
// Drop MSAA resolve/unresolve caches. Only trigger when a hard sync is guaranteed to follow else it will cause even more problems!
// 2-pass to ensure resources are available where they are most needed
- auto relieve_memory_pressure = [&](auto& list, const utils::address_range& range)
+ auto relieve_memory_pressure = [&](auto& list, const utils::address_range32& range)
{
for (auto it = list.begin_range(range); it != list.end(); ++it)
{
@@ -254,7 +254,7 @@ namespace vk
std::vector sorted_list;
sorted_list.reserve(1024);
- auto process_list_function = [&](auto& list, const utils::address_range& range)
+ auto process_list_function = [&](auto& list, const utils::address_range32& range)
{
for (auto it = list.begin_range(range); it != list.end(); ++it)
{
diff --git a/rpcs3/Emu/RSX/VK/VKTexture.cpp b/rpcs3/Emu/RSX/VK/VKTexture.cpp
index d8d06420da..23b76df52b 100644
--- a/rpcs3/Emu/RSX/VK/VKTexture.cpp
+++ b/rpcs3/Emu/RSX/VK/VKTexture.cpp
@@ -1247,7 +1247,7 @@ namespace vk
}
std::pair detile_memory_block(const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region,
- const utils::address_range& range, u16 width, u16 height, u8 bpp)
+ const utils::address_range32& range, u16 width, u16 height, u8 bpp)
{
// Calculate the true length of the usable memory section
const auto available_tile_size = tiled_region.tile->size - (range.start - tiled_region.base_address);
diff --git a/rpcs3/Emu/RSX/VK/VKTextureCache.cpp b/rpcs3/Emu/RSX/VK/VKTextureCache.cpp
index 7107e1f32b..fc1efb3533 100644
--- a/rpcs3/Emu/RSX/VK/VKTextureCache.cpp
+++ b/rpcs3/Emu/RSX/VK/VKTextureCache.cpp
@@ -62,7 +62,7 @@ namespace vk
}
}
- void cached_texture_section::dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch)
+ void cached_texture_section::dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range32& valid_range, u32 pitch)
{
ensure(src->samples() == 1);
@@ -921,7 +921,7 @@ namespace vk
dst->pop_layout(cmd);
}
- cached_texture_section* texture_cache::create_new_texture(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
+ cached_texture_section* texture_cache::create_new_texture(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t flags)
{
const auto section_depth = depth;
@@ -1076,7 +1076,7 @@ namespace vk
cached_texture_section* texture_cache::create_nul_section(
vk::command_buffer& /*cmd*/,
- const utils::address_range& rsx_range,
+ const utils::address_range32& rsx_range,
const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& tile,
bool memory_load)
@@ -1101,7 +1101,7 @@ namespace vk
return &region;
}
- cached_texture_section* texture_cache::upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
+ cached_texture_section* texture_cache::upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled)
{
if (context != rsx::texture_upload_context::shader_read)
diff --git a/rpcs3/Emu/RSX/VK/VKTextureCache.h b/rpcs3/Emu/RSX/VK/VKTextureCache.h
index 0228bd538e..8bbd131fe5 100644
--- a/rpcs3/Emu/RSX/VK/VKTextureCache.h
+++ b/rpcs3/Emu/RSX/VK/VKTextureCache.h
@@ -186,7 +186,7 @@ namespace vk
return flushed;
}
- void dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch);
+ void dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range32& valid_range, u32 pitch);
void copy_texture(vk::command_buffer& cmd, bool miss)
{
@@ -477,13 +477,13 @@ namespace vk
void update_image_contents(vk::command_buffer& cmd, vk::image_view* dst_view, vk::image* src, u16 width, u16 height) override;
- cached_texture_section* create_new_texture(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
+ cached_texture_section* create_new_texture(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t flags) override;
- cached_texture_section* create_nul_section(vk::command_buffer& cmd, const utils::address_range& rsx_range, const rsx::image_section_attributes_t& attrs,
+ cached_texture_section* create_nul_section(vk::command_buffer& cmd, const utils::address_range32& rsx_range, const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& tile, bool memory_load) override;
- cached_texture_section* upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
+ cached_texture_section* upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) override;
void set_component_order(cached_texture_section& section, u32 gcm_format, rsx::component_order expected_flags) override;
diff --git a/rpcs3/Emu/RSX/rsx_utils.h b/rpcs3/Emu/RSX/rsx_utils.h
index 37874933f4..7a0aa3c14f 100644
--- a/rpcs3/Emu/RSX/rsx_utils.h
+++ b/rpcs3/Emu/RSX/rsx_utils.h
@@ -14,9 +14,9 @@ extern "C"
namespace rsx
{
- // Import address_range utilities
- using utils::address_range;
- using utils::address_range_vector;
+ // Import address_range32 utilities
+ using utils::address_range32;
+ using utils::address_range_vector32;
using utils::page_for;
using utils::page_start;
using utils::page_end;
@@ -120,7 +120,7 @@ namespace rsx
u8 bpp = 0;
u8 samples = 0;
- address_range range{};
+ address_range32 range{};
gcm_framebuffer_info() = default;
@@ -131,16 +131,16 @@ namespace rsx
// Account for the last line of the block not reaching the end
const u32 block_size = pitch * (height - 1) * aa_factor_v;
const u32 line_size = width * aa_factor_u * bpp;
- range = address_range::start_length(address, block_size + line_size);
+ range = address_range32::start_length(address, block_size + line_size);
}
- address_range get_memory_range(const u32* aa_factors)
+ address_range32 get_memory_range(const u32* aa_factors)
{
calculate_memory_range(aa_factors[0], aa_factors[1]);
return range;
}
- address_range get_memory_range() const
+ address_range32 get_memory_range() const
{
ensure(range.start == address);
return range;
@@ -260,7 +260,7 @@ namespace rsx
static inline u32 get_location(u32 addr)
{
// We don't really care about the actual memory map, it shouldn't be possible to use the mmio bar region anyway
- constexpr address_range local_mem_range = address_range::start_length(rsx::constants::local_mem_base, 0x1000'0000);
+ constexpr address_range32 local_mem_range = address_range32::start_length(rsx::constants::local_mem_base, 0x1000'0000);
return local_mem_range.overlaps(addr) ?
CELL_GCM_LOCATION_LOCAL :
CELL_GCM_LOCATION_MAIN;
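Because the range is constexpr, the location query compiles down to two compares. A quick sanity check, assuming rsx::constants::local_mem_base == 0xC0000000 as in current rpcs3:

#include <cstdint>

constexpr uint32_t local_mem_base = 0xC000'0000; // assumed value
constexpr uint32_t local_mem_size = 0x1000'0000;

// the same closed-range compare address_range32::overlaps(addr) performs
constexpr bool is_local(uint32_t addr)
{
    return addr >= local_mem_base && addr <= local_mem_base + (local_mem_size - 1);
}

static_assert(is_local(0xC000'0000));  // first byte of RSX local memory
static_assert(!is_local(0xBFFF'FFFF)); // last byte of main memory
static_assert(is_local(0xCFFF'FFFF));  // last local byte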
diff --git a/rpcs3/tests/rpcs3_test.vcxproj b/rpcs3/tests/rpcs3_test.vcxproj
index 3351d9d8b8..d59172a231 100644
--- a/rpcs3/tests/rpcs3_test.vcxproj
+++ b/rpcs3/tests/rpcs3_test.vcxproj
@@ -89,6 +89,7 @@
+    <ClCompile Include="test_address_range.cpp" />
diff --git a/rpcs3/tests/test_address_range.cpp b/rpcs3/tests/test_address_range.cpp
new file mode 100644
index 0000000000..5a7e8b2faf
--- /dev/null
+++ b/rpcs3/tests/test_address_range.cpp
@@ -0,0 +1,497 @@
+#include <gtest/gtest.h>
+
+#define private public
+#include "Utilities/address_range.h"
+#undef private
+
+using namespace utils;
+
+namespace utils
+{
+ TEST(AddressRange, Constructors)
+ {
+ // Default constructor
+ address_range32 empty;
+ EXPECT_FALSE(empty.valid());
+ EXPECT_EQ(empty.start, umax);
+ EXPECT_EQ(empty.end, 0);
+
+ // Static factory methods
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
+ EXPECT_EQ(r1.start, 0x1000);
+ EXPECT_EQ(r1.end, 0x1FFF);
+ EXPECT_EQ(r1.length(), 0x1000);
+ EXPECT_TRUE(r1.valid());
+
+ address_range32 r2 = address_range32::start_end(0x2000, 0x2FFF);
+ EXPECT_EQ(r2.start, 0x2000);
+ EXPECT_EQ(r2.end, 0x2FFF);
+ EXPECT_EQ(r2.length(), 0x1000);
+ EXPECT_TRUE(r2.valid());
+
+ // Edge cases
+ address_range32 zero_length = address_range32::start_length(0x1000, 0);
+ EXPECT_FALSE(zero_length.valid());
+
+ address_range32 single_byte = address_range32::start_length(0x1000, 1);
+ EXPECT_TRUE(single_byte.valid());
+ EXPECT_EQ(single_byte.start, 0x1000);
+ EXPECT_EQ(single_byte.end, 0x1000);
+ EXPECT_EQ(single_byte.length(), 1);
+ }
+
+ TEST(AddressRange, LengthAndBoundaries)
+ {
+ address_range32 r = address_range32::start_length(0x1000, 0x1000);
+
+ // Test length
+ EXPECT_EQ(r.length(), 0x1000);
+
+ // Test set_length
+ r.set_length(0x2000);
+ EXPECT_EQ(r.start, 0x1000);
+ EXPECT_EQ(r.end, 0x2FFF);
+ EXPECT_EQ(r.length(), 0x2000);
+
+ // Test next_address and prev_address
+ EXPECT_EQ(r.next_address(), 0x3000);
+ EXPECT_EQ(r.prev_address(), 0xFFF);
+ }
+
+ TEST(AddressRange, Overlapping)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+
+ // Complete overlap
+ address_range32 r2 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+ EXPECT_TRUE(r1.overlaps(r2));
+ EXPECT_TRUE(r2.overlaps(r1));
+
+ // Partial overlap at start
+ address_range32 r3 = address_range32::start_length(0x800, 0x1000); // 0x800-0x17FF
+ EXPECT_TRUE(r1.overlaps(r3));
+ EXPECT_TRUE(r3.overlaps(r1));
+
+ // Partial overlap at end
+ address_range32 r4 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
+ EXPECT_TRUE(r1.overlaps(r4));
+ EXPECT_TRUE(r4.overlaps(r1));
+
+ // No overlap, before
+ address_range32 r5 = address_range32::start_length(0x0, 0x1000); // 0x0-0xFFF
+ EXPECT_FALSE(r1.overlaps(r5));
+ EXPECT_FALSE(r5.overlaps(r1));
+
+ // No overlap, after
+ address_range32 r6 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
+ EXPECT_FALSE(r1.overlaps(r6));
+ EXPECT_FALSE(r6.overlaps(r1));
+
+ // Single address overlap at start
+ address_range32 r7 = address_range32::start_length(0x800, 0x801); // 0x800-0x1000
+ EXPECT_TRUE(r1.overlaps(r7));
+ EXPECT_TRUE(r7.overlaps(r1));
+
+ // Single address overlap at end
+ address_range32 r8 = address_range32::start_length(0x1FFF, 0x1000); // 0x1FFF-0x2FFE
+ EXPECT_TRUE(r1.overlaps(r8));
+ EXPECT_TRUE(r8.overlaps(r1));
+
+ // Address overlap test
+ EXPECT_TRUE(r1.overlaps(0x1000)); // Start boundary
+ EXPECT_TRUE(r1.overlaps(0x1FFF)); // End boundary
+ EXPECT_TRUE(r1.overlaps(0x1800)); // Middle
+ EXPECT_FALSE(r1.overlaps(0xFFF)); // Just before
+ EXPECT_FALSE(r1.overlaps(0x2000)); // Just after
+ }
+
+ TEST(AddressRange, Inside)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+
+ // Same range
+ address_range32 r2 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+ EXPECT_TRUE(r1.inside(r2));
+ EXPECT_TRUE(r2.inside(r1));
+
+ // Smaller range inside
+ address_range32 r3 = address_range32::start_length(0x1200, 0x800); // 0x1200-0x19FF
+ EXPECT_TRUE(r3.inside(r1));
+ EXPECT_FALSE(r1.inside(r3));
+
+ // Larger range outside
+ address_range32 r4 = address_range32::start_length(0x800, 0x2000); // 0x800-0x27FF
+ EXPECT_TRUE(r1.inside(r4));
+ EXPECT_FALSE(r4.inside(r1));
+
+ // Partially overlapping
+ address_range32 r5 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
+ EXPECT_FALSE(r1.inside(r5));
+ EXPECT_FALSE(r5.inside(r1));
+
+ // No overlap
+ address_range32 r6 = address_range32::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
+ EXPECT_FALSE(r1.inside(r6));
+ EXPECT_FALSE(r6.inside(r1));
+ }
+
+ TEST(AddressRange, Touches)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+
+ // Same range (overlaps)
+ address_range32 r2 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+ EXPECT_TRUE(r1.touches(r2));
+
+ // Overlapping ranges
+ address_range32 r3 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
+ EXPECT_TRUE(r1.touches(r3));
+
+ // Adjacent at end of r1
+ address_range32 r4 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
+ EXPECT_TRUE(r1.touches(r4));
+ EXPECT_TRUE(r4.touches(r1));
+
+ // Adjacent at start of r1
+ address_range32 r5 = address_range32::start_length(0x0, 0x1000); // 0x0-0xFFF
+ EXPECT_TRUE(r1.touches(r5));
+ EXPECT_TRUE(r5.touches(r1));
+
+ // Not touching
+ address_range32 r6 = address_range32::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
+ EXPECT_FALSE(r1.touches(r6));
+ EXPECT_FALSE(r6.touches(r1));
+ }
+
+ TEST(AddressRange, Distance)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+
+ // Touching ranges
+ address_range32 r2 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
+ EXPECT_EQ(r1.distance(r2), 0);
+ EXPECT_EQ(r2.distance(r1), 0);
+ EXPECT_EQ(r1.signed_distance(r2), 0);
+ EXPECT_EQ(r2.signed_distance(r1), 0);
+
+ // Gap of 0x1000 (r3 after r1)
+ address_range32 r3 = address_range32::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
+ EXPECT_EQ(r1.distance(r3), 0x1000);
+ EXPECT_EQ(r3.distance(r1), 0x1000);
+ EXPECT_EQ(r1.signed_distance(r3), 0x1000);
+ EXPECT_EQ(r3.signed_distance(r1), -0x1000);
+
+ // Gap of 0x100 (r4 before r1)
+ address_range32 r4 = address_range32::start_end(0, 0xEFF); // 0x0-0xEFF
+ EXPECT_EQ(r1.distance(r4), 0x100);
+ EXPECT_EQ(r4.distance(r1), 0x100);
+ EXPECT_EQ(r1.signed_distance(r4), -0x100);
+ EXPECT_EQ(r4.signed_distance(r1), 0x100);
+
+ // Overlapping ranges
+ address_range32 r5 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
+ EXPECT_EQ(r1.distance(r5), 0);
+ EXPECT_EQ(r5.distance(r1), 0);
+ EXPECT_EQ(r1.signed_distance(r5), 0);
+ EXPECT_EQ(r5.signed_distance(r1), 0);
+ }
+
+ TEST(AddressRange, MinMax)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+ address_range32 r2 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
+
+ // Get min-max
+ address_range32 min_max = r1.get_min_max(r2);
+ EXPECT_EQ(min_max.start, 0x1000);
+ EXPECT_EQ(min_max.end, 0x27FF);
+
+ // Set min-max
+ address_range32 r3 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
+ r3.set_min_max(r1);
+ EXPECT_EQ(r3.start, 0x1000);
+ EXPECT_EQ(r3.end, 0x2FFF);
+
+ // Test with invalid ranges
+ address_range32 empty;
+ address_range32 min_max2 = r1.get_min_max(empty);
+ EXPECT_EQ(min_max2.start, r1.start);
+ EXPECT_EQ(min_max2.end, r1.end);
+
+ address_range32 min_max3 = empty.get_min_max(r1);
+ EXPECT_EQ(min_max3.start, r1.start);
+ EXPECT_EQ(min_max3.end, r1.end);
+
+ address_range32 min_max4 = empty.get_min_max(empty);
+ EXPECT_EQ(min_max4.start, umax);
+ EXPECT_EQ(min_max4.end, 0);
+ }
+
+ TEST(AddressRange, Intersect)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x2000); // 0x1000-0x2FFF
+
+ // Complete overlap
+ address_range32 r2 = address_range32::start_length(0x0, 0x4000); // 0x0-0x3FFF
+ address_range32 i1 = r1.get_intersect(r2);
+ EXPECT_EQ(i1.start, 0x1000);
+ EXPECT_EQ(i1.end, 0x2FFF);
+
+ // Partial overlap at start
+ address_range32 r3 = address_range32::start_length(0x0, 0x2000); // 0x0-0x1FFF
+ address_range32 i2 = r1.get_intersect(r3);
+ EXPECT_EQ(i2.start, 0x1000);
+ EXPECT_EQ(i2.end, 0x1FFF);
+
+ // Partial overlap at end
+ address_range32 r4 = address_range32::start_length(0x2000, 0x2000); // 0x2000-0x3FFF
+ address_range32 i3 = r1.get_intersect(r4);
+ EXPECT_EQ(i3.start, 0x2000);
+ EXPECT_EQ(i3.end, 0x2FFF);
+
+ // No overlap
+ address_range32 r5 = address_range32::start_length(0x4000, 0x1000); // 0x4000-0x4FFF
+ address_range32 i4 = r1.get_intersect(r5);
+ EXPECT_FALSE(i4.valid());
+
+ // Test intersect method
+ address_range32 r6 = address_range32::start_length(0x1000, 0x2000); // 0x1000-0x2FFF
+ r6.intersect(r3);
+ EXPECT_EQ(r6.start, 0x1000);
+ EXPECT_EQ(r6.end, 0x1FFF);
+ }
+
+ TEST(AddressRange, Validity)
+ {
+ // Valid range
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
+ EXPECT_TRUE(r1.valid());
+
+ // Invalid range (default constructor)
+ address_range32 r2;
+ EXPECT_FALSE(r2.valid());
+
+ // Invalid range (start > end)
+ address_range32 r3 = address_range32::start_end(0x2000, 0x1000);
+ EXPECT_FALSE(r3.valid());
+
+ // Invalidate
+ r1.invalidate();
+ EXPECT_FALSE(r1.valid());
+ EXPECT_EQ(r1.start, umax);
+ EXPECT_EQ(r1.end, 0);
+ }
+
+ TEST(AddressRange, Comparison)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
+ address_range32 r2 = address_range32::start_length(0x1000, 0x1000);
+ address_range32 r3 = address_range32::start_length(0x2000, 0x1000);
+
+ EXPECT_TRUE(r1 == r2);
+ EXPECT_FALSE(r1 == r3);
+ }
+
+ TEST(AddressRange, StringRepresentation)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
+ std::string str = r1.str();
+
+ // The exact format may vary, but it should contain the start and end addresses
+ EXPECT_NE(str.find("1000"), std::string::npos);
+ EXPECT_NE(str.find("1fff"), std::string::npos);
+ }
+
+ // Tests for address_range_vector32
+ TEST(AddressRangeVector, BasicOperations)
+ {
+ address_range_vector32 vec;
+ EXPECT_TRUE(vec.empty());
+ EXPECT_EQ(vec.size(), 0);
+
+ // Add a range
+ vec.merge(address_range32::start_length(0x1000, 0x1000));
+ EXPECT_FALSE(vec.empty());
+ EXPECT_EQ(vec.size(), 1);
+
+ // Clear
+ vec.clear();
+ EXPECT_TRUE(vec.empty());
+ EXPECT_EQ(vec.size(), 0);
+ }
+
+ TEST(AddressRangeVector, MergeOperations)
+ {
+ address_range_vector32 vec;
+
+ // Add non-touching ranges
+ vec.merge(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
+ vec.merge(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
+ EXPECT_EQ(vec.valid_count(), 2);
+
+ // Add a range that touches the first range
+ vec.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
+ // 0x2000-0x2FFF touches both neighbours, so all three ranges coalesce into one
+ EXPECT_EQ(vec.valid_count(), 1);
+ EXPECT_TRUE(vec.contains(address_range32::start_end(0x1000, 0x3FFF)));
+
+ // Add a non-touching range
+ vec.merge(address_range32::start_length(0x5000, 0x1000)); // 0x5000-0x5FFF
+ EXPECT_EQ(vec.valid_count(), 2);
+
+ // Add an overlapping range
+ vec.merge(address_range32::start_length(0x4000, 0x2000)); // 0x4000-0x5FFF
+ EXPECT_EQ(vec.valid_count(), 1);
+ EXPECT_TRUE(vec.contains(address_range32::start_end(0x1000, 0x5FFF)));
+ }
+
+ TEST(AddressRangeVector, ExcludeOperations)
+ {
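+ // exclude() carves the given range out of every stored range, splitting an
+ // entry in two when the excluded span falls in its middle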
+ address_range_vector32 vec;
+ vec.merge(address_range32::start_length(0x1000, 0x4000)); // 0x1000-0x4FFF
+
+ // Exclude from the middle
+ vec.exclude(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
+ EXPECT_EQ(vec.valid_count(), 2);
+
+ auto it = vec.begin();
+ EXPECT_EQ(it->start, 0x1000);
+ EXPECT_EQ(it->end, 0x1FFF);
+ ++it;
+ EXPECT_EQ(it->start, 0x3000);
+ EXPECT_EQ(it->end, 0x4FFF);
+
+ // Exclude from the start
+ vec.exclude(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
+ EXPECT_EQ(vec.valid_count(), 1);
+ EXPECT_TRUE(vec.contains(address_range32::start_end(0x3000, 0x4FFF)));
+
+ // Exclude from the end
+ vec.exclude(address_range32::start_length(0x4000, 0x1000)); // 0x4000-0x4FFF
+ EXPECT_EQ(vec.valid_count(), 1);
+ EXPECT_TRUE(vec.contains(address_range32::start_end(0x3000, 0x3FFF)));
+
+ // Exclude entire range
+ vec.exclude(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
+ EXPECT_EQ(vec.valid_count(), 0);
+
+ // Test excluding with another vector
+ vec.merge(address_range32::start_length(0x1000, 0x4000)); // 0x1000-0x4FFF
+
+ address_range_vector32 vec2;
+ vec2.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
+ vec2.merge(address_range32::start_length(0x4000, 0x1000)); // 0x4000-0x4FFF
+
+ vec.exclude(vec2);
+ EXPECT_EQ(vec.valid_count(), 2);
+
+ EXPECT_TRUE(vec.contains(address_range32::start_end(0x1000, 0x1FFF)));
+ EXPECT_TRUE(vec.contains(address_range32::start_end(0x3000, 0x3FFF)));
+ }
+
+ TEST(AddressRangeVector, ConsistencyCheck)
+ {
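+ // check_consistency should pass for disjoint, non-touching entries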
+ address_range_vector32 vec;
+ vec.merge(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
+ vec.merge(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
+
+ EXPECT_TRUE(vec.check_consistency());
+
+ // Merging a range that touches both entries must coalesce them while preserving consistency
+ vec.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
+ EXPECT_TRUE(vec.check_consistency());
+ EXPECT_EQ(vec.valid_count(), 1);
+ }
+
+ TEST(AddressRangeVector, OverlapsAndContains)
+ {
+ address_range_vector32 vec;
+ vec.merge(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
+ vec.merge(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
+
+ // Test overlaps with range
+ EXPECT_TRUE(vec.overlaps(address_range32::start_length(0x1500, 0x1000))); // 0x1500-0x24FF
+ EXPECT_TRUE(vec.overlaps(address_range32::start_length(0x3500, 0x1000))); // 0x3500-0x44FF
+ EXPECT_FALSE(vec.overlaps(address_range32::start_length(0x2000, 0x1000))); // 0x2000-0x2FFF
+
+ // Test contains
+ EXPECT_TRUE(vec.contains(address_range32::start_length(0x1000, 0x1000))); // 0x1000-0x1FFF
+ EXPECT_TRUE(vec.contains(address_range32::start_length(0x3000, 0x1000))); // 0x3000-0x3FFF
+ EXPECT_FALSE(vec.contains(address_range32::start_length(0x1500, 0x1000))); // 0x1500-0x24FF
+
+ // Test overlaps with another vector
+ address_range_vector32 vec2;
+ vec2.merge(address_range32::start_length(0x1500, 0x1000)); // 0x1500-0x24FF
+ EXPECT_TRUE(vec.overlaps(vec2));
+
+ address_range_vector32 vec3;
+ vec3.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
+ EXPECT_FALSE(vec.overlaps(vec3));
+
+ // inside(): true only if every stored range lies within the given range
+ address_range32 big_range = address_range32::start_length(0x0, 0x5000); // 0x0-0x4FFF
+ EXPECT_TRUE(vec.inside(big_range));
+
+ address_range32 small_range = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
+ EXPECT_FALSE(vec.inside(small_range));
+ }
+
+ // Test the std::hash implementation for address_range32
+ TEST(AddressRange, Hash)
+ {
+ address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
+ address_range32 r2 = address_range32::start_length(0x1000, 0x1000);
+ address_range32 r3 = address_range32::start_length(0x2000, 0x1000);
+
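+ // Equal ranges must produce equal hashes; these two distinct ranges are also
+ // expected not to collide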
+ std::hash<address_range32> hasher;
+ EXPECT_EQ(hasher(r1), hasher(r2));
+ EXPECT_NE(hasher(r1), hasher(r3));
+ }
+
+ // Test invalidation rules around umax
+ TEST(AddressRange, Invalidate32)
+ {
+ address_range32 r1 = address_range32::start_length(0x0, 0x1000);
+ r1.invalidate();
+
+ EXPECT_FALSE(r1.valid());
+ EXPECT_EQ(r1.start, 0xffff'ffffu);
+ EXPECT_EQ(r1.end, 0u);
+ }
+
+ TEST(AddressRange, Invalidate64)
+ {
+ address_range64 r1 = address_range64::start_length(0x0, 0x1000);
+ r1.invalidate();
+
+ EXPECT_FALSE(r1.valid());
+ EXPECT_EQ(r1.start, 0xffff'ffff'ffff'ffffull);
+ EXPECT_EQ(r1.end, 0ull);
+ }
+
+ TEST(AddressRange, Invalidate16)
+ {
+ const u16 start = 0x1000, length = 0x1000;
+ address_range16 r1 = address_range16::start_length(start, length);
+ r1.invalidate();
+
+ EXPECT_FALSE(r1.valid());
+ EXPECT_EQ(r1.start, 0xffff);
+ EXPECT_EQ(r1.end, 0);
+ }
+
+ TEST(AddressRange, InvalidConstruction)
+ {
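+ // start + length - 1 wraps past the u32 limit here, so the constructed range
+ // is expected to be invalid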
+ address_range32 r1 = address_range32::start_length(umax, u32{umax} / 2);
+ EXPECT_FALSE(r1.valid());
+ }
+
+ TEST(AddressRange, LargeValues64)
+ {
+ const u32 start = umax, length = u32{umax} / 2;
+ address_range64 r1 = address_range64::start_length(start, length);
+
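+ // 0xffff'ffff + 0x7fff'ffff - 1 = 0x1'7fff'fffd fits in 64 bits, so no truncation occurs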
+ EXPECT_EQ(r1.start, 0xffff'ffffull);
+ EXPECT_EQ(r1.end, 0x1'7fff'fffdull);
+ }
+}