utils: Rename address_range to address_range32 to allow implementation of address_range16 and address_range64

kd-11 authored 2025-05-26 02:49:20 +03:00, committed by kd-11
parent 79bcb7790c
commit 4f7c82ba8a
51 changed files with 368 additions and 368 deletions
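
For context: utils::address_range models an inclusive [start, end] interval over 32-bit guest addresses, and the rename frees the unqualified name so that sibling types for other address widths can be added later. A minimal sketch of what such a generalization could look like follows; it is a hypothetical illustration (address_range_t and its exact member set are invented here for clarity), not code from this commit, which only performs the rename:

#include <cstdint>

// Hypothetical width-generic template that address_range16/32/64 could alias.
template <typename T>
class address_range_t
{
public:
	T start = static_cast<T>(-1); // First address in range (invalid by default)
	T end = 0;                    // Last (inclusive) address in range

	constexpr address_range_t() = default;
	constexpr address_range_t(T _start, T _end) : start(_start), end(_end) {}

	static constexpr address_range_t start_length(T _start, T _length)
	{
		// As in the header below, a zero length yields an invalid range
		return _length ? address_range_t{_start, static_cast<T>(_start + _length - 1)} : address_range_t{};
	}

	constexpr bool valid() const { return start <= end; }

	constexpr bool overlaps(const address_range_t& other) const
	{
		return start <= other.end && other.start <= end;
	}
};

using address_range16 = address_range_t<std::uint16_t>;
using address_range32 = address_range_t<std::uint32_t>;
using address_range64 = address_range_t<std::uint64_t>;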

View file

@@ -37,7 +37,7 @@ namespace utils
/**
* Address Range utility class
*/
class address_range
class address_range32
{
public:
u32 start = umax; // First address in range
@@ -60,13 +60,13 @@ namespace utils
return (start1 >= start2 && end1 <= end2);
}
constexpr address_range(u32 _start, u32 _end) : start(_start), end(_end) {}
constexpr address_range32(u32 _start, u32 _end) : start(_start), end(_end) {}
public:
// Constructors
constexpr address_range() = default;
constexpr address_range32() = default;
static constexpr address_range start_length(u32 _start, u32 _length)
static constexpr address_range32 start_length(u32 _start, u32 _length)
{
if (!_length)
{
@@ -76,7 +76,7 @@ namespace utils
return {_start, _start + (_length - 1)};
}
static constexpr address_range start_end(u32 _start, u32 _end)
static constexpr address_range32 start_end(u32 _start, u32 _end)
{
return {_start, _end};
}
@@ -105,7 +105,7 @@ namespace utils
}
// Overlapping checks
bool overlaps(const address_range &other) const
bool overlaps(const address_range32 &other) const
{
AUDIT(valid() && other.valid());
return range_overlaps(start, end, other.start, other.end);
@@ -117,7 +117,7 @@ namespace utils
return address_overlaps(addr, start, end);
}
bool inside(const address_range &other) const
bool inside(const address_range32 &other) const
{
AUDIT(valid() && other.valid());
return range_inside_range(start, end, other.start, other.end);
@@ -126,7 +126,7 @@ namespace utils
inline bool inside(const address_range_vector &vec) const;
inline bool overlaps(const address_range_vector &vec) const;
bool touches(const address_range &other) const
bool touches(const address_range32 &other) const
{
AUDIT(valid() && other.valid());
// returns true if there is overlap, or if sections are side-by-side
@@ -134,7 +134,7 @@ namespace utils
}
// Utilities
s32 signed_distance(const address_range &other) const
s32 signed_distance(const address_range32 &other) const
{
if (touches(other))
{
@@ -152,7 +152,7 @@ namespace utils
return -static_cast<s32>(start - other.end - 1);
}
u32 distance(const address_range &other) const
u32 distance(const address_range32 &other) const
{
if (touches(other))
{
@@ -170,7 +170,7 @@ namespace utils
return (start - other.end - 1);
}
address_range get_min_max(const address_range &other) const
address_range32 get_min_max(const address_range32 &other) const
{
return {
std::min(valid() ? start : umax, other.valid() ? other.start : umax),
@@ -178,7 +178,7 @@ namespace utils
};
}
void set_min_max(const address_range &other)
void set_min_max(const address_range32 &other)
{
*this = get_min_max(other);
}
@@ -188,7 +188,7 @@ namespace utils
return (valid() && is_page_aligned(start) && is_page_aligned(length()));
}
address_range to_page_range() const
address_range32 to_page_range() const
{
AUDIT(valid());
return { page_start(start), page_end(end) };
@@ -202,7 +202,7 @@ namespace utils
AUDIT(is_page_range());
}
address_range get_intersect(const address_range &clamp) const
address_range32 get_intersect(const address_range32 &clamp) const
{
if (!valid() || !clamp.valid())
{
@@ -212,7 +212,7 @@ namespace utils
return { std::max(start, clamp.start), std::min(end, clamp.end) };
}
void intersect(const address_range &clamp)
void intersect(const address_range32 &clamp)
{
if (!clamp.valid())
{
@@ -238,7 +238,7 @@ namespace utils
}
// Comparison Operators
bool operator ==(const address_range& other) const
bool operator ==(const address_range32& other) const
{
return (start == other.start && end == other.end);
}
@@ -252,21 +252,21 @@ namespace utils
}
};
static inline address_range page_for(u32 addr)
static inline address_range32 page_for(u32 addr)
{
return address_range::start_end(page_start(addr), page_end(addr));
return address_range32::start_end(page_start(addr), page_end(addr));
}
/**
* Address Range Vector utility class
*
* Collection of address_range objects. Allows for merging and removing ranges from the set.
* Collection of address_range32 objects. Allows for merging and removing ranges from the set.
*/
class address_range_vector
{
public:
using vector_type = std::vector<address_range>;
using vector_type = std::vector<address_range32>;
using iterator = vector_type::iterator;
using const_iterator = vector_type::const_iterator;
using size_type = vector_type::size_type;
@@ -280,8 +280,8 @@ namespace utils
inline void clear() { data.clear(); }
inline size_type size() const { return data.size(); }
inline bool empty() const { return data.empty(); }
inline address_range& operator[](size_type n) { return data[n]; }
inline const address_range& operator[](size_type n) const { return data[n]; }
inline address_range32& operator[](size_type n) { return data[n]; }
inline const address_range32& operator[](size_type n) const { return data[n]; }
inline iterator begin() { return data.begin(); }
inline const_iterator begin() const { return data.begin(); }
inline iterator end() { return data.end(); }
@@ -289,7 +289,7 @@ namespace utils
// Search for ranges that touch new_range. If found, merge instead of adding new_range.
// When adding a new range, re-use invalid ranges whenever possible
void merge(const address_range &new_range)
void merge(const address_range32 &new_range)
{
// Note the case where we have
// AAAA BBBB
@@ -301,8 +301,8 @@ namespace utils
return;
}
address_range *found = nullptr;
address_range *invalid = nullptr;
address_range32 *found = nullptr;
address_range32 *invalid = nullptr;
for (auto &existing : data)
{
@@ -349,20 +349,20 @@ namespace utils
void merge(const address_range_vector &other)
{
for (const address_range &new_range : other)
for (const address_range32 &new_range : other)
{
merge(new_range);
}
}
// Exclude a given range from data
void exclude(const address_range &exclusion)
void exclude(const address_range32 &exclusion)
{
// Note the case where we have
// AAAAAAA
// EEE
// where data={A} and exclusion=E.
// In this case, we need to reduce A to the head (before E starts), and then create a new address_range B for the tail (after E ends), i.e.
// In this case, we need to reduce A to the head (before E starts), and then create a new address_range32 B for the tail (after E ends), i.e.
// AA BB
// EEE
@@ -371,13 +371,13 @@ namespace utils
return;
}
address_range *invalid = nullptr; // try to re-use an invalid range instead of calling push_back
address_range32 *invalid = nullptr; // try to re-use an invalid range instead of calling push_back
// We use index access because we might have to push_back within the loop, which could invalidate the iterators
size_type _size = data.size();
for (size_type n = 0; n < _size; ++n)
{
address_range &existing = data[n];
address_range32 &existing = data[n];
if (!existing.valid())
{
@@ -430,7 +430,7 @@ namespace utils
else
{
// IMPORTANT: adding to data invalidates "existing". This must be done last!
data.push_back(address_range::start_end(exclusion.next_address(), tail_end));
data.push_back(address_range32::start_end(exclusion.next_address(), tail_end));
}
}
}
@@ -440,7 +440,7 @@ namespace utils
void exclude(const address_range_vector &other)
{
for (const address_range &exclusion : other)
for (const address_range32 &exclusion : other)
{
exclude(exclusion);
}
@@ -478,25 +478,25 @@ namespace utils
}
// Test for overlap with a given range
bool overlaps(const address_range &range) const
bool overlaps(const address_range32 &range) const
{
return std::any_of(data.cbegin(), data.cend(), [&range](const address_range& cur)
return std::any_of(data.cbegin(), data.cend(), [&range](const address_range32& cur)
{
return cur.valid() && cur.overlaps(range);
});
}
// Test for overlap with a given address_range vector
// Test for overlap with a given address_range32 vector
bool overlaps(const address_range_vector &other) const
{
for (const address_range &rng1 : data)
for (const address_range32 &rng1 : data)
{
if (!rng1.valid())
{
continue;
}
for (const address_range &rng2 : other.data)
for (const address_range32 &rng2 : other.data)
{
if (!rng2.valid())
{
@@ -513,18 +513,18 @@ namespace utils
}
// Test if a given range is fully contained inside this vector
bool contains(const address_range &range) const
bool contains(const address_range32 &range) const
{
return std::any_of(this->begin(), this->end(), [&range](const address_range& cur)
return std::any_of(this->begin(), this->end(), [&range](const address_range32& cur)
{
return cur.valid() && cur.inside(range);
});
}
// Test if all ranges in this vector are fully contained inside a specific range
bool inside(const address_range &range) const
bool inside(const address_range32 &range) const
{
return std::all_of(this->begin(), this->end(), [&range](const address_range& cur)
return std::all_of(this->begin(), this->end(), [&range](const address_range32& cur)
{
return !cur.valid() || cur.inside(range);
});
@@ -547,12 +547,12 @@ namespace utils
// These declarations must be done after address_range_vector has been defined
bool address_range::inside(const address_range_vector &vec) const
bool address_range32::inside(const address_range_vector &vec) const
{
return vec.contains(*this);
}
bool address_range::overlaps(const address_range_vector &vec) const
bool address_range32::overlaps(const address_range_vector &vec) const
{
return vec.overlaps(*this);
}
@@ -565,9 +565,9 @@ namespace std
static_assert(sizeof(usz) >= 2 * sizeof(u32), "usz must be at least twice the size of u32");
template <>
struct hash<utils::address_range>
struct hash<utils::address_range32>
{
usz operator()(const utils::address_range& k) const
usz operator()(const utils::address_range32& k) const
{
// we can guarantee a unique hash since our type is 64 bits and usz as well
return (usz{ k.start } << 32) | usz{ k.end };
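
This concludes the changes to the address_range utility header. To make the renamed API concrete, here is a small usage sketch; the behavior follows the comments in the header above (ranges that touch are merged, and an exclusion can split a stored range in two), and all addresses are illustrative:

// Assumes the rpcs3 utils address_range header is included.
void example()
{
	utils::address_range_vector vec;

	// Side-by-side ranges are merged rather than stored separately:
	// [0x1000, 0x1FFF] + [0x2000, 0x2FFF] -> [0x1000, 0x2FFF]
	vec.merge(utils::address_range32::start_length(0x1000, 0x1000));
	vec.merge(utils::address_range32::start_length(0x2000, 0x1000));

	// Excluding a slice out of the middle reduces the stored range to the
	// head and pushes a new range for the tail, as described in exclude():
	// [0x1000, 0x2FFF] - [0x1800, 0x18FF] -> [0x1000, 0x17FF], [0x1900, 0x2FFF]
	vec.exclude(utils::address_range32::start_length(0x1800, 0x100));
}

The std::hash specialization at the end simply packs start and end into a single usz, which is why the static_assert above requires usz to be at least twice the size of u32.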

View file

@@ -2088,7 +2088,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
{
if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz)
{
using addr_range = utils::address_range;
using addr_range = utils::address_range32;
const addr_range r = addr_range::start_length(static_cast<u32>(prog.p_vaddr), static_cast<u32>(prog.p_memsz));
@@ -2852,7 +2852,7 @@ std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_ob
{
if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz)
{
using addr_range = utils::address_range;
using addr_range = utils::address_range32;
const addr_range r = addr_range::start_length(::narrow<u32>(prog.p_vaddr), ::narrow<u32>(prog.p_memsz));

View file

@@ -8314,7 +8314,7 @@ void spu_recompiler_base::add_pattern(bool fill_all, inst_attr attr, u32 start,
end = start;
}
m_patterns[start] = pattern_info{utils::address_range::start_end(start, end)};
m_patterns[start] = pattern_info{utils::address_range32::start_end(start, end)};
for (u32 i = start; i <= (fill_all ? end : start); i += 4)
{

View file

@@ -1083,7 +1083,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
m_ir->SetInsertPoint(_body);
}
void putllc16_pattern(const spu_program& /*prog*/, utils::address_range range)
void putllc16_pattern(const spu_program& /*prog*/, utils::address_range32 range)
{
// Prevent store elimination
m_block->store_context_ctr[s_reg_mfc_eal]++;
@@ -1376,7 +1376,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
m_ir->SetInsertPoint(_final);
}
void putllc0_pattern(const spu_program& /*prog*/, utils::address_range /*range*/)
void putllc0_pattern(const spu_program& /*prog*/, utils::address_range32 /*range*/)
{
// Prevent store elimination
m_block->store_context_ctr[s_reg_mfc_eal]++;

View file

@@ -397,7 +397,7 @@ protected:
struct pattern_info
{
utils::address_range range;
utils::address_range32 range;
};
std::unordered_map<u32, pattern_info> m_patterns;

View file

@@ -716,7 +716,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
//const u32 bank = (((a4 >> 32) & 0xFFFFFFFF) >> 4) & 0xF;
const bool bound = ((a4 >> 32) & 0x3) != 0;
const auto range = utils::address_range::start_length(offset, size);
const auto range = utils::address_range32::start_length(offset, size);
if (bound)
{
@@ -800,7 +800,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
if (bound)
{
const auto cull_range = utils::address_range::start_length(cullStart, width * height);
const auto cull_range = utils::address_range32::start_length(cullStart, width * height);
// cullStart is an offset inside ZCULL RAM which is 3MB long, check bounds
// width and height are not allowed to be zero (checked by range.valid())

View file

@@ -342,7 +342,7 @@ namespace vm
utils::prefetch_read(g_range_lock_set + 2);
utils::prefetch_read(g_range_lock_set + 4);
const auto range = utils::address_range::start_length(addr, size);
const auto range = utils::address_range32::start_length(addr, size);
u64 to_clear = get_range_lock_bits(false).load();
@@ -350,7 +350,7 @@
{
to_clear = for_all_range_locks(to_clear, [&](u32 addr2, u32 size2)
{
if (range.overlaps(utils::address_range::start_length(addr2, size2))) [[unlikely]]
if (range.overlaps(utils::address_range32::start_length(addr2, size2))) [[unlikely]]
{
return 1;
}
@@ -1816,7 +1816,7 @@ namespace vm
static bool _test_map(u32 addr, u32 size)
{
const auto range = utils::address_range::start_length(addr, size);
const auto range = utils::address_range32::start_length(addr, size);
if (!range.valid())
{
@@ -1830,7 +1830,7 @@ namespace vm
continue;
}
if (range.overlaps(utils::address_range::start_length(block->addr, block->size)))
if (range.overlaps(utils::address_range32::start_length(block->addr, block->size)))
{
return false;
}

View file

@@ -22,7 +22,7 @@ void ppubreak(ppu_thread& ppu);
namespace utils
{
class shm;
class address_range;
class address_range32;
}
namespace vm

View file

@@ -36,7 +36,7 @@ namespace rsx
return block_id * BlockSize;
}
void broadcast_insert(const utils::address_range& range)
void broadcast_insert(const utils::address_range32& range)
{
const auto head_block = block_for(range.start);
for (auto meta = &m_metadata[head_block]; meta <= &m_metadata[block_for(range.end)]; ++meta)
@@ -98,7 +98,7 @@ namespace rsx
m_it = where;
}
void begin_range(const utils::address_range& range)
void begin_range(const utils::address_range32& range)
{
const auto start_block_id = range.start / BlockSize;
const auto& metadata = m_metadata_ptr[start_block_id];
@@ -177,7 +177,7 @@ namespace rsx
std::for_each(m_metadata.begin(), m_metadata.end(), [&](auto& meta) { meta.id = static_cast<u32>(&meta - m_metadata.data()); });
}
void emplace(const utils::address_range& range, T&& value)
void emplace(const utils::address_range32& range, T&& value)
{
broadcast_insert(range);
m_data[block_for(range.start)].insert_or_assign(range.start, std::forward<T>(value));
@@ -220,7 +220,7 @@ namespace rsx
m_data[block_for(address)].erase(address);
}
iterator begin_range(const utils::address_range& range)
iterator begin_range(const utils::address_range32& range)
{
iterator ret = { this };
ret.begin_range(range);

View file

@@ -49,7 +49,7 @@ namespace rsx
}
}
surface_cache_dma& with_range(command_list_type cmd, const utils::address_range& range)
surface_cache_dma& with_range(command_list_type cmd, const utils::address_range32& range)
{
// Prepare underlying memory so that the range specified is provisioned and contiguous
// 1. Check if we have a pre-existing bo layer
@@ -57,7 +57,7 @@ namespace rsx
if (this_entry)
{
const auto bo = this_entry.get();
const auto buffer_range = utils::address_range::start_length(bo.base_address, ::size32(*bo));
const auto buffer_range = utils::address_range32::start_length(bo.base_address, ::size32(*bo));
if (range.inside(buffer_range))
{
@@ -94,11 +94,11 @@ namespace rsx
return *this;
}
utils::address_range to_block_range(const utils::address_range& range)
utils::address_range32 to_block_range(const utils::address_range32& range)
{
u32 start = block_address(block_for(range.start));
u32 end = block_address(block_for(range.end + BlockSize - 1));
return utils::address_range::start_end(start, end - 1);
return utils::address_range32::start_end(start, end - 1);
}
std::tuple<buffer_object_type, u32, u64> get(u32 address)
@@ -107,7 +107,7 @@ namespace rsx
return { block.get(), block.base_address - address };
}
void touch(const utils::address_range& range)
void touch(const utils::address_range32& range)
{
const u64 stamp = rsx::get_shared_tag();
for (usz i = block_for(range.start); i <= block_for(range.end); i++)

View file

@@ -52,8 +52,8 @@ namespace rsx
surface_ranged_map m_render_targets_storage = {};
surface_ranged_map m_depth_stencil_storage = {};
rsx::address_range m_render_targets_memory_range;
rsx::address_range m_depth_stencil_memory_range;
rsx::address_range32 m_render_targets_memory_range;
rsx::address_range32 m_depth_stencil_memory_range;
surface_cache_dma_map m_dma_block;
@@ -244,7 +244,7 @@ namespace rsx
template <bool is_depth_surface>
void intersect_surface_region(command_list_type cmd, u32 address, surface_type new_surface, surface_type prev_surface)
{
auto scan_list = [&new_surface, address](const rsx::address_range& mem_range,
auto scan_list = [&new_surface, address](const rsx::address_range32& mem_range,
surface_ranged_map& data) -> std::vector<std::pair<u32, surface_type>>
{
std::vector<std::pair<u32, surface_type>> result;
@@ -277,7 +277,7 @@ namespace rsx
}
// Range check
const rsx::address_range this_range = surface->get_memory_range();
const rsx::address_range32 this_range = surface->get_memory_range();
if (!this_range.overlaps(mem_range))
{
continue;
@@ -290,7 +290,7 @@ namespace rsx
return result;
};
const rsx::address_range mem_range = new_surface->get_memory_range();
const rsx::address_range32 mem_range = new_surface->get_memory_range();
auto list1 = scan_list(mem_range, m_render_targets_storage);
auto list2 = scan_list(mem_range, m_depth_stencil_storage);
@@ -410,7 +410,7 @@ namespace rsx
// Workaround. Preserve new surface tag value because pitch convert is unimplemented
u64 new_content_tag = 0;
address_range* storage_bounds;
address_range32* storage_bounds;
surface_ranged_map* primary_storage;
surface_ranged_map* secondary_storage;
if constexpr (depth)
@@ -488,7 +488,7 @@ namespace rsx
{
// Range test
const auto aa_factor_v = get_aa_factor_v(antialias);
rsx::address_range range = rsx::address_range::start_length(address, static_cast<u32>(pitch * height * aa_factor_v));
rsx::address_range32 range = rsx::address_range32::start_length(address, static_cast<u32>(pitch * height * aa_factor_v));
*storage_bounds = range.get_min_max(*storage_bounds);
// Search invalidated resources for a suitable surface
@@ -629,10 +629,10 @@ namespace rsx
invalidated_resources.push_back(std::move(storage));
}
int remove_duplicates_fast_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range& range)
int remove_duplicates_fast_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range32& range)
{
// Range tests to check for gaps
std::list<utils::address_range> m_ranges;
std::list<utils::address_range32> m_ranges;
bool invalidate_sections = false;
int removed_count = 0;
@@ -696,7 +696,7 @@ namespace rsx
return removed_count;
}
void remove_duplicates_fallback_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range& range)
void remove_duplicates_fallback_impl(std::vector<surface_overlap_info>& sections, const rsx::address_range32& range)
{
// Originally used to debug crashes but this function breaks often enough that I'll leave the checks in for now.
// Safe to remove after some time if no asserts are reported.
@@ -866,10 +866,10 @@ namespace rsx
}
std::tuple<std::vector<surface_type>, std::vector<surface_type>>
find_overlapping_set(const utils::address_range& range) const
find_overlapping_set(const utils::address_range32& range) const
{
std::vector<surface_type> color_result, depth_result;
utils::address_range result_range;
utils::address_range32 result_range;
if (m_render_targets_memory_range.valid() &&
range.overlaps(m_render_targets_memory_range))
@@ -904,7 +904,7 @@ namespace rsx
void write_to_dma_buffers(
command_list_type command_list,
const utils::address_range& range)
const utils::address_range32& range)
{
auto block_range = m_dma_block.to_block_range(range);
auto [color_data, depth_stencil_data] = find_overlapping_set(block_range);
@@ -1102,7 +1102,7 @@ namespace rsx
return {};
}
const auto test_range = utils::address_range::start_length(texaddr, (required_pitch * required_height) - (required_pitch - surface_internal_pitch));
const auto test_range = utils::address_range32::start_length(texaddr, (required_pitch * required_height) - (required_pitch - surface_internal_pitch));
auto process_list_function = [&](surface_ranged_map& data, bool is_depth)
{
@@ -1237,7 +1237,7 @@ namespace rsx
void check_for_duplicates(std::vector<surface_overlap_info>& sections)
{
utils::address_range test_range;
utils::address_range32 test_range;
for (const auto& section : sections)
{
const auto range = section.surface->get_memory_range();
@@ -1294,7 +1294,7 @@ namespace rsx
void invalidate_all()
{
// Unbind and invalidate all resources
auto free_resource_list = [&](auto &data, const utils::address_range& range)
auto free_resource_list = [&](auto &data, const utils::address_range32& range)
{
for (auto it = data.begin_range(range); it != data.end(); ++it)
{
@@ -1317,7 +1317,7 @@ namespace rsx
}
}
void invalidate_range(const rsx::address_range& range)
void invalidate_range(const rsx::address_range32& range)
{
for (auto it = m_render_targets_storage.begin_range(range); it != m_render_targets_storage.end(); ++it)
{
@@ -1383,7 +1383,7 @@ namespace rsx
void collapse_dirty_surfaces(command_list_type cmd, problem_severity severity)
{
auto process_list_function = [&](surface_ranged_map& data, const utils::address_range& range)
auto process_list_function = [&](surface_ranged_map& data, const utils::address_range32& range)
{
for (auto It = data.begin_range(range); It != data.end();)
{

View file

@@ -146,7 +146,7 @@ namespace rsx
u8 samples_x = 1;
u8 samples_y = 1;
rsx::address_range memory_range;
rsx::address_range32 memory_range;
std::unique_ptr<typename std::remove_pointer_t<image_storage_type>> resolve_surface;
surface_sample_layout sample_layout = surface_sample_layout::null;
@@ -367,7 +367,7 @@ namespace rsx
const u32 internal_height = get_surface_height<rsx::surface_metrics::samples>();
const u32 excess = (rsx_pitch - native_pitch);
memory_range = rsx::address_range::start_length(base_addr, internal_height * rsx_pitch - excess);
memory_range = rsx::address_range32::start_length(base_addr, internal_height * rsx_pitch - excess);
}
void sync_tag()
@@ -419,7 +419,7 @@ namespace rsx
const u32 internal_height = get_surface_height<rsx::surface_metrics::samples>();
const u32 excess = (rsx_pitch - native_pitch);
memory_range = rsx::address_range::start_length(base_addr, internal_height * rsx_pitch - excess);
memory_range = rsx::address_range32::start_length(base_addr, internal_height * rsx_pitch - excess);
}
void sync_tag()
@@ -658,7 +658,7 @@ namespace rsx
return { 0, 0, internal_width, internal_height };
}
inline rsx::address_range get_memory_range() const
inline rsx::address_range32 get_memory_range() const
{
return memory_range;
}

View file

@@ -7,7 +7,7 @@ namespace rsx
{
constexpr u32 min_lockable_data_size = 4096; // Increasing this value has worse results even on systems with pages > 4k
void buffered_section::init_lockable_range(const address_range& range)
void buffered_section::init_lockable_range(const address_range32& range)
{
locked_range = range.to_page_range();
AUDIT((locked_range.start == page_start(range.start)) || (locked_range.start == next_page(range.start)));
@@ -15,11 +15,11 @@ namespace rsx
ensure(locked_range.is_page_range());
}
void buffered_section::reset(const address_range& memory_range)
void buffered_section::reset(const address_range32& memory_range)
{
ensure(memory_range.valid() && locked == false);
cpu_range = address_range(memory_range);
cpu_range = address_range32(memory_range);
confirmed_range.invalidate();
locked_range.invalidate();
@@ -110,7 +110,7 @@ namespace rsx
}
else
{
confirmed_range = address_range::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
confirmed_range = address_range32::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
ensure(!locked || locked_range.inside(confirmed_range.to_page_range()));
}
@@ -139,7 +139,7 @@ namespace rsx
locked = false;
}
const address_range& buffered_section::get_bounds(section_bounds bounds) const
const address_range32& buffered_section::get_bounds(section_bounds bounds) const
{
switch (bounds)
{

View file

@@ -65,8 +65,8 @@ namespace rsx
u32 num_discarded = 0;
u64 cache_tag = 0;
address_range fault_range;
address_range invalidate_range;
address_range32 fault_range;
address_range32 invalidate_range;
void clear_sections()
{
@@ -136,7 +136,7 @@ namespace rsx
struct intersecting_set
{
rsx::simple_array<section_storage_type*> sections = {};
address_range invalidate_range = {};
address_range32 invalidate_range = {};
bool has_flushables = false;
};
@@ -150,7 +150,7 @@ namespace rsx
u16 x = 0;
u16 y = 0;
utils::address_range cache_range;
utils::address_range32 cache_range;
bool do_not_cache = false;
deferred_subresource() = default;
@@ -445,8 +445,8 @@ namespace rsx
atomic_t<u64> m_cache_update_tag = {0};
address_range read_only_range;
address_range no_access_range;
address_range32 read_only_range;
address_range32 no_access_range;
//Map of messages to only emit once
std::unordered_set<std::string> m_once_only_messages_set;
@@ -455,7 +455,7 @@ namespace rsx
bool read_only_tex_invalidate = false;
//Store of all objects in a flush_always state. A lazy readback is attempted every draw call
std::unordered_map<address_range, section_storage_type*> m_flush_always_cache;
std::unordered_map<address_range32, section_storage_type*> m_flush_always_cache;
u64 m_flush_always_update_timestamp = 0;
//Memory usage
@@ -484,11 +484,11 @@ namespace rsx
virtual image_view_type create_temporary_subresource_view(commandbuffer_type&, image_resource_type* src, u32 gcm_format, u16 x, u16 y, u16 w, u16 h, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type create_temporary_subresource_view(commandbuffer_type&, image_storage_type* src, u32 gcm_format, u16 x, u16 y, u16 w, u16 h, const texture_channel_remap_t& remap_vector) = 0;
virtual void release_temporary_subresource(image_view_type rsc) = 0;
virtual section_storage_type* create_new_texture(commandbuffer_type&, const address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
virtual section_storage_type* create_new_texture(commandbuffer_type&, const address_range32 &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, component_order swizzle_flags, rsx::flags32_t flags) = 0;
virtual section_storage_type* upload_image_from_cpu(commandbuffer_type&, const address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format, texture_upload_context context,
virtual section_storage_type* upload_image_from_cpu(commandbuffer_type&, const address_range32 &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format, texture_upload_context context,
const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) = 0;
virtual section_storage_type* create_nul_section(commandbuffer_type&, const address_range &rsx_range, const image_section_attributes_t& attrs, const GCM_tile_reference& tile, bool memory_load) = 0;
virtual section_storage_type* create_nul_section(commandbuffer_type&, const address_range32 &rsx_range, const image_section_attributes_t& attrs, const GCM_tile_reference& tile, bool memory_load) = 0;
virtual void set_component_order(section_storage_type& section, u32 gcm_format, component_order expected) = 0;
virtual void insert_texture_barrier(commandbuffer_type&, image_storage_type* tex, bool strong_ordering = true) = 0;
virtual image_view_type generate_cubemap_from_images(commandbuffer_type&, u32 gcm_format, u16 size, const std::vector<copy_region_descriptor>& sources, const texture_channel_remap_t& remap_vector) = 0;
@@ -545,7 +545,7 @@ namespace rsx
* Internal implementation methods and helpers
*/
inline bool region_intersects_cache(const address_range &test_range, bool is_writing)
inline bool region_intersects_cache(const address_range32 &test_range, bool is_writing)
{
AUDIT(test_range.valid());
@@ -751,7 +751,7 @@ namespace rsx
for (const auto &excluded : data.sections_to_exclude)
{
ensure(excluded->is_locked(true));
address_range exclusion_range = excluded->get_locked_range();
address_range32 exclusion_range = excluded->get_locked_range();
// We need to make sure that the exclusion range is *inside* the invalidate range
exclusion_range.intersect(data.invalidate_range);
@@ -824,14 +824,14 @@ namespace rsx
// Return a set containing all sections that should be flushed/unprotected/reprotected
atomic_t<u64> m_last_section_cache_tag = 0;
intersecting_set get_intersecting_set(const address_range &fault_range)
intersecting_set get_intersecting_set(const address_range32 &fault_range)
{
AUDIT(fault_range.is_page_range());
const u64 cache_tag = ++m_last_section_cache_tag;
intersecting_set result = {};
address_range &invalidate_range = result.invalidate_range;
address_range32 &invalidate_range = result.invalidate_range;
invalidate_range = fault_range; // Sections fully inside this range will be invalidated, others will be deemed false positives
// Loop through cache and find pages that overlap the invalidate_range
@@ -920,7 +920,7 @@ namespace rsx
template <typename ...Args>
thrashed_set invalidate_range_impl_base(
commandbuffer_type& cmd,
const address_range &fault_range_in,
const address_range32 &fault_range_in,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
@@ -932,7 +932,7 @@ namespace rsx
AUDIT(cause.valid());
AUDIT(fault_range_in.valid());
address_range fault_range = fault_range_in.to_page_range();
address_range32 fault_range = fault_range_in.to_page_range();
intersecting_set trampled_set = get_intersecting_set(fault_range);
@@ -1005,7 +1005,7 @@ namespace rsx
#endif
// If invalidate_range is fault_range, we can stop now
const address_range invalidate_range = trampled_set.invalidate_range;
const address_range32 invalidate_range = trampled_set.invalidate_range;
if (invalidate_range == fault_range)
{
result.violation_handled = true;
@@ -1187,7 +1187,7 @@ namespace rsx
}
template <bool check_unlocked = false>
std::vector<section_storage_type*> find_texture_from_range(const address_range &test_range, u32 required_pitch = 0, u32 context_mask = 0xFF)
std::vector<section_storage_type*> find_texture_from_range(const address_range32 &test_range, u32 required_pitch = 0, u32 context_mask = 0xFF)
{
std::vector<section_storage_type*> results;
@@ -1239,7 +1239,7 @@ namespace rsx
return nullptr;
}
section_storage_type* find_cached_texture(const address_range &range, const image_section_attributes_t& attr, bool create_if_not_found, bool confirm_dimensions, bool allow_dirty)
section_storage_type* find_cached_texture(const address_range32 &range, const image_section_attributes_t& attr, bool create_if_not_found, bool confirm_dimensions, bool allow_dirty)
{
auto &block = m_storage.block_for(range);
@@ -1329,7 +1329,7 @@ namespace rsx
return tex;
}
section_storage_type* find_flushable_section(const address_range &memory_range)
section_storage_type* find_flushable_section(const address_range32 &memory_range)
{
auto &block = m_storage.block_for(memory_range);
for (auto &tex : block)
@@ -1345,7 +1345,7 @@ namespace rsx
}
template <typename ...FlushArgs, typename ...Args>
void lock_memory_region(commandbuffer_type& cmd, image_storage_type* image, const address_range &rsx_range, bool is_active_surface, u16 width, u16 height, u32 pitch, Args&&... extras)
void lock_memory_region(commandbuffer_type& cmd, image_storage_type* image, const address_range32 &rsx_range, bool is_active_surface, u16 width, u16 height, u32 pitch, Args&&... extras)
{
AUDIT(g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer); // this method is only called when either WCB or WDB are enabled
@@ -1414,7 +1414,7 @@ namespace rsx
}
template <typename ...Args>
void commit_framebuffer_memory_region(commandbuffer_type& cmd, const address_range &rsx_range, Args&&... extras)
void commit_framebuffer_memory_region(commandbuffer_type& cmd, const address_range32 &rsx_range, Args&&... extras)
{
AUDIT(!g_cfg.video.write_color_buffers || !g_cfg.video.write_depth_buffer);
@@ -1426,7 +1426,7 @@ namespace rsx
}
template <typename ...Args>
void discard_framebuffer_memory_region(commandbuffer_type& /*cmd*/, const address_range& rsx_range, Args&&... /*extras*/)
void discard_framebuffer_memory_region(commandbuffer_type& /*cmd*/, const address_range32& rsx_range, Args&&... /*extras*/)
{
if (g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer)
{
@@ -1439,7 +1439,7 @@ namespace rsx
}
}
void set_memory_read_flags(const address_range &memory_range, memory_read_flags flags)
void set_memory_read_flags(const address_range32 &memory_range, memory_read_flags flags)
{
std::lock_guard lock(m_cache_mutex);
@@ -1492,7 +1492,7 @@ namespace rsx
private:
inline void update_flush_always_cache(section_storage_type &section, bool add)
{
const address_range& range = section.get_section_range();
const address_range32& range = section.get_section_range();
if (add)
{
// Add to m_flush_always_cache
@@ -1529,7 +1529,7 @@ namespace rsx
template <typename ...Args>
thrashed_set invalidate_range(
commandbuffer_type& cmd,
const address_range &range,
const address_range32 &range,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
@@ -1568,7 +1568,7 @@ namespace rsx
}
template <typename ...Args>
bool flush_if_cache_miss_likely(commandbuffer_type& cmd, const address_range &range, Args&&... extras)
bool flush_if_cache_miss_likely(commandbuffer_type& cmd, const address_range32 &range, Args&&... extras)
{
u32 cur_flushes_this_frame = (m_flushes_this_frame + m_speculations_this_frame);
@@ -1834,7 +1834,7 @@ namespace rsx
m_uncached_subresources.clear();
}
void notify_surface_changed(const utils::address_range& range)
void notify_surface_changed(const utils::address_range32& range)
{
for (auto It = m_temporary_subresource_cache.begin(); It != m_temporary_subresource_cache.end();)
{
@@ -1858,7 +1858,7 @@ namespace rsx
const size3f& scale,
const texture_channel_remap_t& remap,
const texture_cache_search_options& options,
const utils::address_range& memory_range,
const utils::address_range32& memory_range,
rsx::texture_dimension_extended extended_dimension,
SurfaceStoreType& m_rtts, Args&&... /*extras*/)
{
@@ -2362,7 +2362,7 @@ namespace rsx
extended_dimension = std::max(extended_dimension, rsx::texture_dimension_extended::texture_dimension_2d);
}
const auto lookup_range = utils::address_range::start_length(attributes.address, attributes.pitch * required_surface_height);
const auto lookup_range = utils::address_range32::start_length(attributes.address, attributes.pitch * required_surface_height);
reader_lock lock(m_cache_mutex);
auto result = fast_texture_search(cmd, attributes, scale, tex.decoded_remap(),
@@ -2439,7 +2439,7 @@ namespace rsx
attr2.pitch = attr2.width * attr2.bpp;
}
const auto range = utils::address_range::start_length(attr2.address, attr2.pitch * attr2.height);
const auto range = utils::address_range32::start_length(attr2.address, attr2.pitch * attr2.height);
auto ret = fast_texture_search(cmd, attr2, scale, tex.decoded_remap(),
options, range, extended_dimension, m_rtts, std::forward<Args>(extras)...);
@@ -2477,7 +2477,7 @@ namespace rsx
}
const u32 cache_end = attr2.address + (attr2.pitch * attr2.height);
result.external_subresource_desc.cache_range = utils::address_range::start_end(attributes.address, cache_end);
result.external_subresource_desc.cache_range = utils::address_range32::start_end(attributes.address, cache_end);
result.external_subresource_desc.sections_to_copy = std::move(sections);
return result;
@@ -2498,7 +2498,7 @@ namespace rsx
lock.upgrade();
// Invalidate
const address_range tex_range = address_range::start_length(attributes.address, tex_size);
const address_range32 tex_range = address_range32::start_length(attributes.address, tex_size);
invalidate_range_impl_base(cmd, tex_range, invalidation_cause::read, {}, std::forward<Args>(extras)...);
// Upload from CPU. Note that sRGB conversion is handled in the FS
@@ -2595,7 +2595,7 @@ namespace rsx
src_address += (src.width - src_w) * src_bpp;
}
const auto get_tiled_region = [&](const utils::address_range& range)
const auto get_tiled_region = [&](const utils::address_range32& range)
{
auto rsxthr = rsx::get_current_renderer();
return rsxthr->get_tiled_memory_region(range);
@@ -2683,7 +2683,7 @@ namespace rsx
return true;
};
auto validate_fbo_integrity = [&](const utils::address_range& range, bool is_depth_texture)
auto validate_fbo_integrity = [&](const utils::address_range32& range, bool is_depth_texture)
{
const bool will_upload = is_depth_texture ? !!g_cfg.video.read_depth_buffer : !!g_cfg.video.read_color_buffers;
if (!will_upload)
@@ -2705,8 +2705,8 @@ namespace rsx
};
// Check tiled mem
const auto dst_tile = get_tiled_region(utils::address_range::start_length(dst_address, dst.pitch * dst.clip_height));
const auto src_tile = get_tiled_region(utils::address_range::start_length(src_address, src.pitch * src.height));
const auto dst_tile = get_tiled_region(utils::address_range32::start_length(dst_address, dst.pitch * dst.clip_height));
const auto src_tile = get_tiled_region(utils::address_range32::start_length(src_address, src.pitch * src.height));
const auto dst_is_tiled = !!dst_tile;
const auto src_is_tiled = !!src_tile;
@@ -2735,7 +2735,7 @@ namespace rsx
// If we have a pitched write, or a suspiciously large transfer, we likely have a valid write.
// Invalidate surfaces in range. Sample tests should catch overlaps in theory.
m_rtts.invalidate_range(utils::address_range::start_length(dst_address, dst.pitch * dst_h));
m_rtts.invalidate_range(utils::address_range32::start_length(dst_address, dst.pitch * dst_h));
}
// FBO re-validation. It is common for GPU and CPU data to desync as we do not have a way to share memory pages directly between the two (in most setups)
@@ -2863,7 +2863,7 @@ namespace rsx
const auto src_payload_length = (src.pitch * (src_h - 1) + (src_w * src_bpp));
const auto dst_payload_length = (dst.pitch * (dst_h - 1) + (dst_w * dst_bpp));
const auto dst_range = address_range::start_length(dst_address, dst_payload_length);
const auto dst_range = address_range32::start_length(dst_address, dst_payload_length);
if (!use_null_region && !dst_is_render_target)
{
@@ -3035,7 +3035,7 @@ namespace rsx
{
// NOTE: Src address already takes into account the flipped nature of the overlap!
const u32 lookup_mask = rsx::texture_upload_context::blit_engine_src | rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::shader_read;
auto overlapping_surfaces = find_texture_from_range<false>(address_range::start_length(src_address, src_payload_length), src.pitch, lookup_mask);
auto overlapping_surfaces = find_texture_from_range<false>(address_range32::start_length(src_address, src_payload_length), src.pitch, lookup_mask);
auto old_src_area = src_area;
for (const auto &surface : overlapping_surfaces)
@@ -3168,7 +3168,7 @@ namespace rsx
subresource_layout.push_back(subres);
const u32 gcm_format = helpers::get_sized_blit_format(src_is_argb8, dst_is_depth_surface, is_format_convert);
const auto rsx_range = address_range::start_length(image_base, src.pitch * image_height);
const auto rsx_range = address_range32::start_length(image_base, src.pitch * image_height);
lock.upgrade();
@@ -3243,7 +3243,7 @@ namespace rsx
dst_dimensions.height = align2(usable_section_length, dst.pitch) / dst.pitch;
const u32 full_section_length = ((dst_dimensions.height - 1) * dst.pitch) + (dst_dimensions.width * dst_bpp);
const auto rsx_range = address_range::start_length(dst_base_address, full_section_length);
const auto rsx_range = address_range32::start_length(dst_base_address, full_section_length);
lock.upgrade();
@@ -3502,7 +3502,7 @@ namespace rsx
return m_predictor;
}
bool is_protected(u32 section_base_address, const address_range& test_range, rsx::texture_upload_context context)
bool is_protected(u32 section_base_address, const address_range32& test_range, rsx::texture_upload_context context)
{
reader_lock lock(m_cache_mutex);

View file

@@ -112,7 +112,7 @@ namespace rsx {
}
public:
void set_protection(const address_range& range, utils::protection prot)
void set_protection(const address_range32& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro || prot == utils::protection::rw);
@@ -123,7 +123,7 @@ namespace rsx {
}
}
void discard(const address_range& range)
void discard(const address_range32& range)
{
set_protection(range, utils::protection::rw);
}
@@ -136,7 +136,7 @@ namespace rsx {
}
}
void add(const address_range& range, utils::protection prot)
void add(const address_range32& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro);
@@ -147,7 +147,7 @@ namespace rsx {
}
}
void remove(const address_range& range, utils::protection prot)
void remove(const address_range32& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro);
@@ -160,7 +160,7 @@ namespace rsx {
// Returns a lower bound as to how many locked sections are known to be within the given range with each protection {NA,RO}
// The assumption here is that the page in the given range with the largest number of refcounted sections represents the lower bound to how many there must be
std::pair<u8,u8> get_minimum_number_of_sections(const address_range& range) const
std::pair<u8,u8> get_minimum_number_of_sections(const address_range32& range) const
{
AUDIT(range.is_page_range());
@@ -175,7 +175,7 @@ namespace rsx {
return { no,ro };
}
void check_unprotected(const address_range& range, bool allow_ro = false, bool must_be_empty = true) const
void check_unprotected(const address_range32& range, bool allow_ro = false, bool must_be_empty = true) const
{
AUDIT(range.is_page_range());
for (const per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)

View file

@@ -69,9 +69,9 @@ namespace rsx
blit_op_result(bool success) : succeeded(success)
{}
inline address_range to_address_range() const
inline address_range32 to_address_range() const
{
return address_range::start_length(real_dst_address, real_dst_size);
return address_range32::start_length(real_dst_address, real_dst_size);
}
};
@@ -182,7 +182,7 @@ namespace rsx
static inline blit_target_properties get_optimal_blit_target_properties(
bool src_is_render_target,
address_range dst_range,
address_range32 dst_range,
u32 dst_pitch,
const sizeu src_dimensions,
const sizeu dst_dimensions)
@@ -209,7 +209,7 @@ namespace rsx
continue;
}
const auto buffer_range = address_range::start_length(rsx::get_address(buffer.offset, CELL_GCM_LOCATION_LOCAL), pitch * (buffer.height - 1) + (buffer.width * bpp));
const auto buffer_range = address_range32::start_length(rsx::get_address(buffer.offset, CELL_GCM_LOCATION_LOCAL), pitch * (buffer.height - 1) + (buffer.width * bpp));
if (dst_range.inside(buffer_range))
{
// Match found

View file

@@ -76,14 +76,14 @@ namespace rsx
using texture_format = typename traits::texture_format;
using section_storage_type = typename traits::section_storage_type;
address_range cpu_range;
address_range32 cpu_range;
texture_format format;
texture_upload_context context;
// Constructors
texture_cache_predictor_key() = default;
texture_cache_predictor_key(const address_range& _cpu_range, texture_format _format, texture_upload_context _context)
texture_cache_predictor_key(const address_range32& _cpu_range, texture_format _format, texture_upload_context _context)
: cpu_range(_cpu_range)
, format(_format)
, context(_context)
@@ -398,7 +398,7 @@ struct std::hash<rsx::texture_cache_predictor_key<Traits>>
{
usz operator()(const rsx::texture_cache_predictor_key<Traits>& k) const
{
usz result = std::hash<utils::address_range>{}(k.cpu_range);
usz result = std::hash<utils::address_range32>{}(k.cpu_range);
result ^= static_cast<usz>(k.format);
result ^= (static_cast<usz>(k.context) << 16);
return result;

View file

@@ -27,7 +27,7 @@ namespace rsx
hash
};
static inline void memory_protect(const address_range& range, utils::protection prot)
static inline void memory_protect(const address_range32& range, utils::protection prot)
{
ensure(range.is_page_range());
@@ -232,7 +232,7 @@ namespace rsx
private:
u32 index = 0;
address_range range = {};
address_range32 range = {};
block_container_type sections = {};
unowned_container_type unowned; // pointers to sections from other blocks that overlap this block
atomic_t<u32> exists_count = 0;
@@ -269,7 +269,7 @@ namespace rsx
m_storage = storage;
index = _index;
range = address_range::start_length(index * block_size, block_size);
range = address_range32::start_length(index * block_size, block_size);
AUDIT(range.is_page_range() && get_start() / block_size == index);
}
@@ -346,12 +346,12 @@ namespace rsx
}
// Address range
inline const address_range& get_range() const { return range; }
inline const address_range32& get_range() const { return range; }
inline u32 get_start() const { return range.start; }
inline u32 get_end() const { return range.end; }
inline u32 get_index() const { return index; }
inline bool overlaps(const section_storage_type& section, section_bounds bounds = full_range) const { return section.overlaps(range, bounds); }
inline bool overlaps(const address_range& _range) const { return range.overlaps(_range); }
inline bool overlaps(const address_range32& _range) const { return range.overlaps(_range); }
/**
* Section callbacks
@@ -511,7 +511,7 @@ namespace rsx
return blocks[address / block_size];
}
inline block_type& block_for(const address_range &range)
inline block_type& block_for(const address_range32 &range)
{
AUDIT(range.valid());
return block_for(range.start);
@@ -689,7 +689,7 @@ namespace rsx
// Constructors
range_iterator_tmpl() = default; // end iterator
explicit range_iterator_tmpl(parent_type &storage, const address_range &_range, section_bounds _bounds, bool _locked_only)
explicit range_iterator_tmpl(parent_type &storage, const address_range32 &_range, section_bounds _bounds, bool _locked_only)
: range(_range)
, bounds(_bounds)
, block(&storage.block_for(range.start))
@@ -704,7 +704,7 @@ namespace rsx
private:
// Members
address_range range;
address_range32 range;
section_bounds bounds;
block_type *block = nullptr;
@@ -825,16 +825,16 @@ namespace rsx
using range_iterator = range_iterator_tmpl<section_storage_type, typename block_type::unowned_iterator, typename block_type::iterator, block_type, ranged_storage>;
using range_const_iterator = range_iterator_tmpl<const section_storage_type, typename block_type::unowned_const_iterator, typename block_type::const_iterator, const block_type, const ranged_storage>;
inline range_iterator range_begin(const address_range &range, section_bounds bounds, bool locked_only = false) {
inline range_iterator range_begin(const address_range32 &range, section_bounds bounds, bool locked_only = false) {
return range_iterator(*this, range, bounds, locked_only);
}
inline range_const_iterator range_begin(const address_range &range, section_bounds bounds, bool locked_only = false) const {
inline range_const_iterator range_begin(const address_range32 &range, section_bounds bounds, bool locked_only = false) const {
return range_const_iterator(*this, range, bounds, locked_only);
}
inline range_const_iterator range_begin(u32 address, section_bounds bounds, bool locked_only = false) const {
return range_const_iterator(*this, address_range::start_length(address, 1), bounds, locked_only);
return range_const_iterator(*this, address_range32::start_length(address, 1), bounds, locked_only);
}
constexpr range_iterator range_end()
@@ -881,9 +881,9 @@ namespace rsx
class buffered_section
{
private:
address_range locked_range;
address_range cpu_range = {};
address_range confirmed_range;
address_range32 locked_range;
address_range32 cpu_range = {};
address_range32 confirmed_range;
utils::protection protection = utils::protection::rw;
@@ -891,7 +891,7 @@ namespace rsx
u64 mem_hash = 0;
bool locked = false;
void init_lockable_range(const address_range& range);
void init_lockable_range(const address_range32& range);
u64 fast_hash_internal() const;
public:
@@ -899,7 +899,7 @@ namespace rsx
buffered_section() = default;
~buffered_section() = default;
void reset(const address_range& memory_range);
void reset(const address_range32& memory_range);
protected:
void invalidate_range();
@@ -911,7 +911,7 @@ namespace rsx
bool sync() const;
void discard();
const address_range& get_bounds(section_bounds bounds) const;
const address_range32& get_bounds(section_bounds bounds) const;
bool is_locked(bool actual_page_flags = false) const;
@@ -923,7 +923,7 @@ namespace rsx
return get_bounds(bounds).overlaps(address);
}
inline bool overlaps(const address_range& other, section_bounds bounds) const
inline bool overlaps(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
@@ -938,7 +938,7 @@ namespace rsx
return get_bounds(bounds).overlaps(other.get_bounds(bounds));
}
inline bool inside(const address_range& other, section_bounds bounds) const
inline bool inside(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
@@ -953,12 +953,12 @@ namespace rsx
return get_bounds(bounds).inside(other.get_bounds(bounds));
}
inline s32 signed_distance(const address_range& other, section_bounds bounds) const
inline s32 signed_distance(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).signed_distance(other);
}
inline u32 distance(const address_range& other, section_bounds bounds) const
inline u32 distance(const address_range32& other, section_bounds bounds) const
{
return get_bounds(bounds).distance(other);
}
@@ -981,18 +981,18 @@ namespace rsx
return cpu_range.valid() ? cpu_range.length() : 0;
}
inline const address_range& get_locked_range() const
inline const address_range32& get_locked_range() const
{
AUDIT(locked);
return locked_range;
}
inline const address_range& get_section_range() const
inline const address_range32& get_section_range() const
{
return cpu_range;
}
const address_range& get_confirmed_range() const
const address_range32& get_confirmed_range() const
{
return confirmed_range.valid() ? confirmed_range : cpu_range;
}
@@ -1005,7 +1005,7 @@ namespace rsx
return { confirmed_range.start - cpu_range.start, confirmed_range.length() };
}
inline bool matches(const address_range& range) const
inline bool matches(const address_range32& range) const
{
return cpu_range.valid() && cpu_range == range;
}
@@ -1015,7 +1015,7 @@ namespace rsx
return protection;
}
inline address_range get_min_max(const address_range& current_min_max, section_bounds bounds) const
inline address_range32 get_min_max(const address_range32& current_min_max, section_bounds bounds) const
{
return get_bounds(bounds).get_min_max(current_min_max);
}
@@ -1124,7 +1124,7 @@ namespace rsx
/**
* Reset
*/
void reset(const address_range &memory_range)
void reset(const address_range32 &memory_range)
{
AUDIT(memory_range.valid());
AUDIT(!is_locked());
@@ -1537,7 +1537,7 @@ namespace rsx
void imp_flush_memcpy(u32 vm_dst, u8* src, u32 len) const
{
u8 *dst = get_ptr<u8>(vm_dst);
address_range copy_range = address_range::start_length(vm_dst, len);
address_range32 copy_range = address_range32::start_length(vm_dst, len);
if (flush_exclusions.empty() || !copy_range.overlaps(flush_exclusions))
{
@@ -1673,7 +1673,7 @@ namespace rsx
cleanup_flush();
}
void add_flush_exclusion(const address_range& rng)
void add_flush_exclusion(const address_range32& rng)
{
AUDIT(is_locked() && is_flushable());
const auto _rng = rng.get_intersect(get_section_range());
@@ -1804,7 +1804,7 @@ namespace rsx
/**
* Comparison
*/
inline bool matches(const address_range &memory_range) const
inline bool matches(const address_range32 &memory_range) const
{
return valid_range() && rsx::buffered_section::matches(memory_range);
}
@@ -1846,7 +1846,7 @@ namespace rsx
return matches(format, width, height, depth, mipmaps);
}
bool matches(const address_range& memory_range, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
bool matches(const address_range32& memory_range, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
{
if (!valid_range())
return false;

View file

@@ -4,7 +4,7 @@
namespace rsx
{
GCM_tile_reference GCM_context::get_tiled_memory_region(const utils::address_range& range) const
GCM_tile_reference GCM_context::get_tiled_memory_region(const utils::address_range32& range) const
{
if (rsx::get_location(range.start) != CELL_GCM_LOCATION_MAIN)
{
@@ -27,7 +27,7 @@ namespace rsx
}
const auto tile_base_address = iomap_table.get_addr(tile.offset);
const auto tile_range = utils::address_range::start_length(tile_base_address, tile.size);
const auto tile_range = utils::address_range32::start_length(tile_base_address, tile.size);
if (range.inside(tile_range))
{
@@ -39,12 +39,12 @@ namespace rsx
return {};
}
utils::address_range GCM_tile_reference::tile_align(const utils::address_range& range) const
utils::address_range32 GCM_tile_reference::tile_align(const utils::address_range32& range) const
{
const auto alignment = 64 * tile->pitch;
const u32 start_offset = rsx::align_down2(range.start - base_address, alignment);
const u32 end_offset = rsx::align2(range.end - base_address + 1, alignment);
return utils::address_range::start_length(start_offset + base_address, end_offset - start_offset);
return utils::address_range32::start_length(start_offset + base_address, end_offset - start_offset);
}
}
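
A worked example of the tile_align arithmetic above: the range is expanded to whole 64-line tile rows, that is, to multiples of 64 * tile->pitch bytes relative to the tile base address. The numbers below are illustrative only:

#include <cstdint>
using u32 = std::uint32_t;

constexpr u32 pitch = 0x2000;         // tile pitch in bytes (example value)
constexpr u32 alignment = 64 * pitch; // 0x80000: one 64-line row of the tile

// For a range starting 0x83000 bytes past the tile base and ending at offset 0x84FFF:
constexpr u32 start_offset = 0x83000 / alignment * alignment;                     // align_down2 -> 0x080000
constexpr u32 end_offset = (0x84FFF + 1 + alignment - 1) / alignment * alignment; // align2      -> 0x100000

// tile_align() then returns start_length(base_address + 0x080000, 0x080000),
// i.e. the row-aligned range [base_address + 0x080000, base_address + 0x0FFFFF].
static_assert(end_offset - start_offset == 0x80000);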

View file

@@ -27,7 +27,7 @@ namespace rsx
return !!tile;
}
utils::address_range tile_align(const rsx::address_range& range) const;
utils::address_range32 tile_align(const rsx::address_range32& range) const;
};
struct GCM_context
@@ -53,6 +53,6 @@ namespace rsx
atomic_t<u64> unsent_gcm_events = 0; // Unsent event bits when aborting RSX/VBLANK thread (will be sent on savestate load)
GCM_tile_reference get_tiled_memory_region(const utils::address_range& range) const;
GCM_tile_reference get_tiled_memory_region(const utils::address_range32& range) const;
};
}

View file

@@ -49,9 +49,9 @@ namespace rsx
return;
}
const auto range1 = utils::address_range::start_length(dst_addr, dst_length);
const auto range2 = utils::address_range::start_length(src_addr, src_length);
utils::address_range target_range;
const auto range1 = utils::address_range32::start_length(dst_addr, dst_length);
const auto range2 = utils::address_range32::start_length(src_addr, src_length);
utils::address_range32 target_range;
if (!range1.overlaps(range2)) [[likely]]
{

View file

@@ -29,7 +29,7 @@ namespace gl
gl::check_state();
}
void* dma_block::map(const utils::address_range& range) const
void* dma_block::map(const utils::address_range32& range) const
{
ensure(range.inside(this->range()));
return vm::get_super_ptr(range.start);
@@ -58,7 +58,7 @@ namespace gl
}
}
bool dma_block::can_map(const utils::address_range& range) const
bool dma_block::can_map(const utils::address_range32& range) const
{
if (m_parent)
{
@@ -73,11 +73,11 @@ namespace gl
g_dma_pool.clear();
}
utils::address_range to_dma_block_range(u32 start, u32 length)
utils::address_range32 to_dma_block_range(u32 start, u32 length)
{
const auto start_block_address = start & s_dma_block_mask;
const auto end_block_address = (start + length + s_dma_block_size - 1) & s_dma_block_mask;
return utils::address_range::start_end(start_block_address, end_block_address);
return utils::address_range32::start_end(start_block_address, end_block_address);
}
const dma_block& get_block(u32 start, u32 length)
@@ -91,7 +91,7 @@ namespace gl
return *block;
}
const auto range = utils::address_range::start_length(start, length);
const auto range = utils::address_range32::start_length(start, length);
if (block->can_map(range)) [[ likely ]]
{
return *block;

View file

@@ -21,17 +21,17 @@ namespace gl
void allocate(u32 base_address, u32 block_size);
void resize(u32 new_length);
void* map(const utils::address_range& range) const;
void* map(const utils::address_range32& range) const;
void set_parent(const dma_block* other);
const dma_block* head() const { return m_parent ? m_parent : this; }
bool can_map(const utils::address_range& range) const;
bool can_map(const utils::address_range32& range) const;
u32 base_addr() const { return m_base_address; }
u32 length() const { return m_data ? static_cast<u32>(m_data->size()) : 0; }
bool empty() const { return length() == 0; }
buffer* get() const { return m_data.get(); }
utils::address_range range() const { return utils::address_range::start_length(m_base_address, length()); }
utils::address_range32 range() const { return utils::address_range32::start_length(m_base_address, length()); }
protected:
u32 m_base_address = 0;

View file

@ -1240,7 +1240,7 @@ bool GLGSRender::on_access_violation(u32 address, bool is_writing)
return true;
}
void GLGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
void GLGSRender::on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause)
{
gl::command_context cmd{ gl_state };
auto data = m_gl_texture_cache.invalidate_range(cmd, range, cause);

View file

@ -223,7 +223,7 @@ protected:
void do_local_task(rsx::FIFO::state state) override;
bool on_access_violation(u32 address, bool is_writing) override;
void on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause) override;
void on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause) override;
void notify_tile_unbound(u32 tile) override;
void on_semaphore_acquire_wait() override;
};

View file

@ -129,7 +129,7 @@ gl::texture* GLGSRender::get_present_source(gl::present_surface_info* info, cons
initialize_scratch_image();
gl::command_context cmd{ gl_state };
const auto range = utils::address_range::start_length(info->address, info->pitch * info->height);
const auto range = utils::address_range32::start_length(info->address, info->pitch * info->height);
m_gl_texture_cache.invalidate_range(cmd, range, rsx::invalidation_cause::read);
flip_image->copy_from(vm::base(info->address), static_cast<gl::texture::format>(expected_format), gl::texture::type::uint_8_8_8_8, unpack_settings);

View file

@ -153,7 +153,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool /*
{
if (m_surface_info[i].pitch && g_cfg.video.write_color_buffers)
{
const utils::address_range surface_range = m_surface_info[i].get_memory_range();
const utils::address_range32 surface_range = m_surface_info[i].get_memory_range();
m_gl_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_gl_texture_cache.flush_if_cache_miss_likely(cmd, surface_range);
}
@ -182,7 +182,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool /*
if (m_depth_surface_info.pitch && g_cfg.video.write_depth_buffer)
{
const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
const utils::address_range32 surface_range = m_depth_surface_info.get_memory_range();
m_gl_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_gl_texture_cache.flush_if_cache_miss_likely(cmd, surface_range);
}

View file

@ -148,7 +148,7 @@ namespace gl
}
}
void dma_transfer(gl::command_context& cmd, gl::texture* src, const areai& /*src_area*/, const utils::address_range& /*valid_range*/, u32 pitch)
void dma_transfer(gl::command_context& cmd, gl::texture* src, const areai& /*src_area*/, const utils::address_range32& /*valid_range*/, u32 pitch)
{
init_buffer(src);
glGetError();
@ -600,7 +600,7 @@ namespace gl
copy_transfer_regions_impl(cmd, dst->image(), region);
}
cached_texture_section* create_new_texture(gl::command_context& cmd, const utils::address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
cached_texture_section* create_new_texture(gl::command_context& cmd, const utils::address_range32 &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t /*flags*/) override
{
const rsx::image_section_attributes_t search_desc = { .gcm_format = gcm_format, .width = width, .height = height, .depth = depth, .mipmaps = mipmaps };
@ -708,7 +708,7 @@ namespace gl
cached_texture_section* create_nul_section(
gl::command_context& /*cmd*/,
const utils::address_range& rsx_range,
const utils::address_range32& rsx_range,
const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& /*tile*/,
bool /*memory_load*/) override
@ -726,7 +726,7 @@ namespace gl
return &cached;
}
cached_texture_section* upload_image_from_cpu(gl::command_context& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
cached_texture_section* upload_image_from_cpu(gl::command_context& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool input_swizzled) override
{
auto section = create_new_texture(cmd, rsx_range, width, height, depth, mipmaps, pitch, gcm_format, context, type, input_swizzled,

View file

@ -280,7 +280,7 @@ namespace gl
void scratch_ring_buffer::pop_barrier(u32 start, u32 length)
{
const auto range = utils::address_range::start_length(start, length);
const auto range = utils::address_range32::start_length(start, length);
m_barriers.erase(std::remove_if(m_barriers.begin(), m_barriers.end(), [&range](auto& barrier_)
{
if (barrier_.range.overlaps(range))
@ -302,7 +302,7 @@ namespace gl
}
barrier barrier_;
barrier_.range = utils::address_range::start_length(start, length);
barrier_.range = utils::address_range32::start_length(start, length);
barrier_.signal.create();
m_barriers.emplace_back(barrier_);
}

View file

@ -88,7 +88,7 @@ namespace gl
struct barrier
{
fence signal;
utils::address_range range;
utils::address_range32 range;
};
buffer m_storage;

View file

@ -90,7 +90,7 @@ namespace rsx
}
}
void mm_flush(const rsx::simple_array<utils::address_range>& ranges)
void mm_flush(const rsx::simple_array<utils::address_range32>& ranges)
{
std::lock_guard lock(g_mprotect_queue_lock);
if (g_deferred_mprotect_queue.empty())

View file

@ -39,6 +39,6 @@ namespace rsx
void mm_protect(void* start, u64 length, utils::protection prot);
void mm_flush_lazy();
void mm_flush(u32 vm_address);
void mm_flush(const rsx::simple_array<utils::address_range>& ranges);
void mm_flush(const rsx::simple_array<utils::address_range32>& ranges);
void mm_flush();
}

View file

@ -58,10 +58,10 @@ namespace rsx
auto res = ::rsx::reservation_lock<true>(write_address, write_length, read_address, read_length);
rsx::simple_array<utils::address_range> flush_mm_ranges =
rsx::simple_array<utils::address_range32> flush_mm_ranges =
{
utils::address_range::start_length(write_address, write_length).to_page_range(),
utils::address_range::start_length(read_address, read_length).to_page_range()
utils::address_range32::start_length(write_address, write_length).to_page_range(),
utils::address_range32::start_length(read_address, read_length).to_page_range()
};
rsx::mm_flush(flush_mm_ranges);

View file

@ -328,8 +328,8 @@ namespace rsx
{
const bool is_overlapping = !src_is_modified && dst.dma == src.dma && [&]() -> bool
{
const auto src_range = utils::address_range::start_length(src.rsx_address, src.pitch * (src.height - 1) + (src.bpp * src.width));
const auto dst_range = utils::address_range::start_length(dst.rsx_address, dst.pitch * (dst.clip_height - 1) + (dst.bpp * dst.clip_width));
const auto src_range = utils::address_range32::start_length(src.rsx_address, src.pitch * (src.height - 1) + (src.bpp * src.width));
const auto dst_range = utils::address_range32::start_length(dst.rsx_address, dst.pitch * (dst.clip_height - 1) + (dst.bpp * dst.clip_width));
return src_range.overlaps(dst_range);
}();
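The length expression in the overlap test above encodes the true byte extent of a pitched 2D region: all rows but the last occupy a full pitch, while the final scanline only spans width * bpp bytes. A small worked example (the surface dimensions are hypothetical):

#include <cstdint>
using u32 = std::uint32_t;

// Byte extent of a pitched 2D region: (rows - 1) full pitches plus one
// final row of width * bpp bytes, since the last scanline does not
// extend to the end of its pitch.
constexpr u32 surface_bytes(u32 pitch, u32 rows, u32 width, u32 bpp)
{
    return pitch * (rows - 1) + width * bpp;
}

int main()
{
    // Hypothetical 640x480 32bpp surface with a padded 0x1000-byte pitch.
    const u32 src_len = surface_bytes(0x1000, 480, 640, 4);

    // Two such surfaces overlap iff their inclusive [address, address + len - 1]
    // ranges intersect, which is what src_range.overlaps(dst_range) checks.
    return src_len == 479 * 0x1000 + 640 * 4 ? 0 : 1;
}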
@ -612,7 +612,7 @@ namespace rsx
const bool interpolate = in_inter == blit_engine::transfer_interpolator::foh;
auto real_dst = dst.pixels;
const auto tiled_region = RSX(ctx)->get_tiled_memory_region(utils::address_range::start_length(dst.rsx_address, dst.pitch * dst.clip_height));
const auto tiled_region = RSX(ctx)->get_tiled_memory_region(utils::address_range32::start_length(dst.rsx_address, dst.pitch * dst.clip_height));
std::vector<u8> tmp;
if (tiled_region)

View file

@ -212,7 +212,7 @@ namespace rsx
}
// Fault recovery
utils::address_range dma_manager::get_fault_range(bool writing) const
utils::address_range32 dma_manager::get_fault_range(bool writing) const
{
const auto m_current_job = ensure(m_thread->m_current_job);
@ -237,6 +237,6 @@ namespace rsx
fmt::throw_exception("Unreachable");
}
return utils::address_range::start_length(vm::get_addr(address), range);
return utils::address_range32::start_length(vm::get_addr(address), range);
}
}

View file

@ -83,6 +83,6 @@ namespace rsx
void clear_mem_fault_flag();
// Fault recovery
utils::address_range get_fault_range(bool writing) const;
utils::address_range32 get_fault_range(bool writing) const;
};
}

View file

@ -1233,7 +1233,7 @@ namespace rsx
{
std::lock_guard lock(m_mtx_task);
m_invalidated_memory_range = utils::address_range::start_end(0x2 << 28, constants::local_mem_base + local_mem_size - 1);
m_invalidated_memory_range = utils::address_range32::start_end(0x2 << 28, constants::local_mem_base + local_mem_size - 1);
handle_invalidated_memory_range();
}
}
@ -2299,8 +2299,8 @@ namespace rsx
return false;
}
const auto current_fragment_shader_range = address_range::start_length(shader_offset, current_fragment_program.total_length);
if (!current_fragment_shader_range.overlaps(address_range::start_length(dst_offset, size)))
const auto current_fragment_shader_range = address_range32::start_length(shader_offset, current_fragment_program.total_length);
if (!current_fragment_shader_range.overlaps(address_range32::start_length(dst_offset, size)))
{
// No range overlap
return false;
@ -2832,7 +2832,7 @@ namespace rsx
reader_lock lock(m_mtx_task);
const auto map_range = address_range::start_length(address, size);
const auto map_range = address_range32::start_length(address, size);
if (!m_invalidated_memory_range.valid())
return;
@ -2918,7 +2918,7 @@ namespace rsx
// Queue up memory invalidation
std::lock_guard lock(m_mtx_task);
const bool existing_range_valid = m_invalidated_memory_range.valid();
const auto unmap_range = address_range::start_length(address, size);
const auto unmap_range = address_range32::start_length(address, size);
if (existing_range_valid && m_invalidated_memory_range.touches(unmap_range))
{

View file

@ -149,7 +149,7 @@ namespace rsx
virtual f64 get_display_refresh_rate() const = 0;
// Invalidated memory range
address_range m_invalidated_memory_range;
address_range32 m_invalidated_memory_range;
// Profiler
rsx::profiling_timer m_profiler;
@ -353,7 +353,7 @@ namespace rsx
virtual void flip(const display_flip_info_t& info) = 0;
virtual u64 timestamp();
virtual bool on_access_violation(u32 /*address*/, bool /*is_writing*/) { return false; }
virtual void on_invalidate_memory_range(const address_range & /*range*/, rsx::invalidation_cause) {}
virtual void on_invalidate_memory_range(const address_range32 & /*range*/, rsx::invalidation_cause) {}
virtual void notify_tile_unbound(u32 /*tile*/) {}
// control

View file

@ -788,7 +788,7 @@ namespace rsx
u32 ZCULL_control::copy_reports_to(u32 start, u32 range, u32 dest)
{
u32 bytes_to_write = 0;
const auto memory_range = utils::address_range::start_length(start, range);
const auto memory_range = utils::address_range32::start_length(start, range);
for (auto& writer : m_pending_writes)
{
if (!writer.sink)

View file

@ -27,7 +27,7 @@ namespace vk
free();
}
void* dma_block::map_range(const utils::address_range& range)
void* dma_block::map_range(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@ -142,7 +142,7 @@ namespace vk
inheritance_info.block_offset = (addr - parent->base_address);
}
void dma_block::flush(const utils::address_range& range)
void dma_block::flush(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@ -158,7 +158,7 @@ namespace vk
// NOTE: Do not unmap. This can be extremely slow on some platforms.
}
void dma_block::load(const utils::address_range& range)
void dma_block::load(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@ -174,7 +174,7 @@ namespace vk
// NOTE: Do not unmap. This can be extremely slow on some platforms.
}
dma_mapping_handle dma_block::get(const utils::address_range& range)
dma_mapping_handle dma_block::get(const utils::address_range32& range)
{
if (inheritance_info.parent)
{
@ -264,7 +264,7 @@ namespace vk
s_allocated_dma_pool_size += allocated_memory->size();
}
void* dma_block_EXT::map_range(const utils::address_range& range)
void* dma_block_EXT::map_range(const utils::address_range32& range)
{
return vm::get_super_ptr<void>(range.start);
}
@ -274,12 +274,12 @@ namespace vk
// NOP
}
void dma_block_EXT::flush(const utils::address_range&)
void dma_block_EXT::flush(const utils::address_range32&)
{
// NOP
}
void dma_block_EXT::load(const utils::address_range&)
void dma_block_EXT::load(const utils::address_range32&)
{
// NOP
}
@ -336,7 +336,7 @@ namespace vk
// Not much contention expected here, avoid searching twice
std::lock_guard lock(g_dma_mutex);
const auto map_range = utils::address_range::start_length(local_address, length);
const auto map_range = utils::address_range32::start_length(local_address, length);
auto first_block = (local_address & s_dma_block_mask);
if (auto found = g_dma_pool.find(first_block); found != g_dma_pool.end())
@ -454,7 +454,7 @@ namespace vk
if (auto found = g_dma_pool.find(block); found != g_dma_pool.end())
{
const auto sync_end = std::min(limit, found->second->end());
const auto range = utils::address_range::start_end(local_address, sync_end);
const auto range = utils::address_range32::start_end(local_address, sync_end);
if constexpr (load)
{

View file

@ -28,7 +28,7 @@ namespace vk
virtual void allocate(const render_device& dev, usz size);
virtual void free();
virtual void* map_range(const utils::address_range& range);
virtual void* map_range(const utils::address_range32& range);
virtual void unmap();
public:
@ -38,9 +38,9 @@ namespace vk
virtual void init(const render_device& dev, u32 addr, usz size);
virtual void init(dma_block* parent, u32 addr, usz size);
virtual void flush(const utils::address_range& range);
virtual void load(const utils::address_range& range);
std::pair<u32, buffer*> get(const utils::address_range& range);
virtual void flush(const utils::address_range32& range);
virtual void load(const utils::address_range32& range);
std::pair<u32, buffer*> get(const utils::address_range32& range);
u32 start() const;
u32 end() const;
@ -56,11 +56,11 @@ namespace vk
{
private:
void allocate(const render_device& dev, usz size) override;
void* map_range(const utils::address_range& range) override;
void* map_range(const utils::address_range32& range) override;
void unmap() override;
public:
void flush(const utils::address_range& range) override;
void load(const utils::address_range& range) override;
void flush(const utils::address_range32& range) override;
void load(const utils::address_range32& range) override;
};
}

View file

@ -976,7 +976,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
return true;
}
void VKGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
void VKGSRender::on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause)
{
std::lock_guard lock(m_secondary_cb_guard);
@ -2438,7 +2438,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
// Flush old address if we keep missing it
if (m_surface_info[i].pitch && g_cfg.video.write_color_buffers)
{
const utils::address_range rsx_range = m_surface_info[i].get_memory_range();
const utils::address_range32 rsx_range = m_surface_info[i].get_memory_range();
m_texture_cache.set_memory_read_flags(rsx_range, rsx::memory_read_flags::flush_once);
m_texture_cache.flush_if_cache_miss_likely(*m_current_command_buffer, rsx_range);
}
@ -2455,7 +2455,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
{
if (m_depth_surface_info.pitch && g_cfg.video.write_depth_buffer)
{
const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
const utils::address_range32 surface_range = m_depth_surface_info.get_memory_range();
m_texture_cache.set_memory_read_flags(surface_range, rsx::memory_read_flags::flush_once);
m_texture_cache.flush_if_cache_miss_likely(*m_current_command_buffer, surface_range);
}
@ -2572,7 +2572,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
{
if (!m_surface_info[index].address || !m_surface_info[index].pitch) continue;
const utils::address_range surface_range = m_surface_info[index].get_memory_range();
const utils::address_range32 surface_range = m_surface_info[index].get_memory_range();
if (g_cfg.video.write_color_buffers)
{
m_texture_cache.lock_memory_region(
@ -2588,7 +2588,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
if (m_depth_surface_info.address && m_depth_surface_info.pitch)
{
const utils::address_range surface_range = m_depth_surface_info.get_memory_range();
const utils::address_range32 surface_range = m_depth_surface_info.get_memory_range();
if (g_cfg.video.write_depth_buffer)
{
const u32 gcm_format = (m_depth_surface_info.depth_format == rsx::surface_depth_format::z16) ? CELL_GCM_TEXTURE_DEPTH16 : CELL_GCM_TEXTURE_DEPTH24_D8;

View file

@ -170,7 +170,7 @@ private:
// Offloader thread deadlock recovery
rsx::atomic_bitmask_t<flush_queue_state> m_queue_status;
utils::address_range m_offloader_fault_range;
utils::address_range32 m_offloader_fault_range;
rsx::invalidation_cause m_offloader_fault_cause;
vk::draw_call_t m_current_draw {};
@ -289,6 +289,6 @@ protected:
void notify_tile_unbound(u32 tile) override;
bool on_access_violation(u32 address, bool is_writing) override;
void on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause) override;
void on_invalidate_memory_range(const utils::address_range32 &range, rsx::invalidation_cause cause) override;
void on_semaphore_acquire_wait() override;
};

View file

@ -86,7 +86,7 @@ namespace vk
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align, rsx::flags32_t image_setup_flags);
std::pair<buffer*, u32> detile_memory_block(
const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region, const utils::address_range& range,
const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region, const utils::address_range32& range,
u16 width, u16 height, u8 bpp);
// Other texture management helpers

View file

@ -325,7 +325,7 @@ vk::viewable_image* VKGSRender::get_present_source(/* inout */ vk::present_surfa
if (!image_to_flip) [[ unlikely ]]
{
// Read from cell
const auto range = utils::address_range::start_length(info->address, info->pitch * info->height);
const auto range = utils::address_range32::start_length(info->address, info->pitch * info->height);
const u32 lookup_mask = rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::framebuffer_storage;
const auto overlap = m_texture_cache.find_texture_from_range<true>(range, 0, lookup_mask);

View file

@ -93,7 +93,7 @@ namespace vk
// Drop MSAA resolve/unresolve caches. Only trigger when a hard sync is guaranteed to follow, else it will cause even more problems!
// 2-pass to ensure resources are available where they are most needed
auto relieve_memory_pressure = [&](auto& list, const utils::address_range& range)
auto relieve_memory_pressure = [&](auto& list, const utils::address_range32& range)
{
for (auto it = list.begin_range(range); it != list.end(); ++it)
{
@ -254,7 +254,7 @@ namespace vk
std::vector<render_target*> sorted_list;
sorted_list.reserve(1024);
auto process_list_function = [&](auto& list, const utils::address_range& range)
auto process_list_function = [&](auto& list, const utils::address_range32& range)
{
for (auto it = list.begin_range(range); it != list.end(); ++it)
{

View file

@ -1247,7 +1247,7 @@ namespace vk
}
std::pair<buffer*, u32> detile_memory_block(const vk::command_buffer& cmd, const rsx::GCM_tile_reference& tiled_region,
const utils::address_range& range, u16 width, u16 height, u8 bpp)
const utils::address_range32& range, u16 width, u16 height, u8 bpp)
{
// Calculate the true length of the usable memory section
const auto available_tile_size = tiled_region.tile->size - (range.start - tiled_region.base_address);
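The available size is simply the tail of the tile past the request's offset into it. A sketch with illustrative numbers (the base address and tile size below are assumptions, not values from the source):

#include <cstdint>
using u32 = std::uint32_t;

int main()
{
    // Hypothetical tile: 1 MiB of tiled memory starting at base_address.
    const u32 base_address = 0x30200000;
    const u32 tile_size    = 0x100000;

    // A request starting 0x40000 bytes into the tile can only use the bytes
    // remaining between its start and the end of the tile:
    const u32 range_start = base_address + 0x40000;
    const u32 available_tile_size = tile_size - (range_start - base_address); // 0xC0000

    return available_tile_size == 0xC0000 ? 0 : 1;
}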

View file

@ -62,7 +62,7 @@ namespace vk
}
}
void cached_texture_section::dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch)
void cached_texture_section::dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range32& valid_range, u32 pitch)
{
ensure(src->samples() == 1);
@ -921,7 +921,7 @@ namespace vk
dst->pop_layout(cmd);
}
cached_texture_section* texture_cache::create_new_texture(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
cached_texture_section* texture_cache::create_new_texture(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t flags)
{
const auto section_depth = depth;
@ -1076,7 +1076,7 @@ namespace vk
cached_texture_section* texture_cache::create_nul_section(
vk::command_buffer& /*cmd*/,
const utils::address_range& rsx_range,
const utils::address_range32& rsx_range,
const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& tile,
bool memory_load)
@ -1101,7 +1101,7 @@ namespace vk
return &region;
}
cached_texture_section* texture_cache::upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
cached_texture_section* texture_cache::upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled)
{
if (context != rsx::texture_upload_context::shader_read)

View file

@ -186,7 +186,7 @@ namespace vk
return flushed;
}
void dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch);
void dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range32& valid_range, u32 pitch);
void copy_texture(vk::command_buffer& cmd, bool miss)
{
@ -477,13 +477,13 @@ namespace vk
void update_image_contents(vk::command_buffer& cmd, vk::image_view* dst_view, vk::image* src, u16 width, u16 height) override;
cached_texture_section* create_new_texture(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
cached_texture_section* create_new_texture(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch,
u32 gcm_format, rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, rsx::component_order swizzle_flags, rsx::flags32_t flags) override;
cached_texture_section* create_nul_section(vk::command_buffer& cmd, const utils::address_range& rsx_range, const rsx::image_section_attributes_t& attrs,
cached_texture_section* create_nul_section(vk::command_buffer& cmd, const utils::address_range32& rsx_range, const rsx::image_section_attributes_t& attrs,
const rsx::GCM_tile_reference& tile, bool memory_load) override;
cached_texture_section* upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
cached_texture_section* upload_image_from_cpu(vk::command_buffer& cmd, const utils::address_range32& rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) override;
void set_component_order(cached_texture_section& section, u32 gcm_format, rsx::component_order expected_flags) override;

View file

@ -14,8 +14,8 @@ extern "C"
namespace rsx
{
// Import address_range utilities
using utils::address_range;
// Import address_range32 utilities
using utils::address_range32;
using utils::address_range_vector;
using utils::page_for;
using utils::page_start;
@ -120,7 +120,7 @@ namespace rsx
u8 bpp = 0;
u8 samples = 0;
address_range range{};
address_range32 range{};
gcm_framebuffer_info() = default;
@ -131,16 +131,16 @@ namespace rsx
// Account for the last line of the block not reaching the end
const u32 block_size = pitch * (height - 1) * aa_factor_v;
const u32 line_size = width * aa_factor_u * bpp;
range = address_range::start_length(address, block_size + line_size);
range = address_range32::start_length(address, block_size + line_size);
}
address_range get_memory_range(const u32* aa_factors)
address_range32 get_memory_range(const u32* aa_factors)
{
calculate_memory_range(aa_factors[0], aa_factors[1]);
return range;
}
address_range get_memory_range() const
address_range32 get_memory_range() const
{
ensure(range.start == address);
return range;
@ -260,7 +260,7 @@ namespace rsx
static inline u32 get_location(u32 addr)
{
// We don't really care about the actual memory map, it shouldn't be possible to use the mmio bar region anyway
constexpr address_range local_mem_range = address_range::start_length(rsx::constants::local_mem_base, 0x1000'0000);
constexpr address_range32 local_mem_range = address_range32::start_length(rsx::constants::local_mem_base, 0x1000'0000);
return local_mem_range.overlaps(addr) ?
CELL_GCM_LOCATION_LOCAL :
CELL_GCM_LOCATION_MAIN;
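Since overlaps(addr) against a single address is an inclusive bounds check, get_location reduces to a range test against the local-memory window. A minimal equivalent, assuming a 0xC0000000 base for rsx::constants::local_mem_base (the actual constant is defined elsewhere and only referenced in this diff):

#include <cstdint>
using u32 = std::uint32_t;

// Assumed value for illustration; the real base comes from rsx::constants.
constexpr u32 local_mem_first = 0xC0000000;
constexpr u32 local_mem_last  = local_mem_first + 0x1000'0000 - 1; // 256 MiB window

constexpr bool is_local(u32 addr)
{
    // Equivalent to local_mem_range.overlaps(addr).
    return addr >= local_mem_first && addr <= local_mem_last;
}

static_assert(is_local(0xC0000000));   // -> CELL_GCM_LOCATION_LOCAL
static_assert(!is_local(0x30100000));  // -> CELL_GCM_LOCATION_MAIN

int main() { return 0; }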

View file

@ -11,29 +11,29 @@ namespace utils
TEST(AddressRange, Constructors)
{
// Default constructor
address_range empty;
address_range32 empty;
EXPECT_FALSE(empty.valid());
EXPECT_EQ(empty.start, umax);
EXPECT_EQ(empty.end, 0);
// Static factory methods
address_range r1 = address_range::start_length(0x1000, 0x1000);
address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
EXPECT_EQ(r1.start, 0x1000);
EXPECT_EQ(r1.end, 0x1FFF);
EXPECT_EQ(r1.length(), 0x1000);
EXPECT_TRUE(r1.valid());
address_range r2 = address_range::start_end(0x2000, 0x2FFF);
address_range32 r2 = address_range32::start_end(0x2000, 0x2FFF);
EXPECT_EQ(r2.start, 0x2000);
EXPECT_EQ(r2.end, 0x2FFF);
EXPECT_EQ(r2.length(), 0x1000);
EXPECT_TRUE(r2.valid());
// Edge cases
address_range zero_length = address_range::start_length(0x1000, 0);
address_range32 zero_length = address_range32::start_length(0x1000, 0);
EXPECT_FALSE(zero_length.valid());
address_range single_byte = address_range::start_length(0x1000, 1);
address_range32 single_byte = address_range32::start_length(0x1000, 1);
EXPECT_TRUE(single_byte.valid());
EXPECT_EQ(single_byte.start, 0x1000);
EXPECT_EQ(single_byte.end, 0x1000);
@ -42,7 +42,7 @@ namespace utils
TEST(AddressRange, LengthAndBoundaries)
{
address_range r = address_range::start_length(0x1000, 0x1000);
address_range32 r = address_range32::start_length(0x1000, 0x1000);
// Test length
EXPECT_EQ(r.length(), 0x1000);
@ -60,40 +60,40 @@ namespace utils
TEST(AddressRange, Overlapping)
{
address_range r1 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
// Complete overlap
address_range r2 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r2 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
EXPECT_TRUE(r1.overlaps(r2));
EXPECT_TRUE(r2.overlaps(r1));
// Partial overlap at start
address_range r3 = address_range::start_length(0x800, 0x1000); // 0x800-0x17FF
address_range32 r3 = address_range32::start_length(0x800, 0x1000); // 0x800-0x17FF
EXPECT_TRUE(r1.overlaps(r3));
EXPECT_TRUE(r3.overlaps(r1));
// Partial overlap at end
address_range r4 = address_range::start_length(0x1800, 0x1000); // 0x1800-0x27FF
address_range32 r4 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
EXPECT_TRUE(r1.overlaps(r4));
EXPECT_TRUE(r4.overlaps(r1));
// No overlap, before
address_range r5 = address_range::start_length(0x0, 0x1000); // 0x0-0xFFF
address_range32 r5 = address_range32::start_length(0x0, 0x1000); // 0x0-0xFFF
EXPECT_FALSE(r1.overlaps(r5));
EXPECT_FALSE(r5.overlaps(r1));
// No overlap, after
address_range r6 = address_range::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
address_range32 r6 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
EXPECT_FALSE(r1.overlaps(r6));
EXPECT_FALSE(r6.overlaps(r1));
// Single address overlap at start
address_range r7 = address_range::start_length(0x800, 0x801); // 0x800-0x1000
address_range32 r7 = address_range32::start_length(0x800, 0x801); // 0x800-0x1000
EXPECT_TRUE(r1.overlaps(r7));
EXPECT_TRUE(r7.overlaps(r1));
// Single address overlap at end
address_range r8 = address_range::start_length(0x1FFF, 0x1000); // 0x1FFF-0x2FFE
address_range32 r8 = address_range32::start_length(0x1FFF, 0x1000); // 0x1FFF-0x2FFE
EXPECT_TRUE(r1.overlaps(r8));
EXPECT_TRUE(r8.overlaps(r1));
@ -107,89 +107,89 @@ namespace utils
TEST(AddressRange, Inside)
{
address_range r1 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
// Same range
address_range r2 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r2 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
EXPECT_TRUE(r1.inside(r2));
EXPECT_TRUE(r2.inside(r1));
// Smaller range inside
address_range r3 = address_range::start_length(0x1200, 0x800); // 0x1200-0x19FF
address_range32 r3 = address_range32::start_length(0x1200, 0x800); // 0x1200-0x19FF
EXPECT_TRUE(r3.inside(r1));
EXPECT_FALSE(r1.inside(r3));
// Larger range outside
address_range r4 = address_range::start_length(0x800, 0x2000); // 0x800-0x27FF
address_range32 r4 = address_range32::start_length(0x800, 0x2000); // 0x800-0x27FF
EXPECT_TRUE(r1.inside(r4));
EXPECT_FALSE(r4.inside(r1));
// Partially overlapping
address_range r5 = address_range::start_length(0x1800, 0x1000); // 0x1800-0x27FF
address_range32 r5 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
EXPECT_FALSE(r1.inside(r5));
EXPECT_FALSE(r5.inside(r1));
// No overlap
address_range r6 = address_range::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
address_range32 r6 = address_range32::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
EXPECT_FALSE(r1.inside(r6));
EXPECT_FALSE(r6.inside(r1));
}
TEST(AddressRange, Touches)
{
address_range r1 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
// Same range (overlaps)
address_range r2 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r2 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
EXPECT_TRUE(r1.touches(r2));
// Overlapping ranges
address_range r3 = address_range::start_length(0x1800, 0x1000); // 0x1800-0x27FF
address_range32 r3 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
EXPECT_TRUE(r1.touches(r3));
// Adjacent at end of r1
address_range r4 = address_range::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
address_range32 r4 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
EXPECT_TRUE(r1.touches(r4));
EXPECT_TRUE(r4.touches(r1));
// Adjacent at start of r1
address_range r5 = address_range::start_length(0x0, 0x1000); // 0x0-0xFFF
address_range32 r5 = address_range32::start_length(0x0, 0x1000); // 0x0-0xFFF
EXPECT_TRUE(r1.touches(r5));
EXPECT_TRUE(r5.touches(r1));
// Not touching
address_range r6 = address_range::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
address_range32 r6 = address_range32::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
EXPECT_FALSE(r1.touches(r6));
EXPECT_FALSE(r6.touches(r1));
}
TEST(AddressRange, Distance)
{
address_range r1 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
// Touching ranges
address_range r2 = address_range::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
address_range32 r2 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
EXPECT_EQ(r1.distance(r2), 0);
EXPECT_EQ(r2.distance(r1), 0);
EXPECT_EQ(r1.signed_distance(r2), 0);
EXPECT_EQ(r2.signed_distance(r1), 0);
// Gap of 0x1000 (r3 after r1)
address_range r3 = address_range::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
address_range32 r3 = address_range32::start_length(0x3000, 0x1000); // 0x3000-0x3FFF
EXPECT_EQ(r1.distance(r3), 0x1000);
EXPECT_EQ(r3.distance(r1), 0x1000);
EXPECT_EQ(r1.signed_distance(r3), 0x1000);
EXPECT_EQ(r3.signed_distance(r1), -0x1000);
// Gap of 0x100 (r4 before r1)
address_range r4 = address_range::start_end(0, 0xEFF); // 0x0-0xEFF
address_range32 r4 = address_range32::start_end(0, 0xEFF); // 0x0-0xEFF
EXPECT_EQ(r1.distance(r4), 0x100);
EXPECT_EQ(r4.distance(r1), 0x100);
EXPECT_EQ(r1.signed_distance(r4), -0x100);
EXPECT_EQ(r4.signed_distance(r1), 0x100);
// Overlapping ranges
address_range r5 = address_range::start_length(0x1800, 0x1000); // 0x1800-0x27FF
address_range32 r5 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
EXPECT_EQ(r1.distance(r5), 0);
EXPECT_EQ(r5.distance(r1), 0);
EXPECT_EQ(r1.signed_distance(r5), 0);
@ -198,64 +198,64 @@ namespace utils
TEST(AddressRange, MinMax)
{
address_range r1 = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range r2 = address_range::start_length(0x1800, 0x1000); // 0x1800-0x27FF
address_range32 r1 = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 r2 = address_range32::start_length(0x1800, 0x1000); // 0x1800-0x27FF
// Get min-max
address_range min_max = r1.get_min_max(r2);
address_range32 min_max = r1.get_min_max(r2);
EXPECT_EQ(min_max.start, 0x1000);
EXPECT_EQ(min_max.end, 0x27FF);
// Set min-max
address_range r3 = address_range::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
address_range32 r3 = address_range32::start_length(0x2000, 0x1000); // 0x2000-0x2FFF
r3.set_min_max(r1);
EXPECT_EQ(r3.start, 0x1000);
EXPECT_EQ(r3.end, 0x2FFF);
// Test with invalid ranges
address_range empty;
address_range min_max2 = r1.get_min_max(empty);
address_range32 empty;
address_range32 min_max2 = r1.get_min_max(empty);
EXPECT_EQ(min_max2.start, r1.start);
EXPECT_EQ(min_max2.end, r1.end);
address_range min_max3 = empty.get_min_max(r1);
address_range32 min_max3 = empty.get_min_max(r1);
EXPECT_EQ(min_max3.start, r1.start);
EXPECT_EQ(min_max3.end, r1.end);
address_range min_max4 = empty.get_min_max(empty);
address_range32 min_max4 = empty.get_min_max(empty);
EXPECT_EQ(min_max4.start, umax);
EXPECT_EQ(min_max4.end, 0);
}
TEST(AddressRange, Intersect)
{
address_range r1 = address_range::start_length(0x1000, 0x2000); // 0x1000-0x2FFF
address_range32 r1 = address_range32::start_length(0x1000, 0x2000); // 0x1000-0x2FFF
// Complete overlap
address_range r2 = address_range::start_length(0x0, 0x4000); // 0x0-0x3FFF
address_range i1 = r1.get_intersect(r2);
address_range32 r2 = address_range32::start_length(0x0, 0x4000); // 0x0-0x3FFF
address_range32 i1 = r1.get_intersect(r2);
EXPECT_EQ(i1.start, 0x1000);
EXPECT_EQ(i1.end, 0x2FFF);
// Partial overlap at start
address_range r3 = address_range::start_length(0x0, 0x2000); // 0x0-0x1FFF
address_range i2 = r1.get_intersect(r3);
address_range32 r3 = address_range32::start_length(0x0, 0x2000); // 0x0-0x1FFF
address_range32 i2 = r1.get_intersect(r3);
EXPECT_EQ(i2.start, 0x1000);
EXPECT_EQ(i2.end, 0x1FFF);
// Partial overlap at end
address_range r4 = address_range::start_length(0x2000, 0x2000); // 0x2000-0x3FFF
address_range i3 = r1.get_intersect(r4);
address_range32 r4 = address_range32::start_length(0x2000, 0x2000); // 0x2000-0x3FFF
address_range32 i3 = r1.get_intersect(r4);
EXPECT_EQ(i3.start, 0x2000);
EXPECT_EQ(i3.end, 0x2FFF);
// No overlap
address_range r5 = address_range::start_length(0x4000, 0x1000); // 0x4000-0x4FFF
address_range i4 = r1.get_intersect(r5);
address_range32 r5 = address_range32::start_length(0x4000, 0x1000); // 0x4000-0x4FFF
address_range32 i4 = r1.get_intersect(r5);
EXPECT_FALSE(i4.valid());
// Test intersect method
address_range r6 = address_range::start_length(0x1000, 0x2000); // 0x1000-0x2FFF
address_range32 r6 = address_range32::start_length(0x1000, 0x2000); // 0x1000-0x2FFF
r6.intersect(r3);
EXPECT_EQ(r6.start, 0x1000);
EXPECT_EQ(r6.end, 0x1FFF);
@ -264,15 +264,15 @@ namespace utils
TEST(AddressRange, Validity)
{
// Valid range
address_range r1 = address_range::start_length(0x1000, 0x1000);
address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
EXPECT_TRUE(r1.valid());
// Invalid range (default constructor)
address_range r2;
address_range32 r2;
EXPECT_FALSE(r2.valid());
// Invalid range (start > end)
address_range r3 = address_range::start_end(0x2000, 0x1000);
address_range32 r3 = address_range32::start_end(0x2000, 0x1000);
EXPECT_FALSE(r3.valid());
// Invalidate
@ -284,9 +284,9 @@ namespace utils
TEST(AddressRange, Comparison)
{
address_range r1 = address_range::start_length(0x1000, 0x1000);
address_range r2 = address_range::start_length(0x1000, 0x1000);
address_range r3 = address_range::start_length(0x2000, 0x1000);
address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
address_range32 r2 = address_range32::start_length(0x1000, 0x1000);
address_range32 r3 = address_range32::start_length(0x2000, 0x1000);
EXPECT_TRUE(r1 == r2);
EXPECT_FALSE(r1 == r3);
@ -294,7 +294,7 @@ namespace utils
TEST(AddressRange, StringRepresentation)
{
address_range r1 = address_range::start_length(0x1000, 0x1000);
address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
std::string str = r1.str();
// The exact format may vary, but it should contain the start and end addresses
@ -310,7 +310,7 @@ namespace utils
EXPECT_EQ(vec.size(), 0);
// Add a range
vec.merge(address_range::start_length(0x1000, 0x1000));
vec.merge(address_range32::start_length(0x1000, 0x1000));
EXPECT_FALSE(vec.empty());
EXPECT_EQ(vec.size(), 1);
@ -325,33 +325,33 @@ namespace utils
address_range_vector vec;
// Add non-touching ranges
vec.merge(address_range::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.merge(address_range::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
vec.merge(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.merge(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
EXPECT_EQ(vec.valid_count(), 2);
// Add a range that touches the first range
vec.merge(address_range::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
vec.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
// Should merge all three ranges
EXPECT_EQ(vec.valid_count(), 1);
EXPECT_TRUE(vec.contains(address_range::start_end(0x1000, 0x3FFF)));
EXPECT_TRUE(vec.contains(address_range32::start_end(0x1000, 0x3FFF)));
// Add a non-touching range
vec.merge(address_range::start_length(0x5000, 0x1000)); // 0x5000-0x5FFF
vec.merge(address_range32::start_length(0x5000, 0x1000)); // 0x5000-0x5FFF
EXPECT_EQ(vec.valid_count(), 2);
// Add an overlapping range
vec.merge(address_range::start_length(0x4000, 0x2000)); // 0x4000-0x5FFF
vec.merge(address_range32::start_length(0x4000, 0x2000)); // 0x4000-0x5FFF
EXPECT_EQ(vec.valid_count(), 1);
EXPECT_TRUE(vec.contains(address_range::start_end(0x1000, 0x5FFF)));
EXPECT_TRUE(vec.contains(address_range32::start_end(0x1000, 0x5FFF)));
}
TEST(AddressRangeVector, ExcludeOperations)
{
address_range_vector vec;
vec.merge(address_range::start_length(0x1000, 0x4000)); // 0x1000-0x4FFF
vec.merge(address_range32::start_length(0x1000, 0x4000)); // 0x1000-0x4FFF
// Exclude from the middle
vec.exclude(address_range::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
vec.exclude(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
EXPECT_EQ(vec.valid_count(), 2);
auto it = vec.begin();
@ -362,43 +362,43 @@ namespace utils
EXPECT_EQ(it->end, 0x4FFF);
// Exclude from the start
vec.exclude(address_range::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.exclude(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
EXPECT_EQ(vec.valid_count(), 1);
EXPECT_TRUE(vec.contains(address_range::start_end(0x3000, 0x4FFF)));
EXPECT_TRUE(vec.contains(address_range32::start_end(0x3000, 0x4FFF)));
// Exclude from the end
vec.exclude(address_range::start_length(0x4000, 0x1000)); // 0x4000-0x4FFF
vec.exclude(address_range32::start_length(0x4000, 0x1000)); // 0x4000-0x4FFF
EXPECT_EQ(vec.valid_count(), 1);
EXPECT_TRUE(vec.contains(address_range::start_end(0x3000, 0x3FFF)));
EXPECT_TRUE(vec.contains(address_range32::start_end(0x3000, 0x3FFF)));
// Exclude entire range
vec.exclude(address_range::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
vec.exclude(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
EXPECT_EQ(vec.valid_count(), 0);
// Test excluding with another vector
vec.merge(address_range::start_length(0x1000, 0x4000)); // 0x1000-0x4FFF
vec.merge(address_range32::start_length(0x1000, 0x4000)); // 0x1000-0x4FFF
address_range_vector vec2;
vec2.merge(address_range::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
vec2.merge(address_range::start_length(0x4000, 0x1000)); // 0x4000-0x4FFF
vec2.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
vec2.merge(address_range32::start_length(0x4000, 0x1000)); // 0x4000-0x4FFF
vec.exclude(vec2);
EXPECT_EQ(vec.valid_count(), 2);
EXPECT_TRUE(vec.contains(address_range::start_end(0x1000, 0x1FFF)));
EXPECT_TRUE(vec.contains(address_range::start_end(0x3000, 0x3FFF)));
EXPECT_TRUE(vec.contains(address_range32::start_end(0x1000, 0x1FFF)));
EXPECT_TRUE(vec.contains(address_range32::start_end(0x3000, 0x3FFF)));
}
TEST(AddressRangeVector, ConsistencyCheck)
{
address_range_vector vec;
vec.merge(address_range::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.merge(address_range::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
vec.merge(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.merge(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
EXPECT_TRUE(vec.check_consistency());
// This would cause inconsistency, but merge should handle it
vec.merge(address_range::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
vec.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
EXPECT_TRUE(vec.check_consistency());
EXPECT_EQ(vec.valid_count(), 1);
}
@ -406,44 +406,44 @@ namespace utils
TEST(AddressRangeVector, OverlapsAndContains)
{
address_range_vector vec;
vec.merge(address_range::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.merge(address_range::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
vec.merge(address_range32::start_length(0x1000, 0x1000)); // 0x1000-0x1FFF
vec.merge(address_range32::start_length(0x3000, 0x1000)); // 0x3000-0x3FFF
// Test overlaps with range
EXPECT_TRUE(vec.overlaps(address_range::start_length(0x1500, 0x1000))); // 0x1500-0x24FF
EXPECT_TRUE(vec.overlaps(address_range::start_length(0x3500, 0x1000))); // 0x3500-0x44FF
EXPECT_FALSE(vec.overlaps(address_range::start_length(0x2000, 0x1000))); // 0x2000-0x2FFF
EXPECT_TRUE(vec.overlaps(address_range32::start_length(0x1500, 0x1000))); // 0x1500-0x24FF
EXPECT_TRUE(vec.overlaps(address_range32::start_length(0x3500, 0x1000))); // 0x3500-0x44FF
EXPECT_FALSE(vec.overlaps(address_range32::start_length(0x2000, 0x1000))); // 0x2000-0x2FFF
// Test contains
EXPECT_TRUE(vec.contains(address_range::start_length(0x1000, 0x1000))); // 0x1000-0x1FFF
EXPECT_TRUE(vec.contains(address_range::start_length(0x3000, 0x1000))); // 0x3000-0x3FFF
EXPECT_FALSE(vec.contains(address_range::start_length(0x1500, 0x1000))); // 0x1500-0x24FF
EXPECT_TRUE(vec.contains(address_range32::start_length(0x1000, 0x1000))); // 0x1000-0x1FFF
EXPECT_TRUE(vec.contains(address_range32::start_length(0x3000, 0x1000))); // 0x3000-0x3FFF
EXPECT_FALSE(vec.contains(address_range32::start_length(0x1500, 0x1000))); // 0x1500-0x24FF
// Test overlaps with another vector
address_range_vector vec2;
vec2.merge(address_range::start_length(0x1500, 0x1000)); // 0x1500-0x24FF
vec2.merge(address_range32::start_length(0x1500, 0x1000)); // 0x1500-0x24FF
EXPECT_TRUE(vec.overlaps(vec2));
address_range_vector vec3;
vec3.merge(address_range::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
vec3.merge(address_range32::start_length(0x2000, 0x1000)); // 0x2000-0x2FFF
EXPECT_FALSE(vec.overlaps(vec3));
// Test inside
address_range big_range = address_range::start_length(0x0, 0x5000); // 0x0-0x4FFF
address_range32 big_range = address_range32::start_length(0x0, 0x5000); // 0x0-0x4FFF
EXPECT_TRUE(vec.inside(big_range));
address_range small_range = address_range::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
address_range32 small_range = address_range32::start_length(0x1000, 0x1000); // 0x1000-0x1FFF
EXPECT_FALSE(vec.inside(small_range));
}
// Test the std::hash implementation for address_range
// Test the std::hash implementation for address_range32
TEST(AddressRange, Hash)
{
address_range r1 = address_range::start_length(0x1000, 0x1000);
address_range r2 = address_range::start_length(0x1000, 0x1000);
address_range r3 = address_range::start_length(0x2000, 0x1000);
address_range32 r1 = address_range32::start_length(0x1000, 0x1000);
address_range32 r2 = address_range32::start_length(0x1000, 0x1000);
address_range32 r3 = address_range32::start_length(0x2000, 0x1000);
std::hash<address_range> hasher;
std::hash<address_range32> hasher;
EXPECT_EQ(hasher(r1), hasher(r2));
EXPECT_NE(hasher(r1), hasher(r3));
}