Aarch64 fixes for RSX

This commit is contained in:
kd-11 2022-06-29 23:54:53 +03:00 committed by kd-11
parent fcd297ffb2
commit 5cafaef0a9
3 changed files with 25 additions and 17 deletions

View file

@@ -5,33 +5,39 @@
 #include <vector>
 #include <algorithm>
 
+#if defined(ARCH_X64)
+#define HOST_PAGE_SIZE() 4096u
+#else
+#define HOST_PAGE_SIZE get_page_size
+#endif
+
 namespace utils
 {
 	class address_range_vector;
+	long get_page_size();
 
 	/**
-	 * Constexprs
+	 * Helpers
	 */
-	constexpr inline u32 page_start(u32 addr)
+	static inline u32 page_start(u32 addr)
 	{
-		return addr & ~4095u;
+		return addr & ~(HOST_PAGE_SIZE() - 1);
 	}
 
-	constexpr inline u32 next_page(u32 addr)
+	static inline u32 next_page(u32 addr)
 	{
-		return page_start(addr + 4096u);
+		return page_start(addr) + HOST_PAGE_SIZE();
 	}
 
-	constexpr inline u32 page_end(u32 addr)
+	static inline u32 page_end(u32 addr)
 	{
 		return next_page(addr) - 1;
 	}
 
-	constexpr inline u32 is_page_aligned(u32 addr)
+	static inline u32 is_page_aligned(u32 val)
 	{
-		return page_start(addr) == addr;
+		return (val & (HOST_PAGE_SIZE() - 1)) == 0;
 	}
@@ -186,7 +192,7 @@ namespace utils
 	bool is_page_range() const
 	{
-		return (valid() && start % 4096u == 0 && length() % 4096u == 0);
+		return (valid() && is_page_aligned(start) && is_page_aligned(length()));
 	}
 
 	address_range to_page_range() const

View file

@@ -5,6 +5,8 @@
 namespace rsx
 {
+	constexpr u32 min_lockable_data_size = 4096; // Increasing this value has worse results even on systems with pages > 4k
+
 	void buffered_section::init_lockable_range(const address_range& range)
 	{
 		locked_range = range.to_page_range();
@@ -27,7 +29,7 @@ namespace rsx
 		init_lockable_range(cpu_range);
 
-		if (memory_range.length() < 4096)
+		if (memory_range.length() < min_lockable_data_size)
 		{
 			protection_strat = section_protection_strategy::hash;
 			mem_hash = 0;

View file

@@ -23,7 +23,7 @@ namespace rsx
 	{
 		if (p.second.prot != utils::protection::rw)
 		{
-			utils::memory_protect(vm::base(p.first), 4096, utils::protection::rw);
+			utils::memory_protect(vm::base(p.first), utils::get_page_size(), utils::protection::rw);
 		}
 	}
@@ -790,13 +790,13 @@ namespace rsx
 		if (!m_pages_accessed[location]) [[ likely ]]
 		{
-			const auto page_address = static_cast<u32>(address) & ~0xfff;
+			const auto page_address = utils::page_start(static_cast<u32>(address));
 			auto& page = m_locked_pages[location][page_address];
 			page.add_ref();
 
 			if (page.prot == utils::protection::rw)
 			{
-				utils::memory_protect(vm::base(page_address), 4096, utils::protection::no);
+				utils::memory_protect(vm::base(page_address), utils::get_page_size(), utils::protection::no);
 				page.prot = utils::protection::no;
 			}
 		}
@@ -811,7 +811,7 @@ namespace rsx
 		const auto location = rsx::classify_location(address);
 		if (!m_pages_accessed[location])
 		{
-			const auto page_address = static_cast<u32>(address) & ~0xfff;
+			const auto page_address = utils::page_start(static_cast<u32>(address));
 			std::scoped_lock lock(m_pages_mutex);
 
 			if (auto found = m_locked_pages[location].find(page_address);
@@ -844,7 +844,7 @@ namespace rsx
 			if (page.prot != utils::protection::rw)
 			{
-				utils::memory_protect(vm::base(this_address), 4096, utils::protection::rw);
+				utils::memory_protect(vm::base(this_address), utils::get_page_size(), utils::protection::rw);
 				page.prot = utils::protection::rw;
 			}
@@ -860,7 +860,7 @@ namespace rsx
 	bool ZCULL_control::on_access_violation(u32 address)
 	{
-		const auto page_address = address & ~0xfff;
+		const auto page_address = utils::page_start(address);
 		const auto location = rsx::classify_location(address);
 
 		if (m_pages_accessed[location])
@@ -890,7 +890,7 @@ namespace rsx
 			else
 			{
 				// R/W to stale block, unload it and move on
-				utils::memory_protect(vm::base(page_address), 4096, utils::protection::rw);
+				utils::memory_protect(vm::base(page_address), utils::get_page_size(), utils::protection::rw);
 				m_locked_pages[location].erase(page_address);
 				return true;