vm::spu max address was overflowing and causing issues, so cast to u64 where needed. Fixes #6145.
    Use vm::get_addr instead of manually subtracting vm::base(0) from the pointer in texture cache code.
    Prefer std::atomic_thread_fence over _mm_?fence() and adjust its usage to be more correct (a sketch of the equivalent fences follows below).
    Use sequentially consistent ordering in semaphore_release for the TSX path as well.
    Improved memory ordering for sys_rsx_context_iounmap/map.
    Fixed sync bugs in HLE gcm caused by not using atomic operations (see the atomics sketch below).
    Use a release memory barrier for lwsync in PPU LLVM; according to the Xbox 360 programming guide, lwsync is a hardware release memory barrier.
    Also use a release barrier where lwsync was originally used in liblv2 sys_lwmutex and cellSync.
    Use an acquire barrier for the isync instruction; see https://devblogs.microsoft.com/oldnewthing/20180814-00/?p=99485
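A minimal C++ sketch of the fence mapping described above (illustrative only; the helper names are hypothetical and this is not the actual emitter code):

    #include <atomic>

    // Hypothetical helpers mirroring the mapping the commit describes:
    // lwsync -> release fence, isync -> acquire fence, and places that
    // previously used _mm_mfence() -> sequentially consistent fence.
    inline void lwsync_fence() { std::atomic_thread_fence(std::memory_order_release); }
    inline void isync_fence()  { std::atomic_thread_fence(std::memory_order_acquire); }
    inline void full_fence()   { std::atomic_thread_fence(std::memory_order_seq_cst); }

On x86, the acquire and release fences typically compile to no instruction (they only constrain compiler reordering), while the seq_cst fence emits an mfence or equivalent, which matches the stronger ordering kept for semaphore_release.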
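A similarly hedged, purely hypothetical sketch of the HLE gcm atomics item (the field and function names are invented): a shared control word written from several threads needs an atomic read-modify-write, since a plain load/add/store can lose updates.

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint32_t> ctrl_word{0}; // hypothetical shared field

    void bump(std::uint32_t v)
    {
        // was (conceptually): ctrl_word = ctrl_word + v;  -- a racy read-modify-write
        ctrl_word.fetch_add(v, std::memory_order_seq_cst);
    }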
Eladash 2019-06-29 18:48:42 +03:00 committed by Ivan
parent 1ee7b91646
commit 43f919c04b
20 changed files with 85 additions and 65 deletions


@@ -750,7 +750,7 @@ namespace vm
 const u32 size = ::align(orig_size, min_page_size);
 // return if addr or size is invalid
-if (!size || addr < this->addr || addr + u64{size} > this->addr + this->size || flags & 0x10)
+if (!size || addr < this->addr || addr + u64{size} > this->addr + u64{this->size} || flags & 0x10)
 {
 	return 0;
 }
@@ -823,7 +823,7 @@ namespace vm
 std::pair<u32, std::shared_ptr<utils::shm>> block_t::get(u32 addr, u32 size)
 {
-	if (addr < this->addr || addr + u64{size} > this->addr + this->size)
+	if (addr < this->addr || addr + u64{size} > this->addr + u64{this->size})
 	{
 		return {addr, nullptr};
 	}
@@ -852,7 +852,7 @@ namespace vm
 }
 // Range check
-if (std::max<u32>(size, addr - found->first + size) > found->second.second->size())
+if (addr + u64{size} > found->first + u64{found->second.second->size()})
 {
 	return {addr, nullptr};
 }
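For context on the hunks above, a small sketch (values chosen purely for illustration) of the u32 wrap-around that the added u64 promotions prevent:

    #include <cstdint>

    std::uint32_t base = 0xFFFF0000;
    std::uint32_t size = 0x00020000;

    std::uint32_t wrapped = base + size;                // wraps around to 0x00010000
    std::uint64_t exact   = base + std::uint64_t{size}; // 0x1'0001'0000, no wrap

    // The fixed checks promote one operand (u64{size}, u64{this->size}) so the
    // comparison is carried out in 64-bit arithmetic on both sides.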