vm::spu max address was overflowing, resulting in issues, so cast to u64 where needed. Fixes #6145.
    Use vm::get_addr instead of manually subtracting vm::base(0) from the pointer in texture cache code (see the sketch after this list).
    Prefer std::atomic_thread_fence over _mm_?fence() and adjust usage to be more correct.
    Used sequentially consistent ordering in semaphore_release for the TSX path as well.
    Improved memory ordering for sys_rsx_context_iounmap/map.
    Fixed sync bugs in HLE gcm caused by not using atomic instructions (see the sketch after this list).
    Use a release memory barrier for lwsync in PPU LLVM; according to the Xbox 360 programming guide, lwsync is a hardware release memory barrier.
    Also use a release barrier where lwsync was originally used in liblv2 sys_lwmutex and cellSync.
    Use an acquire barrier for the isync instruction; see https://devblogs.microsoft.com/oldnewthing/20180814-00/?p=99485 (sketch after this list).
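
    A minimal sketch of the address fixes above, assuming RPCS3's vm::get_addr/vm::base helpers; the variable names here are illustrative, not copied from this commit:

        #include <cstdint>
        using u32 = std::uint32_t; using u64 = std::uint64_t;

        // Widen to u64 before the arithmetic so u32 math cannot wrap:
        u64 max_addr(u32 base, u32 ls_size, u32 index)
        {
            return u64{base} + u64{ls_size} * index; // u32-only math here could overflow
        }

        // Preferred pointer-to-address translation:
        //     const u32 addr = vm::get_addr(ptr);
        // instead of the old manual form:
        //     const u32 addr = static_cast<u32>(static_cast<const u8*>(ptr) - vm::base(0));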
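
    A hedged illustration of the HLE gcm class of bug (the field name is made up, not the actual cellGcm state): a plain read-modify-write on shared control state can lose updates between threads, while an atomic RMW cannot:

        #include <atomic>
        #include <cstdint>

        std::atomic<std::uint32_t> put{0}; // hypothetical shared FIFO put offset

        void advance_put(std::uint32_t bytes)
        {
            // Racy form: put = put + bytes; (separate load and store, so updates can be lost)
            put.fetch_add(bytes, std::memory_order_release); // single atomic read-modify-write
        }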
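
    The barrier mapping described above, as a minimal sketch (the helper names are hypothetical; the orderings follow the commit message and the linked article):

        #include <atomic>

        // lwsync: a hardware release barrier per the Xbox 360 programming guide
        inline void ppu_lwsync() { std::atomic_thread_fence(std::memory_order_release); }

        // isync: usable as an acquire barrier (see the Old New Thing link above)
        inline void ppu_isync() { std::atomic_thread_fence(std::memory_order_acquire); }

        // Full barrier, replacing _mm_mfence(): sequentially consistent fence
        inline void ppu_sync() { std::atomic_thread_fence(std::memory_order_seq_cst); }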
Eladash 2019-06-29 18:48:42 +03:00 committed by Ivan
parent 1ee7b91646
commit 43f919c04b
20 changed files with 85 additions and 65 deletions

@@ -1662,7 +1662,7 @@ void spu_thread::do_mfc(bool wait)
if (&args - mfc_queue <= removed)
{
// Remove barrier-class command if it's the first in the queue
-_mm_mfence();
+std::atomic_thread_fence(std::memory_order_seq_cst);
removed++;
return true;
}
@@ -2086,7 +2086,7 @@ bool spu_thread::process_mfc_cmd()
{
if (mfc_size == 0)
{
-_mm_mfence();
+std::atomic_thread_fence(std::memory_order_seq_cst);
}
else
{
@@ -3025,12 +3025,13 @@ bool spu_thread::stop_and_signal(u32 code)
case 0x100:
{
// SPU thread group yield (TODO)
if (ch_out_mbox.get_count())
{
fmt::throw_exception("STOP code 0x100: Out_MBox is not empty" HERE);
}
-_mm_mfence();
+std::atomic_thread_fence(std::memory_order_seq_cst);
return true;
}