SPU: use reservation spinlocks on writes (non-TSX)

This should decrease contention by avoiding the global lock.
Nekotekina 2018-05-21 20:25:05 +03:00
parent 182259e4a5
commit 72574b11ff
5 changed files with 209 additions and 134 deletions

@@ -9,6 +9,7 @@
 #include "Emu/Cell/lv2/sys_memory.h"
 #include "Emu/RSX/GSRender.h"
 #include <atomic>
+#include <thread>
 #include <deque>
 static_assert(sizeof(notifier) == 8, "Unexpected size of notifier");
@@ -232,6 +233,26 @@ namespace vm
 		}
 	}
+
+	void reservation_lock_internal(atomic_t<u64>& res)
+	{
+		for (u64 i = 0;; i++)
+		{
+			if (LIKELY(!atomic_storage<u64>::bts(res.raw(), 0)))
+			{
+				break;
+			}
+			if (i < 15)
+			{
+				busy_wait(500);
+			}
+			else
+			{
+				std::this_thread::yield();
+			}
+		}
+	}
+
 	// Page information
 	struct memory_page
 	{
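
For context, the lock protocol the new helper implements can be sketched in portable C++: bit 0 of the per-128-byte reservation counter acts as the lock, acquisition uses an atomic bit-test-and-set with the same "15 short spins, then yield" backoff, and release is assumed here to bump the counter to the next even value (the actual release sequence lives in the callers and is not shown in this hunk). Names such as reservation_lock_sketch and busy_wait_approx are illustrative, not RPCS3 APIs.

#include <atomic>
#include <cstdint>
#include <thread>

// Illustrative stand-in for RPCS3's busy_wait(): a short bounded spin.
static void busy_wait_approx(std::uint64_t iterations)
{
	for (std::uint64_t i = 0; i < iterations; i++)
	{
		std::atomic_signal_fence(std::memory_order_seq_cst); // placeholder for a pause instruction
	}
}

// Acquire: set bit 0 of the counter, mirroring atomic_storage<u64>::bts(res, 0)
// and the backoff of reservation_lock_internal above.
static void reservation_lock_sketch(std::atomic<std::uint64_t>& res)
{
	for (std::uint64_t i = 0;; i++)
	{
		if (!(res.fetch_or(1, std::memory_order_acquire) & 1))
		{
			break; // bit 0 was clear: lock acquired
		}

		if (i < 15)
		{
			busy_wait_approx(500);
		}
		else
		{
			std::this_thread::yield();
		}
	}
}

// Release (assumed convention for this sketch): increment past the lock bit,
// which clears bit 0 and advances the version that lock-free readers compare.
static void reservation_unlock_sketch(std::atomic<std::uint64_t>& res)
{
	res.fetch_add(1, std::memory_order_release);
}

A writer on the non-TSX path would bracket its store to the 128-byte line with these two calls, so concurrent writers serialize on that line's own counter instead of a process-wide lock.
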
@@ -479,12 +500,21 @@ namespace vm
 		, size(size)
 		, flags(flags)
 	{
-		// Allocate compressed reservation info area (avoid RSX and SPU areas)
-		if (addr != 0xc0000000 && addr != 0xe0000000)
+		// Allocate compressed reservation info area (avoid SPU MMIO area)
+		if (addr != 0xe0000000)
 		{
 			utils::memory_commit(g_reservations + addr / 16, size / 16);
 			utils::memory_commit(g_reservations2 + addr / 16, size / 16);
 		}
+		else
+		{
+			// RawSPU LS
+			for (u32 i = 0; i < 6; i++)
+			{
+				utils::memory_commit(g_reservations + addr / 16 + i * 0x10000, 0x4000);
+				utils::memory_commit(g_reservations2 + addr / 16 + i * 0x10000, 0x4000);
+			}
+		}
 	}
 
 	block_t::~block_t()
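
For reference, the addr / 16 indexing above packs one 64-bit reservation counter per 128-byte line ((addr / 128) * 8 == addr / 16), and the RawSPU constants follow from the standard layout of six Raw SPU slots at 0xe0000000 with a 1 MiB stride and 256 KiB of Local Storage each. The helper below is a hypothetical illustration of that arithmetic, not code from the commit.

#include <cstdint>

// One u64 reservation counter per 128-byte line of guest memory:
// (addr / 128) * sizeof(u64) == addr / 16, matching g_reservations + addr / 16.
constexpr std::uint64_t reservation_offset(std::uint32_t addr)
{
	return (addr / 128) * sizeof(std::uint64_t);
}

static_assert(reservation_offset(0xe0000000) == 0xe0000000 / 16, "compressed 16:1 mapping");
static_assert(reservation_offset(0x80) - reservation_offset(0x7f) == 8, "8 bytes per 128-byte line");

// Assumed RawSPU layout: 6 slots from 0xe0000000, 0x100000 (1 MiB) apart, each
// starting with 0x40000 (256 KiB) of Local Storage. Divided by 16 this yields
// the 0x10000 stride and 0x4000 commit size used in the constructor loop above.
static_assert(0x100000 / 16 == 0x10000, "slot stride in the reservation array");
static_assert(0x40000 / 16 == 0x4000, "LS size in the reservation array");
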