cellSpurs: Fix HLE workload signalling, taskset fixes

Eladash 2020-10-02 15:15:23 +03:00 committed by Ivan
parent 4cafd5a31c
commit a0b387e0a9
3 changed files with 178 additions and 92 deletions

rpcs3/Emu/Cell/Modules/cellSpurs.cpp

@@ -3700,15 +3700,37 @@ s32 _spurs::create_task(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id,
 	// TODO: Verify the ELF header is proper and all its load segments are at address >= 0x3000
 
 	u32 tmp_task_id;
-	for (tmp_task_id = 0; tmp_task_id < CELL_SPURS_MAX_TASK; tmp_task_id++)
-	{
-		if (!taskset->enabled.value()._bit[tmp_task_id])
-		{
-			auto enabled              = taskset->enabled.value();
-			enabled._bit[tmp_task_id] = true;
-			taskset->enabled          = enabled;
-			break;
-		}
-	}
+
+	{
+		auto addr = taskset.ptr(&CellSpursTaskset::enabled).addr();
+		auto [res, rtime] = vm::reservation_lock(addr, 16, vm::dma_lockb);
+
+		// NOTE: Realfw processes this using 4 32-bit atomic loops
+		// But here it's processed within a single 128-bit atomic op
+		vm::_ref<atomic_be_t<v128>>(addr).fetch_op([&](be_t<v128>& value)
+		{
+			auto value0 = value.value();
+
+			if (auto pos = std::countl_one(+value0._u64[0]); pos != 64)
+			{
+				tmp_task_id = pos;
+				value0._u64[0] |= (1ull << 63) >> pos;
+				value = value0;
+				return true;
+			}
+
+			if (auto pos = std::countl_one(+value0._u64[1]); pos != 64)
+			{
+				tmp_task_id = pos + 64;
+				value0._u64[1] |= (1ull << 63) >> pos;
+				value = value0;
+				return true;
+			}
+
+			tmp_task_id = CELL_SPURS_MAX_TASK;
+			return false;
+		});
+
+		res.release(rtime + 128);
+	}
 
 	if (tmp_task_id >= CELL_SPURS_MAX_TASK)
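Note on the allocation above: the scan is MSB-first because task 0 occupies the most significant bit of the big-endian enabled mask, so the index of the first free slot equals the number of leading one-bits. A minimal standalone model of that scan (hypothetical helpers over plain u64 halves, not the emulator's atomic v128):

#include <bit>
#include <cstdint>

// Index of the first free task; 'hi' holds tasks 0-63, 'lo' tasks 64-127,
// both MSB-first.
int find_free_task(uint64_t hi, uint64_t lo)
{
    if (int pos = std::countl_one(hi); pos != 64)
        return pos;        // free slot among tasks 0-63
    if (int pos = std::countl_one(lo); pos != 64)
        return 64 + pos;   // free slot among tasks 64-127
    return 128;            // taskset is full (CELL_SPURS_MAX_TASK)
}

// Marking the slot mirrors the diff: (1ull << 63) >> pos is the pos-th bit
// counted from the most significant end.
uint64_t mark_used(uint64_t word, int pos)
{
    return word | ((1ull << 63) >> pos);
}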
@@ -3730,13 +3752,14 @@ s32 _spurs::task_start(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId)
 s32 _spurs::task_start(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId)
 {
-	auto pendingReady         = taskset->pending_ready.value();
-	pendingReady._bit[taskId] = true;
-	taskset->pending_ready    = pendingReady;
+	auto [res, rtime] = vm::reservation_lock(taskset.ptr(&CellSpursTaskset::pending_ready).addr(), 16, vm::dma_lockb);
+	taskset->pending_ready.values[taskId / 32] |= (1u << 31) >> (taskId % 32);
+	res.release(rtime + 128);
 
-	cellSpursSendWorkloadSignal(ppu, taskset->spurs, taskset->wid);
+	auto spurs = +taskset->spurs;
+	ppu_execute<&cellSpursSendWorkloadSignal>(ppu, spurs, +taskset->wid);
 
-	if (s32 rc = cellSpursWakeUp(ppu, taskset->spurs))
+	if (s32 rc = ppu_execute<&cellSpursWakeUp>(ppu, spurs))
 	{
 		if (rc + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
 		{
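Note on the bit addressing above: with the v128 bitsets split into four 32-bit words, a task id maps to word taskId / 32 with MSB-first mask (1u << 31) >> (taskId % 32). A hypothetical constexpr helper makes the mapping explicit:

#include <cstdint>

struct task_bit { uint32_t word; uint32_t mask; };

// Word index and MSB-first mask for a task id (0..127)
constexpr task_bit locate(uint32_t id)
{
    return { id / 32, (1u << 31) >> (id % 32) };
}

static_assert(locate(0).word == 0 && locate(0).mask == 0x80000000u);
static_assert(locate(35).word == 1 && locate(35).mask == 0x10000000u);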
@@ -3782,6 +3805,8 @@ s32 cellSpursCreateTask(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, vm::
 s32 _cellSpursSendSignal(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId)
 {
+	cellSpurs.trace("_cellSpursSendSignal(taskset=*0x%x, taskId=0x%x)", taskset, taskId);
+
 	if (!taskset)
 	{
 		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
@@ -3797,30 +3822,59 @@ s32 _cellSpursSendSignal(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32
 		return CELL_SPURS_TASK_ERROR_INVAL;
 	}
 
-	be_t<v128> _0(v128::from32(0));
-	bool disabled = taskset->enabled.value()._bit[taskId];
-	auto invalid  = (taskset->ready & taskset->pending_ready) != _0 || (taskset->running & taskset->waiting) != _0 || disabled ||
-		((taskset->running | taskset->ready | taskset->pending_ready | taskset->waiting | taskset->signalled) & ~taskset->enabled) != _0;
-
-	if (invalid)
-	{
-		return CELL_SPURS_TASK_ERROR_SRCH;
-	}
-
-	auto shouldSignal      = ((taskset->waiting & ~taskset->signalled) & be_t<v128>(v128::fromBit(taskId))) != _0 ? true : false;
-	auto signalled         = taskset->signalled.value();
-	signalled._bit[taskId] = true;
-	taskset->signalled     = signalled;
-
-	if (shouldSignal)
-	{
-		cellSpursSendWorkloadSignal(ppu, taskset->spurs, taskset->wid);
-		auto rc = cellSpursWakeUp(ppu, taskset->spurs);
-
-		if (rc + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
-		{
-			return CELL_SPURS_TASK_ERROR_STAT;
-		}
-
-		ASSERT(rc == CELL_OK);
-	}
+	int signal;
+
+	for (;;)
+	{
+		const u32 addr = taskset.ptr(&CellSpursTaskset::signalled).ptr(&decltype(CellSpursTaskset::signalled)::values, taskId / 32).addr();
+		u32 signalled = ppu_lwarx(ppu, addr);
+
+		const u32 running = taskset->running.values[taskId / 32];
+		const u32 ready = taskset->ready.values[taskId / 32];
+		const u32 waiting = taskset->waiting.values[taskId / 32];
+		const u32 enabled = taskset->enabled.values[taskId / 32];
+		const u32 pready = taskset->pending_ready.values[taskId / 32];
+
+		const u32 mask = (1u << 31) >> (taskId % 32);
+
+		if ((running & waiting) || (ready & pready) ||
+			((signalled | waiting | pready | running | ready) & ~enabled) || !(enabled & mask))
+		{
+			// Error conditions:
+			// 1) Cannot have a waiting bit and a running bit set at the same time
+			// 2) Cannot have a ready bit and a pending_ready bit set at the same time
+			// 3) Any bit cleared in the enabled mask must be cleared in all other masks
+			// 4) The specified task must be enabled
+			signal = -1;
+		}
+		else
+		{
+			signal = !!(~signalled & waiting & mask);
+			signalled |= (signal ? mask : 0);
+		}
+
+		if (ppu_stwcx(ppu, addr, signalled))
+		{
+			break;
+		}
+	}
+
+	switch (signal)
+	{
+	case 0: break;
+	case 1:
+	{
+		auto spurs = +taskset->spurs;
+		ppu_execute<&cellSpursSendWorkloadSignal>(ppu, spurs, +taskset->wid);
+		auto rc = ppu_execute<&cellSpursWakeUp>(ppu, spurs);
+
+		if (rc + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
+		{
+			return CELL_SPURS_TASK_ERROR_STAT;
+		}
+
+		return rc;
+	}
+	default: return CELL_SPURS_TASK_ERROR_SRCH;
+	}
 
 	return CELL_OK;
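Note on the retry loop above: ppu_lwarx/ppu_stwcx emulate PowerPC load-reserved/store-conditional, so the check-and-signal step repeats until it commits atomically. The same shape in portable C++, with a hypothetical signalled word standing in for the taskset field (only the signal decision is modelled, not the validity checks):

#include <atomic>
#include <cstdint>

// Returns 1 if the task must be woken (it was waiting and not yet
// signalled), 0 otherwise; the signalled bit is set atomically.
int signal_task(std::atomic<uint32_t>& signalled_word, uint32_t waiting_word, uint32_t mask)
{
    uint32_t old = signalled_word.load();
    for (;;)
    {
        const int signal = (~old & waiting_word & mask) ? 1 : 0;
        const uint32_t desired = old | (signal ? mask : 0);

        // Like a failed stwcx., a failed CAS reloads 'old' and retries
        if (signalled_word.compare_exchange_weak(old, desired))
            return signal;
    }
}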

rpcs3/Emu/Cell/Modules/cellSpurs.h

@@ -811,12 +811,22 @@ struct alignas(128) CellSpursTaskset
 	CHECK_SIZE(TaskInfo, 48);
 
-	be_t<v128> running;             // 0x00
-	be_t<v128> ready;               // 0x10
-	be_t<v128> pending_ready;       // 0x20
-	be_t<v128> enabled;             // 0x30
-	be_t<v128> signalled;           // 0x40
-	be_t<v128> waiting;             // 0x50
+	struct atomic_tasks_bitset
+	{
+		atomic_be_t<u32> values[4];
+
+		u32 get_bit(u32 bit) const
+		{
+			return values[bit / 32] & ((1u << 31) >> (bit % 32));
+		}
+	};
+
+	atomic_tasks_bitset running;       // 0x00
+	atomic_tasks_bitset ready;         // 0x10
+	atomic_tasks_bitset pending_ready; // 0x20
+	atomic_tasks_bitset enabled;       // 0x30
+	atomic_tasks_bitset signalled;     // 0x40
+	atomic_tasks_bitset waiting;       // 0x50
 	vm::bptr<CellSpurs, u64> spurs;    // 0x60
 	be_t<u64> args;                    // 0x68
 	u8 enable_clear_ls;                // 0x70
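Note on the layout: the replacement type keeps the structure's ABI intact, since four 32-bit words occupy the same 16 bytes as the old be_t<v128>, preserving the 0x00-0x50 offsets. A plain (non-atomic, host-endian) model of the same idea:

#include <cstdint>

struct tasks_bitset_model
{
    uint32_t values[4]; // MSB-first within each word

    uint32_t get_bit(uint32_t bit) const
    {
        return values[bit / 32] & ((1u << 31) >> (bit % 32));
    }
};

static_assert(sizeof(tasks_bitset_model) == 16, "same footprint as be_t<v128>");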

rpcs3/Emu/Cell/Modules/cellSpursSpu.cpp

@@ -2,6 +2,7 @@
 #include "Loader/ELF.h"
 #include "Emu/Cell/PPUModule.h"
+#include "Emu/Memory/vm_reservation.h"
 #include "Emu/Cell/SPUThread.h"
 #include "Emu/Cell/SPURecompiler.h"
 #include "Emu/Cell/lv2/sys_lwmutex.h"
@@ -120,24 +121,29 @@ void cellSpursModuleExit(spu_thread& spu)
 }
 
 // Execute a DMA operation
-bool spursDma(spu_thread& spu, u32 cmd, u64 ea, u32 lsa, u32 size, u32 tag)
+bool spursDma(spu_thread& spu, const spu_mfc_cmd& args)
 {
-	spu.set_ch_value(MFC_LSA, lsa);
-	spu.set_ch_value(MFC_EAH, static_cast<u32>(ea >> 32));
-	spu.set_ch_value(MFC_EAL, static_cast<u32>(ea));
-	spu.set_ch_value(MFC_Size, size);
-	spu.set_ch_value(MFC_TagID, tag);
-	spu.set_ch_value(MFC_Cmd, cmd);
+	spu.ch_mfc_cmd = args;
 
-	if (cmd == MFC_GETLLAR_CMD || cmd == MFC_PUTLLC_CMD || cmd == MFC_PUTLLUC_CMD)
+	if (!spu.process_mfc_cmd())
 	{
-		const u32 rv = static_cast<u32>(spu.get_ch_value(MFC_RdAtomicStat));
-		return cmd == MFC_PUTLLC_CMD ? !rv : true;
+		spu_runtime::g_escape(&spu);
+	}
+
+	if (args.cmd == MFC_GETLLAR_CMD || args.cmd == MFC_PUTLLC_CMD || args.cmd == MFC_PUTLLUC_CMD)
+	{
+		return static_cast<u32>(spu.get_ch_value(MFC_RdAtomicStat)) != MFC_PUTLLC_FAILURE;
 	}
 
 	return true;
 }
 
+// Execute a DMA operation
+bool spursDma(spu_thread& spu, u32 cmd, u64 ea, u32 lsa, u32 size, u32 tag)
+{
+	return spursDma(spu, {MFC(cmd), static_cast<u8>(tag & 0x1f), static_cast<u16>(size & 0x7fff), lsa, static_cast<u32>(ea), static_cast<u32>(ea >> 32)});
+}
+
 // Get the status of DMA operations
 u32 spursDmaGetCompletionStatus(spu_thread& spu, u32 tagMask)
 {
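Note on the new spursDma pair: the legacy scalar signature survives as a thin overload that packs its arguments into the command struct, so existing call sites compile unchanged. A sketch of the pattern with a hypothetical struct whose field order follows the brace-initializer above (the real spu_mfc_cmd lives in RPCS3):

#include <cstdint>

struct mfc_cmd_model
{
    uint32_t cmd;
    uint8_t  tag;   // masked to 5 bits, as in the diff
    uint16_t size;  // masked to 15 bits, as in the diff
    uint32_t lsa;   // local storage address
    uint32_t eal;   // effective address, low word
    uint32_t eah;   // effective address, high word
};

// Struct-based primitive (stub body; a real one would program the MFC)
bool do_dma(const mfc_cmd_model& args)
{
    return args.cmd != 0;
}

// Forwarding overload: same call sites, one implementation
bool do_dma(uint32_t cmd, uint64_t ea, uint32_t lsa, uint32_t size, uint32_t tag)
{
    return do_dma({cmd, static_cast<uint8_t>(tag & 0x1f),
                   static_cast<uint16_t>(size & 0x7fff), lsa,
                   static_cast<uint32_t>(ea), static_cast<uint32_t>(ea >> 32)});
}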
@@ -1402,23 +1408,31 @@ s32 spursTasksetProcessRequest(spu_thread& spu, s32 request, u32* taskId, u32* i
 	auto ctxt = spu._ptr<SpursTasksetContext>(0x2700);
 
 	s32 rc = CELL_OK;
-	s32 numNewlyReadyTasks;
+	s32 numNewlyReadyTasks = 0;
 	//vm::reservation_op(vm::cast(ctxt->taskset.addr(), HERE), 128, [&]()
 	{
-		auto taskset = ctxt->taskset.get_ptr();
+		auto taskset = ctxt->taskset;
+
+		v128 waiting   = vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::waiting));
+		v128 running   = vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::running));
+		v128 ready     = vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::ready));
+		v128 pready    = vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::pending_ready));
+		v128 enabled   = vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::enabled));
+		v128 signalled = vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::signalled));
 
 		// Verify taskset state is valid
-		be_t<v128> _0(v128::from32(0));
-		if ((taskset->waiting & taskset->running) != _0 || (taskset->ready & taskset->pending_ready) != _0 ||
-			((taskset->running | taskset->ready | taskset->pending_ready | taskset->signalled | taskset->waiting) & ~taskset->enabled) != _0)
+		if ((waiting & running) != v128{} || (ready & pready) != v128{} ||
+			(v128::andnot(enabled, running | ready | pready | signalled | waiting) != v128{}))
 		{
 			spu_log.error("Invalid taskset state");
 			spursHalt(spu);
 		}
 
 		// Find the number of tasks that have become ready since the last iteration
-		auto newlyReadyTasks = (taskset->signalled | taskset->pending_ready) & ~taskset->ready.value();
-		numNewlyReadyTasks   = 0;
+		{
+			auto newlyReadyTasks = v128::andnot(ready, signalled | pready);
+
+			// TODO: Optimize this shit with std::popcount when it's known to be fixed
 		for (auto i = 0; i < 128; i++)
 		{
 			if (newlyReadyTasks._bit[i])
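Note on the TODO above: once std::popcount can be trusted, the 128-iteration counting loop reduces to two popcounts over the halves of the mask. A sketch over plain u64 halves rather than v128:

#include <bit>
#include <cstdint>

int count_newly_ready(uint64_t hi, uint64_t lo)
{
    return std::popcount(hi) + std::popcount(lo);
}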
@@ -1426,52 +1440,61 @@ s32 spursTasksetProcessRequest(spu_thread& spu, s32 request, u32* taskId, u32* i
 				numNewlyReadyTasks++;
 			}
 		}
+	}
 
 	v128 readyButNotRunning;
 	u8   selectedTaskId;
-	v128 running   = taskset->running.value();
-	v128 waiting   = taskset->waiting.value();
-	v128 enabled   = taskset->enabled.value();
-	v128 signalled = (taskset->signalled & (taskset->ready | taskset->pending_ready));
-	v128 ready     = (taskset->signalled | taskset->ready | taskset->pending_ready);
+	v128 signalled0 = (signalled & (ready | pready));
+	v128 ready0     = (signalled | ready | pready);
 
 	switch (request)
 	{
 	case SPURS_TASKSET_REQUEST_POLL_SIGNAL:
-		rc = signalled._bit[ctxt->taskId] ? 1 : 0;
-		signalled._bit[ctxt->taskId] = false;
+	{
+		rc = signalled0._bit[ctxt->taskId] ? 1 : 0;
+		signalled0._bit[ctxt->taskId] = false;
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_DESTROY_TASK:
+	{
 		numNewlyReadyTasks--;
 		running._bit[ctxt->taskId] = false;
 		enabled._bit[ctxt->taskId] = false;
-		signalled._bit[ctxt->taskId] = false;
-		ready._bit[ctxt->taskId] = false;
+		signalled0._bit[ctxt->taskId] = false;
+		ready0._bit[ctxt->taskId] = false;
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_YIELD_TASK:
+	{
 		running._bit[ctxt->taskId] = false;
 		waiting._bit[ctxt->taskId] = true;
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_WAIT_SIGNAL:
-		if (signalled._bit[ctxt->taskId] == false)
+	{
+		if (signalled0._bit[ctxt->taskId] == false)
 		{
 			numNewlyReadyTasks--;
 			running._bit[ctxt->taskId] = false;
 			waiting._bit[ctxt->taskId] = true;
-			signalled._bit[ctxt->taskId] = false;
-			ready._bit[ctxt->taskId] = false;
+			signalled0._bit[ctxt->taskId] = false;
+			ready0._bit[ctxt->taskId] = false;
 		}
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_POLL:
-		readyButNotRunning = ready & ~running;
+	{
+		readyButNotRunning = v128::andnot(running, ready0);
 		if (taskset->wkl_flag_wait_task < CELL_SPURS_MAX_TASK)
 		{
-			readyButNotRunning = readyButNotRunning & ~(v128::fromBit(taskset->wkl_flag_wait_task));
+			readyButNotRunning._bit[taskset->wkl_flag_wait_task] = false;
 		}
 
-		rc = readyButNotRunning != _0 ? 1 : 0;
+		rc = readyButNotRunning != v128{} ? 1 : 0;
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_WAIT_WKL_FLAG:
+	{
 		if (taskset->wkl_flag_wait_task == 0x81)
 		{
 			// A workload flag is already pending so consume it
@@ -1493,11 +1516,13 @@ s32 spursTasksetProcessRequest(spu_thread& spu, s32 request, u32* taskId, u32* i
 			rc = CELL_SPURS_TASK_ERROR_BUSY;
 		}
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_SELECT_TASK:
-		readyButNotRunning = ready & ~running;
+	{
+		readyButNotRunning = v128::andnot(running, ready0);
 		if (taskset->wkl_flag_wait_task < CELL_SPURS_MAX_TASK)
 		{
-			readyButNotRunning = readyButNotRunning & ~(v128::fromBit(taskset->wkl_flag_wait_task));
+			readyButNotRunning._bit[taskset->wkl_flag_wait_task] = false;
 		}
 
 		// Select a task from the readyButNotRunning set to run. Start from the task after the last scheduled task to ensure fairness.
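Note on the andnot rewrites in this function: they rely on the SSE-style operand convention andnot(a, b) == b & ~a (the first operand is the one complemented), which is why "ready & ~running" becomes v128::andnot(running, ready0). A scalar model, assuming that convention holds for RPCS3's v128:

#include <cstdint>

constexpr uint64_t andnot(uint64_t a, uint64_t b)
{
    return b & ~a; // complement the first operand, as _mm_andnot_si128 does
}

static_assert(andnot(0b1100, 0b1010) == 0b0010); // ready-but-not-running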
@@ -1534,7 +1559,9 @@ s32 spursTasksetProcessRequest(spu_thread& spu, s32 request, u32* taskId, u32* i
 			waiting._bit[selectedTaskId] = false;
 		}
 		break;
+	}
 	case SPURS_TASKSET_REQUEST_RECV_WKL_FLAG:
+	{
 		if (taskset->wkl_flag_wait_task < CELL_SPURS_MAX_TASK)
 		{
 			// There is a task waiting for the workload flag
@@ -1549,41 +1576,36 @@ s32 spursTasksetProcessRequest(spu_thread& spu, s32 request, u32* taskId, u32* i
 			rc = 0;
 		}
 		break;
+	}
 	default:
 		spu_log.error("Unknown taskset request");
 		spursHalt(spu);
 	}
 
-	taskset->pending_ready = _0;
-	taskset->running       = running;
-	taskset->waiting       = waiting;
-	taskset->enabled       = enabled;
-	taskset->signalled     = signalled;
-	taskset->ready         = ready;
+	vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::waiting))       = waiting;
+	vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::running))       = running;
+	vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::ready))         = ready;
+	vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::pending_ready)) = v128{};
+	vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::enabled))       = enabled;
+	vm::_ref<v128>(ctxt->taskset.addr() + ::offset32(&CellSpursTaskset::signalled))     = signalled;
 
-	std::memcpy(spu._ptr<void>(0x2700), taskset, 128);
+	std::memcpy(spu._ptr<void>(0x2700), spu._ptr<void>(0x100), 128); // Copy data
 	}//);
 
 	// Increment the ready count of the workload by the number of tasks that have become ready
-	//vm::reservation_op(vm::cast(kernelCtxt->spurs.addr(), HERE), 128, [&]()
+	if (numNewlyReadyTasks)
 	{
-		auto spurs = kernelCtxt->spurs.get_ptr();
+		auto spurs = kernelCtxt->spurs;
 
-		s32 readyCount = kernelCtxt->wklCurrentId < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[kernelCtxt->wklCurrentId].load() : spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F].load();
-		readyCount += numNewlyReadyTasks;
-		readyCount = readyCount < 0 ? 0 : readyCount > 0xFF ? 0xFF : readyCount;
+		auto [res, rtime] = vm::reservation_lock(spurs.addr(), 128, vm::dma_lockb);
 
-		if (kernelCtxt->wklCurrentId < CELL_SPURS_MAX_WORKLOAD)
+		spurs->readyCount(kernelCtxt->wklCurrentId).fetch_op([&](u8& val)
 		{
-			spurs->wklReadyCount1[kernelCtxt->wklCurrentId] = readyCount;
-		}
-		else
-		{
-			spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F] = readyCount;
-		}
+			const s32 _new = val + numNewlyReadyTasks;
+			val = static_cast<u8>(std::clamp<s32>(_new, 0, 0xFF));
+		});
 
-		std::memcpy(spu._ptr<void>(0x100), spurs, 128);
-	}//);
+		res.release(rtime + 128);
+	}
 
 	return rc;
 }
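Note on the new ready-count update: the clamp to [0, 0xFF] now happens inside a single atomic read-modify-write instead of a non-atomic load/store pair. A scalar approximation of that fetch_op with std::atomic (a sketch; RPCS3's atomic_t::fetch_op differs in return type):

#include <algorithm>
#include <atomic>
#include <cstdint>

// Apply a possibly negative delta to a u8 ready count, saturating at
// [0, 0xFF]; returns the previous value, fetch_op-style.
uint8_t add_saturated(std::atomic<uint8_t>& ready_count, int delta)
{
    uint8_t old = ready_count.load();
    for (;;)
    {
        const uint8_t clamped = static_cast<uint8_t>(std::clamp(old + delta, 0, 0xFF));
        if (ready_count.compare_exchange_weak(old, clamped))
            return old;
    }
}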