SPU syscall improvements

Nekotekina 2015-03-05 00:51:14 +03:00
parent 30fe3dc6f5
commit 9e49a33b3c
27 changed files with 692 additions and 705 deletions

View file

@@ -2,7 +2,9 @@
 #include "Log.h"
 #include "rpcs3/Ini.h"
 #include "Emu/System.h"
+#include "Emu/CPU/CPUThreadManager.h"
 #include "Emu/CPU/CPUThread.h"
+#include "Emu/Cell/RawSPUThread.h"
 #include "Emu/SysCalls/SysCalls.h"
 #include "Thread.h"
@@ -105,8 +107,8 @@ enum x64_reg_t : u32
 enum x64_op_t : u32
 {
     X64OP_NONE,
-    X64OP_LOAD, // obtain and put the value into x64 register (from Memory.ReadMMIO32, for example)
-    X64OP_STORE, // take the value from x64 register or an immediate and use it (pass in Memory.WriteMMIO32, for example)
+    X64OP_LOAD, // obtain and put the value into x64 register
+    X64OP_STORE, // take the value from x64 register or an immediate and use it
     // example: add eax,[rax] -> X64OP_LOAD_ADD (add the value to x64 register)
     // example: add [rax],eax -> X64OP_LOAD_ADD_STORE (this will probably never happen for MMIO registers)
@@ -768,18 +770,27 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
     // check if address is RawSPU MMIO register
     if (addr - RAW_SPU_BASE_ADDR < (6 * RAW_SPU_OFFSET) && (addr % RAW_SPU_OFFSET) >= RAW_SPU_PROB_OFFSET)
     {
+        auto t = Emu.GetCPU().GetRawSPUThread((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET);
+
+        if (!t)
+        {
+            return false;
+        }
+
         if (a_size != 4 || !d_size || !i_size)
         {
            LOG_ERROR(MEMORY, "Invalid or unsupported instruction (op=%d, reg=%d, d_size=%lld, a_size=0x%llx, i_size=%lld)", op, reg, d_size, a_size, i_size);
            return false;
        }

+        auto& spu = static_cast<RawSPUThread&>(*t);
+
        switch (op)
        {
        case X64OP_LOAD:
        {
            u32 value;
-           if (is_writing || !Memory.ReadMMIO32(addr, value) || !put_x64_reg_value(context, reg, d_size, re32(value)))
+           if (is_writing || !spu.ReadReg(addr, value) || !put_x64_reg_value(context, reg, d_size, re32(value)))
            {
                return false;
            }
@@ -789,7 +800,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
        case X64OP_STORE:
        {
            u64 reg_value;
-           if (!is_writing || !get_x64_reg_value(context, reg, d_size, i_size, reg_value) || !Memory.WriteMMIO32(addr, re32((u32)reg_value)))
+           if (!is_writing || !get_x64_reg_value(context, reg, d_size, i_size, reg_value) || !spu.WriteReg(addr, re32((u32)reg_value)))
            {
                return false;
            }
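
The net effect of this hunk: a page fault on a Raw SPU problem-state address no longer goes through the removed Memory.ReadMMIO32/WriteMMIO32 path; the handler resolves the owning RawSPUThread through CPUThreadManager and calls its ReadReg/WriteReg directly. A minimal sketch of that dispatch (the helper name is hypothetical; the real handler additionally restores the x64 register and skips the faulting instruction):

// Hypothetical, condensed view of the new fault-handling path.
bool handle_raw_spu_mmio(u32 addr, bool is_writing, u32& io_value)
{
    // Which Raw SPU problem-state window does this address fall into?
    const u32 index = (addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET;

    auto t = Emu.GetCPU().GetRawSPUThread(index);

    if (!t)
    {
        return false; // no Raw SPU registered in this slot
    }

    auto& spu = static_cast<RawSPUThread&>(*t);

    // Register values are 32-bit; the caller byte-swaps them (re32) as above.
    return is_writing ? spu.WriteReg(addr, io_value) : spu.ReadReg(addr, io_value);
}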

View file

@@ -232,7 +232,7 @@ void ARMv7Thread::FastStop()
 armv7_thread::armv7_thread(u32 entry, const std::string& name, u32 stack_size, s32 prio)
 {
-    thread = &Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
+    thread = Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);

     thread->SetName(name);
     thread->SetEntry(entry);
@@ -277,11 +277,13 @@ cpu_thread& armv7_thread::args(std::initializer_list<std::string> values)
 cpu_thread& armv7_thread::run()
 {
-    thread->Run();
+    auto& armv7 = static_cast<ARMv7Thread&>(*thread);
+
+    armv7.Run();

     // set arguments
-    static_cast<ARMv7Thread*>(thread)->context.GPR[0] = argc;
-    static_cast<ARMv7Thread*>(thread)->context.GPR[1] = argv;
+    armv7.context.GPR[0] = argc;
+    armv7.context.GPR[1] = argv;

     return *this;
 }

View file

@@ -47,18 +47,17 @@ s32 sceKernelCreateThread(
     sceLibKernel.Warning("sceKernelCreateThread(pName=0x%x, entry=0x%x, initPriority=%d, stackSize=0x%x, attr=0x%x, cpuAffinityMask=0x%x, pOptParam=0x%x)",
         pName, entry, initPriority, stackSize, attr, cpuAffinityMask, pOptParam);

-    ARMv7Thread& new_thread = static_cast<ARMv7Thread&>(Emu.GetCPU().AddThread(CPU_THREAD_ARMv7));
-    const auto id = new_thread.GetId();
-    new_thread.SetEntry(entry.addr());
-    new_thread.SetPrio(initPriority);
-    new_thread.SetStackSize(stackSize);
-    new_thread.SetName(pName.get_ptr());
-
-    sceLibKernel.Warning("*** New ARMv7 Thread [%s] (entry=0x%x): id -> 0x%x", pName.get_ptr(), entry, id);
-
-    new_thread.Run();
-    return id;
+    auto t = Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
+    auto& armv7 = static_cast<ARMv7Thread&>(*t);
+
+    armv7.SetEntry(entry.addr());
+    armv7.SetPrio(initPriority);
+    armv7.SetStackSize(stackSize);
+    armv7.SetName(pName.get_ptr());
+    armv7.Run();
+
+    return armv7.GetId();
 }

 s32 sceKernelStartThread(s32 threadId, u32 argSize, vm::psv::ptr<const void> pArgBlock)

View file

@@ -112,6 +112,7 @@ public:
     u32 entry;
     u32 PC;
     u32 nPC;
+    u32 index;
     u32 offset;
     bool m_is_branch;
     bool m_trace_enabled;
@@ -223,7 +224,7 @@ CPUThread* GetCurrentCPUThread();
 class cpu_thread
 {
 protected:
-    CPUThread* thread;
+    std::shared_ptr<CPUThread> thread;

 public:
     u32 get_entry() const

View file

@@ -24,9 +24,9 @@ void CPUThreadManager::Close()
     while(m_threads.size()) RemoveThread(m_threads[0]->GetId());
 }

-CPUThread& CPUThreadManager::AddThread(CPUThreadType type)
+std::shared_ptr<CPUThread> CPUThreadManager::AddThread(CPUThreadType type)
 {
-    std::lock_guard<std::mutex> lock(m_mtx_thread);
+    std::lock_guard<std::mutex> lock(m_mutex);

     std::shared_ptr<CPUThread> new_thread;
@@ -43,8 +43,18 @@ CPUThread& CPUThreadManager::AddThread(CPUThreadType type)
         break;
     }
     case CPU_THREAD_RAW_SPU:
+    {
+        for (u32 i = 0; i < m_raw_spu.size(); i++)
+        {
+            if (!m_raw_spu[i])
             {
                 new_thread.reset(new RawSPUThread());
+                new_thread->index = i;
+
+                m_raw_spu[i] = new_thread;
+                break;
+            }
+        }
         break;
     }
     case CPU_THREAD_ARMv7:
@@ -55,17 +65,20 @@ CPUThread& CPUThreadManager::AddThread(CPUThreadType type)
     default: assert(0);
     }

+    if (new_thread)
+    {
         new_thread->SetId(Emu.GetIdManager().GetNewID(new_thread->GetTypeString() + " Thread", new_thread));
         m_threads.push_back(new_thread);
         SendDbgCommand(DID_CREATE_THREAD, new_thread.get());
-    return *new_thread;
+    }
+
+    return new_thread;
 }

-void CPUThreadManager::RemoveThread(const u32 id)
+void CPUThreadManager::RemoveThread(u32 id)
 {
-    std::lock_guard<std::mutex> lock(m_mtx_thread);
+    std::lock_guard<std::mutex> lock(m_mutex);

     std::shared_ptr<CPUThread> thr;
     u32 thread_index = 0;
@@ -84,6 +97,12 @@ void CPUThreadManager::RemoveThread(const u32 id)
         thr->Close();

         m_threads.erase(m_threads.begin() + thread_index);
+
+        if (thr->GetType() == CPU_THREAD_RAW_SPU)
+        {
+            assert(thr->index < m_raw_spu.size());
+            m_raw_spu[thr->index] = nullptr;
+        }
     }

     // Removing the ID should trigger the actual deletion of the thread
@@ -91,21 +110,6 @@ void CPUThreadManager::RemoveThread(const u32 id)
     Emu.CheckStatus();
 }

-s32 CPUThreadManager::GetThreadNumById(CPUThreadType type, u32 id)
-{
-    std::lock_guard<std::mutex> lock(m_mtx_thread);
-
-    s32 num = 0;
-
-    for(u32 i=0; i<m_threads.size(); ++i)
-    {
-        if(m_threads[i]->GetId() == id) return num;
-        if(m_threads[i]->GetType() == type) num++;
-    }
-
-    return -1;
-}
-
 std::shared_ptr<CPUThread> CPUThreadManager::GetThread(u32 id)
 {
     std::shared_ptr<CPUThread> res;
@@ -130,21 +134,19 @@ std::shared_ptr<CPUThread> CPUThreadManager::GetThread(u32 id, CPUThreadType typ
     return res;
 }

-std::shared_ptr<CPUThread> CPUThreadManager::GetRawSPUThread(u32 num)
+std::shared_ptr<CPUThread> CPUThreadManager::GetRawSPUThread(u32 index)
 {
-    if (num < sizeof(Memory.RawSPUMem) / sizeof(Memory.RawSPUMem[0]))
-    {
-        return GetThread(((RawSPUThread*)Memory.RawSPUMem[num])->GetId());
-    }
-    else
+    if (index >= m_raw_spu.size())
     {
         return nullptr;
     }
+
+    return m_raw_spu[index];
 }

 void CPUThreadManager::Exec()
 {
-    std::lock_guard<std::mutex> lock(m_mtx_thread);
+    std::lock_guard<std::mutex> lock(m_mutex);

     for(u32 i = 0; i < m_threads.size(); ++i)
     {
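
The thread manager now owns the Raw SPU slots itself: AddThread picks the first free entry in m_raw_spu, stamps the new thread with its slot index, and RemoveThread clears the slot again. A condensed sketch of that slot handling (helper names are hypothetical; member and field names come from this diff):

// Condensed sketch; assumes the CPUThread/RawSPUThread types from this diff.
std::shared_ptr<CPUThread> alloc_raw_spu_slot(std::array<std::shared_ptr<CPUThread>, 5>& raw_spu)
{
    for (u32 i = 0; i < raw_spu.size(); i++)
    {
        if (!raw_spu[i])
        {
            std::shared_ptr<CPUThread> t(new RawSPUThread());
            t->index = i;    // remembered so MMIO/MFC code can locate this SPU's window
            raw_spu[i] = t;
            return t;
        }
    }

    return nullptr; // all Raw SPU slots are in use; AddThread returns an empty pointer
}

void free_raw_spu_slot(std::array<std::shared_ptr<CPUThread>, 5>& raw_spu, const CPUThread& t)
{
    raw_spu[t.index] = nullptr; // mirrors the CPU_THREAD_RAW_SPU branch in RemoveThread
}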

View file

@@ -6,8 +6,10 @@ enum CPUThreadType : unsigned char;
 class CPUThreadManager
 {
+    std::mutex m_mutex;
+
     std::vector<std::shared_ptr<CPUThread>> m_threads;
-    std::mutex m_mtx_thread;
+    std::array<std::shared_ptr<CPUThread>, 5> m_raw_spu;

 public:
     CPUThreadManager();
@@ -15,14 +17,15 @@ public:
     void Close();

-    CPUThread& AddThread(CPUThreadType type);
-    void RemoveThread(const u32 id);
+    std::shared_ptr<CPUThread> AddThread(CPUThreadType type);
+    void RemoveThread(u32 id);

-    std::vector<std::shared_ptr<CPUThread>> GetThreads() { std::lock_guard<std::mutex> lock(m_mtx_thread); return m_threads; }
-    s32 GetThreadNumById(CPUThreadType type, u32 id);
+    std::vector<std::shared_ptr<CPUThread>> GetThreads() { std::lock_guard<std::mutex> lock(m_mutex); return m_threads; }
     std::shared_ptr<CPUThread> GetThread(u32 id);
     std::shared_ptr<CPUThread> GetThread(u32 id, CPUThreadType type);
-    std::shared_ptr<CPUThread> GetRawSPUThread(u32 num);
+    std::shared_ptr<CPUThread> GetRawSPUThread(u32 index);

     void Exec();
     void Task();

View file

@@ -228,7 +228,7 @@ void PPUThread::Task()
 ppu_thread::ppu_thread(u32 entry, const std::string& name, u32 stack_size, u32 prio)
 {
-    thread = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+    thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);

     thread->SetName(name);
     thread->SetEntry(entry);
@@ -277,7 +277,7 @@ ppu_thread& ppu_thread::gpr(uint index, u64 value)
 {
     assert(index < 32);

-    static_cast<PPUThread*>(thread)->GPR[index] = value;
+    static_cast<PPUThread&>(*thread).GPR[index] = value;

     return *this;
 }

View file

@@ -10,15 +10,11 @@ thread_local spu_mfc_arg_t raw_spu_mfc[8] = {};
 RawSPUThread::RawSPUThread(CPUThreadType type)
     : SPUThread(type)
-    , MemoryBlock()
 {
-    m_index = Memory.InitRawSPU(this);
-    Reset();
 }

 RawSPUThread::~RawSPUThread()
 {
-    Memory.CloseRawSPU(this, m_index);
 }

 void RawSPUThread::start()
@@ -29,54 +25,54 @@ void RawSPUThread::start()
     // (probably because Exec() creates new thread, faults of this thread aren't handled by this handler anymore)
     Emu.GetCallbackManager().Async([this](PPUThread& PPU)
     {
-        Exec();
+        FastRun();
     });
 }

-bool RawSPUThread::Read32(const u32 addr, u32* value)
+bool RawSPUThread::ReadReg(const u32 addr, u32& value)
 {
-    const u32 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET;
+    const u32 offset = addr - RAW_SPU_BASE_ADDR - index * RAW_SPU_OFFSET - RAW_SPU_PROB_OFFSET;

     switch (offset)
     {
     case MFC_CMDStatus_offs:
     {
-        *value = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL;
+        value = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL;
         return true;
     }
     case MFC_QStatus_offs:
     {
-        *value = MFC_PROXY_COMMAND_QUEUE_EMPTY_FLAG | MFC_PPU_MAX_QUEUE_SPACE;
+        value = MFC_PROXY_COMMAND_QUEUE_EMPTY_FLAG | MFC_PPU_MAX_QUEUE_SPACE;
         return true;
     }
     case SPU_Out_MBox_offs:
     {
-        *value = ch_out_mbox.pop_uncond();
+        value = ch_out_mbox.pop_uncond();
         return true;
     }
     case SPU_MBox_Status_offs:
     {
-        *value = (ch_out_mbox.get_count() & 0xff) | ((4 - ch_in_mbox.get_count()) << 8 & 0xff) | (ch_out_intr_mbox.get_count() << 16 & 0xff);
+        value = (ch_out_mbox.get_count() & 0xff) | ((4 - ch_in_mbox.get_count()) << 8 & 0xff) | (ch_out_intr_mbox.get_count() << 16 & 0xff);
         return true;
     }
     case SPU_Status_offs:
     {
-        *value = status.read_relaxed();
+        value = status.read_relaxed();
         return true;
     }
     }

-    LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Read32(): unknown/illegal offset (0x%x)", m_index, offset);
+    LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Read32(0x%x): unknown/illegal offset (0x%x)", index, addr, offset);
     return false;
 }

-bool RawSPUThread::Write32(const u32 addr, const u32 value)
+bool RawSPUThread::WriteReg(const u32 addr, const u32 value)
 {
-    const u32 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET;
+    const u32 offset = addr - RAW_SPU_BASE_ADDR - index * RAW_SPU_OFFSET - RAW_SPU_PROB_OFFSET;

     switch (offset)
     {
@@ -87,19 +83,19 @@ bool RawSPUThread::Write32(const u32 addr, const u32 value)
             break;
         }

-        raw_spu_mfc[m_index].lsa = value;
+        raw_spu_mfc[index].lsa = value;
         return true;
     }
     case MFC_EAH_offs:
     {
-        raw_spu_mfc[m_index].eah = value;
+        raw_spu_mfc[index].eah = value;
         return true;
     }
     case MFC_EAL_offs:
     {
-        raw_spu_mfc[m_index].eal = value;
+        raw_spu_mfc[index].eal = value;
         return true;
     }
@@ -110,14 +106,14 @@ bool RawSPUThread::Write32(const u32 addr, const u32 value)
             break;
         }

-        raw_spu_mfc[m_index].size_tag = value;
+        raw_spu_mfc[index].size_tag = value;
         return true;
     }
     case MFC_Class_CMD_offs:
     {
-        do_dma_transfer(value & ~MFC_START_MASK, raw_spu_mfc[m_index]);
-        raw_spu_mfc[m_index] = {}; // clear non-persistent data
+        do_dma_transfer(value & ~MFC_START_MASK, raw_spu_mfc[index]);
+        raw_spu_mfc[index] = {}; // clear non-persistent data

         if (value & MFC_START_MASK)
         {
@@ -167,7 +163,7 @@ bool RawSPUThread::Write32(const u32 addr, const u32 value)
         else if (value == SPU_RUNCNTL_STOP_REQUEST)
         {
             status &= ~SPU_STATUS_RUNNING;
-            Stop();
+            FastStop();
         }
         else
         {
@@ -180,8 +176,7 @@ bool RawSPUThread::Write32(const u32 addr, const u32 value)
     case SPU_NPC_offs:
     {
-        // check if interrupts are enabled
-        if ((value & 3) != 1 || value >= 0x40000)
+        if ((value & 2) || value >= 0x40000)
         {
             break;
         }
@@ -203,21 +198,10 @@ bool RawSPUThread::Write32(const u32 addr, const u32 value)
     }
     }

-    LOG_ERROR(SPU, "RawSPUThread[%d]: Write32(value=0x%x): unknown/illegal offset (0x%x)", m_index, value, offset);
+    LOG_ERROR(SPU, "RawSPUThread[%d]: Write32(0x%x, value=0x%x): unknown/illegal offset (0x%x)", index, addr, value, offset);
     return false;
 }

-void RawSPUThread::InitRegs()
-{
-    offset = GetStartAddr() + RAW_SPU_LS_OFFSET;
-    SPUThread::InitRegs();
-}
-
-u32 RawSPUThread::GetIndex() const
-{
-    return m_index;
-}
-
 void RawSPUThread::Task()
 {
     PC = npc.exchange(0) & ~3;
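
Because RawSPUThread no longer inherits MemoryBlock, the register offset is derived from the thread's own index rather than GetStartAddr(). A sketch of the addressing used by ReadReg/WriteReg; the constant names are the ones referenced in this diff, but their concrete values below are assumptions taken from the surrounding codebase:

// Assumed values; only the formula itself is taken from this diff.
constexpr u32 RAW_SPU_BASE_ADDR_SKETCH   = 0xE0000000; // assumed base of Raw SPU windows
constexpr u32 RAW_SPU_OFFSET_SKETCH      = 0x00100000; // assumed stride of one window
constexpr u32 RAW_SPU_PROB_OFFSET_SKETCH = 0x00040000; // assumed start of problem-state area

// For the Raw SPU with a given index, an MMIO access at 'addr' maps to a
// register offset inside that SPU's problem-state area:
inline u32 raw_spu_reg_offset(u32 addr, u32 index)
{
    return addr - RAW_SPU_BASE_ADDR_SKETCH - index * RAW_SPU_OFFSET_SKETCH - RAW_SPU_PROB_OFFSET_SKETCH;
}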

View file

@@ -6,24 +6,16 @@ __forceinline static u32 GetRawSPURegAddrByNum(int num, int offset)
     return RAW_SPU_OFFSET * num + RAW_SPU_BASE_ADDR + RAW_SPU_PROB_OFFSET + offset;
 }

-class RawSPUThread
-    : public SPUThread
-    , public MemoryBlock
+class RawSPUThread : public SPUThread
 {
-    u32 m_index;
-
 public:
     RawSPUThread(CPUThreadType type = CPU_THREAD_RAW_SPU);
     virtual ~RawSPUThread();

     void start();

-    bool Read32(const u32 addr, u32* value);
-    bool Write32(const u32 addr, const u32 value);
-
-public:
-    virtual void InitRegs();
-    u32 GetIndex() const;
+    bool ReadReg(const u32 addr, u32& value);
+    bool WriteReg(const u32 addr, const u32 value);

 private:
     virtual void Task();

View file

@@ -67,6 +67,7 @@ void SPUThread::Task()
 void SPUThread::DoReset()
 {
+    InitRegs();
 }

 void SPUThread::InitRegs()
@@ -178,6 +179,12 @@ void SPUThread::FastStop()
     m_status = Stopped;
 }

+void SPUThread::FastRun()
+{
+    m_status = Running;
+    Exec();
+}
+
 void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
 {
     if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK))
@@ -187,17 +194,17 @@ void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
     u32 eal = vm::cast(args.ea, "ea");

-    if (eal >= SYS_SPU_THREAD_BASE_LOW && tg_id && m_type == CPU_THREAD_SPU) // SPU Thread Group MMIO (LS and SNR)
+    if (eal >= SYS_SPU_THREAD_BASE_LOW && m_type == CPU_THREAD_SPU) // SPU Thread Group MMIO (LS and SNR)
     {
-        const u32 num = (eal & SYS_SPU_THREAD_BASE_MASK) / SYS_SPU_THREAD_OFFSET; // thread number in group
-        const u32 offset = (eal & SYS_SPU_THREAD_BASE_MASK) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register
+        const u32 index = (eal - SYS_SPU_THREAD_BASE_LOW) / SYS_SPU_THREAD_OFFSET; // thread number in group
+        const u32 offset = (eal - SYS_SPU_THREAD_BASE_LOW) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register

+        std::shared_ptr<spu_group_t> group = tg.lock();
         std::shared_ptr<CPUThread> t;
-        std::shared_ptr<SpuGroupInfo> tg;

-        if (Emu.GetIdManager().GetIDData(tg_id, tg) && num < tg->list.size() && tg->list[num] && (t = Emu.GetCPU().GetThread(tg->list[num])) && t->GetType() == CPU_THREAD_SPU)
+        if (group && index < group->num && (t = group->threads[index]))
         {
-            SPUThread& spu = static_cast<SPUThread&>(*t);
+            auto& spu = static_cast<SPUThread&>(*t);

             if (offset + args.size - 1 < 0x40000) // LS access
             {
@@ -903,7 +910,7 @@ void SPUThread::stop_and_signal(u32 code)
             status &= ~SPU_STATUS_RUNNING;
         });

-        Stop();
+        FastStop();

         int2.set(SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT);
         return;
@@ -1013,29 +1020,34 @@ void SPUThread::stop_and_signal(u32 code)
             throw __FUNCTION__;
         }

-        std::shared_ptr<SpuGroupInfo> tg;
-        if (!Emu.GetIdManager().GetIDData(tg_id, tg))
-        {
-            LOG_ERROR(SPU, "sys_spu_thread_group_exit(status=0x%x): invalid group (%d)", value, tg_id);
-            throw __FUNCTION__;
-        }
-
         if (Ini.HLELogging.GetValue())
         {
             LOG_NOTICE(SPU, "sys_spu_thread_group_exit(status=0x%x)", value);
         }

-        tg->m_group_exit = true;
-        tg->m_exit_status = value;
-        for (auto& v : tg->list)
+        LV2_LOCK;
+
+        std::shared_ptr<spu_group_t> group = tg.lock();
+
+        if (!group)
         {
-            if (std::shared_ptr<CPUThread> t = Emu.GetCPU().GetThread(v))
+            LOG_ERROR(SPU, "sys_spu_thread_group_exit(status=0x%x): invalid group", value);
+            throw __FUNCTION__;
+        }
+
+        for (auto t : group->threads)
+        {
+            if (t)
             {
-                t->Stop();
+                auto& spu = static_cast<SPUThread&>(*t);
+
+                spu.FastStop();
             }
         }

+        group->state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
+        group->exit_status = value;
+        group->join_state |= STGJSF_GROUP_EXIT;
+        group->join_cv.notify_one();
+
         return;
     }

@@ -1054,8 +1066,10 @@ void SPUThread::stop_and_signal(u32 code)
             LOG_NOTICE(SPU, "sys_spu_thread_exit(status=0x%x)", ch_out_mbox.get_value());
         }

+        LV2_LOCK;
+
         status |= SPU_STATUS_STOPPED_BY_STOP;
-        Stop();
+        FastStop();
         return;
     }
     }
@@ -1087,7 +1101,7 @@ void SPUThread::halt()
             status &= ~SPU_STATUS_RUNNING;
         });

-        Stop();
+        FastStop();

         int2.set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);
         return;
@@ -1099,7 +1113,7 @@ void SPUThread::halt()
 spu_thread::spu_thread(u32 entry, const std::string& name, u32 stack_size, u32 prio)
 {
-    thread = &Emu.GetCPU().AddThread(CPU_THREAD_SPU);
+    thread = Emu.GetCPU().AddThread(CPU_THREAD_SPU);

     thread->SetName(name);
     thread->SetEntry(entry);
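
In do_dma_transfer, group-local MMIO addressing now subtracts SYS_SPU_THREAD_BASE_LOW instead of masking, and the target thread is fetched from the group's own threads array through the new std::weak_ptr<spu_group_t> member. A sketch of that decoding, using the constants this diff defines (SYS_SPU_THREAD_BASE_LOW = 0xf0000000, SYS_SPU_THREAD_OFFSET = 0x100000); the helper is illustrative only:

// Sketch of the new group-local effective-address decoding.
struct decoded_spu_ea
{
    u32 index;  // thread number within the SPU thread group
    u32 offset; // LS offset (or SNR register) inside that thread's window
};

inline decoded_spu_ea decode_group_ea(u32 eal)
{
    const u32 rel = eal - SYS_SPU_THREAD_BASE_LOW;
    return { rel / SYS_SPU_THREAD_OFFSET, rel % SYS_SPU_THREAD_OFFSET };
}

// The target SPUThread is then looked up through the owning group:
//   if (auto group = tg.lock())
//       if (auto t = group->threads[decode_group_ea(eal).index]) { /* LS or SNR access */ }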

View file

@@ -5,7 +5,7 @@
 #include "MFC.h"

 struct event_queue_t;
-struct event_port_t;
+struct spu_group_t;

 // SPU Channels
 enum : u32
@@ -102,10 +102,9 @@
 enum : u32
 {
     SYS_SPU_THREAD_BASE_LOW = 0xf0000000,
-    SYS_SPU_THREAD_BASE_MASK = 0xfffffff,
-    SYS_SPU_THREAD_OFFSET = 0x00100000,
-    SYS_SPU_THREAD_SNR1 = 0x05400c,
-    SYS_SPU_THREAD_SNR2 = 0x05C00c,
+    SYS_SPU_THREAD_OFFSET = 0x100000,
+    SYS_SPU_THREAD_SNR1 = 0x5400c,
+    SYS_SPU_THREAD_SNR2 = 0x5C00c,
 };

 enum
@@ -505,7 +504,7 @@ public:
     spu_interrupt_tag_t int0; // SPU Class 0 Interrupt Management
     spu_interrupt_tag_t int2; // SPU Class 2 Interrupt Management

-    u32 tg_id; // SPU Thread Group Id
+    std::weak_ptr<spu_group_t> tg; // SPU Thread Group Id

     std::unordered_map<u32, std::shared_ptr<event_queue_t>> spuq; // Event Queue Keys for SPU Thread
     std::weak_ptr<event_queue_t> spup[64]; // SPU Ports
@@ -653,6 +652,7 @@ public:
     virtual void Task();
     void FastCall(u32 ls_addr);
     void FastStop();
+    void FastRun();

 protected:
     virtual void DoReset();
@@ -701,11 +701,13 @@ public:
     cpu_thread& run() override
     {
-        thread->Run();
-        static_cast<SPUThread*>(thread)->GPR[3].from64(argc);
-        static_cast<SPUThread*>(thread)->GPR[4].from64(argv.addr());
-        static_cast<SPUThread*>(thread)->GPR[5].from64(envp.addr());
+        auto& spu = static_cast<SPUThread&>(*thread);
+
+        spu.Run();
+
+        spu.GPR[3].from64(argc);
+        spu.GPR[4].from64(argv.addr());
+        spu.GPR[5].from64(envp.addr());

         return *this;
     }

View file

@@ -1,51 +1,16 @@
 #include "stdafx.h"
 #include "Utilities/Log.h"
-#include "Emu/System.h"
 #include "Memory.h"
-#include "Emu/Cell/RawSPUThread.h"

 MemoryBase Memory;

-u32 MemoryBase::InitRawSPU(MemoryBlock* raw_spu)
-{
-    LV2_LOCK;
-
-    u32 index;
-    for (index = 0; index < sizeof(RawSPUMem) / sizeof(RawSPUMem[0]); index++)
-    {
-        if (!RawSPUMem[index])
-        {
-            RawSPUMem[index] = raw_spu;
-            break;
-        }
-    }
-
-    MemoryBlocks.push_back(raw_spu->SetRange(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, RAW_SPU_PROB_OFFSET));
-    return index;
-}
-
-void MemoryBase::CloseRawSPU(MemoryBlock* raw_spu, const u32 num)
-{
-    LV2_LOCK;
-
-    for (int i = 0; i < MemoryBlocks.size(); ++i)
-    {
-        if (MemoryBlocks[i] == raw_spu)
-        {
-            MemoryBlocks.erase(MemoryBlocks.begin() + i);
-            break;
-        }
-    }
-    if (num < sizeof(RawSPUMem) / sizeof(RawSPUMem[0])) RawSPUMem[num] = nullptr;
-}
+std::mutex g_memory_mutex;

 void MemoryBase::Init(MemoryType type)
 {
     if (m_inited) return;
     m_inited = true;

-    memset(RawSPUMem, 0, sizeof(RawSPUMem));
-
     LOG_NOTICE(MEMORY, "Initializing memory: g_base_addr = 0x%llx, g_priv_addr = 0x%llx", (u64)vm::g_base_addr, (u64)vm::g_priv_addr);

 #ifdef _WIN32
@@ -101,35 +66,11 @@ void MemoryBase::Close()
     MemoryBlocks.clear();
 }

-bool MemoryBase::WriteMMIO32(u32 addr, const u32 data)
-{
-    LV2_LOCK;
-
-    if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] && ((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Write32(addr, data))
-    {
-        return true;
-    }
-
-    return false;
-}
-
-bool MemoryBase::ReadMMIO32(u32 addr, u32& result)
-{
-    LV2_LOCK;
-
-    if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] && ((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Read32(addr, &result))
-    {
-        return true;
-    }
-
-    return false;
-}
-
 bool MemoryBase::Map(const u32 addr, const u32 size)
 {
     assert(size && (size | addr) % 4096 == 0);
-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
     {
@@ -147,7 +88,7 @@ bool MemoryBase::Map(const u32 addr, const u32 size)
 bool MemoryBase::Unmap(const u32 addr)
 {
-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     for (u32 i = 0; i < MemoryBlocks.size(); i++)
     {
@@ -234,7 +175,7 @@ DynamicMemoryBlockBase::DynamicMemoryBlockBase()
 const u32 DynamicMemoryBlockBase::GetUsedSize() const
 {
-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     u32 size = 0;
@@ -253,7 +194,7 @@ bool DynamicMemoryBlockBase::IsInMyRange(const u32 addr, const u32 size)
 MemoryBlock* DynamicMemoryBlockBase::SetRange(const u32 start, const u32 size)
 {
-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     m_max_size = PAGE_4K(size);
     if (!MemoryBlock::SetRange(start, 0))
@@ -267,7 +208,7 @@ MemoryBlock* DynamicMemoryBlockBase::SetRange(const u32 start, const u32 size)
 void DynamicMemoryBlockBase::Delete()
 {
-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     m_allocated.clear();
     m_max_size = 0;
@@ -289,7 +230,7 @@ bool DynamicMemoryBlockBase::AllocFixed(u32 addr, u32 size)
         return false;
     }

-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     for (u32 i = 0; i<m_allocated.size(); ++i)
     {
@@ -330,7 +271,7 @@ u32 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
         exsize = size + align - 1;
     }

-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     for (u32 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
     {
@@ -371,7 +312,7 @@ bool DynamicMemoryBlockBase::Alloc()
 bool DynamicMemoryBlockBase::Free(u32 addr)
 {
-    LV2_LOCK;
+    std::lock_guard<std::mutex> lock(g_memory_mutex);

     for (u32 num = 0; num < m_allocated.size(); num++)
     {

View file

@@ -33,7 +33,6 @@ public:
     DynamicMemoryBlock Userspace;
     DynamicMemoryBlock RSXFBMem;
     DynamicMemoryBlock StackMem;
-    MemoryBlock* RawSPUMem[(0x100000000 - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET];
     VirtualMemoryBlock RSXIOMem;

     struct
@@ -67,18 +66,10 @@ public:
     void UnregisterPages(u32 addr, u32 size);

-    u32 InitRawSPU(MemoryBlock* raw_spu);
-    void CloseRawSPU(MemoryBlock* raw_spu, const u32 num);
-
     void Init(MemoryType type);
     void Close();

-    bool WriteMMIO32(u32 addr, const u32 data);
-    bool ReadMMIO32(u32 addr, u32& result);
-
     u32 GetUserMemTotalSize()
     {
         return UserMemory->GetSize();

View file

@@ -61,30 +61,30 @@ void CallbackManager::Init()
     if (Memory.PSV.RAM.GetStartAddr())
     {
-        m_cb_thread = &Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
+        m_cb_thread = Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
         m_cb_thread->SetName("Callback Thread");
         m_cb_thread->SetEntry(0);
         m_cb_thread->SetPrio(1001);
         m_cb_thread->SetStackSize(0x10000);
         m_cb_thread->InitStack();
         m_cb_thread->InitRegs();
-        static_cast<ARMv7Thread*>(m_cb_thread)->DoRun();
+        static_cast<ARMv7Thread&>(*m_cb_thread).DoRun();
     }
     else
     {
-        m_cb_thread = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+        m_cb_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
         m_cb_thread->SetName("Callback Thread");
         m_cb_thread->SetEntry(0);
         m_cb_thread->SetPrio(1001);
         m_cb_thread->SetStackSize(0x10000);
         m_cb_thread->InitStack();
         m_cb_thread->InitRegs();
-        static_cast<PPUThread*>(m_cb_thread)->DoRun();
+        static_cast<PPUThread&>(*m_cb_thread).DoRun();
     }

     thread_t cb_async_thread("CallbackManager thread", [this]()
     {
-        SetCurrentNamedThread(m_cb_thread);
+        SetCurrentNamedThread(&*m_cb_thread);

         while (!Emu.IsStopped())
         {

View file

@@ -10,7 +10,7 @@ class CallbackManager
     std::mutex m_mutex;
     std::vector<std::function<s32(CPUThread&)>> m_cb_list;
     std::vector<std::function<void(CPUThread&)>> m_async_list;
-    CPUThread* m_cb_thread;
+    std::shared_ptr<CPUThread> m_cb_thread;

     struct PauseResumeCBS
     {

View file

@@ -223,7 +223,7 @@ u32 adecOpen(AudioDecoder* adec_ptr)
     adec.id = adec_id;

-    adec.adecCb = (PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+    adec.adecCb = static_cast<PPUThread*>(Emu.GetCPU().AddThread(CPU_THREAD_PPU).get());
     adec.adecCb->SetName(fmt::format("AudioDecoder[%d] Callback", adec_id));
     adec.adecCb->SetEntry(0);
     adec.adecCb->SetPrio(1001);

View file

@@ -305,7 +305,7 @@ u32 dmuxOpen(Demuxer* dmux_ptr)
     dmux.id = dmux_id;

-    dmux.dmuxCb = (PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+    dmux.dmuxCb = static_cast<PPUThread*>(Emu.GetCPU().AddThread(CPU_THREAD_PPU).get());
     dmux.dmuxCb->SetName(fmt::format("Demuxer[%d] Callback", dmux_id));
     dmux.dmuxCb->SetEntry(0);
     dmux.dmuxCb->SetPrio(1001);

View file

@@ -4,6 +4,7 @@
 #include "Emu/SysCalls/Modules.h"
 #include "Emu/SysCalls/CB_FUNC.h"

+#include "Emu/CPU/CPUThreadManager.h"
 #include "Emu/Cell/SPUThread.h"
 #include "Emu/SysCalls/lv2/sleep_queue.h"
 #include "Emu/SysCalls/lv2/sys_lwmutex.h"
@@ -143,24 +144,23 @@ s32 spursInit(
     if (flags & SAF_UNKNOWN_FLAG_7) tgt |= 0x102;
     if (flags & SAF_UNKNOWN_FLAG_8) tgt |= 0xC02;
     if (flags & SAF_UNKNOWN_FLAG_9) tgt |= 0x800;
-    auto tg = spu_thread_group_create(name + "CellSpursKernelGroup", nSpus, spuPriority, tgt, container);
-    assert(tg);
-    spurs->m.spuTG = tg->m_id;
+    spurs->m.spuTG = spu_thread_group_create(name + "CellSpursKernelGroup", nSpus, spuPriority, tgt, container);
+    assert(spurs->m.spuTG.data());

     name += "CellSpursKernel0";
     for (s32 num = 0; num < nSpus; num++, name[name.size() - 1]++)
     {
-        auto spu = spu_thread_initialize(tg, num, spurs->m.spuImg, name, SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE, (u64)num << 32, spurs.addr(), 0, 0);
-        spu->RegisterHleFunction(spurs->m.spuImg.entry_point, spursKernelEntry);
-        spurs->m.spus[num] = spu->GetId();
+        const u32 id = spu_thread_initialize(spurs->m.spuTG, num, vm::ptr<sys_spu_image>::make(spurs.addr() + offsetof(CellSpurs, m.spuImg)), name, SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE, (u64)num << 32, spurs.addr(), 0, 0);
+        static_cast<SPUThread&>(*Emu.GetCPU().GetThread(id).get()).RegisterHleFunction(spurs->m.spuImg.entry_point, spursKernelEntry);
+        spurs->m.spus[num] = id;
     }

     if (flags & SAF_SPU_PRINTF_ENABLED)
     {
         // spu_printf: attach group
-        if (!spu_printf_agcb || spu_printf_agcb(tg->m_id) != CELL_OK)
+        if (!spu_printf_agcb || spu_printf_agcb(spurs->m.spuTG) != CELL_OK)
         {
             // remove flag if failed
             spurs->m.flags &= ~SAF_SPU_PRINTF_ENABLED;
@@ -328,13 +328,13 @@ s32 spursInit(
             return;
         }
     }
-    })->GetId();
+    });

     spurs->m.ppu1 = ppu_thread_create(0, 0, ppuPriority, 0x8000, true, false, name + "SpursHdlr1", [spurs](PPUThread& CPU)
     {
         // TODO
-    })->GetId();
+    });

     // enable exception event handler
     if (spurs->m.enableEH.compare_and_swap_test(be_t<u32>::make(0), be_t<u32>::make(1)))

View file

@@ -213,7 +213,7 @@ u32 vdecOpen(VideoDecoder* vdec_ptr)
     vdec.id = vdec_id;

-    vdec.vdecCb = (PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+    vdec.vdecCb = static_cast<PPUThread*>(Emu.GetCPU().AddThread(CPU_THREAD_PPU).get());
     vdec.vdecCb->SetName(fmt::format("VideoDecoder[%d] Callback", vdec_id));
     vdec.vdecCb->SetEntry(0);
     vdec.vdecCb->SetPrio(1001);

View file

@@ -333,14 +333,16 @@ int cellSurMixerCreate(vm::ptr<const CellSurMixerConfig> config)
     {
         AudioPortConfig& port = g_audio.ports[g_surmx.audio_port];

-        PPUThread& cb_thread = *(PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
-        cb_thread.SetName("Surmixer Callback Thread");
-        cb_thread.SetEntry(0);
-        cb_thread.SetPrio(1001);
-        cb_thread.SetStackSize(0x10000);
-        cb_thread.InitStack();
-        cb_thread.InitRegs();
-        cb_thread.DoRun();
+        auto cb_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+
+        auto& ppu = static_cast<PPUThread&>(*cb_thread);
+        ppu.SetName("Surmixer Callback Thread");
+        ppu.SetEntry(0);
+        ppu.SetPrio(1001);
+        ppu.SetStackSize(0x10000);
+        ppu.InitStack();
+        ppu.InitRegs();
+        ppu.DoRun();

         while (port.state.read_relaxed() != AUDIO_PORT_STATE_CLOSED && !Emu.IsStopped())
         {
@@ -357,7 +359,7 @@ int cellSurMixerCreate(vm::ptr<const CellSurMixerConfig> config)
             memset(mixdata, 0, sizeof(mixdata));
             if (surMixerCb)
             {
-                surMixerCb(cb_thread, surMixerCbArg, (u32)mixcount, 256);
+                surMixerCb(ppu, surMixerCbArg, (u32)mixcount, 256);
             }

             //u64 stamp1 = get_system_time();
@@ -462,7 +464,7 @@ int cellSurMixerCreate(vm::ptr<const CellSurMixerConfig> config)
             ssp.clear();
         }

-        Emu.GetCPU().RemoveThread(cb_thread.GetId());
+        Emu.GetCPU().RemoveThread(ppu.GetId());
         surMixerCb.set(0);
     });

View file

@@ -176,7 +176,7 @@ s32 sys_event_queue_receive(PPUThread& CPU, u32 equeue_id, vm::ptr<sys_event_t>
         queue->cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
     }

-    // event data is returned in registers, second arg is not used
+    // event data is returned in registers (second arg is not used)
     auto& event = queue->events.front();
     CPU.GPR[4] = event.source;
     CPU.GPR[5] = event.data1;
@@ -206,10 +206,9 @@ s32 sys_event_queue_drain(u32 equeue_id)

 u32 event_port_create(u64 name)
 {
-    std::shared_ptr<event_port_t> eport(new event_port_t());
-    const u32 id = sys_event.GetNewId(eport, TYPE_EVENT_PORT);
-    eport->name = name ? name : ((u64)process_getpid() << 32) | (u64)id;
-    return id;
+    std::shared_ptr<event_port_t> eport(new event_port_t(SYS_EVENT_PORT_LOCAL, name));
+
+    return sys_event.GetNewId(eport, TYPE_EVENT_PORT);
 }

 s32 sys_event_port_create(vm::ptr<u32> eport_id, s32 port_type, u64 name)
@@ -224,7 +223,9 @@ s32 sys_event_port_create(vm::ptr<u32> eport_id, s32 port_type, u64 name)
     LV2_LOCK;

-    *eport_id = event_port_create(name);
+    std::shared_ptr<event_port_t> eport(new event_port_t(port_type, name));
+
+    *eport_id = sys_event.GetNewId(eport, TYPE_EVENT_PORT);

     return CELL_OK;
 }
@@ -262,7 +263,10 @@ s32 sys_event_port_connect_local(u32 eport_id, u32 equeue_id)
         return CELL_ESRCH;
     }

-    // CELL_EINVAL is never returned (I have no idea if SYS_EVENT_PORT_LOCAL is the only possible type)
+    if (port->type != SYS_EVENT_PORT_LOCAL)
+    {
+        return CELL_EINVAL;
+    }

     if (!port->queue.expired())
     {
@@ -292,11 +296,15 @@ s32 sys_event_port_disconnect(u32 eport_id)
         return CELL_ENOTCONN;
     }

-    // CELL_EBUSY is not returned
+    //const u64 source = port->name ? port->name : ((u64)process_getpid() << 32) | (u64)eport_id;
+
     //for (auto& event : queue->events)
     //{
-    //    if (event.source == port->name)
+    //    if (event.source == source)
     //    {
-    //        return CELL_EBUSY; // not sure about it
+    //        return CELL_EBUSY; // ???
     //    }
     //}
@@ -328,7 +336,9 @@ s32 sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
         return CELL_EBUSY;
     }

-    queue->events.emplace_back(port->name, data1, data2, data3);
+    const u64 source = port->name ? port->name : ((u64)process_getpid() << 32) | (u64)eport_id;
+
+    queue->events.emplace_back(source, data1, data2, data3);
     queue->cv.notify_one();

     return CELL_OK;
 }
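
The event "source" word is now derived at send time when no name was supplied, instead of being baked into the port at creation. A sketch of that derivation (helper name hypothetical; the expression is the one used in sys_event_port_send above):

// Sketch: how the event source value is formed for an unnamed port.
u64 make_event_source(u64 port_name, u32 eport_id)
{
    // A user-supplied name wins; otherwise combine the process id and port id.
    return port_name ? port_name : ((u64)process_getpid() << 32) | (u64)eport_id;
}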

View file

@@ -79,6 +79,7 @@ struct event_queue_t
     std::deque<event_t> events;

+    // TODO: use sleep queue, remove condition variable (use thread's one instead)
     std::condition_variable cv;
     std::atomic<s32> waiters;
@@ -95,11 +96,13 @@ struct event_queue_t
 struct event_port_t
 {
-    u64 name; // generated or user-specified code that is passed as event source
+    const s32 type; // port type, must be SYS_EVENT_PORT_LOCAL
+    const u64 name; // passed as event source (generated from id and process id if not set)
     std::weak_ptr<event_queue_t> queue; // event queue this port is connected to

-    event_port_t(u64 name = 0)
-        : name(name)
+    event_port_t(s32 type, u64 name)
+        : type(type)
+        , name(name)
     {
     }
 };

View file

@@ -68,7 +68,7 @@ s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread,
     auto& tag = class_id ? spu.int2 : spu.int0;

-    // CELL_ESTAT is never returned (can't detect exact condition)
+    // CELL_ESTAT is not returned (can't detect exact condition)

     std::shared_ptr<CPUThread> it = Emu.GetCPU().GetThread((u32)intrthread);

View file

@@ -162,68 +162,47 @@ s32 sys_ppu_thread_restart(u64 thread_id)
     return CELL_OK;
 }

-PPUThread* ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, const std::string& name, std::function<void(PPUThread&)> task)
+u32 ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, std::string name, std::function<void(PPUThread&)> task)
 {
-    PPUThread& new_thread = *(PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
-    // Note: (Syphurith) I haven't figured out the minimum stack size of PPU Thread.
-    // Maybe it can be done with pthread_attr_getstacksize function.
-    // And i toke 4096 (PTHREAD_STACK_MIN, and the smallest allocation unit) for this.
-    if ((stacksize % 4096) || (stacksize == 0)) {
-        // If not times of smallest allocation unit, round it up to the nearest one.
-        // And regard zero as a same condition.
-        sys_ppu_thread.Warning("sys_ppu_thread_create: stacksize increased from 0x%x to 0x%x.",
-            stacksize, SYS_PPU_THREAD_STACK_MIN * ((u32)(stacksize / SYS_PPU_THREAD_STACK_MIN) + 1));
-        stacksize = SYS_PPU_THREAD_STACK_MIN * ((u32)(stacksize / SYS_PPU_THREAD_STACK_MIN) + 1);
-    }
+    auto new_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
+    auto& ppu = static_cast<PPUThread&>(*new_thread);

-    u32 id = new_thread.GetId();
-    new_thread.SetEntry(entry);
-    new_thread.SetPrio(prio);
-    new_thread.SetStackSize(stacksize);
-    new_thread.SetJoinable(is_joinable);
-    new_thread.SetName(name);
-    new_thread.custom_task = task;
-    new_thread.Run();
-
-    sys_ppu_thread.Notice("*** New PPU Thread [%s] (%s, entry=0x%x): id = %d", name.c_str(),
-        is_interrupt ? "interrupt" :
-        (is_joinable ? "joinable" : "detached"), entry, id);
+    ppu.SetEntry(entry);
+    ppu.SetPrio(prio);
+    ppu.SetStackSize(stacksize < 0x4000 ? 0x4000 : stacksize); // (hack) adjust minimal stack size
+    ppu.SetJoinable(is_joinable);
+    ppu.SetName(name);
+    ppu.custom_task = task;
+    ppu.Run();

     if (!is_interrupt)
     {
-        new_thread.GPR[3] = arg;
-        new_thread.Exec();
+        ppu.GPR[3] = arg;
+        ppu.Exec();
     }

-    return &new_thread;
+    return ppu.GetId();
 }

 s32 sys_ppu_thread_create(vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::ptr<const char> threadname)
 {
-    sys_ppu_thread.Log("sys_ppu_thread_create(thread_id_addr=0x%x, entry=0x%x, arg=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname_addr=0x%x('%s'))",
-        thread_id.addr(), entry, arg, prio, stacksize, flags, threadname.addr(), threadname ? threadname.get_ptr() : "");
+    sys_ppu_thread.Warning("sys_ppu_thread_create(thread_id=*0x%x, entry=0x%x, arg=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=*0x%x)", thread_id, entry, arg, prio, stacksize, flags, threadname);

-    bool is_joinable = false;
-    bool is_interrupt = false;
-
-    switch (flags)
+    if (prio < 0 || prio > 3071)
     {
-    case 0: break;
-    case SYS_PPU_THREAD_CREATE_JOINABLE:
-        is_joinable = true;
-        break;
-    case SYS_PPU_THREAD_CREATE_INTERRUPT:
-        is_interrupt = true;
-        break;
-    default: sys_ppu_thread.Error("sys_ppu_thread_create(): unknown flags value (0x%llx)", flags); return CELL_EPERM;
+        return CELL_EINVAL;
     }

-    std::string name = threadname ? threadname.get_ptr() : "";
+    bool is_joinable = flags & SYS_PPU_THREAD_CREATE_JOINABLE;
+    bool is_interrupt = flags & SYS_PPU_THREAD_CREATE_INTERRUPT;

-    *thread_id = ppu_thread_create(entry, arg, prio, stacksize, is_joinable, is_interrupt, name)->GetId();
+    if (is_joinable && is_interrupt)
+    {
+        return CELL_EPERM;
+    }
+
+    *thread_id = ppu_thread_create(entry, arg, prio, stacksize, is_joinable, is_interrupt, threadname ? threadname.get_ptr() : "");

     return CELL_OK;
 }
@@ -249,12 +228,15 @@ s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<u64> thread_id)
 s32 sys_ppu_thread_rename(u64 thread_id, vm::ptr<const char> name)
 {
-    sys_ppu_thread.Log("sys_ppu_thread_rename(thread_id=%d, name_addr=0x%x('%s'))", thread_id, name.addr(), name.get_ptr());
+    sys_ppu_thread.Log("sys_ppu_thread_rename(thread_id=0x%llx, name=*0x%x)", thread_id, name);

-    std::shared_ptr<CPUThread> thr = Emu.GetCPU().GetThread(thread_id);
-    if (!thr)
+    std::shared_ptr<CPUThread> t = Emu.GetCPU().GetThread(thread_id, CPU_THREAD_PPU);
+
+    if (!t)
+    {
         return CELL_ESRCH;
+    }

-    thr->SetThreadName(name.get_ptr());
+    t->SetThreadName(name.get_ptr());

     return CELL_OK;
 }
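
Flag handling in sys_ppu_thread_create switches from a switch over exact flag values to independent bit tests, with explicit EINVAL/EPERM paths. A sketch of the resulting validation logic (helper name hypothetical; constants and limits are the ones used in this diff):

// Sketch of the new argument validation.
s32 validate_ppu_thread_args(s32 prio, u64 flags)
{
    if (prio < 0 || prio > 3071)
    {
        return CELL_EINVAL; // priority outside the accepted range
    }

    const bool is_joinable  = (flags & SYS_PPU_THREAD_CREATE_JOINABLE) != 0;
    const bool is_interrupt = (flags & SYS_PPU_THREAD_CREATE_INTERRUPT) != 0;

    if (is_joinable && is_interrupt)
    {
        return CELL_EPERM; // the two creation modes are mutually exclusive
    }

    return CELL_OK;
}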

View file

@@ -20,7 +20,7 @@ enum stackSize
 };

 // Aux
-PPUThread* ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, const std::string& name, std::function<void(PPUThread&)> task = nullptr);
+u32 ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, std::string name, std::function<void(PPUThread&)> task = nullptr);

 // SysCalls
 void sys_ppu_thread_exit(PPUThread& CPU, u64 errorcode);

File diff suppressed because it is too large.

View file

@@ -32,7 +32,7 @@ enum : u64
     SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY = 0xFFFFFFFF53505504ull,
 };

-enum
+enum : u32
 {
     SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED,
     SPU_THREAD_GROUP_STATUS_INITIALIZED,
@@ -54,7 +54,7 @@ enum : s32
 struct sys_spu_thread_group_attribute
 {
-    be_t<u32> nsize;
+    be_t<u32> nsize; // name length including NULL terminator
     vm::bptr<const char> name;
     be_t<s32> type;
     be_t<u32> ct; // memory container id
@@ -121,35 +121,50 @@ enum : u32
     SYS_SPU_IMAGE_DIRECT = 1,
 };

-struct SpuGroupInfo
+struct spu_arg_t
 {
-    std::vector<u32> list;
-    std::atomic<u32> lock;
-    std::string m_name;
-    u32 m_id;
-    s32 m_prio;
-    s32 m_type;
-    u32 m_ct;
-    u32 m_count;
-    s32 m_state; //SPU Thread Group State.
-    u32 m_exit_status;
-    bool m_group_exit;
+    u64 arg1;
+    u64 arg2;
+    u64 arg3;
+    u64 arg4;
+};

-    SpuGroupInfo(const std::string& name, u32 num, s32 prio, s32 type, u32 ct)
-        : m_name(name)
-        , m_prio(prio)
-        , m_type(type)
-        , m_ct(ct)
-        , lock(0)
-        , m_count(num)
-        , m_state(0)
-        , m_exit_status(0)
-        , m_group_exit(false)
+// SPU Thread Group Join State Flag
+enum : u32
+{
+    STGJSF_IS_JOINING = (1 << 0),
+    STGJSF_TERMINATED = (1 << 1), // set if SPU Thread Group is terminated by sys_spu_thread_group_terminate
+    STGJSF_GROUP_EXIT = (1 << 2), // set if SPU Thread Group is terminated by sys_spu_thread_group_exit
+};
+
+struct spu_group_t
+{
+    const std::string name;
+    const u32 num; // SPU Number
+    const s32 type; // SPU Thread Group Type
+    const u32 ct; // Memory Container Id
+
+    std::array<std::shared_ptr<CPUThread>, 256> threads;
+    std::array<spu_arg_t, 256> args; // SPU Thread Arguments
+    std::array<vm::ptr<sys_spu_image>, 256> images; // SPU Thread Images
+
+    s32 prio; // SPU Thread Group Priority
+    u32 state; // SPU Thread Group State
+    s32 exit_status; // SPU Thread Group Exit Status
+
+    std::atomic<u32> join_state; // flags used to detect exit cause
+    std::condition_variable join_cv; // used to signal waiting PPU thread
+
+    spu_group_t(std::string name, u32 num, s32 prio, s32 type, u32 ct)
+        : name(name)
+        , num(num)
+        , prio(prio)
+        , type(type)
+        , ct(ct)
+        , state(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
+        , exit_status(0)
+        , join_state(0)
     {
-        m_state = SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED; //Before all the nums done, it is not initialized.
-        list.resize(256);
-        for (auto& v : list) v = 0;
-        m_state = SPU_THREAD_GROUP_STATUS_INITIALIZED; //Then Ready to Start. Cause Reference use New i can only place this here.
     }
 };
@@ -161,22 +176,21 @@ u32 LoadSpuImage(vfsStream& stream, u32& spu_ep);
 // Aux
 s32 spu_image_import(sys_spu_image& img, u32 src, u32 type);
-std::shared_ptr<SpuGroupInfo> spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type, u32 container);
-SPUThread* spu_thread_initialize(std::shared_ptr<SpuGroupInfo>& group, u32 spu_num, sys_spu_image& img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task = nullptr);
+u32 spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type, u32 container);
+u32 spu_thread_initialize(u32 group, u32 spu_num, vm::ptr<sys_spu_image> img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task = nullptr);

 // SysCalls
 s32 sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu);
 s32 sys_spu_image_open(vm::ptr<sys_spu_image> img, vm::ptr<const char> path);
 s32 sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group, u32 spu_num, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_thread_attribute> attr, vm::ptr<sys_spu_thread_argument> arg);
 s32 sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg);
+s32 sys_spu_thread_group_create(vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr);
 s32 sys_spu_thread_group_destroy(u32 id);
 s32 sys_spu_thread_group_start(u32 id);
 s32 sys_spu_thread_group_suspend(u32 id);
 s32 sys_spu_thread_group_resume(u32 id);
 s32 sys_spu_thread_group_yield(u32 id);
-s32 sys_spu_thread_group_terminate(u32 id, int value);
-s32 sys_spu_thread_group_create(vm::ptr<u32> id, u32 num, int prio, vm::ptr<sys_spu_thread_group_attribute> attr);
-s32 sys_spu_thread_create(vm::ptr<u32> thread_id, vm::ptr<u32> entry, u64 arg, int prio, u32 stacksize, u64 flags, u32 threadname_addr);
+s32 sys_spu_thread_group_terminate(u32 id, s32 value);
 s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status);
 s32 sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et);
 s32 sys_spu_thread_group_disconnect_event(u32 id, u32 et);
@@ -194,7 +208,7 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num);
 s32 sys_spu_thread_unbind_queue(u32 id, u32 spuq_num);
 s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr<u32> status);

-s32 sys_raw_spu_create(vm::ptr<u32> id, u32 attr_addr);
+s32 sys_raw_spu_create(vm::ptr<u32> id, vm::ptr<void> attr);
 s32 sys_raw_spu_destroy(u32 id);
 s32 sys_raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 hwthread, vm::ptr<u32> intrtag);
 s32 sys_raw_spu_set_int_mask(u32 id, u32 class_id, u64 mask);
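
The join_state bit flags replace the old single m_group_exit boolean, so a joining PPU thread can distinguish how the group stopped. A sketch of how those flags are meant to be read (the flag and field names come from this diff; the numeric return values below are purely illustrative, not the real join-cause constants):

// Illustrative only: classify why an SPU thread group stopped.
u32 classify_group_exit(const spu_group_t& group)
{
    const u32 js = group.join_state.load();

    if (js & STGJSF_GROUP_EXIT) return 1; // group called sys_spu_thread_group_exit
    if (js & STGJSF_TERMINATED) return 2; // stopped via sys_spu_thread_group_terminate
    return 0;                             // all threads exited normally
}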