Mirror of https://github.com/RPCS3/rpcs3.git
Partial commit: Cell

commit c4e99dbdb2, parent 42e1d4d752
32 changed files with 10685 additions and 12527 deletions
@@ -1,15 +1,15 @@
#include "stdafx.h"
#include "Utilities/Config.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/state.h"

#include "Emu/IdManager.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/SysCalls/ErrorCodes.h"
#include "Emu/SysCalls/lv2/sys_spu.h"
#include "Emu/SysCalls/lv2/sys_event_flag.h"
#include "Emu/SysCalls/lv2/sys_event.h"
#include "Emu/SysCalls/lv2/sys_interrupt.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/lv2/sys_spu.h"
#include "Emu/Cell/lv2/sys_event_flag.h"
#include "Emu/Cell/lv2/sys_event.h"
#include "Emu/Cell/lv2/sys_interrupt.h"

#include "Emu/Cell/SPUDisAsm.h"
#include "Emu/Cell/SPUThread.h"
@@ -20,8 +20,24 @@

extern u64 get_timebased_time();

// defined here since SPUDisAsm.cpp doesn't exist
const spu_opcode_table_t<void(SPUDisAsm::*)(spu_opcode_t)> SPUDisAsm::opcodes{ DEFINE_SPU_OPCODES(&SPUDisAsm::), &SPUDisAsm::UNK };
enum class spu_decoder_type
{
precise,
fast,
asmjit,
llvm,
};

cfg::map_entry<spu_decoder_type> g_cfg_spu_decoder(cfg::root.core, "SPU Decoder", 2,
{
{ "Interpreter (precise)", spu_decoder_type::precise },
{ "Interpreter (fast)", spu_decoder_type::fast },
{ "Recompiler (ASMJIT)", spu_decoder_type::asmjit },
{ "Recompiler (LLVM)", spu_decoder_type::llvm },
});

const spu_decoder<spu_interpreter_precise> s_spu_interpreter_precise;
const spu_decoder<spu_interpreter_fast> s_spu_interpreter_fast;

thread_local bool spu_channel_t::notification_required;
@@ -31,7 +47,7 @@ void spu_int_ctrl_t::set(u64 ints)
ints &= mask;

// notify if at least 1 bit was set
if (ints && ~stat._or(ints) & ints && tag)
if (ints && ~stat.fetch_or(ints) & ints && tag)
{
LV2_LOCK;
@@ -44,112 +60,23 @@ void spu_int_ctrl_t::set(u64 ints)
}
}

void spu_int_ctrl_t::clear(u64 ints)
{
stat &= ~ints;
}

const spu_imm_table_t g_spu_imm;

SPUThread::SPUThread(CPUThreadType type, const std::string& name, u32 index, u32 offset)
: CPUThread(type, name)
, index(index)
, offset(offset)
{
}

SPUThread::SPUThread(const std::string& name, u32 index)
: CPUThread(CPU_THREAD_SPU, name)
, index(index)
, offset(vm::alloc(0x40000, vm::main))
{
CHECK_ASSERTION(offset);
}

SPUThread::~SPUThread()
{
// Deallocate Local Storage
vm::dealloc_verbose_nothrow(offset);
}

bool SPUThread::is_paused() const
{
if (CPUThread::is_paused())
{
return true;
}

if (const auto group = tg.lock())
{
if (group->state >= SPU_THREAD_GROUP_STATUS_WAITING && group->state <= SPU_THREAD_GROUP_STATUS_SUSPENDED)
{
return true;
}
}

return false;
}

std::string SPUThread::get_name() const
{
return fmt::format("%s[0x%x] Thread (%s)[0x%05x]", CPUThread::GetTypeString(), m_id, CPUThread::get_name(), pc);
return fmt::format("%sSPU[0x%x] Thread (%s)", offset > RAW_SPU_BASE_ADDR ? "Raw" : "", id, name);
}

void SPUThread::dump_info() const
std::string SPUThread::dump() const
{
CPUThread::dump_info();
std::string ret = "Registers:\n=========\n";

for (uint i = 0; i<128; ++i) ret += fmt::format("GPR[%d] = 0x%s\n", i, gpr[i].to_hex().c_str());

return ret;
}

void SPUThread::cpu_task()
{
std::fesetround(FE_TOWARDZERO);

if (!custom_task && !m_dec)
{
// Select opcode table (TODO)
const auto& table = rpcs3::state.config.core.spu_decoder.value() == spu_decoder_type::interpreter_precise ? spu_interpreter::precise::g_spu_opcode_table : spu_interpreter::fast::g_spu_opcode_table;

// LS base address
const auto base = vm::_ptr<const u32>(offset);

while (true)
{
if (!m_state)
{
// read opcode
const u32 opcode = base[pc / 4];

// call interpreter function
table[opcode](*this, { opcode });

// next instruction
pc += 4;

continue;
}

if (check_status())
{
return;
}
}
}

if (custom_task)
{
if (check_status()) return;

return custom_task(*this);
}

while (!m_state || !check_status())
{
// decode instruction using specified decoder
pc += m_dec->DecodeMemory(pc + offset);
}
}

void SPUThread::init_regs()
void SPUThread::cpu_init()
{
gpr = {};
fpscr.Reset();
@@ -190,75 +117,96 @@ void SPUThread::init_regs()
gpr[1]._u32[3] = 0x3FFF0; // initial stack frame pointer
}

void SPUThread::init_stack()
void SPUThread::cpu_task()
{
// nothing to do
}
std::fesetround(FE_TOWARDZERO);

void SPUThread::close_stack()
{
// nothing to do here
}

void SPUThread::do_run()
{
m_dec.reset();

switch (auto mode = rpcs3::state.config.core.spu_decoder.value())
if (custom_task)
{
case spu_decoder_type::interpreter_precise: // Interpreter 1 (Precise)
case spu_decoder_type::interpreter_fast: // Interpreter 2 (Fast)
{
break;
if (check_status()) return;

return custom_task(*this);
}

case spu_decoder_type::recompiler_asmjit:
_log::g_tls_make_prefix = [](const auto&, auto, const auto&)
{
m_dec.reset(new SPURecompilerDecoder(*this));
break;
const auto cpu = static_cast<SPUThread*>(get_current_cpu_thread());

return fmt::format("%s [0x%05x]", cpu->get_name(), cpu->pc);
};

if (g_cfg_spu_decoder.get() == spu_decoder_type::asmjit)
{
if (!spu_db) spu_db = fxm::get_always<SPUDatabase>();
return spu_recompiler_base::enter(*this);
}

default:
// Select opcode table
const auto& table = *(
g_cfg_spu_decoder.get() == spu_decoder_type::precise ? &s_spu_interpreter_precise.get_table() :
g_cfg_spu_decoder.get() == spu_decoder_type::fast ? &s_spu_interpreter_fast.get_table() :
throw std::logic_error("Invalid SPU decoder"));

// LS base address
const auto base = vm::_ptr<const u32>(offset);

while (true)
{
LOG_ERROR(SPU, "Invalid SPU decoder mode: %d", (u8)mode);
Emu.Pause();
}
if (!state.load())
{
// Read opcode
const u32 op = base[pc / 4];

// Call interpreter function
table[spu_decode(op)](*this, { op });

// Next instruction
pc += 4;
continue;
}

if (check_status()) return;
}
}

void SPUThread::fast_call(u32 ls_addr)
SPUThread::SPUThread(const std::string & name, u32 index)
: cpu_thread(cpu_type::spu, name)
, index(index)
, offset(vm::alloc(0x40000, vm::main))
{
if (!is_current())
Ensures(offset);
}

SPUThread::~SPUThread()
{
// Deallocate Local Storage
vm::dealloc_verbose_nothrow(offset);
}

void SPUThread::push_snr(u32 number, u32 value)
{
// get channel
const auto channel =
number == 0 ? &ch_snr1 :
number == 1 ? &ch_snr2 : throw EXCEPTION("Unexpected");

// check corresponding SNR register settings
if ((snr_config >> number) & 1)
{
throw EXCEPTION("Called from the wrong thread");
channel->push_or(value);
}
else
{
channel->push(value);
}

// LS:0x0: this is originally the entry point of the interrupt handler, but interrupts are not implemented
_ref<u32>(0) = 0x00000002; // STOP 2

auto old_pc = pc;
auto old_lr = gpr[0]._u32[3];
auto old_stack = gpr[1]._u32[3]; // only saved and restored (may be wrong)
auto old_task = std::move(custom_task);

pc = ls_addr;
gpr[0]._u32[3] = 0x0;
custom_task = nullptr;

try
if (channel->notification_required)
{
cpu_task();
}
catch (CPUThreadReturn)
{
}
// lock for reliable notification
std::lock_guard<std::mutex> lock(mutex);

m_state &= ~CPU_STATE_RETURN;

pc = old_pc;
gpr[0]._u32[3] = old_lr;
gpr[1]._u32[3] = old_stack;
custom_task = std::move(old_task);
cv.notify_one();
}
}

void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
@@ -268,9 +216,9 @@ void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
_mm_mfence();
}

u32 eal = VM_CAST(args.ea);
u32 eal = vm::cast(args.ea, HERE);

if (eal >= SYS_SPU_THREAD_BASE_LOW && m_type == CPU_THREAD_SPU) // SPU Thread Group MMIO (LS and SNR)
if (eal >= SYS_SPU_THREAD_BASE_LOW && offset >= RAW_SPU_BASE_ADDR) // SPU Thread Group MMIO (LS and SNR)
{
const u32 index = (eal - SYS_SPU_THREAD_BASE_LOW) / SYS_SPU_THREAD_OFFSET; // thread number in group
const u32 offset = (eal - SYS_SPU_THREAD_BASE_LOW) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register
@@ -413,7 +361,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}

const u32 raddr = VM_CAST(ch_mfc_args.ea);
const u32 raddr = vm::cast(ch_mfc_args.ea, HERE);

vm::reservation_acquire(vm::base(offset + ch_mfc_args.lsa), raddr, 128);
@@ -434,7 +382,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}

if (vm::reservation_update(VM_CAST(ch_mfc_args.ea), vm::base(offset + ch_mfc_args.lsa), 128))
if (vm::reservation_update(vm::cast(ch_mfc_args.ea, HERE), vm::base(offset + ch_mfc_args.lsa), 128))
{
if (last_raddr == 0)
{
@@ -466,9 +414,9 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}

vm::reservation_op(VM_CAST(ch_mfc_args.ea), 128, [this]()
vm::reservation_op(vm::cast(ch_mfc_args.ea, HERE), 128, [this]()
{
std::memcpy(vm::base_priv(VM_CAST(ch_mfc_args.ea)), vm::base(offset + ch_mfc_args.lsa), 128);
std::memcpy(vm::base_priv(vm::cast(ch_mfc_args.ea, HERE)), vm::base(offset + ch_mfc_args.lsa), 128);
});

if (last_raddr != 0 && vm::g_tls_did_break_reservation)
@@ -539,7 +487,7 @@ void SPUThread::set_events(u32 mask)
}

// set new events, get old event mask
const u32 old_stat = ch_event_stat._or(mask);
const u32 old_stat = ch_event_stat.fetch_or(mask);

// notify if some events were set
if (~old_stat & mask && old_stat & SPU_EVENT_WAITING)
@@ -617,7 +565,10 @@ u32 SPUThread::get_ch_value(u32 ch)

CHECK_EMU_STATUS;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

if (!lock)
{
@@ -658,7 +609,10 @@ u32 SPUThread::get_ch_value(u32 ch)

CHECK_EMU_STATUS;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

if (!lock)
{
@@ -723,14 +677,14 @@ u32 SPUThread::get_ch_value(u32 ch)
if (ch_event_mask & SPU_EVENT_LR)
{
// register waiter if polling reservation status is required
vm::wait_op(*this, last_raddr, 128, WRAP_EXPR(get_events(true) || is_stopped()));
vm::wait_op(*this, last_raddr, 128, WRAP_EXPR(get_events(true) || state & cpu_state::stop));
}
else
{
lock.lock();

// simple waiting loop otherwise
while (!get_events(true) && !is_stopped())
while (!get_events(true) && !(state & cpu_state::stop))
{
CHECK_EMU_STATUS;
@@ -740,7 +694,10 @@ u32 SPUThread::get_ch_value(u32 ch)

ch_event_stat &= ~SPU_EVENT_WAITING;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

return get_events();
}
@@ -767,7 +724,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
// break;
case SPU_WrOutIntrMbox:
{
if (m_type == CPU_THREAD_RAW_SPU)
if (offset >= RAW_SPU_BASE_ADDR)
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
@@ -775,7 +732,10 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
{
CHECK_EMU_STATUS;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

if (!lock)
{
@@ -824,12 +784,12 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
return ch_in_mbox.set_values(1, CELL_ENOTCONN); // TODO: check error passing
}

if (queue->events.size() >= queue->size)
if (queue->events() >= queue->size)
{
return ch_in_mbox.set_values(1, CELL_EBUSY);
}

queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, m_id, ((u64)spup << 32) | (value & 0x00ffffff), data);
queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, id, ((u64)spup << 32) | (value & 0x00ffffff), data);

return ch_in_mbox.set_values(1, CELL_OK);
}
@@ -861,13 +821,13 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}

// TODO: check passing spup value
if (queue->events.size() >= queue->size)
if (queue->events() >= queue->size)
{
LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (queue is full)", spup, (value & 0x00ffffff), data);
return;
}

queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, m_id, ((u64)spup << 32) | (value & 0x00ffffff), data);
queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, id, ((u64)spup << 32) | (value & 0x00ffffff), data);
return;
}
else if (code == 128)
@@ -979,7 +939,10 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
{
CHECK_EMU_STATUS;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

if (!lock)
{
@@ -1136,7 +1099,7 @@ void SPUThread::stop_and_signal(u32 code)
{
LOG_TRACE(SPU, "stop_and_signal(code=0x%x)", code);

if (m_type == CPU_THREAD_RAW_SPU)
if (offset >= RAW_SPU_BASE_ADDR)
{
status.atomic_op([code](u32& status)
{
@@ -1146,8 +1109,7 @@ void SPUThread::stop_and_signal(u32 code)
});

int_ctrl[2].set(SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT);

return stop();
throw cpu_state::stop;
}

switch (code)
@@ -1160,7 +1122,7 @@ void SPUThread::stop_and_signal(u32 code)

case 0x002:
{
m_state |= CPU_STATE_RETURN;
state += cpu_state::ret;
return;
}
@@ -1241,7 +1203,10 @@ void SPUThread::stop_and_signal(u32 code)
{
CHECK_EMU_STATUS;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

group->cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
}
@@ -1253,7 +1218,10 @@ void SPUThread::stop_and_signal(u32 code)

for (auto& thread : group->threads)
{
if (thread) thread->sleep(); // trigger status check
if (thread)
{
thread->state += cpu_state::suspend;
}
}
}
else
@@ -1261,24 +1229,25 @@ void SPUThread::stop_and_signal(u32 code)
throw EXCEPTION("Unexpected SPU Thread Group state (%d)", group->state);
}

if (queue->events.size())
if (queue->events())
{
auto& event = queue->events.front();
const auto event = queue->pop(lv2_lock);
ch_in_mbox.set_values(4, CELL_OK, static_cast<u32>(std::get<1>(event)), static_cast<u32>(std::get<2>(event)), static_cast<u32>(std::get<3>(event)));

queue->events.pop_front();
}
else
{
// add waiter; protocol is ignored in current implementation
sleep_queue_entry_t waiter(*this, queue->sq);
sleep_entry<cpu_thread> waiter(queue->thread_queue(lv2_lock), *this);

// wait on the event queue
while (!unsignal())
while (!state.test_and_reset(cpu_state::signal))
{
CHECK_EMU_STATUS;

if (is_stopped()) throw CPUThreadStop{};
if (state & cpu_state::stop)
{
throw cpu_state::stop;
}

cv.wait(lv2_lock);
}
@@ -1302,9 +1271,14 @@ void SPUThread::stop_and_signal(u32 code)

for (auto& thread : group->threads)
{
if (thread) thread->awake(); // untrigger status check
if (thread && thread.get() != this)
{
thread->state -= cpu_state::suspend;
thread->safe_notify();
}
}

state -= cpu_state::suspend;
group->cv.notify_all();

return;
@@ -1338,7 +1312,8 @@ void SPUThread::stop_and_signal(u32 code)
{
if (thread && thread.get() != this)
{
thread->stop();
thread->state += cpu_state::stop;
thread->safe_notify();
}
}
@@ -1347,7 +1322,7 @@ void SPUThread::stop_and_signal(u32 code)
group->join_state |= SPU_TGJSF_GROUP_EXIT;
group->cv.notify_one();

return stop();
throw cpu_state::stop;
}

case 0x102:
@@ -1373,7 +1348,7 @@ void SPUThread::stop_and_signal(u32 code)
status |= SPU_STATUS_STOPPED_BY_STOP;
group->cv.notify_one();

return stop();
throw cpu_state::stop;
}
}
@@ -1391,7 +1366,7 @@ void SPUThread::halt()
{
LOG_TRACE(SPU, "halt()");

if (m_type == CPU_THREAD_RAW_SPU)
if (offset >= RAW_SPU_BASE_ADDR)
{
status.atomic_op([](u32& status)
{
@@ -1401,18 +1376,41 @@ void SPUThread::halt()

int_ctrl[2].set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);

return stop();
throw cpu_state::stop;
}

status |= SPU_STATUS_STOPPED_BY_HALT;
throw EXCEPTION("Halt");
}

spu_thread::spu_thread(u32 entry, const std::string& name, u32 stack_size, u32 prio)
void SPUThread::fast_call(u32 ls_addr)
{
auto spu = idm::make_ptr<SPUThread>(name, 0x13370666);
// LS:0x0: this is originally the entry point of the interrupt handler, but interrupts are not implemented
_ref<u32>(0) = 0x00000002; // STOP 2

spu->pc = entry;
auto old_pc = pc;
auto old_lr = gpr[0]._u32[3];
auto old_stack = gpr[1]._u32[3]; // only saved and restored (may be wrong)
auto old_task = std::move(custom_task);

thread = std::move(spu);
pc = ls_addr;
gpr[0]._u32[3] = 0x0;
custom_task = nullptr;

try
{
cpu_task();
}
catch (cpu_state _s)
{
state += _s;
if (_s != cpu_state::ret) throw;
}

state -= cpu_state::ret;

pc = old_pc;
gpr[0]._u32[3] = old_lr;
gpr[1]._u32[3] = old_stack;
custom_task = std::move(old_task);
}