SPU: Implement events channel count, minor interrupt fixes

Authored by eladash on 2018-08-06 10:19:47 +03:00; committed by kd-11
parent cdc3ee6c1c
commit 36ac68b436
7 changed files with 160 additions and 267 deletions

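Throughout the diff, the separate ch_event_stat / ch_event_mask atomics (plus the SPU_EVENT_WAITING flag) are folded into a single ch_events value with fields events, mask, count and waiting, updated as one unit. Its definition is not part of this commit view; the following is only a rough sketch of what such a packed state could look like, using std::atomic in place of RPCS3's atomic_t, with field widths and layout assumed rather than taken from the project:

#include <atomic>
#include <cstdint>

// Sketch only: the field names follow the diff, the widths and layout are guesses.
struct ch_events_t
{
    std::uint32_t events;  // pending SPU_EVENT_* bits
    std::uint16_t mask;    // bits enabled via SPU_WrEventMask
    bool count;            // SPU_RdEventStat channel count (0 or 1)
    bool waiting;          // a reader is blocked on the channel
};

// The whole channel state fits in 8 bytes, so a single atomic read-modify-write
// can update events, count and waiting consistently without a lock.
std::atomic<ch_events_t> ch_events{ ch_events_t{} };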

@@ -858,8 +858,11 @@ std::string spu_thread::dump_regs() const
fmt::append(ret, "r%d: %s\n", i, gpr[i]);
}
fmt::append(ret, "\nEvent Stat: 0x%x\n", +ch_event_stat);
fmt::append(ret, "Event Mask: 0x%x\n", +ch_event_mask);
const auto events = ch_events.load();
fmt::append(ret, "\nEvent Stat: 0x%x\n", events.events);
fmt::append(ret, "Event Mask: 0x%x\n", events.mask);
fmt::append(ret, "Event Count: %u\n", events.count);
fmt::append(ret, "SRR0: 0x%05x\n", srr0);
fmt::append(ret, "Stall Stat: %s\n", ch_stall_stat);
fmt::append(ret, "Stall Mask: 0x%x\n", ch_stall_mask);
@@ -872,7 +875,7 @@ std::string spu_thread::dump_regs() const
fmt::append(ret, "Reservation Addr: none\n");
fmt::append(ret, "Atomic Stat: %s\n", ch_atomic_stat); // TODO: use mfc_atomic_status formatting
fmt::append(ret, "Interrupts Enabled: %s\n", interrupts_enabled.load());
fmt::append(ret, "Interrupts: %s\n", interrupts_enabled ? "Enabled" : "Disabled");
fmt::append(ret, "Inbound Mailbox: %s\n", ch_in_mbox);
fmt::append(ret, "Out Mailbox: %s\n", ch_out_mbox);
fmt::append(ret, "Out Interrupts Mailbox: %s\n", ch_out_intr_mbox);
@@ -975,9 +978,8 @@ void spu_thread::cpu_init()
ch_out_mbox.data.raw() = {};
ch_out_intr_mbox.data.raw() = {};
-ch_event_mask.raw() = 0;
-ch_event_stat.raw() = 0;
-interrupts_enabled.raw() = false;
+ch_events.raw() = {};
+interrupts_enabled = false;
raddr = 0;
ch_dec_start_timestamp = get_timebased_time();
@@ -1805,7 +1807,7 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
if (!ch_stall_stat.get_count())
{
-ch_event_stat |= SPU_EVENT_SN;
+set_events(SPU_EVENT_SN);
}
ch_stall_stat.set_value(utils::rol32(1, args.tag) | ch_stall_stat.get_value());
@@ -1926,7 +1928,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
// Last check for event before we clear the reservation
if (raddr == addr || rtime != (vm::reservation_acquire(raddr, 128) & (-128 | vm::dma_lockb)) || !cmp_rdata(rdata, vm::_ref<decltype(rdata)>(raddr)))
{
-ch_event_stat |= SPU_EVENT_LR;
+set_events(SPU_EVENT_LR);
}
}
@@ -2137,9 +2139,8 @@ void spu_thread::do_mfc(bool wait)
bool spu_thread::check_mfc_interrupts(u32 next_pc)
{
-if (interrupts_enabled && (ch_event_mask & ch_event_stat & SPU_EVENT_INTR_IMPLEMENTED) > 0)
+if (ch_events.load().count && std::exchange(interrupts_enabled, false))
{
-interrupts_enabled.release(false);
srr0 = next_pc;
// Test for BR/BRA instructions (they are equivalent at zero pc)
@@ -2251,7 +2252,7 @@ bool spu_thread::process_mfc_cmd()
// Last check for event before we replace the reservation with a new one
if ((vm::reservation_acquire(raddr, 128) & (-128 | vm::dma_lockb)) != rtime || !cmp_rdata(rdata, vm::_ref<decltype(rdata)>(raddr)))
{
-ch_event_stat |= SPU_EVENT_LR;
+set_events(SPU_EVENT_LR);
}
}
else if (raddr == addr)
@@ -2259,7 +2260,7 @@ bool spu_thread::process_mfc_cmd()
// Lost previous reservation on polling
if (ntime != rtime || !cmp_rdata(rdata, dst))
{
-ch_event_stat |= SPU_EVENT_LR;
+set_events(SPU_EVENT_LR);
}
}
@@ -2413,19 +2414,19 @@ bool spu_thread::process_mfc_cmd()
ch_mfc_cmd.cmd, ch_mfc_cmd.lsa, ch_mfc_cmd.eal, ch_mfc_cmd.tag, ch_mfc_cmd.size);
}
-u32 spu_thread::get_events(u32 mask_hint, bool waiting)
+spu_thread::ch_events_t spu_thread::get_events(u32 mask_hint, bool waiting, bool reading)
{
-const u32 mask1 = ch_event_mask;
-if (mask1 & ~SPU_EVENT_IMPLEMENTED)
+if (auto mask1 = ch_events.load().mask; mask1 & ~SPU_EVENT_IMPLEMENTED)
{
fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, mask1);
}
+u32 collect = 0;
// Check reservation status and set SPU_EVENT_LR if lost
if (mask_hint & SPU_EVENT_LR && raddr && ((vm::reservation_acquire(raddr, sizeof(rdata)) & -128) != rtime || !cmp_rdata(rdata, vm::_ref<decltype(rdata)>(raddr))))
{
-ch_event_stat |= SPU_EVENT_LR;
+collect |= SPU_EVENT_LR;
raddr = 0;
}
@@ -2436,42 +2437,47 @@ u32 spu_thread::get_events(u32 mask_hint, bool waiting)
{
// Set next event to the next time the decrementer underflows
ch_dec_start_timestamp -= res << 32;
-if ((ch_event_stat & SPU_EVENT_TM) == 0)
-{
-ch_event_stat |= SPU_EVENT_TM;
-}
+collect |= SPU_EVENT_TM;
}
}
-// Simple polling or polling with atomically set/removed SPU_EVENT_WAITING flag
-return !waiting ? ch_event_stat & mask1 : ch_event_stat.atomic_op([&](u32& stat) -> u32
+if (collect)
{
-if (u32 res = stat & mask1)
-{
-stat &= ~SPU_EVENT_WAITING;
-return res;
-}
+set_events(collect);
}
-stat |= SPU_EVENT_WAITING;
-return 0;
-});
+return ch_events.fetch_op([&](ch_events_t& events)
+{
+if (!reading)
+return false;
+if (waiting)
+events.waiting = !events.count;
+events.count = false;
+return true;
+}).first;
}
-void spu_thread::set_events(u32 mask)
+void spu_thread::set_events(u32 bits)
{
-if (mask & ~SPU_EVENT_IMPLEMENTED)
-{
-fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, mask);
-}
+ASSUME(!(bits & ~0xffff));
-// Set new events, get old event mask
-const u32 old_stat = ch_event_stat.fetch_or(mask);
-// Notify if some events were set
-if (~old_stat & mask && old_stat & SPU_EVENT_WAITING && ch_event_stat & SPU_EVENT_WAITING)
+if (ch_events.atomic_op([&](ch_events_t& events)
{
-notify();
+events.events |= bits;
+// If one masked event was fired, set the channel count (even if the event bit was already 1)
+if (events.mask & bits)
+{
+events.count = true;
+return !!events.waiting;
+}
+return false;
+}))
+{
+// Preserved for external events implementation
+//notify();
}
}
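The atomic_op/fetch_op calls above are RPCS3's lock-free read-modify-write helpers over that packed value. As a rough illustration of the new set_events logic in standard C++ (a compare-exchange loop over the ch_events_t sketched earlier; the function name is made up for this example):

// Illustrative equivalent of the new set_events(): record the bits, raise the
// channel count when a masked event fired, and report whether a sleeping reader
// would need to be woken (the diff keeps that notify() path commented out).
bool raise_events(std::atomic<ch_events_t>& ch, std::uint32_t bits)
{
    ch_events_t old = ch.load();
    ch_events_t desired;
    do
    {
        desired = old;
        desired.events |= bits;
        if (desired.mask & bits)
            desired.count = true; // even if the event bit was already set
    }
    while (!ch.compare_exchange_weak(old, desired));

    return (old.mask & bits) && old.waiting;
}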
@@ -2480,17 +2486,13 @@ void spu_thread::set_interrupt_status(bool enable)
if (enable)
{
// Detect enabling interrupts with events masked
-if (ch_event_mask & ~SPU_EVENT_INTR_IMPLEMENTED)
+if (auto mask = ch_events.load().mask; mask & ~SPU_EVENT_INTR_IMPLEMENTED)
{
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, +ch_event_mask);
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, mask);
}
+}
-interrupts_enabled = true;
-}
-else
-{
-interrupts_enabled = false;
-}
+interrupts_enabled = enable;
}
u32 spu_thread::get_ch_count(u32 ch)
@@ -2508,7 +2510,7 @@ u32 spu_thread::get_ch_count(u32 ch)
case SPU_RdSigNotify1: return ch_snr1.get_count();
case SPU_RdSigNotify2: return ch_snr2.get_count();
case MFC_RdAtomicStat: return ch_atomic_stat.get_count();
-case SPU_RdEventStat: return get_events() != 0;
+case SPU_RdEventStat: return get_events().count;
case MFC_Cmd: return 16 - mfc_size;
}
@@ -2645,18 +2647,17 @@ s64 spu_thread::get_ch_value(u32 ch)
case SPU_RdEventMask:
{
-return ch_event_mask;
+return ch_events.load().mask;
}
case SPU_RdEventStat:
{
-const u32 mask1 = ch_event_mask;
+const u32 mask1 = ch_events.load().mask;
+auto events = get_events(mask1, false, true);
-u32 res = get_events(mask1);
-if (res)
+if (events.count)
{
-return res;
+return events.events & mask1;
}
spu_function_logger logger(*this, "MFC Events read");
@@ -2669,7 +2670,7 @@ s64 spu_thread::get_ch_value(u32 ch)
fmt::throw_exception("Not supported: event mask 0x%x" HERE, mask1);
}
-while (res = get_events(mask1), !res)
+for (; !events.count; events = get_events(mask1, false, true))
{
state += cpu_flag::wait;
@@ -2682,10 +2683,10 @@ s64 spu_thread::get_ch_value(u32 ch)
}
check_state();
-return res;
+return events.events & mask1;
}
-while (res = get_events(mask1, true), !res)
+for (; !events.count; events = get_events(mask1, true, true))
{
state += cpu_flag::wait;
@@ -2698,7 +2699,7 @@ s64 spu_thread::get_ch_value(u32 ch)
}
check_state();
-return res;
+return events.events & mask1;
}
case SPU_RdMachStat:
@@ -3006,7 +3007,28 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventMask:
{
-ch_event_mask = value;
-get_events(value);
+if (ch_events.atomic_op([&](ch_events_t& events)
+{
+events.mask = value;
+if (events.events & events.mask)
+{
+events.count = true;
+return true;
+}
+return false;
+}))
+{
+// Check interrupts in case count is 1
+if (check_mfc_interrupts(pc + 4))
+{
+spu_runtime::g_escape(this);
+}
+}
return true;
}
@@ -3014,7 +3036,27 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
{
// "Collect" events before final acknowledgment
get_events(value);
-ch_event_stat &= ~value;
+if (ch_events.atomic_op([&](ch_events_t& events)
+{
+events.events &= ~value;
+if (events.events & events.mask)
+{
+events.count = true;
+return true;
+}
+return false;
+}))
+{
+// Check interrupts in case count is 1
+if (check_mfc_interrupts(pc + 4))
+{
+spu_runtime::g_escape(this);
+}
+}
return true;
}
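Both SPU_WrEventMask and SPU_WrEventAck above share one invariant: after the mask or the pending bits change, the channel count is raised whenever (events & mask) is non-zero, and MFC interrupts are re-checked because the count may have just become 1. A compact sketch of that shared step, under the same assumptions as the earlier ch_events_t sketch (the helper name is invented for illustration):

// Apply a mutation to the packed state (e.g. store a new mask, or clear
// acknowledged event bits), then raise the channel count if a masked event is
// still pending. Returns true so the caller can re-run check_mfc_interrupts(pc + 4),
// mirroring the "Check interrupts in case count is 1" branches in the diff.
template <typename Mutate>
bool write_and_recount(std::atomic<ch_events_t>& ch, Mutate mutate)
{
    ch_events_t old = ch.load();
    ch_events_t desired;
    do
    {
        desired = old;
        mutate(desired);
        if (desired.events & desired.mask)
            desired.count = true;
    }
    while (!ch.compare_exchange_weak(old, desired));

    return (desired.events & desired.mask) != 0;
}

For example, the SPU_WrEventAck branch corresponds to write_and_recount(ch_events, [&](ch_events_t& e) { e.events &= ~value; }).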