Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-07-12)
Bugfix of sys_lwmutex_destroy

parent 06f4dfb9f1
commit 8c3a4c260e

3 changed files with 68 additions and 73 deletions
rpcs3/Emu/Cell/lv2/sys_lwcond.cpp

@@ -140,7 +140,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
 		}
 		else if (mode == 1)
 		{
-			ensure(mutex->add_waiter(result));
+			mutex->add_waiter(result);
 			result = nullptr;
 		}
 	}
@@ -231,7 +231,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 	{
 		ensure(!mutex->signaled);
 		std::lock_guard lock(mutex->mutex);
-		ensure(mutex->add_waiter(cpu));
+		mutex->add_waiter(cpu);
 	}
 	else
 	{
@@ -286,22 +286,8 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		return;
 	}
 
-	// Try to increment lwmutex's lwcond's waiters count
-	if (!mutex->lwcond_waiters.fetch_op([](s32& val)
-	{
-		if (val == smin)
-		{
-			return false;
-		}
-
-		val++;
-		return true;
-	}).second)
-	{
-		// Failed - lwmutex was detroyed and all waiters have quit
-		mutex.reset();
-		return;
-	}
+	// Increment lwmutex's lwcond's waiters count
+	mutex->lwcond_waiters++;
 
 	std::lock_guard lock(cond.mutex);
 
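Why the lwcond side can now increment unconditionally: lwcond_waiters packs two things into one atomic s32. The sign bit (smin) is a notification flag for a destroyer that is draining the object, and the low 31 bits count lwcond waiters; destruction itself is serialized in the _sys_lwmutex_destroy hunk below, so a waiter that already holds a valid reference no longer needs the failing CAS path. A minimal standalone sketch of that encoding, with std::atomic standing in for RPCS3's atomic_t (names and values here are illustrative, not code from the commit):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Sketch only: sign bit = "destroyer waiting" flag, low 31 bits = waiter count.
constexpr std::int32_t smin = INT32_MIN;

int main()
{
	std::atomic<std::int32_t> lwcond_waiters{0};

	// _sys_lwcond_queue_wait (new code): increment unconditionally
	lwcond_waiters++;
	lwcond_waiters++;

	// _sys_lwmutex_destroy: raise the sign bit, observe remaining waiters
	// (fetch_or + OR emulates the or_fetch(smin) used in the commit)
	const std::int32_t old_val = lwcond_waiters.fetch_or(smin) | smin;

	std::printf("lwcond waiters still pending: %d\n", static_cast<int>(old_val & 0x7fff'ffff)); // 2

	// Waiters quit one by one; the value converges to exactly smin
	lwcond_waiters--;
	lwcond_waiters--;
	std::printf("drained: %s\n", lwcond_waiters == smin ? "yes" : "no"); // yes
}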
rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp

@@ -40,56 +40,76 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
 
 	sys_lwmutex.warning("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);
 
-	auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id);
-
-	if (!mutex)
-	{
-		return CELL_ESRCH;
-	}
+	std::shared_ptr<lv2_lwmutex> _mutex;
 
 	while (true)
 	{
-		if (std::scoped_lock lock(mutex->mutex); mutex->sq.empty())
+		s32 old_val = 0;
+
+		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> CellError
 		{
-			// Set "destroyed" bit
-			if (mutex->lwcond_waiters.fetch_or(smin) & 0x7fff'ffff)
+			// Ignore check on first iteration
+			if (_mutex && std::addressof(mutex) != _mutex.get())
 			{
-				// Deschedule if waiters were found
-				lv2_obj::sleep(ppu);
+				// Other thread has destroyed the lwmutex earlier
+				return CELL_ESRCH;
 			}
-		}
-		else
-		{
-			return CELL_EBUSY;
-		}
 
-		// Wait for all lwcond waiters to quit
-		if (const s32 old = mutex->lwcond_waiters; old != smin)
-		{
-			if (old >= 0)
+			std::lock_guard lock(mutex.mutex);
+
+			if (!mutex.sq.empty())
 			{
-				// Sleep queue is no longer empty
-				// Was set to positive value to announce it
-				continue;
+				return CELL_EBUSY;
 			}
 
-			thread_ctrl::wait_on(mutex->lwcond_waiters, old);
+			old_val = mutex.lwcond_waiters.or_fetch(smin);
 
-			if (ppu.is_stopped())
+			if (old_val != smin)
 			{
-				return {};
+				// Deschedule if waiters were found
+				lv2_obj::sleep(ppu);
+
+				// Repeat loop: there are lwcond waiters
+				return CELL_EAGAIN;
 			}
+
+			return {};
+		});
+
+		if (!ptr)
+		{
+			return CELL_ESRCH;
 		}
+
+		if (ret)
+		{
+			if (ret != CELL_EAGAIN)
+			{
+				return ret;
+			}
+		}
 		else
 		{
 			break;
 		}
-	}
 
-	if (!idm::remove_verify<lv2_obj, lv2_lwmutex>(lwmutex_id, std::move(mutex)))
-	{
-		// Other thread has destroyed the lwmutex earlier
-		return CELL_ESRCH;
+		_mutex = std::move(ptr);
+
+		// Wait for all lwcond waiters to quit
+		while (old_val + 0u > 1u << 31)
+		{
+			thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);
+
+			if (ppu.is_stopped())
+			{
+				return {};
+			}
+
+			old_val = _mutex->lwcond_waiters;
+		}
+
+		// Wake up from sleep
+		ppu.check_state();
 	}
 
 	return CELL_OK;
@@ -133,17 +153,12 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 			return true;
 		}
 
-		if (!mutex.add_waiter(&ppu))
-		{
-			ppu.gpr[3] = CELL_ESRCH;
-			return true;
-		}
-
+		mutex.add_waiter(&ppu);
 		mutex.sleep(ppu, timeout);
 		return false;
 	});
 
-	if (!mutex || ppu.gpr[3] == CELL_ESRCH)
+	if (!mutex)
 	{
 		return CELL_ESRCH;
 	}
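The rewritten destroy path closes a race between two concurrent destroyers: the sleep-queue check, the or_fetch(smin) that raises the notification bit, and the removal from the ID table all happen inside one idm::withdraw call, so the loser of the race fails the std::addressof identity check and gets CELL_ESRCH instead of racing a separate remove_verify. CELL_EAGAIN only restarts the outer loop; between restarts the destroyer blocks until lwcond_waiters drains to exactly smin. A minimal sketch of that drain handshake under stated assumptions (no ID table or PPU scheduling; C++20 std::atomic wait/notify stand in for thread_ctrl::wait_on and lwcond_waiters.notify_all):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

constexpr std::int32_t smin = INT32_MIN;

int main()
{
	std::atomic<std::int32_t> lwcond_waiters{3}; // three hypothetical lwcond waiters

	std::thread waiters([&]
	{
		for (int i = 0; i < 3; i++)
		{
			// Each quitting waiter decrements the count and wakes the destroyer
			lwcond_waiters--;
			lwcond_waiters.notify_all();
		}
	});

	// Destroyer: raise the notification bit, keep the value it produced
	std::int32_t old_val = lwcond_waiters.fetch_or(smin) | smin;

	// Same condition as "old_val + 0u > 1u << 31": counter bits still set
	while (static_cast<std::uint32_t>(old_val) > 1u << 31)
	{
		lwcond_waiters.wait(old_val); // blocks only while the value is unchanged
		old_val = lwcond_waiters;
	}

	waiters.join();
	std::printf("all lwcond waiters quit: %s\n", old_val == smin ? "yes" : "no");
}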
rpcs3/Emu/Cell/lv2/sys_lwmutex.h

@@ -71,10 +71,10 @@ struct lv2_lwmutex final : lv2_obj
 	{
 	}
 
-	// Try to add a waiter
-	bool add_waiter(cpu_thread* cpu)
+	// Add a waiter
+	void add_waiter(cpu_thread* cpu)
 	{
-		if (const auto old = lwcond_waiters.fetch_op([](s32& val)
+		const bool notify = lwcond_waiters.fetch_op([](s32& val)
 		{
 			if (val + 0u <= 1u << 31)
 			{
@@ -83,24 +83,18 @@ struct lv2_lwmutex final : lv2_obj
 			}
 
 			// lwmutex was set to be destroyed, but there are lwcond waiters
-			// Turn off the "destroying" bit as we are adding an lwmutex waiter
+			// Turn off the "lwcond_waiters notification" bit as we are adding an lwmutex waiter
 			val &= 0x7fff'ffff;
 			return true;
-		}).first; old != smin)
-		{
-			sq.emplace_back(cpu);
+		}).second;
 
-			if (old < 0)
-			{
-				// Notify lwmutex destroyer (may cause EBUSY to be returned for it)
-				lwcond_waiters.notify_all();
-			}
+		sq.emplace_back(cpu);
 
-			return true;
-		}
-
-		// Failed - lwmutex was set to be destroyed and all lwcond waiters quit
-		return false;
+		if (notify)
+		{
+			// Notify lwmutex destroyer (may cause EBUSY to be returned for it)
+			lwcond_waiters.notify_all();
+		}
 	}
 };
 
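add_waiter now always enqueues; the .second of fetch_op only reports whether the notification bit had to be cleared, meaning a destroyer is parked on lwcond_waiters and must be woken so it can re-check the sleep queue and return CELL_EBUSY. A standalone sketch of the assumed fetch_op contract (run the functor on a copy, skip the store when it returns false, report whether the store happened); this stand-in is not the RPCS3 implementation:

#include <atomic>
#include <cstdint>
#include <utility>

// Assumed semantics of RPCS3's atomic_t<>::fetch_op, as a plain CAS loop
template <typename F>
std::pair<std::int32_t, bool> fetch_op(std::atomic<std::int32_t>& a, F func)
{
	std::int32_t old = a.load();

	while (true)
	{
		std::int32_t next = old;

		if (!func(next))
		{
			return {old, false}; // functor refused: nothing stored
		}

		if (a.compare_exchange_weak(old, next))
		{
			return {old, true}; // mutation applied atomically
		}
	}
}

int main()
{
	constexpr std::int32_t smin = INT32_MIN;

	// A destroyer is parked, waiting on two lwcond waiters: sign bit + count
	std::atomic<std::int32_t> lwcond_waiters{smin + 2};

	// add_waiter (new code): clear the notification bit if it is raised
	const bool notify = fetch_op(lwcond_waiters, [](std::int32_t& val)
	{
		if (static_cast<std::uint32_t>(val) <= 1u << 31)
		{
			return false; // no destroyer to notify
		}

		val &= 0x7fff'ffff; // turn off the "lwcond_waiters notification" bit
		return true;
	}).second;

	if (notify)
	{
		lwcond_waiters.notify_all(); // destroyer re-checks sq and returns EBUSY
	}
}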