Mirror of https://github.com/RPCS3/rpcs3.git, synced 2025-07-12 09:48:37 +12:00
Commit 8c3a4c260e: Bugfix of sys_lwmutex_destroy
Parent: 06f4dfb9f1
3 changed files with 68 additions and 73 deletions
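In short: _sys_lwmutex_destroy used to mark the lwmutex as destroyed by setting the sign bit of lwcond_waiters and then removed the object in a separate idm::remove_verify step, leaving a window in which lv2_lwmutex::add_waiter could fail and every caller needed an ESRCH fallback. The destroy path now performs the sleep-queue check, the lwcond_waiters update, and the removal atomically inside a single idm::withdraw callback, retrying with CELL_EAGAIN while lwcond waiters remain; add_waiter becomes infallible as a result.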
rpcs3/Emu/Cell/lv2/sys_lwcond.cpp

@@ -140,7 +140,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
 				}
 				else if (mode == 1)
 				{
-					ensure(mutex->add_waiter(result));
+					mutex->add_waiter(result);
 					result = nullptr;
 				}
 			}
@@ -231,7 +231,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		{
 			ensure(!mutex->signaled);
 			std::lock_guard lock(mutex->mutex);
-			ensure(mutex->add_waiter(cpu));
+			mutex->add_waiter(cpu);
 		}
 		else
 		{
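Both call sites above drop the ensure() wrapper because add_waiter, changed in the sys_lwmutex.h hunks below, now returns void and can no longer fail.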
@@ -286,22 +286,8 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 			return;
 		}
 
-		// Try to increment lwmutex's lwcond's waiters count
-		if (!mutex->lwcond_waiters.fetch_op([](s32& val)
-		{
-			if (val == smin)
-			{
-				return false;
-			}
-
-			val++;
-			return true;
-		}).second)
-		{
-			// Failed - lwmutex was detroyed and all waiters have quit
-			mutex.reset();
-			return;
-		}
+		// Increment lwmutex's lwcond's waiters count
+		mutex->lwcond_waiters++;
 
 		std::lock_guard lock(cond.mutex);
 
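For context on why a bare increment is now sufficient here: lwcond_waiters is a signed 32-bit counter whose sign bit doubles as a "destroyer is draining" flag. Below is a minimal, self-contained C++20 model of that scheme; the function names are invented for illustration, and std::atomic's wait/notify stands in for RPCS3's thread_ctrl::wait_on, so this is a sketch of the protocol rather than emulator code.

#include <atomic>
#include <cstdint>
#include <limits>

constexpr int32_t smin = std::numeric_limits<int32_t>::min();

std::atomic<int32_t> lwcond_waiters{0};

// Waiter side: with destruction serialized by the registry (idm::withdraw),
// registering is a plain increment, the simplification this commit makes.
void waiter_enter()
{
	lwcond_waiters++;
}

// Waiter side: if a destroyer has set the sign bit and this was the last
// waiter, the stored value is now exactly smin, so wake the destroyer.
void waiter_leave()
{
	if (lwcond_waiters.fetch_sub(1) == smin + 1)
	{
		lwcond_waiters.notify_all();
	}
}

// Destroyer side: set the sign bit, then block until every waiter has left
// (value == smin), mirroring the or_fetch(smin)/wait_on loop in the new code.
void destroyer_drain()
{
	int32_t v = lwcond_waiters.fetch_or(smin) | smin;

	while (v != smin)
	{
		lwcond_waiters.wait(v); // C++20 atomic wait, in place of thread_ctrl::wait_on
		v = lwcond_waiters.load();
	}
}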
rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp

@@ -40,56 +40,76 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
 
 	sys_lwmutex.warning("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);
 
-	auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id);
-
-	if (!mutex)
-	{
-		return CELL_ESRCH;
-	}
+	std::shared_ptr<lv2_lwmutex> _mutex;
 
 	while (true)
 	{
-		if (std::scoped_lock lock(mutex->mutex); mutex->sq.empty())
+		s32 old_val = 0;
+
+		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> CellError
 		{
-			// Set "destroyed" bit
-			if (mutex->lwcond_waiters.fetch_or(smin) & 0x7fff'ffff)
+			// Ignore check on first iteration
+			if (_mutex && std::addressof(mutex) != _mutex.get())
+			{
+				// Other thread has destroyed the lwmutex earlier
+				return CELL_ESRCH;
+			}
+
+			std::lock_guard lock(mutex.mutex);
+
+			if (!mutex.sq.empty())
+			{
+				return CELL_EBUSY;
+			}
+
+			old_val = mutex.lwcond_waiters.or_fetch(smin);
+
+			if (old_val != smin)
 			{
 				// Deschedule if waiters were found
 				lv2_obj::sleep(ppu);
+
+				// Repeat loop: there are lwcond waiters
+				return CELL_EAGAIN;
 			}
-		}
-		else
-		{
-			return CELL_EBUSY;
-		}
 
-		// Wait for all lwcond waiters to quit
-		if (const s32 old = mutex->lwcond_waiters; old != smin)
-		{
-			if (old >= 0)
-			{
-				// Sleep queue is no longer empty
-				// Was set to positive value to announce it
-				continue;
-			}
+			return {};
+		});
 
-			thread_ctrl::wait_on(mutex->lwcond_waiters, old);
+		if (!ptr)
+		{
+			return CELL_ESRCH;
+		}
 
-			if (ppu.is_stopped())
+		if (ret)
+		{
+			if (ret != CELL_EAGAIN)
 			{
-				return {};
+				return ret;
 			}
 		}
 		else
 		{
 			break;
 		}
-	}
 
-	if (!idm::remove_verify<lv2_obj, lv2_lwmutex>(lwmutex_id, std::move(mutex)))
-	{
-		// Other thread has destroyed the lwmutex earlier
-		return CELL_ESRCH;
+		_mutex = std::move(ptr);
+
+		// Wait for all lwcond waiters to quit
+		while (old_val + 0u > 1u << 31)
+		{
+			thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);
+
+			if (ppu.is_stopped())
+			{
+				return {};
+			}
+
+			old_val = _mutex->lwcond_waiters;
+		}
+
+		// Wake up from sleep
+		ppu.check_state();
 	}
 
 	return CELL_OK;
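The structural point of the rewrite above: the sq.empty() check, the or_fetch(smin) on lwcond_waiters, and the removal from the ID manager now happen under one idm::withdraw call, instead of the old get / check / remove_verify sequence that another thread could interleave with. A stripped-down, hypothetical model of that withdraw-with-veto pattern (toy registry, int error codes; RPCS3's idm is considerably more involved):

#include <map>
#include <memory>
#include <mutex>
#include <utility>

template <typename T>
struct toy_registry
{
	std::mutex mtx;
	std::map<unsigned, std::shared_ptr<T>> objects;

	// Look up id and run `check` under the registry lock. The callback may
	// veto removal by returning nonzero; the object then stays registered,
	// but the caller still gets a pointer so it can e.g. wait on it.
	template <typename F>
	std::pair<std::shared_ptr<T>, int> withdraw(unsigned id, F&& check)
	{
		std::lock_guard lock(mtx);

		const auto found = objects.find(id);

		if (found == objects.end())
		{
			return {nullptr, -1}; // no such id (CELL_ESRCH in the real code)
		}

		if (int err = check(*found->second))
		{
			return {found->second, err}; // vetoed (think CELL_EBUSY / CELL_EAGAIN)
		}

		auto ptr = std::move(found->second);
		objects.erase(found);

		return {std::move(ptr), 0}; // removal committed atomically with the check
	}
};

Because the callback runs under the registry lock, CELL_EBUSY and the CELL_EAGAIN retry are decided against a state that cannot change before the removal commits.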
@@ -133,17 +153,12 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 			return true;
 		}
 
-		if (!mutex.add_waiter(&ppu))
-		{
-			ppu.gpr[3] = CELL_ESRCH;
-			return true;
-		}
-
+		mutex.add_waiter(&ppu);
 		mutex.sleep(ppu, timeout);
 		return false;
 	});
 
-	if (!mutex || ppu.gpr[3] == CELL_ESRCH)
+	if (!mutex)
 	{
 		return CELL_ESRCH;
 	}
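Correspondingly, _sys_lwmutex_lock no longer needs its fallback: ppu.gpr[3] carries the PPU-side syscall return value, and it only had to be forced to CELL_ESRCH here because the old add_waiter could observe a mutex already marked for destruction. With that failure mode gone, the plain add_waiter call suffices.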
rpcs3/Emu/Cell/lv2/sys_lwmutex.h

@@ -71,10 +71,10 @@ struct lv2_lwmutex final : lv2_obj
 	{
 	}
 
-	// Try to add a waiter
-	bool add_waiter(cpu_thread* cpu)
+	// Add a waiter
+	void add_waiter(cpu_thread* cpu)
 	{
-		if (const auto old = lwcond_waiters.fetch_op([](s32& val)
+		const bool notify = lwcond_waiters.fetch_op([](s32& val)
 		{
 			if (val + 0u <= 1u << 31)
 			{
@@ -83,24 +83,18 @@ struct lv2_lwmutex final : lv2_obj
 			}
 
 			// lwmutex was set to be destroyed, but there are lwcond waiters
-			// Turn off the "destroying" bit as we are adding an lwmutex waiter
+			// Turn off the "lwcond_waiters notification" bit as we are adding an lwmutex waiter
 			val &= 0x7fff'ffff;
 			return true;
-		}).first; old != smin)
+		}).second;
+
+		sq.emplace_back(cpu);
+
+		if (notify)
 		{
-			sq.emplace_back(cpu);
-
-			if (old < 0)
-			{
-				// Notify lwmutex destroyer (may cause EBUSY to be returned for it)
-				lwcond_waiters.notify_all();
-			}
-
-			return true;
+			// Notify lwmutex destroyer (may cause EBUSY to be returned for it)
+			lwcond_waiters.notify_all();
 		}
-
-		// Failed - lwmutex was set to be destroyed and all lwcond waiters quit
-		return false;
 	}
 };
 
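The switch from .first to .second makes sense given fetch_op's contract; a minimal stand-in (an assumed shape matching how the helper is used here, not RPCS3's actual implementation):

#include <atomic>
#include <utility>

// Apply `func` to the stored value in a CAS loop. `func` mutates its argument
// and returns whether the new value should be stored. Result: .first is the
// observed old value, .second is whether the store actually happened.
template <typename T, typename F>
std::pair<T, bool> fetch_op(std::atomic<T>& atom, F func)
{
	T old = atom.load();

	for (;;)
	{
		T next = old;

		if (!func(next))
		{
			return {old, false}; // predicate declined; value left unchanged
		}

		if (atom.compare_exchange_weak(old, next))
		{
			return {old, true};
		}
	}
}

The old code keyed on .first (the previous value) to detect smin and fail; the new code only needs .second to learn whether the notification bit was actually cleared, since insertion into sq is now unconditional. Note the unsigned trick in the lambda: val + 0u <= 1u << 31 holds exactly when val is non-negative or equal to smin, i.e. when there is no notification bit to clear.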