Make controller button code thread-safe (#405)
* Refactor spinlock to meet Lockable requirements
* Input: Refactor button code and make it thread-safe
commit 028b3f7992 (parent c40466f3a8)
28 changed files with 311 additions and 220 deletions
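The first bullet is what enables everything below: the spinlock's methods were renamed from acquire()/release()/tryAcquire()/isHolding() to lock()/unlock()/try_lock()/is_locked(), the naming that the C++ named requirement Lockable expects. A Lockable type also works with standard RAII wrappers such as std::lock_guard and std::unique_lock. For orientation, here is a minimal sketch of a Lockable-conforming spinlock; the class and method names mirror the calls visible in this diff, but the internals are an assumption for illustration, not Cemu's actual implementation (std::atomic_flag::test requires C++20).

#include <atomic>

// Minimal sketch of a Lockable-conforming spinlock (assumed internals,
// not Cemu's code). Method names match the calls seen in this diff.
class FSpinlock
{
public:
    void lock()
    {
        // spin until we transition the flag from clear to set
        while (m_flag.test_and_set(std::memory_order_acquire))
            ;
    }

    bool try_lock()
    {
        // single attempt, no spinning; true means the lock was taken
        return !m_flag.test_and_set(std::memory_order_acquire);
    }

    void unlock()
    {
        m_flag.clear(std::memory_order_release);
    }

    bool is_locked() const
    {
        // only reports that the lock is held by *someone*,
        // not that the calling thread holds it
        return m_flag.test(std::memory_order_relaxed);
    }

private:
    std::atomic_flag m_flag; // value-initialized to clear since C++20
};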
@@ -24,28 +24,28 @@ class DebugSymbolStorage
 public:
     static void StoreDataType(MPTR address, DEBUG_SYMBOL_TYPE type)
     {
-        s_lock.acquire();
+        s_lock.lock();
         s_typeStorage[address] = type;
-        s_lock.release();
+        s_lock.unlock();
     }

     static DEBUG_SYMBOL_TYPE GetDataType(MPTR address)
     {
-        s_lock.acquire();
+        s_lock.lock();
         auto itr = s_typeStorage.find(address);
         if (itr == s_typeStorage.end())
         {
-            s_lock.release();
+            s_lock.unlock();
             return DEBUG_SYMBOL_TYPE::UNDEFINED;
         }
         DEBUG_SYMBOL_TYPE t = itr->second;
-        s_lock.release();
+        s_lock.unlock();
         return t;
     }

     static void ClearRange(MPTR address, uint32 length)
     {
-        s_lock.acquire();
+        s_lock.lock();
         while (length > 0)
         {
             auto itr = s_typeStorage.find(address);
@@ -54,7 +54,7 @@ public:
             address += 4;
             length -= 4;
         }
-        s_lock.release();
+        s_lock.unlock();
     }

 private:
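Because the lock now satisfies Lockable, a call site like DebugSymbolStorage::GetDataType above could equally be written with std::lock_guard, which unlocks on every return path automatically. The commit itself keeps explicit lock()/unlock() calls; the RAII variant below is an illustration of what the rename makes possible, not code from the commit.

#include <mutex>

static DEBUG_SYMBOL_TYPE GetDataType(MPTR address)
{
    // RAII guard: unlock() runs automatically on every return path,
    // so the early return cannot leak the lock
    std::lock_guard<FSpinlock> guard(s_lock);
    auto itr = s_typeStorage.find(address);
    if (itr == s_typeStorage.end())
        return DEBUG_SYMBOL_TYPE::UNDEFINED;
    return itr->second;
}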
@@ -129,7 +129,7 @@ FSpinlock sTimerSpinlock;
 // thread safe
 uint64 PPCTimer_getFromRDTSC()
 {
-    sTimerSpinlock.acquire();
+    sTimerSpinlock.lock();
     _mm_mfence();
     uint64 rdtscCurrentMeasure = __rdtsc();
     uint64 rdtscDif = rdtscCurrentMeasure - _rdtscLastMeasure;
@@ -165,6 +165,6 @@ uint64 PPCTimer_getFromRDTSC()

     _tickSummary += elapsedTick;

-    sTimerSpinlock.release();
+    sTimerSpinlock.unlock();
     return _tickSummary;
 }
@@ -47,20 +47,20 @@ void PPCRecompiler_visitAddressNoBlock(uint32 enterAddress)
     if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] != PPCRecompiler_leaveRecompilerCode_unvisited)
         return;
     // try to acquire lock
-    if (!PPCRecompilerState.recompilerSpinlock.tryAcquire())
+    if (!PPCRecompilerState.recompilerSpinlock.try_lock())
         return;
     auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4];
     if (funcPtr != PPCRecompiler_leaveRecompilerCode_unvisited)
     {
         // was visited since previous check
-        PPCRecompilerState.recompilerSpinlock.release();
+        PPCRecompilerState.recompilerSpinlock.unlock();
         return;
     }
     // add to recompilation queue and flag as visited
     PPCRecompilerState.targetQueue.emplace(enterAddress);
     ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] = PPCRecompiler_leaveRecompilerCode_visited;

-    PPCRecompilerState.recompilerSpinlock.release();
+    PPCRecompilerState.recompilerSpinlock.unlock();
 }

 void PPCRecompiler_recompileIfUnvisited(uint32 enterAddress)
@@ -193,13 +193,13 @@ PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PP
 bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFunctionBoundaryTracker::PPCRange_t& range, PPCRecFunction_t* ppcRecFunc, std::vector<std::pair<MPTR, uint32>>& entryPoints)
 {
     // update jump table
-    PPCRecompilerState.recompilerSpinlock.acquire();
+    PPCRecompilerState.recompilerSpinlock.lock();

     // check if the initial entrypoint is still flagged for recompilation
     // its possible that the range has been invalidated during the time it took to translate the function
     if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[initialEntryPoint / 4] != PPCRecompiler_leaveRecompilerCode_visited)
     {
-        PPCRecompilerState.recompilerSpinlock.release();
+        PPCRecompilerState.recompilerSpinlock.unlock();
         return false;
     }

@@ -221,7 +221,7 @@ bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFun
     PPCRecompilerState.invalidationRanges.clear();
     if (isInvalidated)
     {
-        PPCRecompilerState.recompilerSpinlock.release();
+        PPCRecompilerState.recompilerSpinlock.unlock();
         return false;
     }

@@ -249,7 +249,7 @@ bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFun
     {
         r.storedRange = rangeStore_ppcRanges.storeRange(ppcRecFunc, r.ppcAddress, r.ppcAddress + r.ppcSize);
     }
-    PPCRecompilerState.recompilerSpinlock.release();
+    PPCRecompilerState.recompilerSpinlock.unlock();


     return true;
@@ -272,13 +272,13 @@ void PPCRecompiler_recompileAtAddress(uint32 address)
     // todo - use info from previously compiled ranges to determine full size of this function (and merge all the entryAddresses)

     // collect all currently known entry points for this range
-    PPCRecompilerState.recompilerSpinlock.acquire();
+    PPCRecompilerState.recompilerSpinlock.lock();

     std::set<uint32> entryAddresses;

     entryAddresses.emplace(address);

-    PPCRecompilerState.recompilerSpinlock.release();
+    PPCRecompilerState.recompilerSpinlock.unlock();

     std::vector<std::pair<MPTR, uint32>> functionEntryPoints;
     auto func = PPCRecompiler_recompileFunction(range, entryAddresses, functionEntryPoints);
@@ -302,10 +302,10 @@ void PPCRecompiler_thread()
     // 3) if yes -> calculate size, gather all entry points, recompile and update jump table
     while (true)
     {
-        PPCRecompilerState.recompilerSpinlock.acquire();
+        PPCRecompilerState.recompilerSpinlock.lock();
         if (PPCRecompilerState.targetQueue.empty())
         {
-            PPCRecompilerState.recompilerSpinlock.release();
+            PPCRecompilerState.recompilerSpinlock.unlock();
             break;
         }
         auto enterAddress = PPCRecompilerState.targetQueue.front();
@@ -315,10 +315,10 @@ void PPCRecompiler_thread()
         if (funcPtr != PPCRecompiler_leaveRecompilerCode_visited)
         {
             // only recompile functions if marked as visited
-            PPCRecompilerState.recompilerSpinlock.release();
+            PPCRecompilerState.recompilerSpinlock.unlock();
             continue;
         }
-        PPCRecompilerState.recompilerSpinlock.release();
+        PPCRecompilerState.recompilerSpinlock.unlock();

         PPCRecompiler_recompileAtAddress(enterAddress);
     }
@@ -376,7 +376,7 @@ struct ppcRecompilerFuncRange_t

 bool PPCRecompiler_findFuncRanges(uint32 addr, ppcRecompilerFuncRange_t* rangesOut, size_t* countInOut)
 {
-    PPCRecompilerState.recompilerSpinlock.acquire();
+    PPCRecompilerState.recompilerSpinlock.lock();
     size_t countIn = *countInOut;
     size_t countOut = 0;

@@ -392,7 +392,7 @@ bool PPCRecompiler_findFuncRanges(uint32 addr, ppcRecompilerFuncRange_t* rangesO
         countOut++;
     }
     );
-    PPCRecompilerState.recompilerSpinlock.release();
+    PPCRecompilerState.recompilerSpinlock.unlock();
     *countInOut = countOut;
     if (countOut > countIn)
         return false;
@@ -420,7 +420,7 @@ void PPCRecompiler_invalidateTableRange(uint32 offset, uint32 size)
 void PPCRecompiler_deleteFunction(PPCRecFunction_t* func)
 {
     // assumes PPCRecompilerState.recompilerSpinlock is already held
-    cemu_assert_debug(PPCRecompilerState.recompilerSpinlock.isHolding());
+    cemu_assert_debug(PPCRecompilerState.recompilerSpinlock.is_locked());
     for (auto& r : func->list_ranges)
     {
         PPCRecompiler_invalidateTableRange(r.ppcAddress, r.ppcSize);
@@ -439,7 +439,7 @@ void PPCRecompiler_invalidateRange(uint32 startAddr, uint32 endAddr)
         return;
     cemu_assert_debug(endAddr >= startAddr);

-    PPCRecompilerState.recompilerSpinlock.acquire();
+    PPCRecompilerState.recompilerSpinlock.lock();

     uint32 rStart;
     uint32 rEnd;
@@ -458,7 +458,7 @@ void PPCRecompiler_invalidateRange(uint32 startAddr, uint32 endAddr)
         PPCRecompiler_deleteFunction(rFunc);
     }

-    PPCRecompilerState.recompilerSpinlock.release();
+    PPCRecompilerState.recompilerSpinlock.unlock();
 }

 void PPCRecompiler_init()
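PPCRecompiler_visitAddressNoBlock above is the one call site that uses try_lock rather than a blocking lock: it runs on the emulation thread, which must not stall on recompiler bookkeeping, so if the lock is contested it simply gives up and retries on a later visit. Note the cheap unlocked pre-check followed by a re-check under the lock ("was visited since previous check"). Reduced to its essentials, the pattern looks like the sketch below, with self-contained stand-in types rather than Cemu's (the atomic array plays the role of the jump table, which is what makes the unlocked pre-check safe).

#include <atomic>
#include <cstdint>
#include <mutex>
#include <queue>
#include <vector>

// Illustrative stand-ins, not Cemu's types.
struct RecompilerState
{
    std::mutex spinlock; // any Lockable works; Cemu uses its FSpinlock here
    std::vector<std::atomic<bool>> visited = std::vector<std::atomic<bool>>(4096);
    std::queue<uint32_t> targetQueue;
};

void visitAddress(RecompilerState& state, uint32_t index) // index < visited.size()
{
    if (state.visited[index].load(std::memory_order_relaxed))
        return; // cheap unlocked pre-check
    if (!state.spinlock.try_lock())
        return; // never block the caller; bookkeeping can wait for a later visit
    // re-check now that we hold the lock: another thread may have won the race
    if (!state.visited[index].load(std::memory_order_relaxed))
    {
        state.targetQueue.push(index);
        state.visited[index].store(true, std::memory_order_relaxed);
    }
    state.spinlock.unlock();
}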
@@ -516,16 +516,16 @@ FSpinlock s_spinlockFetchShaderCache;

 LatteFetchShader* LatteFetchShader::RegisterInCache(CacheHash fsHash)
 {
-    s_spinlockFetchShaderCache.acquire();
+    s_spinlockFetchShaderCache.lock();
     auto itr = s_fetchShaderByHash.find(fsHash);
     if (itr != s_fetchShaderByHash.end())
     {
         LatteFetchShader* fs = itr->second;
-        s_spinlockFetchShaderCache.release();
+        s_spinlockFetchShaderCache.unlock();
         return fs;
     }
     s_fetchShaderByHash.emplace(fsHash, this);
-    s_spinlockFetchShaderCache.release();
+    s_spinlockFetchShaderCache.unlock();
     return nullptr;
 }
@@ -533,11 +533,11 @@ void LatteFetchShader::UnregisterInCache()
 {
     if (!m_isRegistered)
         return;
-    s_spinlockFetchShaderCache.acquire();
+    s_spinlockFetchShaderCache.lock();
     auto itr = s_fetchShaderByHash.find(m_cacheHash);
     cemu_assert(itr == s_fetchShaderByHash.end());
     s_fetchShaderByHash.erase(itr);
-    s_spinlockFetchShaderCache.release();
+    s_spinlockFetchShaderCache.unlock();
 }

 std::unordered_map<LatteFetchShader::CacheHash, LatteFetchShader*> LatteFetchShader::s_fetchShaderByHash;
@@ -1074,19 +1074,19 @@ void LatteBufferCache_notifyDCFlush(MPTR address, uint32 size)

     uint32 firstPage = address / CACHE_PAGE_SIZE;
     uint32 lastPage = (address + size - 1) / CACHE_PAGE_SIZE;
-    g_spinlockDCFlushQueue.acquire();
+    g_spinlockDCFlushQueue.lock();
     for (uint32 i = firstPage; i <= lastPage; i++)
         s_DCFlushQueue->Set(i);
-    g_spinlockDCFlushQueue.release();
+    g_spinlockDCFlushQueue.unlock();
 }

 void LatteBufferCache_processDCFlushQueue()
 {
     if (s_DCFlushQueue->Empty()) // quick check to avoid locking if there is no work to do
         return;
-    g_spinlockDCFlushQueue.acquire();
+    g_spinlockDCFlushQueue.lock();
     std::swap(s_DCFlushQueue, s_DCFlushQueueAlternate);
-    g_spinlockDCFlushQueue.release();
+    g_spinlockDCFlushQueue.unlock();
     s_DCFlushQueueAlternate->ForAllAndClear([](uint32 index) {LatteBufferCache_invalidatePage(index * CACHE_PAGE_SIZE); });
 }
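LatteBufferCache_processDCFlushQueue above pairs the rename with a pattern worth calling out: the lock is held only for an O(1) pointer swap, and the expensive per-page invalidation then runs on the swapped-out queue with no lock held, so threads calling LatteBufferCache_notifyDCFlush are blocked as briefly as possible. A generic sketch of this double-buffering idea follows, with illustrative names and std::mutex standing in for the spinlock (the real Set/Empty/ForAllAndClear bitset API is Cemu-specific).

#include <cstdint>
#include <mutex>
#include <utility>
#include <vector>

// Double-buffered work queue: producers append under a short lock,
// the consumer swaps buffers under the lock and drains outside it.
static std::mutex s_queueLock;
static std::vector<uint32_t> s_queue;
static std::vector<uint32_t> s_queueAlternate;

void pushWork(uint32_t page)
{
    std::lock_guard<std::mutex> guard(s_queueLock);
    s_queue.push_back(page);
}

void processWork(void (*invalidatePage)(uint32_t))
{
    {
        // lock held only for the O(1) swap, so producers barely block
        std::lock_guard<std::mutex> guard(s_queueLock);
        std::swap(s_queue, s_queueAlternate);
    }
    for (uint32_t page : s_queueAlternate) // drained without holding the lock
        invalidatePage(page);
    s_queueAlternate.clear();
}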
@@ -37,16 +37,16 @@ public:

     void TrackDependency(class PipelineInfo* pipelineInfo)
     {
-        s_spinlockDependency.acquire();
+        s_spinlockDependency.lock();
         m_usedByPipelines.emplace_back(pipelineInfo);
-        s_spinlockDependency.release();
+        s_spinlockDependency.unlock();
     }

     void RemoveDependency(class PipelineInfo* pipelineInfo)
     {
-        s_spinlockDependency.acquire();
+        s_spinlockDependency.lock();
         vectorRemoveByValue(m_usedByPipelines, pipelineInfo);
-        s_spinlockDependency.release();
+        s_spinlockDependency.unlock();
     }

     [[nodiscard]] const VkExtent2D& GetExtend() const { return m_extend;}
@@ -37,16 +37,16 @@ public:

     void TrackDependency(class PipelineInfo* p)
     {
-        s_dependencyLock.acquire();
+        s_dependencyLock.lock();
         list_pipelineInfo.emplace_back(p);
-        s_dependencyLock.release();
+        s_dependencyLock.unlock();
     }

     void RemoveDependency(class PipelineInfo* p)
     {
-        s_dependencyLock.acquire();
+        s_dependencyLock.lock();
         vectorRemoveByValue(list_pipelineInfo, p);
-        s_dependencyLock.release();
+        s_dependencyLock.unlock();
     }

     void PreponeCompilation(bool isRenderThread) override;
@@ -206,18 +206,18 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)

     // deserialize file
     LatteContextRegister* lcr = new LatteContextRegister();
-    s_spinlockSharedInternal.acquire();
+    s_spinlockSharedInternal.lock();
     CachedPipeline* cachedPipeline = new CachedPipeline();
-    s_spinlockSharedInternal.release();
+    s_spinlockSharedInternal.unlock();

     MemStreamReader streamReader(fileData.data(), fileData.size());
     if (!DeserializePipeline(streamReader, *cachedPipeline))
     {
         // failed to deserialize
-        s_spinlockSharedInternal.acquire();
+        s_spinlockSharedInternal.lock();
         delete lcr;
         delete cachedPipeline;
-        s_spinlockSharedInternal.release();
+        s_spinlockSharedInternal.unlock();
         return;
     }
     // restored register view from compacted state
@@ -264,18 +264,18 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)
     }
     auto renderPass = __CreateTemporaryRenderPass(pixelShader, *lcr);
     // create pipeline info
-    m_pipelineIsCachedLock.acquire();
+    m_pipelineIsCachedLock.lock();
     PipelineInfo* pipelineInfo = new PipelineInfo(0, 0, vertexShader->compatibleFetchShader, vertexShader, pixelShader, geometryShader);
-    m_pipelineIsCachedLock.release();
+    m_pipelineIsCachedLock.unlock();
     // compile
     {
         PipelineCompiler pp;
         if (!pp.InitFromCurrentGPUState(pipelineInfo, *lcr, renderPass))
         {
-            s_spinlockSharedInternal.acquire();
+            s_spinlockSharedInternal.lock();
             delete lcr;
             delete cachedPipeline;
-            s_spinlockSharedInternal.release();
+            s_spinlockSharedInternal.unlock();
             return;
         }
         pp.Compile(true, true, false);
@@ -284,16 +284,16 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)
     // on success, calculate pipeline hash and flag as present in cache
     uint64 pipelineBaseHash = vertexShader->baseHash;
     uint64 pipelineStateHash = VulkanRenderer::draw_calculateGraphicsPipelineHash(vertexShader->compatibleFetchShader, vertexShader, geometryShader, pixelShader, renderPass, *lcr);
-    m_pipelineIsCachedLock.acquire();
+    m_pipelineIsCachedLock.lock();
     m_pipelineIsCached.emplace(pipelineBaseHash, pipelineStateHash);
-    m_pipelineIsCachedLock.release();
+    m_pipelineIsCachedLock.unlock();
     // clean up
-    s_spinlockSharedInternal.acquire();
+    s_spinlockSharedInternal.lock();
     delete pipelineInfo;
     delete lcr;
     delete cachedPipeline;
     VulkanRenderer::GetInstance()->releaseDestructibleObject(renderPass);
-    s_spinlockSharedInternal.release();
+    s_spinlockSharedInternal.unlock();
 }

 bool VulkanPipelineStableCache::HasPipelineCached(uint64 baseHash, uint64 pipelineStateHash)
@@ -3447,14 +3447,14 @@ void VulkanRenderer::releaseDestructibleObject(VKRDestructibleObject* destructib
         return;
     }
     // otherwise put on queue
-    m_spinlockDestructionQueue.acquire();
+    m_spinlockDestructionQueue.lock();
     m_destructionQueue.emplace_back(destructibleObject);
-    m_spinlockDestructionQueue.release();
+    m_spinlockDestructionQueue.unlock();
 }

 void VulkanRenderer::ProcessDestructionQueue2()
 {
-    m_spinlockDestructionQueue.acquire();
+    m_spinlockDestructionQueue.lock();
     for (auto it = m_destructionQueue.begin(); it != m_destructionQueue.end();)
     {
         if ((*it)->canDestroy())
@@ -3465,7 +3465,7 @@ void VulkanRenderer::ProcessDestructionQueue2()
         }
         ++it;
     }
-    m_spinlockDestructionQueue.release();
+    m_spinlockDestructionQueue.unlock();
 }

 VkDescriptorSetInfo::~VkDescriptorSetInfo()
@@ -4010,9 +4010,9 @@ void VulkanRenderer::AppendOverlayDebugInfo()
     ImGui::Text("ImageView %u", performanceMonitor.vk.numImageViews.get());
     ImGui::Text("RenderPass %u", performanceMonitor.vk.numRenderPass.get());
     ImGui::Text("Framebuffer %u", performanceMonitor.vk.numFramebuffer.get());
-    m_spinlockDestructionQueue.acquire();
+    m_spinlockDestructionQueue.lock();
     ImGui::Text("DestructionQ %u", (unsigned int)m_destructionQueue.size());
-    m_spinlockDestructionQueue.release();
+    m_spinlockDestructionQueue.unlock();


     ImGui::Text("BeginRP/f %u", performanceMonitor.vk.numBeginRenderpassPerFrame.get());
@@ -234,38 +234,38 @@ namespace iosu

     void _IPCInitDispatchablePool()
     {
-        sIPCDispatchableCommandPoolLock.acquire();
+        sIPCDispatchableCommandPoolLock.lock();
         while (!sIPCFreeDispatchableCommands.empty())
             sIPCFreeDispatchableCommands.pop();
         for (size_t i = 0; i < sIPCDispatchableCommandPool.GetCount(); i++)
             sIPCFreeDispatchableCommands.push(sIPCDispatchableCommandPool.GetPtr()+i);
-        sIPCDispatchableCommandPoolLock.release();
+        sIPCDispatchableCommandPoolLock.unlock();
     }

     IOSDispatchableCommand* _IPCAllocateDispatchableCommand()
     {
-        sIPCDispatchableCommandPoolLock.acquire();
+        sIPCDispatchableCommandPoolLock.lock();
         if (sIPCFreeDispatchableCommands.empty())
         {
             cemuLog_log(LogType::Force, "IOS: Exhausted pool of dispatchable commands");
-            sIPCDispatchableCommandPoolLock.release();
+            sIPCDispatchableCommandPoolLock.unlock();
             return nullptr;
         }
         IOSDispatchableCommand* cmd = sIPCFreeDispatchableCommands.front();
         sIPCFreeDispatchableCommands.pop();
         cemu_assert_debug(!cmd->isAllocated);
         cmd->isAllocated = true;
-        sIPCDispatchableCommandPoolLock.release();
+        sIPCDispatchableCommandPoolLock.unlock();
         return cmd;
     }

     void _IPCReleaseDispatchableCommand(IOSDispatchableCommand* cmd)
     {
-        sIPCDispatchableCommandPoolLock.acquire();
+        sIPCDispatchableCommandPoolLock.lock();
         cemu_assert_debug(cmd->isAllocated);
         cmd->isAllocated = false;
         sIPCFreeDispatchableCommands.push(cmd);
-        sIPCDispatchableCommandPoolLock.release();
+        sIPCDispatchableCommandPoolLock.unlock();
     }

     static constexpr size_t MAX_NUM_ACTIVE_DEV_HANDLES = 96; // per process
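The three pool functions above share one shape: lock, mutate the free list, unlock, plus an extra early-exit unlock on the exhausted path. That manual pairing is exactly what std::lock_guard, usable now that the spinlock is Lockable, would eliminate. As an illustration only, here is the allocate path rewritten with a guard, reusing the identifiers from the diff; the commit itself keeps the explicit lock()/unlock() calls.

#include <mutex>

IOSDispatchableCommand* _IPCAllocateDispatchableCommand()
{
    // one guard covers both the exhausted early return and the normal path
    std::lock_guard<FSpinlock> guard(sIPCDispatchableCommandPoolLock);
    if (sIPCFreeDispatchableCommands.empty())
    {
        cemuLog_log(LogType::Force, "IOS: Exhausted pool of dispatchable commands");
        return nullptr; // guard unlocks here
    }
    IOSDispatchableCommand* cmd = sIPCFreeDispatchableCommands.front();
    sIPCFreeDispatchableCommands.pop();
    cemu_assert_debug(!cmd->isAllocated);
    cmd->isAllocated = true;
    return cmd; // and here
}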
@@ -8,27 +8,27 @@ struct CoreinitAsyncCallback

     static void queue(MPTR functionMPTR, uint32 numParameters, uint32 r3, uint32 r4, uint32 r5, uint32 r6, uint32 r7, uint32 r8, uint32 r9, uint32 r10)
     {
-        s_asyncCallbackSpinlock.acquire();
+        s_asyncCallbackSpinlock.lock();
         s_asyncCallbackQueue.emplace_back(allocateAndInitFromPool(functionMPTR, numParameters, r3, r4, r5, r6, r7, r8, r9, r10));
-        s_asyncCallbackSpinlock.release();
+        s_asyncCallbackSpinlock.unlock();
     }

     static void callNextFromQueue()
     {
-        s_asyncCallbackSpinlock.acquire();
+        s_asyncCallbackSpinlock.lock();
         if (s_asyncCallbackQueue.empty())
         {
             cemuLog_log(LogType::Force, "AsyncCallbackQueue is empty. Unexpected behavior");
-            s_asyncCallbackSpinlock.release();
+            s_asyncCallbackSpinlock.unlock();
             return;
         }
         CoreinitAsyncCallback* cb = s_asyncCallbackQueue[0];
         s_asyncCallbackQueue.erase(s_asyncCallbackQueue.begin());
-        s_asyncCallbackSpinlock.release();
+        s_asyncCallbackSpinlock.unlock();
         cb->doCall();
-        s_asyncCallbackSpinlock.acquire();
+        s_asyncCallbackSpinlock.lock();
         releaseToPool(cb);
-        s_asyncCallbackSpinlock.release();
+        s_asyncCallbackSpinlock.unlock();
     }

 private:
@@ -39,7 +39,7 @@ private:

     static CoreinitAsyncCallback* allocateAndInitFromPool(MPTR functionMPTR, uint32 numParameters, uint32 r3, uint32 r4, uint32 r5, uint32 r6, uint32 r7, uint32 r8, uint32 r9, uint32 r10)
     {
-        cemu_assert_debug(s_asyncCallbackSpinlock.isHolding());
+        cemu_assert_debug(s_asyncCallbackSpinlock.is_locked());
         if (s_asyncCallbackPool.empty())
         {
             CoreinitAsyncCallback* cb = new CoreinitAsyncCallback(functionMPTR, numParameters, r3, r4, r5, r6, r7, r8, r9, r10);
@@ -54,7 +54,7 @@ private:

     static void releaseToPool(CoreinitAsyncCallback* cb)
     {
-        cemu_assert_debug(s_asyncCallbackSpinlock.isHolding());
+        cemu_assert_debug(s_asyncCallbackSpinlock.is_locked());
         s_asyncCallbackPool.emplace_back(cb);
     }
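allocateAndInitFromPool and releaseToPool do not take the lock themselves; their callers (queue and callNextFromQueue above) already hold it, and the assert documents that precondition. One caveat of the renamed check: with a simple atomic-flag spinlock like the sketch near the top of this page, is_locked() can only verify that *some* thread holds the lock, not that the calling thread does, so it is a best-effort debug check rather than proof of ownership. The caller/callee contract in miniature, with illustrative names:

#include <cassert>

static FSpinlock s_spinlock; // FSpinlock as sketched near the top of this page

static void helperThatAssumesLockHeld()
{
    // best-effort precondition check: confirms the lock is held by someone,
    // not that we are the holder
    assert(s_spinlock.is_locked());
    // ... mutate the shared pool/queue here ...
}

static void callNext()
{
    s_spinlock.lock();           // the lock is taken exactly once, here
    helperThatAssumesLockHeld(); // called with the lock already held
    s_spinlock.unlock();
}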
@@ -6,8 +6,8 @@

 // titles that utilize MP task queue: Yoshi's Woolly World, Fast Racing Neo, Tokyo Mirage Sessions, Mii Maker

-#define AcquireMPQLock() s_workaroundSpinlock.acquire()
-#define ReleaseMPQLock() s_workaroundSpinlock.release()
+#define AcquireMPQLock() s_workaroundSpinlock.lock()
+#define ReleaseMPQLock() s_workaroundSpinlock.unlock()

 namespace coreinit
 {
@@ -35,7 +35,7 @@ namespace coreinit

     void MPInitTask(MPTask* task, void* func, void* data, uint32 size)
     {
-        s_workaroundSpinlock.acquire();
+        s_workaroundSpinlock.lock();
         task->thisptr = task;

         task->coreIndex = PPC_CORE_COUNT;
@@ -48,7 +48,7 @@ namespace coreinit

         task->userdata = nullptr;
         task->runtime = 0;
-        s_workaroundSpinlock.release();
+        s_workaroundSpinlock.unlock();
     }

     bool MPTermTask(MPTask* task)
@@ -465,12 +465,12 @@ namespace coreinit

     void _OSFastMutex_AcquireContention(OSFastMutex* fastMutex)
     {
-        g_fastMutexSpinlock.acquire();
+        g_fastMutexSpinlock.lock();
     }

     void _OSFastMutex_ReleaseContention(OSFastMutex* fastMutex)
     {
-        g_fastMutexSpinlock.release();
+        g_fastMutexSpinlock.unlock();
     }

     void OSFastMutex_LockInternal(OSFastMutex* fastMutex)
@@ -778,7 +778,7 @@ namespace snd_core

     void AXIst_SyncVPB(AXVPBInternal_t** lastProcessedDSPShadowCopy, AXVPBInternal_t** lastProcessedPPCShadowCopy)
     {
-        __AXVoiceListSpinlock.acquire();
+        __AXVoiceListSpinlock.lock();

         AXVPBInternal_t* previousInternalDSP = nullptr;
         AXVPBInternal_t* previousInternalPPC = nullptr;
@@ -869,7 +869,7 @@ namespace snd_core
             else
                 *lastProcessedPPCShadowCopy = nullptr;
         }
-        __AXVoiceListSpinlock.release();
+        __AXVoiceListSpinlock.unlock();
     }

     void AXIst_HandleFrameCallbacks()
@@ -393,7 +393,7 @@ namespace snd_core
     AXVPB* AXAcquireVoiceEx(uint32 priority, MPTR callbackEx, MPTR userParam)
     {
         cemu_assert(priority != AX_PRIORITY_FREE && priority < AX_PRIORITY_MAX);
-        __AXVoiceListSpinlock.acquire();
+        __AXVoiceListSpinlock.lock();
         AXVPB* vpb = AXVoiceList_GetFreeVoice();
         if (vpb != nullptr)
         {
@@ -410,7 +410,7 @@ namespace snd_core
             if (droppedVoice == nullptr)
             {
                 // no voice available
-                __AXVoiceListSpinlock.release();
+                __AXVoiceListSpinlock.unlock();
                 return nullptr;
             }
             vpb->userParam = userParam;
@@ -418,18 +418,18 @@ namespace snd_core
             vpb->callbackEx = callbackEx;
             AXVPB_SetVoiceDefault(vpb);
         }
-        __AXVoiceListSpinlock.release();
+        __AXVoiceListSpinlock.unlock();
         return vpb;
     }

     void AXFreeVoice(AXVPB* vpb)
     {
         cemu_assert(vpb != nullptr);
-        __AXVoiceListSpinlock.acquire();
+        __AXVoiceListSpinlock.lock();
         if (vpb->priority == (uint32be)AX_PRIORITY_FREE)
         {
             forceLog_printf("AXFreeVoice() called on free voice\n");
-            __AXVoiceListSpinlock.release();
+            __AXVoiceListSpinlock.unlock();
             return;
         }
         AXVoiceProtection_Release(vpb);
@@ -442,7 +442,7 @@ namespace snd_core
         vpb->callback = MPTR_NULL;
         vpb->callbackEx = MPTR_NULL;
         AXVoiceList_AddFreeVoice(vpb);
-        __AXVoiceListSpinlock.release();
+        __AXVoiceListSpinlock.unlock();
     }

     void AXVPBInit()