diff --git a/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp b/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
index 805d5db0ad..d3c67f62bd 100644
--- a/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
@@ -36,12 +36,8 @@ lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv
 	, key(key)
 	, pshared(pshared)
 	, ct(ct)
-	, shm(std::make_shared<utils::shm>(size, 1 /* shareable flag */))
+	, shm(null_ptr)
 {
-#ifndef _WIN32
-	// Optimization that's useless on Windows :puke:
-	utils::memory_lock(shm->map_self(), size);
-#endif
 }
 
 lv2_memory::lv2_memory(utils::serial& ar)
@@ -51,23 +47,17 @@
 	, key(ar)
 	, pshared(ar)
 	, ct(lv2_memory_container::search(ar.pop()))
-	, shm([&](u32 addr)
+	, shm([&](u32 addr) -> shared_ptr<std::shared_ptr<utils::shm>>
 	{
 		if (addr)
 		{
-			return ensure(vm::get(vm::any, addr)->peek(addr).second);
+			return make_single_value(ensure(vm::get(vm::any, addr)->peek(addr).second));
 		}
 
-		const auto _shm = std::make_shared<utils::shm>(size, 1);
-		ar(std::span(_shm->map_self(), size));
-		return _shm;
+		return null_ptr;
 	}(ar.pop()))
 	, counter(ar)
 {
-#ifndef _WIN32
-	// Optimization that's useless on Windows :puke:
-	utils::memory_lock(shm->map_self(), size);
-#endif
 }
 
 CellError lv2_memory::on_id_create()
@@ -96,13 +86,7 @@ void lv2_memory::save(utils::serial& ar)
 	USING_SERIALIZATION_VERSION(lv2_memory);
 
 	ar(size, align, flags, key, pshared, ct->id);
-	ar(counter ? vm::get_shm_addr(shm) : 0);
-
-	if (!counter)
-	{
-		ar(std::span(shm->map_self(), size));
-	}
-
+	ar(counter ? vm::get_shm_addr(*shm.load()) : 0);
 	ar(counter);
 }
 
@@ -645,6 +629,18 @@ error_code sys_mmapper_map_shared_memory(ppu_thread& ppu, u32 addr, u32 mem_id,
 			return CELL_EALIGN;
 		}
 
+		while (!mem.shm)
+		{
+			// Atomically insert the memory handle (lazily allocated)
+			auto shm = make_single_value(std::make_shared<utils::shm>(mem.size, 1 /* shareable flag */));
+			stx::shared_ptr<std::shared_ptr<utils::shm>> null;
+
+			if (mem.shm.compare_exchange(null, std::move(shm)))
+			{
+				break;
+			}
+		}
+
 		mem.counter++;
 		return {};
 	});
@@ -659,7 +655,9 @@ error_code sys_mmapper_map_shared_memory(ppu_thread& ppu, u32 addr, u32 mem_id,
 		return mem.ret;
 	}
 
-	if (!area->falloc(addr, mem->size, &mem->shm, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M))
+	auto shm_ptr = *mem->shm.load();
+
+	if (!area->falloc(addr, mem->size, &shm_ptr, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M))
 	{
 		mem->counter--;
 
@@ -697,6 +695,18 @@ error_code sys_mmapper_search_and_map(ppu_thread& ppu, u32 start_addr, u32 mem_i
 			return CELL_EALIGN;
 		}
 
+		while (!mem.shm)
+		{
+			// Atomically insert the memory handle (lazily allocated)
+			auto shm = make_single_value(std::make_shared<utils::shm>(mem.size, 1 /* shareable flag */));
+			stx::shared_ptr<std::shared_ptr<utils::shm>> null;
+
+			if (mem.shm.compare_exchange(null, std::move(shm)))
+			{
+				break;
+			}
+		}
+
 		mem.counter++;
 		return {};
 	});
@@ -711,7 +721,9 @@ error_code sys_mmapper_search_and_map(ppu_thread& ppu, u32 start_addr, u32 mem_i
 		return mem.ret;
 	}
 
-	const u32 addr = area->alloc(mem->size, &mem->shm, mem->align, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M);
+	auto shm_ptr = *mem->shm.load();
+
+	const u32 addr = area->alloc(mem->size, &shm_ptr, mem->align, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M);
 
 	if (!addr)
 	{
@@ -755,7 +767,7 @@ error_code sys_mmapper_unmap_shared_memory(ppu_thread& ppu, u32 addr, vm::ptr<u
 
 	const auto mem_id = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem) -> u32
 	{
-		if (mem.shm.get() == shm.second.get())
+		if (mem.shm.load()->get() == shm.second.get())
 		{
 			return id;
 		}
diff --git a/rpcs3/Emu/Cell/lv2/sys_mmapper.h b/rpcs3/Emu/Cell/lv2/sys_mmapper.h
index 544ff91ee8..aec727840f 100644
--- a/rpcs3/Emu/Cell/lv2/sys_mmapper.h
+++ b/rpcs3/Emu/Cell/lv2/sys_mmapper.h
@@ -5,6 +5,8 @@
 #include "Emu/Memory/vm_ptr.h"
 #include "Emu/Cell/ErrorCodes.h"
 
+#include "util/shared_ptr.hpp"
+
 #include <vector>
 
 struct lv2_memory_container;
@@ -24,7 +26,7 @@ struct lv2_memory : lv2_obj
 	const u64 key; // IPC key
 	const bool pshared; // Process shared flag
 	lv2_memory_container* const ct; // Associated memory container
-	const std::shared_ptr<utils::shm> shm;
+	atomic_ptr<std::shared_ptr<utils::shm>> shm;
 	atomic_t<u32> counter{0};
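
Note on the pattern introduced above: the `utils::shm` backing allocation is no longer created in the `lv2_memory` constructor; it is allocated on first map and published through a compare-exchange on the atomic pointer, so concurrent mappers agree on a single allocation. Below is a minimal standalone sketch of that lazy-publication idea, not the RPCS3 code: it uses C++20 `std::atomic<std::shared_ptr<T>>` instead of `stx::atomic_ptr`, and `shm_stub`, `lazy_holder`, and `get_or_create` are illustrative names that do not exist in the repository.

```cpp
#include <atomic>
#include <memory>

// Hypothetical stand-in for utils::shm, for illustration only.
struct shm_stub
{
	explicit shm_stub(unsigned size) : size(size) {}
	unsigned size;
};

struct lazy_holder
{
	// Starts null; the backing allocation is created on first use.
	std::atomic<std::shared_ptr<shm_stub>> shm;

	std::shared_ptr<shm_stub> get_or_create(unsigned size)
	{
		// Fast path: another caller already published an allocation.
		if (auto cur = shm.load())
		{
			return cur;
		}

		// Slow path: allocate, then try to publish it atomically.
		auto fresh = std::make_shared<shm_stub>(size);
		std::shared_ptr<shm_stub> expected; // null

		if (shm.compare_exchange_strong(expected, fresh))
		{
			return fresh; // this thread's allocation won the race
		}

		return expected; // another thread won; reuse its allocation
	}
};
```

With a strong compare-exchange a single attempt is enough: failure means another thread already stored its pointer, and `expected` then holds that published value. The diff's `while (!mem.shm)` loop achieves the same outcome by re-checking the pointer after a failed exchange.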