Memory mirror support

Implemented utils::memory_release (not used)
Implemented utils::shm class (handle for shared memory objects; sketched below the commit metadata)
Improved sys_mmapper syscalls
Rewritten ppu_patch function
Implemented vm::get_super_ptr (ignores memory protection)
Minimum allocation alignment increased to 0x10000
Nekotekina committed 2018-05-07 21:57:06 +03:00
parent fe4c3c4d84
commit 5d15d64ec8
14 changed files with 541 additions and 232 deletions
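
For context: "memory mirror" here means backing several virtual address ranges with the same physical pages, so a store through one mapping is immediately visible through the others; it is also what makes vm::get_super_ptr possible, since a second view of the same pages can carry different protection. A minimal, Linux-only sketch of the idea (hypothetical standalone code, not the actual utils::shm implementation, which also supports Windows):

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
	const size_t size = 0x10000;

	// One anonymous shared memory object (memfd_create is Linux-specific;
	// shm_open on other POSIX systems or CreateFileMapping on Windows play the same role)
	const int fd = memfd_create("mirror", 0);
	if (fd < 0 || ftruncate(fd, size) != 0)
	{
		return 1;
	}

	// Two views backed by the same physical pages
	char* a = static_cast<char*>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
	char* b = static_cast<char*>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
	if (a == MAP_FAILED || b == MAP_FAILED)
	{
		return 1;
	}

	// A store through one view is immediately visible through the other
	std::strcpy(a, "mirrored");
	std::printf("%s\n", b); // prints "mirrored"

	munmap(a, size);
	munmap(b, size);
	close(fd);
}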

rpcs3/Emu/Memory/vm.cpp

@@ -7,7 +7,6 @@
 #include "Emu/CPU/CPUThread.h"
 #include "Emu/Cell/lv2/sys_memory.h"
 #include "Emu/RSX/GSRender.h"
-#include <atomic>
 #include <deque>
@@ -299,8 +298,10 @@ namespace vm
 		}
 	}
 
-	void _page_map(u32 addr, u32 size, u8 flags)
+	static void _page_map(u32 addr, u8 flags, utils::shm& shm)
 	{
+		const u32 size = shm.size();
+
 		if (!size || (size | addr) % 4096 || flags & page_allocated)
 		{
 			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
@@ -314,7 +315,10 @@ namespace vm
 			}
 		}
 
-		utils::memory_commit(g_base_addr + addr, size);
+		if (shm.map_critical(g_base_addr + addr) != g_base_addr + addr)
+		{
+			fmt::throw_exception("Memory mapping failed - blame Windows (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
+		}
 
 		if (flags & page_executable)
 		{
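
Note the semantic change: _page_map no longer commits anonymous pages but asks the shm object to map itself at the fixed address g_base_addr + addr inside the pre-reserved guest region, treating any other outcome as fatal. The "blame Windows" wording reflects that MapViewOfFileEx can refuse a placed view if the target range is not free, while on POSIX a MAP_FIXED mapping simply replaces the reservation, as in this hypothetical helper (map_at is not the real map_critical):

#include <sys/mman.h>
#include <cstddef>

// Map a shared memory fd at an exact address inside a region previously
// reserved with mmap(..., PROT_NONE, ...). MAP_FIXED atomically replaces
// the reservation, so on success the result always equals `target`.
void* map_at(int shm_fd, void* target, std::size_t size)
{
	void* const result = mmap(target, size, PROT_READ | PROT_WRITE,
	                          MAP_SHARED | MAP_FIXED, shm_fd, 0);
	return result == MAP_FAILED ? nullptr : result;
}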
@@ -393,19 +397,28 @@ namespace vm
 		return true;
 	}
 
-	void _page_unmap(u32 addr, u32 size)
+	static void _page_unmap(u32 addr, utils::shm& shm)
 	{
+		const u32 size = shm.size();
+
 		if (!size || (size | addr) % 4096)
 		{
 			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
 		}
 
+		bool is_exec = false;
+
 		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
 		{
 			if ((g_pages[i].flags & page_allocated) == 0)
 			{
 				fmt::throw_exception("Memory not mapped (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
 			}
+
+			if (g_pages[i].flags & page_executable)
+			{
+				is_exec = true;
+			}
 		}
 
 		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
@@ -416,8 +429,12 @@
 			}
 		}
 
-		utils::memory_decommit(g_base_addr + addr, size);
-		utils::memory_decommit(g_exec_addr + addr, size);
+		shm.unmap_critical(g_base_addr + addr);
+
+		if (is_exec)
+		{
+			utils::memory_decommit(g_exec_addr + addr, size);
+		}
 
 		if (g_cfg.core.ppu_debug)
 		{
@@ -438,7 +455,7 @@
 		return true;
 	}
 
-	u32 alloc(u32 size, memory_location_t location, u32 align, u32 sup)
+	u32 alloc(u32 size, memory_location_t location, u32 align)
 	{
 		const auto block = get(location);
@@ -447,10 +464,10 @@
 			fmt::throw_exception("Invalid memory location (%u)" HERE, (uint)location);
 		}
 
-		return block->alloc(size, align, nullptr, sup);
+		return block->alloc(size, align);
 	}
 
-	u32 falloc(u32 addr, u32 size, memory_location_t location, u32 sup)
+	u32 falloc(u32 addr, u32 size, memory_location_t location)
 	{
 		const auto block = get(location, addr);
@@ -459,10 +476,10 @@
 			fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, (uint)location, addr);
 		}
 
-		return block->falloc(addr, size, nullptr, sup);
+		return block->falloc(addr, size);
 	}
 
-	u32 dealloc(u32 addr, memory_location_t location, u32* sup_out)
+	u32 dealloc(u32 addr, memory_location_t location)
 	{
 		const auto block = get(location, addr);
@@ -471,7 +488,7 @@
 			fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, (uint)location, addr);
 		}
 
-		return block->dealloc(addr, nullptr, sup_out);
+		return block->dealloc(addr);
 	}
 
 	void dealloc_verbose_nothrow(u32 addr, memory_location_t location) noexcept
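
With the supplementary-info plumbing gone, the public wrappers shrink to alloc(size, location, align), falloc(addr, size, location) and dealloc(addr, location). Hypothetical call sites under the new signatures:

// vm::main is the main guest memory location; alignment shown explicitly
const u32 addr = vm::alloc(0x20000, vm::main, 0x10000); // size is rounded up to 64 KiB
const u32 freed = vm::dealloc(addr, vm::main);          // 0x20000 on success, 0 if not mapped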
@@ -491,8 +508,10 @@
 		}
 	}
 
-	bool block_t::try_alloc(u32 addr, u32 size, u8 flags, u32 sup)
+	bool block_t::try_alloc(u32 addr, u8 flags, std::shared_ptr<utils::shm>&& shm)
 	{
+		const u32 size = shm->size();
+
 		// Check if memory area is already mapped
 		for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
 		{
@@ -503,13 +522,10 @@
 		}
 
 		// Map "real" memory pages
-		_page_map(addr, size, flags);
+		_page_map(addr, flags, *shm);
 
 		// Add entry
-		m_map[addr] = size;
-
-		// Add supplementary info if necessary
-		if (sup) m_sup[addr] = sup;
+		m_map[addr] = std::move(shm);
 
 		return true;
 	}
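
Since try_alloc now takes ownership of the shm object rather than a raw size, the block's bookkeeping presumably changes shape in the headers (not shown in this excerpt). Roughly, under that assumption:

#include <map>
#include <memory>

using u32 = unsigned int;
namespace utils { class shm; }

// Assumed member layout of block_t after this commit (the real declaration
// lives in the memory headers, which are among the changed files not shown):
struct block_t_sketch
{
	// before: std::map<u32, u32> m_map;  // addr -> size
	//         std::map<u32, u32> m_sup;  // addr -> supplementary info
	std::map<u32, std::shared_ptr<utils::shm>> m_map; // addr -> backing shm object
	// The mapped size is now m_map[addr]->size(), and m_sup is gone entirely.
};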
@@ -528,24 +544,32 @@
 	block_t::~block_t()
 	{
-		vm::writer_lock lock(0);
-
-		// Deallocate all memory
-		for (auto& entry : m_map)
-		{
-			_page_unmap(entry.first, entry.second);
-		}
+		{
+			vm::writer_lock lock(0);
+
+			// Deallocate all memory
+			for (auto& entry : m_map)
+			{
+				_page_unmap(entry.first, *entry.second);
+			}
+		}
+
+		// Notify rsx to invalidate range (TODO)
+		if (const auto rsxthr = fxm::check_unlocked<GSRender>())
+		{
+			rsxthr->on_notify_memory_unmapped(addr, size);
+		}
 	}
 
-	u32 block_t::alloc(const u32 orig_size, u32 align, const uchar* data, u32 sup)
+	u32 block_t::alloc(const u32 orig_size, u32 align, const std::shared_ptr<utils::shm>* src)
 	{
 		vm::writer_lock lock(0);
 
 		// Align to minimal page size
-		const u32 size = ::align(orig_size, 4096);
+		const u32 size = ::align(orig_size, 0x10000);
 
 		// Check alignment (it's page allocation, so passing small values there is just silly)
-		if (align < 4096 || align != (0x80000000u >> cntlz32(align, true)))
+		if (align < 0x10000 || align != (0x80000000u >> cntlz32(align, true)))
 		{
 			fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)" HERE, size, align);
 		}
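
The tightened guard enforces two properties: align is at least 0x10000 (the new minimum from the commit message), and align is a power of two, because 0x80000000u >> cntlz32(align, true) isolates the highest set bit of align, which equals align only for powers of two. A standalone equivalent, substituting C++20's std::countl_zero for RPCS3's cntlz32 helper:

#include <bit>
#include <cstdint>

// Equivalent of `align >= 0x10000 && align == (0x80000000u >> cntlz32(align, true))`.
constexpr bool is_valid_alignment(std::uint32_t align)
{
	// countl_zero counts leading zero bits, so the shift keeps only the
	// highest set bit of align; that equals align only for powers of two.
	return align >= 0x10000 && align == (0x80000000u >> std::countl_zero(align));
}

static_assert(is_valid_alignment(0x10000));   // 64 KiB: new minimum, power of two
static_assert(is_valid_alignment(0x100000));  // 1 MiB: accepted
static_assert(!is_valid_alignment(0x18000));  // not a power of two: rejected
static_assert(!is_valid_alignment(0x1000));   // 4 KiB: below the new minimum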
@@ -567,16 +591,14 @@
 			pflags |= page_64k_size;
 		}
 
+		// Create or import shared memory object
+		std::shared_ptr<utils::shm> shm = src ? std::shared_ptr<utils::shm>(*src) : std::make_shared<utils::shm>(size);
+
 		// Search for an appropriate place (unoptimized)
 		for (u32 addr = ::align(this->addr, align); addr < this->addr + this->size - 1; addr += align)
 		{
-			if (try_alloc(addr, size, pflags, sup))
+			if (try_alloc(addr, pflags, std::move(shm)))
 			{
-				if (data)
-				{
-					std::memcpy(vm::base(addr), data, orig_size);
-				}
-
 				return addr;
 			}
 		}
@@ -584,12 +606,12 @@
 		return 0;
 	}
 
-	u32 block_t::falloc(u32 addr, const u32 orig_size, const uchar* data, u32 sup)
+	u32 block_t::falloc(u32 addr, const u32 orig_size, const std::shared_ptr<utils::shm>* src)
 	{
 		vm::writer_lock lock(0);
 
 		// align to minimal page size
-		const u32 size = ::align(orig_size, 4096);
+		const u32 size = ::align(orig_size, 0x10000);
 
 		// return if addr or size is invalid
 		if (!size || size > this->size || addr < this->addr || addr + size - 1 > this->addr + this->size - 1)
@@ -608,54 +630,81 @@
 			pflags |= page_64k_size;
 		}
 
-		if (!try_alloc(addr, size, pflags, sup))
+		if (!try_alloc(addr, pflags, src ? std::shared_ptr<utils::shm>(*src) : std::make_shared<utils::shm>(size)))
 		{
 			return 0;
 		}
 
-		if (data)
-		{
-			std::memcpy(vm::base(addr), data, orig_size);
-		}
-
 		return addr;
 	}
 
-	u32 block_t::dealloc(u32 addr, uchar* data_out, u32* sup_out)
+	u32 block_t::dealloc(u32 addr, const std::shared_ptr<utils::shm>* src)
 	{
-		vm::writer_lock lock(0);
-
-		const auto found = m_map.find(addr);
-
-		if (found != m_map.end())
+		u32 result = 0;
 		{
-			const u32 size = found->second;
-			const auto rsxthr = fxm::get<GSRender>();
-
-			if (data_out)
-			{
-				std::memcpy(data_out, vm::base(addr), size);
-			}
-
-			// Unmap "real" memory pages
-			_page_unmap(addr, size);
-
-			// Notify rsx to invalidate range
-			if (rsxthr != nullptr) rsxthr->on_notify_memory_unmapped(addr, size);
-
-			// Write supplementary info if necessary
-			if (sup_out) *sup_out = m_sup[addr];
-
-			// Remove supplementary info
-			m_sup.erase(addr);
-
-			return size;
+			vm::writer_lock lock(0);
+
+			const auto found = m_map.find(addr);
+
+			if (found == m_map.end())
+			{
+				return 0;
+			}
+
+			if (src && found->second.get() != src->get())
+			{
+				return 0;
+			}
+
+			result = found->second->size();
+
+			// Unmap "real" memory pages
+			_page_unmap(addr, *found->second);
+
+			// Remove entry
+			m_map.erase(found);
 		}
 
-		return 0;
+		// Notify rsx to invalidate range (TODO)
+		if (const auto rsxthr = fxm::check_unlocked<GSRender>())
+		{
+			rsxthr->on_notify_memory_unmapped(addr, result);
+		}
+
+		return result;
 	}
 
+	std::pair<const u32, std::shared_ptr<utils::shm>> block_t::get(u32 addr, u32 size)
+	{
+		if (addr < this->addr || std::max<u32>(size, addr - this->addr + size) >= this->size)
+		{
+			return {addr, nullptr};
+		}
+
+		vm::reader_lock lock;
+
+		const auto upper = m_map.upper_bound(addr);
+
+		if (upper == m_map.begin())
+		{
+			return {addr, nullptr};
+		}
+
+		const auto found = std::prev(upper);
+
+		// Exact address condition (size == 0)
+		if (size == 0 && found->first != addr)
+		{
+			return {addr, nullptr};
+		}
+
+		// Range check
+		if (std::max<u32>(size, addr - found->first + size) > found->second->size())
+		{
+			return {addr, nullptr};
+		}
+
+		return *found;
+	}
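
block_t::get is new in this commit: given an address and an optional size, it returns the entry whose mapped range contains them, so callers such as the sys_mmapper syscalls can reach the backing shm object. The lookup is the standard ordered-map predecessor idiom; a self-contained illustration with plain sizes standing in for shm objects:

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

// upper_bound(addr) finds the first entry starting above addr; its predecessor
// is therefore the only entry that can contain addr.
static const std::pair<const std::uint32_t, std::uint32_t>*
find_containing(const std::map<std::uint32_t, std::uint32_t>& map, std::uint32_t addr)
{
	const auto upper = map.upper_bound(addr);

	if (upper == map.begin())
	{
		return nullptr; // no entry starts at or below addr
	}

	const auto found = std::prev(upper);

	if (addr - found->first >= found->second)
	{
		return nullptr; // addr lies past the end of the preceding entry
	}

	return &*found;
}

int main()
{
	const std::map<std::uint32_t, std::uint32_t> map{{0x10000, 0x20000}, {0x40000, 0x10000}};
	std::printf("%d\n", find_containing(map, 0x2ffff) != nullptr); // 1: inside [0x10000, 0x30000)
	std::printf("%d\n", find_containing(map, 0x30000) != nullptr); // 0: in the gap before 0x40000
}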
@@ -664,7 +713,7 @@ u32 block_t::imp_used(const vm::writer_lock&)
 
 		for (auto& entry : m_map)
 		{
-			result += entry.second;
+			result += entry.second->size();
 		}
 
 		return result;