New reservations

Memory system cleanup
sys_memory_get_page_attribute
Nekotekina 2017-02-17 22:35:57 +03:00
parent 7cdb5f3123
commit 5e3bacbd9b
26 changed files with 1536 additions and 1531 deletions


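Note: the heart of this commit is the reservation model. The old code kept a single global reservation (one address, one owner thread, a global mutex, and mprotect tricks to detect stores); the new code keeps a 64-bit timestamp per 128-byte lock line, lazily allocated per 4 KiB page, so 4096 / 128 = 32 reservation slots per page. A minimal sketch of the indexing math used throughout the diff (helper names are mine, not from the diff):

#include <cstdint>

// addr >> 12 selects the 4 KiB page; (addr & 0xfff) >> 7 selects one of
// the 4096 / 128 = 32 lock lines inside that page.
constexpr std::uint32_t page_index(std::uint32_t addr) { return addr >> 12; }
constexpr std::uint32_t line_index(std::uint32_t addr) { return (addr & 0xfff) >> 7; }

static_assert(line_index(0x80) == 1, "second lock line");
static_assert(line_index(0xfff) == 31, "last lock line of a page");
static_assert(page_index(0x12345678) == 0x12345, "page of an arbitrary address");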
@@ -1,7 +1,10 @@
 #include "stdafx.h"
 #include "Memory.h"
 #include "Emu/System.h"
+#include "Utilities/mutex.h"
 #include "Utilities/Thread.h"
+#include "Utilities/VirtualMemory.h"
+#include "Emu/CPU/CPUThread.h"
 #ifdef _WIN32
 #include <Windows.h>
@@ -18,312 +21,173 @@
 #endif
 #endif
 
-#include "wait_engine.h"
-#include <mutex>
+#include <atomic>
+#include <deque>
 
 namespace vm
 {
-    thread_local u64 g_tls_fault_count{};
+    // Emulated virtual memory (4 GiB)
+    u8* const g_base_addr = static_cast<u8*>(memory_helper::reserve_memory(0x100000000));
 
-    template<std::size_t Size> struct mapped_ptr_deleter
-    {
-        void operator ()(void* ptr)
-        {
-#ifdef _WIN32
-            ::UnmapViewOfFile(ptr);
-#else
-            ::munmap(ptr, Size);
-#endif
-        }
-    };
+    // Memory locations
+    std::vector<std::shared_ptr<block_t>> g_locations;
+
+    // Reservations (lock lines) in a single memory page
+    using reservation_info = std::array<std::atomic<u64>, 4096 / 128>;
+
+    // Registered waiters
+    std::deque<vm::waiter*> g_waiters;
+
+    // Memory mutex
+    shared_mutex g_mutex;
+
+    // Page information
+    struct memory_page
+    {
+        // Memory flags
+        atomic_t<u8> flags;
+
+        atomic_t<u32> waiters;
+
+        // Reservations
+        atomic_t<reservation_info*> reservations;
+
+        // Access reservation info
+        std::atomic<u64>& operator [](u32 addr)
+        {
+            auto ptr = reservations.load();
+
+            if (!ptr)
+            {
+                // Opportunistic memory allocation
+                ptr = new reservation_info{};
+
+                if (auto old_ptr = reservations.compare_and_swap(nullptr, ptr))
+                {
+                    delete ptr;
+                    ptr = old_ptr;
+                }
+            }
+
+            return (*ptr)[(addr & 0xfff) >> 7];
+        }
+    };
 
-    using mapped_ptr_t = std::unique_ptr<u8[], mapped_ptr_deleter<0x100000000>>;
-
-    std::array<mapped_ptr_t, 2> initialize()
-    {
-#ifdef _WIN32
-        const HANDLE memory_handle = ::CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE | SEC_RESERVE, 0x1, 0x0, NULL);
-
-        if (memory_handle == NULL)
-        {
-            MessageBoxA(0, fmt::format("CreateFileMapping() failed (0x%x).", GetLastError()).c_str(), "vm::initialize()", MB_ICONERROR);
-            std::abort();
-        }
-
-        mapped_ptr_t base_addr(static_cast<u8*>(::MapViewOfFile(memory_handle, FILE_MAP_WRITE, 0, 0, 0x100000000)));
-        mapped_ptr_t priv_addr(static_cast<u8*>(::MapViewOfFile(memory_handle, FILE_MAP_WRITE, 0, 0, 0x100000000)));
-
-        ::CloseHandle(memory_handle);
-#else
-        const int memory_handle = ::shm_open("/rpcs3_vm", O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
-
-        if (memory_handle == -1)
-        {
-            std::printf("shm_open('/rpcs3_vm') failed (%d).\n", errno);
-            std::abort();
-        }
-
-        if (::ftruncate(memory_handle, 0x100000000) == -1)
-        {
-            std::printf("ftruncate(memory_handle) failed (%d).\n", errno);
-            ::shm_unlink("/rpcs3_vm");
-            std::abort();
-        }
-
-        mapped_ptr_t base_addr(static_cast<u8*>(::mmap(nullptr, 0x100000000, PROT_NONE, MAP_SHARED, memory_handle, 0)));
-        mapped_ptr_t priv_addr(static_cast<u8*>(::mmap(nullptr, 0x100000000, PROT_NONE, MAP_SHARED, memory_handle, 0)));
-
-        ::shm_unlink("/rpcs3_vm");
-#endif
-
-        std::printf("vm::g_base_addr = %p\nvm::g_priv_addr = %p\n", base_addr.get(), priv_addr.get());
-
-        return{ std::move(base_addr), std::move(priv_addr) };
-    }
-
-    const auto g_addr_set = vm::initialize();
-
-    u8* const g_base_addr = g_addr_set[0].get();
-    u8* const g_priv_addr = g_addr_set[1].get();
-
-    std::array<atomic_t<u8>, 0x100000000ull / 4096> g_pages{}; // information about every page
-
-    std::vector<std::shared_ptr<block_t>> g_locations; // memory locations
-
-    access_violation::access_violation(u64 addr, const char* cause)
-        : std::runtime_error(fmt::format("Access violation %s address 0x%llx", cause, addr))
-    {
-        g_tls_fault_count &= ~(1ull << 63);
-    }
-
-    using reservation_mutex_t = std::mutex;
-
-    thread_ctrl* volatile g_reservation_owner = nullptr;
-
-    u32 g_reservation_addr = 0;
-    u32 g_reservation_size = 0;
-
-    thread_local bool g_tls_did_break_reservation = false;
-
-    reservation_mutex_t g_reservation_mutex;
-
-    void _reservation_set(u32 addr, bool no_access = false)
-    {
-#ifdef _WIN32
-        DWORD old;
-        if (!::VirtualProtect(vm::base(addr & ~0xfff), 4096, no_access ? PAGE_NOACCESS : PAGE_READONLY, &old))
-#else
-        if (::mprotect(vm::base(addr & ~0xfff), 4096, no_access ? PROT_NONE : PROT_READ))
-#endif
-        {
-            fmt::throw_exception("System failure (addr=0x%x)" HERE, addr);
-        }
-    }
-
-    bool _reservation_break(u32 addr)
-    {
-        if (g_reservation_addr >> 12 == addr >> 12)
-        {
-#ifdef _WIN32
-            DWORD old;
-            if (!::VirtualProtect(vm::base(addr & ~0xfff), 4096, PAGE_READWRITE, &old))
-#else
-            if (::mprotect(vm::base(addr & ~0xfff), 4096, PROT_READ | PROT_WRITE))
-#endif
-            {
-                fmt::throw_exception("System failure (addr=0x%x)" HERE, addr);
-            }
-
-            g_reservation_addr = 0;
-            g_reservation_size = 0;
-            g_reservation_owner = nullptr;
-
-            return true;
-        }
-
-        return false;
-    }
+    template <typename T = writer_lock>
+    struct mem_lock
+    {
+        cpu_thread* thread;
+        T lock;
+
+        template <typename X>
+        mem_lock(X&& mtx)
+            : thread(find_thread())
+            , lock(std::forward<X>(mtx))
+        {
+        }
+
+        ~mem_lock()
+        {
+            if (thread)
+            {
+                thread->state -= cpu_flag::is_waiting;
+            }
+        }
+
+        static cpu_thread* find_thread()
+        {
+            if (auto cpu = get_current_cpu_thread())
+            {
+                cpu->state += cpu_flag::is_waiting;
+                return cpu;
+            }
+
+            return nullptr;
+        }
+    };
+
+    // Memory pages
+    std::array<memory_page, 0x100000000 / 4096> g_pages{};
+
+    u64 reservation_acquire(u32 addr, u32 _size)
+    {
+        // Access reservation info: stamp and the lock bit
+        return g_pages[addr >> 12][addr].load(std::memory_order_acquire);
+    }
 
-    void reservation_break(u32 addr)
-    {
-        std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
-
-        const u32 raddr = g_reservation_addr;
-        const u32 rsize = g_reservation_size;
-
-        if ((g_tls_did_break_reservation = _reservation_break(addr)))
-        {
-            lock.unlock(), vm::notify_at(raddr, rsize);
-        }
-    }
+    void reservation_update(u32 addr, u32 _size)
+    {
+        // Update reservation info with new timestamp (unsafe, assume allocated)
+        (*g_pages[addr >> 12].reservations)[(addr & 0xfff) >> 7].store(__rdtsc(), std::memory_order_release);
+    }
 
-    void reservation_acquire(void* data, u32 addr, u32 size)
-    {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
-        const u64 align = 0x80000000ull >> cntlz32(size, true);
-
-        if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
-        {
-            fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
-        }
-
-        const u8 flags = g_pages[addr >> 12];
-
-        if (!(flags & page_writable) || !(flags & page_allocated) || (flags & page_no_reservations))
-        {
-            fmt::throw_exception("Invalid page flags (addr=0x%x, size=0x%x, flags=0x%x)" HERE, addr, size, flags);
-        }
-
-        // break the reservation
-        g_tls_did_break_reservation = g_reservation_owner && _reservation_break(g_reservation_addr);
-
-        // change memory protection to read-only
-        _reservation_set(addr);
-
-        // may not be necessary
-        _mm_mfence();
-
-        // set additional information
-        g_reservation_addr = addr;
-        g_reservation_size = size;
-        g_reservation_owner = thread_ctrl::get_current();
-
-        // copy data
-        std::memcpy(data, vm::base(addr), size);
-    }
+    void waiter::init()
+    {
+        // Register waiter
+        writer_lock lock(g_mutex);
+
+        g_waiters.emplace_back(this);
+    }
+
+    void waiter::test() const
+    {
+        if (std::memcmp(data, vm::base(addr), size) == 0)
+        {
+            return;
+        }
+
+        memory_page& page = g_pages[addr >> 12];
+
+        if (page.reservations == nullptr)
+        {
+            return;
+        }
+
+        if (stamp >= (*page.reservations)[(addr & 0xfff) >> 7].load())
+        {
+            return;
+        }
+
+        if (owner)
+        {
+            owner->notify();
+        }
+    }
 
-    bool reservation_update(u32 addr, const void* data, u32 size)
-    {
-        std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
-
-        const u64 align = 0x80000000ull >> cntlz32(size, true);
-
-        if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
-        {
-            fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
-        }
-
-        if (g_reservation_owner != thread_ctrl::get_current() || g_reservation_addr != addr || g_reservation_size != size)
-        {
-            // atomic update failed
-            return false;
-        }
-
-        // change memory protection to no access
-        _reservation_set(addr, true);
-
-        // update memory using privileged access
-        std::memcpy(vm::base_priv(addr), data, size);
-
-        // free the reservation and restore memory protection
-        _reservation_break(addr);
-
-        // notify waiter
-        lock.unlock(), vm::notify_at(addr, size);
-
-        // atomic update succeeded
-        return true;
-    }
+    waiter::~waiter()
+    {
+        if (owner)
+        {
+            // Unregister waiter
+            writer_lock lock(g_mutex);
+
+            // Find waiter
+            const auto found = std::find(g_waiters.cbegin(), g_waiters.cend(), this);
+
+            if (found != g_waiters.cend())
+            {
+                g_waiters.erase(found);
+            }
+        }
+    }
 
-    bool reservation_query(u32 addr, u32 size, bool is_writing, std::function<bool()> callback)
-    {
-        std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
-
-        if (!check_addr(addr))
-        {
-            return false;
-        }
-
-        // check if current reservation and address may overlap
-        if (g_reservation_addr >> 12 == addr >> 12 && is_writing)
-        {
-            const bool result = callback();
-
-            if (result && size && addr + size - 1 >= g_reservation_addr && g_reservation_addr + g_reservation_size - 1 >= addr)
-            {
-                const u32 raddr = g_reservation_addr;
-                const u32 rsize = g_reservation_size;
-
-                // break the reservation if overlap
-                if ((g_tls_did_break_reservation = _reservation_break(addr)))
-                {
-                    lock.unlock(), vm::notify_at(raddr, rsize);
-                }
-            }
-
-            return result;
-        }
-
-        return true;
-    }
-
-    bool reservation_test(thread_ctrl* current)
-    {
-        const auto owner = g_reservation_owner;
-
-        return owner && owner == current;
-    }
-
-    void reservation_free()
-    {
-        auto thread = thread_ctrl::get_current();
-
-        if (reservation_test(thread))
-        {
-            std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
-            if (g_reservation_owner && g_reservation_owner == thread)
-            {
-                g_tls_did_break_reservation = _reservation_break(g_reservation_addr);
-            }
-        }
-    }
-
-    void reservation_op(u32 addr, u32 size, std::function<void()> proc)
-    {
-        std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
-
-        const u64 align = 0x80000000ull >> cntlz32(size, true);
-
-        if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
-        {
-            fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
-        }
-
-        g_tls_did_break_reservation = false;
-
-        // check and possibly break previous reservation
-        if (g_reservation_owner != thread_ctrl::get_current() || g_reservation_addr != addr || g_reservation_size != size)
-        {
-            if (g_reservation_owner)
-            {
-                _reservation_break(g_reservation_addr);
-            }
-
-            g_tls_did_break_reservation = true;
-        }
-
-        // change memory protection to no access
-        _reservation_set(addr, true);
-
-        // set additional information
-        g_reservation_addr = addr;
-        g_reservation_size = size;
-        g_reservation_owner = thread_ctrl::get_current();
-
-        // may not be necessary
-        _mm_mfence();
-
-        // do the operation
-        proc();
-
-        // remove the reservation
-        _reservation_break(addr);
-
-        // notify waiter
-        lock.unlock(), vm::notify_at(addr, size);
-    }
+    void notify(u32 addr, u32 size)
+    {
+        for (const waiter* ptr : g_waiters)
+        {
+            if (ptr->addr / 128 == addr / 128)
+            {
+                ptr->test();
+            }
+        }
+    }
+
+    void notify_all()
+    {
+        for (const waiter* ptr : g_waiters)
+        {
+            ptr->test();
+        }
+    }
 
     void _page_map(u32 addr, u32 size, u8 flags)
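
Note on memory_page::operator[] above: the reservation array for a page is allocated lazily on first use, and a compare-and-swap resolves the race when two threads allocate at once. atomic_t::compare_and_swap is RPCS3's own wrapper returning the previous value, so a non-null result means another thread won and our copy must be deleted. A standalone sketch of the same pattern with std::atomic (names are illustrative, not from the diff):

#include <array>
#include <atomic>
#include <cstdint>

using slot_array = std::array<std::atomic<std::uint64_t>, 32>;

std::atomic<slot_array*> g_slots{nullptr};

slot_array& get_or_create()
{
    slot_array* ptr = g_slots.load();

    if (!ptr)
    {
        // Allocate optimistically; at most one thread's array survives.
        slot_array* fresh = new slot_array{};
        slot_array* expected = nullptr;

        if (g_slots.compare_exchange_strong(expected, fresh))
        {
            ptr = fresh; // we won the race
        }
        else
        {
            delete fresh;   // another thread won
            ptr = expected; // 'expected' now holds the winner's pointer
        }
    }

    // Like the diff, the array is intentionally never freed afterwards.
    return *ptr;
}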
@@ -335,21 +199,20 @@ namespace vm
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if (g_pages[i])
+            if (g_pages[i].flags)
             {
                 fmt::throw_exception("Memory already mapped (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)" HERE, addr, size, flags, i * 4096);
             }
         }
 
         void* real_addr = vm::base(addr);
-        void* priv_addr = vm::base_priv(addr);
 
 #ifdef _WIN32
         auto protection = flags & page_writable ? PAGE_READWRITE : (flags & page_readable ? PAGE_READONLY : PAGE_NOACCESS);
-        if (!::VirtualAlloc(priv_addr, size, MEM_COMMIT, PAGE_READWRITE) || !::VirtualAlloc(real_addr, size, MEM_COMMIT, protection))
+        if (!::VirtualAlloc(real_addr, size, MEM_COMMIT, protection))
 #else
         auto protection = flags & page_writable ? PROT_WRITE | PROT_READ : (flags & page_readable ? PROT_READ : PROT_NONE);
-        if (::mprotect(priv_addr, size, PROT_READ | PROT_WRITE) || ::mprotect(real_addr, size, protection))
+        if (::mprotect(real_addr, size, protection))
 #endif
         {
             fmt::throw_exception("System failure (addr=0x%x, size=0x%x, flags=0x%x)" HERE, addr, size, flags);
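
Note: the privileged mirror mapping (g_priv_addr) disappears in this hunk; pages are now committed once, directly with the protection derived from the page flags. The POSIX side of that flags-to-protection mapping, as a hedged sketch with made-up flag constants (the real bit values live in the header):

#include <sys/mman.h>

// Stand-ins for vm::page_readable / vm::page_writable.
enum : unsigned char { flag_readable = 1, flag_writable = 2 };

int protection_for(unsigned char flags)
{
    // Mirrors the ternary chain in the diff: writable implies readable.
    if (flags & flag_writable) return PROT_READ | PROT_WRITE;
    if (flags & flag_readable) return PROT_READ;
    return PROT_NONE;
}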
@@ -357,65 +220,75 @@ namespace vm
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if (g_pages[i].exchange(flags | page_allocated))
+            if (g_pages[i].flags.exchange(flags | page_allocated))
             {
                 fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)" HERE, addr, size, flags, i * 4096);
             }
         }
-
-        std::memset(priv_addr, 0, size); // ???
     }
 
     bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         if (!size || (size | addr) % 4096)
         {
             fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
         }
 
-        const u8 flags_inv = flags_set & flags_clear;
+        const u8 flags_both = flags_set & flags_clear;
 
         flags_test |= page_allocated;
+        flags_set &= ~flags_both;
+        flags_clear &= ~flags_both;
 
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if ((g_pages[i] & flags_test) != (flags_test | page_allocated))
+            if ((g_pages[i].flags & flags_test) != (flags_test | page_allocated))
             {
                 return false;
             }
         }
 
-        if (!flags_inv && !flags_set && !flags_clear)
+        if (!flags_set && !flags_clear)
         {
             return true;
         }
 
-        for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
-        {
-            _reservation_break(i * 4096);
-
-            const u8 f1 = g_pages[i].fetch_or(flags_set & ~flags_inv) & (page_writable | page_readable);
-            g_pages[i].fetch_and(~(flags_clear & ~flags_inv));
-            const u8 f2 = (g_pages[i] ^= flags_inv) & (page_writable | page_readable);
-
-            if (f1 != f2)
-            {
-                void* real_addr = vm::base(i * 4096);
-
-#ifdef _WIN32
-                DWORD old;
-
-                auto protection = f2 & page_writable ? PAGE_READWRITE : (f2 & page_readable ? PAGE_READONLY : PAGE_NOACCESS);
-                if (!::VirtualProtect(real_addr, 4096, protection, &old))
-#else
-                auto protection = f2 & page_writable ? PROT_WRITE | PROT_READ : (f2 & page_readable ? PROT_READ : PROT_NONE);
-                if (::mprotect(real_addr, 4096, protection))
-#endif
-                {
-                    fmt::throw_exception("System failure (addr=0x%x, size=0x%x, flags_test=0x%x, flags_set=0x%x, flags_clear=0x%x)" HERE, addr, size, flags_test, flags_set, flags_clear);
-                }
-            }
-        }
+        u8 start_value = 0xff;
+
+        for (u32 start = addr / 4096, end = start + size / 4096, i = start; i < end + 1; i++)
+        {
+            u8 new_val = 0xff;
+
+            if (i < end)
+            {
+                g_pages[i].flags |= flags_set;
+                g_pages[i].flags &= ~flags_clear;
+
+                new_val = g_pages[i].flags & (page_readable | page_writable);
+            }
+
+            if (new_val != start_value)
+            {
+                if (u32 page_size = (i - start) * 4096)
+                {
+#ifdef _WIN32
+                    DWORD old;
+
+                    auto protection = start_value & page_writable ? PAGE_READWRITE : (start_value & page_readable ? PAGE_READONLY : PAGE_NOACCESS);
+                    if (!::VirtualProtect(vm::base(start * 4096), page_size, protection, &old))
+#else
+                    auto protection = start_value & page_writable ? PROT_WRITE | PROT_READ : (start_value & page_readable ? PROT_READ : PROT_NONE);
+                    if (::mprotect(vm::base(start * 4096), page_size, protection))
#endif
+                    {
+                        fmt::throw_exception("System failure (addr=0x%x, size=0x%x, flags_test=0x%x, flags_set=0x%x, flags_clear=0x%x)" HERE, addr, size, flags_test, flags_set, flags_clear);
+                    }
+                }
+
+                start_value = new_val;
+                start = i;
+            }
+        }
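
Note: the rewritten page_protect loop deliberately runs one index past the end (i < end + 1) so the final run is flushed, and coalesces consecutive pages with the same effective protection into a single VirtualProtect/mprotect call. The run-merging idea in isolation, over a plain byte array; 0xff plays the same "impossible value" role as in the diff, where real values are masked to page_readable | page_writable:

#include <cstdint>
#include <cstdio>
#include <vector>

void for_each_run(const std::vector<std::uint8_t>& vals)
{
    std::uint8_t run_value = 0xff; // sentinel: the first element always starts a run
    std::size_t run_start = 0;

    for (std::size_t i = 0; i <= vals.size(); i++) // one past the end, like "i < end + 1"
    {
        const std::uint8_t v = i < vals.size() ? vals[i] : 0xff;

        if (v != run_value)
        {
            if (i > run_start && run_value != 0xff) // 0xff assumed absent from real data
            {
                // One syscall per run instead of one per page.
                std::printf("run: [%zu, %zu) value=0x%x\n", run_start, i, run_value);
            }

            run_value = v;
            run_start = i;
        }
    }
}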
@@ -431,7 +304,7 @@ namespace vm
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if ((g_pages[i] & page_allocated) == 0)
+            if ((g_pages[i].flags & page_allocated) == 0)
             {
                 fmt::throw_exception("Memory not mapped (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
             }
@@ -439,39 +312,29 @@ namespace vm
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            _reservation_break(i * 4096);
-
-            if (!(g_pages[i].exchange(0) & page_allocated))
+            if (!(g_pages[i].flags.exchange(0) & page_allocated))
             {
                 fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
             }
         }
 
         void* real_addr = vm::base(addr);
-        void* priv_addr = vm::base_priv(addr);
 
 #ifdef _WIN32
-        DWORD old;
-
-        if (!::VirtualProtect(real_addr, size, PAGE_NOACCESS, &old) || !::VirtualProtect(priv_addr, size, PAGE_NOACCESS, &old))
+        if (!::VirtualFree(real_addr, size, MEM_DECOMMIT))
 #else
-        if (::mprotect(real_addr, size, PROT_NONE) || ::mprotect(priv_addr, size, PROT_NONE))
+        if (::madvise(real_addr, size, MADV_REMOVE) || ::mprotect(real_addr, size, PROT_NONE))
 #endif
         {
             fmt::throw_exception("System failure (addr=0x%x, size=0x%x)" HERE, addr, size);
         }
     }
 
-    bool check_addr(u32 addr, u32 size)
+    bool check_addr(u32 addr, u32 size, u8 flags)
     {
         if (addr + (size - 1) < addr)
         {
             return false;
         }
 
         for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
         {
-            if ((g_pages[i] & page_allocated) == 0)
+            if (UNLIKELY((g_pages[i % g_pages.size()].flags & flags) != flags))
             {
                 return false;
             }
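
Note: check_addr now verifies an arbitrary flag set instead of hard-coding page_allocated, and the i % g_pages.size() wrap keeps the page-table index in bounds when addr + size - 1 computations touch the top of the 32-bit space. A standalone model of the logic (flag values are placeholders, not the real ones):

#include <cstdint>
#include <vector>

enum : std::uint8_t { page_allocated = 1, page_readable = 2, page_writable = 4 }; // placeholders

bool check_addr(const std::vector<std::uint8_t>& pages, std::uint32_t addr, std::uint32_t size, std::uint8_t flags)
{
    if (addr + (size - 1) < addr) // reject ranges that wrap past 0xFFFFFFFF
    {
        return false;
    }

    for (std::uint32_t i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
    {
        if ((pages[i % pages.size()] & flags) != flags) // every page must carry ALL requested flags
        {
            return false;
        }
    }

    return true;
}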
@@ -533,19 +396,19 @@ namespace vm
         }
     }
 
-    bool block_t::try_alloc(u32 addr, u32 size, u32 sup)
+    bool block_t::try_alloc(u32 addr, u32 size, u8 flags, u32 sup)
     {
         // Check if memory area is already mapped
         for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
         {
-            if (g_pages[i])
+            if (g_pages[i].flags)
             {
                 return false;
             }
         }
 
         // Map "real" memory pages
-        _page_map(addr, size, page_readable | page_writable);
+        _page_map(addr, size, flags);
 
         // Add entry
         m_map[addr] = size;
@@ -565,7 +428,7 @@ namespace vm
     block_t::~block_t()
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         // Deallocate all memory
         for (auto& entry : m_map)
@@ -576,7 +439,7 @@ namespace vm
     u32 block_t::alloc(u32 size, u32 align, u32 sup)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         // Align to minimal page size
         size = ::align(size, 4096);
@@ -593,10 +456,21 @@ namespace vm
             return 0;
         }
 
+        u8 pflags = page_readable | page_writable;
+
+        if (align >= 0x100000)
+        {
+            pflags |= page_1m_size;
+        }
+        else if (align >= 0x10000)
+        {
+            pflags |= page_64k_size;
+        }
+
         // Search for an appropriate place (unoptimized)
         for (u32 addr = ::align(this->addr, align); addr < this->addr + this->size - 1; addr += align)
         {
-            if (try_alloc(addr, size, sup))
+            if (try_alloc(addr, size, pflags, sup))
             {
                 return addr;
             }
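
Note: alloc now records the requested page size in the page flags, which is presumably what the new sys_memory_get_page_attribute syscall from the commit message reports back: alignments of at least 0x100000 mark the pages 1 MiB-sized, at least 0x10000 mark them 64 KiB-sized. The selection in isolation (bit values are placeholders; the real ones are defined in the header):

#include <cstdint>

enum : std::uint8_t // placeholder bit values
{
    page_readable = 1 << 0,
    page_writable = 1 << 1,
    page_64k_size = 1 << 5,
    page_1m_size  = 1 << 6,
};

std::uint8_t flags_for_align(std::uint32_t align)
{
    std::uint8_t pflags = page_readable | page_writable;

    if (align >= 0x100000)
    {
        pflags |= page_1m_size;  // e.g. alloc(size, 0x100000)
    }
    else if (align >= 0x10000)
    {
        pflags |= page_64k_size; // e.g. alloc(size, 0x10000)
    }

    return pflags;
}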
@@ -607,7 +481,7 @@ namespace vm
     u32 block_t::falloc(u32 addr, u32 size, u32 sup)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         // align to minimal page size
         size = ::align(size, 4096);
@@ -618,7 +492,7 @@ namespace vm
             return 0;
         }
 
-        if (!try_alloc(addr, size, sup))
+        if (!try_alloc(addr, size, page_readable | page_writable, sup))
         {
             return 0;
         }
@@ -628,7 +502,7 @@ namespace vm
     u32 block_t::dealloc(u32 addr, u32* sup_out)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         const auto found = m_map.find(addr);
@@ -656,7 +530,7 @@ namespace vm
     u32 block_t::used()
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<reader_lock> lock(g_mutex);
 
         u32 result = 0;
@@ -670,7 +544,7 @@ namespace vm
     std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         if (!size || (size | addr) % 4096)
         {
@@ -692,7 +566,7 @@ namespace vm
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if (g_pages[i])
+            if (g_pages[i].flags)
             {
                 fmt::throw_exception("Unexpected pages allocated (current_addr=0x%x)" HERE, i * 4096);
             }
@@ -707,7 +581,7 @@ namespace vm
     std::shared_ptr<block_t> unmap(u32 addr, bool must_be_empty)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<writer_lock> lock(g_mutex);
 
         for (auto it = g_locations.begin(); it != g_locations.end(); it++)
         {
@@ -729,7 +603,7 @@ namespace vm
     std::shared_ptr<block_t> get(memory_location_t location, u32 addr)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+        mem_lock<reader_lock> lock(g_mutex);
 
         if (location != any)
         {
@@ -754,8 +628,6 @@ namespace vm
         return nullptr;
     }
 
-    extern void start();
-
     namespace ps3
     {
         void init()
@@ -768,8 +640,6 @@ namespace vm
                 std::make_shared<block_t>(0xD0000000, 0x10000000), // stack
                 std::make_shared<block_t>(0xE0000000, 0x20000000), // SPU reserved
             };
-
-            vm::start();
         }
     }
@@ -784,8 +654,6 @@ namespace vm
                 std::make_shared<block_t>(0xC0000000, 0x10000000), // video (arbitrarily)
                 std::make_shared<block_t>(0xD0000000, 0x10000000), // stack (arbitrarily)
             };
-
-            vm::start();
         }
     }
@@ -803,19 +671,14 @@ namespace vm
                 std::make_shared<block_t>(0x00010000, 0x00004000), // scratchpad
                 std::make_shared<block_t>(0x88000000, 0x00800000), // kernel
             };
-
-            vm::start();
         }
     }
 
     void close()
     {
         g_locations.clear();
-    }
 
-    [[noreturn]] void throw_access_violation(u64 addr, const char* cause)
-    {
-        throw access_violation(addr, cause);
+        memory_helper::free_reserved_memory(g_base_addr, 0x100000000);
     }
 }
@@ -844,30 +707,29 @@ void fmt_class_string<vm::_ptr_base<const char>>::format(std::string& out, u64 arg)
     const auto start = out.size();
 
-    try
-    {
-        out += u8"“";
+    out += u8"“";
 
-        for (vm::_ptr_base<const volatile char> ptr = vm::cast(arg);; ptr++)
+    for (vm::_ptr_base<const volatile char> ptr = vm::cast(arg);; ptr++)
+    {
+        if (!vm::check_addr(ptr.addr()))
         {
-            if (const char ch = *ptr)
-            {
-                out += ch;
-            }
-            else
-            {
-                break;
-            }
+            // TODO: optimize checks
+            out.resize(start);
+            out += u8"«INVALID_ADDRESS:";
+            fmt_class_string<u32>::format(out, arg);
+            out += u8"»";
+            return;
         }
 
-        out += u8"”";
-    }
-    catch (const vm::access_violation&)
-    {
-        // Recover from invalid memory access
-        out.resize(start);
-        out += u8"«INVALID_ADDRESS:";
-        fmt_class_string<u32>::format(out, arg);
-        out += u8"»";
+        if (const char ch = *ptr)
+        {
+            out += ch;
+        }
+        else
+        {
+            break;
+        }
     }
+
+    out += u8"”";
 }
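
Note: with the access_violation exception class removed, the string formatter above validates every character's address with vm::check_addr up front instead of catching a fault after the fact. The same guard-before-read pattern as a self-contained sketch; the toy memory and validator stand in for vm::base and vm::check_addr:

#include <array>
#include <cstdint>
#include <string>

std::array<char, 0x10000> g_mem{};                          // toy guest memory
bool addr_ok(std::uint32_t addr) { return addr < 0x1000; }  // only the first page is readable

std::string read_c_string(std::uint32_t addr)
{
    std::string out;

    for (std::uint32_t p = addr;; p++)
    {
        if (!addr_ok(p))
        {
            return "<INVALID_ADDRESS>"; // bail out instead of faulting
        }

        if (const char ch = g_mem[p])
        {
            out += ch;
        }
        else
        {
            break; // NUL terminator
        }
    }

    return out;
}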