Mirror of https://github.com/RPCS3/rpcs3.git
Silly bugs fixed
This commit is contained in:
parent 48c75105e2
commit b8a27d8a4c

13 changed files with 105 additions and 104 deletions
@@ -137,23 +137,23 @@ union spu_channel_t
         u32 value;
     };

-    atomic_t<sync_var_t> sync_var; // atomic variable
+    atomic_t<sync_var_t> sync_var;

 public:
     bool try_push(u32 value)
     {
-        bool out_result;
-
-        sync_var.atomic_op([&out_result, value](sync_var_t& data)
+        return sync_var.atomic_op([=](sync_var_t& data) -> bool
         {
-            if ((out_result = data.count == 0))
+            if (data.count == 0)
             {
                 data.count = 1;
                 data.value = value;
-            }
-        });

-        return out_result;
+                return true;
+            }
+
+            return false;
+        });
     }

     void push_bit_or(u32 value)
@@ -168,33 +168,31 @@ public:

     bool try_pop(u32& out_value)
     {
-        bool out_result;
-
-        sync_var.atomic_op([&out_result, &out_value](sync_var_t& data)
+        return sync_var.atomic_op([&](sync_var_t& data) -> bool
         {
-            if ((out_result = data.count != 0))
+            if (data.count != 0)
             {
                 out_value = data.value;

                 data.count = 0;
                 data.value = 0;
-            }
-        });

-        return out_result;
+                return true;
+            }
+
+            return false;
+        });
     }

     u32 pop_uncond()
     {
-        u32 out_value;
-
-        sync_var.atomic_op([&out_value](sync_var_t& data)
+        return sync_var.atomic_op([](sync_var_t& data) -> u32
         {
-            out_value = data.value;
             data.count = 0;
-            // value is not cleared and may be read again
-        });

-        return out_value;
+            // value is not cleared and may be read again
+            return data.value;
+        });
     }

     void set_value(u32 value, u32 count = 1)
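
The change above drops the out-parameter pattern (a `bool out_result` captured by reference) in favour of lambdas that return their result straight from `atomic_op`. The following is a standalone sketch of the same shape built only on std::atomic with a compare-exchange loop; the names (spu_channel_sketch, state) are invented for illustration and this is not RPCS3 code.

#include <atomic>
#include <cstdint>

struct spu_channel_sketch
{
    // bit 32 = "count" flag, low 32 bits = value
    std::atomic<std::uint64_t> state{0};

    bool try_push(std::uint32_t value)
    {
        std::uint64_t old = state.load();
        do
        {
            if (old >> 32)
            {
                return false; // channel already holds a value
            }
        }
        while (!state.compare_exchange_weak(old, (1ull << 32) | value));

        return true;
    }

    bool try_pop(std::uint32_t& out_value)
    {
        std::uint64_t old = state.load();
        do
        {
            if (!(old >> 32))
            {
                return false; // channel is empty
            }
        }
        while (!state.compare_exchange_weak(old, 0));

        out_value = static_cast<std::uint32_t>(old);
        return true;
    }
};

As in the patched try_push/try_pop, the success/failure result is computed inside the atomic update itself instead of being smuggled out through a captured local.
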
@@ -46,6 +46,7 @@ public:
     VirtualMemoryBlock() = default;

     VirtualMemoryBlock* SetRange(const u32 start, const u32 size);
+    void Clear() { m_mapped_memory.clear(); m_reserve_size = 0; m_range_start = 0; m_range_size = 0; }
     u32 GetStartAddr() const { return m_range_start; }
     u32 GetSize() const { return m_range_size; }
     bool IsInMyRange(const u32 addr, const u32 size);
@@ -85,9 +85,7 @@ namespace vm
         std::mutex m_mutex;

     public:
-        reservation_mutex_t()
-        {
-        }
+        reservation_mutex_t() = default;

         bool do_notify;

@@ -130,10 +128,9 @@ namespace vm
                 m_cv.notify_one();
             }
         }

     };

-    const thread_ctrl_t* g_reservation_owner = nullptr;
-
+    const thread_ctrl_t* volatile g_reservation_owner = nullptr;
     u32 g_reservation_addr = 0;
     u32 g_reservation_size = 0;
@@ -167,9 +164,9 @@ namespace vm
             throw EXCEPTION("System failure (addr=0x%x)", addr);
         }

-        g_reservation_owner = nullptr;
         g_reservation_addr = 0;
         g_reservation_size = 0;
+        g_reservation_owner = nullptr;
     }
 }

@@ -270,9 +267,16 @@ namespace vm
         return true;
     }

+    bool reservation_test()
+    {
+        const auto owner = g_reservation_owner;
+
+        return owner && owner == get_current_thread_ctrl();
+    }
+
     void reservation_free()
     {
-        if (g_reservation_owner && g_reservation_owner == get_current_thread_ctrl())
+        if (reservation_test())
         {
             std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

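
A minimal sketch of what the new reservation_test() does: take one snapshot of the shared owner pointer, then compare that snapshot against the current thread's identity, so the null check and the equality check cannot observe two different values. It is built on std::atomic and thread_local; the names (g_owner_sketch, current_thread_id) are placeholders, not the emulator's.

#include <atomic>

static std::atomic<const void*> g_owner_sketch{nullptr};

static const void* current_thread_id()
{
    thread_local char marker; // the address is unique to each thread
    return &marker;
}

void reservation_acquire_sketch()
{
    g_owner_sketch.store(current_thread_id());
}

bool reservation_test_sketch()
{
    const void* owner = g_owner_sketch.load(); // single read of the shared pointer
    return owner && owner == current_thread_id();
}
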
@@ -404,10 +408,8 @@ namespace vm
         return true;
     }

-    void page_unmap(u32 addr, u32 size)
+    void _page_unmap(u32 addr, u32 size)
     {
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
         assert(size && (size | addr) % 4096 == 0);

         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
@@ -544,11 +546,13 @@ namespace vm

     block_t::~block_t()
     {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
         // deallocate all memory
         for (auto& entry : m_map)
         {
             // unmap memory pages
-            vm::page_unmap(entry.first, entry.second);
+            vm::_page_unmap(entry.first, entry.second);
         }
     }

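
The rename to _page_unmap goes together with moving the reservation lock out of the helper: block_t::~block_t() now takes the lock once and calls the unlocked helper per mapping. Below is a standalone sketch of that convention with invented names (g_lock_sketch, toy_block) and std::mutex standing in for reservation_mutex_t; it is not the emulator's code.

#include <cstdint>
#include <map>
#include <mutex>

static std::mutex g_lock_sketch;

// underscore-prefixed helper: assumes the caller already holds g_lock_sketch
static void _unmap_pages_sketch(std::uint32_t addr, std::uint32_t size)
{
    (void)addr;
    (void)size; // page bookkeeping would go here
}

struct toy_block
{
    std::map<std::uint32_t, std::uint32_t> m_map; // addr -> size

    ~toy_block()
    {
        std::lock_guard<std::mutex> lock(g_lock_sketch); // lock once, not per entry

        for (const auto& entry : m_map)
        {
            _unmap_pages_sketch(entry.first, entry.second);
        }
    }
};
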
@@ -568,7 +572,7 @@ namespace vm
         // return if size is invalid
         if (!size || size > this->size)
         {
-            return false;
+            return 0;
         }

         // search for an appropriate place (unoptimized)
@@ -578,9 +582,14 @@ namespace vm
             {
                 return addr;
             }
+
+            if (used.load() + size > this->size)
+            {
+                return 0;
+            }
         }

-        return false;
+        return 0;
     }

     u32 block_t::falloc(u32 addr, u32 size)
@@ -593,12 +602,12 @@ namespace vm
         // return if addr or size is invalid
         if (!size || size > this->size || addr < this->addr || addr + size - 1 >= this->addr + this->size - 1)
         {
-            return false;
+            return 0;
         }

         if (!try_alloc(addr, size))
         {
-            return false;
+            return 0;
         }

         return addr;
@@ -614,28 +623,28 @@ namespace vm
         {
             const u32 size = found->second;

-            // unmap memory pages
-            vm::page_unmap(addr, size);
-
             // remove entry
             m_map.erase(found);

             // return "physical" memory
             used -= size;

+            // unmap memory pages
+            std::lock_guard<reservation_mutex_t>{ g_reservation_mutex }, vm::_page_unmap(addr, size);
+
             return true;
         }

         return false;
     }

-    std::shared_ptr<block_t> map(u32 addr, u32 size, u32 flags)
+    std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags)
     {
         std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

-        if (!size || (size | addr) % 4096 || flags)
+        if (!size || (size | addr) % 4096)
         {
-            throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
+            throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
         }

         for (auto& block : g_locations)
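
The hunk above uses a less common idiom: a temporary lock_guard on the left of the comma operator, so the mutex is held exactly for the single call on the right and released at the end of the full expression. A self-contained illustration with std::mutex, using invented names:

#include <mutex>

static std::mutex g_mutex_sketch;
static int g_counter_sketch = 0;

void bump_under_lock()
{
    // the temporary guard lives until the end of the full expression,
    // so the increment happens with g_mutex_sketch held
    std::lock_guard<std::mutex>{ g_mutex_sketch }, ++g_counter_sketch;
}

It behaves like wrapping the single statement in its own block with a named lock_guard, just without the extra scope.
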
@@ -659,7 +668,7 @@ namespace vm
             }
         }

-        auto block = std::make_shared<block_t>(addr, size);
+        auto block = std::make_shared<block_t>(addr, size, flags);

         g_locations.emplace_back(block);

@@ -44,6 +44,9 @@ namespace vm
     // Process a memory access error if it's caused by the reservation
     bool reservation_query(u32 addr, u32 size, bool is_writing, std::function<bool()> callback);

+    // Returns true if the current thread owns reservation
+    bool reservation_test();
+
     // Break all reservations created by the current thread
     void reservation_free();

@@ -66,7 +69,8 @@ namespace vm
     // Unmap memory at specified address (in optionally specified memory location)
     bool dealloc(u32 addr, memory_location_t location = any);

-    class block_t
+    // Object that handles memory allocations inside specific constant bounds ("location"), currently non-virtual
+    class block_t final
     {
         std::map<u32, u32> m_map; // addr -> size mapping of mapped locations
         std::mutex m_mutex;
@@ -76,9 +80,10 @@ namespace vm
     public:
         block_t() = delete;

-        block_t(u32 addr, u32 size)
+        block_t(u32 addr, u32 size, u64 flags = 0)
             : addr(addr)
             , size(size)
+            , flags(flags)
         {
         }

@@ -87,6 +92,7 @@ namespace vm
     public:
         const u32 addr; // start address
         const u32 size; // total size
+        const u64 flags; // currently unused

         atomic_t<u32> used{}; // amount of memory used, may be increased manually prevent some memory from allocating

@@ -101,7 +107,7 @@ namespace vm
     };

     // create new memory block with specified parameters and return it
-    std::shared_ptr<block_t> map(u32 addr, u32 size, u32 flags);
+    std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags = 0);

     // delete existing memory block with specified start address
     std::shared_ptr<block_t> unmap(u32 addr);
@@ -307,7 +307,7 @@ Module cellFiber("cellFiber", []()
     REG_FUNC_NR(cellFiber, cellFiberPpuExit);
     REG_FUNC_NR(cellFiber, cellFiberPpuYield);
     REG_FUNC_NR(cellFiber, cellFiberPpuJoinFiber);
-    REG_FUNC_NR(cellFiber, cellFiberPpuSelf);
+    REG_FUNC(cellFiber, cellFiberPpuSelf);
     REG_FUNC_NR(cellFiber, cellFiberPpuSendSignal);
     REG_FUNC_NR(cellFiber, cellFiberPpuWaitSignal);
     REG_FUNC_NR(cellFiber, cellFiberPpuWaitFlag);
@@ -324,15 +324,17 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)

     libmixer.Warning("*** surMixer created (ch1=%d, ch2=%d, ch6=%d, ch8=%d)", config->chStrips1, config->chStrips2, config->chStrips6, config->chStrips8);

-    auto ppu = Emu.GetIdManager().make_ptr<PPUThread>("Surmixer Thread");
+    const auto ppu = Emu.GetIdManager().make_ptr<PPUThread>("Surmixer Thread");
     ppu->prio = 1001;
     ppu->stack_size = 0x10000;
-    ppu->custom_task = [](PPUThread& CPU)
+    ppu->custom_task = [](PPUThread& ppu)
     {
         AudioPortConfig& port = g_audio.ports[g_surmx.audio_port];

-        while (port.state.load() != AUDIO_PORT_STATE_CLOSED && !Emu.IsStopped())
+        while (port.state.load() != AUDIO_PORT_STATE_CLOSED)
         {
+            CHECK_EMU_STATUS;
+
             if (mixcount > (port.tag + 0)) // adding positive value (1-15): preemptive buffer filling (hack)
             {
                 std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
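
The loop condition above no longer tests Emu.IsStopped(); instead each iteration starts with CHECK_EMU_STATUS. Below is a rough standalone model of that loop shape, with plain atomics standing in for the emulator state and a simple early return standing in for whatever the macro does (it may throw in the real code); names are invented.

#include <atomic>

static std::atomic<bool> g_emu_stopped{false}; // stand-in for Emu.IsStopped()
static std::atomic<bool> g_port_closed{false}; // stand-in for the audio port state

void mixer_loop_sketch()
{
    while (!g_port_closed.load())
    {
        // what CHECK_EMU_STATUS accomplishes at the top of each iteration
        if (g_emu_stopped.load())
        {
            return;
        }

        // ... mix one block of audio ...
    }
}
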
@@ -346,7 +348,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
             memset(mixdata, 0, sizeof(mixdata));
             if (surMixerCb)
             {
-                surMixerCb(CPU, surMixerCbArg, (u32)mixcount, 256);
+                surMixerCb(ppu, surMixerCbArg, (u32)mixcount, 256);
             }

             //u64 stamp1 = get_system_time();
@@ -453,14 +455,12 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)

         surMixerCb.set(0);

-        const u32 id = CPU.GetId();
-
-        CallAfter([id]()
-        {
-            Emu.GetIdManager().remove<PPUThread>(id);
-        });
+        Emu.GetIdManager().remove<PPUThread>(ppu.GetId());
     };

+    ppu->Run();
+    ppu->Exec();
+
     return CELL_OK;
 }

@@ -18,7 +18,7 @@ s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribu

     LV2_LOCK;

-    const auto mutex = std::move(Emu.GetIdManager().get<lv2_mutex_t>(mutex_id));
+    const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(mutex_id);

     if (!mutex)
     {
@@ -211,7 +211,7 @@ s32 sys_cond_wait(PPUThread& ppu, u32 cond_id, u64 timeout)
     // reown the mutex (could be set when notified)
     if (!cond->mutex->owner)
     {
-        cond->mutex->owner = std::move(ppu.shared_from_this());
+        cond->mutex->owner = ppu.shared_from_this();
     }

     if (cond->mutex->owner.get() != &ppu)
@@ -49,27 +49,10 @@ s32 sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
         }
     }

-    // Available memory reserved for containers
-    u32 available = 0;
-
-    // Check all containers
-    for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
-    {
-        available += ct->size - ct->used;
-    }
-
-    const auto area = vm::get(vm::user_space);
-
-    // Check available memory
-    if (area->size < area->used.load() + available + size)
-    {
-        return CELL_ENOMEM;
-    }
-
     // Allocate memory
     const u32 addr =
-        flags == SYS_MEMORY_PAGE_SIZE_1M ? area->alloc(size, 0x100000) :
-        flags == SYS_MEMORY_PAGE_SIZE_64K ? area->alloc(size, 0x10000) :
+        flags == SYS_MEMORY_PAGE_SIZE_1M ? vm::alloc(size, vm::user_space, 0x100000) :
+        flags == SYS_MEMORY_PAGE_SIZE_64K ? vm::alloc(size, vm::user_space, 0x10000) :
         throw EXCEPTION("Unexpected flags");

     if (!addr)
@@ -137,10 +120,15 @@ s32 sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32
         return CELL_ENOMEM;
     }

+    const auto area = vm::get(vm::user_space);
+
+    // Return "physical" memory required for allocation
+    area->used -= size;
+
     // Allocate memory
     const u32 addr =
-        flags == SYS_MEMORY_PAGE_SIZE_1M ? vm::alloc(size, vm::user_space, 0x100000) :
-        flags == SYS_MEMORY_PAGE_SIZE_64K ? vm::alloc(size, vm::user_space, 0x10000) :
+        flags == SYS_MEMORY_PAGE_SIZE_1M ? area->alloc(size, 0x100000) :
+        flags == SYS_MEMORY_PAGE_SIZE_64K ? area->alloc(size, 0x10000) :
         throw EXCEPTION("Unexpected flags");

     if (!addr)
@@ -164,6 +152,8 @@ s32 sys_memory_free(u32 addr)

     LV2_LOCK;

+    const auto area = vm::get(vm::user_space);
+
     // Check all memory containers
     for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
     {
@@ -171,20 +161,25 @@ s32 sys_memory_free(u32 addr)

         if (found != ct->allocs.end())
         {
-            if (!vm::dealloc(addr, vm::user_space))
+            const u32 size = found->second;
+
+            if (!area->dealloc(addr))
             {
-                throw EXCEPTION("Memory not deallocated (cid=0x%x, addr=0x%x, size=0x%x)", ct->id, addr, found->second);
+                throw EXCEPTION("Memory not deallocated (cid=0x%x, addr=0x%x, size=0x%x)", ct->id, addr, size);
             }

-            // Return memory size
-            ct->used -= found->second;
+            // Return memory
+            ct->used -= size;
             ct->allocs.erase(found);

+            // Fix "physical" memory
+            area->used += size;
+
             return CELL_OK;
         }
     }

-    if (!vm::dealloc(addr, vm::user_space))
+    if (!area->dealloc(addr))
     {
         return CELL_EINVAL;
     }
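
The sys_memory changes above rework the bookkeeping so that a container's size stays charged against the user-space area: freeing a container allocation returns the bytes to the container and immediately re-charges them to the area, leaving area.used unchanged. A toy model of that invariant follows; the struct and field names are made up for the example and this is not the emulator's code.

#include <cassert>
#include <cstdint>

struct toy_area      { std::uint32_t size; std::uint32_t used; };
struct toy_container { std::uint32_t size; std::uint32_t used; };

void toy_container_free(toy_area& area, toy_container& ct, std::uint32_t size)
{
    area.used -= size; // what the underlying dealloc does
    ct.used   -= size; // return memory to the container
    area.used += size; // fix "physical" memory: the container keeps its reservation
}

int main()
{
    toy_area area{0x1000000, 0x400000};   // the container's full size is pre-charged
    toy_container ct{0x400000, 0x100000}; // 1 MiB currently allocated from it

    toy_container_free(area, ct, 0x100000);

    assert(ct.used == 0);
    assert(area.used == 0x400000); // unchanged: the container still reserves it
}
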
@@ -213,20 +208,18 @@ s32 sys_memory_get_user_memory_size(vm::ptr<sys_memory_info_t> mem_info)
     LV2_LOCK;

     u32 reserved = 0;
-    u32 available = 0;

     // Check all memory containers
     for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
     {
         reserved += ct->size;
-        available += ct->size - ct->used;
     }

     const auto area = vm::get(vm::user_space);

     // Fetch the user memory available
     mem_info->total_user_memory = area->size - reserved;
-    mem_info->available_user_memory = area->size - area->used.load() - available;
+    mem_info->available_user_memory = area->size - area->used.load();

     return CELL_OK;
 }
@@ -246,19 +239,17 @@ s32 sys_memory_container_create(vm::ptr<u32> cid, u32 size)
     }

     u32 reserved = 0;
-    u32 available = 0;

     // Check all memory containers
     for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
     {
         reserved += ct->size;
-        available += ct->size - ct->used;
     }

     const auto area = vm::get(vm::user_space);

     if (area->size < reserved + size ||
-        area->size - area->used.load() < available + size)
+        area->size - area->used.load() < size)
     {
         return CELL_ENOMEM;
     }
@@ -42,7 +42,7 @@ s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32
     {
         for (u32 addr = ::align(0x30000000, alignment); addr < 0xC0000000; addr += static_cast<u32>(alignment))
         {
-            if (const auto area = vm::map(addr, static_cast<u32>(size), 0))
+            if (const auto area = vm::map(addr, static_cast<u32>(size), flags))
             {
                 *alloc_addr = addr;

@@ -63,7 +63,7 @@ s32 sys_mmapper_allocate_fixed_address()

     LV2_LOCK;

-    if (!vm::map(0xB0000000, 0x10000000, 0))
+    if (!vm::map(0xB0000000, 0x10000000)) // TODO: set correct flags (they aren't used currently though)
     {
         return CELL_EEXIST;
     }
@@ -76,7 +76,7 @@ s32 sys_mutex_destroy(u32 mutex_id)
     }

     // assuming that the mutex is locked immediately by another waiting thread when unlocked
-    if (!mutex->owner || !mutex->sq.empty())
+    if (mutex->owner || mutex->sq.size())
     {
         return CELL_EBUSY;
     }
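
The sys_mutex_destroy fix above inverts a busy test that previously flagged an unowned mutex as busy. Restated as a standalone predicate over a toy mutex (the types are invented; the lv2 mutex in the diff keeps an owner and a sleep queue sq):

#include <deque>
#include <memory>

struct toy_lv2_mutex
{
    std::shared_ptr<int> owner; // stand-in for the owning thread
    std::deque<int> sq;         // stand-in for the sleep queue
};

// EBUSY condition after the fix: the mutex is owned, or someone is waiting on it
bool is_busy(const toy_lv2_mutex& m)
{
    return m.owner != nullptr || !m.sq.empty();
}
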
@@ -127,7 +127,7 @@ s32 sys_mutex_lock(PPUThread& ppu, u32 mutex_id, u64 timeout)
     // lock immediately if not locked
     if (!mutex->owner)
     {
-        mutex->owner = std::move(ppu.shared_from_this());
+        mutex->owner = ppu.shared_from_this();

         return CELL_OK;
     }
@@ -200,7 +200,7 @@ s32 sys_mutex_trylock(PPUThread& ppu, u32 mutex_id)
     }

     // own the mutex if free
-    mutex->owner = std::move(ppu.shared_from_this());
+    mutex->owner = ppu.shared_from_this();

     return CELL_OK;
 }
@@ -31,12 +31,7 @@ void _sys_ppu_thread_exit(PPUThread& ppu, u64 errorcode)

     if (!ppu.is_joinable)
     {
-        const u32 id = ppu.GetId();
-
-        CallAfter([id]()
-        {
-            Emu.GetIdManager().remove<PPUThread>(id);
-        });
+        Emu.GetIdManager().remove<PPUThread>(ppu.GetId());
     }

     ppu.Exit();
@@ -19,7 +19,7 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::p
     const u32 new_addr = vm::check_addr(0x60000000) ? 0x70000000 : 0x60000000;

     // Map memory
-    const auto area = vm::map(new_addr, vsize, 0);
+    const auto area = vm::map(new_addr, vsize, flag);

     // Alloc memory
     if (!area || !area->alloc(vsize))
@@ -402,6 +402,7 @@ void Emulator::Stop()
     GetModuleManager().Close();

     CurGameInfo.Reset();
+    RSXIOMem.Clear();
     vm::close();

     finalize_ppu_exec_map();