Mirror of https://github.com/RPCS3/rpcs3.git

Commit 7ab1e64aab: Merge branch 'master' of https://github.com/DHrpcs3/rpcs3

5 changed files with 155 additions and 123 deletions
@@ -591,6 +591,18 @@ bool get_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, size_
 		case 8: out_value = (u64)imm_value; return true; // sign-extended
 		}
 	}
+	else if (reg == X64_IMM16)
+	{
+		// load the immediate value (assuming it's at the end of the instruction)
+		out_value = *(s16*)(RIP(context) + i_size - 2);
+		return true;
+	}
+	else if (reg == X64_IMM8)
+	{
+		// load the immediate value (assuming it's at the end of the instruction)
+		out_value = *(s8*)(RIP(context) + i_size - 1);
+		return true;
+	}
 	else if (reg == X64R_ECX)
 	{
 		out_value = (u32)RCX(context);
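For context on what the new branches compute: x86 places immediate operands at the end of the instruction encoding, so an 8- or 16-bit immediate begins 1 or 2 bytes before `RIP + i_size`, and loading it through a signed type sign-extends it, matching the comment on the existing 64-bit case. A minimal standalone sketch of the same idea (a hypothetical helper, not RPCS3 code):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch: read a trailing 1- or 2-byte immediate and sign-extend it to 64 bits.
static uint64_t read_trailing_imm(const uint8_t* rip, std::size_t i_size, std::size_t imm_bytes)
{
    int64_t value = 0;
    if (imm_bytes == 1)
    {
        int8_t v;
        std::memcpy(&v, rip + i_size - 1, 1);
        value = v; // sign-extends s8 -> s64, like the (s8*) load above
    }
    else if (imm_bytes == 2)
    {
        int16_t v;
        std::memcpy(&v, rip + i_size - 2, 2);
        value = v; // sign-extends s16 -> s64, like the (s16*) load above
    }
    return static_cast<uint64_t>(value);
}
```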
@@ -2284,14 +2284,18 @@ s32 cellSpursShutdownTaskset(vm::ptr<CellSpursTaskset> taskset)
 
 u32 _cellSpursGetSdkVersion()
 {
-	s32 sdk_version;
+	// Commenting this out for now since process_get_sdk_version does not return
+	// the correct SDK version and instead returns a version too high for the game
+	// and causes SPURS to fail.
+	//s32 sdk_version;
 
-	if (process_get_sdk_version(process_getpid(), sdk_version) != CELL_OK)
-	{
-		throw __FUNCTION__;
-	}
+	//if (process_get_sdk_version(process_getpid(), sdk_version) != CELL_OK)
+	//{
+	//	throw __FUNCTION__;
+	//}
 
-	return sdk_version;
+	//return sdk_version;
+	return 1;
 }
 
 s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::ptr<u32> elf_addr, vm::ptr<u32> context_addr, u32 context_size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg)
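The hardcoded return value deserves a note: SPURS code paths typically compare the reported SDK version against feature thresholds, so reporting a very low version such as 1 should steer callers onto the most conservative legacy path until `process_get_sdk_version` reports a trustworthy value. A hypothetical illustration only; the threshold below is invented for the example, not taken from the source:

```cpp
#include <cstdint>

// Hypothetical gate, not from the commit: a low reported SDK version
// selects the legacy behaviour.
static bool use_legacy_spurs_path(uint32_t sdk_version)
{
    const uint32_t assumed_threshold = 0x210000; // invented example value
    return sdk_version < assumed_threshold;
}
```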
@@ -2333,10 +2337,11 @@ s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm:
 	alloc_ls_blocks = context_size > 0x3D400 ? 0x7A : ((context_size - 0x400) >> 11);
 	if (ls_pattern.addr() != 0)
 	{
+		u128 ls_pattern_128 = u128::from64r(ls_pattern->_u64[0], ls_pattern->_u64[1]);
 		u32 ls_blocks = 0;
 		for (auto i = 0; i < 128; i++)
 		{
-			if (ls_pattern->_u128.value()._bit[i])
+			if (ls_pattern_128._bit[i])
 			{
 				ls_blocks++;
 			}
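The bit loop above is a population count over the 128-bit LS pattern. A sketch of the equivalent computation on the two 64-bit halves that `u128::from64r` packs (standalone code, not the emulator's types):

```cpp
#include <cstdint>

// Count set bits across both halves of the 128-bit pattern.
static uint32_t count_ls_blocks(uint64_t hi, uint64_t lo)
{
    auto popcount64 = [](uint64_t v) -> uint32_t
    {
        uint32_t n = 0;
        while (v) { v &= v - 1; n++; } // clears the lowest set bit each iteration
        return n;
    };
    return popcount64(hi) + popcount64(lo);
}
```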
@@ -2348,7 +2353,7 @@ s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm:
 		}
 
 		u128 _0 = u128::from32(0);
-		if ((ls_pattern->_u128.value() & u128::from32r(0xFC000000)) != _0)
+		if ((ls_pattern_128 & u128::from32r(0xFC000000)) != _0)
 		{
 			// Prevent save/restore to SPURS management area
 			return CELL_SPURS_TASK_ERROR_INVAL;
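Assumed reading of the mask check: bit i of the pattern selects the i-th 0x800-byte block of SPU local storage, so `from32r(0xFC000000)` flags the six most significant block bits, i.e. the first 6 * 0x800 = 0x3000 bytes where the SPURS kernel lives, and a pattern touching them is rejected. This squares with the save/restore loops elsewhere in the diff starting at i = 6.

```cpp
// Arithmetic behind the assumed reading above.
static_assert(6 * 0x800 == 0x3000, "six 2 KB blocks span the SPURS management area");
```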
@@ -2639,7 +2644,7 @@ s32 _cellSpursTaskAttribute2Initialize(vm::ptr<CellSpursTaskAttribute2> attribut
 
 	for (s32 c = 0; c < 4; c++)
 	{
-		attribute->lsPattern._u128 = u128::from64r(0);
+		attribute->lsPattern._u32[c] = 0;
 	}
 
 	attribute->name_addr = 0;
@@ -623,12 +623,14 @@ static_assert(sizeof(CellSpursEventFlag) == CellSpursEventFlag::size, "Wrong Cel
 
 union CellSpursTaskArgument
 {
-	be_t<u128> _u128;
+	be_t<u32> _u32[4];
+	be_t<u64> _u64[2];
 };
 
 union CellSpursTaskLsPattern
 {
-	be_t<u128> _u128;
+	be_t<u32> _u32[4];
+	be_t<u64> _u64[2];
 };
 
 struct CellSpursTaskset
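The header change swaps the single `be_t<u128>` member for 32- and 64-bit array views; the .cpp hunks above then rebuild the 128-bit value with `u128::from64r` on demand. A simplified sketch of the layout, with plain integers standing in for the emulator's big-endian `be_t<>` wrappers:

```cpp
#include <cstdint>

// Both views alias the same 16 bytes; the full 128-bit value is
// reassembled from _u64[0] and _u64[1] when needed.
union TaskLsPatternSketch
{
    uint32_t _u32[4];
    uint64_t _u64[2];
};

static_assert(sizeof(TaskLsPatternSketch) == 16, "the pattern must stay 16 bytes");
```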
@@ -1140,7 +1140,7 @@ void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) {
 	auto taskset = vm::get_ptr<CellSpursTaskset>(spu.ls_offset + 0x2700);
 
 	spu.GPR[2].clear();
-	spu.GPR[3] = taskArgs._u128;
+	spu.GPR[3] = u128::from64r(taskArgs._u64[0], taskArgs._u64[1]);
 	spu.GPR[4]._u64[1] = taskset->m.args;
 	spu.GPR[4]._u64[0] = taskset->m.spurs.addr();
 	for (auto i = 5; i < 128; i++) {
@@ -1382,8 +1382,9 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
 
 	u32 allocLsBlocks = taskInfo->context_save_storage_and_alloc_ls_blocks & 0x7F;
 	u32 lsBlocks = 0;
+	u128 ls_pattern = u128::from64r(taskInfo->ls_pattern._u64[0], taskInfo->ls_pattern._u64[1]);
 	for (auto i = 0; i < 128; i++) {
-		if (taskInfo->ls_pattern._u128.value()._bit[i]) {
+		if (ls_pattern._bit[i]) {
 			lsBlocks++;
 		}
 	}
@@ -1394,7 +1395,7 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
 
 	// Make sure the stack area is specified in the ls pattern
 	for (auto i = (ctxt->savedContextSp.value()._u32[3]) >> 11; i < 128; i++) {
-		if (taskInfo->ls_pattern._u128.value()._bit[i] == false) {
+		if (ls_pattern._bit[i] == false) {
 			return CELL_SPURS_TASK_ERROR_STAT;
 		}
 	}
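The loop bound comes from the block geometry: `savedContextSp`'s low word holds the SPU stack pointer, and shifting it right by 11 converts the address into its 2 KB block index, so every block from the stack top to the end of LS must be flagged in the pattern. A worked example under that assumption:

```cpp
// An SPU stack pointer of 0x3E000 lives in block 0x3E000 >> 11 = 124,
// so blocks 124..127 must all be set for the saved context to include the stack.
static_assert((0x3E000 >> 11) == 124, "2 KB block index of the example stack pointer");
```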
@@ -1414,7 +1415,7 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
 
 	// Save LS context
 	for (auto i = 6; i < 128; i++) {
-		if (taskInfo->ls_pattern._u128.value()._bit[i]) {
+		if (ls_pattern._bit[i]) {
 			// TODO: Combine DMA requests for consecutive blocks into a single request
 			memcpy(vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), vm::get_ptr(spu.ls_offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), 0x800);
 		}
@@ -1498,7 +1499,8 @@ void spursTasksetDispatch(SPUThread & spu) {
 	}
 
 	// If the entire LS is saved then there is no need to load the ELF as it will be saved in the context save area as well
-	if (taskInfo->ls_pattern._u128.value() != u128::from64r(0x03FFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull)) {
+	u128 ls_pattern = u128::from64r(taskInfo->ls_pattern._u64[0], taskInfo->ls_pattern._u64[1]);
+	if (ls_pattern != u128::from64r(0x03FFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull)) {
 		// Load the ELF
 		u32 entryPoint;
 		if (spursTasksetLoadElf(spu, &entryPoint, nullptr, taskInfo->elf_addr.addr(), true) != CELL_OK) {
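Assumed reading of the sentinel constant: `from64r(0x03FFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull)` is the pattern with the six management-area bits clear and all 122 remaining block bits set, i.e. the whole task-usable LS. If everything is already captured in the context save area, reloading the ELF would be redundant.

```cpp
// The high word of the sentinel leaves exactly the top six block bits clear.
static_assert((0x03FFFFFFFFFFFFFFull >> 58) == 0, "the six management-area bits are clear");
static_assert((0x03FFFFFFFFFFFFFFull >> 57) == 1, "bit 57 is the highest set bit of the high word");
```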
@@ -1512,7 +1514,7 @@ void spursTasksetDispatch(SPUThread & spu) {
 	const u32 contextSaveStorage = vm::cast(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
 	memcpy(vm::get_ptr(spu.ls_offset + 0x2C80), vm::get_ptr(contextSaveStorage), 0x380);
 	for (auto i = 6; i < 128; i++) {
-		if (taskInfo->ls_pattern._u128.value()._bit[i]) {
+		if (ls_pattern._bit[i]) {
 			// TODO: Combine DMA requests for consecutive blocks into a single request
 			memcpy(vm::get_ptr(spu.ls_offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), 0x800);
 		}
@@ -162,6 +162,17 @@ PPUThread* ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool i
 {
 	PPUThread& new_thread = *(PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
 
+	// Note: (Syphurith) I haven't figured out the minimum stack size of a PPU thread.
+	// Maybe it can be done with the pthread_attr_getstacksize function.
+	// I took 4096 (PTHREAD_STACK_MIN, the smallest allocation unit) for this.
+	if ((stacksize % 4096) || (stacksize == 0)) {
+		// If the size is not a multiple of the smallest allocation unit, round it up
+		// to the next one, and treat zero the same way.
+		sys_ppu_thread.Warning("sys_ppu_thread_create: stacksize increased from 0x%x to 0x%x.",
+			stacksize, 4096 * ((u32)(stacksize / 4096) + 1));
+		stacksize = 4096 * ((u32)(stacksize / 4096) + 1);
+	}
+
 	u32 id = new_thread.GetId();
 	new_thread.SetEntry(entry);
 	new_thread.SetPrio(prio);
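The rounding in the new block is the standard align-up idiom, applied only when the size is not already a multiple of the 4096-byte unit. An equivalent branch-light formulation (a sketch for comparison, not the emulator's code):

```cpp
#include <cstdint>

// Round a requested stack size up to the next 4096-byte boundary.
static uint32_t round_up_stack(uint32_t stacksize)
{
    if (stacksize == 0)
    {
        return 4096; // zero gets the minimum allocation unit
    }
    return (stacksize + 4095u) & ~4095u; // identity for exact multiples
}
```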