diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e798c1a7..6ae4b892 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -177,9 +177,6 @@ jobs: build-macos: runs-on: macos-14 - strategy: - matrix: - arch: [x86_64, arm64] steps: - name: "Checkout repo" uses: actions/checkout@v4 @@ -205,7 +202,7 @@ jobs: - name: "Install molten-vk" run: | - curl -L -O https://github.com/KhronosGroup/MoltenVK/releases/download/v1.3.0/MoltenVK-macos.tar + curl -L -O https://github.com/KhronosGroup/MoltenVK/releases/download/v1.2.9/MoltenVK-macos.tar tar xf MoltenVK-macos.tar sudo mkdir -p /usr/local/lib sudo cp MoltenVK/MoltenVK/dynamic/dylib/macOS/libMoltenVK.dylib /usr/local/lib @@ -239,7 +236,7 @@ jobs: cd build cmake .. ${{ env.BUILD_FLAGS }} \ -DCMAKE_BUILD_TYPE=${{ env.BUILD_MODE }} \ - -DCMAKE_OSX_ARCHITECTURES=${{ matrix.arch }} \ + -DCMAKE_OSX_ARCHITECTURES=x86_64 \ -DMACOS_BUNDLE=ON \ -G Ninja @@ -262,5 +259,5 @@ jobs: - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: cemu-bin-macos-${{ matrix.arch }} + name: cemu-bin-macos-x64 path: ./bin/Cemu.dmg diff --git a/.gitmodules b/.gitmodules index 8f9772d3..dc69c441 100644 --- a/.gitmodules +++ b/.gitmodules @@ -18,6 +18,3 @@ path = dependencies/imgui url = https://github.com/ocornut/imgui shallow = true -[submodule "dependencies/xbyak_aarch64"] - path = dependencies/xbyak_aarch64 - url = https://github.com/fujitsu/xbyak_aarch64 diff --git a/CMakeLists.txt b/CMakeLists.txt index aa491b9e..560728f2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -166,7 +166,7 @@ if (UNIX AND NOT APPLE) if(ENABLE_BLUEZ) find_package(bluez REQUIRED) - set(SUPPORTS_WIIMOTE ON) + set(ENABLE_WIIMOTE ON) add_compile_definitions(HAS_BLUEZ) endif() @@ -188,7 +188,7 @@ endif() if (ENABLE_HIDAPI) find_package(hidapi REQUIRED) - set(SUPPORTS_WIIMOTE ON) + set(ENABLE_WIIMOTE ON) add_compile_definitions(HAS_HIDAPI) endif () @@ -222,18 +222,9 @@ endif() 
add_subdirectory("dependencies/ih264d" EXCLUDE_FROM_ALL) -if (CMAKE_OSX_ARCHITECTURES) - set(CEMU_ARCHITECTURE ${CMAKE_OSX_ARCHITECTURES}) -else() - set(CEMU_ARCHITECTURE ${CMAKE_SYSTEM_PROCESSOR}) -endif() -if(CEMU_ARCHITECTURE MATCHES "(aarch64)|(AARCH64)|(arm64)|(ARM64)") - add_subdirectory("dependencies/xbyak_aarch64" EXCLUDE_FROM_ALL) -endif() - find_package(ZArchive) if (NOT ZArchive_FOUND) add_subdirectory("dependencies/ZArchive" EXCLUDE_FROM_ALL) endif() -add_subdirectory(src) \ No newline at end of file +add_subdirectory(src) diff --git a/boost.natvis b/boost.natvis deleted file mode 100644 index 2781a585..00000000 --- a/boost.natvis +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - m_holder.m_size - - m_holder.m_size - m_holder.m_start - - - - - - {{ size={m_holder.m_size} }} - - m_holder.m_size - static_capacity - - m_holder.m_size - ($T1*)m_holder.storage.data - - - - - diff --git a/dependencies/ih264d/CMakeLists.txt b/dependencies/ih264d/CMakeLists.txt index 64ac0931..686a9d08 100644 --- a/dependencies/ih264d/CMakeLists.txt +++ b/dependencies/ih264d/CMakeLists.txt @@ -183,9 +183,6 @@ target_sources(ih264d PRIVATE "decoder/arm/ih264d_function_selector.c" ) target_compile_options(ih264d PRIVATE -DARMV8) -if(APPLE) - target_sources(ih264d PRIVATE "common/armv8/macos_arm_symbol_aliases.s") -endif() else() message(FATAL_ERROR "ih264d unknown architecture: ${IH264D_ARCHITECTURE}") endif() diff --git a/dependencies/ih264d/common/armv8/ih264_intra_pred_chroma_av8.s b/dependencies/ih264d/common/armv8/ih264_intra_pred_chroma_av8.s index c0d9cf99..39c02560 100644 --- a/dependencies/ih264d/common/armv8/ih264_intra_pred_chroma_av8.s +++ b/dependencies/ih264d/common/armv8/ih264_intra_pred_chroma_av8.s @@ -429,13 +429,8 @@ ih264_intra_pred_chroma_8x8_mode_plane_av8: rev64 v7.4h, v2.4h ld1 {v3.2s}, [x10] sub x5, x3, #8 -#ifdef __APPLE__ - adrp x12, _ih264_gai1_intrapred_chroma_plane_coeffs1@GOTPAGE - ldr x12, [x12, _ih264_gai1_intrapred_chroma_plane_coeffs1@GOTPAGEOFF] 
-#else adrp x12, :got:ih264_gai1_intrapred_chroma_plane_coeffs1 ldr x12, [x12, #:got_lo12:ih264_gai1_intrapred_chroma_plane_coeffs1] -#endif usubl v10.8h, v5.8b, v1.8b ld1 {v8.8b, v9.8b}, [x12] // Load multiplication factors 1 to 8 into D3 mov v8.d[1], v9.d[0] @@ -489,13 +484,10 @@ ih264_intra_pred_chroma_8x8_mode_plane_av8: zip1 v1.8h, v0.8h, v2.8h zip2 v2.8h, v0.8h, v2.8h mov v0.16b, v1.16b -#ifdef __APPLE__ - adrp x12, _ih264_gai1_intrapred_chroma_plane_coeffs2@GOTPAGE - ldr x12, [x12, _ih264_gai1_intrapred_chroma_plane_coeffs2@GOTPAGEOFF] -#else + adrp x12, :got:ih264_gai1_intrapred_chroma_plane_coeffs2 ldr x12, [x12, #:got_lo12:ih264_gai1_intrapred_chroma_plane_coeffs2] -#endif + ld1 {v8.2s, v9.2s}, [x12] mov v8.d[1], v9.d[0] mov v10.16b, v8.16b diff --git a/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_16x16_av8.s b/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_16x16_av8.s index 2422d8cd..fa19c121 100644 --- a/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_16x16_av8.s +++ b/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_16x16_av8.s @@ -431,13 +431,10 @@ ih264_intra_pred_luma_16x16_mode_plane_av8: mov x10, x1 //top_left mov x4, #-1 ld1 {v2.2s}, [x1], x8 -#ifdef __APPLE__ - adrp x7, _ih264_gai1_intrapred_luma_plane_coeffs@GOTPAGE - ldr x7, [x7, _ih264_gai1_intrapred_luma_plane_coeffs@GOTPAGEOFF] -#else + adrp x7, :got:ih264_gai1_intrapred_luma_plane_coeffs ldr x7, [x7, #:got_lo12:ih264_gai1_intrapred_luma_plane_coeffs] -#endif + ld1 {v0.2s}, [x1] rev64 v2.8b, v2.8b ld1 {v6.2s, v7.2s}, [x7] diff --git a/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_8x8_av8.s b/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_8x8_av8.s index 6fa31ded..273aa81b 100644 --- a/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_8x8_av8.s +++ b/dependencies/ih264d/common/armv8/ih264_intra_pred_luma_8x8_av8.s @@ -1029,13 +1029,9 @@ ih264_intra_pred_luma_8x8_mode_horz_u_av8: mov v3.d[0], v2.d[1] ext v4.16b, v2.16b , v2.16b , #1 mov 
v5.d[0], v4.d[1] -#ifdef __APPLE__ - adrp x12, _ih264_gai1_intrapred_luma_8x8_horz_u@GOTPAGE - ldr x12, [x12, _ih264_gai1_intrapred_luma_8x8_horz_u@GOTPAGEOFF] -#else + adrp x12, :got:ih264_gai1_intrapred_luma_8x8_horz_u ldr x12, [x12, #:got_lo12:ih264_gai1_intrapred_luma_8x8_horz_u] -#endif uaddl v20.8h, v0.8b, v2.8b uaddl v22.8h, v1.8b, v3.8b uaddl v24.8h, v2.8b, v4.8b diff --git a/dependencies/ih264d/common/armv8/ih264_weighted_bi_pred_av8.s b/dependencies/ih264d/common/armv8/ih264_weighted_bi_pred_av8.s index 8d6aa995..475f690e 100644 --- a/dependencies/ih264d/common/armv8/ih264_weighted_bi_pred_av8.s +++ b/dependencies/ih264d/common/armv8/ih264_weighted_bi_pred_av8.s @@ -142,22 +142,14 @@ ih264_weighted_bi_pred_luma_av8: sxtw x4, w4 sxtw x5, w5 stp x19, x20, [sp, #-16]! -#ifndef __APPLE__ ldr w8, [sp, #80] //Load wt2 in w8 ldr w9, [sp, #88] //Load ofst1 in w9 - ldr w10, [sp, #96] //Load ofst2 in w10 - ldr w11, [sp, #104] //Load ht in w11 - ldr w12, [sp, #112] //Load wd in w12 -#else - ldr w8, [sp, #80] //Load wt2 in w8 - ldr w9, [sp, #84] //Load ofst1 in w9 - ldr w10, [sp, #88] //Load ofst2 in w10 - ldr w11, [sp, #92] //Load ht in w11 - ldr w12, [sp, #96] //Load wd in w12 -#endif add w6, w6, #1 //w6 = log_WD + 1 neg w10, w6 //w10 = -(log_WD + 1) dup v0.8h, w10 //Q0 = -(log_WD + 1) (32-bit) + ldr w10, [sp, #96] //Load ofst2 in w10 + ldr w11, [sp, #104] //Load ht in w11 + ldr w12, [sp, #112] //Load wd in w12 add w9, w9, #1 //w9 = ofst1 + 1 add w9, w9, w10 //w9 = ofst1 + ofst2 + 1 mov v2.s[0], w7 @@ -432,24 +424,17 @@ ih264_weighted_bi_pred_chroma_av8: sxtw x5, w5 stp x19, x20, [sp, #-16]! 
-#ifndef __APPLE__ + ldr w8, [sp, #80] //Load wt2 in w8 - ldr w9, [sp, #88] //Load ofst1 in w9 - ldr w10, [sp, #96] //Load ofst2 in w10 - ldr w11, [sp, #104] //Load ht in w11 - ldr w12, [sp, #112] //Load wd in w12 -#else - ldr w8, [sp, #80] //Load wt2 in w8 - ldr w9, [sp, #84] //Load ofst1 in w9 - ldr w10, [sp, #88] //Load ofst2 in w10 - ldr w11, [sp, #92] //Load ht in w11 - ldr w12, [sp, #96] //Load wd in w12 -#endif dup v4.4s, w8 //Q2 = (wt2_u, wt2_v) (32-bit) dup v2.4s, w7 //Q1 = (wt1_u, wt1_v) (32-bit) add w6, w6, #1 //w6 = log_WD + 1 + ldr w9, [sp, #88] //Load ofst1 in w9 + ldr w10, [sp, #96] //Load ofst2 in w10 neg w20, w6 //w20 = -(log_WD + 1) dup v0.8h, w20 //Q0 = -(log_WD + 1) (16-bit) + ldr w11, [sp, #104] //Load ht in x11 + ldr w12, [sp, #112] //Load wd in x12 dup v20.8h, w9 //0ffset1 dup v21.8h, w10 //0ffset2 srhadd v6.8b, v20.8b, v21.8b diff --git a/dependencies/ih264d/common/armv8/macos_arm_symbol_aliases.s b/dependencies/ih264d/common/armv8/macos_arm_symbol_aliases.s deleted file mode 100644 index 3639f1b3..00000000 --- a/dependencies/ih264d/common/armv8/macos_arm_symbol_aliases.s +++ /dev/null @@ -1,185 +0,0 @@ -// macOS clang compilers append preceding underscores to function names, this is to prevent -// mismatches with the assembly function names and the C functions as defined in the header. 
- -.global _ih264_deblk_chroma_horz_bs4_av8 -_ih264_deblk_chroma_horz_bs4_av8 = ih264_deblk_chroma_horz_bs4_av8 - -.global _ih264_deblk_chroma_horz_bslt4_av8 -_ih264_deblk_chroma_horz_bslt4_av8 = ih264_deblk_chroma_horz_bslt4_av8 - -.global _ih264_deblk_chroma_vert_bs4_av8 -_ih264_deblk_chroma_vert_bs4_av8 = ih264_deblk_chroma_vert_bs4_av8 - -.global _ih264_deblk_chroma_vert_bslt4_av8 -_ih264_deblk_chroma_vert_bslt4_av8 = ih264_deblk_chroma_vert_bslt4_av8 - -.global _ih264_deblk_luma_horz_bs4_av8 -_ih264_deblk_luma_horz_bs4_av8 = ih264_deblk_luma_horz_bs4_av8 - -.global _ih264_deblk_luma_horz_bslt4_av8 -_ih264_deblk_luma_horz_bslt4_av8 = ih264_deblk_luma_horz_bslt4_av8 - -.global _ih264_deblk_luma_vert_bs4_av8 -_ih264_deblk_luma_vert_bs4_av8 = ih264_deblk_luma_vert_bs4_av8 - -.global _ih264_deblk_luma_vert_bslt4_av8 -_ih264_deblk_luma_vert_bslt4_av8 = ih264_deblk_luma_vert_bslt4_av8 - -.global _ih264_default_weighted_pred_chroma_av8 -_ih264_default_weighted_pred_chroma_av8 = ih264_default_weighted_pred_chroma_av8 - -.global _ih264_default_weighted_pred_luma_av8 -_ih264_default_weighted_pred_luma_av8 = ih264_default_weighted_pred_luma_av8 - -.global _ih264_ihadamard_scaling_4x4_av8 -_ih264_ihadamard_scaling_4x4_av8 = ih264_ihadamard_scaling_4x4_av8 - -.global _ih264_inter_pred_chroma_av8 -_ih264_inter_pred_chroma_av8 = ih264_inter_pred_chroma_av8 - -.global _ih264_inter_pred_luma_copy_av8 -_ih264_inter_pred_luma_copy_av8 = ih264_inter_pred_luma_copy_av8 - -.global _ih264_inter_pred_luma_horz_av8 -_ih264_inter_pred_luma_horz_av8 = ih264_inter_pred_luma_horz_av8 - -.global _ih264_inter_pred_luma_horz_hpel_vert_hpel_av8 -_ih264_inter_pred_luma_horz_hpel_vert_hpel_av8 = ih264_inter_pred_luma_horz_hpel_vert_hpel_av8 - -.global _ih264_inter_pred_luma_horz_hpel_vert_qpel_av8 -_ih264_inter_pred_luma_horz_hpel_vert_qpel_av8 = ih264_inter_pred_luma_horz_hpel_vert_qpel_av8 - -.global _ih264_inter_pred_luma_horz_qpel_av8 -_ih264_inter_pred_luma_horz_qpel_av8 = 
ih264_inter_pred_luma_horz_qpel_av8 - -.global _ih264_inter_pred_luma_horz_qpel_vert_hpel_av8 -_ih264_inter_pred_luma_horz_qpel_vert_hpel_av8 = ih264_inter_pred_luma_horz_qpel_vert_hpel_av8 - -.global _ih264_inter_pred_luma_horz_qpel_vert_qpel_av8 -_ih264_inter_pred_luma_horz_qpel_vert_qpel_av8 = ih264_inter_pred_luma_horz_qpel_vert_qpel_av8 - -.global _ih264_inter_pred_luma_vert_av8 -_ih264_inter_pred_luma_vert_av8 = ih264_inter_pred_luma_vert_av8 - -.global _ih264_inter_pred_luma_vert_qpel_av8 -_ih264_inter_pred_luma_vert_qpel_av8 = ih264_inter_pred_luma_vert_qpel_av8 - -.global _ih264_intra_pred_chroma_8x8_mode_horz_av8 -_ih264_intra_pred_chroma_8x8_mode_horz_av8 = ih264_intra_pred_chroma_8x8_mode_horz_av8 - -.global _ih264_intra_pred_chroma_8x8_mode_plane_av8 -_ih264_intra_pred_chroma_8x8_mode_plane_av8 = ih264_intra_pred_chroma_8x8_mode_plane_av8 - -.global _ih264_intra_pred_chroma_8x8_mode_vert_av8 -_ih264_intra_pred_chroma_8x8_mode_vert_av8 = ih264_intra_pred_chroma_8x8_mode_vert_av8 - -.global _ih264_intra_pred_luma_16x16_mode_dc_av8 -_ih264_intra_pred_luma_16x16_mode_dc_av8 = ih264_intra_pred_luma_16x16_mode_dc_av8 - -.global _ih264_intra_pred_luma_16x16_mode_horz_av8 -_ih264_intra_pred_luma_16x16_mode_horz_av8 = ih264_intra_pred_luma_16x16_mode_horz_av8 - -.global _ih264_intra_pred_luma_16x16_mode_plane_av8 -_ih264_intra_pred_luma_16x16_mode_plane_av8 = ih264_intra_pred_luma_16x16_mode_plane_av8 - -.global _ih264_intra_pred_luma_16x16_mode_vert_av8 -_ih264_intra_pred_luma_16x16_mode_vert_av8 = ih264_intra_pred_luma_16x16_mode_vert_av8 - -.global _ih264_intra_pred_luma_4x4_mode_dc_av8 -_ih264_intra_pred_luma_4x4_mode_dc_av8 = ih264_intra_pred_luma_4x4_mode_dc_av8 - -.global _ih264_intra_pred_luma_4x4_mode_diag_dl_av8 -_ih264_intra_pred_luma_4x4_mode_diag_dl_av8 = ih264_intra_pred_luma_4x4_mode_diag_dl_av8 - -.global _ih264_intra_pred_luma_4x4_mode_diag_dr_av8 -_ih264_intra_pred_luma_4x4_mode_diag_dr_av8 = ih264_intra_pred_luma_4x4_mode_diag_dr_av8 - 
-.global _ih264_intra_pred_luma_4x4_mode_horz_av8 -_ih264_intra_pred_luma_4x4_mode_horz_av8 = ih264_intra_pred_luma_4x4_mode_horz_av8 - -.global _ih264_intra_pred_luma_4x4_mode_horz_d_av8 -_ih264_intra_pred_luma_4x4_mode_horz_d_av8 = ih264_intra_pred_luma_4x4_mode_horz_d_av8 - -.global _ih264_intra_pred_luma_4x4_mode_horz_u_av8 -_ih264_intra_pred_luma_4x4_mode_horz_u_av8 = ih264_intra_pred_luma_4x4_mode_horz_u_av8 - -.global _ih264_intra_pred_luma_4x4_mode_vert_av8 -_ih264_intra_pred_luma_4x4_mode_vert_av8 = ih264_intra_pred_luma_4x4_mode_vert_av8 - -.global _ih264_intra_pred_luma_4x4_mode_vert_l_av8 -_ih264_intra_pred_luma_4x4_mode_vert_l_av8 = ih264_intra_pred_luma_4x4_mode_vert_l_av8 - -.global _ih264_intra_pred_luma_4x4_mode_vert_r_av8 -_ih264_intra_pred_luma_4x4_mode_vert_r_av8 = ih264_intra_pred_luma_4x4_mode_vert_r_av8 - -.global _ih264_intra_pred_luma_8x8_mode_dc_av8 -_ih264_intra_pred_luma_8x8_mode_dc_av8 = ih264_intra_pred_luma_8x8_mode_dc_av8 - -.global _ih264_intra_pred_luma_8x8_mode_diag_dl_av8 -_ih264_intra_pred_luma_8x8_mode_diag_dl_av8 = ih264_intra_pred_luma_8x8_mode_diag_dl_av8 - -.global _ih264_intra_pred_luma_8x8_mode_diag_dr_av8 -_ih264_intra_pred_luma_8x8_mode_diag_dr_av8 = ih264_intra_pred_luma_8x8_mode_diag_dr_av8 - -.global _ih264_intra_pred_luma_8x8_mode_horz_av8 -_ih264_intra_pred_luma_8x8_mode_horz_av8 = ih264_intra_pred_luma_8x8_mode_horz_av8 - -.global _ih264_intra_pred_luma_8x8_mode_horz_d_av8 -_ih264_intra_pred_luma_8x8_mode_horz_d_av8 = ih264_intra_pred_luma_8x8_mode_horz_d_av8 - -.global _ih264_intra_pred_luma_8x8_mode_horz_u_av8 -_ih264_intra_pred_luma_8x8_mode_horz_u_av8 = ih264_intra_pred_luma_8x8_mode_horz_u_av8 - -.global _ih264_intra_pred_luma_8x8_mode_vert_av8 -_ih264_intra_pred_luma_8x8_mode_vert_av8 = ih264_intra_pred_luma_8x8_mode_vert_av8 - -.global _ih264_intra_pred_luma_8x8_mode_vert_l_av8 -_ih264_intra_pred_luma_8x8_mode_vert_l_av8 = ih264_intra_pred_luma_8x8_mode_vert_l_av8 - -.global 
_ih264_intra_pred_luma_8x8_mode_vert_r_av8 -_ih264_intra_pred_luma_8x8_mode_vert_r_av8 = ih264_intra_pred_luma_8x8_mode_vert_r_av8 - -.global _ih264_iquant_itrans_recon_4x4_av8 -_ih264_iquant_itrans_recon_4x4_av8 = ih264_iquant_itrans_recon_4x4_av8 - -.global _ih264_iquant_itrans_recon_4x4_dc_av8 -_ih264_iquant_itrans_recon_4x4_dc_av8 = ih264_iquant_itrans_recon_4x4_dc_av8 - -.global _ih264_iquant_itrans_recon_8x8_av8 -_ih264_iquant_itrans_recon_8x8_av8 = ih264_iquant_itrans_recon_8x8_av8 - -.global _ih264_iquant_itrans_recon_8x8_dc_av8 -_ih264_iquant_itrans_recon_8x8_dc_av8 = ih264_iquant_itrans_recon_8x8_dc_av8 - -.global _ih264_iquant_itrans_recon_chroma_4x4_av8 -_ih264_iquant_itrans_recon_chroma_4x4_av8 = ih264_iquant_itrans_recon_chroma_4x4_av8 - -.global _ih264_iquant_itrans_recon_chroma_4x4_dc_av8 -_ih264_iquant_itrans_recon_chroma_4x4_dc_av8 = ih264_iquant_itrans_recon_chroma_4x4_dc_av8 - -.global _ih264_pad_left_chroma_av8 -_ih264_pad_left_chroma_av8 = ih264_pad_left_chroma_av8 - -.global _ih264_pad_left_luma_av8 -_ih264_pad_left_luma_av8 = ih264_pad_left_luma_av8 - -.global _ih264_pad_right_chroma_av8 -_ih264_pad_right_chroma_av8 = ih264_pad_right_chroma_av8 - -.global _ih264_pad_right_luma_av8 -_ih264_pad_right_luma_av8 = ih264_pad_right_luma_av8 - -.global _ih264_pad_top_av8 -_ih264_pad_top_av8 = ih264_pad_top_av8 - -.global _ih264_weighted_bi_pred_chroma_av8 -_ih264_weighted_bi_pred_chroma_av8 = ih264_weighted_bi_pred_chroma_av8 - -.global _ih264_weighted_bi_pred_luma_av8 -_ih264_weighted_bi_pred_luma_av8 = ih264_weighted_bi_pred_luma_av8 - -.global _ih264_weighted_pred_chroma_av8 -_ih264_weighted_pred_chroma_av8 = ih264_weighted_pred_chroma_av8 - -.global _ih264_weighted_pred_luma_av8 -_ih264_weighted_pred_luma_av8 = ih264_weighted_pred_luma_av8 \ No newline at end of file diff --git a/dependencies/vcpkg b/dependencies/vcpkg index 533a5fda..a4275b7e 160000 --- a/dependencies/vcpkg +++ b/dependencies/vcpkg @@ -1 +1 @@ -Subproject commit 
533a5fda5c0646d1771345fb572e759283444d5f +Subproject commit a4275b7eee79fb24ec2e135481ef5fce8b41c339 diff --git a/dependencies/xbyak_aarch64 b/dependencies/xbyak_aarch64 deleted file mode 160000 index 904b8923..00000000 --- a/dependencies/xbyak_aarch64 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 904b8923457f3ec0d6f82ea2d6832a792851194d diff --git a/dist/linux/info.cemu.Cemu.desktop b/dist/linux/info.cemu.Cemu.desktop index 6eeb0120..5003d4a6 100644 --- a/dist/linux/info.cemu.Cemu.desktop +++ b/dist/linux/info.cemu.Cemu.desktop @@ -24,4 +24,3 @@ Comment[it]=Software per emulare giochi e applicazioni per Wii U su PC Categories=Game;Emulator; Keywords=Nintendo; MimeType=application/x-wii-u-rom; -StartupWMClass=Cemu diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 04b6dfdd..79471321 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -49,6 +49,7 @@ add_subdirectory(audio) add_subdirectory(util) add_subdirectory(imgui) add_subdirectory(resource) +add_subdirectory(asm) add_executable(CemuBin main.cpp @@ -101,21 +102,13 @@ if (MACOS_BUNDLE) endforeach(folder) if(CMAKE_BUILD_TYPE STREQUAL "Debug") - set(LIBUSB_PATH "${CMAKE_BINARY_DIR}/vcpkg_installed/${VCPKG_TARGET_TRIPLET}/debug/lib/libusb-1.0.0.dylib") + set(LIBUSB_PATH "${CMAKE_BINARY_DIR}/vcpkg_installed/x64-osx/debug/lib/libusb-1.0.0.dylib") else() - set(LIBUSB_PATH "${CMAKE_BINARY_DIR}/vcpkg_installed/${VCPKG_TARGET_TRIPLET}/lib/libusb-1.0.0.dylib") + set(LIBUSB_PATH "${CMAKE_BINARY_DIR}/vcpkg_installed/x64-osx/lib/libusb-1.0.0.dylib") endif() - if (EXISTS "/usr/local/lib/libMoltenVK.dylib") - set(MOLTENVK_PATH "/usr/local/lib/libMoltenVK.dylib") - elseif (EXISTS "/opt/homebrew/lib/libMoltenVK.dylib") - set(MOLTENVK_PATH "/opt/homebrew/lib/libMoltenVK.dylib") - else() - message(FATAL_ERROR "failed to find libMoltenVK.dylib") - endif () - add_custom_command (TARGET CemuBin POST_BUILD - COMMAND ${CMAKE_COMMAND} ARGS -E copy "${MOLTENVK_PATH}" 
"${CMAKE_SOURCE_DIR}/bin/${OUTPUT_NAME}.app/Contents/Frameworks/libMoltenVK.dylib" + COMMAND ${CMAKE_COMMAND} ARGS -E copy "/usr/local/lib/libMoltenVK.dylib" "${CMAKE_SOURCE_DIR}/bin/${OUTPUT_NAME}.app/Contents/Frameworks/libMoltenVK.dylib" COMMAND ${CMAKE_COMMAND} ARGS -E copy "${LIBUSB_PATH}" "${CMAKE_SOURCE_DIR}/bin/${OUTPUT_NAME}.app/Contents/Frameworks/libusb-1.0.0.dylib" COMMAND ${CMAKE_COMMAND} ARGS -E copy "${CMAKE_SOURCE_DIR}/src/resource/update.sh" "${CMAKE_SOURCE_DIR}/bin/${OUTPUT_NAME}.app/Contents/MacOS/update.sh" COMMAND bash -c "install_name_tool -add_rpath @executable_path/../Frameworks ${CMAKE_SOURCE_DIR}/bin/${OUTPUT_NAME}.app/Contents/MacOS/${OUTPUT_NAME}" diff --git a/src/Cafe/CMakeLists.txt b/src/Cafe/CMakeLists.txt index 2900059b..d51d58d5 100644 --- a/src/Cafe/CMakeLists.txt +++ b/src/Cafe/CMakeLists.txt @@ -67,31 +67,24 @@ add_library(CemuCafe HW/Espresso/Recompiler/PPCFunctionBoundaryTracker.h HW/Espresso/Recompiler/PPCRecompiler.cpp HW/Espresso/Recompiler/PPCRecompiler.h - HW/Espresso/Recompiler/IML/IML.h - HW/Espresso/Recompiler/IML/IMLSegment.cpp - HW/Espresso/Recompiler/IML/IMLSegment.h - HW/Espresso/Recompiler/IML/IMLInstruction.cpp - HW/Espresso/Recompiler/IML/IMLInstruction.h - HW/Espresso/Recompiler/IML/IMLDebug.cpp - HW/Espresso/Recompiler/IML/IMLAnalyzer.cpp - HW/Espresso/Recompiler/IML/IMLOptimizer.cpp - HW/Espresso/Recompiler/IML/IMLRegisterAllocator.cpp - HW/Espresso/Recompiler/IML/IMLRegisterAllocator.h - HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.cpp - HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.h + HW/Espresso/Recompiler/PPCRecompilerImlAnalyzer.cpp HW/Espresso/Recompiler/PPCRecompilerImlGen.cpp HW/Espresso/Recompiler/PPCRecompilerImlGenFPU.cpp HW/Espresso/Recompiler/PPCRecompilerIml.h + HW/Espresso/Recompiler/PPCRecompilerImlOptimizer.cpp + HW/Espresso/Recompiler/PPCRecompilerImlRanges.cpp + HW/Espresso/Recompiler/PPCRecompilerImlRanges.h + HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator2.cpp + 
HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator.cpp HW/Espresso/Recompiler/PPCRecompilerIntermediate.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64AVX.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64BMI.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64FPU.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64Gen.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64GenFPU.cpp - HW/Espresso/Recompiler/BackendX64/BackendX64.h - HW/Espresso/Recompiler/BackendX64/X64Emit.hpp - HW/Espresso/Recompiler/BackendX64/x86Emitter.h + HW/Espresso/Recompiler/PPCRecompilerX64AVX.cpp + HW/Espresso/Recompiler/PPCRecompilerX64BMI.cpp + HW/Espresso/Recompiler/PPCRecompilerX64.cpp + HW/Espresso/Recompiler/PPCRecompilerX64FPU.cpp + HW/Espresso/Recompiler/PPCRecompilerX64Gen.cpp + HW/Espresso/Recompiler/PPCRecompilerX64GenFPU.cpp + HW/Espresso/Recompiler/PPCRecompilerX64.h + HW/Espresso/Recompiler/x64Emit.hpp HW/Latte/Common/RegisterSerializer.cpp HW/Latte/Common/RegisterSerializer.h HW/Latte/Common/ShaderSerializer.cpp @@ -476,10 +469,6 @@ add_library(CemuCafe OS/libs/nsyshid/Infinity.h OS/libs/nsyshid/Skylander.cpp OS/libs/nsyshid/Skylander.h - OS/libs/nsyshid/SkylanderXbox360.cpp - OS/libs/nsyshid/SkylanderXbox360.h - OS/libs/nsyshid/g721/g721.cpp - OS/libs/nsyshid/g721/g721.h OS/libs/nsyskbd/nsyskbd.cpp OS/libs/nsyskbd/nsyskbd.h OS/libs/nsysnet/nsysnet.cpp @@ -537,14 +526,6 @@ if(APPLE) target_sources(CemuCafe PRIVATE "HW/Latte/Renderer/Vulkan/CocoaSurface.mm") endif() -if(CEMU_ARCHITECTURE MATCHES "(aarch64)|(AARCH64)|(arm64)|(ARM64)") - target_sources(CemuCafe PRIVATE - HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.cpp - HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.h - ) - target_link_libraries(CemuCafe PRIVATE xbyak_aarch64) -endif() - set_property(TARGET CemuCafe PROPERTY MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") target_include_directories(CemuCafe PUBLIC "../") @@ -552,10 +533,11 @@ 
target_include_directories(CemuCafe PUBLIC "../") if (glslang_VERSION VERSION_LESS "15.0.0") set(glslang_target "glslang::SPIRV") else() - set(glslang_target "glslang::glslang") + set(glslang_target "glslang") endif() target_link_libraries(CemuCafe PRIVATE + CemuAsm CemuAudio CemuCommon CemuComponents diff --git a/src/Cafe/CafeSystem.cpp b/src/Cafe/CafeSystem.cpp index d20ccd9d..1bf3755e 100644 --- a/src/Cafe/CafeSystem.cpp +++ b/src/Cafe/CafeSystem.cpp @@ -844,7 +844,7 @@ namespace CafeSystem module->TitleStart(); cemu_initForGame(); // enter scheduler - if ((ActiveSettings::GetCPUMode() == CPUMode::MulticoreRecompiler || LaunchSettings::ForceMultiCoreInterpreter()) && !LaunchSettings::ForceInterpreter()) + if (ActiveSettings::GetCPUMode() == CPUMode::MulticoreRecompiler && !LaunchSettings::ForceInterpreter()) coreinit::OSSchedulerBegin(3); else coreinit::OSSchedulerBegin(1); diff --git a/src/Cafe/Filesystem/FST/FST.cpp b/src/Cafe/Filesystem/FST/FST.cpp index ec112b9a..f1255778 100644 --- a/src/Cafe/Filesystem/FST/FST.cpp +++ b/src/Cafe/Filesystem/FST/FST.cpp @@ -13,8 +13,6 @@ #define SET_FST_ERROR(__code) if (errorCodeOut) *errorCodeOut = ErrorCode::__code -static_assert(sizeof(NCrypto::AesIv) == 16); // make sure IV is actually 16 bytes - class FSTDataSource { public: @@ -870,7 +868,7 @@ static_assert(sizeof(FSTHashedBlock) == BLOCK_SIZE); struct FSTCachedRawBlock { FSTRawBlock blockData; - NCrypto::AesIv ivForNextBlock; + uint8 ivForNextBlock[16]; uint64 lastAccess; }; @@ -921,13 +919,13 @@ void FSTVolume::TrimCacheIfRequired(FSTCachedRawBlock** droppedRawBlock, FSTCach } } -void FSTVolume::DetermineUnhashedBlockIV(uint32 clusterIndex, uint32 blockIndex, NCrypto::AesIv& ivOut) +void FSTVolume::DetermineUnhashedBlockIV(uint32 clusterIndex, uint32 blockIndex, uint8 ivOut[16]) { - ivOut = {}; + memset(ivOut, 0, sizeof(ivOut)); if(blockIndex == 0) { - ivOut.iv[0] = (uint8)(clusterIndex >> 8); - ivOut.iv[1] = (uint8)(clusterIndex >> 0); + ivOut[0] = 
(uint8)(clusterIndex >> 8); + ivOut[1] = (uint8)(clusterIndex >> 0); } else { @@ -938,20 +936,20 @@ void FSTVolume::DetermineUnhashedBlockIV(uint32 clusterIndex, uint32 blockIndex, auto itr = m_cacheDecryptedRawBlocks.find(cacheBlockId); if (itr != m_cacheDecryptedRawBlocks.end()) { - ivOut = itr->second->ivForNextBlock; + memcpy(ivOut, itr->second->ivForNextBlock, 16); } else { - cemu_assert(m_sectorSize >= NCrypto::AesIv::SIZE); + cemu_assert(m_sectorSize >= 16); uint64 clusterOffset = (uint64)m_cluster[clusterIndex].offset * m_sectorSize; - NCrypto::AesIv prevIV{}; - if (m_dataSource->readData(clusterIndex, clusterOffset, blockIndex * m_sectorSize - NCrypto::AesIv::SIZE, prevIV.iv, NCrypto::AesIv::SIZE) != NCrypto::AesIv::SIZE) + uint8 prevIV[16]; + if (m_dataSource->readData(clusterIndex, clusterOffset, blockIndex * m_sectorSize - 16, prevIV, 16) != 16) { cemuLog_log(LogType::Force, "Failed to read IV for raw FST block"); m_detectedCorruption = true; return; } - ivOut = prevIV; + memcpy(ivOut, prevIV, 16); } } } @@ -986,10 +984,10 @@ FSTCachedRawBlock* FSTVolume::GetDecryptedRawBlock(uint32 clusterIndex, uint32 b return nullptr; } // decrypt hash data - NCrypto::AesIv iv{}; + uint8 iv[16]{}; DetermineUnhashedBlockIV(clusterIndex, blockIndex, iv); - std::copy(block->blockData.rawData.data() + m_sectorSize - NCrypto::AesIv::SIZE, block->blockData.rawData.data() + m_sectorSize, block->ivForNextBlock.iv); - AES128_CBC_decrypt(block->blockData.rawData.data(), block->blockData.rawData.data(), m_sectorSize, m_partitionTitlekey.b, iv.iv); + memcpy(block->ivForNextBlock, block->blockData.rawData.data() + m_sectorSize - 16, 16); + AES128_CBC_decrypt(block->blockData.rawData.data(), block->blockData.rawData.data(), m_sectorSize, m_partitionTitlekey.b, iv); // if this is the next block, then hash it if(cluster.hasContentHash) { diff --git a/src/Cafe/Filesystem/FST/FST.h b/src/Cafe/Filesystem/FST/FST.h index 26201c32..601799ce 100644 --- a/src/Cafe/Filesystem/FST/FST.h +++ 
b/src/Cafe/Filesystem/FST/FST.h @@ -83,6 +83,7 @@ public: } private: + /* FST data (in memory) */ enum class ClusterHashMode : uint8 { @@ -192,7 +193,7 @@ private: std::unordered_map m_cacheDecryptedHashedBlocks; uint64 m_cacheAccessCounter{}; - void DetermineUnhashedBlockIV(uint32 clusterIndex, uint32 blockIndex, NCrypto::AesIv& ivOut); + void DetermineUnhashedBlockIV(uint32 clusterIndex, uint32 blockIndex, uint8 ivOut[16]); struct FSTCachedRawBlock* GetDecryptedRawBlock(uint32 clusterIndex, uint32 blockIndex); struct FSTCachedHashedBlock* GetDecryptedHashedBlock(uint32 clusterIndex, uint32 blockIndex); diff --git a/src/Cafe/GraphicPack/GraphicPack2.cpp b/src/Cafe/GraphicPack/GraphicPack2.cpp index 6ae05c5b..f21bb89d 100644 --- a/src/Cafe/GraphicPack/GraphicPack2.cpp +++ b/src/Cafe/GraphicPack/GraphicPack2.cpp @@ -821,7 +821,7 @@ void GraphicPack2::AddConstantsForCurrentPreset(ExpressionParser& ep) } } -void GraphicPack2::_iterateReplacedFiles(const fs::path& currentPath, bool isAOC, const char* virtualMountBase) +void GraphicPack2::_iterateReplacedFiles(const fs::path& currentPath, bool isAOC) { uint64 currentTitleId = CafeSystem::GetForegroundTitleId(); uint64 aocTitleId = (currentTitleId & 0xFFFFFFFFull) | 0x0005000c00000000ull; @@ -836,7 +836,7 @@ void GraphicPack2::_iterateReplacedFiles(const fs::path& currentPath, bool isAOC } else { - virtualMountPath = fs::path(virtualMountBase) / virtualMountPath; + virtualMountPath = fs::path("vol/content/") / virtualMountPath; } fscDeviceRedirect_add(virtualMountPath.generic_string(), it.file_size(), it.path().generic_string(), m_fs_priority); } @@ -861,7 +861,7 @@ void GraphicPack2::LoadReplacedFiles() { // setup redirections fscDeviceRedirect_map(); - _iterateReplacedFiles(contentPath, false, "vol/content/"); + _iterateReplacedFiles(contentPath, false); } // /aoc/ fs::path aocPath(gfxPackPath); @@ -874,18 +874,7 @@ void GraphicPack2::LoadReplacedFiles() aocTitleId |= 0x0005000c00000000ULL; // setup redirections 
fscDeviceRedirect_map(); - _iterateReplacedFiles(aocPath, true, nullptr); - } - - // /code/ - fs::path codePath(gfxPackPath); - codePath.append("code"); - - if (fs::exists(codePath, ec)) - { - // setup redirections - fscDeviceRedirect_map(); - _iterateReplacedFiles(codePath, false, CafeSystem::GetInternalVirtualCodeFolder().c_str()); + _iterateReplacedFiles(aocPath, true); } } diff --git a/src/Cafe/GraphicPack/GraphicPack2.h b/src/Cafe/GraphicPack/GraphicPack2.h index fc9603cd..9b6a86d4 100644 --- a/src/Cafe/GraphicPack/GraphicPack2.h +++ b/src/Cafe/GraphicPack/GraphicPack2.h @@ -260,7 +260,7 @@ private: CustomShader LoadShader(const fs::path& path, uint64 shader_base_hash, uint64 shader_aux_hash, GP_SHADER_TYPE shader_type) const; void ApplyShaderPresets(std::string& shader_source) const; void LoadReplacedFiles(); - void _iterateReplacedFiles(const fs::path& currentPath, bool isAOC, const char* virtualMountBase); + void _iterateReplacedFiles(const fs::path& currentPath, bool isAOC); // ram mappings std::vector> m_ramMappings; diff --git a/src/Cafe/HW/Espresso/Debugger/Debugger.cpp b/src/Cafe/HW/Espresso/Debugger/Debugger.cpp index e84c9fda..37e374d6 100644 --- a/src/Cafe/HW/Espresso/Debugger/Debugger.cpp +++ b/src/Cafe/HW/Espresso/Debugger/Debugger.cpp @@ -8,7 +8,6 @@ #include "gui/debugger/DebuggerWindow2.h" #include "Cafe/OS/libs/coreinit/coreinit.h" -#include "util/helpers/helpers.h" #if BOOST_OS_WINDOWS #include @@ -137,6 +136,11 @@ void debugger_createCodeBreakpoint(uint32 address, uint8 bpType) debugger_updateExecutionBreakpoint(address); } +void debugger_createExecuteBreakpoint(uint32 address) +{ + debugger_createCodeBreakpoint(address, DEBUGGER_BP_T_NORMAL); +} + namespace coreinit { std::vector& OSGetSchedulerThreads(); @@ -290,23 +294,8 @@ void debugger_toggleExecuteBreakpoint(uint32 address) } else { - // create new execution breakpoint - debugger_createCodeBreakpoint(address, DEBUGGER_BP_T_NORMAL); - } -} - -void debugger_toggleLoggingBreakpoint(uint32 
address) -{ - auto existingBP = debugger_getFirstBP(address, DEBUGGER_BP_T_LOGGING); - if (existingBP) - { - // delete existing breakpoint - debugger_deleteBreakpoint(existingBP); - } - else - { - // create new logging breakpoint - debugger_createCodeBreakpoint(address, DEBUGGER_BP_T_LOGGING); + // create new breakpoint + debugger_createExecuteBreakpoint(address); } } @@ -549,48 +538,7 @@ void debugger_enterTW(PPCInterpreter_t* hCPU) { if (bp->bpType == DEBUGGER_BP_T_LOGGING && bp->enabled) { - std::string comment = !bp->comment.empty() ? boost::nowide::narrow(bp->comment) : fmt::format("Breakpoint at 0x{:08X} (no comment)", bp->address); - - auto replacePlaceholders = [&](const std::string& prefix, const auto& formatFunc) - { - size_t pos = 0; - while ((pos = comment.find(prefix, pos)) != std::string::npos) - { - size_t endPos = comment.find('}', pos); - if (endPos == std::string::npos) - break; - - try - { - if (int regNum = ConvertString(comment.substr(pos + prefix.length(), endPos - pos - prefix.length())); regNum >= 0 && regNum < 32) - { - std::string replacement = formatFunc(regNum); - comment.replace(pos, endPos - pos + 1, replacement); - pos += replacement.length(); - } - else - { - pos = endPos + 1; - } - } - catch (...) - { - pos = endPos + 1; - } - } - }; - - // Replace integer register placeholders {rX} - replacePlaceholders("{r", [&](int regNum) { - return fmt::format("0x{:08X}", hCPU->gpr[regNum]); - }); - - // Replace floating point register placeholders {fX} - replacePlaceholders("{f", [&](int regNum) { - return fmt::format("{}", hCPU->fpr[regNum].fpr); - }); - - std::string logName = "Breakpoint '" + comment + "'"; + std::string logName = !bp->comment.empty() ? "Breakpoint '"+boost::nowide::narrow(bp->comment)+"'" : fmt::format("Breakpoint at 0x{:08X} (no comment)", bp->address); std::string logContext = fmt::format("Thread: {:08x} LR: 0x{:08x}", MEMPTR(coreinit::OSGetCurrentThread()).GetMPTR(), hCPU->spr.LR, cemuLog_advancedPPCLoggingEnabled() ? 
" Stack Trace:" : ""); cemuLog_log(LogType::Force, "[Debugger] {} was executed! {}", logName, logContext); if (cemuLog_advancedPPCLoggingEnabled()) diff --git a/src/Cafe/HW/Espresso/Debugger/Debugger.h b/src/Cafe/HW/Espresso/Debugger/Debugger.h index c220eb8a..249c47b8 100644 --- a/src/Cafe/HW/Espresso/Debugger/Debugger.h +++ b/src/Cafe/HW/Espresso/Debugger/Debugger.h @@ -100,8 +100,8 @@ extern debuggerState_t debuggerState; // new API DebuggerBreakpoint* debugger_getFirstBP(uint32 address); void debugger_createCodeBreakpoint(uint32 address, uint8 bpType); +void debugger_createExecuteBreakpoint(uint32 address); void debugger_toggleExecuteBreakpoint(uint32 address); // create/remove execute breakpoint -void debugger_toggleLoggingBreakpoint(uint32 address); // create/remove logging breakpoint void debugger_toggleBreakpoint(uint32 address, bool state, DebuggerBreakpoint* bp); void debugger_createMemoryBreakpoint(uint32 address, bool onRead, bool onWrite); diff --git a/src/Cafe/HW/Espresso/EspressoISA.h b/src/Cafe/HW/Espresso/EspressoISA.h index 5e09763b..b3ae45c3 100644 --- a/src/Cafe/HW/Espresso/EspressoISA.h +++ b/src/Cafe/HW/Espresso/EspressoISA.h @@ -10,18 +10,6 @@ namespace Espresso CR_BIT_INDEX_SO = 3, }; - enum class PSQ_LOAD_TYPE - { - TYPE_F32 = 0, - TYPE_UNUSED1 = 1, - TYPE_UNUSED2 = 2, - TYPE_UNUSED3 = 3, - TYPE_U8 = 4, - TYPE_U16 = 5, - TYPE_S8 = 6, - TYPE_S16 = 7, - }; - enum class PrimaryOpcode { // underscore at the end of the name means that this instruction always updates CR0 (as if RC bit is set) @@ -103,15 +91,13 @@ namespace Espresso BCCTR = 528 }; - enum class Opcode31 + enum class OPCODE_31 { - TW = 4, - MFTB = 371, + }; inline PrimaryOpcode GetPrimaryOpcode(uint32 opcode) { return (PrimaryOpcode)(opcode >> 26); }; inline Opcode19 GetGroup19Opcode(uint32 opcode) { return (Opcode19)((opcode >> 1) & 0x3FF); }; - inline Opcode31 GetGroup31Opcode(uint32 opcode) { return (Opcode31)((opcode >> 1) & 0x3FF); }; struct BOField { @@ -146,12 +132,6 @@ 
namespace Espresso uint8 bo; }; - // returns true if LK bit is set, only valid for branch instructions - inline bool DecodeLK(uint32 opcode) - { - return (opcode & 1) != 0; - } - inline void _decodeForm_I(uint32 opcode, uint32& LI, bool& AA, bool& LK) { LI = opcode & 0x3fffffc; @@ -203,7 +183,13 @@ namespace Espresso _decodeForm_D_branch(opcode, BD, BO, BI, AA, LK); } - inline void decodeOp_BCSPR(uint32 opcode, BOField& BO, uint32& BI, bool& LK) // BCLR and BCSPR + inline void decodeOp_BCLR(uint32 opcode, BOField& BO, uint32& BI, bool& LK) + { + // form XL (with BD field expected to be zero) + _decodeForm_XL(opcode, BO, BI, LK); + } + + inline void decodeOp_BCCTR(uint32 opcode, BOField& BO, uint32& BI, bool& LK) { // form XL (with BD field expected to be zero) _decodeForm_XL(opcode, BO, BI, LK); diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterALU.hpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterALU.hpp index 2fe07509..fe9316f0 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterALU.hpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterALU.hpp @@ -3,12 +3,12 @@ static void PPCInterpreter_setXerOV(PPCInterpreter_t* hCPU, bool hasOverflow) { if (hasOverflow) { - hCPU->xer_so = 1; - hCPU->xer_ov = 1; + hCPU->spr.XER |= XER_SO; + hCPU->spr.XER |= XER_OV; } else { - hCPU->xer_ov = 0; + hCPU->spr.XER &= ~XER_OV; } } @@ -41,7 +41,7 @@ static void PPCInterpreter_ADD(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_ADDO(PPCInterpreter_t* hCPU, uint32 opcode) { - // Don't Starve Giant Edition uses this instruction + BSO + // untested (Don't Starve Giant Edition uses this instruction + BSO) PPC_OPC_TEMPL3_XO(); uint32 result = hCPU->gpr[rA] + hCPU->gpr[rB]; PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(hCPU->gpr[rA], hCPU->gpr[rB], result)); @@ -113,6 +113,7 @@ static void PPCInterpreter_ADDEO(PPCInterpreter_t* hCPU, uint32 opcode) else hCPU->xer_ca = 0; PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(a, b, hCPU->gpr[rD])); 
+ // update CR if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -129,7 +130,7 @@ static void PPCInterpreter_ADDI(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_ADDIC(PPCInterpreter_t* hCPU, uint32 opcode) { - sint32 rD, rA; + int rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); uint32 a = hCPU->gpr[rA]; @@ -144,7 +145,7 @@ static void PPCInterpreter_ADDIC(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_ADDIC_(PPCInterpreter_t* hCPU, uint32 opcode) { - sint32 rD, rA; + int rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); uint32 a = hCPU->gpr[rA]; @@ -154,13 +155,14 @@ static void PPCInterpreter_ADDIC_(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->xer_ca = 1; else hCPU->xer_ca = 0; + // update cr0 flags ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); } static void PPCInterpreter_ADDIS(PPCInterpreter_t* hCPU, uint32 opcode) { - sint32 rD, rA; + int rD, rA; uint32 imm; PPC_OPC_TEMPL_D_Shift16(opcode, rD, rA, imm); hCPU->gpr[rD] = (rA ? 
hCPU->gpr[rA] : 0) + imm; @@ -183,23 +185,6 @@ static void PPCInterpreter_ADDZE(PPCInterpreter_t* hCPU, uint32 opcode) PPCInterpreter_nextInstruction(hCPU); } -static void PPCInterpreter_ADDZEO(PPCInterpreter_t* hCPU, uint32 opcode) -{ - PPC_OPC_TEMPL3_XO(); - PPC_ASSERT(rB == 0); - uint32 a = hCPU->gpr[rA]; - uint32 ca = hCPU->xer_ca; - hCPU->gpr[rD] = a + ca; - PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(a, 0, hCPU->gpr[rD])); - if ((a == 0xffffffff) && ca) - hCPU->xer_ca = 1; - else - hCPU->xer_ca = 0; - if (opHasRC()) - ppc_update_cr0(hCPU, hCPU->gpr[rD]); - PPCInterpreter_nextInstruction(hCPU); -} - static void PPCInterpreter_ADDME(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL3_XO(); @@ -216,23 +201,6 @@ static void PPCInterpreter_ADDME(PPCInterpreter_t* hCPU, uint32 opcode) PPCInterpreter_nextInstruction(hCPU); } -static void PPCInterpreter_ADDMEO(PPCInterpreter_t* hCPU, uint32 opcode) -{ - PPC_OPC_TEMPL3_XO(); - PPC_ASSERT(rB == 0); - uint32 a = hCPU->gpr[rA]; - uint32 ca = hCPU->xer_ca; - hCPU->gpr[rD] = a + ca + 0xffffffff; - PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(a, 0xffffffff, hCPU->gpr[rD])); - if (a || ca) - hCPU->xer_ca = 1; - else - hCPU->xer_ca = 0; - if (opHasRC()) - ppc_update_cr0(hCPU, hCPU->gpr[rD]); - PPCInterpreter_nextInstruction(hCPU); -} - static void PPCInterpreter_SUBF(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL3_XO(); @@ -278,7 +246,7 @@ static void PPCInterpreter_SUBFCO(PPCInterpreter_t* hCPU, uint32 opcode) uint32 a = hCPU->gpr[rA]; uint32 b = hCPU->gpr[rB]; hCPU->gpr[rD] = ~a + b + 1; - // update carry + // update xer if (ppc_carry_3(~a, b, 1)) hCPU->xer_ca = 1; else @@ -292,7 +260,7 @@ static void PPCInterpreter_SUBFCO(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_SUBFIC(PPCInterpreter_t* hCPU, uint32 opcode) { - sint32 rD, rA; + int rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); uint32 a = hCPU->gpr[rA]; @@ -316,6 +284,7 @@ static void 
PPCInterpreter_SUBFE(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->xer_ca = 1; else hCPU->xer_ca = 0; + // update cr0 if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -335,6 +304,7 @@ static void PPCInterpreter_SUBFEO(PPCInterpreter_t* hCPU, uint32 opcode) else hCPU->xer_ca = 0; PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(~a, b, result)); + // update cr0 if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -356,25 +326,9 @@ static void PPCInterpreter_SUBFZE(PPCInterpreter_t* hCPU, uint32 opcode) PPCInterpreter_nextInstruction(hCPU); } -static void PPCInterpreter_SUBFZEO(PPCInterpreter_t* hCPU, uint32 opcode) -{ - PPC_OPC_TEMPL3_XO(); - PPC_ASSERT(rB == 0); - uint32 a = hCPU->gpr[rA]; - uint32 ca = hCPU->xer_ca; - hCPU->gpr[rD] = ~a + ca; - PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(~a, 0, hCPU->gpr[rD])); - if (a == 0 && ca) - hCPU->xer_ca = 1; - else - hCPU->xer_ca = 0; - if (opHasRC()) - ppc_update_cr0(hCPU, hCPU->gpr[rD]); - PPCInterpreter_nextInstruction(hCPU); -} - static void PPCInterpreter_SUBFME(PPCInterpreter_t* hCPU, uint32 opcode) { + // untested PPC_OPC_TEMPL3_XO(); PPC_ASSERT(rB == 0); uint32 a = hCPU->gpr[rA]; @@ -385,24 +339,7 @@ static void PPCInterpreter_SUBFME(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->xer_ca = 1; else hCPU->xer_ca = 0; - if (opcode & PPC_OPC_RC) - ppc_update_cr0(hCPU, hCPU->gpr[rD]); - PPCInterpreter_nextInstruction(hCPU); -} - -static void PPCInterpreter_SUBFMEO(PPCInterpreter_t* hCPU, uint32 opcode) -{ - PPC_OPC_TEMPL3_XO(); - PPC_ASSERT(rB == 0); - uint32 a = hCPU->gpr[rA]; - uint32 ca = hCPU->xer_ca; - hCPU->gpr[rD] = ~a + 0xFFFFFFFF + ca; - PPCInterpreter_setXerOV(hCPU, checkAdditionOverflow(~a, 0xFFFFFFFF, hCPU->gpr[rD])); - // update xer carry - if (ppc_carry_3(~a, 0xFFFFFFFF, ca)) - hCPU->xer_ca = 1; - else - hCPU->xer_ca = 0; + // update cr0 if (opcode & PPC_OPC_RC) ppc_update_cr0(hCPU, hCPU->gpr[rD]); 
PPCInterpreter_nextInstruction(hCPU); @@ -415,8 +352,13 @@ static void PPCInterpreter_MULHW_(PPCInterpreter_t* hCPU, uint32 opcode) sint64 b = (sint32)hCPU->gpr[rB]; sint64 c = a * b; hCPU->gpr[rD] = ((uint64)c) >> 32; - if (opHasRC()) + if (opcode & PPC_OPC_RC) { + // update cr0 flags +#ifdef CEMU_DEBUG_ASSERT + assert_dbg(); +#endif ppc_update_cr0(hCPU, hCPU->gpr[rD]); + } PPCInterpreter_nextInstruction(hCPU); } @@ -467,14 +409,14 @@ static void PPCInterpreter_MULLI(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_DIVW(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL3_XO(); - sint32 a = (sint32)hCPU->gpr[rA]; - sint32 b = (sint32)hCPU->gpr[rB]; + sint32 a = hCPU->gpr[rA]; + sint32 b = hCPU->gpr[rB]; if (b == 0) - hCPU->gpr[rD] = a < 0 ? 0xFFFFFFFF : 0; - else if (a == 0x80000000 && b == 0xFFFFFFFF) - hCPU->gpr[rD] = 0xFFFFFFFF; - else - hCPU->gpr[rD] = a / b; + { + cemuLog_logDebug(LogType::Force, "Error: Division by zero! [{:08x}]", (uint32)hCPU->instructionPointer); + b++; + } + hCPU->gpr[rD] = a / b; if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -483,23 +425,16 @@ static void PPCInterpreter_DIVW(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_DIVWO(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL3_XO(); - sint32 a = (sint32)hCPU->gpr[rA]; - sint32 b = (sint32)hCPU->gpr[rB]; + sint32 a = hCPU->gpr[rA]; + sint32 b = hCPU->gpr[rB]; if (b == 0) { PPCInterpreter_setXerOV(hCPU, true); - hCPU->gpr[rD] = a < 0 ? 
0xFFFFFFFF : 0; - } - else if(a == 0x80000000 && b == 0xFFFFFFFF) - { - PPCInterpreter_setXerOV(hCPU, true); - hCPU->gpr[rD] = 0xFFFFFFFF; - } - else - { - hCPU->gpr[rD] = a / b; - PPCInterpreter_setXerOV(hCPU, false); + PPCInterpreter_nextInstruction(hCPU); + return; } + hCPU->gpr[rD] = a / b; + PPCInterpreter_setXerOV(hCPU, false); if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -508,14 +443,12 @@ static void PPCInterpreter_DIVWO(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_DIVWU(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL3_XO(); - uint32 a = hCPU->gpr[rA]; - uint32 b = hCPU->gpr[rB]; - if (b == 0) - hCPU->gpr[rD] = 0; - else if (a == 0x80000000 && b == 0xFFFFFFFF) - hCPU->gpr[rD] = 0; - else - hCPU->gpr[rD] = a / b; + if (hCPU->gpr[rB] == 0) + { + PPCInterpreter_nextInstruction(hCPU); + return; + } + hCPU->gpr[rD] = hCPU->gpr[rA] / hCPU->gpr[rB]; if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -524,23 +457,14 @@ static void PPCInterpreter_DIVWU(PPCInterpreter_t* hCPU, uint32 opcode) static void PPCInterpreter_DIVWUO(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL3_XO(); - uint32 a = hCPU->gpr[rA]; - uint32 b = hCPU->gpr[rB]; - if (b == 0) + if (hCPU->gpr[rB] == 0) { PPCInterpreter_setXerOV(hCPU, true); - hCPU->gpr[rD] = 0; - } - else if(a == 0x80000000 && b == 0xFFFFFFFF) - { - PPCInterpreter_setXerOV(hCPU, false); - hCPU->gpr[rD] = 0; - } - else - { - hCPU->gpr[rD] = a / b; - PPCInterpreter_setXerOV(hCPU, false); + PPCInterpreter_nextInstruction(hCPU); + return; } + hCPU->gpr[rD] = hCPU->gpr[rA] / hCPU->gpr[rB]; + PPCInterpreter_setXerOV(hCPU, false); if (opHasRC()) ppc_update_cr0(hCPU, hCPU->gpr[rD]); PPCInterpreter_nextInstruction(hCPU); @@ -567,13 +491,6 @@ static void PPCInterpreter_CRANDC(PPCInterpreter_t* hCPU, uint32 opcode) PPCInterpreter_nextInstruction(hCPU); } -static void PPCInterpreter_CRNAND(PPCInterpreter_t* hCPU, 
uint32 opcode) -{ - PPC_OPC_TEMPL_X_CR(); - ppc_setCRBit(hCPU, crD, (ppc_getCRBit(hCPU, crA)&ppc_getCRBit(hCPU, crB)) ^ 1); - PPCInterpreter_nextInstruction(hCPU); -} - static void PPCInterpreter_CROR(PPCInterpreter_t* hCPU, uint32 opcode) { PPC_OPC_TEMPL_X_CR(); @@ -931,7 +848,8 @@ static void PPCInterpreter_CMP(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->cr[cr * 4 + CR_BIT_GT] = 1; else hCPU->cr[cr * 4 + CR_BIT_EQ] = 1; - hCPU->cr[cr * 4 + CR_BIT_SO] = hCPU->xer_so; + if ((hCPU->spr.XER & XER_SO) != 0) + hCPU->cr[cr * 4 + CR_BIT_SO] = 1; PPCInterpreter_nextInstruction(hCPU); } @@ -953,7 +871,8 @@ static void PPCInterpreter_CMPL(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->cr[cr * 4 + CR_BIT_GT] = 1; else hCPU->cr[cr * 4 + CR_BIT_EQ] = 1; - hCPU->cr[cr * 4 + CR_BIT_SO] = hCPU->xer_so; + if ((hCPU->spr.XER & XER_SO) != 0) + hCPU->cr[cr * 4 + CR_BIT_SO] = 1; PPCInterpreter_nextInstruction(hCPU); } @@ -976,7 +895,8 @@ static void PPCInterpreter_CMPI(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->cr[cr * 4 + CR_BIT_GT] = 1; else hCPU->cr[cr * 4 + CR_BIT_EQ] = 1; - hCPU->cr[cr * 4 + CR_BIT_SO] = hCPU->xer_so; + if (hCPU->spr.XER & XER_SO) + hCPU->cr[cr * 4 + CR_BIT_SO] = 1; PPCInterpreter_nextInstruction(hCPU); } @@ -999,7 +919,8 @@ static void PPCInterpreter_CMPLI(PPCInterpreter_t* hCPU, uint32 opcode) hCPU->cr[cr * 4 + CR_BIT_GT] = 1; else hCPU->cr[cr * 4 + CR_BIT_EQ] = 1; - hCPU->cr[cr * 4 + CR_BIT_SO] = hCPU->xer_so; + if (hCPU->spr.XER & XER_SO) + hCPU->cr[cr * 4 + CR_BIT_SO] = 1; PPCInterpreter_nextInstruction(hCPU); } diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterFPU.cpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterFPU.cpp index 2c99b84c..aed571d7 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterFPU.cpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterFPU.cpp @@ -32,7 +32,7 @@ espresso_frsqrte_entry_t frsqrteLookupTable[32] = {0x20c1000, 0x35e},{0x1f12000, 0x332},{0x1d79000, 0x30a},{0x1bf4000, 0x2e6}, }; -ATTR_MS_ABI double 
frsqrte_espresso(double input) +double frsqrte_espresso(double input) { unsigned long long x = *(unsigned long long*)&input; @@ -111,7 +111,7 @@ espresso_fres_entry_t fresLookupTable[32] = {0x88400, 0x11a}, {0x65000, 0x11a}, {0x41c00, 0x108}, {0x20c00, 0x106} }; -ATTR_MS_ABI double fres_espresso(double input) +double fres_espresso(double input) { // based on testing we know that fres uses only the first 15 bits of the mantissa // seee eeee eeee mmmm mmmm mmmm mmmx xxxx .... (s = sign, e = exponent, m = mantissa, x = not used) diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterHLE.cpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterHLE.cpp index cf7ba195..24219e66 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterHLE.cpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterHLE.cpp @@ -2,70 +2,62 @@ #include "PPCInterpreterInternal.h" #include "PPCInterpreterHelper.h" -std::unordered_set s_unsupportedHLECalls; +std::unordered_set sUnsupportedHLECalls; void PPCInterpreter_handleUnsupportedHLECall(PPCInterpreter_t* hCPU) { const char* libFuncName = (char*)memory_getPointerFromVirtualOffset(hCPU->instructionPointer + 8); std::string tempString = fmt::format("Unsupported lib call: {}", libFuncName); - if (s_unsupportedHLECalls.find(tempString) == s_unsupportedHLECalls.end()) + if (sUnsupportedHLECalls.find(tempString) == sUnsupportedHLECalls.end()) { cemuLog_log(LogType::UnsupportedAPI, "{}", tempString); - s_unsupportedHLECalls.emplace(tempString); + sUnsupportedHLECalls.emplace(tempString); } hCPU->gpr[3] = 0; PPCInterpreter_nextInstruction(hCPU); } -static constexpr size_t HLE_TABLE_CAPACITY = 0x4000; -HLECALL s_ppcHleTable[HLE_TABLE_CAPACITY]{}; -sint32 s_ppcHleTableWriteIndex = 0; -std::mutex s_ppcHleTableMutex; +std::vector* sPPCHLETable{}; HLEIDX PPCInterpreter_registerHLECall(HLECALL hleCall, std::string hleName) { - std::unique_lock _l(s_ppcHleTableMutex); - if (s_ppcHleTableWriteIndex >= HLE_TABLE_CAPACITY) + if (!sPPCHLETable) + 
sPPCHLETable = new std::vector(); + for (sint32 i = 0; i < sPPCHLETable->size(); i++) { - cemuLog_log(LogType::Force, "HLE table is full"); - cemu_assert(false); - } - for (sint32 i = 0; i < s_ppcHleTableWriteIndex; i++) - { - if (s_ppcHleTable[i] == hleCall) - { + if ((*sPPCHLETable)[i] == hleCall) return i; - } } - cemu_assert(s_ppcHleTableWriteIndex < HLE_TABLE_CAPACITY); - s_ppcHleTable[s_ppcHleTableWriteIndex] = hleCall; - HLEIDX funcIndex = s_ppcHleTableWriteIndex; - s_ppcHleTableWriteIndex++; - return funcIndex; + HLEIDX newFuncIndex = (sint32)sPPCHLETable->size(); + sPPCHLETable->resize(sPPCHLETable->size() + 1); + (*sPPCHLETable)[newFuncIndex] = hleCall; + return newFuncIndex; } HLECALL PPCInterpreter_getHLECall(HLEIDX funcIndex) { - if (funcIndex < 0 || funcIndex >= HLE_TABLE_CAPACITY) + if (funcIndex < 0 || funcIndex >= sPPCHLETable->size()) return nullptr; - return s_ppcHleTable[funcIndex]; + return sPPCHLETable->data()[funcIndex]; } -std::mutex s_hleLogMutex; +std::mutex g_hleLogMutex; void PPCInterpreter_virtualHLE(PPCInterpreter_t* hCPU, unsigned int opcode) { uint32 hleFuncId = opcode & 0xFFFF; - if (hleFuncId == 0xFFD0) [[unlikely]] + if (hleFuncId == 0xFFD0) { - s_hleLogMutex.lock(); + g_hleLogMutex.lock(); PPCInterpreter_handleUnsupportedHLECall(hCPU); - s_hleLogMutex.unlock(); + g_hleLogMutex.unlock(); + return; } else { // os lib function - auto hleCall = PPCInterpreter_getHLECall(hleFuncId); + cemu_assert(hleFuncId < sPPCHLETable->size()); + auto hleCall = (*sPPCHLETable)[hleFuncId]; cemu_assert(hleCall); hleCall(hCPU); } diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterImpl.cpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterImpl.cpp index 547472ab..cacfa4a9 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterImpl.cpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterImpl.cpp @@ -428,6 +428,9 @@ public: } }; +uint32 testIP[100]; +uint32 testIPC = 0; + template class PPCInterpreterContainer { @@ -463,10 +466,6 @@ public: 
case 1: // virtual HLE PPCInterpreter_virtualHLE(hCPU, opcode); break; - case 3: - cemuLog_logDebug(LogType::Force, "Unsupported TWI instruction executed at {:08x}", hCPU->instructionPointer); - PPCInterpreter_nextInstruction(hCPU); - break; case 4: switch (PPC_getBits(opcode, 30, 5)) { @@ -483,9 +482,8 @@ public: PPCInterpreter_PS_CMPU1(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [4->0] at {:08x}", PPC_getBits(opcode, 25, 5), hCPU->instructionPointer); + debug_printf("Unknown execute %04X as [4->0] at %08X\n", PPC_getBits(opcode, 25, 5), hCPU->instructionPointer); cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; break; } break; @@ -511,9 +509,8 @@ public: PPCInterpreter_PS_ABS(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [4->8] at {:08x}", PPC_getBits(opcode, 25, 5), hCPU->instructionPointer); + debug_printf("Unknown execute %04X as [4->8] at %08X\n", PPC_getBits(opcode, 25, 5), hCPU->instructionPointer); cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; break; } break; @@ -551,9 +548,8 @@ public: PPCInterpreter_PS_MERGE11(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [4->16] at {:08x}", PPC_getBits(opcode, 25, 5), hCPU->instructionPointer); - cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; + debug_printf("Unknown execute %04X as [4->16] at %08X\n", PPC_getBits(opcode, 25, 5), hCPU->instructionPointer); + debugBreakpoint(); break; } break; @@ -594,9 +590,8 @@ public: PPCInterpreter_PS_NMADD(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [4] at {:08x}", PPC_getBits(opcode, 30, 5), hCPU->instructionPointer); + debug_printf("Unknown execute %04X as [4] at %08X\n", PPC_getBits(opcode, 30, 5), hCPU->instructionPointer); cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; break; } break; @@ -628,15 +623,12 @@ public: 
PPCInterpreter_BCX(hCPU, opcode); break; case 17: - if (PPC_getBits(opcode, 30, 1) == 1) - { + if (PPC_getBits(opcode, 30, 1) == 1) { PPCInterpreter_SC(hCPU, opcode); } - else - { - cemuLog_logDebug(LogType::Force, "Unsupported Opcode [0x17 --> 0x0]"); + else { + debug_printf("Unsupported Opcode [0x17 --> 0x0]\n"); cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; } break; case 18: @@ -666,9 +658,6 @@ public: case 193: PPCInterpreter_CRXOR(hCPU, opcode); break; - case 225: - PPCInterpreter_CRNAND(hCPU, opcode); - break; case 257: PPCInterpreter_CRAND(hCPU, opcode); break; @@ -685,9 +674,8 @@ public: PPCInterpreter_BCCTR(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [19] at {:08x}\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); + debug_printf("Unknown execute %04X as [19] at %08X\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; break; } break; @@ -725,6 +713,9 @@ public: PPCInterpreter_CMP(hCPU, opcode); break; case 4: + #ifdef CEMU_DEBUG_ASSERT + debug_printf("TW instruction executed at %08x\n", hCPU->instructionPointer); + #endif PPCInterpreter_TW(hCPU, opcode); break; case 8: @@ -904,12 +895,6 @@ public: case 522: PPCInterpreter_ADDCO(hCPU, opcode); break; - case 523: // 11 | OE - PPCInterpreter_MULHWU_(hCPU, opcode); // OE is ignored - break; - case 533: - PPCInterpreter_LSWX(hCPU, opcode); - break; case 534: PPCInterpreter_LWBRX(hCPU, opcode); break; @@ -928,9 +913,6 @@ public: case 567: PPCInterpreter_LFSUX(hCPU, opcode); break; - case 587: // 75 | OE - PPCInterpreter_MULHW_(hCPU, opcode); // OE is ignored for MULHW - break; case 595: PPCInterpreter_MFSR(hCPU, opcode); break; @@ -961,30 +943,15 @@ public: case 663: PPCInterpreter_STFSX(hCPU, opcode); break; - case 661: - PPCInterpreter_STSWX(hCPU, opcode); - break; case 695: PPCInterpreter_STFSUX(hCPU, opcode); break; - case 712: // 200 | OE - 
PPCInterpreter_SUBFZEO(hCPU, opcode); - break; - case 714: // 202 | OE - PPCInterpreter_ADDZEO(hCPU, opcode); - break; case 725: PPCInterpreter_STSWI(hCPU, opcode); break; case 727: PPCInterpreter_STFDX(hCPU, opcode); break; - case 744: // 232 | OE - PPCInterpreter_SUBFMEO(hCPU, opcode); - break; - case 746: // 234 | OE - PPCInterpreter_ADDMEO(hCPU, opcode); - break; case 747: PPCInterpreter_MULLWO(hCPU, opcode); break; @@ -1031,8 +998,10 @@ public: PPCInterpreter_DCBZ(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [31] at {:08x}\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); - cemu_assert_unimplemented(); + debug_printf("Unknown execute %04X as [31] at %08X\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); + #ifdef CEMU_DEBUG_ASSERT + assert_dbg(); + #endif hCPU->instructionPointer += 4; break; } @@ -1115,7 +1084,7 @@ public: case 57: PPCInterpreter_PSQ_LU(hCPU, opcode); break; - case 59: // opcode category + case 59: //Opcode category switch (PPC_getBits(opcode, 30, 5)) { case 18: @@ -1146,9 +1115,8 @@ public: PPCInterpreter_FNMADDS(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [59] at {:08x}\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); + debug_printf("Unknown execute %04X as [59] at %08X\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); cemu_assert_unimplemented(); - hCPU->instructionPointer += 4; break; } break; @@ -1227,19 +1195,18 @@ public: case 583: PPCInterpreter_MFFS(hCPU, opcode); break; - case 711: + case 711: // IBM documentation has this wrong as 771? 
PPCInterpreter_MTFSF(hCPU, opcode); break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} as [63] at {:08x}\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); + debug_printf("Unknown execute %04X as [63] at %08X\n", PPC_getBits(opcode, 30, 10), hCPU->instructionPointer); cemu_assert_unimplemented(); - PPCInterpreter_nextInstruction(hCPU); break; } } break; default: - cemuLog_logDebug(LogType::Force, "Unknown execute {:04x} at {:08x}\n", PPC_getBits(opcode, 5, 6), (unsigned int)hCPU->instructionPointer); + debug_printf("Unknown execute %04X at %08X\n", PPC_getBits(opcode, 5, 6), (unsigned int)hCPU->instructionPointer); cemu_assert_unimplemented(); } } diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h index 896fd21c..bc8458d9 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h @@ -50,9 +50,9 @@ #define CR_BIT_EQ 2 #define CR_BIT_SO 3 +#define XER_SO (1<<31) // summary overflow bit +#define XER_OV (1<<30) // overflow bit #define XER_BIT_CA (29) // carry bit index. To accelerate frequent access, this bit is stored as a separate uint8 -#define XER_BIT_SO (31) // summary overflow, counterpart to CR SO -#define XER_BIT_OV (30) // FPSCR #define FPSCR_VXSNAN (1<<24) @@ -118,8 +118,7 @@ static inline void ppc_update_cr0(PPCInterpreter_t* hCPU, uint32 r) { - cemu_assert_debug(hCPU->xer_so <= 1); - hCPU->cr[CR_BIT_SO] = hCPU->xer_so; + hCPU->cr[CR_BIT_SO] = (hCPU->spr.XER&XER_SO) ? 1 : 0; hCPU->cr[CR_BIT_LT] = ((r != 0) ? 1 : 0) & ((r & 0x80000000) ? 1 : 0); hCPU->cr[CR_BIT_EQ] = (r == 0); hCPU->cr[CR_BIT_GT] = hCPU->cr[CR_BIT_EQ] ^ hCPU->cr[CR_BIT_LT] ^ 1; // this works because EQ and LT can never be set at the same time. 
So the only case where GT becomes 1 is when LT=0 and EQ=0 @@ -191,8 +190,8 @@ inline double roundTo25BitAccuracy(double d) return *(double*)&v; } -ATTR_MS_ABI double fres_espresso(double input); -ATTR_MS_ABI double frsqrte_espresso(double input); +double fres_espresso(double input); +double frsqrte_espresso(double input); void fcmpu_espresso(PPCInterpreter_t* hCPU, int crfD, double a, double b); diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterLoadStore.hpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterLoadStore.hpp index ea7bb038..694e05e6 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterLoadStore.hpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterLoadStore.hpp @@ -31,7 +31,7 @@ static void PPCInterpreter_STW(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STWU(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS; + int rA, rS; uint32 imm; PPC_OPC_TEMPL_D_SImm(Opcode, rS, rA, imm); ppcItpCtrl::ppcMem_writeDataU32(hCPU, hCPU->gpr[rA] + imm, hCPU->gpr[rS]); @@ -42,7 +42,7 @@ static void PPCInterpreter_STWU(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STWX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU32(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], hCPU->gpr[rS]); PPCInterpreter_nextInstruction(hCPU); @@ -85,8 +85,7 @@ static void PPCInterpreter_STWCX(PPCInterpreter_t* hCPU, uint32 Opcode) ppc_setCRBit(hCPU, CR_BIT_GT, 0); ppc_setCRBit(hCPU, CR_BIT_EQ, 1); } - cemu_assert_debug(hCPU->xer_so <= 1); - ppc_setCRBit(hCPU, CR_BIT_SO, hCPU->xer_so); + ppc_setCRBit(hCPU, CR_BIT_SO, (hCPU->spr.XER&XER_SO) != 0 ? 
1 : 0); // remove reservation hCPU->reservedMemAddr = 0; hCPU->reservedMemValue = 0; @@ -103,7 +102,7 @@ static void PPCInterpreter_STWCX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STWUX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU32(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], hCPU->gpr[rS]); if (rA) @@ -113,7 +112,7 @@ static void PPCInterpreter_STWUX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STWBRX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU32(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], _swapEndianU32(hCPU->gpr[rS])); PPCInterpreter_nextInstruction(hCPU); @@ -121,7 +120,7 @@ static void PPCInterpreter_STWBRX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STMW(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rS, rA; + int rS, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(Opcode, rS, rA, imm); uint32 ea = (rA ? hCPU->gpr[rA] : 0) + imm; @@ -136,7 +135,7 @@ static void PPCInterpreter_STMW(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STH(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS; + int rA, rS; uint32 imm; PPC_OPC_TEMPL_D_SImm(Opcode, rS, rA, imm); ppcItpCtrl::ppcMem_writeDataU16(hCPU, (rA ? hCPU->gpr[rA] : 0) + imm, (uint16)hCPU->gpr[rS]); @@ -145,7 +144,7 @@ static void PPCInterpreter_STH(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STHU(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS; + int rA, rS; uint32 imm; PPC_OPC_TEMPL_D_SImm(Opcode, rS, rA, imm); ppcItpCtrl::ppcMem_writeDataU16(hCPU, (rA ? 
hCPU->gpr[rA] : 0) + imm, (uint16)hCPU->gpr[rS]); @@ -156,7 +155,7 @@ static void PPCInterpreter_STHU(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STHX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU16(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], (uint16)hCPU->gpr[rS]); PPCInterpreter_nextInstruction(hCPU); @@ -164,7 +163,7 @@ static void PPCInterpreter_STHX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STHUX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU16(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], (uint16)hCPU->gpr[rS]); if (rA) @@ -174,7 +173,7 @@ static void PPCInterpreter_STHUX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STHBRX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU16(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], _swapEndianU16((uint16)hCPU->gpr[rS])); PPCInterpreter_nextInstruction(hCPU); @@ -182,7 +181,7 @@ static void PPCInterpreter_STHBRX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STB(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS; + int rA, rS; uint32 imm; PPC_OPC_TEMPL_D_SImm(Opcode, rS, rA, imm); ppcItpCtrl::ppcMem_writeDataU8(hCPU, (rA ? 
hCPU->gpr[rA] : 0) + imm, (uint8)hCPU->gpr[rS]); @@ -191,7 +190,7 @@ static void PPCInterpreter_STB(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STBU(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS; + int rA, rS; uint32 imm; PPC_OPC_TEMPL_D_SImm(Opcode, rS, rA, imm); ppcItpCtrl::ppcMem_writeDataU8(hCPU, hCPU->gpr[rA] + imm, (uint8)hCPU->gpr[rS]); @@ -201,7 +200,7 @@ static void PPCInterpreter_STBU(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STBX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU8(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], (uint8)hCPU->gpr[rS]); PPCInterpreter_nextInstruction(hCPU); @@ -209,7 +208,7 @@ static void PPCInterpreter_STBX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STBUX(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, rB; + int rA, rS, rB; PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); ppcItpCtrl::ppcMem_writeDataU8(hCPU, (rA ? hCPU->gpr[rA] : 0) + hCPU->gpr[rB], (uint8)hCPU->gpr[rS]); if (rA) @@ -219,7 +218,7 @@ static void PPCInterpreter_STBUX(PPCInterpreter_t* hCPU, uint32 Opcode) static void PPCInterpreter_STSWI(PPCInterpreter_t* hCPU, uint32 Opcode) { - sint32 rA, rS, nb; + int rA, rS, nb; PPC_OPC_TEMPL_X(Opcode, rS, rA, nb); if (nb == 0) nb = 32; uint32 ea = rA ? hCPU->gpr[rA] : 0; @@ -229,39 +228,7 @@ static void PPCInterpreter_STSWI(PPCInterpreter_t* hCPU, uint32 Opcode) { if (i == 0) { - r = rS < 32 ? hCPU->gpr[rS] : 0; // what happens if rS is out of bounds? 
- rS++; - rS %= 32; - i = 4; - } - ppcItpCtrl::ppcMem_writeDataU8(hCPU, ea, (r >> 24)); - r <<= 8; - ea++; - i--; - nb--; - } - PPCInterpreter_nextInstruction(hCPU); -} - -static void PPCInterpreter_STSWX(PPCInterpreter_t* hCPU, uint32 Opcode) -{ - sint32 rA, rS, rB; - PPC_OPC_TEMPL_X(Opcode, rS, rA, rB); - sint32 nb = hCPU->spr.XER&0x7F; - if (nb == 0) - { - PPCInterpreter_nextInstruction(hCPU); - return; - } - uint32 ea = rA ? hCPU->gpr[rA] : 0; - ea += hCPU->gpr[rB]; - uint32 r = 0; - int i = 0; - while (nb > 0) - { - if (i == 0) - { - r = rS < 32 ? hCPU->gpr[rS] : 0; // what happens if rS is out of bounds? + r = hCPU->gpr[rS]; rS++; rS %= 32; i = 4; @@ -492,51 +459,8 @@ static void PPCInterpreter_LSWI(PPCInterpreter_t* hCPU, uint32 Opcode) PPC_OPC_TEMPL_X(Opcode, rD, rA, nb); if (nb == 0) nb = 32; - uint32 ea = rA ? hCPU->gpr[rA] : 0; - uint32 r = 0; - int i = 4; - uint8 v; - while (nb>0) - { - if (i == 0) - { - i = 4; - if(rD < 32) - hCPU->gpr[rD] = r; - rD++; - rD %= 32; - r = 0; - } - v = ppcItpCtrl::ppcMem_readDataU8(hCPU, ea); - r <<= 8; - r |= v; - ea++; - i--; - nb--; - } - while (i) - { - r <<= 8; - i--; - } - if(rD < 32) - hCPU->gpr[rD] = r; - PPCInterpreter_nextInstruction(hCPU); -} -static void PPCInterpreter_LSWX(PPCInterpreter_t* hCPU, uint32 Opcode) -{ - sint32 rA, rD, rB; - PPC_OPC_TEMPL_X(Opcode, rD, rA, rB); - // byte count comes from XER - uint32 nb = (hCPU->spr.XER>>0)&0x7F; - if (nb == 0) - { - PPCInterpreter_nextInstruction(hCPU); - return; // no-op - } uint32 ea = rA ? 
hCPU->gpr[rA] : 0; - ea += hCPU->gpr[rB]; uint32 r = 0; int i = 4; uint8 v; @@ -545,8 +469,7 @@ static void PPCInterpreter_LSWX(PPCInterpreter_t* hCPU, uint32 Opcode) if (i == 0) { i = 4; - if(rD < 32) - hCPU->gpr[rD] = r; + hCPU->gpr[rD] = r; rD++; rD %= 32; r = 0; @@ -563,8 +486,7 @@ static void PPCInterpreter_LSWX(PPCInterpreter_t* hCPU, uint32 Opcode) r <<= 8; i--; } - if(rD < 32) - hCPU->gpr[rD] = r; + hCPU->gpr[rD] = r; PPCInterpreter_nextInstruction(hCPU); } diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterMain.cpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterMain.cpp index 4449f135..ace1601f 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterMain.cpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterMain.cpp @@ -63,25 +63,16 @@ void PPCInterpreter_setDEC(PPCInterpreter_t* hCPU, uint32 newValue) uint32 PPCInterpreter_getXER(PPCInterpreter_t* hCPU) { uint32 xerValue = hCPU->spr.XER; - xerValue &= ~(1 << XER_BIT_CA); - xerValue &= ~(1 << XER_BIT_SO); - xerValue &= ~(1 << XER_BIT_OV); - if (hCPU->xer_ca) - xerValue |= (1 << XER_BIT_CA); - if (hCPU->xer_so) - xerValue |= (1 << XER_BIT_SO); - if (hCPU->xer_ov) - xerValue |= (1 << XER_BIT_OV); + xerValue &= ~(1<xer_ca ) + xerValue |= (1<spr.XER = v & XER_MASK; - hCPU->xer_ca = (v >> XER_BIT_CA) & 1; - hCPU->xer_so = (v >> XER_BIT_SO) & 1; - hCPU->xer_ov = (v >> XER_BIT_OV) & 1; + hCPU->spr.XER = v; + hCPU->xer_ca = (v>>XER_BIT_CA)&1; } uint32 PPCInterpreter_getCoreIndex(PPCInterpreter_t* hCPU) diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.cpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.cpp index 7809a01d..12f86427 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.cpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.cpp @@ -5,6 +5,7 @@ #include "Cafe/OS/libs/coreinit/coreinit_CodeGen.h" #include "../Recompiler/PPCRecompiler.h" +#include "../Recompiler/PPCRecompilerX64.h" #include #include "Cafe/HW/Latte/Core/LatteBufferCache.h" @@ -93,6 
+94,7 @@ void PPCInterpreter_MTCRF(PPCInterpreter_t* hCPU, uint32 Opcode) { // frequently used by GCC compiled code (e.g. SM64 port) // tested + uint32 rS; uint32 crfMask; PPC_OPC_TEMPL_XFX(Opcode, rS, crfMask); diff --git a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.hpp b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.hpp index 9bfcd53d..718162be 100644 --- a/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.hpp +++ b/src/Cafe/HW/Espresso/Interpreter/PPCInterpreterOPC.hpp @@ -68,8 +68,6 @@ static void PPCInterpreter_TW(PPCInterpreter_t* hCPU, uint32 opcode) PPC_OPC_TEMPL_X(opcode, to, rA, rB); cemu_assert_debug(to == 0); - if(to != 0) - PPCInterpreter_nextInstruction(hCPU); if (rA == DEBUGGER_BP_T_DEBUGGER) debugger_enterTW(hCPU); diff --git a/src/Cafe/HW/Espresso/PPCState.h b/src/Cafe/HW/Espresso/PPCState.h index fd943d39..c315ed0e 100644 --- a/src/Cafe/HW/Espresso/PPCState.h +++ b/src/Cafe/HW/Espresso/PPCState.h @@ -49,12 +49,12 @@ struct PPCInterpreter_t uint32 fpscr; uint8 cr[32]; // 0 -> bit not set, 1 -> bit set (upper 7 bits of each byte must always be zero) (cr0 starts at index 0, cr1 at index 4 ..) uint8 xer_ca; // carry from xer - uint8 xer_so; - uint8 xer_ov; + uint8 LSQE; + uint8 PSE; // thread remaining cycles sint32 remainingCycles; // if this value goes below zero, the next thread is scheduled sint32 skippedCycles; // number of skipped cycles - struct + struct { uint32 LR; uint32 CTR; @@ -67,10 +67,9 @@ struct PPCInterpreter_t uint32 reservedMemValue; // temporary storage for recompiler FPR_t temporaryFPR[8]; - uint32 temporaryGPR[4]; // deprecated, refactor backend dependency on this away - uint32 temporaryGPR_reg[4]; + uint32 temporaryGPR[4]; // values below this are not used by Cafe OS usermode - struct + struct { uint32 fpecr; // is this the same register as fpscr ? 
uint32 DEC; @@ -85,7 +84,7 @@ struct PPCInterpreter_t // DMA uint32 dmaU; uint32 dmaL; - // MMU + // MMU uint32 dbatU[8]; uint32 dbatL[8]; uint32 ibatU[8]; @@ -93,8 +92,6 @@ struct PPCInterpreter_t uint32 sr[16]; uint32 sdr1; }sprExtended; - uint8 LSQE; - uint8 PSE; // global CPU values PPCInterpreterGlobal_t* global; // interpreter control @@ -230,9 +227,9 @@ static inline float flushDenormalToZero(float f) // HLE interface -using HLECALL = void(*)(PPCInterpreter_t*); -using HLEIDX = sint32; +typedef void(*HLECALL)(PPCInterpreter_t* hCPU); +typedef sint32 HLEIDX; HLEIDX PPCInterpreter_registerHLECall(HLECALL hleCall, std::string hleName); HLECALL PPCInterpreter_getHLECall(HLEIDX funcIndex); diff --git a/src/Cafe/HW/Espresso/PPCTimer.cpp b/src/Cafe/HW/Espresso/PPCTimer.cpp index 257973a6..c27c94ee 100644 --- a/src/Cafe/HW/Espresso/PPCTimer.cpp +++ b/src/Cafe/HW/Espresso/PPCTimer.cpp @@ -1,4 +1,5 @@ #include "Cafe/HW/Espresso/Const.h" +#include "asm/x64util.h" #include "config/ActiveSettings.h" #include "util/helpers/fspinlock.h" #include "util/highresolutiontimer/HighResolutionTimer.h" diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.cpp b/src/Cafe/HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.cpp deleted file mode 100644 index 728460a4..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.cpp +++ /dev/null @@ -1,1695 +0,0 @@ -#include "BackendAArch64.h" - -#pragma push_macro("CSIZE") -#undef CSIZE -#include -#pragma pop_macro("CSIZE") -#include - -#include - -#include "../PPCRecompiler.h" -#include "Common/precompiled.h" -#include "Common/cpu_features.h" -#include "HW/Espresso/Interpreter/PPCInterpreterInternal.h" -#include "HW/Espresso/Interpreter/PPCInterpreterHelper.h" -#include "HW/Espresso/PPCState.h" - -using namespace Xbyak_aarch64; - -constexpr uint32 TEMP_GPR_1_ID = 25; -constexpr uint32 TEMP_GPR_2_ID = 26; -constexpr uint32 PPC_RECOMPILER_INSTANCE_DATA_REG_ID = 27; -constexpr uint32 
MEMORY_BASE_REG_ID = 28; -constexpr uint32 HCPU_REG_ID = 29; - -constexpr uint32 TEMP_FPR_ID = 31; - -struct FPReg -{ - explicit FPReg(size_t index) - : index(index), VReg(index), QReg(index), DReg(index), SReg(index), HReg(index), BReg(index) - { - } - const size_t index; - const VReg VReg; - const QReg QReg; - const DReg DReg; - const SReg SReg; - const HReg HReg; - const BReg BReg; -}; - -struct GPReg -{ - explicit GPReg(size_t index) - : index(index), XReg(index), WReg(index) - { - } - const size_t index; - const XReg XReg; - const WReg WReg; -}; - -static const XReg HCPU_REG{HCPU_REG_ID}, PPC_REC_INSTANCE_REG{PPC_RECOMPILER_INSTANCE_DATA_REG_ID}, MEM_BASE_REG{MEMORY_BASE_REG_ID}; -static const GPReg TEMP_GPR1{TEMP_GPR_1_ID}; -static const GPReg TEMP_GPR2{TEMP_GPR_2_ID}; -static const GPReg LR{TEMP_GPR_2_ID}; - -static const FPReg TEMP_FPR{TEMP_FPR_ID}; - -static const util::Cpu s_cpu; - -class AArch64Allocator : public Allocator -{ - private: -#ifdef XBYAK_USE_MMAP_ALLOCATOR - inline static MmapAllocator s_allocator; -#else - inline static Allocator s_allocator; -#endif - Allocator* m_allocatorImpl; - bool m_freeDisabled = false; - - public: - AArch64Allocator() - : m_allocatorImpl(reinterpret_cast(&s_allocator)) {} - - uint32* alloc(size_t size) override - { - return m_allocatorImpl->alloc(size); - } - - void setFreeDisabled(bool disabled) - { - m_freeDisabled = disabled; - } - - void free(uint32* p) override - { - if (!m_freeDisabled) - m_allocatorImpl->free(p); - } - - [[nodiscard]] bool useProtect() const override - { - return !m_freeDisabled && m_allocatorImpl->useProtect(); - } -}; - -struct UnconditionalJumpInfo -{ - IMLSegment* target; -}; - -struct ConditionalRegJumpInfo -{ - IMLSegment* target; - WReg regBool; - bool mustBeTrue; -}; - -struct NegativeRegValueJumpInfo -{ - IMLSegment* target; - WReg regValue; -}; - -using JumpInfo = std::variant< - UnconditionalJumpInfo, - ConditionalRegJumpInfo, - NegativeRegValueJumpInfo>; - -struct 
AArch64GenContext_t : CodeGenerator -{ - explicit AArch64GenContext_t(Allocator* allocator = nullptr); - void enterRecompilerCode(); - void leaveRecompilerCode(); - - void r_name(IMLInstruction* imlInstruction); - void name_r(IMLInstruction* imlInstruction); - bool r_s32(IMLInstruction* imlInstruction); - bool r_r(IMLInstruction* imlInstruction); - bool r_r_s32(IMLInstruction* imlInstruction); - bool r_r_s32_carry(IMLInstruction* imlInstruction); - bool r_r_r(IMLInstruction* imlInstruction); - bool r_r_r_carry(IMLInstruction* imlInstruction); - void compare(IMLInstruction* imlInstruction); - void compare_s32(IMLInstruction* imlInstruction); - bool load(IMLInstruction* imlInstruction, bool indexed); - bool store(IMLInstruction* imlInstruction, bool indexed); - void atomic_cmp_store(IMLInstruction* imlInstruction); - bool macro(IMLInstruction* imlInstruction); - void call_imm(IMLInstruction* imlInstruction); - bool fpr_load(IMLInstruction* imlInstruction, bool indexed); - bool fpr_store(IMLInstruction* imlInstruction, bool indexed); - void fpr_r_r(IMLInstruction* imlInstruction); - void fpr_r_r_r(IMLInstruction* imlInstruction); - void fpr_r_r_r_r(IMLInstruction* imlInstruction); - void fpr_r(IMLInstruction* imlInstruction); - void fpr_compare(IMLInstruction* imlInstruction); - void cjump(IMLInstruction* imlInstruction, IMLSegment* imlSegment); - void jump(IMLSegment* imlSegment); - void conditionalJumpCycleCheck(IMLSegment* imlSegment); - - static constexpr size_t MAX_JUMP_INSTR_COUNT = 2; - std::list> jumps; - void prepareJump(JumpInfo&& jumpInfo) - { - jumps.emplace_back(getSize(), jumpInfo); - for (int i = 0; i < MAX_JUMP_INSTR_COUNT; ++i) - nop(); - } - - std::map segmentStarts; - void storeSegmentStart(IMLSegment* imlSegment) - { - segmentStarts[imlSegment] = getSize(); - } - - bool processAllJumps() - { - for (auto jump : jumps) - { - auto jumpStart = jump.first; - auto jumpInfo = jump.second; - bool success = std::visit( - [&, this](const auto& jump) { - 
setSize(jumpStart); - sint64 targetAddress = segmentStarts.at(jump.target); - sint64 addressOffset = targetAddress - jumpStart; - return handleJump(addressOffset, jump); - }, - jumpInfo); - if (!success) - { - return false; - } - } - return true; - } - - bool handleJump(sint64 addressOffset, const UnconditionalJumpInfo& jump) - { - // in +/-128MB - if (-0x8000000 <= addressOffset && addressOffset <= 0x7ffffff) - { - b(addressOffset); - return true; - } - - cemu_assert_suspicious(); - - return false; - } - - bool handleJump(sint64 addressOffset, const ConditionalRegJumpInfo& jump) - { - bool mustBeTrue = jump.mustBeTrue; - - // in +/-32KB - if (-0x8000 <= addressOffset && addressOffset <= 0x7fff) - { - if (mustBeTrue) - tbnz(jump.regBool, 0, addressOffset); - else - tbz(jump.regBool, 0, addressOffset); - return true; - } - - // in +/-1MB - if (-0x100000 <= addressOffset && addressOffset <= 0xfffff) - { - if (mustBeTrue) - cbnz(jump.regBool, addressOffset); - else - cbz(jump.regBool, addressOffset); - return true; - } - - Label skipJump; - if (mustBeTrue) - tbz(jump.regBool, 0, skipJump); - else - tbnz(jump.regBool, 0, skipJump); - addressOffset -= 4; - - // in +/-128MB - if (-0x8000000 <= addressOffset && addressOffset <= 0x7ffffff) - { - b(addressOffset); - L(skipJump); - return true; - } - - cemu_assert_suspicious(); - - return false; - } - - bool handleJump(sint64 addressOffset, const NegativeRegValueJumpInfo& jump) - { - // in +/-32KB - if (-0x8000 <= addressOffset && addressOffset <= 0x7fff) - { - tbnz(jump.regValue, 31, addressOffset); - return true; - } - - // in +/-1MB - if (-0x100000 <= addressOffset && addressOffset <= 0xfffff) - { - tst(jump.regValue, 0x80000000); - addressOffset -= 4; - bne(addressOffset); - return true; - } - - Label skipJump; - tbz(jump.regValue, 31, skipJump); - addressOffset -= 4; - - // in +/-128MB - if (-0x8000000 <= addressOffset && addressOffset <= 0x7ffffff) - { - b(addressOffset); - L(skipJump); - return true; - } - - 
cemu_assert_suspicious(); - - return false; - } -}; - -template T> -T fpReg(const IMLReg& imlReg) -{ - cemu_assert_debug(imlReg.GetRegFormat() == IMLRegFormat::F64); - auto regId = imlReg.GetRegID(); - cemu_assert_debug(regId >= IMLArchAArch64::PHYSREG_FPR_BASE && regId < IMLArchAArch64::PHYSREG_FPR_BASE + IMLArchAArch64::PHYSREG_FPR_COUNT); - return T(regId - IMLArchAArch64::PHYSREG_FPR_BASE); -} - -template T> -T gpReg(const IMLReg& imlReg) -{ - auto regFormat = imlReg.GetRegFormat(); - if (std::is_same_v) - cemu_assert_debug(regFormat == IMLRegFormat::I32); - else if (std::is_same_v) - cemu_assert_debug(regFormat == IMLRegFormat::I64); - else - cemu_assert_unimplemented(); - - auto regId = imlReg.GetRegID(); - cemu_assert_debug(regId >= IMLArchAArch64::PHYSREG_GPR_BASE && regId < IMLArchAArch64::PHYSREG_GPR_BASE + IMLArchAArch64::PHYSREG_GPR_COUNT); - return T(regId - IMLArchAArch64::PHYSREG_GPR_BASE); -} - -template To, std::derived_from From> -To aliasAs(const From& reg) -{ - return To(reg.getIdx()); -} - -template To, std::derived_from From> -To aliasAs(const From& reg) -{ - return To(reg.getIdx()); -} - -AArch64GenContext_t::AArch64GenContext_t(Allocator* allocator) - : CodeGenerator(DEFAULT_MAX_CODE_SIZE, AutoGrow, allocator) -{ -} - -constexpr uint64 ones(uint32 size) -{ - return (size == 64) ? 
0xffffffffffffffff : ((uint64)1 << size) - 1; -} - -constexpr bool isAdrImmValidFPR(sint32 imm, uint32 bits) -{ - uint32 times = bits / 8; - uint32 sh = std::countr_zero(times); - return (0 <= imm && imm <= 4095 * times) && ((uint64)imm & ones(sh)) == 0; -} - -constexpr bool isAdrImmValidGPR(sint32 imm, uint32 bits = 32) -{ - uint32 size = std::countr_zero(bits / 8u); - sint32 times = 1 << size; - return (0 <= imm && imm <= 4095 * times) && ((uint64)imm & ones(size)) == 0; -} - -constexpr bool isAdrImmRangeValid(sint32 rangeStart, sint32 rangeOffset, sint32 bits, std::invocable auto check) -{ - for (sint32 i = rangeStart; i <= rangeStart + rangeOffset; i += bits / 8) - if (!check(i, bits)) - return false; - return true; -} - -constexpr bool isAdrImmRangeValidGPR(sint32 rangeStart, sint32 rangeOffset, sint32 bits = 32) -{ - return isAdrImmRangeValid(rangeStart, rangeOffset, bits, isAdrImmValidGPR); -} - -constexpr bool isAdrImmRangeValidFpr(sint32 rangeStart, sint32 rangeOffset, sint32 bits) -{ - return isAdrImmRangeValid(rangeStart, rangeOffset, bits, isAdrImmValidFPR); -} - -// Verify that all of the offsets for the PPCInterpreter_t members that we use in r_name/name_r have a valid imm value for AdrUimm -static_assert(isAdrImmRangeValidGPR(offsetof(PPCInterpreter_t, gpr), sizeof(uint32) * 31)); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, spr.LR))); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, spr.CTR))); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, spr.XER))); -static_assert(isAdrImmRangeValidGPR(offsetof(PPCInterpreter_t, spr.UGQR), sizeof(PPCInterpreter_t::spr.UGQR[0]) * (SPR_UGQR7 - SPR_UGQR0))); -static_assert(isAdrImmRangeValidGPR(offsetof(PPCInterpreter_t, temporaryGPR_reg), sizeof(uint32) * 3)); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, xer_ca), 8)); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, xer_so), 8)); -static_assert(isAdrImmRangeValidGPR(offsetof(PPCInterpreter_t, cr), 
PPCREC_NAME_CR_LAST - PPCREC_NAME_CR, 8)); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, reservedMemAddr))); -static_assert(isAdrImmValidGPR(offsetof(PPCInterpreter_t, reservedMemValue))); -static_assert(isAdrImmRangeValidFpr(offsetof(PPCInterpreter_t, fpr), sizeof(FPR_t) * 63, 64)); -static_assert(isAdrImmRangeValidFpr(offsetof(PPCInterpreter_t, temporaryFPR), sizeof(FPR_t) * 7, 128)); - -void AArch64GenContext_t::r_name(IMLInstruction* imlInstruction) -{ - uint32 name = imlInstruction->op_r_name.name; - - if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::I64) - { - XReg regRXReg = gpReg(imlInstruction->op_r_name.regR); - WReg regR = aliasAs(regRXReg); - if (name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0 + 32) - { - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, gpr) + sizeof(uint32) * (name - PPCREC_NAME_R0))); - } - else if (name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0 + 999) - { - uint32 sprIndex = (name - PPCREC_NAME_SPR0); - if (sprIndex == SPR_LR) - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.LR))); - else if (sprIndex == SPR_CTR) - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.CTR))); - else if (sprIndex == SPR_XER) - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.XER))); - else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7) - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0))); - else - cemu_assert_suspicious(); - } - else if (name >= PPCREC_NAME_TEMPORARY && name < PPCREC_NAME_TEMPORARY + 4) - { - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, temporaryGPR_reg) + sizeof(uint32) * (name - PPCREC_NAME_TEMPORARY))); - } - else if (name == PPCREC_NAME_XER_CA) - { - ldrb(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, xer_ca))); - } - else if (name == PPCREC_NAME_XER_SO) - { - ldrb(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, xer_so))); - } - else if (name >= PPCREC_NAME_CR 
&& name <= PPCREC_NAME_CR_LAST) - { - ldrb(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, cr) + (name - PPCREC_NAME_CR))); - } - else if (name == PPCREC_NAME_CPU_MEMRES_EA) - { - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, reservedMemAddr))); - } - else if (name == PPCREC_NAME_CPU_MEMRES_VAL) - { - ldr(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, reservedMemValue))); - } - else - { - cemu_assert_suspicious(); - } - } - else if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::F64) - { - auto imlRegR = imlInstruction->op_r_name.regR; - - if (name >= PPCREC_NAME_FPR_HALF && name < (PPCREC_NAME_FPR_HALF + 64)) - { - uint32 regIndex = (name - PPCREC_NAME_FPR_HALF) / 2; - uint32 pairIndex = (name - PPCREC_NAME_FPR_HALF) % 2; - uint32 offset = offsetof(PPCInterpreter_t, fpr) + sizeof(FPR_t) * regIndex + (pairIndex ? sizeof(double) : 0); - ldr(fpReg(imlRegR), AdrUimm(HCPU_REG, offset)); - } - else if (name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0 + 8)) - { - ldr(fpReg(imlRegR), AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, temporaryFPR) + sizeof(FPR_t) * (name - PPCREC_NAME_TEMPORARY_FPR0))); - } - else - { - cemu_assert_suspicious(); - } - } - else - { - cemu_assert_suspicious(); - } -} - -void AArch64GenContext_t::name_r(IMLInstruction* imlInstruction) -{ - uint32 name = imlInstruction->op_r_name.name; - - if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::I64) - { - XReg regRXReg = gpReg(imlInstruction->op_r_name.regR); - WReg regR = aliasAs(regRXReg); - if (name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0 + 32) - { - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, gpr) + sizeof(uint32) * (name - PPCREC_NAME_R0))); - } - else if (name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0 + 999) - { - uint32 sprIndex = (name - PPCREC_NAME_SPR0); - if (sprIndex == SPR_LR) - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.LR))); - else if (sprIndex == SPR_CTR) - str(regR, 
AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.CTR))); - else if (sprIndex == SPR_XER) - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.XER))); - else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7) - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0))); - else - cemu_assert_suspicious(); - } - else if (name >= PPCREC_NAME_TEMPORARY && name < PPCREC_NAME_TEMPORARY + 4) - { - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, temporaryGPR_reg) + sizeof(uint32) * (name - PPCREC_NAME_TEMPORARY))); - } - else if (name == PPCREC_NAME_XER_CA) - { - strb(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, xer_ca))); - } - else if (name == PPCREC_NAME_XER_SO) - { - strb(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, xer_so))); - } - else if (name >= PPCREC_NAME_CR && name <= PPCREC_NAME_CR_LAST) - { - strb(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, cr) + (name - PPCREC_NAME_CR))); - } - else if (name == PPCREC_NAME_CPU_MEMRES_EA) - { - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, reservedMemAddr))); - } - else if (name == PPCREC_NAME_CPU_MEMRES_VAL) - { - str(regR, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, reservedMemValue))); - } - else - { - cemu_assert_suspicious(); - } - } - else if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::F64) - { - auto imlRegR = imlInstruction->op_r_name.regR; - if (name >= PPCREC_NAME_FPR_HALF && name < (PPCREC_NAME_FPR_HALF + 64)) - { - uint32 regIndex = (name - PPCREC_NAME_FPR_HALF) / 2; - uint32 pairIndex = (name - PPCREC_NAME_FPR_HALF) % 2; - sint32 offset = offsetof(PPCInterpreter_t, fpr) + sizeof(FPR_t) * regIndex + pairIndex * sizeof(double); - str(fpReg(imlRegR), AdrUimm(HCPU_REG, offset)); - } - else if (name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0 + 8)) - { - str(fpReg(imlRegR), AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, temporaryFPR) + sizeof(FPR_t) * (name - 
PPCREC_NAME_TEMPORARY_FPR0))); - } - else - { - cemu_assert_suspicious(); - } - } - else - { - cemu_assert_suspicious(); - } -} - -bool AArch64GenContext_t::r_r(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_r_r.regR); - WReg regA = gpReg(imlInstruction->op_r_r.regA); - - if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN) - { - mov(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ENDIAN_SWAP) - { - rev(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32) - { - sxtb(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32) - { - sxth(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_NOT) - { - mvn(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_NEG) - { - neg(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_CNTLZW) - { - clz(regR, regA); - } - else - { - cemuLog_log(LogType::Recompiler, "PPCRecompilerAArch64Gen_imlInstruction_r_r(): Unsupported operation {:x}", imlInstruction->operation); - return false; - } - return true; -} - -bool AArch64GenContext_t::r_s32(IMLInstruction* imlInstruction) -{ - sint32 imm32 = imlInstruction->op_r_immS32.immS32; - WReg reg = gpReg(imlInstruction->op_r_immS32.regR); - - if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN) - { - mov(reg, imm32); - } - else if (imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE) - { - ror(reg, reg, 32 - (imm32 & 0x1f)); - } - else - { - cemuLog_log(LogType::Recompiler, "PPCRecompilerAArch64Gen_imlInstruction_r_s32(): Unsupported operation {:x}", imlInstruction->operation); - return false; - } - return true; -} - -bool AArch64GenContext_t::r_r_s32(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_r_r_s32.regR); - WReg regA = gpReg(imlInstruction->op_r_r_s32.regA); - sint32 immS32 = imlInstruction->op_r_r_s32.immS32; - - if (imlInstruction->operation == PPCREC_IML_OP_ADD) - { - 
add_imm(regR, regA, immS32, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_SUB) - { - sub_imm(regR, regA, immS32, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_AND) - { - mov(TEMP_GPR1.WReg, immS32); - and_(regR, regA, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_OR) - { - mov(TEMP_GPR1.WReg, immS32); - orr(regR, regA, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_XOR) - { - mov(TEMP_GPR1.WReg, immS32); - eor(regR, regA, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED) - { - mov(TEMP_GPR1.WReg, immS32); - mul(regR, regA, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT) - { - lsl(regR, regA, (uint32)immS32 & 0x1f); - } - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U) - { - lsr(regR, regA, (uint32)immS32 & 0x1f); - } - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S) - { - asr(regR, regA, (uint32)immS32 & 0x1f); - } - else - { - cemuLog_log(LogType::Recompiler, "PPCRecompilerAArch64Gen_imlInstruction_r_r_s32(): Unsupported operation {:x}", imlInstruction->operation); - cemu_assert_suspicious(); - return false; - } - return true; -} - -bool AArch64GenContext_t::r_r_s32_carry(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_r_r_s32_carry.regR); - WReg regA = gpReg(imlInstruction->op_r_r_s32_carry.regA); - WReg regCarry = gpReg(imlInstruction->op_r_r_s32_carry.regCarry); - - sint32 immS32 = imlInstruction->op_r_r_s32_carry.immS32; - if (imlInstruction->operation == PPCREC_IML_OP_ADD) - { - adds_imm(regR, regA, immS32, TEMP_GPR1.WReg); - cset(regCarry, Cond::CS); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ADD_WITH_CARRY) - { - mov(TEMP_GPR1.WReg, immS32); - cmp(regCarry, 1); - adcs(regR, regA, TEMP_GPR1.WReg); - cset(regCarry, Cond::CS); - } - else - { - cemu_assert_suspicious(); - return false; - } - 
- return true; -} - -bool AArch64GenContext_t::r_r_r(IMLInstruction* imlInstruction) -{ - WReg regResult = gpReg(imlInstruction->op_r_r_r.regR); - XReg reg64Result = aliasAs(regResult); - WReg regOperand1 = gpReg(imlInstruction->op_r_r_r.regA); - WReg regOperand2 = gpReg(imlInstruction->op_r_r_r.regB); - - if (imlInstruction->operation == PPCREC_IML_OP_ADD) - { - add(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_SUB) - { - sub(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_OR) - { - orr(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_AND) - { - and_(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_XOR) - { - eor(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED) - { - mul(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_SLW) - { - tst(regOperand2, 32); - lsl(regResult, regOperand1, regOperand2); - csel(regResult, regResult, wzr, Cond::EQ); - } - else if (imlInstruction->operation == PPCREC_IML_OP_SRW) - { - tst(regOperand2, 32); - lsr(regResult, regOperand1, regOperand2); - csel(regResult, regResult, wzr, Cond::EQ); - } - else if (imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE) - { - neg(TEMP_GPR1.WReg, regOperand2); - ror(regResult, regOperand1, TEMP_GPR1.WReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S) - { - asr(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U) - { - lsr(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT) - { - lsl(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED) - { - sdiv(regResult, regOperand1, regOperand2); - } - else if 
(imlInstruction->operation == PPCREC_IML_OP_DIVIDE_UNSIGNED) - { - udiv(regResult, regOperand1, regOperand2); - } - else if (imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED) - { - smull(reg64Result, regOperand1, regOperand2); - lsr(reg64Result, reg64Result, 32); - } - else if (imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED) - { - umull(reg64Result, regOperand1, regOperand2); - lsr(reg64Result, reg64Result, 32); - } - else - { - cemuLog_log(LogType::Recompiler, "PPCRecompilerAArch64Gen_imlInstruction_r_r_r(): Unsupported operation {:x}", imlInstruction->operation); - return false; - } - return true; -} - -bool AArch64GenContext_t::r_r_r_carry(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_r_r_r_carry.regR); - WReg regA = gpReg(imlInstruction->op_r_r_r_carry.regA); - WReg regB = gpReg(imlInstruction->op_r_r_r_carry.regB); - WReg regCarry = gpReg(imlInstruction->op_r_r_r_carry.regCarry); - - if (imlInstruction->operation == PPCREC_IML_OP_ADD) - { - adds(regR, regA, regB); - cset(regCarry, Cond::CS); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ADD_WITH_CARRY) - { - cmp(regCarry, 1); - adcs(regR, regA, regB); - cset(regCarry, Cond::CS); - } - else - { - cemu_assert_suspicious(); - return false; - } - - return true; -} - -Cond ImlCondToArm64Cond(IMLCondition condition) -{ - switch (condition) - { - case IMLCondition::EQ: - return Cond::EQ; - case IMLCondition::NEQ: - return Cond::NE; - case IMLCondition::UNSIGNED_GT: - return Cond::HI; - case IMLCondition::UNSIGNED_LT: - return Cond::LO; - case IMLCondition::SIGNED_GT: - return Cond::GT; - case IMLCondition::SIGNED_LT: - return Cond::LT; - default: - { - cemu_assert_suspicious(); - return Cond::EQ; - } - } -} - -void AArch64GenContext_t::compare(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_compare.regR); - WReg regA = gpReg(imlInstruction->op_compare.regA); - WReg regB = gpReg(imlInstruction->op_compare.regB); - Cond 
cond = ImlCondToArm64Cond(imlInstruction->op_compare.cond); - cmp(regA, regB); - cset(regR, cond); -} - -void AArch64GenContext_t::compare_s32(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_compare.regR); - WReg regA = gpReg(imlInstruction->op_compare.regA); - sint32 imm = imlInstruction->op_compare_s32.immS32; - auto cond = ImlCondToArm64Cond(imlInstruction->op_compare.cond); - cmp_imm(regA, imm, TEMP_GPR1.WReg); - cset(regR, cond); -} - -void AArch64GenContext_t::cjump(IMLInstruction* imlInstruction, IMLSegment* imlSegment) -{ - auto regBool = gpReg(imlInstruction->op_conditional_jump.registerBool); - prepareJump(ConditionalRegJumpInfo{ - .target = imlSegment->nextSegmentBranchTaken, - .regBool = regBool, - .mustBeTrue = imlInstruction->op_conditional_jump.mustBeTrue, - }); -} - -void AArch64GenContext_t::jump(IMLSegment* imlSegment) -{ - prepareJump(UnconditionalJumpInfo{.target = imlSegment->nextSegmentBranchTaken}); -} - -void AArch64GenContext_t::conditionalJumpCycleCheck(IMLSegment* imlSegment) -{ - ldr(TEMP_GPR1.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, remainingCycles))); - prepareJump(NegativeRegValueJumpInfo{ - .target = imlSegment->nextSegmentBranchTaken, - .regValue = TEMP_GPR1.WReg, - }); -} - -void* PPCRecompiler_virtualHLE(PPCInterpreter_t* ppcInterpreter, uint32 hleFuncId) -{ - void* prevRSPTemp = ppcInterpreter->rspTemp; - if (hleFuncId == 0xFFD0) - { - ppcInterpreter->remainingCycles -= 500; // let subtract about 500 cycles for each HLE call - ppcInterpreter->gpr[3] = 0; - PPCInterpreter_nextInstruction(ppcInterpreter); - return PPCInterpreter_getCurrentInstance(); - } - else - { - auto hleCall = PPCInterpreter_getHLECall(hleFuncId); - cemu_assert(hleCall != nullptr); - hleCall(ppcInterpreter); - } - ppcInterpreter->rspTemp = prevRSPTemp; - return PPCInterpreter_getCurrentInstance(); -} - -bool AArch64GenContext_t::macro(IMLInstruction* imlInstruction) -{ - if (imlInstruction->operation == 
PPCREC_IML_MACRO_B_TO_REG) - { - WReg branchDstReg = gpReg(imlInstruction->op_macro.paramReg); - - mov(TEMP_GPR1.WReg, offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, branchDstReg, ShMod::LSL, 1); - ldr(TEMP_GPR1.XReg, AdrExt(PPC_REC_INSTANCE_REG, TEMP_GPR1.WReg, ExtMod::UXTW)); - mov(LR.WReg, branchDstReg); - br(TEMP_GPR1.XReg); - return true; - } - else if (imlInstruction->operation == PPCREC_IML_MACRO_BL) - { - uint32 newLR = imlInstruction->op_macro.param + 4; - - mov(TEMP_GPR1.WReg, newLR); - str(TEMP_GPR1.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, spr.LR))); - - uint32 newIP = imlInstruction->op_macro.param2; - uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; - mov(TEMP_GPR1.XReg, lookupOffset); - ldr(TEMP_GPR1.XReg, AdrReg(PPC_REC_INSTANCE_REG, TEMP_GPR1.XReg)); - mov(LR.WReg, newIP); - br(TEMP_GPR1.XReg); - return true; - } - else if (imlInstruction->operation == PPCREC_IML_MACRO_B_FAR) - { - uint32 newIP = imlInstruction->op_macro.param2; - uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; - mov(TEMP_GPR1.XReg, lookupOffset); - ldr(TEMP_GPR1.XReg, AdrReg(PPC_REC_INSTANCE_REG, TEMP_GPR1.XReg)); - mov(LR.WReg, newIP); - br(TEMP_GPR1.XReg); - return true; - } - else if (imlInstruction->operation == PPCREC_IML_MACRO_LEAVE) - { - uint32 currentInstructionAddress = imlInstruction->op_macro.param; - mov(TEMP_GPR1.XReg, (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); // newIP = 0 special value for recompiler exit - ldr(TEMP_GPR1.XReg, AdrReg(PPC_REC_INSTANCE_REG, TEMP_GPR1.XReg)); - mov(LR.WReg, currentInstructionAddress); - br(TEMP_GPR1.XReg); - return true; - } - else if (imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK) - { - brk(0xf000); - return true; - } - else if (imlInstruction->operation == 
PPCREC_IML_MACRO_COUNT_CYCLES) - { - uint32 cycleCount = imlInstruction->op_macro.param; - AdrUimm adrCycles = AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, remainingCycles)); - ldr(TEMP_GPR1.WReg, adrCycles); - sub_imm(TEMP_GPR1.WReg, TEMP_GPR1.WReg, cycleCount, TEMP_GPR2.WReg); - str(TEMP_GPR1.WReg, adrCycles); - return true; - } - else if (imlInstruction->operation == PPCREC_IML_MACRO_HLE) - { - uint32 ppcAddress = imlInstruction->op_macro.param; - uint32 funcId = imlInstruction->op_macro.param2; - Label cyclesLeftLabel; - - // update instruction pointer - mov(TEMP_GPR1.WReg, ppcAddress); - str(TEMP_GPR1.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, instructionPointer))); - // set parameters - str(x30, AdrPreImm(sp, -16)); - - mov(x0, HCPU_REG); - mov(w1, funcId); - // call HLE function - - mov(TEMP_GPR1.XReg, (uint64)PPCRecompiler_virtualHLE); - blr(TEMP_GPR1.XReg); - - mov(HCPU_REG, x0); - - ldr(x30, AdrPostImm(sp, 16)); - - // check if cycles where decreased beyond zero, if yes -> leave recompiler - ldr(TEMP_GPR1.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, remainingCycles))); - tbz(TEMP_GPR1.WReg, 31, cyclesLeftLabel); // check if negative - - mov(TEMP_GPR1.XReg, offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - ldr(TEMP_GPR1.XReg, AdrReg(PPC_REC_INSTANCE_REG, TEMP_GPR1.XReg)); - ldr(LR.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, instructionPointer))); - // branch to recompiler exit - br(TEMP_GPR1.XReg); - - L(cyclesLeftLabel); - // check if instruction pointer was changed - // assign new instruction pointer to LR.WReg - ldr(LR.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, instructionPointer))); - mov(TEMP_GPR1.XReg, offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - add(TEMP_GPR1.XReg, TEMP_GPR1.XReg, LR.XReg, ShMod::LSL, 1); - ldr(TEMP_GPR1.XReg, AdrReg(PPC_REC_INSTANCE_REG, TEMP_GPR1.XReg)); - // branch to [ppcRecompilerDirectJumpTable + PPCInterpreter_t::instructionPointer * 2] - 
br(TEMP_GPR1.XReg); - return true; - } - else - { - cemuLog_log(LogType::Recompiler, "Unknown recompiler macro operation %d\n", imlInstruction->operation); - cemu_assert_suspicious(); - } - return false; -} - -bool AArch64GenContext_t::load(IMLInstruction* imlInstruction, bool indexed) -{ - cemu_assert_debug(imlInstruction->op_storeLoad.registerData.GetRegFormat() == IMLRegFormat::I32); - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem.GetRegFormat() == IMLRegFormat::I32); - if (indexed) - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem2.GetRegFormat() == IMLRegFormat::I32); - - sint32 memOffset = imlInstruction->op_storeLoad.immS32; - bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend; - bool switchEndian = imlInstruction->op_storeLoad.flags2.swapEndian; - WReg memReg = gpReg(imlInstruction->op_storeLoad.registerMem); - WReg dataReg = gpReg(imlInstruction->op_storeLoad.registerData); - - add_imm(TEMP_GPR1.WReg, memReg, memOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, gpReg(imlInstruction->op_storeLoad.registerMem2)); - - auto adr = AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW); - if (imlInstruction->op_storeLoad.copyWidth == 32) - { - ldr(dataReg, adr); - if (switchEndian) - rev(dataReg, dataReg); - } - else if (imlInstruction->op_storeLoad.copyWidth == 16) - { - if (switchEndian) - { - ldrh(dataReg, adr); - rev(dataReg, dataReg); - if (signExtend) - asr(dataReg, dataReg, 16); - else - lsr(dataReg, dataReg, 16); - } - else - { - if (signExtend) - ldrsh(dataReg, adr); - else - ldrh(dataReg, adr); - } - } - else if (imlInstruction->op_storeLoad.copyWidth == 8) - { - if (signExtend) - ldrsb(dataReg, adr); - else - ldrb(dataReg, adr); - } - else - { - return false; - } - return true; -} - -bool AArch64GenContext_t::store(IMLInstruction* imlInstruction, bool indexed) -{ - cemu_assert_debug(imlInstruction->op_storeLoad.registerData.GetRegFormat() == IMLRegFormat::I32); - 
cemu_assert_debug(imlInstruction->op_storeLoad.registerMem.GetRegFormat() == IMLRegFormat::I32); - if (indexed) - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem2.GetRegFormat() == IMLRegFormat::I32); - - WReg dataReg = gpReg(imlInstruction->op_storeLoad.registerData); - WReg memReg = gpReg(imlInstruction->op_storeLoad.registerMem); - sint32 memOffset = imlInstruction->op_storeLoad.immS32; - bool swapEndian = imlInstruction->op_storeLoad.flags2.swapEndian; - - add_imm(TEMP_GPR1.WReg, memReg, memOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, gpReg(imlInstruction->op_storeLoad.registerMem2)); - AdrExt adr = AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW); - if (imlInstruction->op_storeLoad.copyWidth == 32) - { - if (swapEndian) - { - rev(TEMP_GPR2.WReg, dataReg); - str(TEMP_GPR2.WReg, adr); - } - else - { - str(dataReg, adr); - } - } - else if (imlInstruction->op_storeLoad.copyWidth == 16) - { - if (swapEndian) - { - rev(TEMP_GPR2.WReg, dataReg); - lsr(TEMP_GPR2.WReg, TEMP_GPR2.WReg, 16); - strh(TEMP_GPR2.WReg, adr); - } - else - { - strh(dataReg, adr); - } - } - else if (imlInstruction->op_storeLoad.copyWidth == 8) - { - strb(dataReg, adr); - } - else - { - return false; - } - return true; -} - -void AArch64GenContext_t::atomic_cmp_store(IMLInstruction* imlInstruction) -{ - WReg outReg = gpReg(imlInstruction->op_atomic_compare_store.regBoolOut); - WReg eaReg = gpReg(imlInstruction->op_atomic_compare_store.regEA); - WReg valReg = gpReg(imlInstruction->op_atomic_compare_store.regWriteValue); - WReg cmpValReg = gpReg(imlInstruction->op_atomic_compare_store.regCompareValue); - - if (s_cpu.isAtomicSupported()) - { - mov(TEMP_GPR2.WReg, cmpValReg); - add(TEMP_GPR1.XReg, MEM_BASE_REG, eaReg, ExtMod::UXTW); - casal(TEMP_GPR2.WReg, valReg, AdrNoOfs(TEMP_GPR1.XReg)); - cmp(TEMP_GPR2.WReg, cmpValReg); - cset(outReg, Cond::EQ); - } - else - { - Label notEqual; - Label storeFailed; - - add(TEMP_GPR1.XReg, MEM_BASE_REG, eaReg, 
ExtMod::UXTW); - L(storeFailed); - ldaxr(TEMP_GPR2.WReg, AdrNoOfs(TEMP_GPR1.XReg)); - cmp(TEMP_GPR2.WReg, cmpValReg); - bne(notEqual); - stlxr(TEMP_GPR2.WReg, valReg, AdrNoOfs(TEMP_GPR1.XReg)); - cbnz(TEMP_GPR2.WReg, storeFailed); - - L(notEqual); - cset(outReg, Cond::EQ); - } -} - -bool AArch64GenContext_t::fpr_load(IMLInstruction* imlInstruction, bool indexed) -{ - const IMLReg& dataReg = imlInstruction->op_storeLoad.registerData; - SReg dataSReg = fpReg(dataReg); - DReg dataDReg = fpReg(dataReg); - WReg realRegisterMem = gpReg(imlInstruction->op_storeLoad.registerMem); - WReg indexReg = indexed ? gpReg(imlInstruction->op_storeLoad.registerMem2) : wzr; - sint32 adrOffset = imlInstruction->op_storeLoad.immS32; - uint8 mode = imlInstruction->op_storeLoad.mode; - - if (mode == PPCREC_FPR_LD_MODE_SINGLE) - { - add_imm(TEMP_GPR1.WReg, realRegisterMem, adrOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, indexReg); - ldr(TEMP_GPR2.WReg, AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW)); - rev(TEMP_GPR2.WReg, TEMP_GPR2.WReg); - fmov(dataSReg, TEMP_GPR2.WReg); - - if (imlInstruction->op_storeLoad.flags2.notExpanded) - { - // leave value as single - } - else - { - fcvt(dataDReg, dataSReg); - } - } - else if (mode == PPCREC_FPR_LD_MODE_DOUBLE) - { - add_imm(TEMP_GPR1.WReg, realRegisterMem, adrOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, indexReg); - ldr(TEMP_GPR2.XReg, AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW)); - rev(TEMP_GPR2.XReg, TEMP_GPR2.XReg); - fmov(dataDReg, TEMP_GPR2.XReg); - } - else - { - return false; - } - return true; -} - -// store to memory -bool AArch64GenContext_t::fpr_store(IMLInstruction* imlInstruction, bool indexed) -{ - const IMLReg& dataImlReg = imlInstruction->op_storeLoad.registerData; - DReg dataDReg = fpReg(dataImlReg); - SReg dataSReg = fpReg(dataImlReg); - WReg memReg = gpReg(imlInstruction->op_storeLoad.registerMem); - WReg indexReg = indexed ? 
gpReg(imlInstruction->op_storeLoad.registerMem2) : wzr; - sint32 memOffset = imlInstruction->op_storeLoad.immS32; - uint8 mode = imlInstruction->op_storeLoad.mode; - - if (mode == PPCREC_FPR_ST_MODE_SINGLE) - { - add_imm(TEMP_GPR1.WReg, memReg, memOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, indexReg); - - if (imlInstruction->op_storeLoad.flags2.notExpanded) - { - // value is already in single format - fmov(TEMP_GPR2.WReg, dataSReg); - } - else - { - fcvt(TEMP_FPR.SReg, dataDReg); - fmov(TEMP_GPR2.WReg, TEMP_FPR.SReg); - } - rev(TEMP_GPR2.WReg, TEMP_GPR2.WReg); - str(TEMP_GPR2.WReg, AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW)); - } - else if (mode == PPCREC_FPR_ST_MODE_DOUBLE) - { - add_imm(TEMP_GPR1.WReg, memReg, memOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, indexReg); - fmov(TEMP_GPR2.XReg, dataDReg); - rev(TEMP_GPR2.XReg, TEMP_GPR2.XReg); - str(TEMP_GPR2.XReg, AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW)); - } - else if (mode == PPCREC_FPR_ST_MODE_UI32_FROM_PS0) - { - add_imm(TEMP_GPR1.WReg, memReg, memOffset, TEMP_GPR1.WReg); - if (indexed) - add(TEMP_GPR1.WReg, TEMP_GPR1.WReg, indexReg); - fmov(TEMP_GPR2.WReg, dataSReg); - rev(TEMP_GPR2.WReg, TEMP_GPR2.WReg); - str(TEMP_GPR2.WReg, AdrExt(MEM_BASE_REG, TEMP_GPR1.WReg, ExtMod::UXTW)); - } - else - { - cemu_assert_suspicious(); - cemuLog_log(LogType::Recompiler, "PPCRecompilerAArch64Gen_imlInstruction_fpr_store(): Unsupported mode %d\n", mode); - return false; - } - return true; -} - -// FPR op FPR -void AArch64GenContext_t::fpr_r_r(IMLInstruction* imlInstruction) -{ - auto imlRegR = imlInstruction->op_fpr_r_r.regR; - auto imlRegA = imlInstruction->op_fpr_r_r.regA; - - if (imlInstruction->operation == PPCREC_IML_OP_FPR_FLOAT_TO_INT) - { - fcvtzs(gpReg(imlRegR), fpReg(imlRegA)); - return; - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_INT_TO_FLOAT) - { - scvtf(fpReg(imlRegR), gpReg(imlRegA)); - return; - } - else if 
(imlInstruction->operation == PPCREC_IML_OP_FPR_BITCAST_INT_TO_FLOAT) - { - cemu_assert_debug(imlRegR.GetRegFormat() == IMLRegFormat::F64); // assuming target is always F64 for now - // exact operation depends on size of types. Floats are automatically promoted to double if the target is F64 - DReg regFprDReg = fpReg(imlRegR); - SReg regFprSReg = fpReg(imlRegR); - if (imlRegA.GetRegFormat() == IMLRegFormat::I32) - { - fmov(regFprSReg, gpReg(imlRegA)); - // float to double - fcvt(regFprDReg, regFprSReg); - } - else if (imlRegA.GetRegFormat() == IMLRegFormat::I64) - { - fmov(regFprDReg, gpReg(imlRegA)); - } - else - { - cemu_assert_unimplemented(); - } - return; - } - - DReg regR = fpReg(imlRegR); - DReg regA = fpReg(imlRegA); - - if (imlInstruction->operation == PPCREC_IML_OP_FPR_ASSIGN) - { - fmov(regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY) - { - fmul(regR, regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE) - { - fdiv(regR, regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ADD) - { - fadd(regR, regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_SUB) - { - fsub(regR, regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_FCTIWZ) - { - fcvtzs(regR, regA); - } - else - { - cemu_assert_suspicious(); - } -} - -void AArch64GenContext_t::fpr_r_r_r(IMLInstruction* imlInstruction) -{ - DReg regR = fpReg(imlInstruction->op_fpr_r_r_r.regR); - DReg regA = fpReg(imlInstruction->op_fpr_r_r_r.regA); - DReg regB = fpReg(imlInstruction->op_fpr_r_r_r.regB); - - if (imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY) - { - fmul(regR, regA, regB); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ADD) - { - fadd(regR, regA, regB); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_SUB) - { - fsub(regR, regA, regB); - } - else - { - cemu_assert_suspicious(); - } -} - -/* - * FPR = op (fprA, fprB, fprC) - */ -void 
AArch64GenContext_t::fpr_r_r_r_r(IMLInstruction* imlInstruction) -{ - DReg regR = fpReg(imlInstruction->op_fpr_r_r_r_r.regR); - DReg regA = fpReg(imlInstruction->op_fpr_r_r_r_r.regA); - DReg regB = fpReg(imlInstruction->op_fpr_r_r_r_r.regB); - DReg regC = fpReg(imlInstruction->op_fpr_r_r_r_r.regC); - - if (imlInstruction->operation == PPCREC_IML_OP_FPR_SELECT) - { - fcmp(regA, 0.0); - fcsel(regR, regC, regB, Cond::GE); - } - else - { - cemu_assert_suspicious(); - } -} - -void AArch64GenContext_t::fpr_r(IMLInstruction* imlInstruction) -{ - DReg regRDReg = fpReg(imlInstruction->op_fpr_r.regR); - SReg regRSReg = fpReg(imlInstruction->op_fpr_r.regR); - - if (imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE) - { - fneg(regRDReg, regRDReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_LOAD_ONE) - { - fmov(regRDReg, 1.0); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ABS) - { - fabs(regRDReg, regRDReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS) - { - fabs(regRDReg, regRDReg); - fneg(regRDReg, regRDReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM) - { - // convert to 32bit single - fcvt(regRSReg, regRDReg); - // convert back to 64bit double - fcvt(regRDReg, regRSReg); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64) - { - // convert bottom to 64bit double - fcvt(regRDReg, regRSReg); - } - else - { - cemu_assert_unimplemented(); - } -} - -Cond ImlFPCondToArm64Cond(IMLCondition cond) -{ - switch (cond) - { - case IMLCondition::UNORDERED_GT: - return Cond::GT; - case IMLCondition::UNORDERED_LT: - return Cond::MI; - case IMLCondition::UNORDERED_EQ: - return Cond::EQ; - case IMLCondition::UNORDERED_U: - return Cond::VS; - default: - { - cemu_assert_suspicious(); - return Cond::EQ; - } - } -} - -void AArch64GenContext_t::fpr_compare(IMLInstruction* imlInstruction) -{ - WReg regR = gpReg(imlInstruction->op_fpr_compare.regR); - 
DReg regA = fpReg(imlInstruction->op_fpr_compare.regA); - DReg regB = fpReg(imlInstruction->op_fpr_compare.regB); - auto cond = ImlFPCondToArm64Cond(imlInstruction->op_fpr_compare.cond); - fcmp(regA, regB); - cset(regR, cond); -} - -void AArch64GenContext_t::call_imm(IMLInstruction* imlInstruction) -{ - str(x30, AdrPreImm(sp, -16)); - mov(TEMP_GPR1.XReg, imlInstruction->op_call_imm.callAddress); - blr(TEMP_GPR1.XReg); - ldr(x30, AdrPostImm(sp, 16)); -} - -bool PPCRecompiler_generateAArch64Code(struct PPCRecFunction_t* PPCRecFunction, struct ppcImlGenContext_t* ppcImlGenContext) -{ - AArch64Allocator allocator; - AArch64GenContext_t aarch64GenContext{&allocator}; - - // generate iml instruction code - bool codeGenerationFailed = false; - for (IMLSegment* segIt : ppcImlGenContext->segmentList2) - { - if (codeGenerationFailed) - break; - segIt->x64Offset = aarch64GenContext.getSize(); - - aarch64GenContext.storeSegmentStart(segIt); - - for (size_t i = 0; i < segIt->imlList.size(); i++) - { - IMLInstruction* imlInstruction = segIt->imlList.data() + i; - if (imlInstruction->type == PPCREC_IML_TYPE_R_NAME) - { - aarch64GenContext.r_name(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_NAME_R) - { - aarch64GenContext.name_r(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R) - { - if (!aarch64GenContext.r_r(imlInstruction)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) - { - if (!aarch64GenContext.r_s32(imlInstruction)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32) - { - if (!aarch64GenContext.r_r_s32(imlInstruction)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32_CARRY) - { - if (!aarch64GenContext.r_r_s32_carry(imlInstruction)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R) - { - if (!aarch64GenContext.r_r_r(imlInstruction)) - 
codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R_CARRY) - { - if (!aarch64GenContext.r_r_r_carry(imlInstruction)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_COMPARE) - { - aarch64GenContext.compare(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_COMPARE_S32) - { - aarch64GenContext.compare_s32(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_JUMP) - { - aarch64GenContext.cjump(imlInstruction, segIt); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_JUMP) - { - aarch64GenContext.jump(segIt); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) - { - aarch64GenContext.conditionalJumpCycleCheck(segIt); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_MACRO) - { - if (!aarch64GenContext.macro(imlInstruction)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD) - { - if (!aarch64GenContext.load(imlInstruction, false)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED) - { - if (!aarch64GenContext.load(imlInstruction, true)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_STORE) - { - if (!aarch64GenContext.store(imlInstruction, false)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) - { - if (!aarch64GenContext.store(imlInstruction, true)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_ATOMIC_CMP_STORE) - { - aarch64GenContext.atomic_cmp_store(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_CALL_IMM) - { - aarch64GenContext.call_imm(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_NO_OP) - { - // no op - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD) - { - if (!aarch64GenContext.fpr_load(imlInstruction, false)) - 
codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED) - { - if (!aarch64GenContext.fpr_load(imlInstruction, true)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE) - { - if (!aarch64GenContext.fpr_store(imlInstruction, false)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED) - { - if (!aarch64GenContext.fpr_store(imlInstruction, true)) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R) - { - aarch64GenContext.fpr_r_r(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R) - { - aarch64GenContext.fpr_r_r_r(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R) - { - aarch64GenContext.fpr_r_r_r_r(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R) - { - aarch64GenContext.fpr_r(imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_COMPARE) - { - aarch64GenContext.fpr_compare(imlInstruction); - } - else - { - codeGenerationFailed = true; - cemu_assert_suspicious(); - cemuLog_log(LogType::Recompiler, "PPCRecompiler_generateAArch64Code(): Unsupported iml type {}", imlInstruction->type); - } - } - } - - // handle failed code generation - if (codeGenerationFailed) - { - return false; - } - - if (!aarch64GenContext.processAllJumps()) - { - cemuLog_log(LogType::Recompiler, "PPCRecompiler_generateAArch64Code(): some jumps exceeded the +/-128MB offset."); - return false; - } - - aarch64GenContext.readyRE(); - - // set code - PPCRecFunction->x86Code = aarch64GenContext.getCode(); - PPCRecFunction->x86Size = aarch64GenContext.getMaxSize(); - // set free disabled to skip freeing the code from the CodeGenerator destructor - allocator.setFreeDisabled(true); - return true; -} - -void PPCRecompiler_cleanupAArch64Code(void* code, size_t size) -{ - AArch64Allocator allocator; - if 
(allocator.useProtect()) - CodeArray::protect(code, size, CodeArray::PROTECT_RW); - allocator.free(static_cast(code)); -} - -void AArch64GenContext_t::enterRecompilerCode() -{ - constexpr size_t STACK_SIZE = 160 /* x19 .. x30 + v8.d[0] .. v15.d[0] */; - static_assert(STACK_SIZE % 16 == 0); - sub(sp, sp, STACK_SIZE); - mov(x9, sp); - - stp(x19, x20, AdrPostImm(x9, 16)); - stp(x21, x22, AdrPostImm(x9, 16)); - stp(x23, x24, AdrPostImm(x9, 16)); - stp(x25, x26, AdrPostImm(x9, 16)); - stp(x27, x28, AdrPostImm(x9, 16)); - stp(x29, x30, AdrPostImm(x9, 16)); - st4((v8.d - v11.d)[0], AdrPostImm(x9, 32)); - st4((v12.d - v15.d)[0], AdrPostImm(x9, 32)); - mov(HCPU_REG, x1); // call argument 2 - mov(PPC_REC_INSTANCE_REG, (uint64)ppcRecompilerInstanceData); - mov(MEM_BASE_REG, (uint64)memory_base); - - // branch to recFunc - blr(x0); // call argument 1 - - mov(x9, sp); - ldp(x19, x20, AdrPostImm(x9, 16)); - ldp(x21, x22, AdrPostImm(x9, 16)); - ldp(x23, x24, AdrPostImm(x9, 16)); - ldp(x25, x26, AdrPostImm(x9, 16)); - ldp(x27, x28, AdrPostImm(x9, 16)); - ldp(x29, x30, AdrPostImm(x9, 16)); - ld4((v8.d - v11.d)[0], AdrPostImm(x9, 32)); - ld4((v12.d - v15.d)[0], AdrPostImm(x9, 32)); - - add(sp, sp, STACK_SIZE); - - ret(); -} - -void AArch64GenContext_t::leaveRecompilerCode() -{ - str(LR.WReg, AdrUimm(HCPU_REG, offsetof(PPCInterpreter_t, instructionPointer))); - ret(); -} - -bool initializedInterfaceFunctions = false; -AArch64GenContext_t enterRecompilerCode_ctx{}; - -AArch64GenContext_t leaveRecompilerCode_unvisited_ctx{}; -AArch64GenContext_t leaveRecompilerCode_visited_ctx{}; -void PPCRecompilerAArch64Gen_generateRecompilerInterfaceFunctions() -{ - if (initializedInterfaceFunctions) - return; - initializedInterfaceFunctions = true; - - enterRecompilerCode_ctx.enterRecompilerCode(); - enterRecompilerCode_ctx.readyRE(); - PPCRecompiler_enterRecompilerCode = enterRecompilerCode_ctx.getCode(); - - leaveRecompilerCode_unvisited_ctx.leaveRecompilerCode(); - 
leaveRecompilerCode_unvisited_ctx.readyRE(); - PPCRecompiler_leaveRecompilerCode_unvisited = leaveRecompilerCode_unvisited_ctx.getCode(); - - leaveRecompilerCode_visited_ctx.leaveRecompilerCode(); - leaveRecompilerCode_visited_ctx.readyRE(); - PPCRecompiler_leaveRecompilerCode_visited = leaveRecompilerCode_visited_ctx.getCode(); -} diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.h b/src/Cafe/HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.h deleted file mode 100644 index b610ee04..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/BackendAArch64/BackendAArch64.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once - -#include "HW/Espresso/Recompiler/IML/IMLInstruction.h" -#include "../PPCRecompiler.h" - -bool PPCRecompiler_generateAArch64Code(struct PPCRecFunction_t* PPCRecFunction, struct ppcImlGenContext_t* ppcImlGenContext); -void PPCRecompiler_cleanupAArch64Code(void* code, size_t size); - -void PPCRecompilerAArch64Gen_generateRecompilerInterfaceFunctions(); - -// architecture specific constants -namespace IMLArchAArch64 -{ - static constexpr int PHYSREG_GPR_BASE = 0; - static constexpr int PHYSREG_GPR_COUNT = 25; - static constexpr int PHYSREG_FPR_BASE = PHYSREG_GPR_COUNT; - static constexpr int PHYSREG_FPR_COUNT = 31; -}; // namespace IMLArchAArch64 \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64.cpp b/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64.cpp deleted file mode 100644 index eadb80fb..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64.cpp +++ /dev/null @@ -1,1672 +0,0 @@ -#include "Cafe/HW/Espresso/PPCState.h" -#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h" -#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterHelper.h" -#include "../PPCRecompiler.h" -#include "../PPCRecompilerIml.h" -#include "BackendX64.h" -#include "Cafe/OS/libs/coreinit/coreinit_Time.h" -#include "util/MemMapper/MemMapper.h" -#include "Common/cpu_features.h" -#include - 
-static x86Assembler64::GPR32 _reg32(IMLReg physReg) -{ - cemu_assert_debug(physReg.GetRegFormat() == IMLRegFormat::I32); - IMLRegID regId = physReg.GetRegID(); - cemu_assert_debug(regId < 16); - return (x86Assembler64::GPR32)regId; -} - -static uint32 _reg64(IMLReg physReg) -{ - cemu_assert_debug(physReg.GetRegFormat() == IMLRegFormat::I64); - IMLRegID regId = physReg.GetRegID(); - cemu_assert_debug(regId < 16); - return regId; -} - -uint32 _regF64(IMLReg physReg) -{ - cemu_assert_debug(physReg.GetRegFormat() == IMLRegFormat::F64); - IMLRegID regId = physReg.GetRegID(); - cemu_assert_debug(regId >= IMLArchX86::PHYSREG_FPR_BASE && regId < IMLArchX86::PHYSREG_FPR_BASE+16); - regId -= IMLArchX86::PHYSREG_FPR_BASE; - return regId; -} - -static x86Assembler64::GPR8_REX _reg8(IMLReg physReg) -{ - cemu_assert_debug(physReg.GetRegFormat() == IMLRegFormat::I32); // for now these are represented as 32bit - return (x86Assembler64::GPR8_REX)physReg.GetRegID(); -} - -static x86Assembler64::GPR32 _reg32_from_reg8(x86Assembler64::GPR8_REX regId) -{ - return (x86Assembler64::GPR32)regId; -} - -static x86Assembler64::GPR8_REX _reg8_from_reg32(x86Assembler64::GPR32 regId) -{ - return (x86Assembler64::GPR8_REX)regId; -} - -static x86Assembler64::GPR8_REX _reg8_from_reg64(uint32 regId) -{ - return (x86Assembler64::GPR8_REX)regId; -} - -static x86Assembler64::GPR64 _reg64_from_reg32(x86Assembler64::GPR32 regId) -{ - return (x86Assembler64::GPR64)regId; -} - -X86Cond _x86Cond(IMLCondition imlCond) -{ - switch (imlCond) - { - case IMLCondition::EQ: - return X86_CONDITION_Z; - case IMLCondition::NEQ: - return X86_CONDITION_NZ; - case IMLCondition::UNSIGNED_GT: - return X86_CONDITION_NBE; - case IMLCondition::UNSIGNED_LT: - return X86_CONDITION_B; - case IMLCondition::SIGNED_GT: - return X86_CONDITION_NLE; - case IMLCondition::SIGNED_LT: - return X86_CONDITION_L; - default: - break; - } - cemu_assert_suspicious(); - return X86_CONDITION_Z; -} - -X86Cond _x86CondInverted(IMLCondition 
imlCond) -{ - switch (imlCond) - { - case IMLCondition::EQ: - return X86_CONDITION_NZ; - case IMLCondition::NEQ: - return X86_CONDITION_Z; - case IMLCondition::UNSIGNED_GT: - return X86_CONDITION_BE; - case IMLCondition::UNSIGNED_LT: - return X86_CONDITION_NB; - case IMLCondition::SIGNED_GT: - return X86_CONDITION_LE; - case IMLCondition::SIGNED_LT: - return X86_CONDITION_NL; - default: - break; - } - cemu_assert_suspicious(); - return X86_CONDITION_Z; -} - -X86Cond _x86Cond(IMLCondition imlCond, bool condIsInverted) -{ - if (condIsInverted) - return _x86CondInverted(imlCond); - return _x86Cond(imlCond); -} - -/* -* Remember current instruction output offset for reloc -* The instruction generated after this method has been called will be adjusted -*/ -void PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext_t* x64GenContext, void* extraInfo = nullptr) -{ - x64GenContext->relocateOffsetTable2.emplace_back(x64GenContext->emitter->GetWriteIndex(), extraInfo); -} - -void PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext_t* x64GenContext, sint32 jumpInstructionOffset, sint32 destinationOffset) -{ - uint8* instructionData = x64GenContext->emitter->GetBufferPtr() + jumpInstructionOffset; - if (instructionData[0] == 0x0F && (instructionData[1] >= 0x80 && instructionData[1] <= 0x8F)) - { - // far conditional jump - *(uint32*)(instructionData + 2) = (destinationOffset - (jumpInstructionOffset + 6)); - } - else if (instructionData[0] >= 0x70 && instructionData[0] <= 0x7F) - { - // short conditional jump - sint32 distance = (sint32)((destinationOffset - (jumpInstructionOffset + 2))); - cemu_assert_debug(distance >= -128 && distance <= 127); - *(uint8*)(instructionData + 1) = (uint8)distance; - } - else if (instructionData[0] == 0xE9) - { - *(uint32*)(instructionData + 1) = (destinationOffset - (jumpInstructionOffset + 5)); - } - else if (instructionData[0] == 0xEB) - { - sint32 distance = (sint32)((destinationOffset - (jumpInstructionOffset + 2))); - 
cemu_assert_debug(distance >= -128 && distance <= 127); - *(uint8*)(instructionData + 1) = (uint8)distance; - } - else - { - assert_dbg(); - } -} - -void* ATTR_MS_ABI PPCRecompiler_virtualHLE(PPCInterpreter_t* hCPU, uint32 hleFuncId) -{ - void* prevRSPTemp = hCPU->rspTemp; - if( hleFuncId == 0xFFD0 ) - { - hCPU->remainingCycles -= 500; // let subtract about 500 cycles for each HLE call - hCPU->gpr[3] = 0; - PPCInterpreter_nextInstruction(hCPU); - return hCPU; - } - else - { - auto hleCall = PPCInterpreter_getHLECall(hleFuncId); - cemu_assert(hleCall != nullptr); - hleCall(hCPU); - } - hCPU->rspTemp = prevRSPTemp; - return PPCInterpreter_getCurrentInstance(); -} - -bool PPCRecompilerX64Gen_imlInstruction_macro(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - if (imlInstruction->operation == PPCREC_IML_MACRO_B_TO_REG) - { - //x64Gen_int3(x64GenContext); - uint32 branchDstReg = _reg32(imlInstruction->op_macro.paramReg); - if(X86_REG_RDX != branchDstReg) - x64Gen_mov_reg64_reg64(x64GenContext, X86_REG_RDX, branchDstReg); - // potential optimization: Use branchDstReg directly if possible instead of moving to RDX/EDX - // JMP [offset+RDX*(8/4)+R15] - x64Gen_writeU8(x64GenContext, 0x41); - x64Gen_writeU8(x64GenContext, 0xFF); - x64Gen_writeU8(x64GenContext, 0xA4); - x64Gen_writeU8(x64GenContext, 0x57); - x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - return true; - } - else if( imlInstruction->operation == PPCREC_IML_MACRO_BL ) - { - // MOV DWORD [SPR_LinkRegister], newLR - uint32 newLR = imlInstruction->op_macro.param + 4; - x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.LR), newLR); - // remember new instruction pointer in RDX - uint32 newIP = imlInstruction->op_macro.param2; - x64Gen_mov_reg64Low32_imm32(x64GenContext, X86_REG_RDX, newIP); - // since RDX is constant we can 
use JMP [R15+const_offset] if jumpTableOffset+RDX*2 does not exceed the 2GB boundary - uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; - if (lookupOffset >= 0x80000000ULL) - { - // JMP [offset+RDX*(8/4)+R15] - x64Gen_writeU8(x64GenContext, 0x41); - x64Gen_writeU8(x64GenContext, 0xFF); - x64Gen_writeU8(x64GenContext, 0xA4); - x64Gen_writeU8(x64GenContext, 0x57); - x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - } - else - { - x64Gen_writeU8(x64GenContext, 0x41); - x64Gen_writeU8(x64GenContext, 0xFF); - x64Gen_writeU8(x64GenContext, 0xA7); - x64Gen_writeU32(x64GenContext, (uint32)lookupOffset); - } - return true; - } - else if( imlInstruction->operation == PPCREC_IML_MACRO_B_FAR ) - { - // remember new instruction pointer in RDX - uint32 newIP = imlInstruction->op_macro.param2; - x64Gen_mov_reg64Low32_imm32(x64GenContext, X86_REG_RDX, newIP); - // Since RDX is constant we can use JMP [R15+const_offset] if jumpTableOffset+RDX*2 does not exceed the 2GB boundary - uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; - if (lookupOffset >= 0x80000000ULL) - { - // JMP [offset+RDX*(8/4)+R15] - x64Gen_writeU8(x64GenContext, 0x41); - x64Gen_writeU8(x64GenContext, 0xFF); - x64Gen_writeU8(x64GenContext, 0xA4); - x64Gen_writeU8(x64GenContext, 0x57); - x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - } - else - { - x64Gen_writeU8(x64GenContext, 0x41); - x64Gen_writeU8(x64GenContext, 0xFF); - x64Gen_writeU8(x64GenContext, 0xA7); - x64Gen_writeU32(x64GenContext, (uint32)lookupOffset); - } - return true; - } - else if( imlInstruction->operation == PPCREC_IML_MACRO_LEAVE ) - { - uint32 currentInstructionAddress = imlInstruction->op_macro.param; - // remember PC value in REG_EDX - 
x64Gen_mov_reg64Low32_imm32(x64GenContext, X86_REG_RDX, currentInstructionAddress); - - uint32 newIP = 0; // special value for recompiler exit - uint64 lookupOffset = (uint64)&(((PPCRecompilerInstanceData_t*)NULL)->ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; - // JMP [R15+offset] - x64Gen_writeU8(x64GenContext, 0x41); - x64Gen_writeU8(x64GenContext, 0xFF); - x64Gen_writeU8(x64GenContext, 0xA7); - x64Gen_writeU32(x64GenContext, (uint32)lookupOffset); - return true; - } - else if( imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK ) - { - x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, imlInstruction->op_macro.param2); - x64Gen_int3(x64GenContext); - return true; - } - else if( imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES ) - { - uint32 cycleCount = imlInstruction->op_macro.param; - x64Gen_sub_mem32reg64_imm32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, remainingCycles), cycleCount); - return true; - } - else if( imlInstruction->operation == PPCREC_IML_MACRO_HLE ) - { - uint32 ppcAddress = imlInstruction->op_macro.param; - uint32 funcId = imlInstruction->op_macro.param2; - // update instruction pointer - x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, instructionPointer), ppcAddress); - // set parameters - x64Gen_mov_reg64_reg64(x64GenContext, X86_REG_RCX, REG_RESV_HCPU); - x64Gen_mov_reg64_imm64(x64GenContext, X86_REG_RDX, funcId); - // restore stackpointer from hCPU->rspTemp - x64Emit_mov_reg64_mem64(x64GenContext, X86_REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp)); - // reserve space on stack for call parameters - x64Gen_sub_reg64_imm32(x64GenContext, X86_REG_RSP, 8*11); // must be uneven number in order to retain stack 0x10 alignment - x64Gen_mov_reg64_imm64(x64GenContext, X86_REG_RBP, 0); - // call HLE function - x64Gen_mov_reg64_imm64(x64GenContext, X86_REG_RAX, (uint64)PPCRecompiler_virtualHLE); - x64Gen_call_reg64(x64GenContext, X86_REG_RAX); - // restore 
RSP to hCPU (from RAX, result of PPCRecompiler_virtualHLE) - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_HCPU, X86_REG_RAX); - // MOV R15, ppcRecompilerInstanceData - x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_RECDATA, (uint64)ppcRecompilerInstanceData); - // MOV R13, memory_base - x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_MEMBASE, (uint64)memory_base); - // check if cycles where decreased beyond zero, if yes -> leave recompiler - x64Gen_bt_mem8(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, remainingCycles), 31); // check if negative - sint32 jumpInstructionOffset1 = x64GenContext->emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NOT_CARRY, 0); - - x64Emit_mov_reg64_mem32(x64GenContext, X86_REG_RDX, REG_RESV_HCPU, offsetof(PPCInterpreter_t, instructionPointer)); - // set EAX to 0 (we assume that ppcRecompilerDirectJumpTable[0] will be a recompiler escape function) - x64Gen_xor_reg32_reg32(x64GenContext, X86_REG_RAX, X86_REG_RAX); - // ADD RAX, REG_RESV_RECDATA - x64Gen_add_reg64_reg64(x64GenContext, X86_REG_RAX, REG_RESV_RECDATA); - // JMP [recompilerCallTable+EAX/4*8] - x64Gen_jmp_memReg64(x64GenContext, X86_REG_RAX, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->emitter->GetWriteIndex()); - // check if instruction pointer was changed - // assign new instruction pointer to EAX - x64Emit_mov_reg64_mem32(x64GenContext, X86_REG_RAX, REG_RESV_HCPU, offsetof(PPCInterpreter_t, instructionPointer)); - // remember instruction pointer in REG_EDX - x64Gen_mov_reg64_reg64(x64GenContext, X86_REG_RDX, X86_REG_RAX); - // EAX *= 2 - x64Gen_add_reg64_reg64(x64GenContext, X86_REG_RAX, X86_REG_RAX); - // ADD RAX, REG_RESV_RECDATA - x64Gen_add_reg64_reg64(x64GenContext, X86_REG_RAX, REG_RESV_RECDATA); - // JMP [ppcRecompilerDirectJumpTable+RAX/4*8] - x64Gen_jmp_memReg64(x64GenContext, X86_REG_RAX, 
(uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); - return true; - } - else - { - debug_printf("Unknown recompiler macro operation %d\n", imlInstruction->operation); - assert_dbg(); - } - return false; -} - -/* -* Load from memory -*/ -bool PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, bool indexed) -{ - cemu_assert_debug(imlInstruction->op_storeLoad.registerData.GetRegFormat() == IMLRegFormat::I32); - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem.GetRegFormat() == IMLRegFormat::I32); - if (indexed) - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem2.GetRegFormat() == IMLRegFormat::I32); - - IMLRegID realRegisterData = imlInstruction->op_storeLoad.registerData.GetRegID(); - IMLRegID realRegisterMem = imlInstruction->op_storeLoad.registerMem.GetRegID(); - IMLRegID realRegisterMem2 = PPC_REC_INVALID_REGISTER; - if( indexed ) - realRegisterMem2 = imlInstruction->op_storeLoad.registerMem2.GetRegID(); - if( indexed && realRegisterMem == realRegisterMem2 ) - { - return false; - } - if( indexed && realRegisterData == realRegisterMem2 ) - { - // for indexed memory access realRegisterData must not be the same register as the second memory register, - // this can easily be worked around by swapping realRegisterMem and realRegisterMem2 - std::swap(realRegisterMem, realRegisterMem2); - } - - bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend; - bool switchEndian = imlInstruction->op_storeLoad.flags2.swapEndian; - if( imlInstruction->op_storeLoad.copyWidth == 32 ) - { - if (indexed) - { - x64Gen_lea_reg64Low32_reg64Low32PlusReg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem, realRegisterMem2); - } - if( g_CPUFeatures.x86.movbe && switchEndian ) - { - if (indexed) - { - x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_RESV_MEMBASE, 
REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); - } - else - { - x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - } - } - else - { - if (indexed) - { - x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); - if (switchEndian) - x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData); - } - else - { - x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - if (switchEndian) - x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData); - } - } - } - else if( imlInstruction->op_storeLoad.copyWidth == 16 ) - { - if (indexed) - { - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - if(g_CPUFeatures.x86.movbe && switchEndian ) - { - x64Gen_movBEZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, realRegisterData, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - if( indexed && realRegisterMem != realRegisterData ) - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - else - { - x64Gen_movZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, realRegisterData, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - if( indexed && realRegisterMem != realRegisterData ) - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - if( switchEndian ) - x64Gen_rol_reg64Low16_imm8(x64GenContext, realRegisterData, 8); - } - if( signExtend ) - x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, realRegisterData, realRegisterData); - else - x64Gen_movZeroExtend_reg64Low32_reg64Low16(x64GenContext, realRegisterData, realRegisterData); - } - else if( imlInstruction->op_storeLoad.copyWidth == 8 ) - { - if( indexed ) - 
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - if( signExtend ) - x64Gen_movSignExtend_reg64Low32_mem8Reg64PlusReg64(x64GenContext, realRegisterData, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - else - x64Emit_movZX_reg32_mem8(x64GenContext, realRegisterData, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - if( indexed && realRegisterMem != realRegisterData ) - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - else - return false; - return true; -} - -/* -* Write to memory -*/ -bool PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, bool indexed) -{ - cemu_assert_debug(imlInstruction->op_storeLoad.registerData.GetRegFormat() == IMLRegFormat::I32); - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem.GetRegFormat() == IMLRegFormat::I32); - if (indexed) - cemu_assert_debug(imlInstruction->op_storeLoad.registerMem2.GetRegFormat() == IMLRegFormat::I32); - - IMLRegID realRegisterData = imlInstruction->op_storeLoad.registerData.GetRegID(); - IMLRegID realRegisterMem = imlInstruction->op_storeLoad.registerMem.GetRegID(); - IMLRegID realRegisterMem2 = PPC_REC_INVALID_REGISTER; - if (indexed) - realRegisterMem2 = imlInstruction->op_storeLoad.registerMem2.GetRegID(); - - if (indexed && realRegisterMem == realRegisterMem2) - { - return false; - } - if (indexed && realRegisterData == realRegisterMem2) - { - // for indexed memory access realRegisterData must not be the same register as the second memory register, - // this can easily be worked around by swapping realRegisterMem and realRegisterMem2 - std::swap(realRegisterMem, realRegisterMem2); - } - - bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend; - bool swapEndian = imlInstruction->op_storeLoad.flags2.swapEndian; - if 
(imlInstruction->op_storeLoad.copyWidth == 32) - { - uint32 valueRegister; - if ((swapEndian == false || g_CPUFeatures.x86.movbe) && realRegisterMem != realRegisterData) - { - valueRegister = realRegisterData; - } - else - { - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); - valueRegister = REG_RESV_TEMP; - } - if (!g_CPUFeatures.x86.movbe && swapEndian) - x64Gen_bswap_reg64Lower32bit(x64GenContext, valueRegister); - if (indexed) - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - if (g_CPUFeatures.x86.movbe && swapEndian) - x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, valueRegister); - else - x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, valueRegister); - if (indexed) - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - else if (imlInstruction->op_storeLoad.copyWidth == 16) - { - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); - if (swapEndian) - x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); - if (indexed) - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); - if (indexed) - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - // todo: Optimize this, e.g. 
by using MOVBE - } - else if (imlInstruction->op_storeLoad.copyWidth == 8) - { - if (indexed && realRegisterMem == realRegisterData) - { - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); - realRegisterData = REG_RESV_TEMP; - } - if (indexed) - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, realRegisterData); - if (indexed) - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - else - return false; - return true; -} - -void PPCRecompilerX64Gen_imlInstruction_atomic_cmp_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regBoolOut = _reg32_from_reg8(_reg8(imlInstruction->op_atomic_compare_store.regBoolOut)); - auto regEA = _reg32(imlInstruction->op_atomic_compare_store.regEA); - auto regVal = _reg32(imlInstruction->op_atomic_compare_store.regWriteValue); - auto regCmp = _reg32(imlInstruction->op_atomic_compare_store.regCompareValue); - - cemu_assert_debug(regBoolOut == X86_REG_EAX); - cemu_assert_debug(regEA != X86_REG_EAX); - cemu_assert_debug(regVal != X86_REG_EAX); - cemu_assert_debug(regCmp != X86_REG_EAX); - - x64GenContext->emitter->MOV_dd(X86_REG_EAX, regCmp); - x64GenContext->emitter->LockPrefix(); - x64GenContext->emitter->CMPXCHG_dd_l(REG_RESV_MEMBASE, 0, _reg64_from_reg32(regEA), 1, regVal); - x64GenContext->emitter->SETcc_b(X86Cond::X86_CONDITION_Z, regBoolOut); - x64GenContext->emitter->AND_di32(regBoolOut, 1); // SETcc doesn't clear the upper bits so we do it manually here -} - -void PPCRecompilerX64Gen_imlInstruction_call_imm(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - // the register allocator takes care of spilling volatile 
registers and moving parameters to the right registers, so we don't need to do any special handling here - x64GenContext->emitter->SUB_qi8(X86_REG_RSP, 0x20); // reserve enough space for any parameters while keeping stack alignment of 16 intact - x64GenContext->emitter->MOV_qi64(X86_REG_RAX, imlInstruction->op_call_imm.callAddress); - x64GenContext->emitter->CALL_q(X86_REG_RAX); - x64GenContext->emitter->ADD_qi8(X86_REG_RSP, 0x20); - // a note about the stack pointer: - // currently the code generated by generateEnterRecompilerCode makes sure the stack is 16 byte aligned, so we don't need to fix it up here -} - -bool PPCRecompilerX64Gen_imlInstruction_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regR = _reg32(imlInstruction->op_r_r.regR); - auto regA = _reg32(imlInstruction->op_r_r.regA); - - if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN) - { - // registerResult = registerA - if (regR != regA) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ENDIAN_SWAP) - { - if (regA != regR) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, regA); // if movbe is available we can move and swap in a single instruction? 
- x64Gen_bswap_reg64Lower32bit(x64GenContext, regR); - } - else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32 ) - { - x64Gen_movSignExtend_reg64Low32_reg64Low8(x64GenContext, regR, regA); - } - else if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32) - { - x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, regR, reg32ToReg16(regA)); - } - else if( imlInstruction->operation == PPCREC_IML_OP_NOT ) - { - // copy register content if different registers - if( regR != regA ) - x64Gen_mov_reg64_reg64(x64GenContext, regR, regA); - x64Gen_not_reg64Low32(x64GenContext, regR); - } - else if (imlInstruction->operation == PPCREC_IML_OP_NEG) - { - // copy register content if different registers - if (regR != regA) - x64Gen_mov_reg64_reg64(x64GenContext, regR, regA); - x64Gen_neg_reg64Low32(x64GenContext, regR); - } - else if( imlInstruction->operation == PPCREC_IML_OP_CNTLZW ) - { - // count leading zeros - // LZCNT instruction (part of SSE4, CPUID.80000001H:ECX.ABM[Bit 5]) - if(g_CPUFeatures.x86.lzcnt) - { - x64Gen_lzcnt_reg64Low32_reg64Low32(x64GenContext, regR, regA); - } - else - { - x64Gen_test_reg64Low32_reg64Low32(x64GenContext, regA, regA); - sint32 jumpInstructionOffset1 = x64GenContext->emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); - x64Gen_bsr_reg64Low32_reg64Low32(x64GenContext, regR, regA); - x64Gen_neg_reg64Low32(x64GenContext, regR); - x64Gen_add_reg64Low32_imm32(x64GenContext, regR, 32-1); - sint32 jumpInstructionOffset2 = x64GenContext->emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); - PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->emitter->GetWriteIndex()); - x64Gen_mov_reg64Low32_imm32(x64GenContext, regR, 32); - PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->emitter->GetWriteIndex()); - } - } - else if( imlInstruction->operation == 
PPCREC_IML_OP_X86_CMP) - { - x64GenContext->emitter->CMP_dd(regR, regA); - } - else - { - cemuLog_logDebug(LogType::Force, "PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation 0x%x\n", imlInstruction->operation); - return false; - } - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regR = _reg32(imlInstruction->op_r_immS32.regR); - - if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN ) - { - x64Gen_mov_reg64Low32_imm32(x64GenContext, regR, (uint32)imlInstruction->op_r_immS32.immS32); - } - else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE ) - { - cemu_assert_debug((imlInstruction->op_r_immS32.immS32 & 0x80) == 0); - x64Gen_rol_reg64Low32_imm8(x64GenContext, regR, (uint8)imlInstruction->op_r_immS32.immS32); - } - else if( imlInstruction->operation == PPCREC_IML_OP_X86_CMP) - { - sint32 imm = imlInstruction->op_r_immS32.immS32; - x64GenContext->emitter->CMP_di32(regR, imm); - } - else - { - cemuLog_logDebug(LogType::Force, "PPCRecompilerX64Gen_imlInstruction_r_s32(): Unsupported operation 0x%x\n", imlInstruction->operation); - return false; - } - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto rRegResult = _reg32(imlInstruction->op_r_r_r.regR); - auto rRegOperand1 = _reg32(imlInstruction->op_r_r_r.regA); - auto rRegOperand2 = _reg32(imlInstruction->op_r_r_r.regB); - - if (imlInstruction->operation == PPCREC_IML_OP_ADD) - { - // registerResult = registerOperand1 + registerOperand2 - if( (rRegResult == rRegOperand1) || (rRegResult == rRegOperand2) ) - { - // be careful not to overwrite the operand before we use it - if( rRegResult == rRegOperand1 ) - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, 
rRegOperand2); - else - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); - } - else - { - // copy operand1 to destination register before doing addition - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - } - } - else if( imlInstruction->operation == PPCREC_IML_OP_SUB ) - { - if( rRegOperand1 == rRegOperand2 ) - { - // result = operand1 - operand1 -> 0 - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); - } - else if( rRegResult == rRegOperand1 ) - { - // result = result - operand2 - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - } - else if ( rRegResult == rRegOperand2 ) - { - // result = operand1 - result - x64Gen_neg_reg64Low32(x64GenContext, rRegResult); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); - } - else - { - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - } - } - else if (imlInstruction->operation == PPCREC_IML_OP_OR || imlInstruction->operation == PPCREC_IML_OP_AND || imlInstruction->operation == PPCREC_IML_OP_XOR) - { - if (rRegResult == rRegOperand2) - std::swap(rRegOperand1, rRegOperand2); - - if (rRegResult != rRegOperand1) - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); - - if (imlInstruction->operation == PPCREC_IML_OP_OR) - x64Gen_or_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - else if (imlInstruction->operation == PPCREC_IML_OP_AND) - x64Gen_and_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - else - x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - } - else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED ) - { - // registerResult = registerOperand1 * registerOperand2 - if( (rRegResult == rRegOperand1) || (rRegResult == rRegOperand2) ) - { - // be 
careful not to overwrite the operand before we use it - if( rRegResult == rRegOperand1 ) - x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - else - x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); - } - else - { - // copy operand1 to destination register before doing multiplication - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); - // add operand2 - x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); - } - } - else if( imlInstruction->operation == PPCREC_IML_OP_SLW || imlInstruction->operation == PPCREC_IML_OP_SRW ) - { - // registerResult = registerOperand1(rA) >> registerOperand2(rB) (up to 63 bits) - - if (g_CPUFeatures.x86.bmi2 && imlInstruction->operation == PPCREC_IML_OP_SRW) - { - // use BMI2 SHRX if available - x64Gen_shrx_reg64_reg64_reg64(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); - } - else if (g_CPUFeatures.x86.bmi2 && imlInstruction->operation == PPCREC_IML_OP_SLW) - { - // use BMI2 SHLX if available - x64Gen_shlx_reg64_reg64_reg64(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); - x64Gen_and_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); // trim result to 32bit - } - else - { - // lazy and slow way to do shift by register without relying on ECX/CL or BMI2 - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); - for (sint32 b = 0; b < 6; b++) - { - x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1 << b)); - sint32 jumpInstructionOffset = x64GenContext->emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set - if (b == 5) - { - x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); - } - else - { - if (imlInstruction->operation == PPCREC_IML_OP_SLW) - x64Gen_shl_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1 << b)); - else - x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1 << b)); - } - 
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->emitter->GetWriteIndex()); - } - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); - } - } - else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE ) - { - // todo: Use BMI2 rotate if available - // check if CL/ECX/RCX is available - if( rRegResult != X86_REG_RCX && rRegOperand1 != X86_REG_RCX && rRegOperand2 != X86_REG_RCX ) - { - // swap operand 2 with RCX - x64Gen_xchg_reg64_reg64(x64GenContext, X86_REG_RCX, rRegOperand2); - // move operand 1 to temp register - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); - // rotate - x64Gen_rol_reg64Low32_cl(x64GenContext, REG_RESV_TEMP); - // undo swap operand 2 with RCX - x64Gen_xchg_reg64_reg64(x64GenContext, X86_REG_RCX, rRegOperand2); - // copy to result register - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); - } - else - { - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); - // lazy and slow way to do shift by register without relying on ECX/CL - for(sint32 b=0; b<5; b++) - { - x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1<emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set - x64Gen_rol_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<emitter->GetWriteIndex()); - } - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); - } - } - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S || - imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U || - imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT) - { - if(g_CPUFeatures.x86.bmi2) - { - if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S) - x64Gen_sarx_reg32_reg32_reg32(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U) - x64Gen_shrx_reg32_reg32_reg32(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); - else 
if (imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT) - x64Gen_shlx_reg32_reg32_reg32(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); - } - else - { - cemu_assert_debug(rRegOperand2 == X86_REG_ECX); - bool useTempReg = rRegResult == X86_REG_ECX && rRegOperand1 != X86_REG_ECX; - auto origRegResult = rRegResult; - if(useTempReg) - { - x64GenContext->emitter->MOV_dd(REG_RESV_TEMP, rRegOperand1); - rRegResult = REG_RESV_TEMP; - } - if(rRegOperand1 != rRegResult) - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); - if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S) - x64GenContext->emitter->SAR_d_CL(rRegResult); - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U) - x64GenContext->emitter->SHR_d_CL(rRegResult); - else if (imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT) - x64GenContext->emitter->SHL_d_CL(rRegResult); - if(useTempReg) - x64GenContext->emitter->MOV_dd(origRegResult, REG_RESV_TEMP); - } - } - else if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_DIVIDE_UNSIGNED ) - { - x64Emit_mov_mem32_reg32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), X86_REG_EAX); - x64Emit_mov_mem32_reg32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]), X86_REG_EDX); - // mov operand 2 to temp register - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2); - // mov operand1 to EAX - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, X86_REG_EAX, rRegOperand1); - // sign or zero extend EAX to EDX:EAX based on division sign mode - if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED ) - x64Gen_cdq(x64GenContext); - else - x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, X86_REG_EDX, X86_REG_EDX); - // make sure we avoid division by zero - x64Gen_test_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 3); - // 
divide - if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED ) - x64Gen_idiv_reg64Low32(x64GenContext, REG_RESV_TEMP); - else - x64Gen_div_reg64Low32(x64GenContext, REG_RESV_TEMP); - // result of division is now stored in EAX, move it to result register - if( rRegResult != X86_REG_EAX ) - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, X86_REG_EAX); - // restore EAX / EDX - if( rRegResult != X86_REG_RAX ) - x64Emit_mov_reg64_mem32(x64GenContext, X86_REG_EAX, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0])); - if( rRegResult != X86_REG_RDX ) - x64Emit_mov_reg64_mem32(x64GenContext, X86_REG_EDX, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1])); - } - else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED || imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED ) - { - x64Emit_mov_mem32_reg32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), X86_REG_EAX); - x64Emit_mov_mem32_reg32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]), X86_REG_EDX); - // mov operand 2 to temp register - x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2); - // mov operand1 to EAX - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, X86_REG_EAX, rRegOperand1); - if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED ) - { - // zero extend EAX to EDX:EAX - x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, X86_REG_EDX, X86_REG_EDX); - } - else - { - // sign extend EAX to EDX:EAX - x64Gen_cdq(x64GenContext); - } - // multiply - if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED ) - x64Gen_imul_reg64Low32(x64GenContext, REG_RESV_TEMP); - else - x64Gen_mul_reg64Low32(x64GenContext, REG_RESV_TEMP); - // result of multiplication is now stored in EDX:EAX, move it to result register - if( rRegResult != X86_REG_EDX ) - x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, X86_REG_EDX); - // restore EAX / EDX - if( 
rRegResult != X86_REG_RAX ) - x64Emit_mov_reg64_mem32(x64GenContext, X86_REG_EAX, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0])); - if( rRegResult != X86_REG_RDX ) - x64Emit_mov_reg64_mem32(x64GenContext, X86_REG_EDX, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1])); - } - else - { - cemuLog_logDebug(LogType::Force, "PPCRecompilerX64Gen_imlInstruction_r_r_r(): Unsupported operation 0x%x\n", imlInstruction->operation); - return false; - } - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_r_r_r_carry(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regR = _reg32(imlInstruction->op_r_r_r_carry.regR); - auto regA = _reg32(imlInstruction->op_r_r_r_carry.regA); - auto regB = _reg32(imlInstruction->op_r_r_r_carry.regB); - auto regCarry = _reg32(imlInstruction->op_r_r_r_carry.regCarry); - bool carryRegIsShared = regCarry == regA || regCarry == regB; - cemu_assert_debug(regCarry != regR); // two outputs sharing the same register is undefined behavior - - switch (imlInstruction->operation) - { - case PPCREC_IML_OP_ADD: - if (regB == regR) - std::swap(regB, regA); - if (regR != regA) - x64GenContext->emitter->MOV_dd(regR, regA); - if(!carryRegIsShared) - x64GenContext->emitter->XOR_dd(regCarry, regCarry); - x64GenContext->emitter->ADD_dd(regR, regB); - x64GenContext->emitter->SETcc_b(X86_CONDITION_B, _reg8_from_reg32(regCarry)); // below condition checks carry flag - if(carryRegIsShared) - x64GenContext->emitter->AND_di8(regCarry, 1); // clear upper bits - break; - case PPCREC_IML_OP_ADD_WITH_CARRY: - // assumes that carry is already correctly initialized as 0 or 1 - if (regB == regR) - std::swap(regB, regA); - if (regR != regA) - x64GenContext->emitter->MOV_dd(regR, regA); - x64GenContext->emitter->BT_du8(regCarry, 0); // copy carry register to x86 carry flag - x64GenContext->emitter->ADC_dd(regR, regB); - 
x64GenContext->emitter->SETcc_b(X86_CONDITION_B, _reg8_from_reg32(regCarry)); - break; - default: - cemu_assert_unimplemented(); - return false; - } - return true; -} - -bool PPCRecompilerX64Gen_IsSameCompare(IMLInstruction* imlInstructionA, IMLInstruction* imlInstructionB) -{ - if(imlInstructionA->type != imlInstructionB->type) - return false; - if(imlInstructionA->type == PPCREC_IML_TYPE_COMPARE) - return imlInstructionA->op_compare.regA == imlInstructionB->op_compare.regA && imlInstructionA->op_compare.regB == imlInstructionB->op_compare.regB; - else if(imlInstructionA->type == PPCREC_IML_TYPE_COMPARE_S32) - return imlInstructionA->op_compare_s32.regA == imlInstructionB->op_compare_s32.regA && imlInstructionA->op_compare_s32.immS32 == imlInstructionB->op_compare_s32.immS32; - return false; -} - -bool PPCRecompilerX64Gen_imlInstruction_compare_x(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, sint32& extraInstructionsProcessed) -{ - extraInstructionsProcessed = 0; - boost::container::static_vector compareInstructions; - compareInstructions.push_back(imlInstruction); - for(sint32 i=1; i<4; i++) - { - IMLInstruction* nextIns = x64GenContext->GetNextInstruction(i); - if(!nextIns || !PPCRecompilerX64Gen_IsSameCompare(imlInstruction, nextIns)) - break; - compareInstructions.push_back(nextIns); - } - auto OperandOverlapsWithR = [&](IMLInstruction* ins) -> bool - { - cemu_assert_debug(ins->type == PPCREC_IML_TYPE_COMPARE || ins->type == PPCREC_IML_TYPE_COMPARE_S32); - if(ins->type == PPCREC_IML_TYPE_COMPARE) - return _reg32_from_reg8(_reg8(ins->op_compare.regR)) == _reg32(ins->op_compare.regA) || _reg32_from_reg8(_reg8(ins->op_compare.regR)) == _reg32(ins->op_compare.regB); - else /* PPCREC_IML_TYPE_COMPARE_S32 */ - return _reg32_from_reg8(_reg8(ins->op_compare_s32.regR)) == _reg32(ins->op_compare_s32.regA); - }; - auto GetRegR = [](IMLInstruction* insn) - { - return insn->type == 
PPCREC_IML_TYPE_COMPARE ? _reg32_from_reg8(_reg8(insn->op_compare.regR)) : _reg32_from_reg8(_reg8(insn->op_compare_s32.regR)); - }; - // prefer XOR method for zeroing out registers if possible - for(auto& it : compareInstructions) - { - if(OperandOverlapsWithR(it)) - continue; - auto regR = GetRegR(it); - x64GenContext->emitter->XOR_dd(regR, regR); // zero bytes unaffected by SETcc - } - // emit the compare instruction - if(imlInstruction->type == PPCREC_IML_TYPE_COMPARE) - { - auto regA = _reg32(imlInstruction->op_compare.regA); - auto regB = _reg32(imlInstruction->op_compare.regB); - x64GenContext->emitter->CMP_dd(regA, regB); - } - else if(imlInstruction->type == PPCREC_IML_TYPE_COMPARE_S32) - { - auto regA = _reg32(imlInstruction->op_compare_s32.regA); - sint32 imm = imlInstruction->op_compare_s32.immS32; - x64GenContext->emitter->CMP_di32(regA, imm); - } - // emit the SETcc instructions - for(auto& it : compareInstructions) - { - auto regR = _reg8(it->op_compare.regR); - X86Cond cond = _x86Cond(it->op_compare.cond); - if(OperandOverlapsWithR(it)) - x64GenContext->emitter->MOV_di32(_reg32_from_reg8(regR), 0); - x64GenContext->emitter->SETcc_b(cond, regR); - } - extraInstructionsProcessed = (sint32)compareInstructions.size() - 1; - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_cjump2(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, IMLSegment* imlSegment) -{ - auto regBool = _reg8(imlInstruction->op_conditional_jump.registerBool); - bool mustBeTrue = imlInstruction->op_conditional_jump.mustBeTrue; - x64GenContext->emitter->TEST_bb(regBool, regBool); - PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, imlSegment->nextSegmentBranchTaken); - x64GenContext->emitter->Jcc_j32(mustBeTrue ? 
X86_CONDITION_NZ : X86_CONDITION_Z, 0); - return true; -} - -void PPCRecompilerX64Gen_imlInstruction_x86_eflags_jcc(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, IMLSegment* imlSegment) -{ - X86Cond cond = _x86Cond(imlInstruction->op_x86_eflags_jcc.cond, imlInstruction->op_x86_eflags_jcc.invertedCondition); - PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, imlSegment->nextSegmentBranchTaken); - x64GenContext->emitter->Jcc_j32(cond, 0); -} - -bool PPCRecompilerX64Gen_imlInstruction_jump2(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, IMLSegment* imlSegment) -{ - PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, imlSegment->nextSegmentBranchTaken); - x64GenContext->emitter->JMP_j32(0); - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_r_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regR = _reg32(imlInstruction->op_r_r_s32.regR); - auto regA = _reg32(imlInstruction->op_r_r_s32.regA); - uint32 immS32 = imlInstruction->op_r_r_s32.immS32; - - if( imlInstruction->operation == PPCREC_IML_OP_ADD ) - { - uint32 immU32 = (uint32)imlInstruction->op_r_r_s32.immS32; - if(regR != regA) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, regA); - x64Gen_add_reg64Low32_imm32(x64GenContext, regR, (uint32)immU32); - } - else if (imlInstruction->operation == PPCREC_IML_OP_SUB) - { - if (regR != regA) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, regA); - x64Gen_sub_reg64Low32_imm32(x64GenContext, regR, immS32); - } - else if (imlInstruction->operation == PPCREC_IML_OP_AND || - imlInstruction->operation == PPCREC_IML_OP_OR || - imlInstruction->operation == PPCREC_IML_OP_XOR) - { - if (regR != regA) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, 
regA); - if (imlInstruction->operation == PPCREC_IML_OP_AND) - x64Gen_and_reg64Low32_imm32(x64GenContext, regR, immS32); - else if (imlInstruction->operation == PPCREC_IML_OP_OR) - x64Gen_or_reg64Low32_imm32(x64GenContext, regR, immS32); - else // XOR - x64Gen_xor_reg64Low32_imm32(x64GenContext, regR, immS32); - } - else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED ) - { - // registerResult = registerOperand * immS32 - sint32 immS32 = (uint32)imlInstruction->op_r_r_s32.immS32; - x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (sint64)immS32); // todo: Optimize - if( regR != regA ) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, regA); - x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, regR, REG_RESV_TEMP); - } - else if (imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT || - imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U || - imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S) - { - if( regA != regR ) - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, regR, regA); - if (imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT) - x64Gen_shl_reg64Low32_imm8(x64GenContext, regR, imlInstruction->op_r_r_s32.immS32); - else if (imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U) - x64Gen_shr_reg64Low32_imm8(x64GenContext, regR, imlInstruction->op_r_r_s32.immS32); - else // RIGHT_SHIFT_S - x64Gen_sar_reg64Low32_imm8(x64GenContext, regR, imlInstruction->op_r_r_s32.immS32); - } - else - { - debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r_s32(): Unsupported operation 0x%x\n", imlInstruction->operation); - return false; - } - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_r_r_s32_carry(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regR = _reg32(imlInstruction->op_r_r_s32_carry.regR); - auto regA = _reg32(imlInstruction->op_r_r_s32_carry.regA); - sint32 immS32 = imlInstruction->op_r_r_s32_carry.immS32; - auto 
regCarry = _reg32(imlInstruction->op_r_r_s32_carry.regCarry); - cemu_assert_debug(regCarry != regR); // we dont allow two different outputs sharing the same register - - bool delayCarryInit = regCarry == regA; - - switch (imlInstruction->operation) - { - case PPCREC_IML_OP_ADD: - if(!delayCarryInit) - x64GenContext->emitter->XOR_dd(regCarry, regCarry); - if (regR != regA) - x64GenContext->emitter->MOV_dd(regR, regA); - x64GenContext->emitter->ADD_di32(regR, immS32); - if(delayCarryInit) - x64GenContext->emitter->MOV_di32(regCarry, 0); - x64GenContext->emitter->SETcc_b(X86_CONDITION_B, _reg8_from_reg32(regCarry)); - break; - case PPCREC_IML_OP_ADD_WITH_CARRY: - // assumes that carry is already correctly initialized as 0 or 1 - cemu_assert_debug(regCarry != regR); - if (regR != regA) - x64GenContext->emitter->MOV_dd(regR, regA); - x64GenContext->emitter->BT_du8(regCarry, 0); // copy carry register to x86 carry flag - x64GenContext->emitter->ADC_di32(regR, immS32); - x64GenContext->emitter->SETcc_b(X86_CONDITION_B, _reg8_from_reg32(regCarry)); - break; - default: - cemu_assert_unimplemented(); - return false; - } - return true; -} - -bool PPCRecompilerX64Gen_imlInstruction_conditionalJumpCycleCheck(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - // some tests (all performed on a i7-4790K) - // 1) DEC [mem] + JNS has significantly worse performance than BT + JNC (probably due to additional memory write and direct dependency) - // 2) CMP [mem], 0 + JG has about equal (or slightly worse) performance than BT + JNC - - // BT - x64Gen_bt_mem8(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, remainingCycles), 31); // check if negative - cemu_assert_debug(x64GenContext->currentSegment->GetBranchTaken()); - PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, x64GenContext->currentSegment->GetBranchTaken()); - x64Gen_jmpc_far(x64GenContext, X86_CONDITION_CARRY, 0); - 
return true; -} - -void PPCRecompilerX64Gen_imlInstruction_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - uint32 name = imlInstruction->op_r_name.name; - if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::I64) - { - auto regR = _reg64(imlInstruction->op_r_name.regR); - if (name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0 + 32) - { - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, gpr) + sizeof(uint32) * (name - PPCREC_NAME_R0)); - } - else if (name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0 + 999) - { - sint32 sprIndex = (name - PPCREC_NAME_SPR0); - if (sprIndex == SPR_LR) - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.LR)); - else if (sprIndex == SPR_CTR) - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.CTR)); - else if (sprIndex == SPR_XER) - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.XER)); - else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7) - { - sint32 memOffset = offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0); - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, memOffset); - } - else - assert_dbg(); - } - else if (name >= PPCREC_NAME_TEMPORARY && name < PPCREC_NAME_TEMPORARY + 4) - { - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR_reg) + sizeof(uint32) * (name - PPCREC_NAME_TEMPORARY)); - } - else if (name == PPCREC_NAME_XER_CA) - { - x64Emit_movZX_reg64_mem8(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, xer_ca)); - } - else if (name == PPCREC_NAME_XER_SO) - { - x64Emit_movZX_reg64_mem8(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, xer_so)); - } - else if (name >= PPCREC_NAME_CR && name <= PPCREC_NAME_CR_LAST) 
- { - x64Emit_movZX_reg64_mem8(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, cr) + (name - PPCREC_NAME_CR)); - } - else if (name == PPCREC_NAME_CPU_MEMRES_EA) - { - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemAddr)); - } - else if (name == PPCREC_NAME_CPU_MEMRES_VAL) - { - x64Emit_mov_reg64_mem32(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemValue)); - } - else - assert_dbg(); - } - else if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::F64) - { - auto regR = _regF64(imlInstruction->op_r_name.regR); - if (name >= PPCREC_NAME_FPR_HALF && name < (PPCREC_NAME_FPR_HALF + 64)) - { - sint32 regIndex = (name - PPCREC_NAME_FPR_HALF) / 2; - sint32 pairIndex = (name - PPCREC_NAME_FPR_HALF) % 2; - x64Gen_movsd_xmmReg_memReg64(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, fpr) + sizeof(FPR_t) * regIndex + pairIndex * sizeof(double)); - } - else if (name >= PPCREC_NAME_TEMPORARY_FPR0 || name < (PPCREC_NAME_TEMPORARY_FPR0 + 8)) - { - x64Gen_movupd_xmmReg_memReg128(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR) + sizeof(FPR_t) * (name - PPCREC_NAME_TEMPORARY_FPR0)); - } - else - { - cemu_assert_debug(false); - } - } - else - DEBUG_BREAK; - -} - -void PPCRecompilerX64Gen_imlInstruction_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - uint32 name = imlInstruction->op_r_name.name; - - if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::I64) - { - auto regR = _reg64(imlInstruction->op_r_name.regR); - if (name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0 + 32) - { - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, gpr) + sizeof(uint32) * (name - PPCREC_NAME_R0), regR); - } - else if (name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0 + 999) - { - uint32 sprIndex = (name - 
PPCREC_NAME_SPR0); - if (sprIndex == SPR_LR) - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.LR), regR); - else if (sprIndex == SPR_CTR) - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.CTR), regR); - else if (sprIndex == SPR_XER) - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, spr.XER), regR); - else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7) - { - sint32 memOffset = offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0); - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, memOffset, regR); - } - else - assert_dbg(); - } - else if (name >= PPCREC_NAME_TEMPORARY && name < PPCREC_NAME_TEMPORARY + 4) - { - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR_reg) + sizeof(uint32) * (name - PPCREC_NAME_TEMPORARY), regR); - } - else if (name == PPCREC_NAME_XER_CA) - { - x64GenContext->emitter->MOV_bb_l(REG_RESV_HCPU, offsetof(PPCInterpreter_t, xer_ca), X86_REG_NONE, 0, _reg8_from_reg64(regR)); - } - else if (name == PPCREC_NAME_XER_SO) - { - x64GenContext->emitter->MOV_bb_l(REG_RESV_HCPU, offsetof(PPCInterpreter_t, xer_so), X86_REG_NONE, 0, _reg8_from_reg64(regR)); - } - else if (name >= PPCREC_NAME_CR && name <= PPCREC_NAME_CR_LAST) - { - x64GenContext->emitter->MOV_bb_l(REG_RESV_HCPU, offsetof(PPCInterpreter_t, cr) + (name - PPCREC_NAME_CR), X86_REG_NONE, 0, _reg8_from_reg64(regR)); - } - else if (name == PPCREC_NAME_CPU_MEMRES_EA) - { - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemAddr), regR); - } - else if (name == PPCREC_NAME_CPU_MEMRES_VAL) - { - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemValue), regR); - } - else - assert_dbg(); - } - else if (imlInstruction->op_r_name.regR.GetBaseFormat() == IMLRegFormat::F64) - { - auto regR = 
_regF64(imlInstruction->op_r_name.regR); - uint32 name = imlInstruction->op_r_name.name; - if (name >= PPCREC_NAME_FPR_HALF && name < (PPCREC_NAME_FPR_HALF + 64)) - { - sint32 regIndex = (name - PPCREC_NAME_FPR_HALF) / 2; - sint32 pairIndex = (name - PPCREC_NAME_FPR_HALF) % 2; - x64Gen_movsd_memReg64_xmmReg(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, fpr) + sizeof(FPR_t) * regIndex + (pairIndex ? sizeof(double) : 0)); - } - else if (name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0 + 8)) - { - x64Gen_movupd_memReg128_xmmReg(x64GenContext, regR, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR) + sizeof(FPR_t) * (name - PPCREC_NAME_TEMPORARY_FPR0)); - } - else - { - cemu_assert_debug(false); - } - } - else - DEBUG_BREAK; - - -} - -uint8* codeMemoryBlock = nullptr; -sint32 codeMemoryBlockIndex = 0; -sint32 codeMemoryBlockSize = 0; - -std::mutex mtx_allocExecutableMemory; - -uint8* PPCRecompilerX86_allocateExecutableMemory(sint32 size) -{ - std::lock_guard lck(mtx_allocExecutableMemory); - if( codeMemoryBlockIndex+size > codeMemoryBlockSize ) - { - // allocate new block - codeMemoryBlockSize = std::max(1024*1024*4, size+1024); // 4MB (or more if the function is larger than 4MB) - codeMemoryBlockIndex = 0; - codeMemoryBlock = (uint8*)MemMapper::AllocateMemory(nullptr, codeMemoryBlockSize, MemMapper::PAGE_PERMISSION::P_RWX); - } - uint8* codeMem = codeMemoryBlock + codeMemoryBlockIndex; - codeMemoryBlockIndex += size; - // pad to 4 byte alignment - while (codeMemoryBlockIndex & 3) - { - codeMemoryBlock[codeMemoryBlockIndex] = 0x90; - codeMemoryBlockIndex++; - } - return codeMem; -} - -bool PPCRecompiler_generateX64Code(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext) -{ - x64GenContext_t x64GenContext{}; - - // generate iml instruction code - bool codeGenerationFailed = false; - for (IMLSegment* segIt : ppcImlGenContext->segmentList2) - { - x64GenContext.currentSegment = segIt; - segIt->x64Offset = 
x64GenContext.emitter->GetWriteIndex(); - for(size_t i=0; iimlList.size(); i++) - { - x64GenContext.m_currentInstructionEmitIndex = i; - IMLInstruction* imlInstruction = segIt->imlList.data() + i; - - if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME ) - { - PPCRecompilerX64Gen_imlInstruction_r_name(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R ) - { - PPCRecompilerX64Gen_imlInstruction_name_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == PPCREC_IML_TYPE_R_R ) - { - if( PPCRecompilerX64Gen_imlInstruction_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) - { - if (PPCRecompilerX64Gen_imlInstruction_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32) - { - if (PPCRecompilerX64Gen_imlInstruction_r_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32_CARRY) - { - if (PPCRecompilerX64Gen_imlInstruction_r_r_s32_carry(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R) - { - if (PPCRecompilerX64Gen_imlInstruction_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R_CARRY) - { - if (PPCRecompilerX64Gen_imlInstruction_r_r_r_carry(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) - codeGenerationFailed = true; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_COMPARE || imlInstruction->type == 
PPCREC_IML_TYPE_COMPARE_S32) - { - sint32 extraInstructionsProcessed; - PPCRecompilerX64Gen_imlInstruction_compare_x(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, extraInstructionsProcessed); - i += extraInstructionsProcessed; - } - else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_JUMP) - { - if (PPCRecompilerX64Gen_imlInstruction_cjump2(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, segIt) == false) - codeGenerationFailed = true; - } - else if(imlInstruction->type == PPCREC_IML_TYPE_X86_EFLAGS_JCC) - { - PPCRecompilerX64Gen_imlInstruction_x86_eflags_jcc(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, segIt); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_JUMP) - { - if (PPCRecompilerX64Gen_imlInstruction_jump2(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, segIt) == false) - codeGenerationFailed = true; - } - else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK ) - { - PPCRecompilerX64Gen_imlInstruction_conditionalJumpCycleCheck(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO ) - { - if( PPCRecompilerX64Gen_imlInstruction_macro(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD ) - { - if( PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED ) - { - if( PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_STORE ) - { - if( PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, 
imlInstruction, false) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED ) - { - if( PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) - { - codeGenerationFailed = true; - } - } - else if (imlInstruction->type == PPCREC_IML_TYPE_ATOMIC_CMP_STORE) - { - PPCRecompilerX64Gen_imlInstruction_atomic_cmp_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_CALL_IMM) - { - PPCRecompilerX64Gen_imlInstruction_call_imm(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP ) - { - // no op - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD ) - { - if( PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED ) - { - if( PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE ) - { - if( PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED ) - { - if( PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) - { - codeGenerationFailed = true; - } - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R ) - { - PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == 
PPCREC_IML_TYPE_FPR_R_R_R ) - { - PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R ) - { - PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R ) - { - PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_COMPARE) - { - PPCRecompilerX64Gen_imlInstruction_fpr_compare(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); - } - else - { - debug_printf("PPCRecompiler_generateX64Code(): Unsupported iml type 0x%x\n", imlInstruction->type); - assert_dbg(); - } - } - } - // handle failed code generation - if( codeGenerationFailed ) - { - return false; - } - // allocate executable memory - uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.emitter->GetBuffer().size_bytes()); - size_t baseAddress = (size_t)executableMemory; - // fix relocs - for(auto& relocIt : x64GenContext.relocateOffsetTable2) - { - // search for segment that starts with this offset - uint32 ppcOffset = (uint32)(size_t)relocIt.extraInfo; - uint32 x64Offset = 0xFFFFFFFF; - - IMLSegment* destSegment = (IMLSegment*)relocIt.extraInfo; - x64Offset = destSegment->x64Offset; - - uint32 relocBase = relocIt.offset; - uint8* relocInstruction = x64GenContext.emitter->GetBufferPtr()+relocBase; - if( relocInstruction[0] == 0x0F && (relocInstruction[1] >= 0x80 && relocInstruction[1] <= 0x8F) ) - { - // Jcc relativeImm32 - sint32 distanceNearJump = (sint32)((baseAddress + x64Offset) - (baseAddress + relocBase + 2)); - if (distanceNearJump >= -128 && distanceNearJump < 127) // disabled - { - // convert to near Jcc - *(uint8*)(relocInstruction + 0) = (uint8)(relocInstruction[1]-0x80 + 0x70); - // patch offset - 
*(uint8*)(relocInstruction + 1) = (uint8)distanceNearJump; - // replace unused 4 bytes with NOP instruction - relocInstruction[2] = 0x0F; - relocInstruction[3] = 0x1F; - relocInstruction[4] = 0x40; - relocInstruction[5] = 0x00; - } - else - { - // patch offset - *(uint32*)(relocInstruction + 2) = (uint32)((baseAddress + x64Offset) - (baseAddress + relocBase + 6)); - } - } - else if( relocInstruction[0] == 0xE9 ) - { - // JMP relativeImm32 - *(uint32*)(relocInstruction+1) = (uint32)((baseAddress+x64Offset)-(baseAddress+relocBase+5)); - } - else - assert_dbg(); - } - - // copy code to executable memory - std::span codeBuffer = x64GenContext.emitter->GetBuffer(); - memcpy(executableMemory, codeBuffer.data(), codeBuffer.size_bytes()); - // set code - PPCRecFunction->x86Code = executableMemory; - PPCRecFunction->x86Size = codeBuffer.size_bytes(); - return true; -} - -void PPCRecompilerX64Gen_generateEnterRecompilerCode() -{ - x64GenContext_t x64GenContext{}; - - // start of recompiler entry function (15 regs) - x64Gen_push_reg64(&x64GenContext, X86_REG_RAX); - x64Gen_push_reg64(&x64GenContext, X86_REG_RCX); - x64Gen_push_reg64(&x64GenContext, X86_REG_RDX); - x64Gen_push_reg64(&x64GenContext, X86_REG_RBX); - x64Gen_push_reg64(&x64GenContext, X86_REG_RBP); - x64Gen_push_reg64(&x64GenContext, X86_REG_RDI); - x64Gen_push_reg64(&x64GenContext, X86_REG_RSI); - x64Gen_push_reg64(&x64GenContext, X86_REG_R8); - x64Gen_push_reg64(&x64GenContext, X86_REG_R9); - x64Gen_push_reg64(&x64GenContext, X86_REG_R10); - x64Gen_push_reg64(&x64GenContext, X86_REG_R11); - x64Gen_push_reg64(&x64GenContext, X86_REG_R12); - x64Gen_push_reg64(&x64GenContext, X86_REG_R13); - x64Gen_push_reg64(&x64GenContext, X86_REG_R14); - x64Gen_push_reg64(&x64GenContext, X86_REG_R15); - - // 000000007775EF04 | E8 00 00 00 00 call +0x00 - x64Gen_writeU8(&x64GenContext, 0xE8); - x64Gen_writeU8(&x64GenContext, 0x00); - x64Gen_writeU8(&x64GenContext, 0x00); - x64Gen_writeU8(&x64GenContext, 0x00); - 
x64Gen_writeU8(&x64GenContext, 0x00); - //000000007775EF09 | 48 83 04 24 05 add qword ptr ss:[rsp],5 - x64Gen_writeU8(&x64GenContext, 0x48); - x64Gen_writeU8(&x64GenContext, 0x83); - x64Gen_writeU8(&x64GenContext, 0x04); - x64Gen_writeU8(&x64GenContext, 0x24); - uint32 jmpPatchOffset = x64GenContext.emitter->GetWriteIndex(); - x64Gen_writeU8(&x64GenContext, 0); // skip the distance until after the JMP - x64Emit_mov_mem64_reg64(&x64GenContext, X86_REG_RDX, offsetof(PPCInterpreter_t, rspTemp), X86_REG_RSP); - - // MOV RSP, RDX (ppc interpreter instance) - x64Gen_mov_reg64_reg64(&x64GenContext, REG_RESV_HCPU, X86_REG_RDX); - // MOV R15, ppcRecompilerInstanceData - x64Gen_mov_reg64_imm64(&x64GenContext, REG_RESV_RECDATA, (uint64)ppcRecompilerInstanceData); - // MOV R13, memory_base - x64Gen_mov_reg64_imm64(&x64GenContext, REG_RESV_MEMBASE, (uint64)memory_base); - - //JMP recFunc - x64Gen_jmp_reg64(&x64GenContext, X86_REG_RCX); // call argument 1 - - x64GenContext.emitter->GetBuffer()[jmpPatchOffset] = (x64GenContext.emitter->GetWriteIndex() -(jmpPatchOffset-4)); - - //recompilerExit1: - x64Gen_pop_reg64(&x64GenContext, X86_REG_R15); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R14); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R13); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R12); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R11); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R10); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R9); - x64Gen_pop_reg64(&x64GenContext, X86_REG_R8); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RSI); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RDI); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RBP); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RBX); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RDX); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RCX); - x64Gen_pop_reg64(&x64GenContext, X86_REG_RAX); - // RET - x64Gen_ret(&x64GenContext); - - uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.emitter->GetBuffer().size_bytes()); - // 
copy code to executable memory - memcpy(executableMemory, x64GenContext.emitter->GetBuffer().data(), x64GenContext.emitter->GetBuffer().size_bytes()); - PPCRecompiler_enterRecompilerCode = (void ATTR_MS_ABI (*)(uint64,uint64))executableMemory; -} - - -void* PPCRecompilerX64Gen_generateLeaveRecompilerCode() -{ - x64GenContext_t x64GenContext{}; - - // update instruction pointer - // LR is in EDX - x64Emit_mov_mem32_reg32(&x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, instructionPointer), X86_REG_EDX); - // MOV RSP, [hCPU->rspTemp] - x64Emit_mov_reg64_mem64(&x64GenContext, X86_REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp)); - // RET - x64Gen_ret(&x64GenContext); - - uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.emitter->GetBuffer().size_bytes()); - // copy code to executable memory - memcpy(executableMemory, x64GenContext.emitter->GetBuffer().data(), x64GenContext.emitter->GetBuffer().size_bytes()); - return executableMemory; -} - -void PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions() -{ - PPCRecompilerX64Gen_generateEnterRecompilerCode(); - PPCRecompiler_leaveRecompilerCode_unvisited = (void ATTR_MS_ABI (*)())PPCRecompilerX64Gen_generateLeaveRecompilerCode(); - PPCRecompiler_leaveRecompilerCode_visited = (void ATTR_MS_ABI (*)())PPCRecompilerX64Gen_generateLeaveRecompilerCode(); - cemu_assert_debug(PPCRecompiler_leaveRecompilerCode_unvisited != PPCRecompiler_leaveRecompilerCode_visited); -} - diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64FPU.cpp b/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64FPU.cpp deleted file mode 100644 index 6a8b1b97..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64FPU.cpp +++ /dev/null @@ -1,469 +0,0 @@ -#include "../PPCRecompiler.h" -#include "../IML/IML.h" -#include "BackendX64.h" -#include "Common/cpu_features.h" - -uint32 _regF64(IMLReg physReg); - -uint32 _regI32(IMLReg r) -{ - cemu_assert_debug(r.GetRegFormat() == 
IMLRegFormat::I32); - return (uint32)r.GetRegID(); -} - -static x86Assembler64::GPR32 _reg32(sint8 physRegId) -{ - return (x86Assembler64::GPR32)physRegId; -} - -static x86Assembler64::GPR8_REX _reg8(IMLReg r) -{ - cemu_assert_debug(r.GetRegFormat() == IMLRegFormat::I32); // currently bool regs are implemented as 32bit registers - return (x86Assembler64::GPR8_REX)r.GetRegID(); -} - -static x86Assembler64::GPR32 _reg32_from_reg8(x86Assembler64::GPR8_REX regId) -{ - return (x86Assembler64::GPR32)regId; -} - -static x86Assembler64::GPR8_REX _reg8_from_reg32(x86Assembler64::GPR32 regId) -{ - return (x86Assembler64::GPR8_REX)regId; -} - -// load from memory -bool PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, bool indexed) -{ - sint32 realRegisterXMM = _regF64(imlInstruction->op_storeLoad.registerData); - sint32 realRegisterMem = _regI32(imlInstruction->op_storeLoad.registerMem); - sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER; - if( indexed ) - realRegisterMem2 = _regI32(imlInstruction->op_storeLoad.registerMem2); - uint8 mode = imlInstruction->op_storeLoad.mode; - - if( mode == PPCREC_FPR_LD_MODE_SINGLE ) - { - // load byte swapped single into temporary FPR - if( indexed ) - { - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); - if(g_CPUFeatures.x86.movbe) - x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); - else - x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); - } - else - { - if(g_CPUFeatures.x86.movbe) - x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, 
imlInstruction->op_storeLoad.immS32); - else - x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); - } - if(g_CPUFeatures.x86.movbe == false ) - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - x64Gen_movd_xmmReg_reg64Low32(x64GenContext, realRegisterXMM, REG_RESV_TEMP); - - if (imlInstruction->op_storeLoad.flags2.notExpanded) - { - // leave value as single - } - else - { - x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, realRegisterXMM); - } - } - else if( mode == PPCREC_FPR_LD_MODE_DOUBLE ) - { - if( g_CPUFeatures.x86.avx ) - { - if( indexed ) - { - // calculate offset - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); - // load value - x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+0); - x64GenContext->emitter->BSWAP_q(REG_RESV_TEMP); - x64Gen_movq_xmmReg_reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP); - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_FPR_TEMP); - } - else - { - x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32+0); - x64GenContext->emitter->BSWAP_q(REG_RESV_TEMP); - x64Gen_movq_xmmReg_reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP); - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_FPR_TEMP); - } - } - else - { - if( indexed ) - { - // calculate offset - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); - // load double low part to temporaryFPR - x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+0); - x64Gen_bswap_reg64Lower32bit(x64GenContext, 
REG_RESV_TEMP); - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)+4, REG_RESV_TEMP); - // calculate offset again - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); - // load double high part to temporaryFPR - x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+4); - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)+0, REG_RESV_TEMP); - // load double from temporaryFPR - x64Gen_movlpd_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)); - } - else - { - // load double low part to temporaryFPR - x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32+0); - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)+4, REG_RESV_TEMP); - // load double high part to temporaryFPR - x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32+4); - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - x64Emit_mov_mem32_reg64(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)+0, REG_RESV_TEMP); - // load double from temporaryFPR - x64Gen_movlpd_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)); - } - } - } - else - { - return false; - } - return true; -} - -// store to memory -bool PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, bool indexed) -{ - 
sint32 realRegisterXMM = _regF64(imlInstruction->op_storeLoad.registerData); - sint32 realRegisterMem = _regI32(imlInstruction->op_storeLoad.registerMem); - sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER; - if( indexed ) - realRegisterMem2 = _regI32(imlInstruction->op_storeLoad.registerMem2); - uint8 mode = imlInstruction->op_storeLoad.mode; - if( mode == PPCREC_FPR_ST_MODE_SINGLE ) - { - if (imlInstruction->op_storeLoad.flags2.notExpanded) - { - // value is already in single format - x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, realRegisterXMM); - } - else - { - x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, realRegisterXMM); - x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP); - } - if(g_CPUFeatures.x86.movbe == false ) - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - if( indexed ) - { - if( realRegisterMem == realRegisterMem2 ) - assert_dbg(); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - if(g_CPUFeatures.x86.movbe) - x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); - else - x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); - if( indexed ) - { - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - } - else if( mode == PPCREC_FPR_ST_MODE_DOUBLE ) - { - if( indexed ) - { - if( realRegisterMem == realRegisterMem2 ) - assert_dbg(); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)); - // store double low part - x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)+0); - 
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32+4, REG_RESV_TEMP); - // store double high part - x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryFPR)+4); - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32+0, REG_RESV_TEMP); - if( indexed ) - { - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - } - else if( mode == PPCREC_FPR_ST_MODE_UI32_FROM_PS0 ) - { - x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, realRegisterXMM); - x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); - if( indexed ) - { - cemu_assert_debug(realRegisterMem == realRegisterMem2); - x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); - x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); - } - else - { - x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); - } - } - else - { - debug_printf("PPCRecompilerX64Gen_imlInstruction_fpr_store(): Unsupported mode %d\n", mode); - return false; - } - return true; -} - -// FPR op FPR -void PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - if( imlInstruction->operation == PPCREC_IML_OP_FPR_FLOAT_TO_INT ) - { - uint32 regGpr = _regI32(imlInstruction->op_fpr_r_r.regR); - uint32 regFpr = _regF64(imlInstruction->op_fpr_r_r.regA); - 
x64Gen_cvttsd2si_reg64Low_xmmReg(x64GenContext, regGpr, regFpr); - return; - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_INT_TO_FLOAT ) - { - uint32 regFpr = _regF64(imlInstruction->op_fpr_r_r.regR); - uint32 regGpr = _regI32(imlInstruction->op_fpr_r_r.regA); - x64Gen_cvtsi2sd_xmmReg_xmmReg(x64GenContext, regFpr, regGpr); - return; - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_BITCAST_INT_TO_FLOAT) - { - cemu_assert_debug(imlInstruction->op_fpr_r_r.regR.GetRegFormat() == IMLRegFormat::F64); // assuming target is always F64 for now - cemu_assert_debug(imlInstruction->op_fpr_r_r.regA.GetRegFormat() == IMLRegFormat::I32); // supporting only 32bit floats as input for now - // exact operation depends on size of types. Floats are automatically promoted to double if the target is F64 - uint32 regFpr = _regF64(imlInstruction->op_fpr_r_r.regR); - if (imlInstruction->op_fpr_r_r.regA.GetRegFormat() == IMLRegFormat::I32) - { - uint32 regGpr = _regI32(imlInstruction->op_fpr_r_r.regA); - x64Gen_movq_xmmReg_reg64(x64GenContext, regFpr, regGpr); // using reg32 as reg64 param here is ok. 
We'll refactor later - // float to double - x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, regFpr, regFpr); - } - else - { - cemu_assert_unimplemented(); - } - return; - } - - uint32 regR = _regF64(imlInstruction->op_fpr_r_r.regR); - uint32 regA = _regF64(imlInstruction->op_fpr_r_r.regA); - if( imlInstruction->operation == PPCREC_IML_OP_FPR_ASSIGN ) - { - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY ) - { - x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE ) - { - x64Gen_divsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ADD ) - { - x64Gen_addsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB ) - { - x64Gen_subsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_FCTIWZ ) - { - x64Gen_cvttsd2si_xmmReg_xmmReg(x64GenContext, REG_RESV_TEMP, regA); - x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); - // move to FPR register - x64Gen_movq_xmmReg_reg64(x64GenContext, regR, REG_RESV_TEMP); - } - else - { - assert_dbg(); - } -} - -/* - * FPR = op (fprA, fprB) - */ -void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - uint32 regR = _regF64(imlInstruction->op_fpr_r_r_r.regR); - uint32 regA = _regF64(imlInstruction->op_fpr_r_r_r.regA); - uint32 regB = _regF64(imlInstruction->op_fpr_r_r_r.regB); - - if (imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY) - { - if (regR == regA) - { - x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, regR, regB); - } - else if (regR == regB) - { - x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else - { - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, 
regR, regA); - x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, regR, regB); - } - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ADD) - { - // todo: Use AVX 3-operand VADDSD if available - if (regR == regA) - { - x64Gen_addsd_xmmReg_xmmReg(x64GenContext, regR, regB); - } - else if (regR == regB) - { - x64Gen_addsd_xmmReg_xmmReg(x64GenContext, regR, regA); - } - else - { - x64Gen_movaps_xmmReg_xmmReg(x64GenContext, regR, regA); - x64Gen_addsd_xmmReg_xmmReg(x64GenContext, regR, regB); - } - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB ) - { - if( regR == regA ) - { - x64Gen_subsd_xmmReg_xmmReg(x64GenContext, regR, regB); - } - else if( regR == regB ) - { - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, regA); - x64Gen_subsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, regB); - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, regR, REG_RESV_FPR_TEMP); - } - else - { - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, regR, regA); - x64Gen_subsd_xmmReg_xmmReg(x64GenContext, regR, regB); - } - } - else - assert_dbg(); -} - -/* - * FPR = op (fprA, fprB, fprC) - */ -void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - uint32 regR = _regF64(imlInstruction->op_fpr_r_r_r_r.regR); - uint32 regA = _regF64(imlInstruction->op_fpr_r_r_r_r.regA); - uint32 regB = _regF64(imlInstruction->op_fpr_r_r_r_r.regB); - uint32 regC = _regF64(imlInstruction->op_fpr_r_r_r_r.regC); - - if( imlInstruction->operation == PPCREC_IML_OP_FPR_SELECT ) - { - x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, regA, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0)); - sint32 jumpInstructionOffset1 = x64GenContext->emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0); - // select C - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, regR, regC); - sint32 jumpInstructionOffset2 = 
x64GenContext->emitter->GetWriteIndex(); - x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); - // select B - PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->emitter->GetWriteIndex()); - x64Gen_movsd_xmmReg_xmmReg(x64GenContext, regR, regB); - // end - PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->emitter->GetWriteIndex()); - } - else - assert_dbg(); -} - -void PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - uint32 regR = _regF64(imlInstruction->op_fpr_r.regR); - - if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE ) - { - x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext, regR, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom)); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_LOAD_ONE ) - { - x64Gen_movsd_xmmReg_memReg64(x64GenContext, regR, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble1_1)); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ABS ) - { - x64Gen_andps_xmmReg_mem128Reg64(x64GenContext, regR, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_andAbsMaskBottom)); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS ) - { - x64Gen_orps_xmmReg_mem128Reg64(x64GenContext, regR, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom)); - } - else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM ) - { - // convert to 32bit single - x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, regR, regR); - // convert back to 64bit double - x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, regR, regR); - } - else if (imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64) - { - // convert bottom to 64bit double - 
x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, regR, regR); - } - else - { - cemu_assert_unimplemented(); - } -} - -void PPCRecompilerX64Gen_imlInstruction_fpr_compare(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction) -{ - auto regR = _reg8(imlInstruction->op_fpr_compare.regR); - auto regA = _regF64(imlInstruction->op_fpr_compare.regA); - auto regB = _regF64(imlInstruction->op_fpr_compare.regB); - - x64GenContext->emitter->XOR_dd(_reg32_from_reg8(regR), _reg32_from_reg8(regR)); - x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, regA, regB); - - if (imlInstruction->op_fpr_compare.cond == IMLCondition::UNORDERED_GT) - { - // GT case can be covered with a single SETnbe which checks CF==0 && ZF==0 (unordered sets both) - x64GenContext->emitter->SETcc_b(X86Cond::X86_CONDITION_NBE, regR); - return; - } - else if (imlInstruction->op_fpr_compare.cond == IMLCondition::UNORDERED_U) - { - // unordered case can be checked via PF - x64GenContext->emitter->SETcc_b(X86Cond::X86_CONDITION_PE, regR); - return; - } - - // remember unordered state - auto regTmp = _reg32_from_reg8(_reg32(REG_RESV_TEMP)); - x64GenContext->emitter->SETcc_b(X86Cond::X86_CONDITION_PO, regTmp); // by reversing the parity we can avoid having to XOR the value for masking the LT/EQ conditions - - X86Cond x86Cond; - switch (imlInstruction->op_fpr_compare.cond) - { - case IMLCondition::UNORDERED_LT: - x64GenContext->emitter->SETcc_b(X86Cond::X86_CONDITION_B, regR); - break; - case IMLCondition::UNORDERED_EQ: - x64GenContext->emitter->SETcc_b(X86Cond::X86_CONDITION_Z, regR); - break; - default: - cemu_assert_unimplemented(); - } - x64GenContext->emitter->AND_bb(_reg8_from_reg32(regR), _reg8_from_reg32(regTmp)); // if unordered (PF=1) then force LT/GT/EQ to zero -} \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/x86Emitter.h b/src/Cafe/HW/Espresso/Recompiler/BackendX64/x86Emitter.h deleted file mode 
100644 index eae3835d..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/x86Emitter.h +++ /dev/null @@ -1,4335 +0,0 @@ -#pragma once - -// x86-64 assembler/emitter -// auto generated. Do not edit this file manually - -typedef unsigned long long u64; -typedef unsigned int u32; -typedef unsigned short u16; -typedef unsigned char u8; -typedef signed long long s64; -typedef signed int s32; -typedef signed short s16; -typedef signed char s8; - -enum X86Reg : sint8 -{ - X86_REG_NONE = -1, - X86_REG_EAX = 0, - X86_REG_ECX = 1, - X86_REG_EDX = 2, - X86_REG_EBX = 3, - X86_REG_ESP = 4, - X86_REG_EBP = 5, - X86_REG_ESI = 6, - X86_REG_EDI = 7, - X86_REG_R8D = 8, - X86_REG_R9D = 9, - X86_REG_R10D = 10, - X86_REG_R11D = 11, - X86_REG_R12D = 12, - X86_REG_R13D = 13, - X86_REG_R14D = 14, - X86_REG_R15D = 15, - X86_REG_RAX = 0, - X86_REG_RCX = 1, - X86_REG_RDX = 2, - X86_REG_RBX = 3, - X86_REG_RSP = 4, - X86_REG_RBP = 5, - X86_REG_RSI = 6, - X86_REG_RDI = 7, - X86_REG_R8 = 8, - X86_REG_R9 = 9, - X86_REG_R10 = 10, - X86_REG_R11 = 11, - X86_REG_R12 = 12, - X86_REG_R13 = 13, - X86_REG_R14 = 14, - X86_REG_R15 = 15 -}; - -enum X86Cond : u8 -{ - X86_CONDITION_O = 0, - X86_CONDITION_NO = 1, - X86_CONDITION_B = 2, - X86_CONDITION_NB = 3, - X86_CONDITION_Z = 4, - X86_CONDITION_NZ = 5, - X86_CONDITION_BE = 6, - X86_CONDITION_NBE = 7, - X86_CONDITION_S = 8, - X86_CONDITION_NS = 9, - X86_CONDITION_PE = 10, - X86_CONDITION_PO = 11, - X86_CONDITION_L = 12, - X86_CONDITION_NL = 13, - X86_CONDITION_LE = 14, - X86_CONDITION_NLE = 15 -}; -class x86Assembler64 -{ -private: - std::vector m_buffer; - -public: - u8* GetBufferPtr() { return m_buffer.data(); }; - std::span GetBuffer() { return m_buffer; }; - u32 GetWriteIndex() { return (u32)m_buffer.size(); }; - void _emitU8(u8 v) { m_buffer.emplace_back(v); }; - void _emitU16(u16 v) { size_t writeIdx = m_buffer.size(); m_buffer.resize(writeIdx + 2); *(u16*)(m_buffer.data() + writeIdx) = v; }; - void _emitU32(u32 v) { size_t writeIdx = 
m_buffer.size(); m_buffer.resize(writeIdx + 4); *(u32*)(m_buffer.data() + writeIdx) = v; }; - void _emitU64(u64 v) { size_t writeIdx = m_buffer.size(); m_buffer.resize(writeIdx + 8); *(u64*)(m_buffer.data() + writeIdx) = v; }; - using GPR64 = X86Reg; - using GPR32 = X86Reg; - using GPR8_REX = X86Reg; - void LockPrefix() { _emitU8(0xF0); }; - void ADD_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x00); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void ADD_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x00); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADD_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x02); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADD_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x01); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void ADD_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x01); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void ADD_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x01); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADD_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x01); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADD_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x03); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADD_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x03); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void OR_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x08); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void OR_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x08); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void OR_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x0a); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void OR_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x09); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void OR_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x09); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void OR_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x09); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void OR_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x09); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void OR_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x0b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void OR_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x0b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADC_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x10); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void ADC_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x10); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADC_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x12); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADC_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x11); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void ADC_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x11); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void ADC_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x11); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADC_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x11); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADC_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x13); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADC_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x13); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SBB_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x18); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void SBB_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x18); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SBB_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x1a); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SBB_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x19); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void SBB_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x19); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void SBB_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x19); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SBB_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x19); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SBB_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x1b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SBB_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x1b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void AND_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x20); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void AND_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x20); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void AND_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x22); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void AND_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x21); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void AND_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x21); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void AND_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x21); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void AND_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x21); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void AND_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x23); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void AND_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x23); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SUB_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x28); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void SUB_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x28); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SUB_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x2a); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SUB_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x29); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void SUB_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x29); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void SUB_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x29); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SUB_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x29); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SUB_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x2b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SUB_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x2b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XOR_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x30); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void XOR_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x30); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XOR_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x32); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XOR_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x31); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void XOR_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x31); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void XOR_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x31); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XOR_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x31); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XOR_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x33); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XOR_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x33); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMP_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x38); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void CMP_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x38); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMP_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x3a); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMP_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x39); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void CMP_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x39); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void CMP_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x39); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMP_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x39); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMP_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x3b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMP_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x3b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void ADD_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((0 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void ADD_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((0 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void ADD_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - 
_emitU8(0x81); - _emitU8((mod << 6) | ((0 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void ADD_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((0 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void OR_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((1 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void OR_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((1 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void OR_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if 
((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((1 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void OR_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((1 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void ADC_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((2 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void ADC_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((2 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void ADC_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((2 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void ADC_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((2 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void SBB_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((3 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void SBB_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((3 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void SBB_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) 
|| ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((3 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void SBB_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((3 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void AND_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void AND_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void AND_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void AND_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void SUB_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void SUB_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void SUB_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - 
_emitU8(0x81); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void SUB_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void XOR_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((6 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void XOR_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((6 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void XOR_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if 
((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((6 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void XOR_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((6 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void CMP_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x81); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void CMP_qi32(GPR64 dst, s32 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x81); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - _emitU32((u32)imm); - } - void CMP_di32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void CMP_qi32_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x81); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void ADD_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((0 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void ADD_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((0 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void ADD_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index 
!= X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((0 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void ADD_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((0 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void OR_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((1 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void OR_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((1 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void OR_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((1 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void OR_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((1 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void ADC_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((2 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void ADC_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((2 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void ADC_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - 
_emitU8((mod << 6) | ((2 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void ADC_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((2 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void SBB_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((3 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void SBB_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((3 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void SBB_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | 
((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((3 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void SBB_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((3 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void AND_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void AND_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void AND_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - 
else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void AND_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void SUB_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void SUB_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void SUB_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void SUB_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void XOR_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((6 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void XOR_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((6 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void XOR_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != 
X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((6 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void XOR_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((6 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void CMP_di8(GPR32 dst, s8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x83); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void CMP_qi8(GPR64 dst, s8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x83); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void CMP_di8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void CMP_qi8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x83); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void TEST_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x84); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void TEST_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x84); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void TEST_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x85); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void TEST_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x85); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void TEST_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x85); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void TEST_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x85); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XCHG_bb(GPR8_REX dst, GPR8_REX src) - { - if ((dst >= 4) || (src >= 4)) - { - _emitU8(0x40 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - } - _emitU8(0x86); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - } - void XCHG_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x86); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XCHG_dd(GPR32 dst, GPR32 src) - { - if (((dst & 8) != 0) || ((src & 8) != 0)) - { - _emitU8(0x40 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - } - _emitU8(0x87); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - } - void XCHG_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - _emitU8(0x87); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - } - void XCHG_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x87); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void XCHG_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x87); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_bb(GPR8_REX dst, GPR8_REX src) - { - if ((src >= 4) || (dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x88); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void MOV_bb_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR8_REX src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src >= 4) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x88); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_bb_r(GPR8_REX dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst >= 4) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst >= 4) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x8a); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x89); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void MOV_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x89); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void MOV_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | 
((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x89); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x89); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_dd_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x8b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_qq_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x8b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void MOV_di32(GPR32 dst, s32 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xb8 | ((dst) & 7)); - _emitU32((u32)imm); - } - void MOV_qi64(GPR64 dst, s64 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0xb8 | ((dst) & 7)); - _emitU64((u64)imm); - } - void CALL_q(GPR64 dst) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xff); - _emitU8((3 << 6) | ((2 & 7) << 3) | (dst & 7)); - } - void CALL_q_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } 
- else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xff); - _emitU8((mod << 6) | ((2 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void IMUL_ddi32(GPR32 dst, GPR32 src, s32 imm) - { - if (((dst & 8) != 0) || ((src & 8) != 0)) - { - _emitU8(0x40 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - } - _emitU8(0x69); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - _emitU32((u32)imm); - } - void IMUL_qqi32(GPR64 dst, GPR64 src, s32 imm) - { - _emitU8(0x48 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - _emitU8(0x69); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - _emitU32((u32)imm); - } - void IMUL_ddi32_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x69); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void IMUL_qqi32_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s32 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x69); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU32((u32)imm); - } - void IMUL_ddi8(GPR32 dst, GPR32 src, s8 imm) - { - if (((dst & 8) != 0) || ((src & 8) != 0)) - { - _emitU8(0x40 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - } - _emitU8(0x6b); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - _emitU8((u8)imm); - } - void IMUL_qqi8(GPR64 dst, GPR64 src, s8 imm) - { - _emitU8(0x48 | ((src & 8) >> 3) | ((dst & 8) >> 1)); - _emitU8(0x6b); - _emitU8((3 << 6) | ((dst & 7) << 3) | (src & 7)); - _emitU8((u8)imm); - } - void IMUL_ddi8_r(GPR32 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((dst & 8) || (memReg & 8) || ((index != X86_REG_NONE) && 
(index & 8))) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((dst & 8) || (memReg & 8)) - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x6b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void IMUL_qqi8_r(GPR64 dst, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, s8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((dst & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x6b); - _emitU8((mod << 6) | ((dst & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void SHL_b_CL(GPR8_REX dst) - { - if ((dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xd2); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - } - void SHL_b_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xd2); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SHR_b_CL(GPR8_REX dst) - { - if ((dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xd2); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - } - void SHR_b_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xd2); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SAR_b_CL(GPR8_REX dst) - { - if ((dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xd2); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - } - void SAR_b_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xd2); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SHL_d_CL(GPR32 dst) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xd3); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - } - void SHL_q_CL(GPR64 dst) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0xd3); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - } - void SHL_d_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xd3); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SHL_q_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0xd3); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SHR_d_CL(GPR32 dst) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xd3); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - } - void SHR_q_CL(GPR64 dst) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0xd3); - _emitU8((3 << 6) | ((5 & 7) << 3) | (dst & 7)); - } - void SHR_d_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xd3); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SHR_q_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0xd3); - _emitU8((mod << 6) | ((5 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SAR_d_CL(GPR32 dst) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0xd3); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - } - void SAR_q_CL(GPR64 dst) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0xd3); - _emitU8((3 << 6) | ((7 & 7) << 3) | (dst & 7)); - } - void SAR_d_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0xd3); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void SAR_q_CL_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0xd3); - _emitU8((mod << 6) | ((7 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void JMP_j32(s32 imm) - { - _emitU8(0xe9); - _emitU32((u32)imm); - } - void Jcc_j32(X86Cond cond, s32 imm) - { - _emitU8(0x0f); - _emitU8(0x80 | (u8)cond); - _emitU32((u32)imm); - } - void SETcc_b(X86Cond cond, GPR8_REX dst) - { - if ((dst >= 4)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x0f); - _emitU8(0x90 | (u8)cond); - _emitU8((3 << 6) | (dst & 7)); - } - void SETcc_b_l(X86Cond cond, GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x0f); - _emitU8(0x90); - _emitU8((mod << 6) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMPXCHG_dd(GPR32 dst, GPR32 src) - { - if (((src & 8) != 0) || ((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - } - _emitU8(0x0f); - _emitU8(0xb1); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void CMPXCHG_qq(GPR64 dst, GPR64 src) - { - _emitU8(0x48 | ((dst & 8) >> 3) | ((src & 8) >> 1)); - _emitU8(0x0f); - _emitU8(0xb1); - _emitU8((3 << 6) | ((src & 7) << 3) | (dst & 7)); - } - void CMPXCHG_dd_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR32 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((src & 8) || (memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((src & 8) || (memReg & 8)) - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1)); - } - _emitU8(0x0f); - _emitU8(0xb1); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void CMPXCHG_qq_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, GPR64 src) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((src & 8) >> 1) | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x0f); - _emitU8(0xb1); - _emitU8((mod << 6) | ((src & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - } - void BSWAP_d(GPR32 dst) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x0f); - _emitU8(0xc8 | ((dst) & 7)); - } - void BSWAP_q(GPR64 dst) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x0f); - _emitU8(0xc8 | ((dst) & 7)); - } - void BT_du8(GPR32 dst, u8 imm) - { - if (((dst & 8) != 0)) - { - _emitU8(0x40 | ((dst & 8) >> 3)); - } - _emitU8(0x0f); - _emitU8(0xba); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void BT_qu8(GPR64 dst, u8 imm) - { - _emitU8(0x48 | ((dst & 8) >> 3)); - _emitU8(0x0f); - _emitU8(0xba); - _emitU8((3 << 6) | ((4 & 7) << 3) | (dst & 7)); - _emitU8((u8)imm); - } - void BT_du8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, u8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - 
cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - if ((memReg & 8) || ((index != X86_REG_NONE) && (index & 8))) - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2)); - } - else - { - if ((memReg & 8)) - _emitU8(0x40 | ((memReg & 8) >> 1)); - } - _emitU8(0x0f); - _emitU8(0xba); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } - void BT_qu8_l(GPR64 memReg, sint32 offset, GPR64 index, uint8 scaler, u8 imm) - { - uint8 mod; - if (offset == 0 && (memReg & 7) != 5) mod = 0; - else if (offset == (s32)(s8)offset) mod = 1; - else mod = 2; - bool sib_use = (scaler != 0 && index != X86_REG_NONE); - if ((memReg & 7) == 4) - { - cemu_assert_debug(index == X86_REG_NONE); - index = memReg; - sib_use = true; - } - if (sib_use) - { - _emitU8(0x40 | ((memReg & 8) >> 3) | ((index & 8) >> 2) | 0x08); - } - else - { - _emitU8(0x40 | ((memReg & 8) >> 1) | 0x08); - } - _emitU8(0x0f); - _emitU8(0xba); - _emitU8((mod << 6) | ((4 & 7) << 3) | (sib_use ? 
4 : (memReg & 7))); - if (sib_use) - { - _emitU8((0 << 6) | ((memReg & 7)) | ((index & 7) << 3)); - } - if (mod == 1) _emitU8((u8)offset); - else if (mod == 2) _emitU32((u32)offset); - _emitU8((u8)imm); - } -}; diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IML.h b/src/Cafe/HW/Espresso/Recompiler/IML/IML.h deleted file mode 100644 index bc0c27c5..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IML.h +++ /dev/null @@ -1,16 +0,0 @@ -#pragma once - -#include "IMLInstruction.h" -#include "IMLSegment.h" - -// optimizer passes -void IMLOptimizer_OptimizeDirectFloatCopies(struct ppcImlGenContext_t* ppcImlGenContext); -void IMLOptimizer_OptimizeDirectIntegerCopies(struct ppcImlGenContext_t* ppcImlGenContext); -void PPCRecompiler_optimizePSQLoadAndStore(struct ppcImlGenContext_t* ppcImlGenContext); - -void IMLOptimizer_StandardOptimizationPass(ppcImlGenContext_t& ppcImlGenContext); - -// debug -void IMLDebug_DisassembleInstruction(const IMLInstruction& inst, std::string& disassemblyLineOut); -void IMLDebug_DumpSegment(struct ppcImlGenContext_t* ctx, IMLSegment* imlSegment, bool printLivenessRangeInfo = false); -void IMLDebug_Dump(struct ppcImlGenContext_t* ppcImlGenContext, bool printLivenessRangeInfo = false); diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLAnalyzer.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLAnalyzer.cpp deleted file mode 100644 index 6ae4b591..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLAnalyzer.cpp +++ /dev/null @@ -1,5 +0,0 @@ -#include "IML.h" -//#include "PPCRecompilerIml.h" -#include "util/helpers/fixedSizeList.h" - -#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h" diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLDebug.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLDebug.cpp deleted file mode 100644 index cd269869..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLDebug.cpp +++ /dev/null @@ -1,561 +0,0 @@ -#include "IML.h" -#include "IMLInstruction.h" -#include "IMLSegment.h" -#include 
"IMLRegisterAllocatorRanges.h" -#include "util/helpers/StringBuf.h" - -#include "../PPCRecompiler.h" - -const char* IMLDebug_GetOpcodeName(const IMLInstruction* iml) -{ - static char _tempOpcodename[32]; - uint32 op = iml->operation; - if (op == PPCREC_IML_OP_ASSIGN) - return "MOV"; - else if (op == PPCREC_IML_OP_ADD) - return "ADD"; - else if (op == PPCREC_IML_OP_ADD_WITH_CARRY) - return "ADC"; - else if (op == PPCREC_IML_OP_SUB) - return "SUB"; - else if (op == PPCREC_IML_OP_OR) - return "OR"; - else if (op == PPCREC_IML_OP_AND) - return "AND"; - else if (op == PPCREC_IML_OP_XOR) - return "XOR"; - else if (op == PPCREC_IML_OP_LEFT_SHIFT) - return "LSH"; - else if (op == PPCREC_IML_OP_RIGHT_SHIFT_U) - return "RSH"; - else if (op == PPCREC_IML_OP_RIGHT_SHIFT_S) - return "ARSH"; - else if (op == PPCREC_IML_OP_LEFT_ROTATE) - return "LROT"; - else if (op == PPCREC_IML_OP_MULTIPLY_SIGNED) - return "MULS"; - else if (op == PPCREC_IML_OP_DIVIDE_SIGNED) - return "DIVS"; - else if (op == PPCREC_IML_OP_FPR_ASSIGN) - return "FMOV"; - else if (op == PPCREC_IML_OP_FPR_ADD) - return "FADD"; - else if (op == PPCREC_IML_OP_FPR_SUB) - return "FSUB"; - else if (op == PPCREC_IML_OP_FPR_MULTIPLY) - return "FMUL"; - else if (op == PPCREC_IML_OP_FPR_DIVIDE) - return "FDIV"; - else if (op == PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64) - return "F32TOF64"; - else if (op == PPCREC_IML_OP_FPR_ABS) - return "FABS"; - else if (op == PPCREC_IML_OP_FPR_NEGATE) - return "FNEG"; - else if (op == PPCREC_IML_OP_FPR_NEGATIVE_ABS) - return "FNABS"; - else if (op == PPCREC_IML_OP_FPR_FLOAT_TO_INT) - return "F2I"; - else if (op == PPCREC_IML_OP_FPR_INT_TO_FLOAT) - return "I2F"; - else if (op == PPCREC_IML_OP_FPR_BITCAST_INT_TO_FLOAT) - return "BITMOVE"; - - sprintf(_tempOpcodename, "OP0%02x_T%d", iml->operation, iml->type); - return _tempOpcodename; -} - -std::string IMLDebug_GetRegName(IMLReg r) -{ - std::string regName; - uint32 regId = r.GetRegID(); - switch (r.GetRegFormat()) - { - case 
IMLRegFormat::F32: - regName.append("f"); - break; - case IMLRegFormat::F64: - regName.append("fd"); - break; - case IMLRegFormat::I32: - regName.append("i"); - break; - case IMLRegFormat::I64: - regName.append("r"); - break; - default: - DEBUG_BREAK; - } - regName.append(fmt::format("{}", regId)); - return regName; -} - -void IMLDebug_AppendRegisterParam(StringBuf& strOutput, IMLReg virtualRegister, bool isLast = false) -{ - strOutput.add(IMLDebug_GetRegName(virtualRegister)); - if (!isLast) - strOutput.add(", "); -} - -void IMLDebug_AppendS32Param(StringBuf& strOutput, sint32 val, bool isLast = false) -{ - if (val < 0) - { - strOutput.add("-"); - val = -val; - } - strOutput.addFmt("0x{:08x}", val); - if (!isLast) - strOutput.add(", "); -} - -void IMLDebug_PrintLivenessRangeInfo(StringBuf& currentLineText, IMLSegment* imlSegment, sint32 offset) -{ - // pad to 70 characters - sint32 index = currentLineText.getLen(); - while (index < 70) - { - currentLineText.add(" "); - index++; - } - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - if (subrangeItr->interval.start.GetInstructionIndexEx() == offset) - { - if(subrangeItr->interval.start.IsInstructionIndex() && !subrangeItr->interval.start.IsOnInputEdge()) - currentLineText.add("."); - else - currentLineText.add("|"); - - currentLineText.addFmt("{:<4}", subrangeItr->GetVirtualRegister()); - } - else if (subrangeItr->interval.end.GetInstructionIndexEx() == offset) - { - if(subrangeItr->interval.end.IsInstructionIndex() && !subrangeItr->interval.end.IsOnOutputEdge()) - currentLineText.add("* "); - else - currentLineText.add("| "); - } - else if (subrangeItr->interval.ContainsInstructionIndexEx(offset)) - { - currentLineText.add("| "); - } - else - { - currentLineText.add(" "); - } - index += 5; - // next - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } -} - -std::string IMLDebug_GetSegmentName(ppcImlGenContext_t* ctx, IMLSegment* seg) -{ - if (!ctx) - { - 
return ""; - } - // find segment index - for (size_t i = 0; i < ctx->segmentList2.size(); i++) - { - if (ctx->segmentList2[i] == seg) - { - return fmt::format("Seg{:04x}", i); - } - } - return ""; -} - -std::string IMLDebug_GetConditionName(IMLCondition cond) -{ - switch (cond) - { - case IMLCondition::EQ: - return "EQ"; - case IMLCondition::NEQ: - return "NEQ"; - case IMLCondition::UNSIGNED_GT: - return "UGT"; - case IMLCondition::UNSIGNED_LT: - return "ULT"; - case IMLCondition::SIGNED_GT: - return "SGT"; - case IMLCondition::SIGNED_LT: - return "SLT"; - default: - cemu_assert_unimplemented(); - } - return "ukn"; -} - -void IMLDebug_DisassembleInstruction(const IMLInstruction& inst, std::string& disassemblyLineOut) -{ - const sint32 lineOffsetParameters = 10;//18; - - StringBuf strOutput(1024); - strOutput.reset(); - if (inst.type == PPCREC_IML_TYPE_R_NAME || inst.type == PPCREC_IML_TYPE_NAME_R) - { - if (inst.type == PPCREC_IML_TYPE_R_NAME) - strOutput.add("R_NAME"); - else - strOutput.add("NAME_R"); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - - if(inst.type == PPCREC_IML_TYPE_R_NAME) - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_name.regR); - - strOutput.add("name_"); - if (inst.op_r_name.name >= PPCREC_NAME_R0 && inst.op_r_name.name < (PPCREC_NAME_R0 + 999)) - { - strOutput.addFmt("r{}", inst.op_r_name.name - PPCREC_NAME_R0); - } - if (inst.op_r_name.name >= PPCREC_NAME_FPR_HALF && inst.op_r_name.name < (PPCREC_NAME_FPR_HALF + 32*2)) - { - strOutput.addFmt("f{}", inst.op_r_name.name - ((PPCREC_NAME_FPR_HALF - inst.op_r_name.name)/2)); - if ((inst.op_r_name.name-PPCREC_NAME_FPR_HALF)&1) - strOutput.add(".ps1"); - else - strOutput.add(".ps0"); - } - else if (inst.op_r_name.name >= PPCREC_NAME_SPR0 && inst.op_r_name.name < (PPCREC_NAME_SPR0 + 999)) - { - strOutput.addFmt("spr{}", inst.op_r_name.name - PPCREC_NAME_SPR0); - } - else if (inst.op_r_name.name >= PPCREC_NAME_CR && inst.op_r_name.name <= PPCREC_NAME_CR_LAST) 
- strOutput.addFmt("cr{}", inst.op_r_name.name - PPCREC_NAME_CR); - else if (inst.op_r_name.name == PPCREC_NAME_XER_CA) - strOutput.add("xer.ca"); - else if (inst.op_r_name.name == PPCREC_NAME_XER_SO) - strOutput.add("xer.so"); - else if (inst.op_r_name.name == PPCREC_NAME_XER_OV) - strOutput.add("xer.ov"); - else if (inst.op_r_name.name == PPCREC_NAME_CPU_MEMRES_EA) - strOutput.add("cpuReservation.ea"); - else if (inst.op_r_name.name == PPCREC_NAME_CPU_MEMRES_VAL) - strOutput.add("cpuReservation.value"); - else - { - strOutput.addFmt("name_ukn{}", inst.op_r_name.name); - } - if (inst.type != PPCREC_IML_TYPE_R_NAME) - { - strOutput.add(", "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_name.regR, true); - } - - } - else if (inst.type == PPCREC_IML_TYPE_R_R) - { - strOutput.addFmt("{}", IMLDebug_GetOpcodeName(&inst)); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r.regR); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r.regA, true); - } - else if (inst.type == PPCREC_IML_TYPE_R_R_R) - { - strOutput.addFmt("{}", IMLDebug_GetOpcodeName(&inst)); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r.regR); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r.regA); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r.regB, true); - } - else if (inst.type == PPCREC_IML_TYPE_R_R_R_CARRY) - { - strOutput.addFmt("{}", IMLDebug_GetOpcodeName(&inst)); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r_carry.regR); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r_carry.regA); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r_carry.regB); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_r_carry.regCarry, true); - } - else if (inst.type == PPCREC_IML_TYPE_COMPARE) - { - strOutput.add("CMP 
"); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_compare.regA); - IMLDebug_AppendRegisterParam(strOutput, inst.op_compare.regB); - strOutput.addFmt("{}", IMLDebug_GetConditionName(inst.op_compare.cond)); - strOutput.add(" -> "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_compare.regR, true); - } - else if (inst.type == PPCREC_IML_TYPE_COMPARE_S32) - { - strOutput.add("CMP "); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_compare_s32.regA); - strOutput.addFmt("{}", inst.op_compare_s32.immS32); - strOutput.addFmt(", {}", IMLDebug_GetConditionName(inst.op_compare_s32.cond)); - strOutput.add(" -> "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_compare_s32.regR, true); - } - else if (inst.type == PPCREC_IML_TYPE_CONDITIONAL_JUMP) - { - strOutput.add("CJUMP "); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - IMLDebug_AppendRegisterParam(strOutput, inst.op_conditional_jump.registerBool, true); - if (!inst.op_conditional_jump.mustBeTrue) - strOutput.add("(inverted)"); - } - else if (inst.type == PPCREC_IML_TYPE_JUMP) - { - strOutput.add("JUMP"); - } - else if (inst.type == PPCREC_IML_TYPE_R_R_S32) - { - strOutput.addFmt("{}", IMLDebug_GetOpcodeName(&inst)); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_s32.regR); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_s32.regA); - IMLDebug_AppendS32Param(strOutput, inst.op_r_r_s32.immS32, true); - } - else if (inst.type == PPCREC_IML_TYPE_R_R_S32_CARRY) - { - strOutput.addFmt("{}", IMLDebug_GetOpcodeName(&inst)); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_s32_carry.regR); - IMLDebug_AppendRegisterParam(strOutput, 
inst.op_r_r_s32_carry.regA); - IMLDebug_AppendS32Param(strOutput, inst.op_r_r_s32_carry.immS32); - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_r_s32_carry.regCarry, true); - } - else if (inst.type == PPCREC_IML_TYPE_R_S32) - { - strOutput.addFmt("{}", IMLDebug_GetOpcodeName(&inst)); - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - - IMLDebug_AppendRegisterParam(strOutput, inst.op_r_immS32.regR); - IMLDebug_AppendS32Param(strOutput, inst.op_r_immS32.immS32, true); - } - else if (inst.type == PPCREC_IML_TYPE_LOAD || inst.type == PPCREC_IML_TYPE_STORE || - inst.type == PPCREC_IML_TYPE_LOAD_INDEXED || inst.type == PPCREC_IML_TYPE_STORE_INDEXED) - { - if (inst.type == PPCREC_IML_TYPE_LOAD || inst.type == PPCREC_IML_TYPE_LOAD_INDEXED) - strOutput.add("LD_"); - else - strOutput.add("ST_"); - - if (inst.op_storeLoad.flags2.signExtend) - strOutput.add("S"); - else - strOutput.add("U"); - strOutput.addFmt("{}", inst.op_storeLoad.copyWidth); - - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - - IMLDebug_AppendRegisterParam(strOutput, inst.op_storeLoad.registerData); - - if (inst.type == PPCREC_IML_TYPE_LOAD_INDEXED || inst.type == PPCREC_IML_TYPE_STORE_INDEXED) - strOutput.addFmt("[{}+{}]", IMLDebug_GetRegName(inst.op_storeLoad.registerMem), IMLDebug_GetRegName(inst.op_storeLoad.registerMem2)); - else - strOutput.addFmt("[{}+{}]", IMLDebug_GetRegName(inst.op_storeLoad.registerMem), inst.op_storeLoad.immS32); - } - else if (inst.type == PPCREC_IML_TYPE_ATOMIC_CMP_STORE) - { - strOutput.add("ATOMIC_ST_U32"); - - while ((sint32)strOutput.getLen() < lineOffsetParameters) - strOutput.add(" "); - - IMLDebug_AppendRegisterParam(strOutput, inst.op_atomic_compare_store.regEA); - IMLDebug_AppendRegisterParam(strOutput, inst.op_atomic_compare_store.regCompareValue); - IMLDebug_AppendRegisterParam(strOutput, inst.op_atomic_compare_store.regWriteValue); - IMLDebug_AppendRegisterParam(strOutput, 
inst.op_atomic_compare_store.regBoolOut, true); - } - else if (inst.type == PPCREC_IML_TYPE_NO_OP) - { - strOutput.add("NOP"); - } - else if (inst.type == PPCREC_IML_TYPE_MACRO) - { - if (inst.operation == PPCREC_IML_MACRO_B_TO_REG) - { - strOutput.addFmt("MACRO B_TO_REG {}", IMLDebug_GetRegName(inst.op_macro.paramReg)); - } - else if (inst.operation == PPCREC_IML_MACRO_BL) - { - strOutput.addFmt("MACRO BL 0x{:08x} -> 0x{:08x} cycles (depr): {}", inst.op_macro.param, inst.op_macro.param2, (sint32)inst.op_macro.paramU16); - } - else if (inst.operation == PPCREC_IML_MACRO_B_FAR) - { - strOutput.addFmt("MACRO B_FAR 0x{:08x} -> 0x{:08x} cycles (depr): {}", inst.op_macro.param, inst.op_macro.param2, (sint32)inst.op_macro.paramU16); - } - else if (inst.operation == PPCREC_IML_MACRO_LEAVE) - { - strOutput.addFmt("MACRO LEAVE ppc: 0x{:08x}", inst.op_macro.param); - } - else if (inst.operation == PPCREC_IML_MACRO_HLE) - { - strOutput.addFmt("MACRO HLE ppcAddr: 0x{:08x} funcId: 0x{:08x}", inst.op_macro.param, inst.op_macro.param2); - } - else if (inst.operation == PPCREC_IML_MACRO_COUNT_CYCLES) - { - strOutput.addFmt("MACRO COUNT_CYCLES cycles: {}", inst.op_macro.param); - } - else - { - strOutput.addFmt("MACRO ukn operation {}", inst.operation); - } - } - else if (inst.type == PPCREC_IML_TYPE_FPR_LOAD) - { - strOutput.addFmt("{} = ", IMLDebug_GetRegName(inst.op_storeLoad.registerData)); - if (inst.op_storeLoad.flags2.signExtend) - strOutput.add("S"); - else - strOutput.add("U"); - strOutput.addFmt("{} [{}+{}] mode {}", inst.op_storeLoad.copyWidth / 8, IMLDebug_GetRegName(inst.op_storeLoad.registerMem), inst.op_storeLoad.immS32, inst.op_storeLoad.mode); - if (inst.op_storeLoad.flags2.notExpanded) - { - strOutput.addFmt(" "); - } - } - else if (inst.type == PPCREC_IML_TYPE_FPR_STORE) - { - if (inst.op_storeLoad.flags2.signExtend) - strOutput.add("S"); - else - strOutput.add("U"); - strOutput.addFmt("{} [t{}+{}]", inst.op_storeLoad.copyWidth / 8, 
inst.op_storeLoad.registerMem.GetRegID(), inst.op_storeLoad.immS32); - strOutput.addFmt(" = {} mode {}", IMLDebug_GetRegName(inst.op_storeLoad.registerData), inst.op_storeLoad.mode); - } - else if (inst.type == PPCREC_IML_TYPE_FPR_R) - { - strOutput.addFmt("{:<6} ", IMLDebug_GetOpcodeName(&inst)); - strOutput.addFmt("{}", IMLDebug_GetRegName(inst.op_fpr_r.regR)); - } - else if (inst.type == PPCREC_IML_TYPE_FPR_R_R) - { - strOutput.addFmt("{:<6} ", IMLDebug_GetOpcodeName(&inst)); - strOutput.addFmt("{}, {}", IMLDebug_GetRegName(inst.op_fpr_r_r.regR), IMLDebug_GetRegName(inst.op_fpr_r_r.regA)); - } - else if (inst.type == PPCREC_IML_TYPE_FPR_R_R_R_R) - { - strOutput.addFmt("{:<6} ", IMLDebug_GetOpcodeName(&inst)); - strOutput.addFmt("{}, {}, {}, {}", IMLDebug_GetRegName(inst.op_fpr_r_r_r_r.regR), IMLDebug_GetRegName(inst.op_fpr_r_r_r_r.regA), IMLDebug_GetRegName(inst.op_fpr_r_r_r_r.regB), IMLDebug_GetRegName(inst.op_fpr_r_r_r_r.regC)); - } - else if (inst.type == PPCREC_IML_TYPE_FPR_R_R_R) - { - strOutput.addFmt("{:<6} ", IMLDebug_GetOpcodeName(&inst)); - strOutput.addFmt("{}, {}, {}", IMLDebug_GetRegName(inst.op_fpr_r_r_r.regR), IMLDebug_GetRegName(inst.op_fpr_r_r_r.regA), IMLDebug_GetRegName(inst.op_fpr_r_r_r.regB)); - } - else if (inst.type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) - { - strOutput.addFmt("CYCLE_CHECK"); - } - else if (inst.type == PPCREC_IML_TYPE_X86_EFLAGS_JCC) - { - strOutput.addFmt("X86_JCC {}", IMLDebug_GetConditionName(inst.op_x86_eflags_jcc.cond)); - } - else - { - strOutput.addFmt("Unknown iml type {}", inst.type); - } - disassemblyLineOut.assign(strOutput.c_str()); -} - -void IMLDebug_DumpSegment(ppcImlGenContext_t* ctx, IMLSegment* imlSegment, bool printLivenessRangeInfo) -{ - StringBuf strOutput(4096); - - strOutput.addFmt("SEGMENT {} | PPC=0x{:08x} Loop-depth {}", IMLDebug_GetSegmentName(ctx, imlSegment), imlSegment->ppcAddress, imlSegment->loopDepth); - if (imlSegment->isEnterable) - { - strOutput.addFmt(" ENTERABLE (0x{:08x})", 
imlSegment->enterPPCAddress); - } - if (imlSegment->deadCodeEliminationHintSeg) - { - strOutput.addFmt(" InheritOverwrite: {}", IMLDebug_GetSegmentName(ctx, imlSegment->deadCodeEliminationHintSeg)); - } - cemuLog_log(LogType::Force, "{}", strOutput.c_str()); - - if (printLivenessRangeInfo) - { - strOutput.reset(); - IMLDebug_PrintLivenessRangeInfo(strOutput, imlSegment, RA_INTER_RANGE_START); - cemuLog_log(LogType::Force, "{}", strOutput.c_str()); - } - //debug_printf("\n"); - strOutput.reset(); - - std::string disassemblyLine; - for (sint32 i = 0; i < imlSegment->imlList.size(); i++) - { - const IMLInstruction& inst = imlSegment->imlList[i]; - // don't log NOP instructions - if (inst.type == PPCREC_IML_TYPE_NO_OP) - continue; - strOutput.reset(); - strOutput.addFmt("{:02x} ", i); - //cemuLog_log(LogType::Force, "{:02x} ", i); - disassemblyLine.clear(); - IMLDebug_DisassembleInstruction(inst, disassemblyLine); - strOutput.add(disassemblyLine); - if (printLivenessRangeInfo) - { - IMLDebug_PrintLivenessRangeInfo(strOutput, imlSegment, i); - } - cemuLog_log(LogType::Force, "{}", strOutput.c_str()); - } - // all ranges - if (printLivenessRangeInfo) - { - strOutput.reset(); - strOutput.add("Ranges-VirtReg "); - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - strOutput.addFmt("v{:<4}", (uint32)subrangeItr->GetVirtualRegister()); - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - cemuLog_log(LogType::Force, "{}", strOutput.c_str()); - strOutput.reset(); - strOutput.add("Ranges-PhysReg "); - subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - strOutput.addFmt("p{:<4}", subrangeItr->GetPhysicalRegister()); - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - cemuLog_log(LogType::Force, "{}", strOutput.c_str()); - } - // branch info - strOutput.reset(); - strOutput.add("Links from: "); - for (sint32 i = 0; i < imlSegment->list_prevSegments.size(); i++) - { - if (i) 
- strOutput.add(", "); - strOutput.addFmt("{}", IMLDebug_GetSegmentName(ctx, imlSegment->list_prevSegments[i]).c_str()); - } - cemuLog_log(LogType::Force, "{}", strOutput.c_str()); - if (imlSegment->nextSegmentBranchNotTaken) - cemuLog_log(LogType::Force, "BranchNotTaken: {}", IMLDebug_GetSegmentName(ctx, imlSegment->nextSegmentBranchNotTaken).c_str()); - if (imlSegment->nextSegmentBranchTaken) - cemuLog_log(LogType::Force, "BranchTaken: {}", IMLDebug_GetSegmentName(ctx, imlSegment->nextSegmentBranchTaken).c_str()); - if (imlSegment->nextSegmentIsUncertain) - cemuLog_log(LogType::Force, "Dynamic target"); -} - -void IMLDebug_Dump(ppcImlGenContext_t* ppcImlGenContext, bool printLivenessRangeInfo) -{ - for (size_t i = 0; i < ppcImlGenContext->segmentList2.size(); i++) - { - IMLDebug_DumpSegment(ppcImlGenContext, ppcImlGenContext->segmentList2[i], printLivenessRangeInfo); - cemuLog_log(LogType::Force, ""); - } -} diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.cpp deleted file mode 100644 index 997de4e9..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.cpp +++ /dev/null @@ -1,536 +0,0 @@ -#include "IMLInstruction.h" -#include "IML.h" - -#include "../PPCRecompiler.h" -#include "../PPCRecompilerIml.h" - -// return true if an instruction has side effects on top of just reading and writing registers -bool IMLInstruction::HasSideEffects() const -{ - bool hasSideEffects = true; - if(type == PPCREC_IML_TYPE_R_R || type == PPCREC_IML_TYPE_R_R_S32 || type == PPCREC_IML_TYPE_COMPARE || type == PPCREC_IML_TYPE_COMPARE_S32) - hasSideEffects = false; - // todo - add more cases - return hasSideEffects; -} - -void IMLInstruction::CheckRegisterUsage(IMLUsedRegisters* registersUsed) const -{ - registersUsed->readGPR1 = IMLREG_INVALID; - registersUsed->readGPR2 = IMLREG_INVALID; - registersUsed->readGPR3 = IMLREG_INVALID; - registersUsed->readGPR4 = IMLREG_INVALID; - registersUsed->writtenGPR1 = 
IMLREG_INVALID; - registersUsed->writtenGPR2 = IMLREG_INVALID; - if (type == PPCREC_IML_TYPE_R_NAME) - { - registersUsed->writtenGPR1 = op_r_name.regR; - } - else if (type == PPCREC_IML_TYPE_NAME_R) - { - registersUsed->readGPR1 = op_r_name.regR; - } - else if (type == PPCREC_IML_TYPE_R_R) - { - if (operation == PPCREC_IML_OP_X86_CMP) - { - // both operands are read only - registersUsed->readGPR1 = op_r_r.regR; - registersUsed->readGPR2 = op_r_r.regA; - } - else if ( - operation == PPCREC_IML_OP_ASSIGN || - operation == PPCREC_IML_OP_ENDIAN_SWAP || - operation == PPCREC_IML_OP_CNTLZW || - operation == PPCREC_IML_OP_NOT || - operation == PPCREC_IML_OP_NEG || - operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32 || - operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32) - { - // result is written, operand is read - registersUsed->writtenGPR1 = op_r_r.regR; - registersUsed->readGPR1 = op_r_r.regA; - } - else - cemu_assert_unimplemented(); - } - else if (type == PPCREC_IML_TYPE_R_S32) - { - cemu_assert_debug(operation != PPCREC_IML_OP_ADD && - operation != PPCREC_IML_OP_SUB && - operation != PPCREC_IML_OP_AND && - operation != PPCREC_IML_OP_OR && - operation != PPCREC_IML_OP_XOR); // deprecated, use r_r_s32 for these - - if (operation == PPCREC_IML_OP_LEFT_ROTATE) - { - // register operand is read and write - registersUsed->readGPR1 = op_r_immS32.regR; - registersUsed->writtenGPR1 = op_r_immS32.regR; - } - else if (operation == PPCREC_IML_OP_X86_CMP) - { - // register operand is read only - registersUsed->readGPR1 = op_r_immS32.regR; - } - else - { - // register operand is write only - // todo - use explicit lists, avoid default cases - registersUsed->writtenGPR1 = op_r_immS32.regR; - } - } - else if (type == PPCREC_IML_TYPE_R_R_S32) - { - registersUsed->writtenGPR1 = op_r_r_s32.regR; - registersUsed->readGPR1 = op_r_r_s32.regA; - } - else if (type == PPCREC_IML_TYPE_R_R_S32_CARRY) - { - registersUsed->writtenGPR1 = op_r_r_s32_carry.regR; - registersUsed->readGPR1 = 
op_r_r_s32_carry.regA; - // some operations read carry - switch (operation) - { - case PPCREC_IML_OP_ADD_WITH_CARRY: - registersUsed->readGPR2 = op_r_r_s32_carry.regCarry; - break; - case PPCREC_IML_OP_ADD: - break; - default: - cemu_assert_unimplemented(); - } - // carry is always written - registersUsed->writtenGPR2 = op_r_r_s32_carry.regCarry; - } - else if (type == PPCREC_IML_TYPE_R_R_R) - { - // in all cases result is written and other operands are read only - // with the exception of XOR, where if regA == regB then all bits are zeroed out. So we don't consider it a read - registersUsed->writtenGPR1 = op_r_r_r.regR; - if(!(operation == PPCREC_IML_OP_XOR && op_r_r_r.regA == op_r_r_r.regB)) - { - registersUsed->readGPR1 = op_r_r_r.regA; - registersUsed->readGPR2 = op_r_r_r.regB; - } - } - else if (type == PPCREC_IML_TYPE_R_R_R_CARRY) - { - registersUsed->writtenGPR1 = op_r_r_r_carry.regR; - registersUsed->readGPR1 = op_r_r_r_carry.regA; - registersUsed->readGPR2 = op_r_r_r_carry.regB; - // some operations read carry - switch (operation) - { - case PPCREC_IML_OP_ADD_WITH_CARRY: - registersUsed->readGPR3 = op_r_r_r_carry.regCarry; - break; - case PPCREC_IML_OP_ADD: - break; - default: - cemu_assert_unimplemented(); - } - // carry is always written - registersUsed->writtenGPR2 = op_r_r_r_carry.regCarry; - } - else if (type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) - { - // no effect on registers - } - else if (type == PPCREC_IML_TYPE_NO_OP) - { - // no effect on registers - } - else if (type == PPCREC_IML_TYPE_MACRO) - { - if (operation == PPCREC_IML_MACRO_BL || operation == PPCREC_IML_MACRO_B_FAR || operation == PPCREC_IML_MACRO_LEAVE || operation == PPCREC_IML_MACRO_DEBUGBREAK || operation == PPCREC_IML_MACRO_COUNT_CYCLES || operation == PPCREC_IML_MACRO_HLE) - { - // no effect on registers - } - else if (operation == PPCREC_IML_MACRO_B_TO_REG) - { - cemu_assert_debug(op_macro.paramReg.IsValid()); - registersUsed->readGPR1 = op_macro.paramReg; - } - else - 
cemu_assert_unimplemented(); - } - else if (type == PPCREC_IML_TYPE_COMPARE) - { - registersUsed->readGPR1 = op_compare.regA; - registersUsed->readGPR2 = op_compare.regB; - registersUsed->writtenGPR1 = op_compare.regR; - } - else if (type == PPCREC_IML_TYPE_COMPARE_S32) - { - registersUsed->readGPR1 = op_compare_s32.regA; - registersUsed->writtenGPR1 = op_compare_s32.regR; - } - else if (type == PPCREC_IML_TYPE_CONDITIONAL_JUMP) - { - registersUsed->readGPR1 = op_conditional_jump.registerBool; - } - else if (type == PPCREC_IML_TYPE_JUMP) - { - // no registers affected - } - else if (type == PPCREC_IML_TYPE_LOAD) - { - registersUsed->writtenGPR1 = op_storeLoad.registerData; - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR1 = op_storeLoad.registerMem; - } - else if (type == PPCREC_IML_TYPE_LOAD_INDEXED) - { - registersUsed->writtenGPR1 = op_storeLoad.registerData; - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR1 = op_storeLoad.registerMem; - if (op_storeLoad.registerMem2.IsValid()) - registersUsed->readGPR2 = op_storeLoad.registerMem2; - } - else if (type == PPCREC_IML_TYPE_STORE) - { - registersUsed->readGPR1 = op_storeLoad.registerData; - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR2 = op_storeLoad.registerMem; - } - else if (type == PPCREC_IML_TYPE_STORE_INDEXED) - { - registersUsed->readGPR1 = op_storeLoad.registerData; - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR2 = op_storeLoad.registerMem; - if (op_storeLoad.registerMem2.IsValid()) - registersUsed->readGPR3 = op_storeLoad.registerMem2; - } - else if (type == PPCREC_IML_TYPE_ATOMIC_CMP_STORE) - { - registersUsed->readGPR1 = op_atomic_compare_store.regEA; - registersUsed->readGPR2 = op_atomic_compare_store.regCompareValue; - registersUsed->readGPR3 = op_atomic_compare_store.regWriteValue; - registersUsed->writtenGPR1 = op_atomic_compare_store.regBoolOut; - } - else if (type == PPCREC_IML_TYPE_CALL_IMM) - { - if 
(op_call_imm.regParam0.IsValid()) - registersUsed->readGPR1 = op_call_imm.regParam0; - if (op_call_imm.regParam1.IsValid()) - registersUsed->readGPR2 = op_call_imm.regParam1; - if (op_call_imm.regParam2.IsValid()) - registersUsed->readGPR3 = op_call_imm.regParam2; - registersUsed->writtenGPR1 = op_call_imm.regReturn; - } - else if (type == PPCREC_IML_TYPE_FPR_LOAD) - { - // fpr load operation - registersUsed->writtenGPR1 = op_storeLoad.registerData; - // address is in gpr register - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR1 = op_storeLoad.registerMem; - } - else if (type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED) - { - // fpr load operation - registersUsed->writtenGPR1 = op_storeLoad.registerData; - // address is in gpr registers - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR1 = op_storeLoad.registerMem; - if (op_storeLoad.registerMem2.IsValid()) - registersUsed->readGPR2 = op_storeLoad.registerMem2; - } - else if (type == PPCREC_IML_TYPE_FPR_STORE) - { - // fpr store operation - registersUsed->readGPR1 = op_storeLoad.registerData; - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR2 = op_storeLoad.registerMem; - } - else if (type == PPCREC_IML_TYPE_FPR_STORE_INDEXED) - { - // fpr store operation - registersUsed->readGPR1 = op_storeLoad.registerData; - // address is in gpr registers - if (op_storeLoad.registerMem.IsValid()) - registersUsed->readGPR2 = op_storeLoad.registerMem; - if (op_storeLoad.registerMem2.IsValid()) - registersUsed->readGPR3 = op_storeLoad.registerMem2; - } - else if (type == PPCREC_IML_TYPE_FPR_R_R) - { - // fpr operation - if ( - operation == PPCREC_IML_OP_FPR_ASSIGN || - operation == PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64 || - operation == PPCREC_IML_OP_FPR_FCTIWZ - ) - { - registersUsed->readGPR1 = op_fpr_r_r.regA; - registersUsed->writtenGPR1 = op_fpr_r_r.regR; - } - else if (operation == PPCREC_IML_OP_FPR_MULTIPLY || - operation == PPCREC_IML_OP_FPR_DIVIDE || - operation == 
PPCREC_IML_OP_FPR_ADD || - operation == PPCREC_IML_OP_FPR_SUB) - { - registersUsed->readGPR1 = op_fpr_r_r.regA; - registersUsed->readGPR2 = op_fpr_r_r.regR; - registersUsed->writtenGPR1 = op_fpr_r_r.regR; - - } - else if (operation == PPCREC_IML_OP_FPR_FLOAT_TO_INT || - operation == PPCREC_IML_OP_FPR_INT_TO_FLOAT || - operation == PPCREC_IML_OP_FPR_BITCAST_INT_TO_FLOAT) - { - registersUsed->writtenGPR1 = op_fpr_r_r.regR; - registersUsed->readGPR1 = op_fpr_r_r.regA; - } - else - cemu_assert_unimplemented(); - } - else if (type == PPCREC_IML_TYPE_FPR_R_R_R) - { - // fpr operation - registersUsed->readGPR1 = op_fpr_r_r_r.regA; - registersUsed->readGPR2 = op_fpr_r_r_r.regB; - registersUsed->writtenGPR1 = op_fpr_r_r_r.regR; - } - else if (type == PPCREC_IML_TYPE_FPR_R_R_R_R) - { - // fpr operation - registersUsed->readGPR1 = op_fpr_r_r_r_r.regA; - registersUsed->readGPR2 = op_fpr_r_r_r_r.regB; - registersUsed->readGPR3 = op_fpr_r_r_r_r.regC; - registersUsed->writtenGPR1 = op_fpr_r_r_r_r.regR; - } - else if (type == PPCREC_IML_TYPE_FPR_R) - { - // fpr operation - if (operation == PPCREC_IML_OP_FPR_NEGATE || - operation == PPCREC_IML_OP_FPR_ABS || - operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS || - operation == PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64 || - operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM) - { - registersUsed->readGPR1 = op_fpr_r.regR; - registersUsed->writtenGPR1 = op_fpr_r.regR; - } - else if (operation == PPCREC_IML_OP_FPR_LOAD_ONE) - { - registersUsed->writtenGPR1 = op_fpr_r.regR; - } - else - cemu_assert_unimplemented(); - } - else if (type == PPCREC_IML_TYPE_FPR_COMPARE) - { - registersUsed->writtenGPR1 = op_fpr_compare.regR; - registersUsed->readGPR1 = op_fpr_compare.regA; - registersUsed->readGPR2 = op_fpr_compare.regB; - } - else if (type == PPCREC_IML_TYPE_X86_EFLAGS_JCC) - { - // no registers read or written (except for the implicit eflags) - } - else - { - cemu_assert_unimplemented(); - } -} - -IMLReg replaceRegisterIdMultiple(IMLReg 
reg, const std::unordered_map& translationTable) -{ - if (reg.IsInvalid()) - return reg; - const auto& it = translationTable.find(reg.GetRegID()); - cemu_assert_debug(it != translationTable.cend()); - IMLReg alteredReg = reg; - alteredReg.SetRegID(it->second); - return alteredReg; -} - -void IMLInstruction::RewriteGPR(const std::unordered_map& translationTable) -{ - if (type == PPCREC_IML_TYPE_R_NAME) - { - op_r_name.regR = replaceRegisterIdMultiple(op_r_name.regR, translationTable); - } - else if (type == PPCREC_IML_TYPE_NAME_R) - { - op_r_name.regR = replaceRegisterIdMultiple(op_r_name.regR, translationTable); - } - else if (type == PPCREC_IML_TYPE_R_R) - { - op_r_r.regR = replaceRegisterIdMultiple(op_r_r.regR, translationTable); - op_r_r.regA = replaceRegisterIdMultiple(op_r_r.regA, translationTable); - } - else if (type == PPCREC_IML_TYPE_R_S32) - { - op_r_immS32.regR = replaceRegisterIdMultiple(op_r_immS32.regR, translationTable); - } - else if (type == PPCREC_IML_TYPE_R_R_S32) - { - op_r_r_s32.regR = replaceRegisterIdMultiple(op_r_r_s32.regR, translationTable); - op_r_r_s32.regA = replaceRegisterIdMultiple(op_r_r_s32.regA, translationTable); - } - else if (type == PPCREC_IML_TYPE_R_R_S32_CARRY) - { - op_r_r_s32_carry.regR = replaceRegisterIdMultiple(op_r_r_s32_carry.regR, translationTable); - op_r_r_s32_carry.regA = replaceRegisterIdMultiple(op_r_r_s32_carry.regA, translationTable); - op_r_r_s32_carry.regCarry = replaceRegisterIdMultiple(op_r_r_s32_carry.regCarry, translationTable); - } - else if (type == PPCREC_IML_TYPE_R_R_R) - { - op_r_r_r.regR = replaceRegisterIdMultiple(op_r_r_r.regR, translationTable); - op_r_r_r.regA = replaceRegisterIdMultiple(op_r_r_r.regA, translationTable); - op_r_r_r.regB = replaceRegisterIdMultiple(op_r_r_r.regB, translationTable); - } - else if (type == PPCREC_IML_TYPE_R_R_R_CARRY) - { - op_r_r_r_carry.regR = replaceRegisterIdMultiple(op_r_r_r_carry.regR, translationTable); - op_r_r_r_carry.regA = 
replaceRegisterIdMultiple(op_r_r_r_carry.regA, translationTable); - op_r_r_r_carry.regB = replaceRegisterIdMultiple(op_r_r_r_carry.regB, translationTable); - op_r_r_r_carry.regCarry = replaceRegisterIdMultiple(op_r_r_r_carry.regCarry, translationTable); - } - else if (type == PPCREC_IML_TYPE_COMPARE) - { - op_compare.regR = replaceRegisterIdMultiple(op_compare.regR, translationTable); - op_compare.regA = replaceRegisterIdMultiple(op_compare.regA, translationTable); - op_compare.regB = replaceRegisterIdMultiple(op_compare.regB, translationTable); - } - else if (type == PPCREC_IML_TYPE_COMPARE_S32) - { - op_compare_s32.regR = replaceRegisterIdMultiple(op_compare_s32.regR, translationTable); - op_compare_s32.regA = replaceRegisterIdMultiple(op_compare_s32.regA, translationTable); - } - else if (type == PPCREC_IML_TYPE_CONDITIONAL_JUMP) - { - op_conditional_jump.registerBool = replaceRegisterIdMultiple(op_conditional_jump.registerBool, translationTable); - } - else if (type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK || type == PPCREC_IML_TYPE_JUMP) - { - // no effect on registers - } - else if (type == PPCREC_IML_TYPE_NO_OP) - { - // no effect on registers - } - else if (type == PPCREC_IML_TYPE_MACRO) - { - if (operation == PPCREC_IML_MACRO_BL || operation == PPCREC_IML_MACRO_B_FAR || operation == PPCREC_IML_MACRO_LEAVE || operation == PPCREC_IML_MACRO_DEBUGBREAK || operation == PPCREC_IML_MACRO_HLE || operation == PPCREC_IML_MACRO_COUNT_CYCLES) - { - // no effect on registers - } - else if (operation == PPCREC_IML_MACRO_B_TO_REG) - { - op_macro.paramReg = replaceRegisterIdMultiple(op_macro.paramReg, translationTable); - } - else - { - cemu_assert_unimplemented(); - } - } - else if (type == PPCREC_IML_TYPE_LOAD) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - if (op_storeLoad.registerMem.IsValid()) - { - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - } - } - else 
if (type == PPCREC_IML_TYPE_LOAD_INDEXED) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - if (op_storeLoad.registerMem.IsValid()) - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - if (op_storeLoad.registerMem2.IsValid()) - op_storeLoad.registerMem2 = replaceRegisterIdMultiple(op_storeLoad.registerMem2, translationTable); - } - else if (type == PPCREC_IML_TYPE_STORE) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - if (op_storeLoad.registerMem.IsValid()) - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - } - else if (type == PPCREC_IML_TYPE_STORE_INDEXED) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - if (op_storeLoad.registerMem.IsValid()) - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - if (op_storeLoad.registerMem2.IsValid()) - op_storeLoad.registerMem2 = replaceRegisterIdMultiple(op_storeLoad.registerMem2, translationTable); - } - else if (type == PPCREC_IML_TYPE_ATOMIC_CMP_STORE) - { - op_atomic_compare_store.regEA = replaceRegisterIdMultiple(op_atomic_compare_store.regEA, translationTable); - op_atomic_compare_store.regCompareValue = replaceRegisterIdMultiple(op_atomic_compare_store.regCompareValue, translationTable); - op_atomic_compare_store.regWriteValue = replaceRegisterIdMultiple(op_atomic_compare_store.regWriteValue, translationTable); - op_atomic_compare_store.regBoolOut = replaceRegisterIdMultiple(op_atomic_compare_store.regBoolOut, translationTable); - } - else if (type == PPCREC_IML_TYPE_CALL_IMM) - { - op_call_imm.regReturn = replaceRegisterIdMultiple(op_call_imm.regReturn, translationTable); - if (op_call_imm.regParam0.IsValid()) - op_call_imm.regParam0 = replaceRegisterIdMultiple(op_call_imm.regParam0, 
translationTable); - if (op_call_imm.regParam1.IsValid()) - op_call_imm.regParam1 = replaceRegisterIdMultiple(op_call_imm.regParam1, translationTable); - if (op_call_imm.regParam2.IsValid()) - op_call_imm.regParam2 = replaceRegisterIdMultiple(op_call_imm.regParam2, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_LOAD) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - op_storeLoad.registerMem2 = replaceRegisterIdMultiple(op_storeLoad.registerMem2, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_STORE) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_STORE_INDEXED) - { - op_storeLoad.registerData = replaceRegisterIdMultiple(op_storeLoad.registerData, translationTable); - op_storeLoad.registerMem = replaceRegisterIdMultiple(op_storeLoad.registerMem, translationTable); - op_storeLoad.registerMem2 = replaceRegisterIdMultiple(op_storeLoad.registerMem2, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_R) - { - op_fpr_r.regR = replaceRegisterIdMultiple(op_fpr_r.regR, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_R_R) - { - op_fpr_r_r.regR = replaceRegisterIdMultiple(op_fpr_r_r.regR, translationTable); - op_fpr_r_r.regA = replaceRegisterIdMultiple(op_fpr_r_r.regA, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_R_R_R) - { - op_fpr_r_r_r.regR = replaceRegisterIdMultiple(op_fpr_r_r_r.regR, 
translationTable); - op_fpr_r_r_r.regA = replaceRegisterIdMultiple(op_fpr_r_r_r.regA, translationTable); - op_fpr_r_r_r.regB = replaceRegisterIdMultiple(op_fpr_r_r_r.regB, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_R_R_R_R) - { - op_fpr_r_r_r_r.regR = replaceRegisterIdMultiple(op_fpr_r_r_r_r.regR, translationTable); - op_fpr_r_r_r_r.regA = replaceRegisterIdMultiple(op_fpr_r_r_r_r.regA, translationTable); - op_fpr_r_r_r_r.regB = replaceRegisterIdMultiple(op_fpr_r_r_r_r.regB, translationTable); - op_fpr_r_r_r_r.regC = replaceRegisterIdMultiple(op_fpr_r_r_r_r.regC, translationTable); - } - else if (type == PPCREC_IML_TYPE_FPR_COMPARE) - { - op_fpr_compare.regA = replaceRegisterIdMultiple(op_fpr_compare.regA, translationTable); - op_fpr_compare.regB = replaceRegisterIdMultiple(op_fpr_compare.regB, translationTable); - op_fpr_compare.regR = replaceRegisterIdMultiple(op_fpr_compare.regR, translationTable); - } - else if (type == PPCREC_IML_TYPE_X86_EFLAGS_JCC) - { - // no registers read or written (except for the implicit eflags) - } - else - { - cemu_assert_unimplemented(); - } -} diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.h b/src/Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.h deleted file mode 100644 index 4df2a666..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.h +++ /dev/null @@ -1,826 +0,0 @@ -#pragma once - -using IMLRegID = uint16; // 16 bit ID -using IMLPhysReg = sint32; // arbitrary value that is up to the architecture backend, usually this will be the register index. A value of -1 is reserved and means not assigned - -// format of IMLReg: -// 0-15 (16 bit) IMLRegID -// 19-23 (5 bit) Offset In elements, for SIMD registers -// 24-27 (4 bit) IMLRegFormat RegFormat -// 28-31 (4 bit) IMLRegFormat BaseFormat - -enum class IMLRegFormat : uint8 -{ - INVALID_FORMAT, - I64, - I32, - I16, - I8, - // I1 ? 
- F64, - F32, - TYPE_COUNT, -}; - -class IMLReg -{ -public: - IMLReg() - { - m_raw = 0; // 0 is invalid - } - - IMLReg(IMLRegFormat baseRegFormat, IMLRegFormat regFormat, uint8 viewOffset, IMLRegID regId) - { - m_raw = 0; - m_raw |= ((uint8)baseRegFormat << 28); - m_raw |= ((uint8)regFormat << 24); - m_raw |= (uint32)regId; - } - - IMLReg(IMLReg&& baseReg, IMLRegFormat viewFormat, uint8 viewOffset, IMLRegID regId) - { - DEBUG_BREAK; - //m_raw = 0; - //m_raw |= ((uint8)baseRegFormat << 28); - //m_raw |= ((uint8)viewFormat << 24); - //m_raw |= (uint32)regId; - } - - IMLReg(const IMLReg& other) : m_raw(other.m_raw) {} - - IMLRegFormat GetBaseFormat() const - { - return (IMLRegFormat)((m_raw >> 28) & 0xF); - } - - IMLRegFormat GetRegFormat() const - { - return (IMLRegFormat)((m_raw >> 24) & 0xF); - } - - IMLRegID GetRegID() const - { - cemu_assert_debug(GetBaseFormat() != IMLRegFormat::INVALID_FORMAT); - cemu_assert_debug(GetRegFormat() != IMLRegFormat::INVALID_FORMAT); - return (IMLRegID)(m_raw & 0xFFFF); - } - - void SetRegID(IMLRegID regId) - { - cemu_assert_debug(regId <= 0xFFFF); - m_raw &= ~0xFFFF; - m_raw |= (uint32)regId; - } - - bool IsInvalid() const - { - return GetBaseFormat() == IMLRegFormat::INVALID_FORMAT; - } - - bool IsValid() const - { - return GetBaseFormat() != IMLRegFormat::INVALID_FORMAT; - } - - bool IsValidAndSameRegID(IMLRegID regId) const - { - return IsValid() && GetRegID() == regId; - } - - // compare all fields - bool operator==(const IMLReg& other) const - { - return m_raw == other.m_raw; - } - -private: - uint32 m_raw; -}; - -static const IMLReg IMLREG_INVALID(IMLRegFormat::INVALID_FORMAT, IMLRegFormat::INVALID_FORMAT, 0, 0); -static const IMLRegID IMLRegID_INVALID(0xFFFF); - -using IMLName = uint32; - -enum -{ - PPCREC_IML_OP_ASSIGN, // '=' operator - PPCREC_IML_OP_ENDIAN_SWAP, // '=' operator with 32bit endian swap - PPCREC_IML_OP_MULTIPLY_SIGNED, // '*' operator (signed multiply) - PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED, // unsigned 
64bit multiply, store only high 32bit-word of result - PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED, // signed 64bit multiply, store only high 32bit-word of result - PPCREC_IML_OP_DIVIDE_SIGNED, // '/' operator (signed divide) - PPCREC_IML_OP_DIVIDE_UNSIGNED, // '/' operator (unsigned divide) - - // binary operation - PPCREC_IML_OP_OR, // '|' operator - PPCREC_IML_OP_AND, // '&' operator - PPCREC_IML_OP_XOR, // '^' operator - PPCREC_IML_OP_LEFT_ROTATE, // left rotate operator - PPCREC_IML_OP_LEFT_SHIFT, // shift left operator - PPCREC_IML_OP_RIGHT_SHIFT_U, // right shift operator (unsigned) - PPCREC_IML_OP_RIGHT_SHIFT_S, // right shift operator (signed) - // ppc - PPCREC_IML_OP_SLW, // SLW (shift based on register by up to 63 bits) - PPCREC_IML_OP_SRW, // SRW (shift based on register by up to 63 bits) - PPCREC_IML_OP_CNTLZW, - // FPU - PPCREC_IML_OP_FPR_ASSIGN, - PPCREC_IML_OP_FPR_LOAD_ONE, // load constant 1.0 into register - PPCREC_IML_OP_FPR_ADD, - PPCREC_IML_OP_FPR_SUB, - PPCREC_IML_OP_FPR_MULTIPLY, - PPCREC_IML_OP_FPR_DIVIDE, - PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64, // expand f32 to f64 in-place - PPCREC_IML_OP_FPR_NEGATE, - PPCREC_IML_OP_FPR_ABS, // abs(fpr) - PPCREC_IML_OP_FPR_NEGATIVE_ABS, // -abs(fpr) - PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, // round 64bit double to 64bit double with 32bit float precision (in bottom half of xmm register) - PPCREC_IML_OP_FPR_FCTIWZ, - PPCREC_IML_OP_FPR_SELECT, // selectively copy bottom value from operand B or C based on value in operand A - // Conversion (FPR_R_R) - PPCREC_IML_OP_FPR_INT_TO_FLOAT, // convert integer value in gpr to floating point value in fpr - PPCREC_IML_OP_FPR_FLOAT_TO_INT, // convert floating point value in fpr to integer value in gpr - - // Bitcast (FPR_R_R) - PPCREC_IML_OP_FPR_BITCAST_INT_TO_FLOAT, - - // R_R_R + R_R_S32 - PPCREC_IML_OP_ADD, // also R_R_R_CARRY - PPCREC_IML_OP_SUB, - - // R_R only - PPCREC_IML_OP_NOT, - PPCREC_IML_OP_NEG, - PPCREC_IML_OP_ASSIGN_S16_TO_S32, - 
PPCREC_IML_OP_ASSIGN_S8_TO_S32, - - // R_R_R_carry - PPCREC_IML_OP_ADD_WITH_CARRY, // similar to ADD but also adds carry bit (0 or 1) - - // X86 extension - PPCREC_IML_OP_X86_CMP, // R_R and R_S32 - - PPCREC_IML_OP_INVALID -}; - -#define PPCREC_IML_OP_FPR_COPY_PAIR (PPCREC_IML_OP_ASSIGN) - -enum -{ - PPCREC_IML_MACRO_B_TO_REG, // branch to PPC address in register (used for BCCTR, BCLR) - - PPCREC_IML_MACRO_BL, // call to different function (can be within same function) - PPCREC_IML_MACRO_B_FAR, // branch to different function - PPCREC_IML_MACRO_COUNT_CYCLES, // decrease current remaining thread cycles by a certain amount - PPCREC_IML_MACRO_HLE, // HLE function call - PPCREC_IML_MACRO_LEAVE, // leaves recompiler and switches to interpeter - // debugging - PPCREC_IML_MACRO_DEBUGBREAK, // throws a debugbreak -}; - -enum class IMLCondition : uint8 -{ - EQ, - NEQ, - SIGNED_GT, - SIGNED_LT, - UNSIGNED_GT, - UNSIGNED_LT, - - // floating point conditions - UNORDERED_GT, // a > b, false if either is NaN - UNORDERED_LT, // a < b, false if either is NaN - UNORDERED_EQ, // a == b, false if either is NaN - UNORDERED_U, // unordered (true if either operand is NaN) - - ORDERED_GT, - ORDERED_LT, - ORDERED_EQ, - ORDERED_U -}; - -enum -{ - PPCREC_IML_TYPE_NONE, - PPCREC_IML_TYPE_NO_OP, // no-op instruction - PPCREC_IML_TYPE_R_R, // r* = (op) *r (can also be r* (op) *r) - PPCREC_IML_TYPE_R_R_R, // r* = r* (op) r* - PPCREC_IML_TYPE_R_R_R_CARRY, // r* = r* (op) r* (reads and/or updates carry) - PPCREC_IML_TYPE_R_R_S32, // r* = r* (op) s32* - PPCREC_IML_TYPE_R_R_S32_CARRY, // r* = r* (op) s32* (reads and/or updates carry) - PPCREC_IML_TYPE_LOAD, // r* = [r*+s32*] - PPCREC_IML_TYPE_LOAD_INDEXED, // r* = [r*+r*] - PPCREC_IML_TYPE_STORE, // [r*+s32*] = r* - PPCREC_IML_TYPE_STORE_INDEXED, // [r*+r*] = r* - PPCREC_IML_TYPE_R_NAME, // r* = name - PPCREC_IML_TYPE_NAME_R, // name* = r* - PPCREC_IML_TYPE_R_S32, // r* (op) imm - PPCREC_IML_TYPE_MACRO, - PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK, // jumps 
only if remaining thread cycles < 0 - - // conditions and branches - PPCREC_IML_TYPE_COMPARE, // r* = r* CMP[cond] r* - PPCREC_IML_TYPE_COMPARE_S32, // r* = r* CMP[cond] imm - PPCREC_IML_TYPE_JUMP, // jump always - PPCREC_IML_TYPE_CONDITIONAL_JUMP, // jump conditionally based on boolean value in register - - // atomic - PPCREC_IML_TYPE_ATOMIC_CMP_STORE, - - // function call - PPCREC_IML_TYPE_CALL_IMM, // call to fixed immediate address - - // FPR - PPCREC_IML_TYPE_FPR_LOAD, // r* = (bitdepth) [r*+s32*] (single or paired single mode) - PPCREC_IML_TYPE_FPR_LOAD_INDEXED, // r* = (bitdepth) [r*+r*] (single or paired single mode) - PPCREC_IML_TYPE_FPR_STORE, // (bitdepth) [r*+s32*] = r* (single or paired single mode) - PPCREC_IML_TYPE_FPR_STORE_INDEXED, // (bitdepth) [r*+r*] = r* (single or paired single mode) - PPCREC_IML_TYPE_FPR_R_R, - PPCREC_IML_TYPE_FPR_R_R_R, - PPCREC_IML_TYPE_FPR_R_R_R_R, - PPCREC_IML_TYPE_FPR_R, - - PPCREC_IML_TYPE_FPR_COMPARE, // r* = r* CMP[cond] r* - - // X86 specific - PPCREC_IML_TYPE_X86_EFLAGS_JCC, -}; - -enum // IMLName -{ - PPCREC_NAME_NONE, - PPCREC_NAME_TEMPORARY = 1000, - PPCREC_NAME_R0 = 2000, - PPCREC_NAME_SPR0 = 3000, - PPCREC_NAME_FPR_HALF = 4800, // Counts PS0 and PS1 separately. E.g. fp3.ps1 is at offset 3 * 2 + 1 - PPCREC_NAME_TEMPORARY_FPR0 = 5000, // 0 to 7 - PPCREC_NAME_XER_CA = 6000, // carry bit from XER - PPCREC_NAME_XER_OV = 6001, // overflow bit from XER - PPCREC_NAME_XER_SO = 6002, // summary overflow bit from XER - PPCREC_NAME_CR = 7000, // CR register bits (31 to 0) - PPCREC_NAME_CR_LAST = PPCREC_NAME_CR+31, - PPCREC_NAME_CPU_MEMRES_EA = 8000, - PPCREC_NAME_CPU_MEMRES_VAL = 8001 -}; - -#define PPC_REC_INVALID_REGISTER 0xFF // deprecated. 
Use IMLREG_INVALID instead - -enum -{ - // fpr load - PPCREC_FPR_LD_MODE_SINGLE, - PPCREC_FPR_LD_MODE_DOUBLE, - - // fpr store - PPCREC_FPR_ST_MODE_SINGLE, - PPCREC_FPR_ST_MODE_DOUBLE, - - PPCREC_FPR_ST_MODE_UI32_FROM_PS0, // store raw low-32bit of PS0 -}; - -struct IMLUsedRegisters -{ - IMLUsedRegisters() {}; - - bool IsWrittenByRegId(IMLRegID regId) const - { - if (writtenGPR1.IsValid() && writtenGPR1.GetRegID() == regId) - return true; - if (writtenGPR2.IsValid() && writtenGPR2.GetRegID() == regId) - return true; - return false; - } - - bool IsBaseGPRWritten(IMLReg imlReg) const - { - cemu_assert_debug(imlReg.IsValid()); - auto regId = imlReg.GetRegID(); - return IsWrittenByRegId(regId); - } - - template - void ForEachWrittenGPR(Fn F) const - { - if (writtenGPR1.IsValid()) - F(writtenGPR1); - if (writtenGPR2.IsValid()) - F(writtenGPR2); - } - - template - void ForEachReadGPR(Fn F) const - { - if (readGPR1.IsValid()) - F(readGPR1); - if (readGPR2.IsValid()) - F(readGPR2); - if (readGPR3.IsValid()) - F(readGPR3); - if (readGPR4.IsValid()) - F(readGPR4); - } - - template - void ForEachAccessedGPR(Fn F) const - { - // GPRs - if (readGPR1.IsValid()) - F(readGPR1, false); - if (readGPR2.IsValid()) - F(readGPR2, false); - if (readGPR3.IsValid()) - F(readGPR3, false); - if (readGPR4.IsValid()) - F(readGPR4, false); - if (writtenGPR1.IsValid()) - F(writtenGPR1, true); - if (writtenGPR2.IsValid()) - F(writtenGPR2, true); - } - - IMLReg readGPR1; - IMLReg readGPR2; - IMLReg readGPR3; - IMLReg readGPR4; - IMLReg writtenGPR1; - IMLReg writtenGPR2; -}; - -struct IMLInstruction -{ - IMLInstruction() {} - IMLInstruction(const IMLInstruction& other) - { - memcpy(this, &other, sizeof(IMLInstruction)); - } - - uint8 type; - uint8 operation; - union - { - struct - { - uint8 _padding[7]; - }padding; - struct - { - IMLReg regR; - IMLReg regA; - }op_r_r; - struct - { - IMLReg regR; - IMLReg regA; - IMLReg regB; - }op_r_r_r; - struct - { - IMLReg regR; - IMLReg regA; - IMLReg regB; - 
IMLReg regCarry; - }op_r_r_r_carry; - struct - { - IMLReg regR; - IMLReg regA; - sint32 immS32; - }op_r_r_s32; - struct - { - IMLReg regR; - IMLReg regA; - IMLReg regCarry; - sint32 immS32; - }op_r_r_s32_carry; - struct - { - IMLReg regR; - IMLName name; - }op_r_name; // alias op_name_r - struct - { - IMLReg regR; - sint32 immS32; - }op_r_immS32; - struct - { - uint32 param; - uint32 param2; - uint16 paramU16; - IMLReg paramReg; - }op_macro; - struct - { - IMLReg registerData; - IMLReg registerMem; - IMLReg registerMem2; - uint8 copyWidth; - struct - { - bool swapEndian : 1; - bool signExtend : 1; - bool notExpanded : 1; // for floats - }flags2; - uint8 mode; // transfer mode - sint32 immS32; - }op_storeLoad; - struct - { - uintptr_t callAddress; - IMLReg regParam0; - IMLReg regParam1; - IMLReg regParam2; - IMLReg regReturn; - }op_call_imm; - struct - { - IMLReg regR; - IMLReg regA; - }op_fpr_r_r; - struct - { - IMLReg regR; - IMLReg regA; - IMLReg regB; - }op_fpr_r_r_r; - struct - { - IMLReg regR; - IMLReg regA; - IMLReg regB; - IMLReg regC; - }op_fpr_r_r_r_r; - struct - { - IMLReg regR; - }op_fpr_r; - struct - { - IMLReg regR; // stores the boolean result of the comparison - IMLReg regA; - IMLReg regB; - IMLCondition cond; - }op_fpr_compare; - struct - { - IMLReg regR; // stores the boolean result of the comparison - IMLReg regA; - IMLReg regB; - IMLCondition cond; - }op_compare; - struct - { - IMLReg regR; // stores the boolean result of the comparison - IMLReg regA; - sint32 immS32; - IMLCondition cond; - }op_compare_s32; - struct - { - IMLReg registerBool; - bool mustBeTrue; - }op_conditional_jump; - struct - { - IMLReg regEA; - IMLReg regCompareValue; - IMLReg regWriteValue; - IMLReg regBoolOut; - }op_atomic_compare_store; - // conditional operations (emitted if supported by target platform) - struct - { - // r_s32 - IMLReg regR; - sint32 immS32; - // condition - uint8 crRegisterIndex; - uint8 crBitIndex; - bool bitMustBeSet; - }op_conditional_r_s32; - // X86 
specific - struct - { - IMLCondition cond; - bool invertedCondition; - }op_x86_eflags_jcc; - }; - - bool IsSuffixInstruction() const - { - if (type == PPCREC_IML_TYPE_MACRO && operation == PPCREC_IML_MACRO_BL || - type == PPCREC_IML_TYPE_MACRO && operation == PPCREC_IML_MACRO_B_FAR || - type == PPCREC_IML_TYPE_MACRO && operation == PPCREC_IML_MACRO_B_TO_REG || - type == PPCREC_IML_TYPE_MACRO && operation == PPCREC_IML_MACRO_LEAVE || - type == PPCREC_IML_TYPE_MACRO && operation == PPCREC_IML_MACRO_HLE || - type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK || - type == PPCREC_IML_TYPE_JUMP || - type == PPCREC_IML_TYPE_CONDITIONAL_JUMP || - type == PPCREC_IML_TYPE_X86_EFLAGS_JCC) - return true; - return false; - } - - // instruction setters - void make_no_op() - { - type = PPCREC_IML_TYPE_NO_OP; - operation = 0; - } - - void make_r_name(IMLReg regR, IMLName name) - { - cemu_assert_debug(regR.GetBaseFormat() == regR.GetRegFormat()); // for name load/store instructions the register must match the base format - type = PPCREC_IML_TYPE_R_NAME; - operation = PPCREC_IML_OP_ASSIGN; - op_r_name.regR = regR; - op_r_name.name = name; - } - - void make_name_r(IMLName name, IMLReg regR) - { - cemu_assert_debug(regR.GetBaseFormat() == regR.GetRegFormat()); // for name load/store instructions the register must match the base format - type = PPCREC_IML_TYPE_NAME_R; - operation = PPCREC_IML_OP_ASSIGN; - op_r_name.regR = regR; - op_r_name.name = name; - } - - void make_debugbreak(uint32 currentPPCAddress = 0) - { - make_macro(PPCREC_IML_MACRO_DEBUGBREAK, 0, currentPPCAddress, 0, IMLREG_INVALID); - } - - void make_macro(uint32 macroId, uint32 param, uint32 param2, uint16 paramU16, IMLReg regParam) - { - this->type = PPCREC_IML_TYPE_MACRO; - this->operation = macroId; - this->op_macro.param = param; - this->op_macro.param2 = param2; - this->op_macro.paramU16 = paramU16; - this->op_macro.paramReg = regParam; - } - - void make_cjump_cycle_check() - { - this->type = 
PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK; - this->operation = 0; - } - - void make_r_r(uint32 operation, IMLReg regR, IMLReg regA) - { - this->type = PPCREC_IML_TYPE_R_R; - this->operation = operation; - this->op_r_r.regR = regR; - this->op_r_r.regA = regA; - } - - void make_r_s32(uint32 operation, IMLReg regR, sint32 immS32) - { - this->type = PPCREC_IML_TYPE_R_S32; - this->operation = operation; - this->op_r_immS32.regR = regR; - this->op_r_immS32.immS32 = immS32; - } - - void make_r_r_r(uint32 operation, IMLReg regR, IMLReg regA, IMLReg regB) - { - this->type = PPCREC_IML_TYPE_R_R_R; - this->operation = operation; - this->op_r_r_r.regR = regR; - this->op_r_r_r.regA = regA; - this->op_r_r_r.regB = regB; - } - - void make_r_r_r_carry(uint32 operation, IMLReg regR, IMLReg regA, IMLReg regB, IMLReg regCarry) - { - this->type = PPCREC_IML_TYPE_R_R_R_CARRY; - this->operation = operation; - this->op_r_r_r_carry.regR = regR; - this->op_r_r_r_carry.regA = regA; - this->op_r_r_r_carry.regB = regB; - this->op_r_r_r_carry.regCarry = regCarry; - } - - void make_r_r_s32(uint32 operation, IMLReg regR, IMLReg regA, sint32 immS32) - { - this->type = PPCREC_IML_TYPE_R_R_S32; - this->operation = operation; - this->op_r_r_s32.regR = regR; - this->op_r_r_s32.regA = regA; - this->op_r_r_s32.immS32 = immS32; - } - - void make_r_r_s32_carry(uint32 operation, IMLReg regR, IMLReg regA, sint32 immS32, IMLReg regCarry) - { - this->type = PPCREC_IML_TYPE_R_R_S32_CARRY; - this->operation = operation; - this->op_r_r_s32_carry.regR = regR; - this->op_r_r_s32_carry.regA = regA; - this->op_r_r_s32_carry.immS32 = immS32; - this->op_r_r_s32_carry.regCarry = regCarry; - } - - void make_compare(IMLReg regA, IMLReg regB, IMLReg regR, IMLCondition cond) - { - this->type = PPCREC_IML_TYPE_COMPARE; - this->operation = PPCREC_IML_OP_INVALID; - this->op_compare.regR = regR; - this->op_compare.regA = regA; - this->op_compare.regB = regB; - this->op_compare.cond = cond; - } - - void make_compare_s32(IMLReg regA, 
sint32 immS32, IMLReg regR, IMLCondition cond) - { - this->type = PPCREC_IML_TYPE_COMPARE_S32; - this->operation = PPCREC_IML_OP_INVALID; - this->op_compare_s32.regR = regR; - this->op_compare_s32.regA = regA; - this->op_compare_s32.immS32 = immS32; - this->op_compare_s32.cond = cond; - } - - void make_conditional_jump(IMLReg regBool, bool mustBeTrue) - { - this->type = PPCREC_IML_TYPE_CONDITIONAL_JUMP; - this->operation = PPCREC_IML_OP_INVALID; - this->op_conditional_jump.registerBool = regBool; - this->op_conditional_jump.mustBeTrue = mustBeTrue; - } - - void make_jump() - { - this->type = PPCREC_IML_TYPE_JUMP; - this->operation = PPCREC_IML_OP_INVALID; - } - - // load from memory - void make_r_memory(IMLReg regD, IMLReg regMem, sint32 immS32, uint32 copyWidth, bool signExtend, bool switchEndian) - { - this->type = PPCREC_IML_TYPE_LOAD; - this->operation = 0; - this->op_storeLoad.registerData = regD; - this->op_storeLoad.registerMem = regMem; - this->op_storeLoad.immS32 = immS32; - this->op_storeLoad.copyWidth = copyWidth; - this->op_storeLoad.flags2.swapEndian = switchEndian; - this->op_storeLoad.flags2.signExtend = signExtend; - } - - // store to memory - void make_memory_r(IMLReg regS, IMLReg regMem, sint32 immS32, uint32 copyWidth, bool switchEndian) - { - this->type = PPCREC_IML_TYPE_STORE; - this->operation = 0; - this->op_storeLoad.registerData = regS; - this->op_storeLoad.registerMem = regMem; - this->op_storeLoad.immS32 = immS32; - this->op_storeLoad.copyWidth = copyWidth; - this->op_storeLoad.flags2.swapEndian = switchEndian; - this->op_storeLoad.flags2.signExtend = false; - } - - void make_atomic_cmp_store(IMLReg regEA, IMLReg regCompareValue, IMLReg regWriteValue, IMLReg regSuccessOutput) - { - this->type = PPCREC_IML_TYPE_ATOMIC_CMP_STORE; - this->operation = 0; - this->op_atomic_compare_store.regEA = regEA; - this->op_atomic_compare_store.regCompareValue = regCompareValue; - this->op_atomic_compare_store.regWriteValue = regWriteValue; - 
this->op_atomic_compare_store.regBoolOut = regSuccessOutput; - } - - void make_call_imm(uintptr_t callAddress, IMLReg param0, IMLReg param1, IMLReg param2, IMLReg regReturn) - { - this->type = PPCREC_IML_TYPE_CALL_IMM; - this->operation = 0; - this->op_call_imm.callAddress = callAddress; - this->op_call_imm.regParam0 = param0; - this->op_call_imm.regParam1 = param1; - this->op_call_imm.regParam2 = param2; - this->op_call_imm.regReturn = regReturn; - } - - // FPR - - // load from memory - void make_fpr_r_memory(IMLReg registerDestination, IMLReg registerMemory, sint32 immS32, uint32 mode, bool switchEndian) - { - this->type = PPCREC_IML_TYPE_FPR_LOAD; - this->operation = 0; - this->op_storeLoad.registerData = registerDestination; - this->op_storeLoad.registerMem = registerMemory; - this->op_storeLoad.immS32 = immS32; - this->op_storeLoad.mode = mode; - this->op_storeLoad.flags2.swapEndian = switchEndian; - } - - void make_fpr_r_memory_indexed(IMLReg registerDestination, IMLReg registerMemory1, IMLReg registerMemory2, uint32 mode, bool switchEndian) - { - this->type = PPCREC_IML_TYPE_FPR_LOAD_INDEXED; - this->operation = 0; - this->op_storeLoad.registerData = registerDestination; - this->op_storeLoad.registerMem = registerMemory1; - this->op_storeLoad.registerMem2 = registerMemory2; - this->op_storeLoad.immS32 = 0; - this->op_storeLoad.mode = mode; - this->op_storeLoad.flags2.swapEndian = switchEndian; - } - - // store to memory - void make_fpr_memory_r(IMLReg registerSource, IMLReg registerMemory, sint32 immS32, uint32 mode, bool switchEndian) - { - this->type = PPCREC_IML_TYPE_FPR_STORE; - this->operation = 0; - this->op_storeLoad.registerData = registerSource; - this->op_storeLoad.registerMem = registerMemory; - this->op_storeLoad.immS32 = immS32; - this->op_storeLoad.mode = mode; - this->op_storeLoad.flags2.swapEndian = switchEndian; - } - - void make_fpr_memory_r_indexed(IMLReg registerSource, IMLReg registerMemory1, IMLReg registerMemory2, sint32 immS32, uint32 
mode, bool switchEndian) - { - this->type = PPCREC_IML_TYPE_FPR_STORE_INDEXED; - this->operation = 0; - this->op_storeLoad.registerData = registerSource; - this->op_storeLoad.registerMem = registerMemory1; - this->op_storeLoad.registerMem2 = registerMemory2; - this->op_storeLoad.immS32 = immS32; - this->op_storeLoad.mode = mode; - this->op_storeLoad.flags2.swapEndian = switchEndian; - } - - void make_fpr_compare(IMLReg regA, IMLReg regB, IMLReg regR, IMLCondition cond) - { - this->type = PPCREC_IML_TYPE_FPR_COMPARE; - this->operation = -999; - this->op_fpr_compare.regR = regR; - this->op_fpr_compare.regA = regA; - this->op_fpr_compare.regB = regB; - this->op_fpr_compare.cond = cond; - } - - void make_fpr_r(sint32 operation, IMLReg registerResult) - { - // OP (fpr) - this->type = PPCREC_IML_TYPE_FPR_R; - this->operation = operation; - this->op_fpr_r.regR = registerResult; - } - - void make_fpr_r_r(sint32 operation, IMLReg registerResult, IMLReg registerOperand, sint32 crRegister=PPC_REC_INVALID_REGISTER) - { - // fpr OP fpr - this->type = PPCREC_IML_TYPE_FPR_R_R; - this->operation = operation; - this->op_fpr_r_r.regR = registerResult; - this->op_fpr_r_r.regA = registerOperand; - } - - void make_fpr_r_r_r(sint32 operation, IMLReg registerResult, IMLReg registerOperand1, IMLReg registerOperand2, sint32 crRegister=PPC_REC_INVALID_REGISTER) - { - // fpr = OP (fpr,fpr) - this->type = PPCREC_IML_TYPE_FPR_R_R_R; - this->operation = operation; - this->op_fpr_r_r_r.regR = registerResult; - this->op_fpr_r_r_r.regA = registerOperand1; - this->op_fpr_r_r_r.regB = registerOperand2; - } - - void make_fpr_r_r_r_r(sint32 operation, IMLReg registerResult, IMLReg registerOperandA, IMLReg registerOperandB, IMLReg registerOperandC, sint32 crRegister=PPC_REC_INVALID_REGISTER) - { - // fpr = OP (fpr,fpr,fpr) - this->type = PPCREC_IML_TYPE_FPR_R_R_R_R; - this->operation = operation; - this->op_fpr_r_r_r_r.regR = registerResult; - this->op_fpr_r_r_r_r.regA = registerOperandA; - 
this->op_fpr_r_r_r_r.regB = registerOperandB; - this->op_fpr_r_r_r_r.regC = registerOperandC; - } - - /* X86 specific */ - void make_x86_eflags_jcc(IMLCondition cond, bool invertedCondition) - { - this->type = PPCREC_IML_TYPE_X86_EFLAGS_JCC; - this->operation = -999; - this->op_x86_eflags_jcc.cond = cond; - this->op_x86_eflags_jcc.invertedCondition = invertedCondition; - } - - void CheckRegisterUsage(IMLUsedRegisters* registersUsed) const; - bool HasSideEffects() const; // returns true if the instruction has side effects beyond just reading and writing registers. Dead code elimination uses this to know if an instruction can be dropped when the regular register outputs are not used - - void RewriteGPR(const std::unordered_map& translationTable); -}; - -// architecture specific constants -namespace IMLArchX86 -{ - static constexpr int PHYSREG_GPR_BASE = 0; - static constexpr int PHYSREG_FPR_BASE = 16; -}; \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLOptimizer.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLOptimizer.cpp deleted file mode 100644 index 7671a163..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLOptimizer.cpp +++ /dev/null @@ -1,719 +0,0 @@ -#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h" -#include "Cafe/HW/Espresso/Recompiler/IML/IML.h" -#include "Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.h" - -#include "../PPCRecompiler.h" -#include "../PPCRecompilerIml.h" -#include "../BackendX64/BackendX64.h" - -#include "Common/FileStream.h" - -#include -#include - -IMLReg _FPRRegFromID(IMLRegID regId) -{ - return IMLReg(IMLRegFormat::F64, IMLRegFormat::F64, 0, regId); -} - -void PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment, sint32 imlIndexLoad, IMLReg fprReg) -{ - IMLRegID fprIndex = fprReg.GetRegID(); - - IMLInstruction* imlInstructionLoad = imlSegment->imlList.data() + imlIndexLoad; - if 
(imlInstructionLoad->op_storeLoad.flags2.notExpanded) - return; - boost::container::static_vector trackedMoves; // only track up to 4 copies - IMLUsedRegisters registersUsed; - sint32 scanRangeEnd = std::min(imlIndexLoad + 25, imlSegment->imlList.size()); // don't scan too far (saves performance and also the chances we can merge the load+store become low at high distances) - bool foundMatch = false; - sint32 lastStore = -1; - for (sint32 i = imlIndexLoad + 1; i < scanRangeEnd; i++) - { - IMLInstruction* imlInstruction = imlSegment->imlList.data() + i; - if (imlInstruction->IsSuffixInstruction()) - break; - // check if FPR is stored - if ((imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE && imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_SINGLE) || - (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED && imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_SINGLE)) - { - if (imlInstruction->op_storeLoad.registerData.GetRegID() == fprIndex) - { - if (foundMatch == false) - { - // flag the load-single instruction as "don't expand" (leave single value as-is) - imlInstructionLoad->op_storeLoad.flags2.notExpanded = true; - } - // also set the flag for the store instruction - IMLInstruction* imlInstructionStore = imlInstruction; - imlInstructionStore->op_storeLoad.flags2.notExpanded = true; - - foundMatch = true; - lastStore = i + 1; - - continue; - } - } - // if the FPR is copied then keep track of it. 
We can expand the copies instead of the original - if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R && imlInstruction->operation == PPCREC_IML_OP_FPR_ASSIGN && imlInstruction->op_fpr_r_r.regA.GetRegID() == fprIndex) - { - if (imlInstruction->op_fpr_r_r.regR.GetRegID() == fprIndex) - { - // unexpected no-op - break; - } - if (trackedMoves.size() >= trackedMoves.capacity()) - { - // we cant track any more moves, expand here - lastStore = i; - break; - } - trackedMoves.push_back(i); - continue; - } - // check if FPR is overwritten - imlInstruction->CheckRegisterUsage(®istersUsed); - if (registersUsed.writtenGPR1.IsValidAndSameRegID(fprIndex) || registersUsed.writtenGPR2.IsValidAndSameRegID(fprIndex)) - break; - if (registersUsed.readGPR1.IsValidAndSameRegID(fprIndex)) - break; - if (registersUsed.readGPR2.IsValidAndSameRegID(fprIndex)) - break; - if (registersUsed.readGPR3.IsValidAndSameRegID(fprIndex)) - break; - if (registersUsed.readGPR4.IsValidAndSameRegID(fprIndex)) - break; - } - - if (foundMatch) - { - // insert expand instructions for each target register of a move - sint32 positionBias = 0; - for (auto& trackedMove : trackedMoves) - { - sint32 realPosition = trackedMove + positionBias; - IMLInstruction* imlMoveInstruction = imlSegment->imlList.data() + realPosition; - if (realPosition >= lastStore) - break; // expand is inserted before this move - else - lastStore++; - - cemu_assert_debug(imlMoveInstruction->type == PPCREC_IML_TYPE_FPR_R_R && imlMoveInstruction->op_fpr_r_r.regA.GetRegID() == fprIndex); - cemu_assert_debug(imlMoveInstruction->op_fpr_r_r.regA.GetRegFormat() == IMLRegFormat::F64); - auto dstReg = imlMoveInstruction->op_fpr_r_r.regR; - IMLInstruction* newExpand = PPCRecompiler_insertInstruction(imlSegment, realPosition+1); // one after the move - newExpand->make_fpr_r(PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64, dstReg); - positionBias++; - } - // insert expand instruction after store - IMLInstruction* newExpand = 
PPCRecompiler_insertInstruction(imlSegment, lastStore); - newExpand->make_fpr_r(PPCREC_IML_OP_FPR_EXPAND_F32_TO_F64, _FPRRegFromID(fprIndex)); - } -} - -/* -* Scans for patterns: -* -* -* -* For these patterns the store and load is modified to work with un-extended values (float remains as float, no double conversion) -* The float->double extension is then executed later -* Advantages: -* Keeps denormals and other special float values intact -* Slightly improves performance -*/ -void IMLOptimizer_OptimizeDirectFloatCopies(ppcImlGenContext_t* ppcImlGenContext) -{ - for (IMLSegment* segIt : ppcImlGenContext->segmentList2) - { - for (sint32 i = 0; i < segIt->imlList.size(); i++) - { - IMLInstruction* imlInstruction = segIt->imlList.data() + i; - if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD && imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_SINGLE) - { - PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext, segIt, i, imlInstruction->op_storeLoad.registerData); - } - else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED && imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_SINGLE) - { - PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext, segIt, i, imlInstruction->op_storeLoad.registerData); - } - } - } -} - -void PPCRecompiler_optimizeDirectIntegerCopiesScanForward(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment, sint32 imlIndexLoad, IMLReg gprReg) -{ - cemu_assert_debug(gprReg.GetBaseFormat() == IMLRegFormat::I64); // todo - proper handling required for non-standard sizes - cemu_assert_debug(gprReg.GetRegFormat() == IMLRegFormat::I32); - - IMLRegID gprIndex = gprReg.GetRegID(); - IMLInstruction* imlInstructionLoad = imlSegment->imlList.data() + imlIndexLoad; - if ( imlInstructionLoad->op_storeLoad.flags2.swapEndian == false ) - return; - bool foundMatch = false; - IMLUsedRegisters registersUsed; - sint32 scanRangeEnd = std::min(imlIndexLoad + 25, imlSegment->imlList.size()); // don't scan too 
far (saves performance and also the chances we can merge the load+store become low at high distances) - sint32 i = imlIndexLoad + 1; - for (; i < scanRangeEnd; i++) - { - IMLInstruction* imlInstruction = imlSegment->imlList.data() + i; - if (imlInstruction->IsSuffixInstruction()) - break; - // check if GPR is stored - if ((imlInstruction->type == PPCREC_IML_TYPE_STORE && imlInstruction->op_storeLoad.copyWidth == 32 ) ) - { - if (imlInstruction->op_storeLoad.registerMem.GetRegID() == gprIndex) - break; - if (imlInstruction->op_storeLoad.registerData.GetRegID() == gprIndex) - { - IMLInstruction* imlInstructionStore = imlInstruction; - if (foundMatch == false) - { - // switch the endian swap flag for the load instruction - imlInstructionLoad->op_storeLoad.flags2.swapEndian = !imlInstructionLoad->op_storeLoad.flags2.swapEndian; - foundMatch = true; - } - // switch the endian swap flag for the store instruction - imlInstructionStore->op_storeLoad.flags2.swapEndian = !imlInstructionStore->op_storeLoad.flags2.swapEndian; - // keep scanning - continue; - } - } - // check if GPR is accessed - imlInstruction->CheckRegisterUsage(®istersUsed); - if (registersUsed.readGPR1.IsValidAndSameRegID(gprIndex) || - registersUsed.readGPR2.IsValidAndSameRegID(gprIndex) || - registersUsed.readGPR3.IsValidAndSameRegID(gprIndex)) - { - break; - } - if (registersUsed.IsBaseGPRWritten(gprReg)) - return; // GPR overwritten, we don't need to byte swap anymore - } - if (foundMatch) - { - PPCRecompiler_insertInstruction(imlSegment, i)->make_r_r(PPCREC_IML_OP_ENDIAN_SWAP, gprReg, gprReg); - } -} - -/* -* Scans for patterns: -* -* -* -* For these patterns the store and load is modified to work with non-swapped values -* The big_endian->little_endian conversion is then executed later -* Advantages: -* Slightly improves performance -*/ -void IMLOptimizer_OptimizeDirectIntegerCopies(ppcImlGenContext_t* ppcImlGenContext) -{ - for (IMLSegment* segIt : ppcImlGenContext->segmentList2) - { - for (sint32 i 
= 0; i < segIt->imlList.size(); i++) - { - IMLInstruction* imlInstruction = segIt->imlList.data() + i; - if (imlInstruction->type == PPCREC_IML_TYPE_LOAD && imlInstruction->op_storeLoad.copyWidth == 32 && imlInstruction->op_storeLoad.flags2.swapEndian ) - { - PPCRecompiler_optimizeDirectIntegerCopiesScanForward(ppcImlGenContext, segIt, i, imlInstruction->op_storeLoad.registerData); - } - } - } -} - -IMLName PPCRecompilerImlGen_GetRegName(ppcImlGenContext_t* ppcImlGenContext, IMLReg reg); - -sint32 _getGQRIndexFromRegister(ppcImlGenContext_t* ppcImlGenContext, IMLReg gqrReg) -{ - if (gqrReg.IsInvalid()) - return -1; - sint32 namedReg = PPCRecompilerImlGen_GetRegName(ppcImlGenContext, gqrReg); - if (namedReg >= (PPCREC_NAME_SPR0 + SPR_UGQR0) && namedReg <= (PPCREC_NAME_SPR0 + SPR_UGQR7)) - { - return namedReg - (PPCREC_NAME_SPR0 + SPR_UGQR0); - } - else - { - cemu_assert_suspicious(); - } - return -1; -} - -bool PPCRecompiler_isUGQRValueKnown(ppcImlGenContext_t* ppcImlGenContext, sint32 gqrIndex, uint32& gqrValue) -{ - // the default configuration is: - // UGQR0 = 0x00000000 - // UGQR2 = 0x00040004 - // UGQR3 = 0x00050005 - // UGQR4 = 0x00060006 - // UGQR5 = 0x00070007 - // but games are free to modify UGQR2 to UGQR7 it seems. - // no game modifies UGQR0 so it's safe enough to optimize for the default value - // Ideally we would do some kind of runtime tracking and second recompilation to create fast paths for PSQ_L/PSQ_ST but thats todo - if (gqrIndex == 0) - gqrValue = 0x00000000; - else - return false; - return true; -} - -// analyses register dependencies across the entire function -// per segment this will generate information about which registers need to be preserved and which ones don't (e.g. 
are overwritten) -class IMLOptimizerRegIOAnalysis -{ - public: - // constructor with segment pointer list as span - IMLOptimizerRegIOAnalysis(std::span segmentList, uint32 maxRegId) : m_segmentList(segmentList), m_maxRegId(maxRegId) - { - m_segRegisterInOutList.resize(segmentList.size()); - } - - struct IMLSegmentRegisterInOut - { - // todo - since our register ID range is usually pretty small (<64) we could use integer bitmasks to accelerate this? There is a helper class used in RA code already - std::unordered_set regWritten; // registers which are modified in this segment - std::unordered_set regImported; // registers which are read in this segment before they are written (importing value from previous segments) - std::unordered_set regForward; // registers which are not read or written in this segment, but are imported into a later segment (propagated info) - }; - - // calculate which registers are imported (read-before-written) and forwarded (read-before-written by a later segment) per segment - // then in a second step propagate the dependencies across linked segments - void ComputeDepedencies() - { - std::vector& segRegisterInOutList = m_segRegisterInOutList; - IMLSegmentRegisterInOut* segIO = segRegisterInOutList.data(); - uint32 index = 0; - for(auto& seg : m_segmentList) - { - seg->momentaryIndex = index; - index++; - for(auto& instr : seg->imlList) - { - IMLUsedRegisters registerUsage; - instr.CheckRegisterUsage(®isterUsage); - // registers are considered imported if they are read before being written in this seg - registerUsage.ForEachReadGPR([&](IMLReg gprReg) { - IMLRegID gprId = gprReg.GetRegID(); - if (!segIO->regWritten.contains(gprId)) - { - segIO->regImported.insert(gprId); - } - }); - registerUsage.ForEachWrittenGPR([&](IMLReg gprReg) { - IMLRegID gprId = gprReg.GetRegID(); - segIO->regWritten.insert(gprId); - }); - } - segIO++; - } - // for every exit segment, import all registers - for(auto& seg : m_segmentList) - { - if 
(!seg->nextSegmentIsUncertain) - continue; - if(seg->deadCodeEliminationHintSeg) - continue; - IMLSegmentRegisterInOut& segIO = segRegisterInOutList[seg->momentaryIndex]; - for(uint32 i=0; i<=m_maxRegId; i++) - { - segIO.regImported.insert((IMLRegID)i); - } - } - // broadcast dependencies across segment chains - std::unordered_set segIdsWhichNeedUpdate; - for (uint32 i = 0; i < m_segmentList.size(); i++) - { - segIdsWhichNeedUpdate.insert(i); - } - while(!segIdsWhichNeedUpdate.empty()) - { - auto firstIt = segIdsWhichNeedUpdate.begin(); - uint32 segId = *firstIt; - segIdsWhichNeedUpdate.erase(firstIt); - // forward regImported and regForward to earlier segments into their regForward, unless the register is written - auto& curSeg = m_segmentList[segId]; - IMLSegmentRegisterInOut& curSegIO = segRegisterInOutList[segId]; - for(auto& prevSeg : curSeg->list_prevSegments) - { - IMLSegmentRegisterInOut& prevSegIO = segRegisterInOutList[prevSeg->momentaryIndex]; - bool prevSegChanged = false; - for(auto& regId : curSegIO.regImported) - { - if (!prevSegIO.regWritten.contains(regId)) - prevSegChanged |= prevSegIO.regForward.insert(regId).second; - } - for(auto& regId : curSegIO.regForward) - { - if (!prevSegIO.regWritten.contains(regId)) - prevSegChanged |= prevSegIO.regForward.insert(regId).second; - } - if(prevSegChanged) - segIdsWhichNeedUpdate.insert(prevSeg->momentaryIndex); - } - // same for hint links - for(auto& prevSeg : curSeg->list_deadCodeHintBy) - { - IMLSegmentRegisterInOut& prevSegIO = segRegisterInOutList[prevSeg->momentaryIndex]; - bool prevSegChanged = false; - for(auto& regId : curSegIO.regImported) - { - if (!prevSegIO.regWritten.contains(regId)) - prevSegChanged |= prevSegIO.regForward.insert(regId).second; - } - for(auto& regId : curSegIO.regForward) - { - if (!prevSegIO.regWritten.contains(regId)) - prevSegChanged |= prevSegIO.regForward.insert(regId).second; - } - if(prevSegChanged) - segIdsWhichNeedUpdate.insert(prevSeg->momentaryIndex); - } - } - } 
- - std::unordered_set GetRegistersNeededAtEndOfSegment(IMLSegment& seg) - { - std::unordered_set regsNeeded; - if(seg.nextSegmentIsUncertain) - { - if(seg.deadCodeEliminationHintSeg) - { - auto& nextSegIO = m_segRegisterInOutList[seg.deadCodeEliminationHintSeg->momentaryIndex]; - regsNeeded.insert(nextSegIO.regImported.begin(), nextSegIO.regImported.end()); - regsNeeded.insert(nextSegIO.regForward.begin(), nextSegIO.regForward.end()); - } - else - { - // add all regs - for(uint32 i = 0; i <= m_maxRegId; i++) - regsNeeded.insert(i); - } - return regsNeeded; - } - if(seg.nextSegmentBranchTaken) - { - auto& nextSegIO = m_segRegisterInOutList[seg.nextSegmentBranchTaken->momentaryIndex]; - regsNeeded.insert(nextSegIO.regImported.begin(), nextSegIO.regImported.end()); - regsNeeded.insert(nextSegIO.regForward.begin(), nextSegIO.regForward.end()); - } - if(seg.nextSegmentBranchNotTaken) - { - auto& nextSegIO = m_segRegisterInOutList[seg.nextSegmentBranchNotTaken->momentaryIndex]; - regsNeeded.insert(nextSegIO.regImported.begin(), nextSegIO.regImported.end()); - regsNeeded.insert(nextSegIO.regForward.begin(), nextSegIO.regForward.end()); - } - return regsNeeded; - } - - bool IsRegisterNeededAtEndOfSegment(IMLSegment& seg, IMLRegID regId) - { - if(seg.nextSegmentIsUncertain) - { - if(!seg.deadCodeEliminationHintSeg) - return true; - auto& nextSegIO = m_segRegisterInOutList[seg.deadCodeEliminationHintSeg->momentaryIndex]; - if(nextSegIO.regImported.contains(regId)) - return true; - if(nextSegIO.regForward.contains(regId)) - return true; - return false; - } - if(seg.nextSegmentBranchTaken) - { - auto& nextSegIO = m_segRegisterInOutList[seg.nextSegmentBranchTaken->momentaryIndex]; - if(nextSegIO.regImported.contains(regId)) - return true; - if(nextSegIO.regForward.contains(regId)) - return true; - } - if(seg.nextSegmentBranchNotTaken) - { - auto& nextSegIO = m_segRegisterInOutList[seg.nextSegmentBranchNotTaken->momentaryIndex]; - if(nextSegIO.regImported.contains(regId)) - 
return true; - if(nextSegIO.regForward.contains(regId)) - return true; - } - return false; - } - - private: - std::span m_segmentList; - uint32 m_maxRegId; - - std::vector m_segRegisterInOutList; - -}; - -// scan backwards starting from index and return the index of the first found instruction which writes to the given register (by id) -sint32 IMLUtil_FindInstructionWhichWritesRegister(IMLSegment& seg, sint32 startIndex, IMLReg reg, sint32 maxScanDistance = -1) -{ - sint32 endIndex = std::max(startIndex - maxScanDistance, 0); - for (sint32 i = startIndex; i >= endIndex; i--) - { - IMLInstruction& imlInstruction = seg.imlList[i]; - IMLUsedRegisters registersUsed; - imlInstruction.CheckRegisterUsage(®istersUsed); - if (registersUsed.IsBaseGPRWritten(reg)) - return i; - } - return -1; -} - -// returns true if the instruction can safely be moved while keeping ordering constraints and data dependencies intact -// initialIndex is inclusive, targetIndex is exclusive -bool IMLUtil_CanMoveInstructionTo(IMLSegment& seg, sint32 initialIndex, sint32 targetIndex) -{ - boost::container::static_vector regsWritten; - boost::container::static_vector regsRead; - // get list of read and written registers - IMLUsedRegisters registersUsed; - seg.imlList[initialIndex].CheckRegisterUsage(®istersUsed); - registersUsed.ForEachAccessedGPR([&](IMLReg reg, bool isWritten) { - if (isWritten) - regsWritten.push_back(reg.GetRegID()); - else - regsRead.push_back(reg.GetRegID()); - }); - // check all the instructions inbetween - if(initialIndex < targetIndex) - { - sint32 scanStartIndex = initialIndex+1; // +1 to skip the moving instruction itself - sint32 scanEndIndex = targetIndex; - for (sint32 i = scanStartIndex; i < scanEndIndex; i++) - { - IMLUsedRegisters registersUsed; - seg.imlList[i].CheckRegisterUsage(®istersUsed); - // in order to be able to move an instruction past another instruction, any of the read registers must not be modified (written) - // and any of it's written registers must 
not be read - bool canMove = true; - registersUsed.ForEachAccessedGPR([&](IMLReg reg, bool isWritten) { - IMLRegID regId = reg.GetRegID(); - if (!isWritten) - canMove = canMove && std::find(regsWritten.begin(), regsWritten.end(), regId) == regsWritten.end(); - else - canMove = canMove && std::find(regsRead.begin(), regsRead.end(), regId) == regsRead.end(); - }); - if(!canMove) - return false; - } - } - else - { - cemu_assert_unimplemented(); // backwards scan is todo - return false; - } - return true; -} - -sint32 IMLUtil_CountRegisterReadsInRange(IMLSegment& seg, sint32 scanStartIndex, sint32 scanEndIndex, IMLRegID regId) -{ - cemu_assert_debug(scanStartIndex <= scanEndIndex); - cemu_assert_debug(scanEndIndex < seg.imlList.size()); - sint32 count = 0; - for (sint32 i = scanStartIndex; i <= scanEndIndex; i++) - { - IMLUsedRegisters registersUsed; - seg.imlList[i].CheckRegisterUsage(®istersUsed); - registersUsed.ForEachReadGPR([&](IMLReg reg) { - if (reg.GetRegID() == regId) - count++; - }); - } - return count; -} - -// move instruction from one index to another -// instruction will be inserted before the instruction at targetIndex -// returns the new instruction index of the moved instruction -sint32 IMLUtil_MoveInstructionTo(IMLSegment& seg, sint32 initialIndex, sint32 targetIndex) -{ - cemu_assert_debug(initialIndex != targetIndex); - IMLInstruction temp = seg.imlList[initialIndex]; - if (initialIndex < targetIndex) - { - cemu_assert_debug(targetIndex > 0); - targetIndex--; - for(size_t i=initialIndex; i regsNeeded = regIoAnalysis.GetRegistersNeededAtEndOfSegment(seg); - - // start with suffix instruction - if(seg.HasSuffixInstruction()) - { - IMLInstruction& imlInstruction = seg.imlList[seg.GetSuffixInstructionIndex()]; - IMLUsedRegisters registersUsed; - imlInstruction.CheckRegisterUsage(®istersUsed); - registersUsed.ForEachWrittenGPR([&](IMLReg reg) { - regsNeeded.erase(reg.GetRegID()); - }); - registersUsed.ForEachReadGPR([&](IMLReg reg) { - 
regsNeeded.insert(reg.GetRegID()); - }); - } - // iterate instructions backwards - for (sint32 i = seg.imlList.size() - (seg.HasSuffixInstruction() ? 2:1); i >= 0; i--) - { - IMLInstruction& imlInstruction = seg.imlList[i]; - IMLUsedRegisters registersUsed; - imlInstruction.CheckRegisterUsage(®istersUsed); - // register read -> remove from overwritten list - // register written -> add to overwritten list - - // check if this instruction only writes registers which will never be read - bool onlyWritesRedundantRegisters = true; - registersUsed.ForEachWrittenGPR([&](IMLReg reg) { - if (regsNeeded.contains(reg.GetRegID())) - onlyWritesRedundantRegisters = false; - }); - // check if any of the written registers are read after this point - registersUsed.ForEachWrittenGPR([&](IMLReg reg) { - regsNeeded.erase(reg.GetRegID()); - }); - registersUsed.ForEachReadGPR([&](IMLReg reg) { - regsNeeded.insert(reg.GetRegID()); - }); - if(!imlInstruction.HasSideEffects() && onlyWritesRedundantRegisters) - { - imlInstruction.make_no_op(); - } - } -} - -void IMLOptimizerX86_SubstituteCJumpForEflagsJump(IMLOptimizerRegIOAnalysis& regIoAnalysis, IMLSegment& seg) -{ - // convert and optimize bool condition jumps to eflags condition jumps - // - Moves eflag setter (e.g. cmp) closer to eflags consumer (conditional jump) if necessary. 
If not possible but required then exit early - // - Since we only rely on eflags, the boolean register can be optimized out if DCE considers it unused - // - Further detect and optimize patterns like DEC + CMP + JCC into fused ops (todo) - - // check if this segment ends with a conditional jump - if(!seg.HasSuffixInstruction()) - return; - sint32 cjmpInstIndex = seg.GetSuffixInstructionIndex(); - if(cjmpInstIndex < 0) - return; - IMLInstruction& cjumpInstr = seg.imlList[cjmpInstIndex]; - if( cjumpInstr.type != PPCREC_IML_TYPE_CONDITIONAL_JUMP ) - return; - IMLReg regCondBool = cjumpInstr.op_conditional_jump.registerBool; - bool invertedCondition = !cjumpInstr.op_conditional_jump.mustBeTrue; - // find the instruction which sets the bool - sint32 cmpInstrIndex = IMLUtil_FindInstructionWhichWritesRegister(seg, cjmpInstIndex-1, regCondBool, 20); - if(cmpInstrIndex < 0) - return; - // check if its an instruction combo which can be optimized (currently only cmp + cjump) and get the condition - IMLInstruction& condSetterInstr = seg.imlList[cmpInstrIndex]; - IMLCondition cond; - if(condSetterInstr.type == PPCREC_IML_TYPE_COMPARE) - cond = condSetterInstr.op_compare.cond; - else if(condSetterInstr.type == PPCREC_IML_TYPE_COMPARE_S32) - cond = condSetterInstr.op_compare_s32.cond; - else - return; - // check if instructions inbetween modify eflags - sint32 indexEflagsSafeStart = -1; // index of the first instruction which does not modify eflags up to cjump - for(sint32 i = cjmpInstIndex-1; i > cmpInstrIndex; i--) - { - if(IMLOptimizerX86_ModifiesEFlags(seg.imlList[i])) - { - indexEflagsSafeStart = i+1; - break; - } - } - if(indexEflagsSafeStart >= 0) - { - cemu_assert(indexEflagsSafeStart > 0); - // there are eflags-modifying instructions inbetween the bool setter and cjump - // try to move the eflags setter close enough to the cjump (to indexEflagsSafeStart) - bool canMove = IMLUtil_CanMoveInstructionTo(seg, cmpInstrIndex, indexEflagsSafeStart); - if(!canMove) - { - return; 
- } - else - { - cmpInstrIndex = IMLUtil_MoveInstructionTo(seg, cmpInstrIndex, indexEflagsSafeStart); - } - } - // we can turn the jump into an eflags jump - cjumpInstr.make_x86_eflags_jcc(cond, invertedCondition); - - if (IMLUtil_CountRegisterReadsInRange(seg, cmpInstrIndex, cjmpInstIndex, regCondBool.GetRegID()) > 1 || regIoAnalysis.IsRegisterNeededAtEndOfSegment(seg, regCondBool.GetRegID())) - return; // bool register is used beyond the CMP, we can't drop it - - auto& cmpInstr = seg.imlList[cmpInstrIndex]; - cemu_assert_debug(cmpInstr.type == PPCREC_IML_TYPE_COMPARE || cmpInstr.type == PPCREC_IML_TYPE_COMPARE_S32); - if(cmpInstr.type == PPCREC_IML_TYPE_COMPARE) - { - IMLReg regA = cmpInstr.op_compare.regA; - IMLReg regB = cmpInstr.op_compare.regB; - seg.imlList[cmpInstrIndex].make_r_r(PPCREC_IML_OP_X86_CMP, regA, regB); - } - else - { - IMLReg regA = cmpInstr.op_compare_s32.regA; - sint32 val = cmpInstr.op_compare_s32.immS32; - seg.imlList[cmpInstrIndex].make_r_s32(PPCREC_IML_OP_X86_CMP, regA, val); - } - -} - -void IMLOptimizer_StandardOptimizationPassForSegment(IMLOptimizerRegIOAnalysis& regIoAnalysis, IMLSegment& seg) -{ - IMLOptimizer_RemoveDeadCodeFromSegment(regIoAnalysis, seg); - -#ifdef ARCH_X86_64 - // x86 specific optimizations - IMLOptimizerX86_SubstituteCJumpForEflagsJump(regIoAnalysis, seg); // this pass should be applied late since it creates invisible eflags dependencies (which would break further register dependency analysis) -#endif -} - -void IMLOptimizer_StandardOptimizationPass(ppcImlGenContext_t& ppcImlGenContext) -{ - IMLOptimizerRegIOAnalysis regIoAnalysis(ppcImlGenContext.segmentList2, ppcImlGenContext.GetMaxRegId()); - regIoAnalysis.ComputeDepedencies(); - for (IMLSegment* segIt : ppcImlGenContext.segmentList2) - { - IMLOptimizer_StandardOptimizationPassForSegment(regIoAnalysis, *segIt); - } -} diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocator.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocator.cpp deleted 
file mode 100644 index 935e61ac..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocator.cpp +++ /dev/null @@ -1,2204 +0,0 @@ -#include "IML.h" - -#include "../PPCRecompiler.h" -#include "../PPCRecompilerIml.h" -#include "IMLRegisterAllocator.h" -#include "IMLRegisterAllocatorRanges.h" - -#include "../BackendX64/BackendX64.h" -#ifdef __aarch64__ -#include "../BackendAArch64/BackendAArch64.h" -#endif - -#include -#include - -#include "Common/cpu_features.h" - -#define DEBUG_RA_EXTRA_VALIDATION 0 // if set to non-zero, additional expensive validation checks will be performed -#define DEBUG_RA_INSTRUCTION_GEN 0 - -struct IMLRARegAbstractLiveness // preliminary liveness info. One entry per register and segment -{ - IMLRARegAbstractLiveness(IMLRegFormat regBaseFormat, sint32 usageStart, sint32 usageEnd) - : regBaseFormat(regBaseFormat), usageStart(usageStart), usageEnd(usageEnd) {}; - - void TrackInstruction(sint32 index) - { - usageStart = std::min(usageStart, index); - usageEnd = std::max(usageEnd, index + 1); // exclusive index - } - - sint32 usageStart; - sint32 usageEnd; - bool isProcessed{false}; - IMLRegFormat regBaseFormat; -}; - -struct IMLRegisterAllocatorContext -{ - IMLRegisterAllocatorParameters* raParam; - ppcImlGenContext_t* deprGenContext; // deprecated. 
Try to decouple IMLRA from other parts of IML/PPCRec - - std::unordered_map regIdToBaseFormat; - // first pass - std::vector> perSegmentAbstractRanges; - - // helper methods - inline std::unordered_map& GetSegmentAbstractRangeMap(IMLSegment* imlSegment) - { - return perSegmentAbstractRanges[imlSegment->momentaryIndex]; - } - - inline IMLRegFormat GetBaseFormatByRegId(IMLRegID regId) const - { - auto it = regIdToBaseFormat.find(regId); - cemu_assert_debug(it != regIdToBaseFormat.cend()); - return it->second; - } -}; - -struct IMLFixedRegisters -{ - struct Entry - { - Entry(IMLReg reg, IMLPhysRegisterSet physRegSet) - : reg(reg), physRegSet(physRegSet) {} - - IMLReg reg; - IMLPhysRegisterSet physRegSet; - }; - boost::container::small_vector listInput; // fixed register requirements for instruction input edge - boost::container::small_vector listOutput; // fixed register requirements for instruction output edge -}; - -static void SetupCallingConvention(const IMLInstruction* instruction, IMLFixedRegisters& fixedRegs, const IMLPhysReg intParamToPhysReg[3], const IMLPhysReg floatParamToPhysReg[3], const IMLPhysReg intReturnPhysReg, const IMLPhysReg floatReturnPhysReg, IMLPhysRegisterSet volatileRegisters) -{ - sint32 numIntParams = 0, numFloatParams = 0; - - auto AddParameterMapping = [&](IMLReg reg) { - if (!reg.IsValid()) - return; - if (reg.GetBaseFormat() == IMLRegFormat::I64) - { - IMLPhysRegisterSet ps; - ps.SetAvailable(intParamToPhysReg[numIntParams]); - fixedRegs.listInput.emplace_back(reg, ps); - numIntParams++; - } - else if (reg.GetBaseFormat() == IMLRegFormat::F64) - { - IMLPhysRegisterSet ps; - ps.SetAvailable(floatParamToPhysReg[numFloatParams]); - fixedRegs.listInput.emplace_back(reg, ps); - numFloatParams++; - } - else - { - cemu_assert_suspicious(); - } - }; - AddParameterMapping(instruction->op_call_imm.regParam0); - AddParameterMapping(instruction->op_call_imm.regParam1); - AddParameterMapping(instruction->op_call_imm.regParam2); - // return value - 
if (instruction->op_call_imm.regReturn.IsValid()) - { - IMLRegFormat returnFormat = instruction->op_call_imm.regReturn.GetBaseFormat(); - bool isIntegerFormat = returnFormat == IMLRegFormat::I64 || returnFormat == IMLRegFormat::I32 || returnFormat == IMLRegFormat::I16 || returnFormat == IMLRegFormat::I8; - IMLPhysRegisterSet ps; - if (isIntegerFormat) - { - ps.SetAvailable(intReturnPhysReg); - volatileRegisters.SetReserved(intReturnPhysReg); - } - else - { - ps.SetAvailable(floatReturnPhysReg); - volatileRegisters.SetReserved(floatReturnPhysReg); - } - fixedRegs.listOutput.emplace_back(instruction->op_call_imm.regReturn, ps); - } - // block volatile registers from being used on the output edge, this makes the register allocator store them during the call - fixedRegs.listOutput.emplace_back(IMLREG_INVALID, volatileRegisters); -} - -#if defined(__aarch64__) -// aarch64 -static void GetInstructionFixedRegisters(IMLInstruction* instruction, IMLFixedRegisters& fixedRegs) -{ - fixedRegs.listInput.clear(); - fixedRegs.listOutput.clear(); - - // The purpose of GetInstructionFixedRegisters() is to constraint virtual registers to specific physical registers for instructions which need it - // on x86 this is used for instructions like SHL , CL where the CL register is hardwired. On aarch it's probably only necessary for setting up the calling convention - if (instruction->type == PPCREC_IML_TYPE_CALL_IMM) - { - const IMLPhysReg intParamToPhysReg[3] = {IMLArchAArch64::PHYSREG_GPR_BASE + 0, IMLArchAArch64::PHYSREG_GPR_BASE + 1, IMLArchAArch64::PHYSREG_GPR_BASE + 2}; - const IMLPhysReg floatParamToPhysReg[3] = {IMLArchAArch64::PHYSREG_FPR_BASE + 0, IMLArchAArch64::PHYSREG_FPR_BASE + 1, IMLArchAArch64::PHYSREG_FPR_BASE + 2}; - IMLPhysRegisterSet volatileRegs; - for (int i = 0; i <= 17; i++) // x0 to x17 are volatile - volatileRegs.SetAvailable(IMLArchAArch64::PHYSREG_GPR_BASE + i); - // v0-v7 & v16-v31 are volatile. For v8-v15 only the high 64 bits are volatile. 
- for (int i = 0; i <= 7; i++) - volatileRegs.SetAvailable(IMLArchAArch64::PHYSREG_FPR_BASE + i); - for (int i = 16; i <= 31; i++) - volatileRegs.SetAvailable(IMLArchAArch64::PHYSREG_FPR_BASE + i); - SetupCallingConvention(instruction, fixedRegs, intParamToPhysReg, floatParamToPhysReg, IMLArchAArch64::PHYSREG_GPR_BASE + 0, IMLArchAArch64::PHYSREG_FPR_BASE + 0, volatileRegs); - } -} -#else -// x86-64 -static void GetInstructionFixedRegisters(IMLInstruction* instruction, IMLFixedRegisters& fixedRegs) -{ - fixedRegs.listInput.clear(); - fixedRegs.listOutput.clear(); - - if (instruction->type == PPCREC_IML_TYPE_R_R_R) - { - if (instruction->operation == PPCREC_IML_OP_LEFT_SHIFT || instruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_S || instruction->operation == PPCREC_IML_OP_RIGHT_SHIFT_U) - { - if(!g_CPUFeatures.x86.bmi2) - { - IMLPhysRegisterSet ps; - ps.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_ECX); - fixedRegs.listInput.emplace_back(instruction->op_r_r_r.regB, ps); - } - } - } - else if (instruction->type == PPCREC_IML_TYPE_ATOMIC_CMP_STORE) - { - IMLPhysRegisterSet ps; - ps.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_EAX); - fixedRegs.listInput.emplace_back(IMLREG_INVALID, ps); // none of the inputs may use EAX - fixedRegs.listOutput.emplace_back(instruction->op_atomic_compare_store.regBoolOut, ps); // but we output to EAX - } - else if (instruction->type == PPCREC_IML_TYPE_CALL_IMM) - { - const IMLPhysReg intParamToPhysReg[3] = {IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RCX, IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RDX, IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R8}; - const IMLPhysReg floatParamToPhysReg[3] = {IMLArchX86::PHYSREG_FPR_BASE + 0, IMLArchX86::PHYSREG_FPR_BASE + 1, IMLArchX86::PHYSREG_FPR_BASE + 2}; - IMLPhysRegisterSet volatileRegs; - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RAX); - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RCX); - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + 
X86_REG_RDX); - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R8); - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R9); - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R10); - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R11); - // YMM0-YMM5 are volatile - for (int i = 0; i <= 5; i++) - volatileRegs.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + i); - // for YMM6-YMM15 only the upper 128 bits are volatile which we dont use - SetupCallingConvention(instruction, fixedRegs, intParamToPhysReg, floatParamToPhysReg, IMLArchX86::PHYSREG_GPR_BASE + X86_REG_EAX, IMLArchX86::PHYSREG_FPR_BASE + 0, volatileRegs); - } -} -#endif - -uint32 IMLRA_GetNextIterationIndex() -{ - static uint32 recRACurrentIterationIndex = 0; - recRACurrentIterationIndex++; - return recRACurrentIterationIndex; -} - -bool _detectLoop(IMLSegment* currentSegment, sint32 depth, uint32 iterationIndex, IMLSegment* imlSegmentLoopBase) -{ - if (currentSegment == imlSegmentLoopBase) - return true; - if (currentSegment->raInfo.lastIterationIndex == iterationIndex) - return currentSegment->raInfo.isPartOfProcessedLoop; - if (depth >= 9) - return false; - currentSegment->raInfo.lastIterationIndex = iterationIndex; - currentSegment->raInfo.isPartOfProcessedLoop = false; - - if (currentSegment->nextSegmentIsUncertain) - return false; - if (currentSegment->nextSegmentBranchNotTaken) - { - if (currentSegment->nextSegmentBranchNotTaken->momentaryIndex > currentSegment->momentaryIndex) - { - currentSegment->raInfo.isPartOfProcessedLoop |= _detectLoop(currentSegment->nextSegmentBranchNotTaken, depth + 1, iterationIndex, imlSegmentLoopBase); - } - } - if (currentSegment->nextSegmentBranchTaken) - { - if (currentSegment->nextSegmentBranchTaken->momentaryIndex > currentSegment->momentaryIndex) - { - currentSegment->raInfo.isPartOfProcessedLoop |= _detectLoop(currentSegment->nextSegmentBranchTaken, depth + 1, iterationIndex, imlSegmentLoopBase); - } - 
} - if (currentSegment->raInfo.isPartOfProcessedLoop) - currentSegment->loopDepth++; - return currentSegment->raInfo.isPartOfProcessedLoop; -} - -void IMLRA_DetectLoop(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegmentLoopBase) -{ - uint32 iterationIndex = IMLRA_GetNextIterationIndex(); - imlSegmentLoopBase->raInfo.lastIterationIndex = iterationIndex; - if (_detectLoop(imlSegmentLoopBase->nextSegmentBranchTaken, 0, iterationIndex, imlSegmentLoopBase)) - { - imlSegmentLoopBase->loopDepth++; - } -} - -void IMLRA_IdentifyLoop(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment) -{ - if (imlSegment->nextSegmentIsUncertain) - return; - // check if this segment has a branch that links to itself (tight loop) - if (imlSegment->nextSegmentBranchTaken == imlSegment) - { - // segment loops over itself - imlSegment->loopDepth++; - return; - } - // check if this segment has a branch that goes backwards (potential complex loop) - if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->momentaryIndex < imlSegment->momentaryIndex) - { - IMLRA_DetectLoop(ppcImlGenContext, imlSegment); - } -} - -#define SUBRANGE_LIST_SIZE (128) - -sint32 IMLRA_CountDistanceUntilNextUse(raLivenessRange* subrange, raInstructionEdge startPosition) -{ - for (sint32 i = 0; i < subrange->list_accessLocations.size(); i++) - { - if (subrange->list_accessLocations[i].pos >= startPosition) - { - auto& it = subrange->list_accessLocations[i]; - cemu_assert_debug(it.IsRead() != it.IsWrite()); // an access location can be either read or write - cemu_assert_debug(!startPosition.ConnectsToPreviousSegment() && !startPosition.ConnectsToNextSegment()); - return it.pos.GetRaw() - startPosition.GetRaw(); - } - } - cemu_assert_debug(subrange->imlSegment->imlList.size() < 10000); - return 10001 * 2; -} - -// returns -1 if there is no fixed register requirement on or after startPosition -sint32 IMLRA_CountDistanceUntilFixedRegUsageInRange(IMLSegment* imlSegment, raLivenessRange* 
range, raInstructionEdge startPosition, sint32 physRegister, bool& hasFixedAccess) -{ - hasFixedAccess = false; - cemu_assert_debug(startPosition.IsInstructionIndex()); - for (auto& fixedReqEntry : range->list_fixedRegRequirements) - { - if (fixedReqEntry.pos < startPosition) - continue; - if (fixedReqEntry.allowedReg.IsAvailable(physRegister)) - { - hasFixedAccess = true; - return fixedReqEntry.pos.GetRaw() - startPosition.GetRaw(); - } - } - cemu_assert_debug(range->interval.end.IsInstructionIndex()); - return range->interval.end.GetRaw() - startPosition.GetRaw(); -} - -sint32 IMLRA_CountDistanceUntilFixedRegUsage(IMLSegment* imlSegment, raInstructionEdge startPosition, sint32 maxDistance, IMLRegID ourRegId, sint32 physRegister) -{ - cemu_assert_debug(startPosition.IsInstructionIndex()); - raInstructionEdge lastPos2; - lastPos2.Set(imlSegment->imlList.size(), false); - - raInstructionEdge endPos; - endPos = startPosition + maxDistance; - if (endPos > lastPos2) - endPos = lastPos2; - IMLFixedRegisters fixedRegs; - if (startPosition.IsOnOutputEdge()) - GetInstructionFixedRegisters(imlSegment->imlList.data() + startPosition.GetInstructionIndex(), fixedRegs); - for (raInstructionEdge currentPos = startPosition; currentPos <= endPos; ++currentPos) - { - if (currentPos.IsOnInputEdge()) - { - GetInstructionFixedRegisters(imlSegment->imlList.data() + currentPos.GetInstructionIndex(), fixedRegs); - } - auto& fixedRegAccess = currentPos.IsOnInputEdge() ? fixedRegs.listInput : fixedRegs.listOutput; - for (auto& fixedRegLoc : fixedRegAccess) - { - if (fixedRegLoc.reg.IsInvalid() || fixedRegLoc.reg.GetRegID() != ourRegId) - { - cemu_assert_debug(fixedRegLoc.reg.IsInvalid() || fixedRegLoc.physRegSet.HasExactlyOneAvailable()); // this whole function only makes sense when there is only one fixed register, otherwise there are extra permutations to consider. 
Except for IMLREG_INVALID which is used to indicate reserved registers - if (fixedRegLoc.physRegSet.IsAvailable(physRegister)) - return currentPos.GetRaw() - startPosition.GetRaw(); - } - } - } - return endPos.GetRaw() - startPosition.GetRaw(); -} - -// count how many instructions there are until physRegister is used by any subrange or reserved for any fixed register requirement (returns 0 if register is in use at startIndex) -sint32 PPCRecRA_countDistanceUntilNextLocalPhysRegisterUse(IMLSegment* imlSegment, raInstructionEdge startPosition, sint32 physRegister) -{ - cemu_assert_debug(startPosition.IsInstructionIndex()); - sint32 minDistance = (sint32)imlSegment->imlList.size() * 2 - startPosition.GetRaw(); - // next - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - if (subrangeItr->GetPhysicalRegister() != physRegister) - { - subrangeItr = subrangeItr->link_allSegmentRanges.next; - continue; - } - if (subrangeItr->interval.ContainsEdge(startPosition)) - return 0; - if (subrangeItr->interval.end < startPosition) - { - subrangeItr = subrangeItr->link_allSegmentRanges.next; - continue; - } - cemu_assert_debug(startPosition <= subrangeItr->interval.start); - sint32 currentDist = subrangeItr->interval.start.GetRaw() - startPosition.GetRaw(); - minDistance = std::min(minDistance, currentDist); - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - return minDistance; -} - -struct IMLRALivenessTimeline -{ - IMLRALivenessTimeline() - { - } - - // manually add an active range - void AddActiveRange(raLivenessRange* subrange) - { - activeRanges.emplace_back(subrange); - } - - void ExpireRanges(raInstructionEdge expireUpTo) - { - expiredRanges.clear(); - size_t count = activeRanges.size(); - for (size_t f = 0; f < count; f++) - { - raLivenessRange* liverange = activeRanges[f]; - if (liverange->interval.end < expireUpTo) // this was <= but since end is not inclusive we need to use < - { -#ifdef CEMU_DEBUG_ASSERT - 
if (!expireUpTo.ConnectsToNextSegment() && (liverange->subrangeBranchTaken || liverange->subrangeBranchNotTaken)) - assert_dbg(); // infinite subranges should not expire -#endif - expiredRanges.emplace_back(liverange); - // remove entry - activeRanges[f] = activeRanges[count - 1]; - f--; - count--; - } - } - if (count != activeRanges.size()) - activeRanges.resize(count); - } - - std::span GetExpiredRanges() - { - return {expiredRanges.data(), expiredRanges.size()}; - } - - std::span GetActiveRanges() - { - return {activeRanges.data(), activeRanges.size()}; - } - - raLivenessRange* GetActiveRangeByVirtualRegId(IMLRegID regId) - { - for (auto& it : activeRanges) - if (it->virtualRegister == regId) - return it; - return nullptr; - } - - raLivenessRange* GetActiveRangeByPhysicalReg(sint32 physReg) - { - cemu_assert_debug(physReg >= 0); - for (auto& it : activeRanges) - if (it->physicalRegister == physReg) - return it; - return nullptr; - } - - boost::container::small_vector activeRanges; - - private: - boost::container::small_vector expiredRanges; -}; - -// mark occupied registers by any overlapping range as unavailable in physRegSet -void PPCRecRA_MaskOverlappingPhysRegForGlobalRange(raLivenessRange* range2, IMLPhysRegisterSet& physRegSet) -{ - auto clusterRanges = range2->GetAllSubrangesInCluster(); - for (auto& subrange : clusterRanges) - { - IMLSegment* imlSegment = subrange->imlSegment; - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - if (subrange == subrangeItr) - { - // next - subrangeItr = subrangeItr->link_allSegmentRanges.next; - continue; - } - if (subrange->interval.IsOverlapping(subrangeItr->interval)) - { - if (subrangeItr->GetPhysicalRegister() >= 0) - physRegSet.SetReserved(subrangeItr->GetPhysicalRegister()); - } - // next - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - } -} - -bool _livenessRangeStartCompare(raLivenessRange* lhs, raLivenessRange* rhs) -{ - return 
lhs->interval.start < rhs->interval.start; -} - -void _sortSegmentAllSubrangesLinkedList(IMLSegment* imlSegment) -{ - raLivenessRange* subrangeList[4096 + 1]; - sint32 count = 0; - // disassemble linked list - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - cemu_assert(count < 4096); - subrangeList[count] = subrangeItr; - count++; - // next - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - if (count == 0) - { - imlSegment->raInfo.linkedList_allSubranges = nullptr; - return; - } - // sort - std::sort(subrangeList, subrangeList + count, _livenessRangeStartCompare); - // reassemble linked list - subrangeList[count] = nullptr; - imlSegment->raInfo.linkedList_allSubranges = subrangeList[0]; - subrangeList[0]->link_allSegmentRanges.prev = nullptr; - subrangeList[0]->link_allSegmentRanges.next = subrangeList[1]; - for (sint32 i = 1; i < count; i++) - { - subrangeList[i]->link_allSegmentRanges.prev = subrangeList[i - 1]; - subrangeList[i]->link_allSegmentRanges.next = subrangeList[i + 1]; - } - // validate list -#if DEBUG_RA_EXTRA_VALIDATION - sint32 count2 = 0; - subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - raInstructionEdge currentStartPosition; - currentStartPosition.SetRaw(RA_INTER_RANGE_START); - while (subrangeItr) - { - count2++; - if (subrangeItr->interval2.start < currentStartPosition) - assert_dbg(); - currentStartPosition = subrangeItr->interval2.start; - // next - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - if (count != count2) - assert_dbg(); -#endif -} - -std::unordered_map& IMLRA_GetSubrangeMap(IMLSegment* imlSegment) -{ - return imlSegment->raInfo.linkedList_perVirtualRegister; -} - -raLivenessRange* IMLRA_GetSubrange(IMLSegment* imlSegment, IMLRegID regId) -{ - auto it = imlSegment->raInfo.linkedList_perVirtualRegister.find(regId); - if (it == imlSegment->raInfo.linkedList_perVirtualRegister.end()) - return nullptr; - return it->second; -} - -struct 
raFixedRegRequirementWithVGPR -{ - raFixedRegRequirementWithVGPR(raInstructionEdge pos, IMLPhysRegisterSet allowedReg, IMLRegID regId) - : pos(pos), allowedReg(allowedReg), regId(regId) {} - - raInstructionEdge pos; - IMLPhysRegisterSet allowedReg; - IMLRegID regId; -}; - -std::vector IMLRA_BuildSegmentInstructionFixedRegList(IMLSegment* imlSegment) -{ - std::vector frrList; - size_t index = 0; - while (index < imlSegment->imlList.size()) - { - IMLFixedRegisters fixedRegs; - GetInstructionFixedRegisters(&imlSegment->imlList[index], fixedRegs); - raInstructionEdge pos; - pos.Set(index, true); - for (auto& fixedRegAccess : fixedRegs.listInput) - { - frrList.emplace_back(pos, fixedRegAccess.physRegSet, fixedRegAccess.reg.IsValid() ? fixedRegAccess.reg.GetRegID() : IMLRegID_INVALID); - } - pos = pos + 1; - for (auto& fixedRegAccess : fixedRegs.listOutput) - { - frrList.emplace_back(pos, fixedRegAccess.physRegSet, fixedRegAccess.reg.IsValid() ? fixedRegAccess.reg.GetRegID() : IMLRegID_INVALID); - } - index++; - } - return frrList; -} - -boost::container::small_vector IMLRA_GetRangeWithFixedRegReservationOverlappingPos(IMLSegment* imlSegment, raInstructionEdge pos, IMLPhysReg physReg) -{ - boost::container::small_vector rangeList; - for (raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; currentRange; currentRange = currentRange->link_allSegmentRanges.next) - { - if (!currentRange->interval.ContainsEdge(pos)) - continue; - IMLPhysRegisterSet allowedRegs; - if (!currentRange->GetAllowedRegistersEx(allowedRegs)) - continue; - if (allowedRegs.IsAvailable(physReg)) - rangeList.emplace_back(currentRange); - } - return rangeList; -} - -void IMLRA_HandleFixedRegisters(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment) -{ - // first pass - iterate over all ranges with fixed register requirements and split them if they cross the segment border - // todo - this pass currently creates suboptimal results by splitting all ranges that cross the 
segment border if they have any fixed register requirement. This can be avoided in some cases - for (raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; currentRange;) - { - IMLPhysRegisterSet allowedRegs; - if(currentRange->list_fixedRegRequirements.empty()) - { - currentRange = currentRange->link_allSegmentRanges.next; - continue; // since we run this pass for every segment we dont need to do global checks here for clusters which may not even have fixed register requirements - } - if (!currentRange->GetAllowedRegistersEx(allowedRegs)) - { - currentRange = currentRange->link_allSegmentRanges.next; - continue; - } - if (currentRange->interval.ExtendsPreviousSegment() || currentRange->interval.ExtendsIntoNextSegment()) - { - raLivenessRange* nextRange = currentRange->link_allSegmentRanges.next; - IMLRA_ExplodeRangeCluster(ppcImlGenContext, currentRange); - currentRange = nextRange; - continue; - } - currentRange = currentRange->link_allSegmentRanges.next; - } - // second pass - look for ranges with conflicting fixed register requirements and split these too (locally) - for (raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; currentRange; currentRange = currentRange->link_allSegmentRanges.next) - { - IMLPhysRegisterSet allowedRegs; - if (currentRange->list_fixedRegRequirements.empty()) - continue; // we dont need to check whole clusters because the pass above guarantees that there are no ranges with fixed register requirements that extend outside of this segment - if (!currentRange->GetAllowedRegistersEx(allowedRegs)) - continue; - if (allowedRegs.HasAnyAvailable()) - continue; - cemu_assert_unimplemented(); - } - // third pass - assign fixed registers, split ranges if needed - std::vector frr = IMLRA_BuildSegmentInstructionFixedRegList(imlSegment); - std::unordered_map lastVGPR; - for (size_t i = 0; i < frr.size(); i++) - { - raFixedRegRequirementWithVGPR& entry = frr[i]; - // we currently only handle fixed register 
requirements with a single register - // with one exception: When regId is IMLRegID_INVALID then the entry acts as a list of reserved registers - cemu_assert_debug(entry.regId == IMLRegID_INVALID || entry.allowedReg.HasExactlyOneAvailable()); - for (IMLPhysReg physReg = entry.allowedReg.GetFirstAvailableReg(); physReg >= 0; physReg = entry.allowedReg.GetNextAvailableReg(physReg + 1)) - { - // check if the assigned vGPR has changed - bool vgprHasChanged = false; - auto it = lastVGPR.find(physReg); - if (it != lastVGPR.end()) - vgprHasChanged = it->second != entry.regId; - else - vgprHasChanged = true; - lastVGPR[physReg] = entry.regId; - - if (!vgprHasChanged) - continue; - - boost::container::small_vector overlappingRanges = IMLRA_GetRangeWithFixedRegReservationOverlappingPos(imlSegment, entry.pos, physReg); - if (entry.regId != IMLRegID_INVALID) - cemu_assert_debug(!overlappingRanges.empty()); // there should always be at least one range that overlaps corresponding to the fixed register requirement, except for IMLRegID_INVALID which is used to indicate reserved registers - - for (auto& range : overlappingRanges) - { - if (range->interval.start < entry.pos) - { - IMLRA_SplitRange(ppcImlGenContext, range, entry.pos, true); - } - } - } - } - // finally iterate ranges and assign fixed registers - for (raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; currentRange; currentRange = currentRange->link_allSegmentRanges.next) - { - IMLPhysRegisterSet allowedRegs; - if (currentRange->list_fixedRegRequirements.empty()) - continue; // we dont need to check whole clusters because the pass above guarantees that there are no ranges with fixed register requirements that extend outside of this segment - if (!currentRange->GetAllowedRegistersEx(allowedRegs)) - { - cemu_assert_debug(currentRange->list_fixedRegRequirements.empty()); - continue; - } - cemu_assert_debug(allowedRegs.HasExactlyOneAvailable()); - 
currentRange->SetPhysicalRegister(allowedRegs.GetFirstAvailableReg()); - } - // DEBUG - check for collisions and make sure all ranges with fixed register requirements got their physical register assigned -#if DEBUG_RA_EXTRA_VALIDATION - for (raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; currentRange; currentRange = currentRange->link_allSegmentRanges.next) - { - IMLPhysRegisterSet allowedRegs; - if (!currentRange->HasPhysicalRegister()) - continue; - for (raLivenessRange* currentRange2 = imlSegment->raInfo.linkedList_allSubranges; currentRange2; currentRange2 = currentRange2->link_allSegmentRanges.next) - { - if (currentRange == currentRange2) - continue; - if (currentRange->interval2.IsOverlapping(currentRange2->interval2)) - { - cemu_assert_debug(currentRange->GetPhysicalRegister() != currentRange2->GetPhysicalRegister()); - } - } - } -#endif -} - -// we should not split ranges on instructions with tied registers (i.e. where a register encoded as a single parameter is both input and output) -// otherwise the RA algorithm has to assign both ranges the same physical register (not supported yet) and the point of splitting to fit another range is nullified -void IMLRA_MakeSafeSplitPosition(IMLSegment* imlSegment, raInstructionEdge& pos) -{ - // we ignore the instruction for now and just always make it a safe split position - cemu_assert_debug(pos.IsInstructionIndex()); - if (pos.IsOnOutputEdge()) - pos = pos - 1; -} - -// convenience wrapper for IMLRA_MakeSafeSplitPosition -void IMLRA_MakeSafeSplitDistance(IMLSegment* imlSegment, raInstructionEdge startPos, sint32& distance) -{ - cemu_assert_debug(startPos.IsInstructionIndex()); - cemu_assert_debug(distance >= 0); - raInstructionEdge endPos = startPos + distance; - IMLRA_MakeSafeSplitPosition(imlSegment, endPos); - if (endPos < startPos) - { - distance = 0; - return; - } - distance = endPos.GetRaw() - startPos.GetRaw(); -} - -static void DbgVerifyAllRanges(IMLRegisterAllocatorContext& 
ctx); - -class RASpillStrategy -{ - public: - virtual void Apply(ppcImlGenContext_t* ctx, IMLSegment* imlSegment, raLivenessRange* currentRange) = 0; - - sint32 GetCost() - { - return strategyCost; - } - - protected: - void ResetCost() - { - strategyCost = INT_MAX; - } - - sint32 strategyCost; -}; - -class RASpillStrategy_LocalRangeHoleCutting : public RASpillStrategy -{ - public: - void Reset() - { - localRangeHoleCutting.distance = -1; - localRangeHoleCutting.largestHoleSubrange = nullptr; - ResetCost(); - } - - void Evaluate(IMLSegment* imlSegment, raLivenessRange* currentRange, const IMLRALivenessTimeline& timeline, const IMLPhysRegisterSet& allowedRegs) - { - raInstructionEdge currentRangeStart = currentRange->interval.start; - sint32 requiredSize2 = currentRange->interval.GetPreciseDistance(); - cemu_assert_debug(localRangeHoleCutting.distance == -1); - cemu_assert_debug(strategyCost == INT_MAX); - if (!currentRangeStart.ConnectsToPreviousSegment()) - { - cemu_assert_debug(currentRangeStart.GetRaw() >= 0); - for (auto candidate : timeline.activeRanges) - { - if (candidate->interval.ExtendsIntoNextSegment()) - continue; - // new checks (Oct 2024): - if (candidate == currentRange) - continue; - if (candidate->GetPhysicalRegister() < 0) - continue; - if (!allowedRegs.IsAvailable(candidate->GetPhysicalRegister())) - continue; - - sint32 distance2 = IMLRA_CountDistanceUntilNextUse(candidate, currentRangeStart); - IMLRA_MakeSafeSplitDistance(imlSegment, currentRangeStart, distance2); - if (distance2 < 2) - continue; - cemu_assert_debug(currentRangeStart.IsInstructionIndex()); - distance2 = std::min(distance2, imlSegment->imlList.size() * 2 - currentRangeStart.GetRaw()); // limit distance to end of segment - // calculate split cost of candidate - sint32 cost = IMLRA_CalculateAdditionalCostAfterSplit(candidate, currentRangeStart + distance2); - // calculate additional split cost of currentRange if hole is not large enough - if (distance2 < requiredSize2) - { - cost 
+= IMLRA_CalculateAdditionalCostAfterSplit(currentRange, currentRangeStart + distance2); - // we also slightly increase cost in relation to the remaining length (in order to make the algorithm prefer larger holes) - cost += (requiredSize2 - distance2) / 10; - } - // compare cost with previous candidates - if (cost < strategyCost) - { - strategyCost = cost; - localRangeHoleCutting.distance = distance2; - localRangeHoleCutting.largestHoleSubrange = candidate; - } - } - } - } - - void Apply(ppcImlGenContext_t* ctx, IMLSegment* imlSegment, raLivenessRange* currentRange) override - { - cemu_assert_debug(strategyCost != INT_MAX); - sint32 requiredSize2 = currentRange->interval.GetPreciseDistance(); - raInstructionEdge currentRangeStart = currentRange->interval.start; - - raInstructionEdge holeStartPosition = currentRangeStart; - raInstructionEdge holeEndPosition = currentRangeStart + localRangeHoleCutting.distance; - raLivenessRange* collisionRange = localRangeHoleCutting.largestHoleSubrange; - - if (collisionRange->interval.start < holeStartPosition) - { - collisionRange = IMLRA_SplitRange(nullptr, collisionRange, holeStartPosition, true); - cemu_assert_debug(!collisionRange || collisionRange->interval.start >= holeStartPosition); // verify if splitting worked at all, tail must be on or after the split point - cemu_assert_debug(!collisionRange || collisionRange->interval.start >= holeEndPosition); // also verify that the trimmed hole is actually big enough - } - else - { - cemu_assert_unimplemented(); // we still need to trim? 
- } - // we may also have to cut the current range to fit partially into the hole - if (requiredSize2 > localRangeHoleCutting.distance) - { - raLivenessRange* tailRange = IMLRA_SplitRange(nullptr, currentRange, currentRangeStart + localRangeHoleCutting.distance, true); - if (tailRange) - { - cemu_assert_debug(tailRange->list_fixedRegRequirements.empty()); // we are not allowed to unassign fixed registers - tailRange->UnsetPhysicalRegister(); - } - } - // verify that the hole is large enough - if (collisionRange) - { - cemu_assert_debug(!collisionRange->interval.IsOverlapping(currentRange->interval)); - } - } - - private: - struct - { - sint32 distance; - raLivenessRange* largestHoleSubrange; - } localRangeHoleCutting; -}; - -class RASpillStrategy_AvailableRegisterHole : public RASpillStrategy -{ - // split current range (this is generally only a good choice when the current range is long but has few usages) - public: - void Reset() - { - ResetCost(); - availableRegisterHole.distance = -1; - availableRegisterHole.physRegister = -1; - } - - void Evaluate(IMLSegment* imlSegment, raLivenessRange* currentRange, const IMLRALivenessTimeline& timeline, const IMLPhysRegisterSet& localAvailableRegsMask, const IMLPhysRegisterSet& allowedRegs) - { - sint32 requiredSize2 = currentRange->interval.GetPreciseDistance(); - - raInstructionEdge currentRangeStart = currentRange->interval.start; - cemu_assert_debug(strategyCost == INT_MAX); - availableRegisterHole.distance = -1; - availableRegisterHole.physRegister = -1; - if (currentRangeStart.GetRaw() >= 0) - { - if (localAvailableRegsMask.HasAnyAvailable()) - { - sint32 physRegItr = -1; - while (true) - { - physRegItr = localAvailableRegsMask.GetNextAvailableReg(physRegItr + 1); - if (physRegItr < 0) - break; - if (!allowedRegs.IsAvailable(physRegItr)) - continue; - // get size of potential hole for this register - sint32 distance = PPCRecRA_countDistanceUntilNextLocalPhysRegisterUse(imlSegment, currentRangeStart, physRegItr); - - 
// some instructions may require the same register for another range, check the distance here - sint32 distUntilFixedReg = IMLRA_CountDistanceUntilFixedRegUsage(imlSegment, currentRangeStart, distance, currentRange->GetVirtualRegister(), physRegItr); - if (distUntilFixedReg < distance) - distance = distUntilFixedReg; - - IMLRA_MakeSafeSplitDistance(imlSegment, currentRangeStart, distance); - if (distance < 2) - continue; - // calculate additional cost due to split - cemu_assert_debug(distance < requiredSize2); // should always be true otherwise previous step would have selected this register? - sint32 cost = IMLRA_CalculateAdditionalCostAfterSplit(currentRange, currentRangeStart + distance); - // add small additional cost for the remaining range (prefer larger holes) - cost += ((requiredSize2 - distance) / 2) / 10; - if (cost < strategyCost) - { - strategyCost = cost; - availableRegisterHole.distance = distance; - availableRegisterHole.physRegister = physRegItr; - } - } - } - } - } - - void Apply(ppcImlGenContext_t* ctx, IMLSegment* imlSegment, raLivenessRange* currentRange) override - { - cemu_assert_debug(strategyCost != INT_MAX); - raInstructionEdge currentRangeStart = currentRange->interval.start; - // use available register - raLivenessRange* tailRange = IMLRA_SplitRange(nullptr, currentRange, currentRangeStart + availableRegisterHole.distance, true); - if (tailRange) - { - cemu_assert_debug(tailRange->list_fixedRegRequirements.empty()); // we are not allowed to unassign fixed registers - tailRange->UnsetPhysicalRegister(); - } - } - - private: - struct - { - sint32 physRegister; - sint32 distance; // size of hole - } availableRegisterHole; -}; - -class RASpillStrategy_ExplodeRange : public RASpillStrategy -{ - public: - void Reset() - { - ResetCost(); - explodeRange.range = nullptr; - explodeRange.distance = -1; - } - - void Evaluate(IMLSegment* imlSegment, raLivenessRange* currentRange, const IMLRALivenessTimeline& timeline, const IMLPhysRegisterSet& 
allowedRegs) - { - raInstructionEdge currentRangeStart = currentRange->interval.start; - if (currentRangeStart.ConnectsToPreviousSegment()) - currentRangeStart.Set(0, true); - sint32 requiredSize2 = currentRange->interval.GetPreciseDistance(); - cemu_assert_debug(strategyCost == INT_MAX); - explodeRange.range = nullptr; - explodeRange.distance = -1; - for (auto candidate : timeline.activeRanges) - { - if (!candidate->interval.ExtendsIntoNextSegment()) - continue; - // new checks (Oct 2024): - if (candidate == currentRange) - continue; - if (candidate->GetPhysicalRegister() < 0) - continue; - if (!allowedRegs.IsAvailable(candidate->GetPhysicalRegister())) - continue; - - sint32 distance = IMLRA_CountDistanceUntilNextUse(candidate, currentRangeStart); - IMLRA_MakeSafeSplitDistance(imlSegment, currentRangeStart, distance); - if (distance < 2) - continue; - sint32 cost = IMLRA_CalculateAdditionalCostOfRangeExplode(candidate); - // if the hole is not large enough, add cost of splitting current subrange - if (distance < requiredSize2) - { - cost += IMLRA_CalculateAdditionalCostAfterSplit(currentRange, currentRangeStart + distance); - // add small additional cost for the remaining range (prefer larger holes) - cost += ((requiredSize2 - distance) / 2) / 10; - } - // compare with current best candidate for this strategy - if (cost < strategyCost) - { - strategyCost = cost; - explodeRange.distance = distance; - explodeRange.range = candidate; - } - } - } - - void Apply(ppcImlGenContext_t* ctx, IMLSegment* imlSegment, raLivenessRange* currentRange) override - { - raInstructionEdge currentRangeStart = currentRange->interval.start; - if (currentRangeStart.ConnectsToPreviousSegment()) - currentRangeStart.Set(0, true); - sint32 requiredSize2 = currentRange->interval.GetPreciseDistance(); - // explode range - IMLRA_ExplodeRangeCluster(nullptr, explodeRange.range); - // split current subrange if necessary - if (requiredSize2 > explodeRange.distance) - { - raLivenessRange* tailRange 
= IMLRA_SplitRange(nullptr, currentRange, currentRangeStart + explodeRange.distance, true); - if (tailRange) - { - cemu_assert_debug(tailRange->list_fixedRegRequirements.empty()); // we are not allowed to unassign fixed registers - tailRange->UnsetPhysicalRegister(); - } - } - } - - private: - struct - { - raLivenessRange* range; - sint32 distance; // size of hole - // note: If we explode a range, we still have to check the size of the hole that becomes available, if too small then we need to add cost of splitting local subrange - } explodeRange; -}; - -class RASpillStrategy_ExplodeRangeInter : public RASpillStrategy -{ - public: - void Reset() - { - ResetCost(); - explodeRange.range = nullptr; - explodeRange.distance = -1; - } - - void Evaluate(IMLSegment* imlSegment, raLivenessRange* currentRange, const IMLRALivenessTimeline& timeline, const IMLPhysRegisterSet& allowedRegs) - { - // explode the range with the least cost - cemu_assert_debug(strategyCost == INT_MAX); - cemu_assert_debug(explodeRange.range == nullptr && explodeRange.distance == -1); - for (auto candidate : timeline.activeRanges) - { - if (!candidate->interval.ExtendsIntoNextSegment()) - continue; - // only select candidates that clash with current subrange - if (candidate->GetPhysicalRegister() < 0 && candidate != currentRange) - continue; - // and also filter any that dont meet fixed register requirements - if (!allowedRegs.IsAvailable(candidate->GetPhysicalRegister())) - continue; - sint32 cost; - cost = IMLRA_CalculateAdditionalCostOfRangeExplode(candidate); - // compare with current best candidate for this strategy - if (cost < strategyCost) - { - strategyCost = cost; - explodeRange.distance = INT_MAX; - explodeRange.range = candidate; - } - } - // add current range as a candidate too - sint32 ownCost; - ownCost = IMLRA_CalculateAdditionalCostOfRangeExplode(currentRange); - if (ownCost < strategyCost) - { - strategyCost = ownCost; - explodeRange.distance = INT_MAX; - explodeRange.range = 
currentRange; - } - } - - void Apply(ppcImlGenContext_t* ctx, IMLSegment* imlSegment, raLivenessRange* currentRange) override - { - cemu_assert_debug(strategyCost != INT_MAX); - IMLRA_ExplodeRangeCluster(ctx, explodeRange.range); - } - - private: - struct - { - raLivenessRange* range; - sint32 distance; // size of hole - // note: If we explode a range, we still have to check the size of the hole that becomes available, if too small then we need to add cost of splitting local subrange - }explodeRange; -}; - -// filter any registers from candidatePhysRegSet which cannot be used by currentRange due to fixed register requirements within the range that it occupies -void IMLRA_FilterReservedFixedRegisterRequirementsForSegment(IMLRegisterAllocatorContext& ctx, raLivenessRange* currentRange, IMLPhysRegisterSet& candidatePhysRegSet) -{ - IMLSegment* seg = currentRange->imlSegment; - if (seg->imlList.empty()) - return; // there can be no fixed register requirements if there are no instructions - - raInstructionEdge firstPos = currentRange->interval.start; - if (currentRange->interval.start.ConnectsToPreviousSegment()) - firstPos.SetRaw(0); - else if (currentRange->interval.start.ConnectsToNextSegment()) - firstPos.Set(seg->imlList.size() - 1, false); - - raInstructionEdge lastPos = currentRange->interval.end; - if (currentRange->interval.end.ConnectsToPreviousSegment()) - lastPos.SetRaw(0); - else if (currentRange->interval.end.ConnectsToNextSegment()) - lastPos.Set(seg->imlList.size() - 1, false); - cemu_assert_debug(firstPos <= lastPos); - - IMLRegID ourRegId = currentRange->GetVirtualRegister(); - - IMLFixedRegisters fixedRegs; - if (firstPos.IsOnOutputEdge()) - GetInstructionFixedRegisters(seg->imlList.data() + firstPos.GetInstructionIndex(), fixedRegs); - for (raInstructionEdge currentPos = firstPos; currentPos <= lastPos; ++currentPos) - { - if (currentPos.IsOnInputEdge()) - { - GetInstructionFixedRegisters(seg->imlList.data() + currentPos.GetInstructionIndex(), 
fixedRegs); - } - auto& fixedRegAccess = currentPos.IsOnInputEdge() ? fixedRegs.listInput : fixedRegs.listOutput; - for (auto& fixedRegLoc : fixedRegAccess) - { - if (fixedRegLoc.reg.IsInvalid() || fixedRegLoc.reg.GetRegID() != ourRegId) - candidatePhysRegSet.RemoveRegisters(fixedRegLoc.physRegSet); - } - } -} - -// filter out any registers along the range cluster -void IMLRA_FilterReservedFixedRegisterRequirementsForCluster(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment, raLivenessRange* currentRange, IMLPhysRegisterSet& candidatePhysRegSet) -{ - cemu_assert_debug(currentRange->imlSegment == imlSegment); - if (currentRange->interval.ExtendsPreviousSegment() || currentRange->interval.ExtendsIntoNextSegment()) - { - auto clusterRanges = currentRange->GetAllSubrangesInCluster(); - for (auto& rangeIt : clusterRanges) - { - IMLRA_FilterReservedFixedRegisterRequirementsForSegment(ctx, rangeIt, candidatePhysRegSet); - if (!candidatePhysRegSet.HasAnyAvailable()) - break; - } - return; - } - IMLRA_FilterReservedFixedRegisterRequirementsForSegment(ctx, currentRange, candidatePhysRegSet); -} - -bool IMLRA_AssignSegmentRegisters(IMLRegisterAllocatorContext& ctx, ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment) -{ - // sort subranges ascending by start index - _sortSegmentAllSubrangesLinkedList(imlSegment); - - IMLRALivenessTimeline livenessTimeline; - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - raInstructionEdge lastInstructionEdge; - lastInstructionEdge.SetRaw(RA_INTER_RANGE_END); - - struct - { - RASpillStrategy_LocalRangeHoleCutting localRangeHoleCutting; - RASpillStrategy_AvailableRegisterHole availableRegisterHole; - RASpillStrategy_ExplodeRange explodeRange; - // for ranges that connect to follow up segments: - RASpillStrategy_ExplodeRangeInter explodeRangeInter; - } strategy; - - while (subrangeItr) - { - raInstructionEdge currentRangeStart = subrangeItr->interval.start; // used to be currentIndex before refactor 
- PPCRecRA_debugValidateSubrange(subrangeItr); - - livenessTimeline.ExpireRanges((currentRangeStart > lastInstructionEdge) ? lastInstructionEdge : currentRangeStart); // expire up to currentIndex (inclusive), but exclude infinite ranges - - // if subrange already has register assigned then add it to the active list and continue - if (subrangeItr->GetPhysicalRegister() >= 0) - { - // verify if register is actually available -#if DEBUG_RA_EXTRA_VALIDATION - for (auto& liverangeItr : livenessTimeline.activeRanges) - { - // check for register mismatch - cemu_assert_debug(liverangeItr->GetPhysicalRegister() != subrangeItr->GetPhysicalRegister()); - } -#endif - livenessTimeline.AddActiveRange(subrangeItr); - subrangeItr = subrangeItr->link_allSegmentRanges.next; - continue; - } - // ranges with fixed register requirements should already have a phys register assigned - if (!subrangeItr->list_fixedRegRequirements.empty()) - { - cemu_assert_debug(subrangeItr->HasPhysicalRegister()); - } - // find free register for current subrangeItr and segment - IMLRegFormat regBaseFormat = ctx.GetBaseFormatByRegId(subrangeItr->GetVirtualRegister()); - IMLPhysRegisterSet candidatePhysRegSet = ctx.raParam->GetPhysRegPool(regBaseFormat); - cemu_assert_debug(candidatePhysRegSet.HasAnyAvailable()); // no valid pool provided for this register type - - IMLPhysRegisterSet allowedRegs = subrangeItr->GetAllowedRegisters(candidatePhysRegSet); - cemu_assert_debug(allowedRegs.HasAnyAvailable()); // if zero regs are available, then this range needs to be split to avoid mismatching register requirements (do this in the initial pass to keep the code here simpler) - candidatePhysRegSet &= allowedRegs; - - for (auto& liverangeItr : livenessTimeline.activeRanges) - { - cemu_assert_debug(liverangeItr->GetPhysicalRegister() >= 0); - candidatePhysRegSet.SetReserved(liverangeItr->GetPhysicalRegister()); - } - // check intersections with other ranges and determine allowed registers - IMLPhysRegisterSet 
localAvailableRegsMask = candidatePhysRegSet; // mask of registers that are currently not used (does not include range checks in other segments) - if (candidatePhysRegSet.HasAnyAvailable()) - { - // check for overlaps on a global scale (subrangeItr can be part of a larger range cluster across multiple segments) - PPCRecRA_MaskOverlappingPhysRegForGlobalRange(subrangeItr, candidatePhysRegSet); - } - // some target instructions may enforce specific registers (e.g. common on X86 where something like SHL , CL forces CL as the count register) - // we determine the list of allowed registers here - // this really only works if we assume single-register requirements (otherwise its better not to filter out early and instead allow register corrections later but we don't support this yet) - if (candidatePhysRegSet.HasAnyAvailable()) - { - IMLRA_FilterReservedFixedRegisterRequirementsForCluster(ctx, imlSegment, subrangeItr, candidatePhysRegSet); - } - if (candidatePhysRegSet.HasAnyAvailable()) - { - // use free register - subrangeItr->SetPhysicalRegisterForCluster(candidatePhysRegSet.GetFirstAvailableReg()); - livenessTimeline.AddActiveRange(subrangeItr); - subrangeItr = subrangeItr->link_allSegmentRanges.next; // next - continue; - } - // there is no free register for the entire range - // evaluate different strategies of splitting ranges to free up another register or shorten the current range - strategy.localRangeHoleCutting.Reset(); - strategy.availableRegisterHole.Reset(); - strategy.explodeRange.Reset(); - // cant assign register - // there might be registers available, we just can't use them due to range conflicts - RASpillStrategy* selectedStrategy = nullptr; - auto SelectStrategyIfBetter = [&selectedStrategy](RASpillStrategy& newStrategy) { - if (newStrategy.GetCost() == INT_MAX) - return; - if (selectedStrategy == nullptr || newStrategy.GetCost() < selectedStrategy->GetCost()) - selectedStrategy = &newStrategy; - }; - - if 
(!subrangeItr->interval.ExtendsIntoNextSegment()) - { - // range ends in current segment, use local strategies - // evaluate strategy: Cut hole into local subrange - strategy.localRangeHoleCutting.Evaluate(imlSegment, subrangeItr, livenessTimeline, allowedRegs); - SelectStrategyIfBetter(strategy.localRangeHoleCutting); - // evaluate strategy: Split current range to fit in available holes - // todo - are checks required to avoid splitting on the suffix instruction? - strategy.availableRegisterHole.Evaluate(imlSegment, subrangeItr, livenessTimeline, localAvailableRegsMask, allowedRegs); - SelectStrategyIfBetter(strategy.availableRegisterHole); - // evaluate strategy: Explode inter-segment ranges - strategy.explodeRange.Evaluate(imlSegment, subrangeItr, livenessTimeline, allowedRegs); - SelectStrategyIfBetter(strategy.explodeRange); - } - else // if subrangeItr->interval2.ExtendsIntoNextSegment() - { - strategy.explodeRangeInter.Reset(); - strategy.explodeRangeInter.Evaluate(imlSegment, subrangeItr, livenessTimeline, allowedRegs); - SelectStrategyIfBetter(strategy.explodeRangeInter); - } - // choose strategy - if (selectedStrategy) - { - selectedStrategy->Apply(ppcImlGenContext, imlSegment, subrangeItr); - } - else - { - // none of the evulated strategies can be applied, this should only happen if the segment extends into the next segment(s) for which we have no good strategy - cemu_assert_debug(subrangeItr->interval.ExtendsPreviousSegment()); - // alternative strategy if we have no other choice: explode current range - IMLRA_ExplodeRangeCluster(ppcImlGenContext, subrangeItr); - } - return false; - } - return true; -} - -void IMLRA_AssignRegisters(IMLRegisterAllocatorContext& ctx, ppcImlGenContext_t* ppcImlGenContext) -{ - // start with frequently executed segments first - sint32 maxLoopDepth = 0; - for (IMLSegment* segIt : ppcImlGenContext->segmentList2) - { - maxLoopDepth = std::max(maxLoopDepth, segIt->loopDepth); - } - // assign fixed registers first - for 
(IMLSegment* segIt : ppcImlGenContext->segmentList2) - IMLRA_HandleFixedRegisters(ppcImlGenContext, segIt); -#if DEBUG_RA_EXTRA_VALIDATION - // fixed registers are currently handled per-segment, but here we validate that they are assigned correctly on a global scope as well - for (IMLSegment* imlSegment : ppcImlGenContext->segmentList2) - { - for (raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; currentRange; currentRange = currentRange->link_allSegmentRanges.next) - { - IMLPhysRegisterSet allowedRegs; - if (!currentRange->GetAllowedRegistersEx(allowedRegs)) - { - cemu_assert_debug(currentRange->list_fixedRegRequirements.empty()); - continue; - } - cemu_assert_debug(currentRange->HasPhysicalRegister() && allowedRegs.IsAvailable(currentRange->GetPhysicalRegister())); - } - } -#endif - - while (true) - { - bool done = false; - for (sint32 d = maxLoopDepth; d >= 0; d--) - { - for (IMLSegment* segIt : ppcImlGenContext->segmentList2) - { - if (segIt->loopDepth != d) - continue; - done = IMLRA_AssignSegmentRegisters(ctx, ppcImlGenContext, segIt); - if (done == false) - break; - } - if (done == false) - break; - } - if (done) - break; - } -} - -void IMLRA_ReshapeForRegisterAllocation(ppcImlGenContext_t* ppcImlGenContext) -{ - // insert empty segments after every non-taken branch if the linked segment has more than one input - // this gives the register allocator more room to create efficient spill code - size_t segmentIndex = 0; - while (segmentIndex < ppcImlGenContext->segmentList2.size()) - { - IMLSegment* imlSegment = ppcImlGenContext->segmentList2[segmentIndex]; - if (imlSegment->nextSegmentIsUncertain) - { - segmentIndex++; - continue; - } - if (imlSegment->nextSegmentBranchTaken == nullptr || imlSegment->nextSegmentBranchNotTaken == nullptr) - { - segmentIndex++; - continue; - } - if (imlSegment->nextSegmentBranchNotTaken->list_prevSegments.size() <= 1) - { - segmentIndex++; - continue; - } - if 
(imlSegment->nextSegmentBranchNotTaken->isEnterable) - { - segmentIndex++; - continue; - } - PPCRecompilerIml_insertSegments(ppcImlGenContext, segmentIndex + 1, 1); - IMLSegment* imlSegmentP0 = ppcImlGenContext->segmentList2[segmentIndex + 0]; - IMLSegment* imlSegmentP1 = ppcImlGenContext->segmentList2[segmentIndex + 1]; - IMLSegment* nextSegment = imlSegment->nextSegmentBranchNotTaken; - IMLSegment_RemoveLink(imlSegmentP0, nextSegment); - IMLSegment_SetLinkBranchNotTaken(imlSegmentP1, nextSegment); - IMLSegment_SetLinkBranchNotTaken(imlSegmentP0, imlSegmentP1); - segmentIndex++; - } - // detect loops - for (size_t s = 0; s < ppcImlGenContext->segmentList2.size(); s++) - { - IMLSegment* imlSegment = ppcImlGenContext->segmentList2[s]; - imlSegment->momentaryIndex = s; - } - for (size_t s = 0; s < ppcImlGenContext->segmentList2.size(); s++) - { - IMLSegment* imlSegment = ppcImlGenContext->segmentList2[s]; - IMLRA_IdentifyLoop(ppcImlGenContext, imlSegment); - } -} - -IMLRARegAbstractLiveness* _GetAbstractRange(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment, IMLRegID regId) -{ - auto& segMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - auto it = segMap.find(regId); - return it != segMap.end() ? 
&it->second : nullptr; -} - -// scan instructions and establish register usage range for segment -void IMLRA_CalculateSegmentMinMaxAbstractRanges(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment) -{ - size_t instructionIndex = 0; - IMLUsedRegisters gprTracking; - auto& segDistMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - while (instructionIndex < imlSegment->imlList.size()) - { - imlSegment->imlList[instructionIndex].CheckRegisterUsage(&gprTracking); - gprTracking.ForEachAccessedGPR([&](IMLReg gprReg, bool isWritten) { - IMLRegID gprId = gprReg.GetRegID(); - auto it = segDistMap.find(gprId); - if (it == segDistMap.end()) - { - segDistMap.try_emplace(gprId, gprReg.GetBaseFormat(), (sint32)instructionIndex, (sint32)instructionIndex + 1); - ctx.regIdToBaseFormat.try_emplace(gprId, gprReg.GetBaseFormat()); - } - else - { - it->second.TrackInstruction(instructionIndex); -#ifdef CEMU_DEBUG_ASSERT - cemu_assert_debug(ctx.regIdToBaseFormat[gprId] == gprReg.GetBaseFormat()); // the base type per register always has to be the same -#endif - } - }); - instructionIndex++; - } -} - -void IMLRA_CalculateLivenessRanges(IMLRegisterAllocatorContext& ctx) -{ - // for each register calculate min/max index of usage range within each segment - size_t dbgIndex = 0; - for (IMLSegment* segIt : ctx.deprGenContext->segmentList2) - { - cemu_assert_debug(segIt->momentaryIndex == dbgIndex); - IMLRA_CalculateSegmentMinMaxAbstractRanges(ctx, segIt); - dbgIndex++; - } -} - -raLivenessRange* PPCRecRA_convertToMappedRanges(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment, IMLRegID vGPR, IMLName name) -{ - IMLRARegAbstractLiveness* abstractRange = _GetAbstractRange(ctx, imlSegment, vGPR); - if (!abstractRange) - return nullptr; - if (abstractRange->isProcessed) - { - // return already existing segment - raLivenessRange* existingRange = IMLRA_GetSubrange(imlSegment, vGPR); - cemu_assert_debug(existingRange); - return existingRange; - } - abstractRange->isProcessed = true; - // 
create subrange - cemu_assert_debug(IMLRA_GetSubrange(imlSegment, vGPR) == nullptr); - cemu_assert_debug( - (abstractRange->usageStart == abstractRange->usageEnd && (abstractRange->usageStart == RA_INTER_RANGE_START || abstractRange->usageStart == RA_INTER_RANGE_END)) || - abstractRange->usageStart < abstractRange->usageEnd); // usageEnd is exclusive so it should always be larger - sint32 inclusiveEnd = abstractRange->usageEnd; - if (inclusiveEnd != RA_INTER_RANGE_START && inclusiveEnd != RA_INTER_RANGE_END) - inclusiveEnd--; // subtract one, because usageEnd is exclusive, but the end value of the interval passed to createSubrange is inclusive - raInterval interval; - interval.SetInterval(abstractRange->usageStart, true, inclusiveEnd, true); - raLivenessRange* subrange = IMLRA_CreateRange(ctx.deprGenContext, imlSegment, vGPR, name, interval.start, interval.end); - // traverse forward - if (abstractRange->usageEnd == RA_INTER_RANGE_END) - { - if (imlSegment->nextSegmentBranchTaken) - { - IMLRARegAbstractLiveness* branchTakenRange = _GetAbstractRange(ctx, imlSegment->nextSegmentBranchTaken, vGPR); - if (branchTakenRange && branchTakenRange->usageStart == RA_INTER_RANGE_START) - { - subrange->subrangeBranchTaken = PPCRecRA_convertToMappedRanges(ctx, imlSegment->nextSegmentBranchTaken, vGPR, name); - subrange->subrangeBranchTaken->previousRanges.push_back(subrange); - cemu_assert_debug(subrange->subrangeBranchTaken->interval.ExtendsPreviousSegment()); - } - } - if (imlSegment->nextSegmentBranchNotTaken) - { - IMLRARegAbstractLiveness* branchNotTakenRange = _GetAbstractRange(ctx, imlSegment->nextSegmentBranchNotTaken, vGPR); - if (branchNotTakenRange && branchNotTakenRange->usageStart == RA_INTER_RANGE_START) - { - subrange->subrangeBranchNotTaken = PPCRecRA_convertToMappedRanges(ctx, imlSegment->nextSegmentBranchNotTaken, vGPR, name); - subrange->subrangeBranchNotTaken->previousRanges.push_back(subrange); - 
cemu_assert_debug(subrange->subrangeBranchNotTaken->interval.ExtendsPreviousSegment()); - } - } - } - // traverse backward - if (abstractRange->usageStart == RA_INTER_RANGE_START) - { - for (auto& it : imlSegment->list_prevSegments) - { - IMLRARegAbstractLiveness* prevRange = _GetAbstractRange(ctx, it, vGPR); - if (!prevRange) - continue; - if (prevRange->usageEnd == RA_INTER_RANGE_END) - PPCRecRA_convertToMappedRanges(ctx, it, vGPR, name); - } - } - return subrange; -} - -void IMLRA_UpdateOrAddSubrangeLocation(raLivenessRange* subrange, raInstructionEdge pos) -{ - if (subrange->list_accessLocations.empty()) - { - subrange->list_accessLocations.emplace_back(pos); - return; - } - if(subrange->list_accessLocations.back().pos == pos) - return; - cemu_assert_debug(subrange->list_accessLocations.back().pos < pos); - subrange->list_accessLocations.emplace_back(pos); -} - -// take abstract range data and create LivenessRanges -void IMLRA_ConvertAbstractToLivenessRanges(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment) -{ - const std::unordered_map& regToSubrange = IMLRA_GetSubrangeMap(imlSegment); - - auto AddOrUpdateFixedRegRequirement = [&](IMLRegID regId, sint32 instructionIndex, bool isInput, const IMLPhysRegisterSet& physRegSet) { - raLivenessRange* subrange = regToSubrange.find(regId)->second; - cemu_assert_debug(subrange); - raFixedRegRequirement tmp; - tmp.pos.Set(instructionIndex, isInput); - tmp.allowedReg = physRegSet; - if (subrange->list_fixedRegRequirements.empty() || subrange->list_fixedRegRequirements.back().pos != tmp.pos) - subrange->list_fixedRegRequirements.push_back(tmp); - }; - - // convert abstract min-max ranges to liveness range objects - auto& segMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - for (auto& it : segMap) - { - if (it.second.isProcessed) - continue; - IMLRegID regId = it.first; - PPCRecRA_convertToMappedRanges(ctx, imlSegment, regId, ctx.raParam->regIdToName.find(regId)->second); - } - // fill created ranges with 
read/write location indices - // note that at this point there is only one range per register per segment - // and the algorithm below relies on this - size_t index = 0; - IMLUsedRegisters gprTracking; - while (index < imlSegment->imlList.size()) - { - imlSegment->imlList[index].CheckRegisterUsage(&gprTracking); - raInstructionEdge pos((sint32)index, true); - gprTracking.ForEachReadGPR([&](IMLReg gprReg) { - IMLRegID gprId = gprReg.GetRegID(); - raLivenessRange* subrange = regToSubrange.find(gprId)->second; - IMLRA_UpdateOrAddSubrangeLocation(subrange, pos); - }); - pos = {(sint32)index, false}; - gprTracking.ForEachWrittenGPR([&](IMLReg gprReg) { - IMLRegID gprId = gprReg.GetRegID(); - raLivenessRange* subrange = regToSubrange.find(gprId)->second; - IMLRA_UpdateOrAddSubrangeLocation(subrange, pos); - }); - // check fixed register requirements - IMLFixedRegisters fixedRegs; - GetInstructionFixedRegisters(&imlSegment->imlList[index], fixedRegs); - for (auto& fixedRegAccess : fixedRegs.listInput) - { - if (fixedRegAccess.reg != IMLREG_INVALID) - AddOrUpdateFixedRegRequirement(fixedRegAccess.reg.GetRegID(), index, true, fixedRegAccess.physRegSet); - } - for (auto& fixedRegAccess : fixedRegs.listOutput) - { - if (fixedRegAccess.reg != IMLREG_INVALID) - AddOrUpdateFixedRegRequirement(fixedRegAccess.reg.GetRegID(), index, false, fixedRegAccess.physRegSet); - } - index++; - } -} - -void IMLRA_extendAbstractRangeToEndOfSegment(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment, IMLRegID regId) -{ - auto& segDistMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - auto it = segDistMap.find(regId); - if (it == segDistMap.end()) - { - sint32 startIndex; - if (imlSegment->HasSuffixInstruction()) - startIndex = imlSegment->GetSuffixInstructionIndex(); - else - startIndex = RA_INTER_RANGE_END; - segDistMap.try_emplace((IMLRegID)regId, IMLRegFormat::INVALID_FORMAT, startIndex, RA_INTER_RANGE_END); - } - else - { - it->second.usageEnd = RA_INTER_RANGE_END; - } -} - -void 
IMLRA_extendAbstractRangeToBeginningOfSegment(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment, IMLRegID regId) -{ - auto& segDistMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - auto it = segDistMap.find(regId); - if (it == segDistMap.end()) - { - segDistMap.try_emplace((IMLRegID)regId, IMLRegFormat::INVALID_FORMAT, RA_INTER_RANGE_START, RA_INTER_RANGE_START); - } - else - { - it->second.usageStart = RA_INTER_RANGE_START; - } - // propagate backwards - for (auto& it : imlSegment->list_prevSegments) - { - IMLRA_extendAbstractRangeToEndOfSegment(ctx, it, regId); - } -} - -void IMLRA_connectAbstractRanges(IMLRegisterAllocatorContext& ctx, IMLRegID regId, IMLSegment** route, sint32 routeDepth) -{ -#ifdef CEMU_DEBUG_ASSERT - if (routeDepth < 2) - assert_dbg(); -#endif - // extend starting range to end of segment - IMLRA_extendAbstractRangeToEndOfSegment(ctx, route[0], regId); - // extend all the connecting segments in both directions - for (sint32 i = 1; i < (routeDepth - 1); i++) - { - IMLRA_extendAbstractRangeToEndOfSegment(ctx, route[i], regId); - IMLRA_extendAbstractRangeToBeginningOfSegment(ctx, route[i], regId); - } - // extend the final segment towards the beginning - IMLRA_extendAbstractRangeToBeginningOfSegment(ctx, route[routeDepth - 1], regId); -} - -void _IMLRA_checkAndTryExtendRange(IMLRegisterAllocatorContext& ctx, IMLSegment* currentSegment, IMLRegID regID, sint32 distanceLeft, IMLSegment** route, sint32 routeDepth) -{ - if (routeDepth >= 64) - { - cemuLog_logDebug(LogType::Force, "Recompiler RA route maximum depth exceeded\n"); - return; - } - route[routeDepth] = currentSegment; - - IMLRARegAbstractLiveness* range = _GetAbstractRange(ctx, currentSegment, regID); - - if (!range) - { - // measure distance over entire segment - distanceLeft -= (sint32)currentSegment->imlList.size(); - if (distanceLeft > 0) - { - if (currentSegment->nextSegmentBranchNotTaken) - _IMLRA_checkAndTryExtendRange(ctx, currentSegment->nextSegmentBranchNotTaken, regID, 
distanceLeft, route, routeDepth + 1); - if (currentSegment->nextSegmentBranchTaken) - _IMLRA_checkAndTryExtendRange(ctx, currentSegment->nextSegmentBranchTaken, regID, distanceLeft, route, routeDepth + 1); - } - return; - } - else - { - // measure distance to range - if (range->usageStart == RA_INTER_RANGE_END) - { - if (distanceLeft < (sint32)currentSegment->imlList.size()) - return; // range too far away - } - else if (range->usageStart != RA_INTER_RANGE_START && range->usageStart > distanceLeft) - return; // out of range - // found close range -> connect ranges - IMLRA_connectAbstractRanges(ctx, regID, route, routeDepth + 1); - } -} - -void PPCRecRA_checkAndTryExtendRange(IMLRegisterAllocatorContext& ctx, IMLSegment* currentSegment, IMLRARegAbstractLiveness* range, IMLRegID regID) -{ - cemu_assert_debug(range->usageEnd >= 0); - // count instructions to end of initial segment - sint32 instructionsUntilEndOfSeg; - if (range->usageEnd == RA_INTER_RANGE_END) - instructionsUntilEndOfSeg = 0; - else - instructionsUntilEndOfSeg = (sint32)currentSegment->imlList.size() - range->usageEnd; - cemu_assert_debug(instructionsUntilEndOfSeg >= 0); - sint32 remainingScanDist = 45 - instructionsUntilEndOfSeg; - if (remainingScanDist <= 0) - return; // can't reach end - - IMLSegment* route[64]; - route[0] = currentSegment; - if (currentSegment->nextSegmentBranchNotTaken) - _IMLRA_checkAndTryExtendRange(ctx, currentSegment->nextSegmentBranchNotTaken, regID, remainingScanDist, route, 1); - if (currentSegment->nextSegmentBranchTaken) - _IMLRA_checkAndTryExtendRange(ctx, currentSegment->nextSegmentBranchTaken, regID, remainingScanDist, route, 1); -} - -void PPCRecRA_mergeCloseRangesForSegmentV2(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment) -{ - auto& segMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - for (auto& it : segMap) - { - PPCRecRA_checkAndTryExtendRange(ctx, imlSegment, &(it.second), it.first); - } -#ifdef CEMU_DEBUG_ASSERT - if 
(imlSegment->list_prevSegments.empty() == false && imlSegment->isEnterable) - assert_dbg(); - if ((imlSegment->nextSegmentBranchNotTaken != nullptr || imlSegment->nextSegmentBranchTaken != nullptr) && imlSegment->nextSegmentIsUncertain) - assert_dbg(); -#endif -} - -void PPCRecRA_followFlowAndExtendRanges(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment) -{ - std::vector list_segments; - std::vector list_processedSegment; - size_t segmentCount = ctx.deprGenContext->segmentList2.size(); - list_segments.reserve(segmentCount + 1); - list_processedSegment.resize(segmentCount); - - auto markSegProcessed = [&list_processedSegment](IMLSegment* seg) { - list_processedSegment[seg->momentaryIndex] = true; - }; - auto isSegProcessed = [&list_processedSegment](IMLSegment* seg) -> bool { - return list_processedSegment[seg->momentaryIndex]; - }; - markSegProcessed(imlSegment); - - sint32 index = 0; - list_segments.push_back(imlSegment); - while (index < list_segments.size()) - { - IMLSegment* currentSegment = list_segments[index]; - PPCRecRA_mergeCloseRangesForSegmentV2(ctx, currentSegment); - // follow flow - if (currentSegment->nextSegmentBranchNotTaken && !isSegProcessed(currentSegment->nextSegmentBranchNotTaken)) - { - markSegProcessed(currentSegment->nextSegmentBranchNotTaken); - list_segments.push_back(currentSegment->nextSegmentBranchNotTaken); - } - if (currentSegment->nextSegmentBranchTaken && !isSegProcessed(currentSegment->nextSegmentBranchTaken)) - { - markSegProcessed(currentSegment->nextSegmentBranchTaken); - list_segments.push_back(currentSegment->nextSegmentBranchTaken); - } - index++; - } -} - -void IMLRA_MergeCloseAbstractRanges(IMLRegisterAllocatorContext& ctx) -{ - for (size_t s = 0; s < ctx.deprGenContext->segmentList2.size(); s++) - { - IMLSegment* imlSegment = ctx.deprGenContext->segmentList2[s]; - if (!imlSegment->list_prevSegments.empty()) - continue; // not an entry/standalone segment - PPCRecRA_followFlowAndExtendRanges(ctx, imlSegment); - } -} 
- -void IMLRA_ExtendAbstractRangesOutOfLoops(IMLRegisterAllocatorContext& ctx) -{ - for (size_t s = 0; s < ctx.deprGenContext->segmentList2.size(); s++) - { - IMLSegment* imlSegment = ctx.deprGenContext->segmentList2[s]; - auto localLoopDepth = imlSegment->loopDepth; - if (localLoopDepth <= 0) - continue; // not inside a loop - // look for loop exit - bool hasLoopExit = false; - if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->loopDepth < localLoopDepth) - { - hasLoopExit = true; - } - if (imlSegment->nextSegmentBranchNotTaken && imlSegment->nextSegmentBranchNotTaken->loopDepth < localLoopDepth) - { - hasLoopExit = true; - } - if (hasLoopExit == false) - continue; - - // extend looping ranges into all exits (this allows the data flow analyzer to move stores out of the loop) - auto& segMap = ctx.GetSegmentAbstractRangeMap(imlSegment); - for (auto& it : segMap) - { - if (it.second.usageEnd != RA_INTER_RANGE_END) - continue; - if (imlSegment->nextSegmentBranchTaken) - IMLRA_extendAbstractRangeToBeginningOfSegment(ctx, imlSegment->nextSegmentBranchTaken, it.first); - if (imlSegment->nextSegmentBranchNotTaken) - IMLRA_extendAbstractRangeToBeginningOfSegment(ctx, imlSegment->nextSegmentBranchNotTaken, it.first); - } - } -} - -void IMLRA_ProcessFlowAndCalculateLivenessRanges(IMLRegisterAllocatorContext& ctx) -{ - IMLRA_MergeCloseAbstractRanges(ctx); - // extra pass to move register loads and stores out of loops - IMLRA_ExtendAbstractRangesOutOfLoops(ctx); - // calculate liveness ranges - for (auto& segIt : ctx.deprGenContext->segmentList2) - IMLRA_ConvertAbstractToLivenessRanges(ctx, segIt); -} - -void IMLRA_AnalyzeSubrangeDataDependency(raLivenessRange* subrange) -{ - bool isRead = false; - bool isWritten = false; - bool isOverwritten = false; - for (auto& location : subrange->list_accessLocations) - { - if (location.IsRead()) - { - isRead = true; - } - if (location.IsWrite()) - { - if (isRead == false) - isOverwritten = true; - isWritten = 
true; - } - } - subrange->_noLoad = isOverwritten; - subrange->hasStore = isWritten; - - if (subrange->interval.ExtendsPreviousSegment()) - subrange->_noLoad = true; -} - -struct subrangeEndingInfo_t -{ - raLivenessRange* subrangeList[SUBRANGE_LIST_SIZE]; - sint32 subrangeCount; - - bool hasUndefinedEndings; -}; - -void _findSubrangeWriteEndings(raLivenessRange* subrange, uint32 iterationIndex, sint32 depth, subrangeEndingInfo_t* info) -{ - if (depth >= 30) - { - info->hasUndefinedEndings = true; - return; - } - if (subrange->lastIterationIndex == iterationIndex) - return; // already processed - subrange->lastIterationIndex = iterationIndex; - if (subrange->hasStoreDelayed) - return; // no need to traverse this subrange - IMLSegment* imlSegment = subrange->imlSegment; - if (!subrange->interval.ExtendsIntoNextSegment()) - { - // ending segment - if (info->subrangeCount >= SUBRANGE_LIST_SIZE) - { - info->hasUndefinedEndings = true; - return; - } - else - { - info->subrangeList[info->subrangeCount] = subrange; - info->subrangeCount++; - } - return; - } - - // traverse next subranges in flow - if (imlSegment->nextSegmentBranchNotTaken) - { - if (subrange->subrangeBranchNotTaken == nullptr) - { - info->hasUndefinedEndings = true; - } - else - { - _findSubrangeWriteEndings(subrange->subrangeBranchNotTaken, iterationIndex, depth + 1, info); - } - } - if (imlSegment->nextSegmentBranchTaken) - { - if (subrange->subrangeBranchTaken == nullptr) - { - info->hasUndefinedEndings = true; - } - else - { - _findSubrangeWriteEndings(subrange->subrangeBranchTaken, iterationIndex, depth + 1, info); - } - } -} - -static void IMLRA_AnalyzeRangeDataFlow(raLivenessRange* subrange) -{ - if (!subrange->interval.ExtendsIntoNextSegment()) - return; - // analyze data flow across segments (if this segment has writes) - if (subrange->hasStore) - { - subrangeEndingInfo_t writeEndingInfo; - writeEndingInfo.subrangeCount = 0; - writeEndingInfo.hasUndefinedEndings = false; - 
_findSubrangeWriteEndings(subrange, IMLRA_GetNextIterationIndex(), 0, &writeEndingInfo); - if (writeEndingInfo.hasUndefinedEndings == false) - { - // get cost of delaying store into endings - sint32 delayStoreCost = 0; - bool alreadyStoredInAllEndings = true; - for (sint32 i = 0; i < writeEndingInfo.subrangeCount; i++) - { - raLivenessRange* subrangeItr = writeEndingInfo.subrangeList[i]; - if (subrangeItr->hasStore) - continue; // this ending already stores, no extra cost - alreadyStoredInAllEndings = false; - sint32 storeCost = IMLRA_GetSegmentReadWriteCost(subrangeItr->imlSegment); - delayStoreCost = std::max(storeCost, delayStoreCost); - } - if (alreadyStoredInAllEndings) - { - subrange->hasStore = false; - subrange->hasStoreDelayed = true; - } - else if (delayStoreCost <= IMLRA_GetSegmentReadWriteCost(subrange->imlSegment)) - { - subrange->hasStore = false; - subrange->hasStoreDelayed = true; - for (sint32 i = 0; i < writeEndingInfo.subrangeCount; i++) - { - raLivenessRange* subrangeItr = writeEndingInfo.subrangeList[i]; - subrangeItr->hasStore = true; - } - } - } - } -} - -void IMLRA_AnalyzeRangeDataFlow(ppcImlGenContext_t* ppcImlGenContext) -{ - // this function is called after _AssignRegisters(), which means that all liveness ranges are already final and must not be modified anymore - // track read/write dependencies per segment - for (auto& seg : ppcImlGenContext->segmentList2) - { - raLivenessRange* subrange = seg->raInfo.linkedList_allSubranges; - while (subrange) - { - IMLRA_AnalyzeSubrangeDataDependency(subrange); - subrange = subrange->link_allSegmentRanges.next; - } - } - // propagate information across segment boundaries - for (auto& seg : ppcImlGenContext->segmentList2) - { - raLivenessRange* subrange = seg->raInfo.linkedList_allSubranges; - while (subrange) - { - IMLRA_AnalyzeRangeDataFlow(subrange); - subrange = subrange->link_allSegmentRanges.next; - } - } -} - -/* Generate move instructions */ - -inline IMLReg _MakeNativeReg(IMLRegFormat 
baseFormat, IMLRegID regId) -{ - return IMLReg(baseFormat, baseFormat, 0, regId); -} - -// prepass for IMLRA_GenerateSegmentMoveInstructions which updates all virtual registers to their physical counterparts -void IMLRA_RewriteRegisters(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment) -{ - std::unordered_map virtId2PhysReg; - boost::container::small_vector activeRanges; - raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; - raInstructionEdge currentEdge; - for (size_t i = 0; i < imlSegment->imlList.size(); i++) - { - currentEdge.Set(i, false); // set to instruction index on output edge - // activate ranges which begin before or during this instruction - while (currentRange && currentRange->interval.start <= currentEdge) - { - cemu_assert_debug(virtId2PhysReg.find(currentRange->GetVirtualRegister()) == virtId2PhysReg.end() || virtId2PhysReg[currentRange->GetVirtualRegister()] == currentRange->GetPhysicalRegister()); // check for register conflict - - virtId2PhysReg[currentRange->GetVirtualRegister()] = currentRange->GetPhysicalRegister(); - activeRanges.push_back(currentRange); - currentRange = currentRange->link_allSegmentRanges.next; - } - // rewrite registers - imlSegment->imlList[i].RewriteGPR(virtId2PhysReg); - // deactivate ranges which end during this instruction - auto it = activeRanges.begin(); - while (it != activeRanges.end()) - { - if ((*it)->interval.end <= currentEdge) - { - virtId2PhysReg.erase((*it)->GetVirtualRegister()); - it = activeRanges.erase(it); - } - else - ++it; - } - } -} - -void IMLRA_GenerateSegmentMoveInstructions2(IMLRegisterAllocatorContext& ctx, IMLSegment* imlSegment) -{ - IMLRA_RewriteRegisters(ctx, imlSegment); - -#if DEBUG_RA_INSTRUCTION_GEN - cemuLog_log(LogType::Force, ""); - cemuLog_log(LogType::Force, "[Seg before RA]"); - IMLDebug_DumpSegment(nullptr, imlSegment, true); -#endif - - bool hadSuffixInstruction = imlSegment->HasSuffixInstruction(); - - std::vector rebuiltInstructions; - sint32 
numInstructionsWithoutSuffix = (sint32)imlSegment->imlList.size() - (imlSegment->HasSuffixInstruction() ? 1 : 0); - - if (imlSegment->imlList.empty()) - { - // empty segments need special handling (todo - look into merging this with the core logic below eventually) - // store all ranges - raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; - while (currentRange) - { - if (currentRange->hasStore) - rebuiltInstructions.emplace_back().make_name_r(currentRange->GetName(), _MakeNativeReg(ctx.regIdToBaseFormat[currentRange->GetVirtualRegister()], currentRange->GetPhysicalRegister())); - currentRange = currentRange->link_allSegmentRanges.next; - } - // load ranges - currentRange = imlSegment->raInfo.linkedList_allSubranges; - while (currentRange) - { - if (!currentRange->_noLoad) - { - cemu_assert_debug(currentRange->interval.ExtendsIntoNextSegment()); - rebuiltInstructions.emplace_back().make_r_name(_MakeNativeReg(ctx.regIdToBaseFormat[currentRange->GetVirtualRegister()], currentRange->GetPhysicalRegister()), currentRange->GetName()); - } - currentRange = currentRange->link_allSegmentRanges.next; - } - imlSegment->imlList = std::move(rebuiltInstructions); - return; - } - - // make sure that no range exceeds the suffix instruction input edge except if they need to be loaded for the next segment (todo - for those, set the start point accordingly?) 
- { - raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; - raInstructionEdge edge; - if (imlSegment->HasSuffixInstruction()) - edge.Set(numInstructionsWithoutSuffix, true); - else - edge.Set(numInstructionsWithoutSuffix - 1, false); - - while (currentRange) - { - if (!currentRange->interval.IsNextSegmentOnly() && currentRange->interval.end > edge) - { - currentRange->interval.SetEnd(edge); - } - currentRange = currentRange->link_allSegmentRanges.next; - } - } - -#if DEBUG_RA_INSTRUCTION_GEN - cemuLog_log(LogType::Force, ""); - cemuLog_log(LogType::Force, "--- Intermediate liveness info ---"); - { - raLivenessRange* dbgRange = imlSegment->raInfo.linkedList_allSubranges; - while (dbgRange) - { - cemuLog_log(LogType::Force, "Range i{}: {}-{}", dbgRange->GetVirtualRegister(), dbgRange->interval2.start.GetDebugString(), dbgRange->interval2.end.GetDebugString()); - dbgRange = dbgRange->link_allSegmentRanges.next; - } - } -#endif - - boost::container::small_vector activeRanges; - // first we add all the ranges that extend from the previous segment, some of these will end immediately at the first instruction so we might need to store them early - raLivenessRange* currentRange = imlSegment->raInfo.linkedList_allSubranges; - // make all ranges active that start on RA_INTER_RANGE_START - while (currentRange && currentRange->interval.start.ConnectsToPreviousSegment()) - { - activeRanges.push_back(currentRange); - currentRange = currentRange->link_allSegmentRanges.next; - } - // store all ranges that end before the first output edge (includes RA_INTER_RANGE_START) - auto it = activeRanges.begin(); - raInstructionEdge firstOutputEdge; - firstOutputEdge.Set(0, false); - while (it != activeRanges.end()) - { - if ((*it)->interval.end < firstOutputEdge) - { - raLivenessRange* storedRange = *it; - if (storedRange->hasStore) - rebuiltInstructions.emplace_back().make_name_r(storedRange->GetName(), 
_MakeNativeReg(ctx.regIdToBaseFormat[storedRange->GetVirtualRegister()], storedRange->GetPhysicalRegister())); - it = activeRanges.erase(it); - continue; - } - ++it; - } - - sint32 numInstructions = (sint32)imlSegment->imlList.size(); - for (sint32 i = 0; i < numInstructions; i++) - { - raInstructionEdge curEdge; - // input edge - curEdge.SetRaw(i * 2 + 1); // +1 to include ranges that start at the output of the instruction - while (currentRange && currentRange->interval.start <= curEdge) - { - if (!currentRange->_noLoad) - { - rebuiltInstructions.emplace_back().make_r_name(_MakeNativeReg(ctx.regIdToBaseFormat[currentRange->GetVirtualRegister()], currentRange->GetPhysicalRegister()), currentRange->GetName()); - } - activeRanges.push_back(currentRange); - currentRange = currentRange->link_allSegmentRanges.next; - } - // copy instruction - rebuiltInstructions.push_back(imlSegment->imlList[i]); - // output edge - curEdge.SetRaw(i * 2 + 1 + 1); - // also store ranges that end on the next input edge, we handle this by adding an extra 1 above - auto it = activeRanges.begin(); - while (it != activeRanges.end()) - { - if ((*it)->interval.end <= curEdge) - { - // range expires - // todo - check hasStore - raLivenessRange* storedRange = *it; - if (storedRange->hasStore) - { - cemu_assert_debug(i != numInstructionsWithoutSuffix); // not allowed to emit after suffix - rebuiltInstructions.emplace_back().make_name_r(storedRange->GetName(), _MakeNativeReg(ctx.regIdToBaseFormat[storedRange->GetVirtualRegister()], storedRange->GetPhysicalRegister())); - } - it = activeRanges.erase(it); - continue; - } - ++it; - } - } - // if there is no suffix instruction we currently need to handle the final loads here - cemu_assert_debug(hadSuffixInstruction == imlSegment->HasSuffixInstruction()); - if (imlSegment->HasSuffixInstruction()) - { - if (currentRange) - { - cemuLog_logDebug(LogType::Force, "[DEBUG] GenerateSegmentMoveInstructions() hit suffix path with non-null currentRange. 
Segment: {:08x}", imlSegment->ppcAddress); - } - for (auto& remainingRange : activeRanges) - { - cemu_assert_debug(!remainingRange->hasStore); - } - } - else - { - for (auto& remainingRange : activeRanges) - { - cemu_assert_debug(!remainingRange->hasStore); // this range still needs to be stored - } - while (currentRange) - { - cemu_assert_debug(currentRange->interval.IsNextSegmentOnly()); - cemu_assert_debug(!currentRange->_noLoad); - rebuiltInstructions.emplace_back().make_r_name(_MakeNativeReg(ctx.regIdToBaseFormat[currentRange->GetVirtualRegister()], currentRange->GetPhysicalRegister()), currentRange->GetName()); - currentRange = currentRange->link_allSegmentRanges.next; - } - } - - imlSegment->imlList = std::move(rebuiltInstructions); - cemu_assert_debug(hadSuffixInstruction == imlSegment->HasSuffixInstruction()); - -#if DEBUG_RA_INSTRUCTION_GEN - cemuLog_log(LogType::Force, ""); - cemuLog_log(LogType::Force, "[Seg after RA]"); - IMLDebug_DumpSegment(nullptr, imlSegment, false); -#endif -} - -void IMLRA_GenerateMoveInstructions(IMLRegisterAllocatorContext& ctx) -{ - for (size_t s = 0; s < ctx.deprGenContext->segmentList2.size(); s++) - { - IMLSegment* imlSegment = ctx.deprGenContext->segmentList2[s]; - IMLRA_GenerateSegmentMoveInstructions2(ctx, imlSegment); - } -} - -static void DbgVerifyFixedRegRequirements(IMLSegment* imlSegment) -{ -#if DEBUG_RA_EXTRA_VALIDATION - std::vector frr = IMLRA_BuildSegmentInstructionFixedRegList(imlSegment); - for(auto& fixedReq : frr) - { - for (raLivenessRange* range = imlSegment->raInfo.linkedList_allSubranges; range; range = range->link_allSegmentRanges.next) - { - if (!range->interval2.ContainsEdge(fixedReq.pos)) - continue; - // verify if the requirement is compatible - if(range->GetVirtualRegister() == fixedReq.regId) - { - cemu_assert(range->HasPhysicalRegister()); - cemu_assert(fixedReq.allowedReg.IsAvailable(range->GetPhysicalRegister())); // virtual register matches, but not assigned the right physical register - } - 
else - { - cemu_assert(!fixedReq.allowedReg.IsAvailable(range->GetPhysicalRegister())); // virtual register does not match, but using the reserved physical register - } - } - } -#endif -} - -static void DbgVerifyAllRanges(IMLRegisterAllocatorContext& ctx) -{ -#if DEBUG_RA_EXTRA_VALIDATION - for (size_t s = 0; s < ctx.deprGenContext->segmentList2.size(); s++) - { - IMLSegment* imlSegment = ctx.deprGenContext->segmentList2[s]; - raLivenessRange* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; - while (subrangeItr) - { - PPCRecRA_debugValidateSubrange(subrangeItr); - subrangeItr = subrangeItr->link_allSegmentRanges.next; - } - } - // check that no range validates register requirements - for (size_t s = 0; s < ctx.deprGenContext->segmentList2.size(); s++) - { - DbgVerifyFixedRegRequirements(ctx.deprGenContext->segmentList2[s]); - } -#endif -} - -void IMLRegisterAllocator_AllocateRegisters(ppcImlGenContext_t* ppcImlGenContext, IMLRegisterAllocatorParameters& raParam) -{ - IMLRegisterAllocatorContext ctx; - ctx.raParam = &raParam; - ctx.deprGenContext = ppcImlGenContext; - - IMLRA_ReshapeForRegisterAllocation(ppcImlGenContext); - ppcImlGenContext->UpdateSegmentIndices(); // update momentaryIndex of each segment - ctx.perSegmentAbstractRanges.resize(ppcImlGenContext->segmentList2.size()); - IMLRA_CalculateLivenessRanges(ctx); - IMLRA_ProcessFlowAndCalculateLivenessRanges(ctx); - IMLRA_AssignRegisters(ctx, ppcImlGenContext); - DbgVerifyAllRanges(ctx); - IMLRA_AnalyzeRangeDataFlow(ppcImlGenContext); - IMLRA_GenerateMoveInstructions(ctx); - - IMLRA_DeleteAllRanges(ppcImlGenContext); -} diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocator.h b/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocator.h deleted file mode 100644 index 0a54e4cb..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocator.h +++ /dev/null @@ -1,125 +0,0 @@ -#pragma once - -// container for storing a set of register indices -// specifically optimized towards 
storing typical range of physical register indices (expected to be below 64) -class IMLPhysRegisterSet -{ -public: - void SetAvailable(uint32 index) - { - cemu_assert_debug(index < 64); - m_regBitmask |= ((uint64)1 << index); - } - - void SetReserved(uint32 index) - { - cemu_assert_debug(index < 64); - m_regBitmask &= ~((uint64)1 << index); - } - - void SetAllAvailable() - { - m_regBitmask = ~0ull; - } - - bool HasAllAvailable() const - { - return m_regBitmask == ~0ull; - } - - bool IsAvailable(uint32 index) const - { - return (m_regBitmask & ((uint64)1 << index)) != 0; - } - - IMLPhysRegisterSet& operator&=(const IMLPhysRegisterSet& other) - { - this->m_regBitmask &= other.m_regBitmask; - return *this; - } - - IMLPhysRegisterSet& operator=(const IMLPhysRegisterSet& other) - { - this->m_regBitmask = other.m_regBitmask; - return *this; - } - - void RemoveRegisters(const IMLPhysRegisterSet& other) - { - this->m_regBitmask &= ~other.m_regBitmask; - } - - bool HasAnyAvailable() const - { - return m_regBitmask != 0; - } - - bool HasExactlyOneAvailable() const - { - return m_regBitmask != 0 && (m_regBitmask & (m_regBitmask - 1)) == 0; - } - - // returns index of first available register. 
Do not call when HasAnyAvailable() == false - IMLPhysReg GetFirstAvailableReg() - { - cemu_assert_debug(m_regBitmask != 0); - sint32 regIndex = 0; - auto tmp = m_regBitmask; - while ((tmp & 0xFF) == 0) - { - regIndex += 8; - tmp >>= 8; - } - while ((tmp & 0x1) == 0) - { - regIndex++; - tmp >>= 1; - } - return regIndex; - } - - // returns index of next available register (search includes any register index >= startIndex) - // returns -1 if there is no more register - IMLPhysReg GetNextAvailableReg(sint32 startIndex) const - { - if (startIndex >= 64) - return -1; - uint32 regIndex = startIndex; - auto tmp = m_regBitmask; - tmp >>= regIndex; - if (!tmp) - return -1; - while ((tmp & 0xFF) == 0) - { - regIndex += 8; - tmp >>= 8; - } - while ((tmp & 0x1) == 0) - { - regIndex++; - tmp >>= 1; - } - return regIndex; - } - - sint32 CountAvailableRegs() const - { - return std::popcount(m_regBitmask); - } - -private: - uint64 m_regBitmask{ 0 }; -}; - -struct IMLRegisterAllocatorParameters -{ - inline IMLPhysRegisterSet& GetPhysRegPool(IMLRegFormat regFormat) - { - return perTypePhysPool[stdx::to_underlying(regFormat)]; - } - - IMLPhysRegisterSet perTypePhysPool[stdx::to_underlying(IMLRegFormat::TYPE_COUNT)]; - std::unordered_map regIdToName; -}; - -void IMLRegisterAllocator_AllocateRegisters(ppcImlGenContext_t* ppcImlGenContext, IMLRegisterAllocatorParameters& raParam); \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.cpp deleted file mode 100644 index 583d5905..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.cpp +++ /dev/null @@ -1,635 +0,0 @@ -#include "../PPCRecompiler.h" -#include "../PPCRecompilerIml.h" -#include "IMLRegisterAllocatorRanges.h" -#include "util/helpers/MemoryPool.h" - -uint32 IMLRA_GetNextIterationIndex(); - -IMLRegID raLivenessRange::GetVirtualRegister() const -{ - return virtualRegister; -} - -sint32 
raLivenessRange::GetPhysicalRegister() const -{ - return physicalRegister; -} - -IMLName raLivenessRange::GetName() const -{ - return name; -} - -void raLivenessRange::SetPhysicalRegister(IMLPhysReg physicalRegister) -{ - this->physicalRegister = physicalRegister; -} - -void raLivenessRange::SetPhysicalRegisterForCluster(IMLPhysReg physicalRegister) -{ - auto clusterRanges = GetAllSubrangesInCluster(); - for(auto& range : clusterRanges) - range->physicalRegister = physicalRegister; -} - -boost::container::small_vector raLivenessRange::GetAllSubrangesInCluster() -{ - uint32 iterationIndex = IMLRA_GetNextIterationIndex(); - boost::container::small_vector subranges; - subranges.push_back(this); - this->lastIterationIndex = iterationIndex; - size_t i = 0; - while(isubrangeBranchTaken && cur->subrangeBranchTaken->lastIterationIndex != iterationIndex) - { - cur->subrangeBranchTaken->lastIterationIndex = iterationIndex; - subranges.push_back(cur->subrangeBranchTaken); - } - if(cur->subrangeBranchNotTaken && cur->subrangeBranchNotTaken->lastIterationIndex != iterationIndex) - { - cur->subrangeBranchNotTaken->lastIterationIndex = iterationIndex; - subranges.push_back(cur->subrangeBranchNotTaken); - } - // check predecessors - for(auto& prev : cur->previousRanges) - { - if(prev->lastIterationIndex != iterationIndex) - { - prev->lastIterationIndex = iterationIndex; - subranges.push_back(prev); - } - } - } - return subranges; -} - -void raLivenessRange::GetAllowedRegistersExRecursive(raLivenessRange* range, uint32 iterationIndex, IMLPhysRegisterSet& allowedRegs) -{ - range->lastIterationIndex = iterationIndex; - for (auto& it : range->list_fixedRegRequirements) - allowedRegs &= it.allowedReg; - // check successors - if (range->subrangeBranchTaken && range->subrangeBranchTaken->lastIterationIndex != iterationIndex) - GetAllowedRegistersExRecursive(range->subrangeBranchTaken, iterationIndex, allowedRegs); - if (range->subrangeBranchNotTaken && 
range->subrangeBranchNotTaken->lastIterationIndex != iterationIndex) - GetAllowedRegistersExRecursive(range->subrangeBranchNotTaken, iterationIndex, allowedRegs); - // check predecessors - for (auto& prev : range->previousRanges) - { - if (prev->lastIterationIndex != iterationIndex) - GetAllowedRegistersExRecursive(prev, iterationIndex, allowedRegs); - } -}; - -bool raLivenessRange::GetAllowedRegistersEx(IMLPhysRegisterSet& allowedRegisters) -{ - uint32 iterationIndex = IMLRA_GetNextIterationIndex(); - allowedRegisters.SetAllAvailable(); - GetAllowedRegistersExRecursive(this, iterationIndex, allowedRegisters); - return !allowedRegisters.HasAllAvailable(); -} - -IMLPhysRegisterSet raLivenessRange::GetAllowedRegisters(IMLPhysRegisterSet regPool) -{ - IMLPhysRegisterSet fixedRegRequirements = regPool; - if(interval.ExtendsPreviousSegment() || interval.ExtendsIntoNextSegment()) - { - auto clusterRanges = GetAllSubrangesInCluster(); - for(auto& subrange : clusterRanges) - { - for(auto& fixedRegLoc : subrange->list_fixedRegRequirements) - fixedRegRequirements &= fixedRegLoc.allowedReg; - } - return fixedRegRequirements; - } - for(auto& fixedRegLoc : list_fixedRegRequirements) - fixedRegRequirements &= fixedRegLoc.allowedReg; - return fixedRegRequirements; -} - -void PPCRecRARange_addLink_perVirtualGPR(std::unordered_map& root, raLivenessRange* subrange) -{ - IMLRegID regId = subrange->GetVirtualRegister(); - auto it = root.find(regId); - if (it == root.end()) - { - // new single element - root.try_emplace(regId, subrange); - subrange->link_sameVirtualRegister.prev = nullptr; - subrange->link_sameVirtualRegister.next = nullptr; - } - else - { - // insert in first position - raLivenessRange* priorFirst = it->second; - subrange->link_sameVirtualRegister.next = priorFirst; - it->second = subrange; - subrange->link_sameVirtualRegister.prev = nullptr; - priorFirst->link_sameVirtualRegister.prev = subrange; - } -} - -void PPCRecRARange_addLink_allSegmentRanges(raLivenessRange** 
root, raLivenessRange* subrange) -{ - subrange->link_allSegmentRanges.next = *root; - if (*root) - (*root)->link_allSegmentRanges.prev = subrange; - subrange->link_allSegmentRanges.prev = nullptr; - *root = subrange; -} - -void PPCRecRARange_removeLink_perVirtualGPR(std::unordered_map& root, raLivenessRange* subrange) -{ -#ifdef CEMU_DEBUG_ASSERT - raLivenessRange* cur = root.find(subrange->GetVirtualRegister())->second; - bool hasRangeFound = false; - while(cur) - { - if(cur == subrange) - { - hasRangeFound = true; - break; - } - cur = cur->link_sameVirtualRegister.next; - } - cemu_assert_debug(hasRangeFound); -#endif - IMLRegID regId = subrange->GetVirtualRegister(); - raLivenessRange* nextRange = subrange->link_sameVirtualRegister.next; - raLivenessRange* prevRange = subrange->link_sameVirtualRegister.prev; - raLivenessRange* newBase = prevRange ? prevRange : nextRange; - if (prevRange) - prevRange->link_sameVirtualRegister.next = subrange->link_sameVirtualRegister.next; - if (nextRange) - nextRange->link_sameVirtualRegister.prev = subrange->link_sameVirtualRegister.prev; - - if (!prevRange) - { - if (nextRange) - { - root.find(regId)->second = nextRange; - } - else - { - cemu_assert_debug(root.find(regId)->second == subrange); - root.erase(regId); - } - } -#ifdef CEMU_DEBUG_ASSERT - subrange->link_sameVirtualRegister.prev = (raLivenessRange*)1; - subrange->link_sameVirtualRegister.next = (raLivenessRange*)1; -#endif -} - -void PPCRecRARange_removeLink_allSegmentRanges(raLivenessRange** root, raLivenessRange* subrange) -{ - raLivenessRange* tempPrev = subrange->link_allSegmentRanges.prev; - if (subrange->link_allSegmentRanges.prev) - subrange->link_allSegmentRanges.prev->link_allSegmentRanges.next = subrange->link_allSegmentRanges.next; - else - (*root) = subrange->link_allSegmentRanges.next; - if (subrange->link_allSegmentRanges.next) - subrange->link_allSegmentRanges.next->link_allSegmentRanges.prev = tempPrev; -#ifdef CEMU_DEBUG_ASSERT - 
subrange->link_allSegmentRanges.prev = (raLivenessRange*)1; - subrange->link_allSegmentRanges.next = (raLivenessRange*)1; -#endif -} - -MemoryPoolPermanentObjects memPool_livenessSubrange(4096); - -// startPosition and endPosition are inclusive -raLivenessRange* IMLRA_CreateRange(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment, IMLRegID virtualRegister, IMLName name, raInstructionEdge startPosition, raInstructionEdge endPosition) -{ - raLivenessRange* range = memPool_livenessSubrange.acquireObj(); - range->previousRanges.clear(); - range->list_accessLocations.clear(); - range->list_fixedRegRequirements.clear(); - range->imlSegment = imlSegment; - - cemu_assert_debug(startPosition <= endPosition); - range->interval.start = startPosition; - range->interval.end = endPosition; - - // register mapping - range->virtualRegister = virtualRegister; - range->name = name; - range->physicalRegister = -1; - // default values - range->hasStore = false; - range->hasStoreDelayed = false; - range->lastIterationIndex = 0; - range->subrangeBranchNotTaken = nullptr; - range->subrangeBranchTaken = nullptr; - cemu_assert_debug(range->previousRanges.empty()); - range->_noLoad = false; - // add to segment linked lists - PPCRecRARange_addLink_perVirtualGPR(imlSegment->raInfo.linkedList_perVirtualRegister, range); - PPCRecRARange_addLink_allSegmentRanges(&imlSegment->raInfo.linkedList_allSubranges, range); - return range; -} - -void _unlinkSubrange(raLivenessRange* range) -{ - IMLSegment* imlSegment = range->imlSegment; - PPCRecRARange_removeLink_perVirtualGPR(imlSegment->raInfo.linkedList_perVirtualRegister, range); - PPCRecRARange_removeLink_allSegmentRanges(&imlSegment->raInfo.linkedList_allSubranges, range); - // unlink reverse references - if(range->subrangeBranchTaken) - range->subrangeBranchTaken->previousRanges.erase(std::find(range->subrangeBranchTaken->previousRanges.begin(), range->subrangeBranchTaken->previousRanges.end(), range)); - 
if(range->subrangeBranchNotTaken) - range->subrangeBranchNotTaken->previousRanges.erase(std::find(range->subrangeBranchNotTaken->previousRanges.begin(), range->subrangeBranchNotTaken->previousRanges.end(), range)); - range->subrangeBranchTaken = (raLivenessRange*)(uintptr_t)-1; - range->subrangeBranchNotTaken = (raLivenessRange*)(uintptr_t)-1; - // remove forward references - for(auto& prev : range->previousRanges) - { - if(prev->subrangeBranchTaken == range) - prev->subrangeBranchTaken = nullptr; - if(prev->subrangeBranchNotTaken == range) - prev->subrangeBranchNotTaken = nullptr; - } - range->previousRanges.clear(); -} - -void IMLRA_DeleteRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* range) -{ - _unlinkSubrange(range); - range->list_accessLocations.clear(); - range->list_fixedRegRequirements.clear(); - memPool_livenessSubrange.releaseObj(range); -} - -void IMLRA_DeleteRangeCluster(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* range) -{ - auto clusterRanges = range->GetAllSubrangesInCluster(); - for (auto& subrange : clusterRanges) - IMLRA_DeleteRange(ppcImlGenContext, subrange); -} - -void IMLRA_DeleteAllRanges(ppcImlGenContext_t* ppcImlGenContext) -{ - for(auto& seg : ppcImlGenContext->segmentList2) - { - raLivenessRange* cur; - while(cur = seg->raInfo.linkedList_allSubranges) - IMLRA_DeleteRange(ppcImlGenContext, cur); - seg->raInfo.linkedList_allSubranges = nullptr; - seg->raInfo.linkedList_perVirtualRegister.clear(); - } -} - -void IMLRA_MergeSubranges(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* subrange, raLivenessRange* absorbedSubrange) -{ -#ifdef CEMU_DEBUG_ASSERT - PPCRecRA_debugValidateSubrange(subrange); - PPCRecRA_debugValidateSubrange(absorbedSubrange); - if (subrange->imlSegment != absorbedSubrange->imlSegment) - assert_dbg(); - cemu_assert_debug(subrange->interval.end == absorbedSubrange->interval.start); - - if (subrange->subrangeBranchTaken || subrange->subrangeBranchNotTaken) - assert_dbg(); - if (subrange == 
absorbedSubrange) - assert_dbg(); -#endif - // update references - subrange->subrangeBranchTaken = absorbedSubrange->subrangeBranchTaken; - subrange->subrangeBranchNotTaken = absorbedSubrange->subrangeBranchNotTaken; - absorbedSubrange->subrangeBranchTaken = nullptr; - absorbedSubrange->subrangeBranchNotTaken = nullptr; - if(subrange->subrangeBranchTaken) - *std::find(subrange->subrangeBranchTaken->previousRanges.begin(), subrange->subrangeBranchTaken->previousRanges.end(), absorbedSubrange) = subrange; - if(subrange->subrangeBranchNotTaken) - *std::find(subrange->subrangeBranchNotTaken->previousRanges.begin(), subrange->subrangeBranchNotTaken->previousRanges.end(), absorbedSubrange) = subrange; - - // merge usage locations - for (auto& accessLoc : absorbedSubrange->list_accessLocations) - subrange->list_accessLocations.push_back(accessLoc); - absorbedSubrange->list_accessLocations.clear(); - // merge fixed reg locations -#ifdef CEMU_DEBUG_ASSERT - if(!subrange->list_fixedRegRequirements.empty() && !absorbedSubrange->list_fixedRegRequirements.empty()) - { - cemu_assert_debug(subrange->list_fixedRegRequirements.back().pos < absorbedSubrange->list_fixedRegRequirements.front().pos); - } -#endif - for (auto& fixedReg : absorbedSubrange->list_fixedRegRequirements) - subrange->list_fixedRegRequirements.push_back(fixedReg); - absorbedSubrange->list_fixedRegRequirements.clear(); - - subrange->interval.end = absorbedSubrange->interval.end; - - PPCRecRA_debugValidateSubrange(subrange); - - IMLRA_DeleteRange(ppcImlGenContext, absorbedSubrange); -} - -// remove all inter-segment connections from the range cluster and split it into local ranges. 
Ranges are trimmed and if they have no access location they will be removed -void IMLRA_ExplodeRangeCluster(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* originRange) -{ - cemu_assert_debug(originRange->interval.ExtendsPreviousSegment() || originRange->interval.ExtendsIntoNextSegment()); // only call this on ranges that span multiple segments - auto clusterRanges = originRange->GetAllSubrangesInCluster(); - for (auto& subrange : clusterRanges) - { - if (subrange->list_accessLocations.empty()) - continue; - raInterval interval; - interval.SetInterval(subrange->list_accessLocations.front().pos, subrange->list_accessLocations.back().pos); - raLivenessRange* newSubrange = IMLRA_CreateRange(ppcImlGenContext, subrange->imlSegment, subrange->GetVirtualRegister(), subrange->GetName(), interval.start, interval.end); - // copy locations and fixed reg indices - newSubrange->list_accessLocations = subrange->list_accessLocations; - newSubrange->list_fixedRegRequirements = subrange->list_fixedRegRequirements; - if(originRange->HasPhysicalRegister()) - { - cemu_assert_debug(subrange->list_fixedRegRequirements.empty()); // avoid unassigning a register from a range with a fixed register requirement - } - // validate - if(!newSubrange->list_accessLocations.empty()) - { - cemu_assert_debug(newSubrange->list_accessLocations.front().pos >= newSubrange->interval.start); - cemu_assert_debug(newSubrange->list_accessLocations.back().pos <= newSubrange->interval.end); - } - if(!newSubrange->list_fixedRegRequirements.empty()) - { - cemu_assert_debug(newSubrange->list_fixedRegRequirements.front().pos >= newSubrange->interval.start); // fixed register requirements outside of the actual access range probably means there is a mistake in GetInstructionFixedRegisters() - cemu_assert_debug(newSubrange->list_fixedRegRequirements.back().pos <= newSubrange->interval.end); - } - } - // delete the original range cluster - IMLRA_DeleteRangeCluster(ppcImlGenContext, originRange); -} - -#ifdef 
CEMU_DEBUG_ASSERT -void PPCRecRA_debugValidateSubrange(raLivenessRange* range) -{ - // validate subrange - if (range->subrangeBranchTaken && range->subrangeBranchTaken->imlSegment != range->imlSegment->nextSegmentBranchTaken) - assert_dbg(); - if (range->subrangeBranchNotTaken && range->subrangeBranchNotTaken->imlSegment != range->imlSegment->nextSegmentBranchNotTaken) - assert_dbg(); - - if(range->subrangeBranchTaken || range->subrangeBranchNotTaken) - { - cemu_assert_debug(range->interval.end.ConnectsToNextSegment()); - } - if(!range->previousRanges.empty()) - { - cemu_assert_debug(range->interval.start.ConnectsToPreviousSegment()); - } - // validate locations - if (!range->list_accessLocations.empty()) - { - cemu_assert_debug(range->list_accessLocations.front().pos >= range->interval.start); - cemu_assert_debug(range->list_accessLocations.back().pos <= range->interval.end); - } - // validate fixed reg requirements - if (!range->list_fixedRegRequirements.empty()) - { - cemu_assert_debug(range->list_fixedRegRequirements.front().pos >= range->interval.start); - cemu_assert_debug(range->list_fixedRegRequirements.back().pos <= range->interval.end); - for(sint32 i = 0; i < (sint32)range->list_fixedRegRequirements.size()-1; i++) - cemu_assert_debug(range->list_fixedRegRequirements[i].pos < range->list_fixedRegRequirements[i+1].pos); - } - -} -#else -void PPCRecRA_debugValidateSubrange(raLivenessRange* range) {} -#endif - -// trim start and end of range to match first and last read/write locations -// does not trim start/endpoints which extend into the next/previous segment -void IMLRA_TrimRangeToUse(raLivenessRange* range) -{ - if(range->list_accessLocations.empty()) - { - // special case where we trim ranges extending from other segments to a single instruction edge - cemu_assert_debug(!range->interval.start.IsInstructionIndex() || !range->interval.end.IsInstructionIndex()); - if(range->interval.start.IsInstructionIndex()) - range->interval.start = 
range->interval.end; - if(range->interval.end.IsInstructionIndex()) - range->interval.end = range->interval.start; - return; - } - // trim start and end - raInterval prevInterval = range->interval; - if(range->interval.start.IsInstructionIndex()) - range->interval.start = range->list_accessLocations.front().pos; - if(range->interval.end.IsInstructionIndex()) - range->interval.end = range->list_accessLocations.back().pos; - // extra checks -#ifdef CEMU_DEBUG_ASSERT - cemu_assert_debug(range->interval.start <= range->interval.end); - for(auto& loc : range->list_accessLocations) - { - cemu_assert_debug(range->interval.ContainsEdge(loc.pos)); - } - cemu_assert_debug(prevInterval.ContainsWholeInterval(range->interval)); -#endif -} - -// split range at the given position -// After the split there will be two ranges: -// head -> subrange is shortened to end at splitIndex (exclusive) -// tail -> a new subrange that ranges from splitIndex (inclusive) to the end of the original subrange -// if head has a physical register assigned it will not carry over to tail -// The return value is the tail range -// If trimToUsage is true, the end of the head subrange and the start of the tail subrange will be shrunk to fit the read/write locations within. 
If there are no locations then the range will be deleted -raLivenessRange* IMLRA_SplitRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange*& subrange, raInstructionEdge splitPosition, bool trimToUsage) -{ - cemu_assert_debug(splitPosition.IsInstructionIndex()); - cemu_assert_debug(!subrange->interval.IsNextSegmentOnly() && !subrange->interval.IsPreviousSegmentOnly()); - cemu_assert_debug(subrange->interval.ContainsEdge(splitPosition)); - // determine new intervals - raInterval headInterval, tailInterval; - headInterval.SetInterval(subrange->interval.start, splitPosition-1); - tailInterval.SetInterval(splitPosition, subrange->interval.end); - cemu_assert_debug(headInterval.start <= headInterval.end); - cemu_assert_debug(tailInterval.start <= tailInterval.end); - // create tail - raLivenessRange* tailSubrange = IMLRA_CreateRange(ppcImlGenContext, subrange->imlSegment, subrange->GetVirtualRegister(), subrange->GetName(), tailInterval.start, tailInterval.end); - tailSubrange->SetPhysicalRegister(subrange->GetPhysicalRegister()); - // carry over branch targets and update reverse references - tailSubrange->subrangeBranchTaken = subrange->subrangeBranchTaken; - tailSubrange->subrangeBranchNotTaken = subrange->subrangeBranchNotTaken; - subrange->subrangeBranchTaken = nullptr; - subrange->subrangeBranchNotTaken = nullptr; - if(tailSubrange->subrangeBranchTaken) - *std::find(tailSubrange->subrangeBranchTaken->previousRanges.begin(), tailSubrange->subrangeBranchTaken->previousRanges.end(), subrange) = tailSubrange; - if(tailSubrange->subrangeBranchNotTaken) - *std::find(tailSubrange->subrangeBranchNotTaken->previousRanges.begin(), tailSubrange->subrangeBranchNotTaken->previousRanges.end(), subrange) = tailSubrange; - // we assume that list_locations is ordered by instruction index and contains no duplicate indices, so lets check that here just in case -#ifdef CEMU_DEBUG_ASSERT - if(subrange->list_accessLocations.size() > 1) - { - for(size_t i=0; 
ilist_accessLocations.size()-1; i++) - { - cemu_assert_debug(subrange->list_accessLocations[i].pos < subrange->list_accessLocations[i+1].pos); - } - } -#endif - // split locations - auto it = std::lower_bound( - subrange->list_accessLocations.begin(), subrange->list_accessLocations.end(), splitPosition, - [](const raAccessLocation& accessLoc, raInstructionEdge value) { return accessLoc.pos < value; } - ); - size_t originalCount = subrange->list_accessLocations.size(); - tailSubrange->list_accessLocations.insert(tailSubrange->list_accessLocations.end(), it, subrange->list_accessLocations.end()); - subrange->list_accessLocations.erase(it, subrange->list_accessLocations.end()); - cemu_assert_debug(subrange->list_accessLocations.empty() || subrange->list_accessLocations.back().pos < splitPosition); - cemu_assert_debug(tailSubrange->list_accessLocations.empty() || tailSubrange->list_accessLocations.front().pos >= splitPosition); - cemu_assert_debug(subrange->list_accessLocations.size() + tailSubrange->list_accessLocations.size() == originalCount); - // split fixed reg requirements - for (sint32 i = 0; i < subrange->list_fixedRegRequirements.size(); i++) - { - raFixedRegRequirement* fixedReg = subrange->list_fixedRegRequirements.data() + i; - if (tailInterval.ContainsEdge(fixedReg->pos)) - { - tailSubrange->list_fixedRegRequirements.push_back(*fixedReg); - } - } - // remove tail fixed reg requirements from head - for (sint32 i = 0; i < subrange->list_fixedRegRequirements.size(); i++) - { - raFixedRegRequirement* fixedReg = subrange->list_fixedRegRequirements.data() + i; - if (!headInterval.ContainsEdge(fixedReg->pos)) - { - subrange->list_fixedRegRequirements.resize(i); - break; - } - } - // adjust intervals - subrange->interval = headInterval; - tailSubrange->interval = tailInterval; - // trim to hole - if(trimToUsage) - { - if(subrange->list_accessLocations.empty() && (subrange->interval.start.IsInstructionIndex() && subrange->interval.end.IsInstructionIndex())) - { - 
IMLRA_DeleteRange(ppcImlGenContext, subrange); - subrange = nullptr; - } - else - { - IMLRA_TrimRangeToUse(subrange); - } - if(tailSubrange->list_accessLocations.empty() && (tailSubrange->interval.start.IsInstructionIndex() && tailSubrange->interval.end.IsInstructionIndex())) - { - IMLRA_DeleteRange(ppcImlGenContext, tailSubrange); - tailSubrange = nullptr; - } - else - { - IMLRA_TrimRangeToUse(tailSubrange); - } - } - // validation - cemu_assert_debug(!subrange || subrange->interval.start <= subrange->interval.end); - cemu_assert_debug(!tailSubrange || tailSubrange->interval.start <= tailSubrange->interval.end); - cemu_assert_debug(!tailSubrange || tailSubrange->interval.start >= splitPosition); - if (!trimToUsage) - cemu_assert_debug(!tailSubrange || tailSubrange->interval.start == splitPosition); - - if(subrange) - PPCRecRA_debugValidateSubrange(subrange); - if(tailSubrange) - PPCRecRA_debugValidateSubrange(tailSubrange); - return tailSubrange; -} - -sint32 IMLRA_GetSegmentReadWriteCost(IMLSegment* imlSegment) -{ - sint32 v = imlSegment->loopDepth + 1; - v *= 5; - return v*v; // 25, 100, 225, 400 -} - -// calculate additional cost of range that it would have after calling _ExplodeRange() on it -sint32 IMLRA_CalculateAdditionalCostOfRangeExplode(raLivenessRange* subrange) -{ - auto ranges = subrange->GetAllSubrangesInCluster(); - sint32 cost = 0;//-PPCRecRARange_estimateTotalCost(ranges); - for (auto& subrange : ranges) - { - if (subrange->list_accessLocations.empty()) - continue; // this range would be deleted and thus has no cost - sint32 segmentLoadStoreCost = IMLRA_GetSegmentReadWriteCost(subrange->imlSegment); - bool hasAdditionalLoad = subrange->interval.ExtendsPreviousSegment(); - bool hasAdditionalStore = subrange->interval.ExtendsIntoNextSegment(); - if(hasAdditionalLoad && subrange->list_accessLocations.front().IsWrite()) // if written before read then a load isn't necessary - { - cemu_assert_debug(!subrange->list_accessLocations.front().IsRead()); - 
cost += segmentLoadStoreCost; - } - if(hasAdditionalStore) - { - bool hasWrite = std::find_if(subrange->list_accessLocations.begin(), subrange->list_accessLocations.end(), [](const raAccessLocation& loc) { return loc.IsWrite(); }) != subrange->list_accessLocations.end(); - if(!hasWrite) // ranges which don't modify their value do not need to be stored - cost += segmentLoadStoreCost; - } - } - // todo - properly calculating all the data-flow dependency based costs is more complex so this currently is an approximation - return cost; -} - -sint32 IMLRA_CalculateAdditionalCostAfterSplit(raLivenessRange* subrange, raInstructionEdge splitPosition) -{ - // validation -#ifdef CEMU_DEBUG_ASSERT - if (subrange->interval.ExtendsIntoNextSegment()) - assert_dbg(); -#endif - cemu_assert_debug(splitPosition.IsInstructionIndex()); - - sint32 cost = 0; - // find split position in location list - if (subrange->list_accessLocations.empty()) - return 0; - if (splitPosition <= subrange->list_accessLocations.front().pos) - return 0; - if (splitPosition > subrange->list_accessLocations.back().pos) - return 0; - - size_t firstTailLocationIndex = 0; - for (size_t i = 0; i < subrange->list_accessLocations.size(); i++) - { - if (subrange->list_accessLocations[i].pos >= splitPosition) - { - firstTailLocationIndex = i; - break; - } - } - std::span headLocations{subrange->list_accessLocations.data(), firstTailLocationIndex}; - std::span tailLocations{subrange->list_accessLocations.data() + firstTailLocationIndex, subrange->list_accessLocations.size() - firstTailLocationIndex}; - cemu_assert_debug(headLocations.empty() || headLocations.back().pos < splitPosition); - cemu_assert_debug(tailLocations.empty() || tailLocations.front().pos >= splitPosition); - - sint32 segmentLoadStoreCost = IMLRA_GetSegmentReadWriteCost(subrange->imlSegment); - - auto CalculateCostFromLocationRange = [segmentLoadStoreCost](std::span locations, bool trackLoadCost = true, bool trackStoreCost = true) -> sint32 - { - 
if(locations.empty()) - return 0; - sint32 cost = 0; - if(locations.front().IsRead() && trackLoadCost) - cost += segmentLoadStoreCost; // not overwritten, so there is a load cost - bool hasWrite = std::find_if(locations.begin(), locations.end(), [](const raAccessLocation& loc) { return loc.IsWrite(); }) != locations.end(); - if(hasWrite && trackStoreCost) - cost += segmentLoadStoreCost; // modified, so there is a store cost - return cost; - }; - - sint32 baseCost = CalculateCostFromLocationRange(subrange->list_accessLocations); - - bool tailOverwritesValue = !tailLocations.empty() && !tailLocations.front().IsRead() && tailLocations.front().IsWrite(); - - sint32 newCost = CalculateCostFromLocationRange(headLocations) + CalculateCostFromLocationRange(tailLocations, !tailOverwritesValue, true); - cemu_assert_debug(newCost >= baseCost); - cost = newCost - baseCost; - - return cost; -} \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.h b/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.h deleted file mode 100644 index b0685cc5..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLRegisterAllocatorRanges.h +++ /dev/null @@ -1,364 +0,0 @@ -#pragma once -#include "IMLRegisterAllocator.h" - -struct raLivenessSubrangeLink -{ - struct raLivenessRange* prev; - struct raLivenessRange* next; -}; - -struct raInstructionEdge -{ - friend struct raInterval; -public: - raInstructionEdge() - { - index = 0; - } - - raInstructionEdge(sint32 instructionIndex, bool isInputEdge) - { - Set(instructionIndex, isInputEdge); - } - - void Set(sint32 instructionIndex, bool isInputEdge) - { - if(instructionIndex == RA_INTER_RANGE_START || instructionIndex == RA_INTER_RANGE_END) - { - index = instructionIndex; - return; - } - index = instructionIndex * 2 + (isInputEdge ? 
0 : 1); - cemu_assert_debug(index >= 0 && index < 0x100000*2); // make sure index value is sane - } - - void SetRaw(sint32 index) - { - this->index = index; - cemu_assert_debug(index == RA_INTER_RANGE_START || index == RA_INTER_RANGE_END || (index >= 0 && index < 0x100000*2)); // make sure index value is sane - } - - // sint32 GetRaw() - // { - // this->index = index; - // } - - std::string GetDebugString() - { - if(index == RA_INTER_RANGE_START) - return "RA_START"; - else if(index == RA_INTER_RANGE_END) - return "RA_END"; - std::string str = fmt::format("{}", GetInstructionIndex()); - if(IsOnInputEdge()) - str += "i"; - else if(IsOnOutputEdge()) - str += "o"; - return str; - } - - sint32 GetInstructionIndex() const - { - cemu_assert_debug(index != RA_INTER_RANGE_START && index != RA_INTER_RANGE_END); - return index >> 1; - } - - // returns instruction index or RA_INTER_RANGE_START/RA_INTER_RANGE_END - sint32 GetInstructionIndexEx() const - { - if(index == RA_INTER_RANGE_START || index == RA_INTER_RANGE_END) - return index; - return index >> 1; - } - - sint32 GetRaw() const - { - return index; - } - - bool IsOnInputEdge() const - { - cemu_assert_debug(index != RA_INTER_RANGE_START && index != RA_INTER_RANGE_END); - return (index&1) == 0; - } - - bool IsOnOutputEdge() const - { - cemu_assert_debug(index != RA_INTER_RANGE_START && index != RA_INTER_RANGE_END); - return (index&1) != 0; - } - - bool ConnectsToPreviousSegment() const - { - return index == RA_INTER_RANGE_START; - } - - bool ConnectsToNextSegment() const - { - return index == RA_INTER_RANGE_END; - } - - bool IsInstructionIndex() const - { - return index != RA_INTER_RANGE_START && index != RA_INTER_RANGE_END; - } - - // comparison operators - bool operator>(const raInstructionEdge& other) const - { - return index > other.index; - } - bool operator<(const raInstructionEdge& other) const - { - return index < other.index; - } - bool operator<=(const raInstructionEdge& other) const - { - return index <= 
other.index; - } - bool operator>=(const raInstructionEdge& other) const - { - return index >= other.index; - } - bool operator==(const raInstructionEdge& other) const - { - return index == other.index; - } - - raInstructionEdge operator+(sint32 offset) const - { - cemu_assert_debug(IsInstructionIndex()); - cemu_assert_debug(offset >= 0 && offset < RA_INTER_RANGE_END); - raInstructionEdge edge; - edge.index = index + offset; - return edge; - } - - raInstructionEdge operator-(sint32 offset) const - { - cemu_assert_debug(IsInstructionIndex()); - cemu_assert_debug(offset >= 0 && offset < RA_INTER_RANGE_END); - raInstructionEdge edge; - edge.index = index - offset; - return edge; - } - - raInstructionEdge& operator++() - { - cemu_assert_debug(IsInstructionIndex()); - index++; - return *this; - } - -private: - sint32 index; // can also be RA_INTER_RANGE_START or RA_INTER_RANGE_END, otherwise contains instruction index * 2 - -}; - -struct raAccessLocation -{ - raAccessLocation(raInstructionEdge pos) : pos(pos) {} - - bool IsRead() const - { - return pos.IsOnInputEdge(); - } - - bool IsWrite() const - { - return pos.IsOnOutputEdge(); - } - - raInstructionEdge pos; -}; - -struct raInterval -{ - raInterval() - { - - } - - raInterval(raInstructionEdge start, raInstructionEdge end) - { - SetInterval(start, end); - } - - // isStartOnInput = Input+Output edge on first instruction. If false then only output - // isEndOnOutput = Input+Output edge on last instruction. 
If false then only input - void SetInterval(sint32 start, bool isStartOnInput, sint32 end, bool isEndOnOutput) - { - this->start.Set(start, isStartOnInput); - this->end.Set(end, !isEndOnOutput); - } - - void SetInterval(raInstructionEdge start, raInstructionEdge end) - { - cemu_assert_debug(start <= end); - this->start = start; - this->end = end; - } - - void SetStart(const raInstructionEdge& edge) - { - start = edge; - } - - void SetEnd(const raInstructionEdge& edge) - { - end = edge; - } - - sint32 GetStartIndex() const - { - return start.GetInstructionIndex(); - } - - sint32 GetEndIndex() const - { - return end.GetInstructionIndex(); - } - - bool ExtendsPreviousSegment() const - { - return start.ConnectsToPreviousSegment(); - } - - bool ExtendsIntoNextSegment() const - { - return end.ConnectsToNextSegment(); - } - - bool IsNextSegmentOnly() const - { - return start.ConnectsToNextSegment() && end.ConnectsToNextSegment(); - } - - bool IsPreviousSegmentOnly() const - { - return start.ConnectsToPreviousSegment() && end.ConnectsToPreviousSegment(); - } - - // returns true if range is contained within a single segment - bool IsLocal() const - { - return start.GetRaw() > RA_INTER_RANGE_START && end.GetRaw() < RA_INTER_RANGE_END; - } - - bool ContainsInstructionIndex(sint32 instructionIndex) const - { - cemu_assert_debug(instructionIndex != RA_INTER_RANGE_START && instructionIndex != RA_INTER_RANGE_END); - return instructionIndex >= start.GetInstructionIndexEx() && instructionIndex <= end.GetInstructionIndexEx(); - } - - // similar to ContainsInstructionIndex, but allows RA_INTER_RANGE_START/END as input - bool ContainsInstructionIndexEx(sint32 instructionIndex) const - { - if(instructionIndex == RA_INTER_RANGE_START) - return start.ConnectsToPreviousSegment(); - if(instructionIndex == RA_INTER_RANGE_END) - return end.ConnectsToNextSegment(); - return instructionIndex >= start.GetInstructionIndexEx() && instructionIndex <= end.GetInstructionIndexEx(); - } - - bool 
ContainsEdge(const raInstructionEdge& edge) const - { - return edge >= start && edge <= end; - } - - bool ContainsWholeInterval(const raInterval& other) const - { - return other.start >= start && other.end <= end; - } - - bool IsOverlapping(const raInterval& other) const - { - return start <= other.end && end >= other.start; - } - - sint32 GetPreciseDistance() - { - cemu_assert_debug(!start.ConnectsToNextSegment()); // how to handle this? - if(start == end) - return 1; - cemu_assert_debug(!end.ConnectsToPreviousSegment() && !end.ConnectsToNextSegment()); - if(start.ConnectsToPreviousSegment()) - return end.GetRaw() + 1; - - return end.GetRaw() - start.GetRaw() + 1; // +1 because end is inclusive - } - -//private: not making these directly accessible only forces us to create loads of verbose getters and setters - raInstructionEdge start; - raInstructionEdge end; -}; - -struct raFixedRegRequirement -{ - raInstructionEdge pos; - IMLPhysRegisterSet allowedReg; -}; - -struct raLivenessRange -{ - IMLSegment* imlSegment; - raInterval interval; - - // dirty state tracking - bool _noLoad; - bool hasStore; - bool hasStoreDelayed; - // next - raLivenessRange* subrangeBranchTaken; - raLivenessRange* subrangeBranchNotTaken; - // reverse counterpart of BranchTaken/BranchNotTaken - boost::container::small_vector previousRanges; - // processing - uint32 lastIterationIndex; - // instruction read/write locations - std::vector list_accessLocations; - // ordered list of all raInstructionEdge indices which require a fixed register - std::vector list_fixedRegRequirements; - // linked list (subranges with same GPR virtual register) - raLivenessSubrangeLink link_sameVirtualRegister; - // linked list (all subranges for this segment) - raLivenessSubrangeLink link_allSegmentRanges; - // register info - IMLRegID virtualRegister; - IMLName name; - // register allocator result - IMLPhysReg physicalRegister; - - boost::container::small_vector GetAllSubrangesInCluster(); - bool 
GetAllowedRegistersEx(IMLPhysRegisterSet& allowedRegisters); // if the cluster has fixed register requirements in any instruction this returns the combined register mask. Otherwise returns false in which case allowedRegisters is left undefined - IMLPhysRegisterSet GetAllowedRegisters(IMLPhysRegisterSet regPool); // return regPool with fixed register requirements filtered out - - IMLRegID GetVirtualRegister() const; - sint32 GetPhysicalRegister() const; - bool HasPhysicalRegister() const { return physicalRegister >= 0; } - IMLName GetName() const; - void SetPhysicalRegister(IMLPhysReg physicalRegister); - void SetPhysicalRegisterForCluster(IMLPhysReg physicalRegister); - void UnsetPhysicalRegister() { physicalRegister = -1; } - - private: - void GetAllowedRegistersExRecursive(raLivenessRange* range, uint32 iterationIndex, IMLPhysRegisterSet& allowedRegs); -}; - -raLivenessRange* IMLRA_CreateRange(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* imlSegment, IMLRegID virtualRegister, IMLName name, raInstructionEdge startPosition, raInstructionEdge endPosition); -void IMLRA_DeleteRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* subrange); -void IMLRA_DeleteAllRanges(ppcImlGenContext_t* ppcImlGenContext); - -void IMLRA_ExplodeRangeCluster(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* originRange); - -void IMLRA_MergeSubranges(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange* subrange, raLivenessRange* absorbedSubrange); - -raLivenessRange* IMLRA_SplitRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange*& subrange, raInstructionEdge splitPosition, bool trimToUsage = false); - -void PPCRecRA_debugValidateSubrange(raLivenessRange* subrange); - -// cost estimation -sint32 IMLRA_GetSegmentReadWriteCost(IMLSegment* imlSegment); -sint32 IMLRA_CalculateAdditionalCostOfRangeExplode(raLivenessRange* subrange); -//sint32 PPCRecRARange_estimateAdditionalCostAfterSplit(raLivenessRange* subrange, sint32 splitIndex); -sint32 
IMLRA_CalculateAdditionalCostAfterSplit(raLivenessRange* subrange, raInstructionEdge splitPosition); \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLSegment.cpp b/src/Cafe/HW/Espresso/Recompiler/IML/IMLSegment.cpp deleted file mode 100644 index f3b6834f..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLSegment.cpp +++ /dev/null @@ -1,133 +0,0 @@ -#include "IMLInstruction.h" -#include "IMLSegment.h" - -void IMLSegment::SetEnterable(uint32 enterAddress) -{ - cemu_assert_debug(!isEnterable || enterPPCAddress == enterAddress); - isEnterable = true; - enterPPCAddress = enterAddress; -} - -bool IMLSegment::HasSuffixInstruction() const -{ - if (imlList.empty()) - return false; - const IMLInstruction& imlInstruction = imlList.back(); - return imlInstruction.IsSuffixInstruction(); -} - -sint32 IMLSegment::GetSuffixInstructionIndex() const -{ - cemu_assert_debug(HasSuffixInstruction()); - return (sint32)(imlList.size() - 1); -} - -IMLInstruction* IMLSegment::GetLastInstruction() -{ - if (imlList.empty()) - return nullptr; - return &imlList.back(); -} - -void IMLSegment::SetLinkBranchNotTaken(IMLSegment* imlSegmentDst) -{ - if (nextSegmentBranchNotTaken) - nextSegmentBranchNotTaken->list_prevSegments.erase(std::find(nextSegmentBranchNotTaken->list_prevSegments.begin(), nextSegmentBranchNotTaken->list_prevSegments.end(), this)); - nextSegmentBranchNotTaken = imlSegmentDst; - if(imlSegmentDst) - imlSegmentDst->list_prevSegments.push_back(this); -} - -void IMLSegment::SetLinkBranchTaken(IMLSegment* imlSegmentDst) -{ - if (nextSegmentBranchTaken) - nextSegmentBranchTaken->list_prevSegments.erase(std::find(nextSegmentBranchTaken->list_prevSegments.begin(), nextSegmentBranchTaken->list_prevSegments.end(), this)); - nextSegmentBranchTaken = imlSegmentDst; - if (imlSegmentDst) - imlSegmentDst->list_prevSegments.push_back(this); -} - -IMLInstruction* IMLSegment::AppendInstruction() -{ - IMLInstruction& inst = imlList.emplace_back(); - memset(&inst, 
0, sizeof(IMLInstruction)); - return &inst; -} - -void IMLSegment_SetLinkBranchNotTaken(IMLSegment* imlSegmentSrc, IMLSegment* imlSegmentDst) -{ - // make sure segments aren't already linked - if (imlSegmentSrc->nextSegmentBranchNotTaken == imlSegmentDst) - return; - // add as next segment for source - if (imlSegmentSrc->nextSegmentBranchNotTaken != nullptr) - assert_dbg(); - imlSegmentSrc->nextSegmentBranchNotTaken = imlSegmentDst; - // add as previous segment for destination - imlSegmentDst->list_prevSegments.push_back(imlSegmentSrc); -} - -void IMLSegment_SetLinkBranchTaken(IMLSegment* imlSegmentSrc, IMLSegment* imlSegmentDst) -{ - // make sure segments aren't already linked - if (imlSegmentSrc->nextSegmentBranchTaken == imlSegmentDst) - return; - // add as next segment for source - if (imlSegmentSrc->nextSegmentBranchTaken != nullptr) - assert_dbg(); - imlSegmentSrc->nextSegmentBranchTaken = imlSegmentDst; - // add as previous segment for destination - imlSegmentDst->list_prevSegments.push_back(imlSegmentSrc); -} - -void IMLSegment_RemoveLink(IMLSegment* imlSegmentSrc, IMLSegment* imlSegmentDst) -{ - if (imlSegmentSrc->nextSegmentBranchNotTaken == imlSegmentDst) - { - imlSegmentSrc->nextSegmentBranchNotTaken = nullptr; - } - else if (imlSegmentSrc->nextSegmentBranchTaken == imlSegmentDst) - { - imlSegmentSrc->nextSegmentBranchTaken = nullptr; - } - else - assert_dbg(); - - bool matchFound = false; - for (sint32 i = 0; i < imlSegmentDst->list_prevSegments.size(); i++) - { - if (imlSegmentDst->list_prevSegments[i] == imlSegmentSrc) - { - imlSegmentDst->list_prevSegments.erase(imlSegmentDst->list_prevSegments.begin() + i); - matchFound = true; - break; - } - } - if (matchFound == false) - assert_dbg(); -} - -/* - * Replaces all links to segment orig with linkts to segment new - */ -void IMLSegment_RelinkInputSegment(IMLSegment* imlSegmentOrig, IMLSegment* imlSegmentNew) -{ - while (imlSegmentOrig->list_prevSegments.size() != 0) - { - IMLSegment* prevSegment = 
imlSegmentOrig->list_prevSegments[0]; - if (prevSegment->nextSegmentBranchNotTaken == imlSegmentOrig) - { - IMLSegment_RemoveLink(prevSegment, imlSegmentOrig); - IMLSegment_SetLinkBranchNotTaken(prevSegment, imlSegmentNew); - } - else if (prevSegment->nextSegmentBranchTaken == imlSegmentOrig) - { - IMLSegment_RemoveLink(prevSegment, imlSegmentOrig); - IMLSegment_SetLinkBranchTaken(prevSegment, imlSegmentNew); - } - else - { - assert_dbg(); - } - } -} diff --git a/src/Cafe/HW/Espresso/Recompiler/IML/IMLSegment.h b/src/Cafe/HW/Espresso/Recompiler/IML/IMLSegment.h deleted file mode 100644 index 10e3dc06..00000000 --- a/src/Cafe/HW/Espresso/Recompiler/IML/IMLSegment.h +++ /dev/null @@ -1,193 +0,0 @@ -#pragma once -#include "IMLInstruction.h" - -#include - -// special values to mark the index of ranges that reach across the segment border -#define RA_INTER_RANGE_START (-1) -#define RA_INTER_RANGE_END (0x70000000) - -struct IMLSegmentPoint -{ - friend struct IMLSegmentInterval; - - sint32 index; - struct IMLSegment* imlSegment; // do we really need to track this? SegmentPoints are always accessed via the segment that they are part of - IMLSegmentPoint* next; - IMLSegmentPoint* prev; - - // the index is the instruction index times two. 
- // this gives us the ability to cover half an instruction with RA ranges - // covering only the first half of an instruction (0-0) means that the register is read, but not preserved - // covering first and the second half means the register is read and preserved - // covering only the second half means the register is written but not read - - sint32 GetInstructionIndex() const - { - return index; - } - - void SetInstructionIndex(sint32 index) - { - this->index = index; - } - - void ShiftIfAfter(sint32 instructionIndex, sint32 shiftCount) - { - if (!IsPreviousSegment() && !IsNextSegment()) - { - if (GetInstructionIndex() >= instructionIndex) - index += shiftCount; - } - } - - void DecrementByOneInstruction() - { - index--; - } - - // the segment point can point beyond the first and last instruction which indicates that it is an infinite range reaching up to the previous or next segment - bool IsPreviousSegment() const { return index == RA_INTER_RANGE_START; } - bool IsNextSegment() const { return index == RA_INTER_RANGE_END; } - - // overload operand > and < - bool operator>(const IMLSegmentPoint& other) const { return index > other.index; } - bool operator<(const IMLSegmentPoint& other) const { return index < other.index; } - bool operator==(const IMLSegmentPoint& other) const { return index == other.index; } - bool operator!=(const IMLSegmentPoint& other) const { return index != other.index; } - - // overload comparison operands for sint32 - bool operator>(const sint32 other) const { return index > other; } - bool operator<(const sint32 other) const { return index < other; } - bool operator<=(const sint32 other) const { return index <= other; } - bool operator>=(const sint32 other) const { return index >= other; } -}; - -struct IMLSegmentInterval -{ - IMLSegmentPoint start; - IMLSegmentPoint end; - - bool ContainsInstructionIndex(sint32 offset) const { return start <= offset && end > offset; } - - bool IsRangeOverlapping(const IMLSegmentInterval& other) - { - // 
todo - compare the raw index - sint32 r1start = this->start.GetInstructionIndex(); - sint32 r1end = this->end.GetInstructionIndex(); - sint32 r2start = other.start.GetInstructionIndex(); - sint32 r2end = other.end.GetInstructionIndex(); - if (r1start < r2end && r1end > r2start) - return true; - if (this->start.IsPreviousSegment() && r1start == r2start) - return true; - if (this->end.IsNextSegment() && r1end == r2end) - return true; - return false; - } - - bool ExtendsIntoPreviousSegment() const - { - return start.IsPreviousSegment(); - } - - bool ExtendsIntoNextSegment() const - { - return end.IsNextSegment(); - } - - bool IsNextSegmentOnly() const - { - if(!start.IsNextSegment()) - return false; - cemu_assert_debug(end.IsNextSegment()); - return true; - } - - bool IsPreviousSegmentOnly() const - { - if (!end.IsPreviousSegment()) - return false; - cemu_assert_debug(start.IsPreviousSegment()); - return true; - } - - sint32 GetDistance() const - { - // todo - assert if either start or end is outside the segment - // we may also want to switch this to raw indices? 
- return end.GetInstructionIndex() - start.GetInstructionIndex(); - } -}; - -struct PPCSegmentRegisterAllocatorInfo_t -{ - // used during loop detection - bool isPartOfProcessedLoop{}; - sint32 lastIterationIndex{}; - // linked lists - struct raLivenessRange* linkedList_allSubranges{}; - std::unordered_map linkedList_perVirtualRegister; -}; - -struct IMLSegment -{ - sint32 momentaryIndex{}; // index in segment list, generally not kept up to date except if needed (necessary for loop detection) - sint32 loopDepth{}; - uint32 ppcAddress{}; // ppc address (0xFFFFFFFF if not associated with an address) - uint32 x64Offset{}; // x64 code offset of segment start - // list of intermediate instructions in this segment - std::vector imlList; - // segment link - IMLSegment* nextSegmentBranchNotTaken{}; // this is also the default for segments where there is no branch - IMLSegment* nextSegmentBranchTaken{}; - bool nextSegmentIsUncertain{}; - std::vector list_prevSegments{}; - // source for overwrite analysis (if nextSegmentIsUncertain is true) - // sometimes a segment is marked as an exit point, but for the purposes of dead code elimination we know the next segment - IMLSegment* deadCodeEliminationHintSeg{}; - std::vector list_deadCodeHintBy{}; - // enterable segments - bool isEnterable{}; // this segment can be entered from outside the recompiler (no preloaded registers necessary) - uint32 enterPPCAddress{}; // used if isEnterable is true - // register allocator info - PPCSegmentRegisterAllocatorInfo_t raInfo{}; - // segment state API - void SetEnterable(uint32 enterAddress); - void SetLinkBranchNotTaken(IMLSegment* imlSegmentDst); - void SetLinkBranchTaken(IMLSegment* imlSegmentDst); - - IMLSegment* GetBranchTaken() - { - return nextSegmentBranchTaken; - } - - IMLSegment* GetBranchNotTaken() - { - return nextSegmentBranchNotTaken; - } - - void SetNextSegmentForOverwriteHints(IMLSegment* seg) - { - cemu_assert_debug(!deadCodeEliminationHintSeg); - deadCodeEliminationHintSeg = 
seg; - if (seg) - seg->list_deadCodeHintBy.push_back(this); - } - - // instruction API - IMLInstruction* AppendInstruction(); - - bool HasSuffixInstruction() const; - sint32 GetSuffixInstructionIndex() const; - IMLInstruction* GetLastInstruction(); - - // segment points - IMLSegmentPoint* segmentPointList{}; -}; - - -void IMLSegment_SetLinkBranchNotTaken(IMLSegment* imlSegmentSrc, IMLSegment* imlSegmentDst); -void IMLSegment_SetLinkBranchTaken(IMLSegment* imlSegmentSrc, IMLSegment* imlSegmentDst); -void IMLSegment_RelinkInputSegment(IMLSegment* imlSegmentOrig, IMLSegment* imlSegmentNew); -void IMLSegment_RemoveLink(IMLSegment* imlSegmentSrc, IMLSegment* imlSegmentDst); diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCFunctionBoundaryTracker.h b/src/Cafe/HW/Espresso/Recompiler/PPCFunctionBoundaryTracker.h index 96b5143e..e558292b 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCFunctionBoundaryTracker.h +++ b/src/Cafe/HW/Espresso/Recompiler/PPCFunctionBoundaryTracker.h @@ -21,16 +21,6 @@ public: }; public: - ~PPCFunctionBoundaryTracker() - { - while (!map_ranges.empty()) - { - PPCRange_t* range = *map_ranges.begin(); - delete range; - map_ranges.erase(map_ranges.begin()); - } - } - void trackStartPoint(MPTR startAddress) { processRange(startAddress, nullptr, nullptr); @@ -50,34 +40,10 @@ public: return false; } - std::vector GetRanges() - { - std::vector r; - for (auto& it : map_ranges) - r.emplace_back(*it); - return r; - } - - bool ContainsAddress(uint32 addr) const - { - for (auto& it : map_ranges) - { - if (addr >= it->startAddress && addr < it->getEndAddress()) - return true; - } - return false; - } - - const std::set& GetBranchTargets() const - { - return map_branchTargetsAll; - } - private: void addBranchDestination(PPCRange_t* sourceRange, MPTR address) { - map_queuedBranchTargets.emplace(address); - map_branchTargetsAll.emplace(address); + map_branchTargets.emplace(address); } // process flow of instruction @@ -148,7 +114,7 @@ private: Espresso::BOField BO; 
uint32 BI; bool LK; - Espresso::decodeOp_BCSPR(opcode, BO, BI, LK); + Espresso::decodeOp_BCLR(opcode, BO, BI, LK); if (BO.branchAlways() && !LK) { // unconditional BLR @@ -252,7 +218,7 @@ private: auto rangeItr = map_ranges.begin(); PPCRange_t* previousRange = nullptr; - for (std::set::const_iterator targetItr = map_queuedBranchTargets.begin() ; targetItr != map_queuedBranchTargets.end(); ) + for (std::set::const_iterator targetItr = map_branchTargets.begin() ; targetItr != map_branchTargets.end(); ) { while (rangeItr != map_ranges.end() && ((*rangeItr)->startAddress + (*rangeItr)->length) <= (*targetItr)) { @@ -273,7 +239,7 @@ private: (*targetItr) < ((*rangeItr)->startAddress + (*rangeItr)->length)) { // delete visited targets - targetItr = map_queuedBranchTargets.erase(targetItr); + targetItr = map_branchTargets.erase(targetItr); continue; } @@ -323,6 +289,5 @@ private: }; std::set map_ranges; - std::set map_queuedBranchTargets; - std::set map_branchTargetsAll; + std::set map_branchTargets; }; \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.cpp index 6125c7da..24e87bd1 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.cpp @@ -2,6 +2,7 @@ #include "PPCFunctionBoundaryTracker.h" #include "PPCRecompiler.h" #include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" #include "Cafe/OS/RPL/rpl.h" #include "util/containers/RangeStore.h" #include "Cafe/OS/libs/coreinit/coreinit_CodeGen.h" @@ -13,17 +14,6 @@ #include "util/helpers/helpers.h" #include "util/MemMapper/MemMapper.h" -#include "IML/IML.h" -#include "IML/IMLRegisterAllocator.h" -#include "BackendX64/BackendX64.h" -#ifdef __aarch64__ -#include "BackendAArch64/BackendAArch64.h" -#endif -#include "util/highresolutiontimer/HighResolutionTimer.h" - -#define PPCREC_FORCE_SYNCHRONOUS_COMPILATION 0 // if 1, then function recompilation will block and execute on the 
thread that called PPCRecompiler_visitAddressNoBlock -#define PPCREC_LOG_RECOMPILATION_RESULTS 0 - struct PPCInvalidationRange { MPTR startAddress; @@ -47,36 +37,11 @@ void ATTR_MS_ABI (*PPCRecompiler_leaveRecompilerCode_unvisited)(); PPCRecompilerInstanceData_t* ppcRecompilerInstanceData; -#if PPCREC_FORCE_SYNCHRONOUS_COMPILATION -static std::mutex s_singleRecompilationMutex; -#endif - bool ppcRecompilerEnabled = false; -void PPCRecompiler_recompileAtAddress(uint32 address); - // this function does never block and can fail if the recompiler lock cannot be acquired immediately void PPCRecompiler_visitAddressNoBlock(uint32 enterAddress) { -#if PPCREC_FORCE_SYNCHRONOUS_COMPILATION - if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] != PPCRecompiler_leaveRecompilerCode_unvisited) - return; - PPCRecompilerState.recompilerSpinlock.lock(); - if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] != PPCRecompiler_leaveRecompilerCode_unvisited) - { - PPCRecompilerState.recompilerSpinlock.unlock(); - return; - } - ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] = PPCRecompiler_leaveRecompilerCode_visited; - PPCRecompilerState.recompilerSpinlock.unlock(); - s_singleRecompilationMutex.lock(); - if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] == PPCRecompiler_leaveRecompilerCode_visited) - { - PPCRecompiler_recompileAtAddress(enterAddress); - } - s_singleRecompilationMutex.unlock(); - return; -#endif // quick read-only check without lock if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] != PPCRecompiler_leaveRecompilerCode_unvisited) return; @@ -162,15 +127,15 @@ void PPCRecompiler_attemptEnter(PPCInterpreter_t* hCPU, uint32 enterAddress) PPCRecompiler_enter(hCPU, funcPtr); } } -bool PPCRecompiler_ApplyIMLPasses(ppcImlGenContext_t& ppcImlGenContext); -PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PPCRange_t 
range, std::set& entryAddresses, std::vector>& entryPointsOut, PPCFunctionBoundaryTracker& boundaryTracker) +PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PPCRange_t range, std::set& entryAddresses, std::vector>& entryPointsOut) { if (range.startAddress >= PPC_REC_CODE_AREA_END) { cemuLog_log(LogType::Force, "Attempting to recompile function outside of allowed code area"); return nullptr; } + uint32 codeGenRangeStart; uint32 codeGenRangeSize = 0; coreinit::OSGetCodegenVirtAddrRangeInternal(codeGenRangeStart, codeGenRangeSize); @@ -188,69 +153,29 @@ PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PP PPCRecFunction_t* ppcRecFunc = new PPCRecFunction_t(); ppcRecFunc->ppcAddress = range.startAddress; ppcRecFunc->ppcSize = range.length; - -#if PPCREC_LOG_RECOMPILATION_RESULTS - BenchmarkTimer bt; - bt.Start(); -#endif - // generate intermediate code ppcImlGenContext_t ppcImlGenContext = { 0 }; - ppcImlGenContext.debug_entryPPCAddress = range.startAddress; - bool compiledSuccessfully = PPCRecompiler_generateIntermediateCode(ppcImlGenContext, ppcRecFunc, entryAddresses, boundaryTracker); + bool compiledSuccessfully = PPCRecompiler_generateIntermediateCode(ppcImlGenContext, ppcRecFunc, entryAddresses); if (compiledSuccessfully == false) { + // todo: Free everything + PPCRecompiler_freeContext(&ppcImlGenContext); delete ppcRecFunc; - return nullptr; + return NULL; } - - uint32 ppcRecLowerAddr = LaunchSettings::GetPPCRecLowerAddr(); - uint32 ppcRecUpperAddr = LaunchSettings::GetPPCRecUpperAddr(); - - if (ppcRecLowerAddr != 0 && ppcRecUpperAddr != 0) - { - if (ppcRecFunc->ppcAddress < ppcRecLowerAddr || ppcRecFunc->ppcAddress > ppcRecUpperAddr) - { - delete ppcRecFunc; - return nullptr; - } - } - - // apply passes - if (!PPCRecompiler_ApplyIMLPasses(ppcImlGenContext)) - { - delete ppcRecFunc; - return nullptr; - } - -#if defined(ARCH_X86_64) // emit x64 code bool x64GenerationSuccess = 
PPCRecompiler_generateX64Code(ppcRecFunc, &ppcImlGenContext); if (x64GenerationSuccess == false) { + PPCRecompiler_freeContext(&ppcImlGenContext); return nullptr; } -#elif defined(__aarch64__) - bool aarch64GenerationSuccess = PPCRecompiler_generateAArch64Code(ppcRecFunc, &ppcImlGenContext); - if (aarch64GenerationSuccess == false) - { - return nullptr; - } -#endif - if (ActiveSettings::DumpRecompilerFunctionsEnabled()) - { - FileStream* fs = FileStream::createFile2(ActiveSettings::GetUserDataPath(fmt::format("dump/recompiler/ppc_{:08x}.bin", ppcRecFunc->ppcAddress))); - if (fs) - { - fs->writeData(ppcRecFunc->x86Code, ppcRecFunc->x86Size); - delete fs; - } - } // collect list of PPC-->x64 entry points entryPointsOut.clear(); - for(IMLSegment* imlSegment : ppcImlGenContext.segmentList2) + for (sint32 s = 0; s < ppcImlGenContext.segmentListCount; s++) { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext.segmentList[s]; if (imlSegment->isEnterable == false) continue; @@ -260,94 +185,10 @@ PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PP entryPointsOut.emplace_back(ppcEnterOffset, x64Offset); } -#if PPCREC_LOG_RECOMPILATION_RESULTS - bt.Stop(); - uint32 codeHash = 0; - for (uint32 i = 0; i < ppcRecFunc->x86Size; i++) - { - codeHash = _rotr(codeHash, 3); - codeHash += ((uint8*)ppcRecFunc->x86Code)[i]; - } - cemuLog_log(LogType::Force, "[Recompiler] PPC 0x{:08x} -> x64: 0x{:x} Took {:.4}ms | Size {:04x} CodeHash {:08x}", (uint32)ppcRecFunc->ppcAddress, (uint64)(uintptr_t)ppcRecFunc->x86Code, bt.GetElapsedMilliseconds(), ppcRecFunc->x86Size, codeHash); -#endif - + PPCRecompiler_freeContext(&ppcImlGenContext); return ppcRecFunc; } -void PPCRecompiler_NativeRegisterAllocatorPass(ppcImlGenContext_t& ppcImlGenContext) -{ - IMLRegisterAllocatorParameters raParam; - - for (auto& it : ppcImlGenContext.mappedRegs) - raParam.regIdToName.try_emplace(it.second.GetRegID(), it.first); - -#if defined(ARCH_X86_64) - auto& gprPhysPool = 
raParam.GetPhysRegPool(IMLRegFormat::I64); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RAX); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RDX); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RBX); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RBP); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RSI); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RDI); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R8); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R9); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R10); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R11); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_R12); - gprPhysPool.SetAvailable(IMLArchX86::PHYSREG_GPR_BASE + X86_REG_RCX); - - // add XMM registers, except XMM15 which is the temporary register - auto& fprPhysPool = raParam.GetPhysRegPool(IMLRegFormat::F64); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 0); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 1); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 2); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 3); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 4); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 5); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 6); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 7); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 8); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 9); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 10); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 11); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 12); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 13); - fprPhysPool.SetAvailable(IMLArchX86::PHYSREG_FPR_BASE + 14); -#elif 
defined(__aarch64__) - auto& gprPhysPool = raParam.GetPhysRegPool(IMLRegFormat::I64); - for (auto i = IMLArchAArch64::PHYSREG_GPR_BASE; i < IMLArchAArch64::PHYSREG_GPR_BASE + IMLArchAArch64::PHYSREG_GPR_COUNT; i++) - { - if (i == IMLArchAArch64::PHYSREG_GPR_BASE + 18) - continue; // Skip reserved platform register - gprPhysPool.SetAvailable(i); - } - - auto& fprPhysPool = raParam.GetPhysRegPool(IMLRegFormat::F64); - for (auto i = IMLArchAArch64::PHYSREG_FPR_BASE; i < IMLArchAArch64::PHYSREG_FPR_BASE + IMLArchAArch64::PHYSREG_FPR_COUNT; i++) - fprPhysPool.SetAvailable(i); -#endif - - IMLRegisterAllocator_AllocateRegisters(&ppcImlGenContext, raParam); -} - -bool PPCRecompiler_ApplyIMLPasses(ppcImlGenContext_t& ppcImlGenContext) -{ - // isolate entry points from function flow (enterable segments must not be the target of any other segment) - // this simplifies logic during register allocation - PPCRecompilerIML_isolateEnterableSegments(&ppcImlGenContext); - - // merge certain float load+store patterns - IMLOptimizer_OptimizeDirectFloatCopies(&ppcImlGenContext); - // delay byte swapping for certain load+store patterns - IMLOptimizer_OptimizeDirectIntegerCopies(&ppcImlGenContext); - - IMLOptimizer_StandardOptimizationPass(ppcImlGenContext); - - PPCRecompiler_NativeRegisterAllocatorPass(ppcImlGenContext); - - return true; -} - bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFunctionBoundaryTracker::PPCRange_t& range, PPCRecFunction_t* ppcRecFunc, std::vector>& entryPoints) { // update jump table @@ -361,7 +202,7 @@ bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFun return false; } - // check if the current range got invalidated during the time it took to recompile it + // check if the current range got invalidated in the time it took to recompile it bool isInvalidated = false; for (auto& invRange : PPCRecompilerState.invalidationRanges) { @@ -439,7 +280,7 @@ void PPCRecompiler_recompileAtAddress(uint32 address) 
PPCRecompilerState.recompilerSpinlock.unlock(); std::vector> functionEntryPoints; - auto func = PPCRecompiler_recompileFunction(range, entryAddresses, functionEntryPoints, funcBoundaries); + auto func = PPCRecompiler_recompileFunction(range, entryAddresses, functionEntryPoints); if (!func) { @@ -454,10 +295,6 @@ std::atomic_bool s_recompilerThreadStopSignal{false}; void PPCRecompiler_thread() { SetThreadName("PPCRecompiler"); -#if PPCREC_FORCE_SYNCHRONOUS_COMPILATION - return; -#endif - while (true) { if(s_recompilerThreadStopSignal) @@ -638,6 +475,44 @@ void PPCRecompiler_invalidateRange(uint32 startAddr, uint32 endAddr) #if defined(ARCH_X86_64) void PPCRecompiler_initPlatform() { + // mxcsr + ppcRecompilerInstanceData->_x64XMM_mxCsr_ftzOn = 0x1F80 | 0x8000; + ppcRecompilerInstanceData->_x64XMM_mxCsr_ftzOff = 0x1F80; +} +#else +void PPCRecompiler_initPlatform() +{ + +} +#endif + +void PPCRecompiler_init() +{ + if (ActiveSettings::GetCPUMode() == CPUMode::SinglecoreInterpreter) + { + ppcRecompilerEnabled = false; + return; + } + if (LaunchSettings::ForceInterpreter()) + { + cemuLog_log(LogType::Force, "Recompiler disabled. 
Command line --force-interpreter was passed"); + return; + } + if (ppcRecompilerInstanceData) + { + MemMapper::FreeReservation(ppcRecompilerInstanceData, sizeof(PPCRecompilerInstanceData_t)); + ppcRecompilerInstanceData = nullptr; + } + debug_printf("Allocating %dMB for recompiler instance data...\n", (sint32)(sizeof(PPCRecompilerInstanceData_t) / 1024 / 1024)); + ppcRecompilerInstanceData = (PPCRecompilerInstanceData_t*)MemMapper::ReserveMemory(nullptr, sizeof(PPCRecompilerInstanceData_t), MemMapper::PAGE_PERMISSION::P_RW); + MemMapper::AllocateMemory(&(ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom), sizeof(PPCRecompilerInstanceData_t) - offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom), MemMapper::PAGE_PERMISSION::P_RW, true); + PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions(); + + PPCRecompiler_allocateRange(0, 0x1000); // the first entry is used for fallback to interpreter + PPCRecompiler_allocateRange(mmuRange_TRAMPOLINE_AREA.getBase(), mmuRange_TRAMPOLINE_AREA.getSize()); + PPCRecompiler_allocateRange(mmuRange_CODECAVE.getBase(), mmuRange_CODECAVE.getSize()); + + // init x64 recompiler instance data ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom[0] = 1ULL << 63ULL; ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom[1] = 0ULL; ppcRecompilerInstanceData->_x64XMM_xorNegateMaskPair[0] = 1ULL << 63ULL; @@ -673,45 +548,44 @@ void PPCRecompiler_initPlatform() ppcRecompilerInstanceData->_x64XMM_flushDenormalMaskResetSignBits[2] = ~0x80000000; ppcRecompilerInstanceData->_x64XMM_flushDenormalMaskResetSignBits[3] = ~0x80000000; - // mxcsr - ppcRecompilerInstanceData->_x64XMM_mxCsr_ftzOn = 0x1F80 | 0x8000; - ppcRecompilerInstanceData->_x64XMM_mxCsr_ftzOff = 0x1F80; -} -#else -void PPCRecompiler_initPlatform() -{ - -} -#endif + // setup GQR scale tables -void PPCRecompiler_init() -{ - if (ActiveSettings::GetCPUMode() == CPUMode::SinglecoreInterpreter) + for (uint32 i = 0; i < 32; i++) { - ppcRecompilerEnabled = false; - 
return; + float a = 1.0f / (float)(1u << i); + float b = 0; + if (i == 0) + b = 4294967296.0f; + else + b = (float)(1u << (32u - i)); + + float ar = (float)(1u << i); + float br = 0; + if (i == 0) + br = 1.0f / 4294967296.0f; + else + br = 1.0f / (float)(1u << (32u - i)); + + ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[i * 2 + 0] = a; + ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[i * 2 + 1] = 1.0f; + ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[(i + 32) * 2 + 0] = b; + ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[(i + 32) * 2 + 1] = 1.0f; + + ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[i * 2 + 0] = a; + ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[i * 2 + 1] = a; + ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[(i + 32) * 2 + 0] = b; + ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[(i + 32) * 2 + 1] = b; + + ppcRecompilerInstanceData->_psq_st_scale_ps0_1[i * 2 + 0] = ar; + ppcRecompilerInstanceData->_psq_st_scale_ps0_1[i * 2 + 1] = 1.0f; + ppcRecompilerInstanceData->_psq_st_scale_ps0_1[(i + 32) * 2 + 0] = br; + ppcRecompilerInstanceData->_psq_st_scale_ps0_1[(i + 32) * 2 + 1] = 1.0f; + + ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[i * 2 + 0] = ar; + ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[i * 2 + 1] = ar; + ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[(i + 32) * 2 + 0] = br; + ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[(i + 32) * 2 + 1] = br; } - if (LaunchSettings::ForceInterpreter() || LaunchSettings::ForceMultiCoreInterpreter()) - { - cemuLog_log(LogType::Force, "Recompiler disabled. 
Command line --force-interpreter or force-multicore-interpreter was passed"); - return; - } - if (ppcRecompilerInstanceData) - { - MemMapper::FreeReservation(ppcRecompilerInstanceData, sizeof(PPCRecompilerInstanceData_t)); - ppcRecompilerInstanceData = nullptr; - } - debug_printf("Allocating %dMB for recompiler instance data...\n", (sint32)(sizeof(PPCRecompilerInstanceData_t) / 1024 / 1024)); - ppcRecompilerInstanceData = (PPCRecompilerInstanceData_t*)MemMapper::ReserveMemory(nullptr, sizeof(PPCRecompilerInstanceData_t), MemMapper::PAGE_PERMISSION::P_RW); - MemMapper::AllocateMemory(&(ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom), sizeof(PPCRecompilerInstanceData_t) - offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom), MemMapper::PAGE_PERMISSION::P_RW, true); -#ifdef ARCH_X86_64 - PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions(); -#elif defined(__aarch64__) - PPCRecompilerAArch64Gen_generateRecompilerInterfaceFunctions(); -#endif - PPCRecompiler_allocateRange(0, 0x1000); // the first entry is used for fallback to interpreter - PPCRecompiler_allocateRange(mmuRange_TRAMPOLINE_AREA.getBase(), mmuRange_TRAMPOLINE_AREA.getSize()); - PPCRecompiler_allocateRange(mmuRange_CODECAVE.getBase(), mmuRange_CODECAVE.getSize()); PPCRecompiler_initPlatform(); @@ -749,4 +623,4 @@ void PPCRecompiler_Shutdown() // mark as unmapped ppcRecompiler_reservedBlockMask[i] = false; } -} +} \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.h b/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.h index 47902630..2e40f19d 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.h +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.h @@ -1,4 +1,4 @@ -#pragma once +#include #define PPC_REC_CODE_AREA_START (0x00000000) // lower bound of executable memory area. 
Recompiler expects this address to be 0 #define PPC_REC_CODE_AREA_END (0x10000000) // upper bound of executable memory area @@ -6,113 +6,336 @@ #define PPC_REC_ALIGN_TO_4MB(__v) (((__v)+4*1024*1024-1)&~(4*1024*1024-1)) -#define PPC_REC_MAX_VIRTUAL_GPR (40 + 32) // enough to store 32 GPRs + a few SPRs + temp registers (usually only 1-2) +#define PPC_REC_MAX_VIRTUAL_GPR (40) // enough to store 32 GPRs + a few SPRs + temp registers (usually only 1-2) -struct ppcRecRange_t +typedef struct { uint32 ppcAddress; uint32 ppcSize; + //void* x86Start; + //size_t x86Size; void* storedRange; -}; +}ppcRecRange_t; -struct PPCRecFunction_t +typedef struct { uint32 ppcAddress; uint32 ppcSize; // ppc code size of function void* x86Code; // pointer to x86 code size_t x86Size; std::vector list_ranges; +}PPCRecFunction_t; + +#define PPCREC_IML_OP_FLAG_SIGNEXTEND (1<<0) +#define PPCREC_IML_OP_FLAG_SWITCHENDIAN (1<<1) +#define PPCREC_IML_OP_FLAG_NOT_EXPANDED (1<<2) // set single-precision load instructions to indicate that the value should not be rounded to double-precision +#define PPCREC_IML_OP_FLAG_UNUSED (1<<7) // used to mark instructions that are not used + +typedef struct +{ + uint8 type; + uint8 operation; + uint8 crRegister; // set to 0xFF if not set, not all IML instruction types support cr. 
+ uint8 crMode; // only used when crRegister is valid, used to differentiate between various forms of condition flag set/clear behavior + uint32 crIgnoreMask; // bit set for every respective CR bit that doesn't need to be updated + uint32 associatedPPCAddress; // ppc address that is associated with this instruction + union + { + struct + { + uint8 _padding[7]; + }padding; + struct + { + // R (op) A [update cr* in mode *] + uint8 registerResult; + uint8 registerA; + }op_r_r; + struct + { + // R = A (op) B [update cr* in mode *] + uint8 registerResult; + uint8 registerA; + uint8 registerB; + }op_r_r_r; + struct + { + // R = A (op) immS32 [update cr* in mode *] + uint8 registerResult; + uint8 registerA; + sint32 immS32; + }op_r_r_s32; + struct + { + // R/F = NAME or NAME = R/F + uint8 registerIndex; + uint8 copyWidth; + uint32 name; + uint8 flags; + }op_r_name; + struct + { + // R (op) s32 [update cr* in mode *] + uint8 registerIndex; + sint32 immS32; + }op_r_immS32; + struct + { + uint32 address; + uint8 flags; + }op_jumpmark; + struct + { + uint32 param; + uint32 param2; + uint16 paramU16; + }op_macro; + struct + { + uint32 jumpmarkAddress; + bool jumpAccordingToSegment; //PPCRecImlSegment_t* destinationSegment; // if set, this replaces jumpmarkAddress + uint8 condition; // only used when crRegisterIndex is 8 or above (update: Apparently only used to mark jumps without a condition? 
-> Cleanup) + uint8 crRegisterIndex; + uint8 crBitIndex; + bool bitMustBeSet; + }op_conditionalJump; + struct + { + uint8 registerData; + uint8 registerMem; + uint8 registerMem2; + uint8 registerGQR; + uint8 copyWidth; + //uint8 flags; + struct + { + bool swapEndian : 1; + bool signExtend : 1; + bool notExpanded : 1; // for floats + }flags2; + uint8 mode; // transfer mode (copy width, ps0/ps1 behavior) + sint32 immS32; + }op_storeLoad; + struct + { + struct + { + uint8 registerMem; + sint32 immS32; + }src; + struct + { + uint8 registerMem; + sint32 immS32; + }dst; + uint8 copyWidth; + }op_mem2mem; + struct + { + uint8 registerResult; + uint8 registerOperand; + uint8 flags; + }op_fpr_r_r; + struct + { + uint8 registerResult; + uint8 registerOperandA; + uint8 registerOperandB; + uint8 flags; + }op_fpr_r_r_r; + struct + { + uint8 registerResult; + uint8 registerOperandA; + uint8 registerOperandB; + uint8 registerOperandC; + uint8 flags; + }op_fpr_r_r_r_r; + struct + { + uint8 registerResult; + //uint8 flags; + }op_fpr_r; + struct + { + uint32 ppcAddress; + uint32 x64Offset; + }op_ppcEnter; + struct + { + uint8 crD; // crBitIndex (result) + uint8 crA; // crBitIndex + uint8 crB; // crBitIndex + }op_cr; + // conditional operations (emitted if supported by target platform) + struct + { + // r_s32 + uint8 registerIndex; + sint32 immS32; + // condition + uint8 crRegisterIndex; + uint8 crBitIndex; + bool bitMustBeSet; + }op_conditional_r_s32; + }; +}PPCRecImlInstruction_t; + +typedef struct _PPCRecImlSegment_t PPCRecImlSegment_t; + +typedef struct _ppcRecompilerSegmentPoint_t +{ + sint32 index; + PPCRecImlSegment_t* imlSegment; + _ppcRecompilerSegmentPoint_t* next; + _ppcRecompilerSegmentPoint_t* prev; +}ppcRecompilerSegmentPoint_t; + +struct raLivenessLocation_t +{ + sint32 index; + bool isRead; + bool isWrite; + + raLivenessLocation_t() = default; + + raLivenessLocation_t(sint32 index, bool isRead, bool isWrite) + : index(index), isRead(isRead), isWrite(isWrite) {}; }; 
-#include "Cafe/HW/Espresso/Recompiler/IML/IMLInstruction.h" -#include "Cafe/HW/Espresso/Recompiler/IML/IMLSegment.h" +struct raLivenessSubrangeLink_t +{ + struct raLivenessSubrange_t* prev; + struct raLivenessSubrange_t* next; +}; -struct IMLInstruction* PPCRecompilerImlGen_generateNewEmptyInstruction(struct ppcImlGenContext_t* ppcImlGenContext); +struct raLivenessSubrange_t +{ + struct raLivenessRange_t* range; + PPCRecImlSegment_t* imlSegment; + ppcRecompilerSegmentPoint_t start; + ppcRecompilerSegmentPoint_t end; + // dirty state tracking + bool _noLoad; + bool hasStore; + bool hasStoreDelayed; + // next + raLivenessSubrange_t* subrangeBranchTaken; + raLivenessSubrange_t* subrangeBranchNotTaken; + // processing + uint32 lastIterationIndex; + // instruction locations + std::vector list_locations; + // linked list (subranges with same GPR virtual register) + raLivenessSubrangeLink_t link_sameVirtualRegisterGPR; + // linked list (all subranges for this segment) + raLivenessSubrangeLink_t link_segmentSubrangesGPR; +}; + +struct raLivenessRange_t +{ + sint32 virtualRegister; + sint32 physicalRegister; + sint32 name; + std::vector list_subranges; +}; + +struct PPCSegmentRegisterAllocatorInfo_t +{ + // analyzer stage + bool isPartOfProcessedLoop{}; // used during loop detection + sint32 lastIterationIndex{}; + // linked lists + raLivenessSubrange_t* linkedList_allSubranges{}; + raLivenessSubrange_t* linkedList_perVirtualGPR[PPC_REC_MAX_VIRTUAL_GPR]{}; +}; + +struct PPCRecVGPRDistances_t +{ + struct _RegArrayEntry + { + sint32 usageStart{}; + sint32 usageEnd{}; + }reg[PPC_REC_MAX_VIRTUAL_GPR]; + bool isProcessed[PPC_REC_MAX_VIRTUAL_GPR]{}; +}; + +typedef struct _PPCRecImlSegment_t +{ + sint32 momentaryIndex{}; // index in segment list, generally not kept up to date except if needed (necessary for loop detection) + sint32 startOffset{}; // offset to first instruction in iml instruction list + sint32 count{}; // number of instructions in segment + uint32 ppcAddress{}; // 
ppc address (0xFFFFFFFF if not associated with an address) + uint32 x64Offset{}; // x64 code offset of segment start + uint32 cycleCount{}; // number of PPC cycles required to execute this segment (roughly) + // list of intermediate instructions in this segment + PPCRecImlInstruction_t* imlList{}; + sint32 imlListSize{}; + sint32 imlListCount{}; + // segment link + _PPCRecImlSegment_t* nextSegmentBranchNotTaken{}; // this is also the default for segments where there is no branch + _PPCRecImlSegment_t* nextSegmentBranchTaken{}; + bool nextSegmentIsUncertain{}; + sint32 loopDepth{}; + //sList_t* list_prevSegments; + std::vector<_PPCRecImlSegment_t*> list_prevSegments{}; + // PPC range of segment + uint32 ppcAddrMin{}; + uint32 ppcAddrMax{}; + // enterable segments + bool isEnterable{}; // this segment can be entered from outside the recompiler (no preloaded registers necessary) + uint32 enterPPCAddress{}; // used if isEnterable is true + // jump destination segments + bool isJumpDestination{}; // segment is a destination for one or more (conditional) jumps + uint32 jumpDestinationPPCAddress{}; + // PPC FPR use mask + bool ppcFPRUsed[32]{}; // same as ppcGPRUsed, but for FPR + // CR use mask + uint32 crBitsInput{}; // bits that are expected to be set from the previous segment (read in this segment but not overwritten) + uint32 crBitsRead{}; // all bits that are read in this segment + uint32 crBitsWritten{}; // bits that are written in this segment + // register allocator info + PPCSegmentRegisterAllocatorInfo_t raInfo{}; + PPCRecVGPRDistances_t raDistances{}; + bool raRangeExtendProcessed{}; + // segment points + ppcRecompilerSegmentPoint_t* segmentPointList{}; +}PPCRecImlSegment_t; struct ppcImlGenContext_t { - class PPCFunctionBoundaryTracker* boundaryTracker; + PPCRecFunction_t* functionRef; uint32* currentInstruction; uint32 ppcAddressOfCurrentInstruction; - IMLSegment* currentOutputSegment; - struct PPCBasicBlockInfo* currentBasicBlock{}; // fpr mode bool LSQE{ 
true }; bool PSE{ true }; // cycle counter uint32 cyclesSinceLastBranch; // used to track ppc cycles - std::unordered_map mappedRegs; - - uint32 GetMaxRegId() const - { - if (mappedRegs.empty()) - return 0; - return mappedRegs.size()-1; - } - + // temporary general purpose registers + uint32 mappedRegister[PPC_REC_MAX_VIRTUAL_GPR]; + // temporary floating point registers (single and double precision) + uint32 mappedFPRRegister[256]; + // list of intermediate instructions + PPCRecImlInstruction_t* imlList; + sint32 imlListSize; + sint32 imlListCount; // list of segments - std::vector segmentList2; + PPCRecImlSegment_t** segmentList; + sint32 segmentListSize; + sint32 segmentListCount; // code generation control bool hasFPUInstruction; // if true, PPCEnter macro will create FP_UNAVAIL checks -> Not needed in user mode + // register allocator info + struct + { + std::vector list_ranges; + }raInfo; // analysis info struct { bool modifiesGQR[8]; }tracking; - // debug helpers - uint32 debug_entryPPCAddress{0}; - - ~ppcImlGenContext_t() - { - for (IMLSegment* imlSegment : segmentList2) - delete imlSegment; - segmentList2.clear(); - } - - // append raw instruction - IMLInstruction& emitInst() - { - return *PPCRecompilerImlGen_generateNewEmptyInstruction(this); - } - - IMLSegment* NewSegment() - { - IMLSegment* seg = new IMLSegment(); - segmentList2.emplace_back(seg); - return seg; - } - - size_t GetSegmentIndex(IMLSegment* seg) - { - for (size_t i = 0; i < segmentList2.size(); i++) - { - if (segmentList2[i] == seg) - return i; - } - cemu_assert_error(); - return 0; - } - - IMLSegment* InsertSegment(size_t index) - { - IMLSegment* newSeg = new IMLSegment(); - segmentList2.insert(segmentList2.begin() + index, 1, newSeg); - return newSeg; - } - - std::span InsertSegments(size_t index, size_t count) - { - segmentList2.insert(segmentList2.begin() + index, count, {}); - for (size_t i = index; i < (index + count); i++) - segmentList2[i] = new IMLSegment(); - return { 
segmentList2.data() + index, count}; - } - - void UpdateSegmentIndices() - { - for (size_t i = 0; i < segmentList2.size(); i++) - segmentList2[i]->momentaryIndex = (sint32)i; - } }; typedef void ATTR_MS_ABI (*PPCREC_JUMP_ENTRY)(); @@ -136,6 +359,11 @@ typedef struct alignas(16) float _x64XMM_constFloatMin[2]; alignas(16) uint32 _x64XMM_flushDenormalMask1[4]; alignas(16) uint32 _x64XMM_flushDenormalMaskResetSignBits[4]; + // PSQ load/store scale tables + double _psq_ld_scale_ps0_ps1[64 * 2]; + double _psq_ld_scale_ps0_1[64 * 2]; + double _psq_st_scale_ps0_ps1[64 * 2]; + double _psq_st_scale_ps0_1[64 * 2]; // MXCSR uint32 _x64XMM_mxCsr_ftzOn; uint32 _x64XMM_mxCsr_ftzOff; @@ -157,6 +385,8 @@ extern void ATTR_MS_ABI (*PPCRecompiler_leaveRecompilerCode_unvisited)(); #define PPC_REC_INVALID_FUNCTION ((PPCRecFunction_t*)-1) +// todo - move some of the stuff above into PPCRecompilerInternal.h + // recompiler interface void PPCRecompiler_recompileIfUnvisited(uint32 enterAddress); diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIml.h b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIml.h index bfb2aed5..86af33b2 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIml.h +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIml.h @@ -1,33 +1,293 @@ -bool PPCRecompiler_generateIntermediateCode(ppcImlGenContext_t& ppcImlGenContext, PPCRecFunction_t* PPCRecFunction, std::set& entryAddresses, class PPCFunctionBoundaryTracker& boundaryTracker); -IMLSegment* PPCIMLGen_CreateSplitSegmentAtEnd(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo); -IMLSegment* PPCIMLGen_CreateNewSegmentAsBranchTarget(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo); +#define PPCREC_CR_REG_TEMP 8 // there are only 8 cr registers (0-7) we use the 8th as temporary cr register that is never stored (BDNZ instruction for example) -void PPCIMLGen_AssertIfNotLastSegmentInstruction(ppcImlGenContext_t& ppcImlGenContext); +enum +{ + PPCREC_IML_OP_ASSIGN, // 
'=' operator + PPCREC_IML_OP_ENDIAN_SWAP, // '=' operator with 32bit endian swap + PPCREC_IML_OP_ADD, // '+' operator + PPCREC_IML_OP_SUB, // '-' operator + PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY, // complex operation, result = operand + ~operand2 + carry bit, updates carry bit + PPCREC_IML_OP_COMPARE_SIGNED, // arithmetic/signed comparison operator (updates cr) + PPCREC_IML_OP_COMPARE_UNSIGNED, // logical/unsigned comparison operator (updates cr) + PPCREC_IML_OP_MULTIPLY_SIGNED, // '*' operator (signed multiply) + PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED, // unsigned 64bit multiply, store only high 32bit-word of result + PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED, // signed 64bit multiply, store only high 32bit-word of result + PPCREC_IML_OP_DIVIDE_SIGNED, // '/' operator (signed divide) + PPCREC_IML_OP_DIVIDE_UNSIGNED, // '/' operator (unsigned divide) + PPCREC_IML_OP_ADD_CARRY, // complex operation, result = operand + carry bit, updates carry bit + PPCREC_IML_OP_ADD_CARRY_ME, // complex operation, result = operand + carry bit + (-1), updates carry bit + PPCREC_IML_OP_ADD_UPDATE_CARRY, // '+' operator but also updates carry flag + PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY, // '+' operator and also adds carry, updates carry flag + // assign operators with cast + PPCREC_IML_OP_ASSIGN_S16_TO_S32, // copy 16bit and sign extend + PPCREC_IML_OP_ASSIGN_S8_TO_S32, // copy 8bit and sign extend + // binary operation + PPCREC_IML_OP_OR, // '|' operator + PPCREC_IML_OP_ORC, // '|' operator, second operand is complemented first + PPCREC_IML_OP_AND, // '&' operator + PPCREC_IML_OP_XOR, // '^' operator + PPCREC_IML_OP_LEFT_ROTATE, // left rotate operator + PPCREC_IML_OP_LEFT_SHIFT, // shift left operator + PPCREC_IML_OP_RIGHT_SHIFT, // right shift operator (unsigned) + PPCREC_IML_OP_NOT, // complement each bit + PPCREC_IML_OP_NEG, // negate + // ppc + PPCREC_IML_OP_RLWIMI, // RLWIMI instruction (rotate, merge based on mask) + PPCREC_IML_OP_SRAW, // SRAWI/SRAW instruction (algebraic shift right, 
sets ca flag) + PPCREC_IML_OP_SLW, // SLW (shift based on register by up to 63 bits) + PPCREC_IML_OP_SRW, // SRW (shift based on register by up to 63 bits) + PPCREC_IML_OP_CNTLZW, + PPCREC_IML_OP_SUBFC, // SUBFC and SUBFIC (subtract from and set carry) + PPCREC_IML_OP_DCBZ, // clear 32 bytes aligned to 0x20 + PPCREC_IML_OP_MFCR, // copy cr to gpr + PPCREC_IML_OP_MTCRF, // copy gpr to cr (with mask) + // condition register + PPCREC_IML_OP_CR_CLEAR, // clear cr bit + PPCREC_IML_OP_CR_SET, // set cr bit + PPCREC_IML_OP_CR_OR, // OR cr bits + PPCREC_IML_OP_CR_ORC, // OR cr bits, complement second input operand bit first + PPCREC_IML_OP_CR_AND, // AND cr bits + PPCREC_IML_OP_CR_ANDC, // AND cr bits, complement second input operand bit first + // FPU + PPCREC_IML_OP_FPR_ADD_BOTTOM, + PPCREC_IML_OP_FPR_ADD_PAIR, + PPCREC_IML_OP_FPR_SUB_PAIR, + PPCREC_IML_OP_FPR_SUB_BOTTOM, + PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, + PPCREC_IML_OP_FPR_MULTIPLY_PAIR, + PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, + PPCREC_IML_OP_FPR_DIVIDE_PAIR, + PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, + PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, + PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, + PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP, // leave bottom of destination untouched + PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, // leave bottom of destination untouched + PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM, // leave top of destination untouched + PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, + PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64, // expand bottom f32 to f64 in bottom and top half + PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP, // calculate reciprocal with Espresso accuracy of source bottom half and write result to destination bottom and top half + PPCREC_IML_OP_FPR_FCMPO_BOTTOM, + PPCREC_IML_OP_FPR_FCMPU_BOTTOM, + PPCREC_IML_OP_FPR_FCMPU_TOP, + PPCREC_IML_OP_FPR_NEGATE_BOTTOM, + PPCREC_IML_OP_FPR_NEGATE_PAIR, + PPCREC_IML_OP_FPR_ABS_BOTTOM, // abs(fp0) + PPCREC_IML_OP_FPR_ABS_PAIR, + 
PPCREC_IML_OP_FPR_FRES_PAIR, // 1.0/fp approx (Espresso accuracy) + PPCREC_IML_OP_FPR_FRSQRTE_PAIR, // 1.0/sqrt(fp) approx (Espresso accuracy) + PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM, // -abs(fp0) + PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, // round 64bit double to 64bit double with 32bit float precision (in bottom half of xmm register) + PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR, // round two 64bit doubles to 64bit double with 32bit float precision + PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT, + PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ, + PPCREC_IML_OP_FPR_SELECT_BOTTOM, // selectively copy bottom value from operand B or C based on value in operand A + PPCREC_IML_OP_FPR_SELECT_PAIR, // selectively copy top/bottom from operand B or C based on value in top/bottom of operand A + // PS + PPCREC_IML_OP_FPR_SUM0, + PPCREC_IML_OP_FPR_SUM1, +}; -IMLInstruction* PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext_t* ppcImlGenContext); -void PPCRecompiler_pushBackIMLInstructions(IMLSegment* imlSegment, sint32 index, sint32 shiftBackCount); -IMLInstruction* PPCRecompiler_insertInstruction(IMLSegment* imlSegment, sint32 index); +#define PPCREC_IML_OP_FPR_COPY_PAIR (PPCREC_IML_OP_ASSIGN) + +enum +{ + PPCREC_IML_MACRO_BLR, // macro for BLR instruction code + PPCREC_IML_MACRO_BLRL, // macro for BLRL instruction code + PPCREC_IML_MACRO_BCTR, // macro for BCTR instruction code + PPCREC_IML_MACRO_BCTRL, // macro for BCTRL instruction code + PPCREC_IML_MACRO_BL, // call to different function (can be within same function) + PPCREC_IML_MACRO_B_FAR, // branch to different function + PPCREC_IML_MACRO_COUNT_CYCLES, // decrease current remaining thread cycles by a certain amount + PPCREC_IML_MACRO_HLE, // HLE function call + PPCREC_IML_MACRO_MFTB, // get TB register value (low or high) + PPCREC_IML_MACRO_LEAVE, // leaves recompiler and switches to interpeter + // debugging + PPCREC_IML_MACRO_DEBUGBREAK, // throws a debugbreak +}; + +enum +{ + PPCREC_JUMP_CONDITION_NONE, 
+ PPCREC_JUMP_CONDITION_E, // equal / zero + PPCREC_JUMP_CONDITION_NE, // not equal / not zero + PPCREC_JUMP_CONDITION_LE, // less or equal + PPCREC_JUMP_CONDITION_L, // less + PPCREC_JUMP_CONDITION_GE, // greater or equal + PPCREC_JUMP_CONDITION_G, // greater + // special case: + PPCREC_JUMP_CONDITION_SUMMARYOVERFLOW, // needs special handling + PPCREC_JUMP_CONDITION_NSUMMARYOVERFLOW, // not summaryoverflow + +}; + +enum +{ + PPCREC_CR_MODE_COMPARE_SIGNED, + PPCREC_CR_MODE_COMPARE_UNSIGNED, // alias logic compare + // others: PPCREC_CR_MODE_ARITHMETIC, + PPCREC_CR_MODE_ARITHMETIC, // arithmetic use (for use with add/sub instructions without generating extra code) + PPCREC_CR_MODE_LOGICAL, +}; + +enum +{ + PPCREC_IML_TYPE_NONE, + PPCREC_IML_TYPE_NO_OP, // no-op instruction + PPCREC_IML_TYPE_JUMPMARK, // possible jump destination (generated before each ppc instruction) + PPCREC_IML_TYPE_R_R, // r* (op) *r + PPCREC_IML_TYPE_R_R_R, // r* = r* (op) r* + PPCREC_IML_TYPE_R_R_S32, // r* = r* (op) s32* + PPCREC_IML_TYPE_LOAD, // r* = [r*+s32*] + PPCREC_IML_TYPE_LOAD_INDEXED, // r* = [r*+r*] + PPCREC_IML_TYPE_STORE, // [r*+s32*] = r* + PPCREC_IML_TYPE_STORE_INDEXED, // [r*+r*] = r* + PPCREC_IML_TYPE_R_NAME, // r* = name + PPCREC_IML_TYPE_NAME_R, // name* = r* + PPCREC_IML_TYPE_R_S32, // r* (op) imm + PPCREC_IML_TYPE_MACRO, + PPCREC_IML_TYPE_CJUMP, // conditional jump + PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK, // jumps only if remaining thread cycles >= 0 + PPCREC_IML_TYPE_PPC_ENTER, // used to mark locations that should be written to recompilerCallTable + PPCREC_IML_TYPE_CR, // condition register specific operations (one or more operands) + // conditional + PPCREC_IML_TYPE_CONDITIONAL_R_S32, + // FPR + PPCREC_IML_TYPE_FPR_R_NAME, // name = f* + PPCREC_IML_TYPE_FPR_NAME_R, // f* = name + PPCREC_IML_TYPE_FPR_LOAD, // r* = (bitdepth) [r*+s32*] (single or paired single mode) + PPCREC_IML_TYPE_FPR_LOAD_INDEXED, // r* = (bitdepth) [r*+r*] (single or paired single mode) + 
PPCREC_IML_TYPE_FPR_STORE, // (bitdepth) [r*+s32*] = r* (single or paired single mode) + PPCREC_IML_TYPE_FPR_STORE_INDEXED, // (bitdepth) [r*+r*] = r* (single or paired single mode) + PPCREC_IML_TYPE_FPR_R_R, + PPCREC_IML_TYPE_FPR_R_R_R, + PPCREC_IML_TYPE_FPR_R_R_R_R, + PPCREC_IML_TYPE_FPR_R, + // special + PPCREC_IML_TYPE_MEM2MEM, // memory to memory copy (deprecated) + +}; + +enum +{ + PPCREC_NAME_NONE, + PPCREC_NAME_TEMPORARY, + PPCREC_NAME_R0 = 1000, + PPCREC_NAME_SPR0 = 2000, + PPCREC_NAME_FPR0 = 3000, + PPCREC_NAME_TEMPORARY_FPR0 = 4000, // 0 to 7 + //PPCREC_NAME_CR0 = 3000, // value mapped condition register (usually it isn't needed and can be optimized away) +}; + +// special cases for LOAD/STORE +#define PPC_REC_LOAD_LWARX_MARKER (100) // lwarx instruction (similar to LWZX but sets reserved address/value) +#define PPC_REC_STORE_STWCX_MARKER (100) // stwcx instruction (similar to STWX but writes only if reservation from LWARX is valid) +#define PPC_REC_STORE_STSWI_1 (200) // stswi nb = 1 +#define PPC_REC_STORE_STSWI_2 (201) // stswi nb = 2 +#define PPC_REC_STORE_STSWI_3 (202) // stswi nb = 3 +#define PPC_REC_STORE_LSWI_1 (200) // lswi nb = 1 +#define PPC_REC_STORE_LSWI_2 (201) // lswi nb = 2 +#define PPC_REC_STORE_LSWI_3 (202) // lswi nb = 3 + +#define PPC_REC_INVALID_REGISTER 0xFF + +#define PPCREC_CR_BIT_LT 0 +#define PPCREC_CR_BIT_GT 1 +#define PPCREC_CR_BIT_EQ 2 +#define PPCREC_CR_BIT_SO 3 + +enum +{ + // fpr load + PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, + PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, + PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, + PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0, + PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1, + PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0, + PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1, + PPCREC_FPR_LD_MODE_PSQ_S16_PS0, + PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1, + PPCREC_FPR_LD_MODE_PSQ_U16_PS0, + PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1, + PPCREC_FPR_LD_MODE_PSQ_S8_PS0, + PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1, + PPCREC_FPR_LD_MODE_PSQ_U8_PS0, + 
PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1, + // fpr store + PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, // store 1 single precision float from ps0 + PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, // store 1 double precision float from ps0 + + PPCREC_FPR_ST_MODE_UI32_FROM_PS0, // store raw low-32bit of PS0 + + PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1, + PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0, + PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1, + PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0, + PPCREC_FPR_ST_MODE_PSQ_S8_PS0, + PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1, + PPCREC_FPR_ST_MODE_PSQ_U8_PS0, + PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1, + PPCREC_FPR_ST_MODE_PSQ_U16_PS0, + PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1, + PPCREC_FPR_ST_MODE_PSQ_S16_PS0, + PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1, +}; + +bool PPCRecompiler_generateIntermediateCode(ppcImlGenContext_t& ppcImlGenContext, PPCRecFunction_t* PPCRecFunction, std::set& entryAddresses); +void PPCRecompiler_freeContext(ppcImlGenContext_t* ppcImlGenContext); // todo - move to destructor + +PPCRecImlInstruction_t* PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext_t* ppcImlGenContext); +void PPCRecompiler_pushBackIMLInstructions(PPCRecImlSegment_t* imlSegment, sint32 index, sint32 shiftBackCount); +PPCRecImlInstruction_t* PPCRecompiler_insertInstruction(PPCRecImlSegment_t* imlSegment, sint32 index); void PPCRecompilerIml_insertSegments(ppcImlGenContext_t* ppcImlGenContext, sint32 index, sint32 count); -void PPCRecompilerIml_setSegmentPoint(IMLSegmentPoint* segmentPoint, IMLSegment* imlSegment, sint32 index); -void PPCRecompilerIml_removeSegmentPoint(IMLSegmentPoint* segmentPoint); +void PPCRecompilerIml_setSegmentPoint(ppcRecompilerSegmentPoint_t* segmentPoint, PPCRecImlSegment_t* imlSegment, sint32 index); +void PPCRecompilerIml_removeSegmentPoint(ppcRecompilerSegmentPoint_t* segmentPoint); -// Register management -IMLReg PPCRecompilerImlGen_LookupReg(ppcImlGenContext_t* ppcImlGenContext, IMLName mappedName, IMLRegFormat regFormat); +// GPR register management +uint32 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName, bool loadNew = false); +uint32 PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName); -IMLReg PPCRecompilerImlGen_loadRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName); +// FPR register management +uint32 PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName, bool loadNew = false); +uint32 PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName); // IML instruction generation -void PPCRecompilerImlGen_generateNewInstruction_conditional_r_s32(ppcImlGenContext_t* ppcImlGenContext, IMLInstruction* imlInstruction, uint32 operation, IMLReg registerIndex, sint32 immS32, uint32 crRegisterIndex, uint32 crBitIndex, bool bitMustBeSet); +void PPCRecompilerImlGen_generateNewInstruction_jump(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint32 jumpmarkAddress); +void PPCRecompilerImlGen_generateNewInstruction_jumpSegment(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction); + +void PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext_t* ppcImlGenContext, uint32 operation, uint8 registerIndex, sint32 immS32, uint32 copyWidth, bool signExtend, bool bigEndian, uint8 crRegister, uint32 crMode); +void PPCRecompilerImlGen_generateNewInstruction_conditional_r_s32(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint32 operation, uint8 registerIndex, sint32 immS32, uint32 crRegisterIndex, uint32 crBitIndex, bool bitMustBeSet); +void PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint32 operation, uint8 registerResult, uint8 registerA, uint8 crRegister = PPC_REC_INVALID_REGISTER, uint8 crMode = 0); + + + +// IML instruction generation (new style, can generate new instructions but 
also overwrite existing ones) + +void PPCRecompilerImlGen_generateNewInstruction_noOp(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction); +void PPCRecompilerImlGen_generateNewInstruction_memory_memory(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint8 srcMemReg, sint32 srcImmS32, uint8 dstMemReg, sint32 dstImmS32, uint8 copyWidth); + +void PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 operation, uint8 registerResult, sint32 crRegister = PPC_REC_INVALID_REGISTER); // IML generation - FPU -bool PPCRecompilerImlGen_LFS_LFSU_LFD_LFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate, bool isDouble); -bool PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate, bool isDouble); -bool PPCRecompilerImlGen_STFS_STFSU_STFD_STFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate, bool isDouble); -bool PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool hasUpdate, bool isDouble); +bool PPCRecompilerImlGen_LFS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFSU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFSUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFDX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_LFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFSU(ppcImlGenContext_t* 
ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFSUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_STFIWX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_STFDX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_FADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_FSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_FMUL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); @@ -53,17 +313,22 @@ bool PPCRecompilerImlGen_FNEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod bool PPCRecompilerImlGen_FSEL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_FRSQRTE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_FCTIWZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); -bool PPCRecompilerImlGen_PSQ_L(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate); -bool PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate); -bool PPCRecompilerImlGen_PS_MULSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isVariant1); -bool PPCRecompilerImlGen_PS_MADDSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isVariant1); +bool PPCRecompilerImlGen_PSQ_L(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PSQ_LU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PSQ_STU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PS_MULS0(ppcImlGenContext_t* 
ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PS_MULS1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PS_MADDS0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PS_MADDS1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_ADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_SUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_MUL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_DIV(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_MADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_NMADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); -bool PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withNegative); +bool PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); +bool PPCRecompilerImlGen_PS_NMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_SUM0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_SUM1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); bool PPCRecompilerImlGen_PS_NEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode); @@ -82,20 +347,76 @@ bool PPCRecompilerImlGen_PS_CMPU1(ppcImlGenContext_t* ppcImlGenContext, uint32 o // IML general +bool PPCRecompiler_isSuffixInstruction(PPCRecImlInstruction_t* iml); +void PPCRecompilerIML_linkSegments(ppcImlGenContext_t* ppcImlGenContext); +void PPCRecompilerIml_setLinkBranchNotTaken(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst); +void PPCRecompilerIml_setLinkBranchTaken(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst); +void PPCRecompilerIML_relinkInputSegment(PPCRecImlSegment_t* imlSegmentOrig, PPCRecImlSegment_t* imlSegmentNew); +void 
PPCRecompilerIML_removeLink(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst); void PPCRecompilerIML_isolateEnterableSegments(ppcImlGenContext_t* ppcImlGenContext); -void PPCIMLGen_CreateSegmentBranchedPath(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo, const std::function& genSegmentBranchTaken, const std::function& genSegmentBranchNotTaken); -void PPCIMLGen_CreateSegmentBranchedPath(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo, const std::function& genSegmentBranchNotTaken); // no else segment -void PPCIMLGen_CreateSegmentBranchedPathMultiple(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo, IMLSegment** segmentsOut, IMLReg compareReg, sint32* compareValues, sint32 count, sint32 defaultCaseIndex); +PPCRecImlInstruction_t* PPCRecompilerIML_getLastInstruction(PPCRecImlSegment_t* imlSegment); -class IMLRedirectInstOutput +// IML analyzer +typedef struct { -public: - IMLRedirectInstOutput(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* outputSegment); - ~IMLRedirectInstOutput(); + uint32 readCRBits; + uint32 writtenCRBits; +}PPCRecCRTracking_t; + +bool PPCRecompilerImlAnalyzer_isTightFiniteLoop(PPCRecImlSegment_t* imlSegment); +bool PPCRecompilerImlAnalyzer_canTypeWriteCR(PPCRecImlInstruction_t* imlInstruction); +void PPCRecompilerImlAnalyzer_getCRTracking(PPCRecImlInstruction_t* imlInstruction, PPCRecCRTracking_t* crTracking); + +// IML optimizer +bool PPCRecompiler_reduceNumberOfFPRRegisters(ppcImlGenContext_t* ppcImlGenContext); + +bool PPCRecompiler_manageFPRRegisters(ppcImlGenContext_t* ppcImlGenContext); + +void PPCRecompiler_removeRedundantCRUpdates(ppcImlGenContext_t* ppcImlGenContext); +void PPCRecompiler_optimizeDirectFloatCopies(ppcImlGenContext_t* ppcImlGenContext); +void PPCRecompiler_optimizeDirectIntegerCopies(ppcImlGenContext_t* ppcImlGenContext); + +void PPCRecompiler_optimizePSQLoadAndStore(ppcImlGenContext_t* ppcImlGenContext); + +// IML register 
allocator +void PPCRecompilerImm_allocateRegisters(ppcImlGenContext_t* ppcImlGenContext); + +// late optimizations +void PPCRecompiler_reorderConditionModifyInstructions(ppcImlGenContext_t* ppcImlGenContext); + +// debug + +void PPCRecompiler_dumpIMLSegment(PPCRecImlSegment_t* imlSegment, sint32 segmentIndex, bool printLivenessRangeInfo = false); -private: - ppcImlGenContext_t* m_context; - IMLSegment* m_prevSegment; -}; \ No newline at end of file +typedef struct +{ + union + { + struct + { + sint16 readNamedReg1; + sint16 readNamedReg2; + sint16 readNamedReg3; + sint16 writtenNamedReg1; + }; + sint16 gpr[4]; // 3 read + 1 write + }; + // FPR + union + { + struct + { + // note: If destination operand is not fully written, it will be added as a read FPR as well + sint16 readFPR1; + sint16 readFPR2; + sint16 readFPR3; + sint16 readFPR4; // usually this is set to the result FPR if only partially overwritten + sint16 writtenFPR1; + }; + sint16 fpr[4]; + }; +}PPCImlOptimizerUsedRegisters_t; + +void PPCRecompiler_checkRegisterUsage(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, PPCImlOptimizerUsedRegisters_t* registersUsed); diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlAnalyzer.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlAnalyzer.cpp new file mode 100644 index 00000000..4962d30d --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlAnalyzer.cpp @@ -0,0 +1,137 @@ +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "util/helpers/fixedSizeList.h" +#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h" + +/* + * Initializes a single segment and returns true if it is a finite loop + */ +bool PPCRecompilerImlAnalyzer_isTightFiniteLoop(PPCRecImlSegment_t* imlSegment) +{ + bool isTightFiniteLoop = false; + // base criteria, must jump to beginning of same segment + if (imlSegment->nextSegmentBranchTaken != imlSegment) + return false; + // loops using BDNZ are assumed to always be 
finite + for (sint32 t = 0; t < imlSegment->imlListCount; t++) + { + if (imlSegment->imlList[t].type == PPCREC_IML_TYPE_R_S32 && imlSegment->imlList[t].operation == PPCREC_IML_OP_SUB && imlSegment->imlList[t].crRegister == 8) + { + return true; + } + } + // for non-BDNZ loops, check for common patterns + // risky approach, look for ADD/SUB operations and assume that potential overflow means finite (does not include r_r_s32 ADD/SUB) + // this catches most loops with load-update and store-update instructions, but also those with decrementing counters + FixedSizeList list_modifiedRegisters; + for (sint32 t = 0; t < imlSegment->imlListCount; t++) + { + if (imlSegment->imlList[t].type == PPCREC_IML_TYPE_R_S32 && (imlSegment->imlList[t].operation == PPCREC_IML_OP_ADD || imlSegment->imlList[t].operation == PPCREC_IML_OP_SUB) ) + { + list_modifiedRegisters.addUnique(imlSegment->imlList[t].op_r_immS32.registerIndex); + } + } + if (list_modifiedRegisters.count > 0) + { + // remove all registers from the list that are modified by non-ADD/SUB instructions + // todo: We should also cover the case where ADD+SUB on the same register cancel the effect out + PPCImlOptimizerUsedRegisters_t registersUsed; + for (sint32 t = 0; t < imlSegment->imlListCount; t++) + { + if (imlSegment->imlList[t].type == PPCREC_IML_TYPE_R_S32 && (imlSegment->imlList[t].operation == PPCREC_IML_OP_ADD || imlSegment->imlList[t].operation == PPCREC_IML_OP_SUB)) + continue; + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + t, ®istersUsed); + if(registersUsed.writtenNamedReg1 < 0) + continue; + list_modifiedRegisters.remove(registersUsed.writtenNamedReg1); + } + if (list_modifiedRegisters.count > 0) + { + return true; + } + } + return false; +} + +/* +* Returns true if the imlInstruction can overwrite CR (depending on value of ->crRegister) +*/ +bool PPCRecompilerImlAnalyzer_canTypeWriteCR(PPCRecImlInstruction_t* imlInstruction) +{ + if (imlInstruction->type == PPCREC_IML_TYPE_R_R) + return true; 
+ if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R) + return true; + if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32) + return true; + if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) + return true; + if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R) + return true; + if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R) + return true; + if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R) + return true; + if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R) + return true; + return false; +} + +void PPCRecompilerImlAnalyzer_getCRTracking(PPCRecImlInstruction_t* imlInstruction, PPCRecCRTracking_t* crTracking) +{ + crTracking->readCRBits = 0; + crTracking->writtenCRBits = 0; + if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP) + { + if (imlInstruction->op_conditionalJump.condition != PPCREC_JUMP_CONDITION_NONE) + { + uint32 crBitFlag = 1 << (imlInstruction->op_conditionalJump.crRegisterIndex * 4 + imlInstruction->op_conditionalJump.crBitIndex); + crTracking->readCRBits = (crBitFlag); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32) + { + uint32 crBitFlag = 1 << (imlInstruction->op_conditional_r_s32.crRegisterIndex * 4 + imlInstruction->op_conditional_r_s32.crBitIndex); + crTracking->readCRBits = crBitFlag; + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MFCR) + { + crTracking->readCRBits = 0xFFFFFFFF; + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MTCRF) + { + crTracking->writtenCRBits |= ppc_MTCRFMaskToCRBitMask((uint32)imlInstruction->op_r_immS32.immS32); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CR) + { + if (imlInstruction->operation == PPCREC_IML_OP_CR_CLEAR || + imlInstruction->operation == PPCREC_IML_OP_CR_SET) + { + uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD); + crTracking->writtenCRBits = crBitFlag; + } + else if (imlInstruction->operation == PPCREC_IML_OP_CR_OR || + 
imlInstruction->operation == PPCREC_IML_OP_CR_ORC || + imlInstruction->operation == PPCREC_IML_OP_CR_AND || + imlInstruction->operation == PPCREC_IML_OP_CR_ANDC) + { + uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD); + crTracking->writtenCRBits = crBitFlag; + crBitFlag = 1 << (imlInstruction->op_cr.crA); + crTracking->readCRBits = crBitFlag; + crBitFlag = 1 << (imlInstruction->op_cr.crB); + crTracking->readCRBits |= crBitFlag; + } + else + assert_dbg(); + } + else if (PPCRecompilerImlAnalyzer_canTypeWriteCR(imlInstruction) && imlInstruction->crRegister >= 0 && imlInstruction->crRegister <= 7) + { + crTracking->writtenCRBits |= (0xF << (imlInstruction->crRegister * 4)); + } + else if ((imlInstruction->type == PPCREC_IML_TYPE_STORE || imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) && imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STWCX_MARKER) + { + // overwrites CR0 + crTracking->writtenCRBits |= (0xF << 0); + } +} diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGen.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGen.cpp index e76a53fa..b9685488 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGen.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGen.cpp @@ -1,345 +1,563 @@ #include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h" #include "Cafe/HW/Espresso/Interpreter/PPCInterpreterHelper.h" -#include "Cafe/HW/Espresso/EspressoISA.h" #include "PPCRecompiler.h" #include "PPCRecompilerIml.h" -#include "IML/IML.h" -#include "IML/IMLRegisterAllocatorRanges.h" -#include "PPCFunctionBoundaryTracker.h" -#include "Cafe/OS/libs/coreinit/coreinit_Time.h" +#include "PPCRecompilerX64.h" +#include "PPCRecompilerImlRanges.h" +#include "util/helpers/StringBuf.h" bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext); +uint32 PPCRecompiler_iterateCurrentInstruction(ppcImlGenContext_t* ppcImlGenContext); +uint32 PPCRecompiler_getInstructionByOffset(ppcImlGenContext_t* ppcImlGenContext, uint32 
offset); -struct PPCBasicBlockInfo +PPCRecImlInstruction_t* PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext_t* ppcImlGenContext) { - PPCBasicBlockInfo(uint32 startAddress, const std::set& entryAddresses) : startAddress(startAddress), lastAddress(startAddress) + if( ppcImlGenContext->imlListCount+1 > ppcImlGenContext->imlListSize ) { - isEnterable = entryAddresses.find(startAddress) != entryAddresses.end(); + sint32 newSize = ppcImlGenContext->imlListCount*2 + 2; + ppcImlGenContext->imlList = (PPCRecImlInstruction_t*)realloc(ppcImlGenContext->imlList, sizeof(PPCRecImlInstruction_t)*newSize); + ppcImlGenContext->imlListSize = newSize; } - - uint32 startAddress; - uint32 lastAddress; // inclusive - bool isEnterable{ false }; - bool hasContinuedFlow{ true }; // non-branch path goes to next segment, assumed by default - bool hasBranchTarget{ false }; - uint32 branchTarget{}; - - // associated IML segments - IMLSegment* firstSegment{}; // first segment in chain, used as branch target for other segments - IMLSegment* appendSegment{}; // last segment in chain, additional instructions should be appended to this segment - - void SetInitialSegment(IMLSegment* seg) - { - cemu_assert_debug(!firstSegment); - cemu_assert_debug(!appendSegment); - firstSegment = seg; - appendSegment = seg; - } - - IMLSegment* GetFirstSegmentInChain() - { - return firstSegment; - } - - IMLSegment* GetSegmentForInstructionAppend() - { - return appendSegment; - } -}; - -IMLInstruction* PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext_t* ppcImlGenContext) -{ - IMLInstruction& inst = ppcImlGenContext->currentOutputSegment->imlList.emplace_back(); - memset(&inst, 0x00, sizeof(IMLInstruction)); - return &inst; + PPCRecImlInstruction_t* imlInstruction = ppcImlGenContext->imlList+ppcImlGenContext->imlListCount; + memset(imlInstruction, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; // dont update any cr register by default + 
imlInstruction->associatedPPCAddress = ppcImlGenContext->ppcAddressOfCurrentInstruction; + ppcImlGenContext->imlListCount++; + return imlInstruction; } -void PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext_t* ppcImlGenContext, IMLReg registerDestination, IMLReg registerMemory1, IMLReg registerMemory2, uint32 copyWidth, bool signExtend, bool switchEndian) +void PPCRecompilerImlGen_generateNewInstruction_jumpmark(ppcImlGenContext_t* ppcImlGenContext, uint32 address) { - cemu_assert_debug(registerMemory1.IsValid()); - cemu_assert_debug(registerMemory2.IsValid()); - cemu_assert_debug(registerDestination.IsValid()); - IMLInstruction* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + // no-op that indicates possible destination of a jump + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_JUMPMARK; + imlInstruction->op_jumpmark.address = address; +} + +void PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext_t* ppcImlGenContext, uint32 macroId, uint32 param, uint32 param2, uint16 paramU16) +{ + // no-op that indicates possible destination of a jump + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_MACRO; + imlInstruction->operation = macroId; + imlInstruction->op_macro.param = param; + imlInstruction->op_macro.param2 = param2; + imlInstruction->op_macro.paramU16 = paramU16; +} + +/* + * Generates a marker for Interpreter -> Recompiler entrypoints + * PPC_ENTER iml instructions have no associated PPC address but the instruction itself has one + */ +void PPCRecompilerImlGen_generateNewInstruction_ppcEnter(ppcImlGenContext_t* ppcImlGenContext, uint32 ppcAddress) +{ + // no-op that indicates possible destination of a jump + PPCRecImlInstruction_t* imlInstruction = 
PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_PPC_ENTER; + imlInstruction->operation = 0; + imlInstruction->op_ppcEnter.ppcAddress = ppcAddress; + imlInstruction->op_ppcEnter.x64Offset = 0; + imlInstruction->associatedPPCAddress = 0; +} + +void PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint32 operation, uint8 registerResult, uint8 registerA, uint8 crRegister, uint8 crMode) +{ + // operation with two register operands (e.g. "t0 = t1") + if(imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_R_R; + imlInstruction->operation = operation; + imlInstruction->crRegister = crRegister; + imlInstruction->crMode = crMode; + imlInstruction->op_r_r.registerResult = registerResult; + imlInstruction->op_r_r.registerA = registerA; +} + +void PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext_t* ppcImlGenContext, uint32 operation, uint8 registerResult, uint8 registerA, uint8 registerB, uint8 crRegister=PPC_REC_INVALID_REGISTER, uint8 crMode=0) +{ + // operation with three register operands (e.g. 
"t0 = t1 + t4") + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_R_R_R; + imlInstruction->operation = operation; + imlInstruction->crRegister = crRegister; + imlInstruction->crMode = crMode; + imlInstruction->op_r_r_r.registerResult = registerResult; + imlInstruction->op_r_r_r.registerA = registerA; + imlInstruction->op_r_r_r.registerB = registerB; +} + +void PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext_t* ppcImlGenContext, uint32 operation, uint8 registerResult, uint8 registerA, sint32 immS32, uint8 crRegister=PPC_REC_INVALID_REGISTER, uint8 crMode=0) +{ + // operation with two register operands and one signed immediate (e.g. "t0 = t1 + 1234") + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_R_R_S32; + imlInstruction->operation = operation; + imlInstruction->crRegister = crRegister; + imlInstruction->crMode = crMode; + imlInstruction->op_r_r_s32.registerResult = registerResult; + imlInstruction->op_r_r_s32.registerA = registerA; + imlInstruction->op_r_r_s32.immS32 = immS32; +} + +void PPCRecompilerImlGen_generateNewInstruction_name_r(ppcImlGenContext_t* ppcImlGenContext, uint32 operation, uint8 registerIndex, uint32 name, uint32 copyWidth, bool signExtend, bool bigEndian) +{ + // Store name (e.g. 
"'r3' = t0" which translates to MOV [ESP+offset_r3], reg32) + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_NAME_R; + imlInstruction->operation = operation; + imlInstruction->op_r_name.registerIndex = registerIndex; + imlInstruction->op_r_name.name = name; + imlInstruction->op_r_name.copyWidth = copyWidth; + imlInstruction->op_r_name.flags = (signExtend?PPCREC_IML_OP_FLAG_SIGNEXTEND:0)|(bigEndian?PPCREC_IML_OP_FLAG_SWITCHENDIAN:0); +} + +void PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext_t* ppcImlGenContext, uint32 operation, uint8 registerIndex, sint32 immS32, uint32 copyWidth, bool signExtend, bool bigEndian, uint8 crRegister, uint32 crMode) +{ + // two variations: + // operation without store (e.g. "'r3' < 123" which has no effect other than updating a condition flags register) + // operation with store (e.g. "'r3' = 123") + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_R_S32; + imlInstruction->operation = operation; + imlInstruction->crRegister = crRegister; + imlInstruction->crMode = crMode; + imlInstruction->op_r_immS32.registerIndex = registerIndex; + imlInstruction->op_r_immS32.immS32 = immS32; +} + +void PPCRecompilerImlGen_generateNewInstruction_conditional_r_s32(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint32 operation, uint8 registerIndex, sint32 immS32, uint32 crRegisterIndex, uint32 crBitIndex, bool bitMustBeSet) +{ + if(imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + else + memset(imlInstruction, 0, sizeof(PPCRecImlInstruction_t)); + imlInstruction->type = PPCREC_IML_TYPE_CONDITIONAL_R_S32; + imlInstruction->operation = operation; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + // r_s32 operation + 
imlInstruction->op_conditional_r_s32.registerIndex = registerIndex; + imlInstruction->op_conditional_r_s32.immS32 = immS32; + // condition + imlInstruction->op_conditional_r_s32.crRegisterIndex = crRegisterIndex; + imlInstruction->op_conditional_r_s32.crBitIndex = crBitIndex; + imlInstruction->op_conditional_r_s32.bitMustBeSet = bitMustBeSet; +} + + +void PPCRecompilerImlGen_generateNewInstruction_jump(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint32 jumpmarkAddress) +{ + // jump + if (imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + else + memset(imlInstruction, 0, sizeof(PPCRecImlInstruction_t)); + imlInstruction->type = PPCREC_IML_TYPE_CJUMP; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->op_conditionalJump.jumpmarkAddress = jumpmarkAddress; + imlInstruction->op_conditionalJump.jumpAccordingToSegment = false; + imlInstruction->op_conditionalJump.condition = PPCREC_JUMP_CONDITION_NONE; + imlInstruction->op_conditionalJump.crRegisterIndex = 0; + imlInstruction->op_conditionalJump.crBitIndex = 0; + imlInstruction->op_conditionalJump.bitMustBeSet = false; +} + +// jump based on segment branches +void PPCRecompilerImlGen_generateNewInstruction_jumpSegment(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction) +{ + // jump + if (imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->associatedPPCAddress = 0; + imlInstruction->type = PPCREC_IML_TYPE_CJUMP; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->op_conditionalJump.jumpmarkAddress = 0; + imlInstruction->op_conditionalJump.jumpAccordingToSegment = true; + imlInstruction->op_conditionalJump.condition = PPCREC_JUMP_CONDITION_NONE; + imlInstruction->op_conditionalJump.crRegisterIndex = 0; + imlInstruction->op_conditionalJump.crBitIndex = 0; + 
imlInstruction->op_conditionalJump.bitMustBeSet = false; +} + +void PPCRecompilerImlGen_generateNewInstruction_noOp(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction) +{ + if (imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_NO_OP; + imlInstruction->operation = 0; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->crMode = 0; +} + +void PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext_t* ppcImlGenContext, uint32 operation, uint8 crD, uint8 crA, uint8 crB) +{ + // multiple variations: + // operation involving only one cr bit (like clear crD bit) + // operation involving three cr bits (like crD = crA or crB) + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_CR; + imlInstruction->operation = operation; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->crMode = 0; + imlInstruction->op_cr.crD = crD; + imlInstruction->op_cr.crA = crA; + imlInstruction->op_cr.crB = crB; +} + +void PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext_t* ppcImlGenContext, uint32 jumpmarkAddress, uint32 jumpCondition, uint32 crRegisterIndex, uint32 crBitIndex, bool bitMustBeSet) +{ + // conditional jump + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_CJUMP; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->op_conditionalJump.jumpmarkAddress = jumpmarkAddress; + imlInstruction->op_conditionalJump.condition = jumpCondition; + imlInstruction->op_conditionalJump.crRegisterIndex = crRegisterIndex; + imlInstruction->op_conditionalJump.crBitIndex = crBitIndex; + imlInstruction->op_conditionalJump.bitMustBeSet = bitMustBeSet; +} + +void 
PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory, sint32 immS32, uint32 copyWidth, bool signExtend, bool switchEndian) +{ + // load from memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_LOAD; + imlInstruction->operation = 0; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->op_storeLoad.registerData = registerDestination; + imlInstruction->op_storeLoad.registerMem = registerMemory; + imlInstruction->op_storeLoad.immS32 = immS32; + imlInstruction->op_storeLoad.copyWidth = copyWidth; + //imlInstruction->op_storeLoad.flags = (signExtend ? PPCREC_IML_OP_FLAG_SIGNEXTEND : 0) | (switchEndian ? PPCREC_IML_OP_FLAG_SWITCHENDIAN : 0); + imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; + imlInstruction->op_storeLoad.flags2.signExtend = signExtend; +} + +void PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory1, uint8 registerMemory2, uint32 copyWidth, bool signExtend, bool switchEndian) +{ + // load from memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); imlInstruction->type = PPCREC_IML_TYPE_LOAD_INDEXED; imlInstruction->operation = 0; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; imlInstruction->op_storeLoad.registerData = registerDestination; imlInstruction->op_storeLoad.registerMem = registerMemory1; imlInstruction->op_storeLoad.registerMem2 = registerMemory2; imlInstruction->op_storeLoad.copyWidth = copyWidth; + //imlInstruction->op_storeLoad.flags = (signExtend?PPCREC_IML_OP_FLAG_SIGNEXTEND:0)|(switchEndian?PPCREC_IML_OP_FLAG_SWITCHENDIAN:0); imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; imlInstruction->op_storeLoad.flags2.signExtend = signExtend; } 
-void PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext_t* ppcImlGenContext, IMLReg registerDestination, IMLReg registerMemory1, IMLReg registerMemory2, uint32 copyWidth, bool signExtend, bool switchEndian) +void PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext_t* ppcImlGenContext, uint8 registerSource, uint8 registerMemory, sint32 immS32, uint32 copyWidth, bool switchEndian) { - cemu_assert_debug(registerMemory1.IsValid()); - cemu_assert_debug(registerMemory2.IsValid()); - cemu_assert_debug(registerDestination.IsValid()); - IMLInstruction* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + // load from memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_STORE; + imlInstruction->operation = 0; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->op_storeLoad.registerData = registerSource; + imlInstruction->op_storeLoad.registerMem = registerMemory; + imlInstruction->op_storeLoad.immS32 = immS32; + imlInstruction->op_storeLoad.copyWidth = copyWidth; + //imlInstruction->op_storeLoad.flags = (switchEndian?PPCREC_IML_OP_FLAG_SWITCHENDIAN:0); + imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; + imlInstruction->op_storeLoad.flags2.signExtend = false; +} + +void PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory1, uint8 registerMemory2, uint32 copyWidth, bool signExtend, bool switchEndian) +{ + // load from memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); imlInstruction->type = PPCREC_IML_TYPE_STORE_INDEXED; imlInstruction->operation = 0; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; imlInstruction->op_storeLoad.registerData = registerDestination; 
imlInstruction->op_storeLoad.registerMem = registerMemory1; imlInstruction->op_storeLoad.registerMem2 = registerMemory2; imlInstruction->op_storeLoad.copyWidth = copyWidth; + //imlInstruction->op_storeLoad.flags = (signExtend?PPCREC_IML_OP_FLAG_SIGNEXTEND:0)|(switchEndian?PPCREC_IML_OP_FLAG_SWITCHENDIAN:0); imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; imlInstruction->op_storeLoad.flags2.signExtend = signExtend; } -// create and fill two segments (branch taken and branch not taken) as a follow up to the current segment and then merge flow afterwards -void PPCIMLGen_CreateSegmentBranchedPath(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo, const std::function& genSegmentBranchTaken, const std::function& genSegmentBranchNotTaken) +void PPCRecompilerImlGen_generateNewInstruction_memory_memory(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, uint8 srcMemReg, sint32 srcImmS32, uint8 dstMemReg, sint32 dstImmS32, uint8 copyWidth) { - IMLSegment* currentWriteSegment = basicBlockInfo.GetSegmentForInstructionAppend(); - - std::span segments = ppcImlGenContext.InsertSegments(ppcImlGenContext.GetSegmentIndex(currentWriteSegment) + 1, 3); - IMLSegment* segBranchNotTaken = segments[0]; - IMLSegment* segBranchTaken = segments[1]; - IMLSegment* segMerge = segments[2]; - - // link the segments - segMerge->SetLinkBranchTaken(currentWriteSegment->GetBranchTaken()); - segMerge->SetLinkBranchNotTaken(currentWriteSegment->GetBranchNotTaken()); - currentWriteSegment->SetLinkBranchTaken(segBranchTaken); - currentWriteSegment->SetLinkBranchNotTaken(segBranchNotTaken); - segBranchTaken->SetLinkBranchNotTaken(segMerge); - segBranchNotTaken->SetLinkBranchTaken(segMerge); - // generate code for branch taken segment - ppcImlGenContext.currentOutputSegment = segBranchTaken; - genSegmentBranchTaken(ppcImlGenContext); - cemu_assert_debug(ppcImlGenContext.currentOutputSegment == segBranchTaken); - // generate code for branch not 
taken segment - ppcImlGenContext.currentOutputSegment = segBranchNotTaken; - genSegmentBranchNotTaken(ppcImlGenContext); - cemu_assert_debug(ppcImlGenContext.currentOutputSegment == segBranchNotTaken); - ppcImlGenContext.emitInst().make_jump(); - // make merge segment the new write segment - ppcImlGenContext.currentOutputSegment = segMerge; - basicBlockInfo.appendSegment = segMerge; + // copy from memory to memory + if(imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_MEM2MEM; + imlInstruction->operation = 0; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->op_mem2mem.src.registerMem = srcMemReg; + imlInstruction->op_mem2mem.src.immS32 = srcImmS32; + imlInstruction->op_mem2mem.dst.registerMem = dstMemReg; + imlInstruction->op_mem2mem.dst.immS32 = dstImmS32; + imlInstruction->op_mem2mem.copyWidth = copyWidth; } -void PPCIMLGen_CreateSegmentBranchedPath(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo, const std::function& genSegmentBranchNotTaken) +uint32 PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) { - IMLSegment* currentWriteSegment = basicBlockInfo.GetSegmentForInstructionAppend(); - - std::span segments = ppcImlGenContext.InsertSegments(ppcImlGenContext.GetSegmentIndex(currentWriteSegment) + 1, 2); - IMLSegment* segBranchNotTaken = segments[0]; - IMLSegment* segMerge = segments[1]; - - // link the segments - segMerge->SetLinkBranchTaken(currentWriteSegment->GetBranchTaken()); - segMerge->SetLinkBranchNotTaken(currentWriteSegment->GetBranchNotTaken()); - currentWriteSegment->SetLinkBranchTaken(segMerge); - currentWriteSegment->SetLinkBranchNotTaken(segBranchNotTaken); - segBranchNotTaken->SetLinkBranchNotTaken(segMerge); - // generate code for branch not taken segment - ppcImlGenContext.currentOutputSegment = segBranchNotTaken; - 
genSegmentBranchNotTaken(ppcImlGenContext); - cemu_assert_debug(ppcImlGenContext.currentOutputSegment == segBranchNotTaken); - // make merge segment the new write segment - ppcImlGenContext.currentOutputSegment = segMerge; - basicBlockInfo.appendSegment = segMerge; -} - -IMLReg _GetRegTemporaryS8(ppcImlGenContext_t* ppcImlGenContext, uint32 index); - -IMLRedirectInstOutput::IMLRedirectInstOutput(ppcImlGenContext_t* ppcImlGenContext, IMLSegment* outputSegment) : m_context(ppcImlGenContext) -{ - m_prevSegment = ppcImlGenContext->currentOutputSegment; - cemu_assert_debug(ppcImlGenContext->currentOutputSegment == ppcImlGenContext->currentBasicBlock->appendSegment); - if (outputSegment == ppcImlGenContext->currentOutputSegment) + if( mappedName == PPCREC_NAME_NONE ) { - m_prevSegment = nullptr; - return; + debug_printf("PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(): Invalid mappedName parameter\n"); + return PPC_REC_INVALID_REGISTER; } - m_context->currentBasicBlock->appendSegment = outputSegment; - m_context->currentOutputSegment = outputSegment; -} - -IMLRedirectInstOutput::~IMLRedirectInstOutput() -{ - if (m_prevSegment) + for(uint32 i=0; i<(PPC_REC_MAX_VIRTUAL_GPR-1); i++) { - m_context->currentBasicBlock->appendSegment = m_prevSegment; - m_context->currentOutputSegment = m_prevSegment; - } -} - -// compare values and branch to segment with same index in segmentsOut. The last segment doesn't actually have any comparison and just is the default case. 
Thus compareValues is one shorter than count -void PPCIMLGen_CreateSegmentBranchedPathMultiple(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo, IMLSegment** segmentsOut, IMLReg compareReg, sint32* compareValues, sint32 count, sint32 defaultCaseIndex) -{ - IMLSegment* currentWriteSegment = basicBlockInfo.GetSegmentForInstructionAppend(); - cemu_assert_debug(!currentWriteSegment->HasSuffixInstruction()); // must not already have a suffix instruction - - const sint32 numBranchSegments = count + 1; - const sint32 numCaseSegments = count; - - std::span segments = ppcImlGenContext.InsertSegments(ppcImlGenContext.GetSegmentIndex(currentWriteSegment) + 1, numBranchSegments - 1 + numCaseSegments + 1); - IMLSegment** extraBranchSegments = segments.data(); - IMLSegment** caseSegments = segments.data() + numBranchSegments - 1; - IMLSegment* mergeSegment = segments[numBranchSegments - 1 + numCaseSegments]; - - // move links to the merge segment - mergeSegment->SetLinkBranchTaken(currentWriteSegment->GetBranchTaken()); - mergeSegment->SetLinkBranchNotTaken(currentWriteSegment->GetBranchNotTaken()); - currentWriteSegment->SetLinkBranchTaken(nullptr); - currentWriteSegment->SetLinkBranchNotTaken(nullptr); - - for (sint32 i=0; imappedRegister[i] == PPCREC_NAME_NONE ) { - cemu_assert_debug(i < numCaseSegments); - seg->SetLinkBranchTaken(caseSegments[i]); - seg->SetLinkBranchNotTaken(GetBranchSegment(i + 1)); - seg->AppendInstruction()->make_compare_s32(compareReg, compareValues[i], tmpBoolReg, IMLCondition::EQ); - seg->AppendInstruction()->make_conditional_jump(tmpBoolReg, true); - } - else - { - cemu_assert_debug(defaultCaseIndex < numCaseSegments); - seg->SetLinkBranchTaken(caseSegments[defaultCaseIndex]); - seg->AppendInstruction()->make_jump(); + ppcImlGenContext->mappedRegister[i] = mappedName; + return i; } } - // link case segments - for (sint32 i=0; iSetLinkBranchTaken(mergeSegment); - // -> Jumps are added after the instructions - } - else - { - 
seg->SetLinkBranchTaken(mergeSegment); - } - } - ppcImlGenContext.currentOutputSegment = mergeSegment; - basicBlockInfo.appendSegment = mergeSegment; -} - -IMLReg PPCRecompilerImlGen_LookupReg(ppcImlGenContext_t* ppcImlGenContext, IMLName mappedName, IMLRegFormat regFormat) -{ - auto it = ppcImlGenContext->mappedRegs.find(mappedName); - if (it != ppcImlGenContext->mappedRegs.end()) - return it->second; - // create new reg entry - IMLRegFormat baseFormat; - if (regFormat == IMLRegFormat::F64) - baseFormat = IMLRegFormat::F64; - else if (regFormat == IMLRegFormat::I32) - baseFormat = IMLRegFormat::I64; - else - { - cemu_assert_suspicious(); - } - IMLRegID newRegId = ppcImlGenContext->mappedRegs.size(); - IMLReg newReg(baseFormat, regFormat, 0, newRegId); - ppcImlGenContext->mappedRegs.try_emplace(mappedName, newReg); - return newReg; -} - -IMLName PPCRecompilerImlGen_GetRegName(ppcImlGenContext_t* ppcImlGenContext, IMLReg reg) -{ - for (auto& it : ppcImlGenContext->mappedRegs) - { - if (it.second.GetRegID() == reg.GetRegID()) - return it.first; - } - cemu_assert(false); return 0; } +uint32 PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) +{ + for(uint32 i=0; i< PPC_REC_MAX_VIRTUAL_GPR; i++) + { + if( ppcImlGenContext->mappedRegister[i] == mappedName ) + { + return i; + } + } + return PPC_REC_INVALID_REGISTER; +} + uint32 PPCRecompilerImlGen_getAndLockFreeTemporaryFPR(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) { - DEBUG_BREAK; - //if( mappedName == PPCREC_NAME_NONE ) - //{ - // debug_printf("PPCRecompilerImlGen_getAndLockFreeTemporaryFPR(): Invalid mappedName parameter\n"); - // return PPC_REC_INVALID_REGISTER; - //} - //for(uint32 i=0; i<255; i++) - //{ - // if( ppcImlGenContext->mappedFPRRegister[i] == PPCREC_NAME_NONE ) - // { - // ppcImlGenContext->mappedFPRRegister[i] = mappedName; - // return i; - // } - //} + if( mappedName == PPCREC_NAME_NONE ) + { + 
debug_printf("PPCRecompilerImlGen_getAndLockFreeTemporaryFPR(): Invalid mappedName parameter\n"); + return PPC_REC_INVALID_REGISTER; + } + for(uint32 i=0; i<255; i++) + { + if( ppcImlGenContext->mappedFPRRegister[i] == PPCREC_NAME_NONE ) + { + ppcImlGenContext->mappedFPRRegister[i] = mappedName; + return i; + } + } return 0; } uint32 PPCRecompilerImlGen_findFPRRegisterByMappedName(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) { - DEBUG_BREAK; - //for(uint32 i=0; i<255; i++) - //{ - // if( ppcImlGenContext->mappedFPRRegister[i] == mappedName ) - // { - // return i; - // } - //} + for(uint32 i=0; i<255; i++) + { + if( ppcImlGenContext->mappedFPRRegister[i] == mappedName ) + { + return i; + } + } return PPC_REC_INVALID_REGISTER; } -IMLReg PPCRecompilerImlGen_loadRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) +/* + * Loads a PPC gpr into any of the available IML registers + * If loadNew is false, it will reuse already loaded instances + */ +uint32 PPCRecompilerImlGen_loadRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName, bool loadNew) { - return PPCRecompilerImlGen_LookupReg(ppcImlGenContext, mappedName, IMLRegFormat::I32); + if( loadNew == false ) + { + uint32 loadedRegisterIndex = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, mappedName); + if( loadedRegisterIndex != PPC_REC_INVALID_REGISTER ) + return loadedRegisterIndex; + } + uint32 registerIndex = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, mappedName); + return registerIndex; } -IMLReg _GetRegGPR(ppcImlGenContext_t* ppcImlGenContext, uint32 index) +/* + * Reuse already loaded register if present + * Otherwise create new IML register and map the name. 
The register contents will be undefined + */ +uint32 PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) { - cemu_assert_debug(index < 32); - return PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + index); + uint32 loadedRegisterIndex = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, mappedName); + if( loadedRegisterIndex != PPC_REC_INVALID_REGISTER ) + return loadedRegisterIndex; + uint32 registerIndex = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, mappedName); + return registerIndex; } -IMLReg _GetRegCR(ppcImlGenContext_t* ppcImlGenContext, uint32 index) +/* + * Loads a PPC fpr into any of the available IML FPU registers + * If loadNew is false, it will check first if the fpr is already loaded into any IML register + */ +uint32 PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName, bool loadNew) { - cemu_assert_debug(index < 32); - return PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_CR + index); + if( loadNew == false ) + { + uint32 loadedRegisterIndex = PPCRecompilerImlGen_findFPRRegisterByMappedName(ppcImlGenContext, mappedName); + if( loadedRegisterIndex != PPC_REC_INVALID_REGISTER ) + return loadedRegisterIndex; + } + uint32 registerIndex = PPCRecompilerImlGen_getAndLockFreeTemporaryFPR(ppcImlGenContext, mappedName); + return registerIndex; } -IMLReg _GetRegCR(ppcImlGenContext_t* ppcImlGenContext, uint8 crReg, uint8 crBit) +/* + * Checks if a PPC fpr register is already loaded into any IML register + * If no, it will create a new undefined temporary IML FPU register and map the name (effectively overwriting the old ppc register) + */ +uint32 PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext_t* ppcImlGenContext, uint32 mappedName) { - cemu_assert_debug(crReg < 8); - cemu_assert_debug(crBit < 4); - return _GetRegCR(ppcImlGenContext, (crReg * 4) + crBit); + uint32 loadedRegisterIndex = 
PPCRecompilerImlGen_findFPRRegisterByMappedName(ppcImlGenContext, mappedName); + if( loadedRegisterIndex != PPC_REC_INVALID_REGISTER ) + return loadedRegisterIndex; + uint32 registerIndex = PPCRecompilerImlGen_getAndLockFreeTemporaryFPR(ppcImlGenContext, mappedName); + return registerIndex; } -IMLReg _GetRegTemporary(ppcImlGenContext_t* ppcImlGenContext, uint32 index) +void PPCRecompilerImlGen_TW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - cemu_assert_debug(index < 4); - return PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + index); +//#ifdef CEMU_DEBUG_ASSERT +// PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, 0); +//#endif + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_LEAVE, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, 0); } -// get throw-away register -// be careful to not collide with other temporary register -IMLReg _GetRegTemporaryS8(ppcImlGenContext_t* ppcImlGenContext, uint32 index) +bool PPCRecompilerImlGen_MTSPR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - cemu_assert_debug(index < 4); - return PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + index); + uint32 rD, spr1, spr2, spr; + PPC_OPC_TEMPL_XO(opcode, rD, spr1, spr2); + spr = spr1 | (spr2<<5); + if (spr == SPR_CTR || spr == SPR_LR) + { + uint32 gprReg = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0 + rD); + if (gprReg == PPC_REC_INVALID_REGISTER) + gprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rD); + uint32 sprReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, sprReg, gprReg); + } + else if (spr >= SPR_UGQR0 && spr <= SPR_UGQR7) + { + uint32 gprReg = 
PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0 + rD); + if (gprReg == PPC_REC_INVALID_REGISTER) + gprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rD); + uint32 sprReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, sprReg, gprReg); + ppcImlGenContext->tracking.modifiesGQR[spr - SPR_UGQR0] = true; + } + else + return false; + return true; +} + +bool PPCRecompilerImlGen_MFSPR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 rD, spr1, spr2, spr; + PPC_OPC_TEMPL_XO(opcode, rD, spr1, spr2); + spr = spr1 | (spr2<<5); + if (spr == SPR_LR || spr == SPR_CTR) + { + uint32 sprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); + uint32 gprReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0 + rD); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprReg, sprReg); + } + else if (spr >= SPR_UGQR0 && spr <= SPR_UGQR7) + { + uint32 sprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); + uint32 gprReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0 + rD); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprReg, sprReg); + } + else + return false; + return true; +} + +bool PPCRecompilerImlGen_MFTB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 rD, spr1, spr2, spr; + PPC_OPC_TEMPL_XO(opcode, rD, spr1, spr2); + spr = spr1 | (spr2<<5); + + if (spr == 268 || spr == 269) + { + // TBL / TBU + uint32 param2 = spr | (rD << 16); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_MFTB, ppcImlGenContext->ppcAddressOfCurrentInstruction, param2, 0); + return true; + } + return false; +} + +bool 
PPCRecompilerImlGen_MFCR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rD, rA, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + uint32 gprReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0 + rD); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_MFCR, gprReg, 0, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + return true; +} + +bool PPCRecompilerImlGen_MTCRF(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 rS; + uint32 crMask; + PPC_OPC_TEMPL_XFX(opcode, rS, crMask); + uint32 gprReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0 + rS); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_MTCRF, gprReg, crMask, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + return true; +} + +void PPCRecompilerImlGen_CMP(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 cr; + int rA, rB; + PPC_OPC_TEMPL_X(opcode, cr, rA, rB); + cr >>= 2; + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_COMPARE_SIGNED, gprRegisterA, gprRegisterB, cr, PPCREC_CR_MODE_COMPARE_SIGNED); +} + +void PPCRecompilerImlGen_CMPL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 cr; + int rA, rB; + PPC_OPC_TEMPL_X(opcode, cr, rA, rB); + cr >>= 2; + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_COMPARE_UNSIGNED, gprRegisterA, gprRegisterB, cr, PPCREC_CR_MODE_COMPARE_UNSIGNED); +} + +void PPCRecompilerImlGen_CMPI(ppcImlGenContext_t* ppcImlGenContext, 
uint32 opcode) +{ + uint32 cr; + int rA; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, cr, rA, imm); + cr >>= 2; + sint32 b = imm; + // load gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_COMPARE_SIGNED, gprRegister, b, 0, false, false, cr, PPCREC_CR_MODE_COMPARE_SIGNED); +} + +void PPCRecompilerImlGen_CMPLI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 cr; + int rA; + uint32 imm; + PPC_OPC_TEMPL_D_UImm(opcode, cr, rA, imm); + cr >>= 2; + uint32 b = imm; + // load gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_COMPARE_UNSIGNED, gprRegister, (sint32)b, 0, false, false, cr, PPCREC_CR_MODE_COMPARE_UNSIGNED); } bool PPCRecompiler_canInlineFunction(MPTR functionPtr, sint32* functionInstructionCount) { for (sint32 i = 0; i < 6; i++) { - uint32 opcode = memory_readU32(functionPtr + i * 4); + uint32 opcode = memory_readU32(functionPtr+i*4); switch ((opcode >> 26)) { case 14: // ADDI @@ -393,220 +611,18 @@ void PPCRecompiler_generateInlinedCode(ppcImlGenContext_t* ppcImlGenContext, uin { for (sint32 i = 0; i < instructionCount; i++) { - ppcImlGenContext->ppcAddressOfCurrentInstruction = startAddress + i * 4; + ppcImlGenContext->ppcAddressOfCurrentInstruction = startAddress + i*4; ppcImlGenContext->cyclesSinceLastBranch++; if (PPCRecompiler_decodePPCInstruction(ppcImlGenContext)) { - cemu_assert_suspicious(); + assert_dbg(); } } // add range - cemu_assert_unimplemented(); - //ppcRecRange_t recRange; - //recRange.ppcAddress = startAddress; - //recRange.ppcSize = instructionCount*4 + 4; // + 4 because we have to include the BLR - //ppcImlGenContext->functionRef->list_ranges.push_back(recRange); -} - -// for handling RC bit of many instructions -void 
PPCImlGen_UpdateCR0(ppcImlGenContext_t* ppcImlGenContext, IMLReg regR) -{ - IMLReg crBitRegLT = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT::CR_BIT_INDEX_LT); - IMLReg crBitRegGT = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT::CR_BIT_INDEX_GT); - IMLReg crBitRegEQ = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT::CR_BIT_INDEX_EQ); - // todo - SO bit - - ppcImlGenContext->emitInst().make_compare_s32(regR, 0, crBitRegLT, IMLCondition::SIGNED_LT); - ppcImlGenContext->emitInst().make_compare_s32(regR, 0, crBitRegGT, IMLCondition::SIGNED_GT); - ppcImlGenContext->emitInst().make_compare_s32(regR, 0, crBitRegEQ, IMLCondition::EQ); - - //ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, crBitRegSO, 0); // todo - copy from XER - - //ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, registerR, registerR, 0, PPCREC_CR_MODE_LOGICAL); -} - -void PPCRecompilerImlGen_TW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - // split before and after to make sure the macro is in an isolated segment that we can make enterable - PPCIMLGen_CreateSplitSegmentAtEnd(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock); - ppcImlGenContext->currentOutputSegment->SetEnterable(ppcImlGenContext->ppcAddressOfCurrentInstruction); - PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext)->make_macro(PPCREC_IML_MACRO_LEAVE, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, 0, IMLREG_INVALID); - IMLSegment* middleSeg = PPCIMLGen_CreateSplitSegmentAtEnd(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock); - middleSeg->SetLinkBranchTaken(nullptr); - middleSeg->SetLinkBranchNotTaken(nullptr); -} - -bool PPCRecompilerImlGen_MTSPR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - uint32 rD, spr1, spr2, spr; - PPC_OPC_TEMPL_XO(opcode, rD, spr1, spr2); - spr = spr1 | (spr2<<5); - IMLReg gprReg = _GetRegGPR(ppcImlGenContext, rD); - if (spr == SPR_CTR || spr == SPR_LR) - { - IMLReg sprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, 
PPCREC_NAME_SPR0 + spr); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, sprReg, gprReg); - } - else if (spr >= SPR_UGQR0 && spr <= SPR_UGQR7) - { - IMLReg sprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, sprReg, gprReg); - ppcImlGenContext->tracking.modifiesGQR[spr - SPR_UGQR0] = true; - } - else - return false; - return true; -} - -bool PPCRecompilerImlGen_MFSPR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - uint32 rD, spr1, spr2, spr; - PPC_OPC_TEMPL_XO(opcode, rD, spr1, spr2); - spr = spr1 | (spr2<<5); - IMLReg gprReg = _GetRegGPR(ppcImlGenContext, rD); - if (spr == SPR_LR || spr == SPR_CTR) - { - IMLReg sprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, gprReg, sprReg); - } - else if (spr >= SPR_UGQR0 && spr <= SPR_UGQR7) - { - IMLReg sprReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + spr); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, gprReg, sprReg); - } - else - return false; - return true; -} - -ATTR_MS_ABI uint32 PPCRecompiler_GetTBL() -{ - return (uint32)coreinit::OSGetSystemTime(); -} - -ATTR_MS_ABI uint32 PPCRecompiler_GetTBU() -{ - return (uint32)(coreinit::OSGetSystemTime() >> 32); -} - -bool PPCRecompilerImlGen_MFTB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - uint32 rD, spr1, spr2, spr; - PPC_OPC_TEMPL_XO(opcode, rD, spr1, spr2); - spr = spr1 | (spr2<<5); - - if( spr == SPR_TBL || spr == SPR_TBU ) - { - IMLReg resultReg = _GetRegGPR(ppcImlGenContext, rD); - ppcImlGenContext->emitInst().make_call_imm(spr == SPR_TBL ? 
(uintptr_t)PPCRecompiler_GetTBL : (uintptr_t)PPCRecompiler_GetTBU, IMLREG_INVALID, IMLREG_INVALID, IMLREG_INVALID, resultReg); - return true; - } - return false; -} - -void PPCRecompilerImlGen_MCRF(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - uint32 crD, crS, b; - PPC_OPC_TEMPL_X(opcode, crD, crS, b); - cemu_assert_debug((crD&3) == 0); - cemu_assert_debug((crS&3) == 0); - crD >>= 2; - crS >>= 2; - for (sint32 i = 0; i<4; i++) - { - IMLReg regCrSrcBit = _GetRegCR(ppcImlGenContext, crS * 4 + i); - IMLReg regCrDstBit = _GetRegCR(ppcImlGenContext, crD * 4 + i); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regCrDstBit, regCrSrcBit); - } -} - -bool PPCRecompilerImlGen_MFCR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - sint32 rD, rA, rB; - PPC_OPC_TEMPL_X(opcode, rD, rA, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regD, 0); - for (sint32 i = 0; i < 32; i++) - { - IMLReg regCrBit = _GetRegCR(ppcImlGenContext, i); - cemu_assert_debug(regCrBit.GetRegFormat() == IMLRegFormat::I32); // addition is only allowed between same-format regs - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_LEFT_SHIFT, regD, regD, 1); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regD, regD, regCrBit); - } - return true; -} - -bool PPCRecompilerImlGen_MTCRF(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - uint32 rS; - uint32 crMask; - PPC_OPC_TEMPL_XFX(opcode, rS, crMask); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regTmp = _GetRegTemporary(ppcImlGenContext, 0); - uint32 crBitMask = ppc_MTCRFMaskToCRBitMask(crMask); - for (sint32 f = 0; f < 32; f++) - { - if(((crBitMask >> f) & 1) == 0) - continue; - IMLReg regCrBit = _GetRegCR(ppcImlGenContext, f); - cemu_assert_debug(regCrBit.GetRegFormat() == IMLRegFormat::I32); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_U, regTmp, regS, (31-f)); - 
ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regCrBit, regTmp, 1); - } - return true; -} - -void PPCRecompilerImlGen_CMP(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isUnsigned) -{ - uint32 cr; - int rA, rB; - PPC_OPC_TEMPL_X(opcode, cr, rA, rB); - cr >>= 2; - - IMLReg gprRegisterA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg gprRegisterB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regXerSO = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_SO); - - IMLReg crBitRegLT = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_LT); - IMLReg crBitRegGT = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_GT); - IMLReg crBitRegEQ = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_EQ); - IMLReg crBitRegSO = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_SO); - - ppcImlGenContext->emitInst().make_compare(gprRegisterA, gprRegisterB, crBitRegLT, isUnsigned ? IMLCondition::UNSIGNED_LT : IMLCondition::SIGNED_LT); - ppcImlGenContext->emitInst().make_compare(gprRegisterA, gprRegisterB, crBitRegGT, isUnsigned ? 
IMLCondition::UNSIGNED_GT : IMLCondition::SIGNED_GT); - ppcImlGenContext->emitInst().make_compare(gprRegisterA, gprRegisterB, crBitRegEQ, IMLCondition::EQ); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, crBitRegSO, regXerSO); -} - -bool PPCRecompilerImlGen_CMPI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isUnsigned) -{ - uint32 cr; - int rA; - uint32 imm; - if (isUnsigned) - { - PPC_OPC_TEMPL_D_UImm(opcode, cr, rA, imm); - } - else - { - PPC_OPC_TEMPL_D_SImm(opcode, cr, rA, imm); - } - cr >>= 2; - - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regXerSO = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_SO); - - IMLReg crBitRegLT = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_LT); - IMLReg crBitRegGT = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_GT); - IMLReg crBitRegEQ = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_EQ); - IMLReg crBitRegSO = _GetRegCR(ppcImlGenContext, cr, Espresso::CR_BIT::CR_BIT_INDEX_SO); - - ppcImlGenContext->emitInst().make_compare_s32(regA, (sint32)imm, crBitRegLT, isUnsigned ? IMLCondition::UNSIGNED_LT : IMLCondition::SIGNED_LT); - ppcImlGenContext->emitInst().make_compare_s32(regA, (sint32)imm, crBitRegGT, isUnsigned ? 
IMLCondition::UNSIGNED_GT : IMLCondition::SIGNED_GT); - ppcImlGenContext->emitInst().make_compare_s32(regA, (sint32)imm, crBitRegEQ, IMLCondition::EQ); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, crBitRegSO, regXerSO); - - return true; + ppcRecRange_t recRange; + recRange.ppcAddress = startAddress; + recRange.ppcSize = instructionCount*4 + 4; // + 4 because we have to include the BLR + ppcImlGenContext->functionRef->list_ranges.push_back(recRange); } bool PPCRecompilerImlGen_B(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) @@ -621,26 +637,43 @@ bool PPCRecompilerImlGen_B(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) if( opcode&PPC_OPC_LK ) { // function call - ppcImlGenContext->emitInst().make_macro(PPCREC_IML_MACRO_BL, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch, IMLREG_INVALID); + // check if function can be inlined + sint32 inlineFuncInstructionCount = 0; + if (PPCRecompiler_canInlineFunction(jumpAddressDest, &inlineFuncInstructionCount)) + { + // generate NOP iml instead of BL macro (this assures that segment PPC range remains intact) + PPCRecompilerImlGen_generateNewInstruction_noOp(ppcImlGenContext, NULL); + //cemuLog_log(LogType::Force, "Inline func 0x{:08x} at {:08x}", jumpAddressDest, ppcImlGenContext->ppcAddressOfCurrentInstruction); + uint32* prevInstructionPtr = ppcImlGenContext->currentInstruction; + ppcImlGenContext->currentInstruction = (uint32*)memory_getPointerFromVirtualOffset(jumpAddressDest); + PPCRecompiler_generateInlinedCode(ppcImlGenContext, jumpAddressDest, inlineFuncInstructionCount); + ppcImlGenContext->currentInstruction = prevInstructionPtr; + return true; + } + // generate funtion call instructions + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BL, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch); + 
PPCRecompilerImlGen_generateNewInstruction_ppcEnter(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction+4); return true; } // is jump destination within recompiled function? - if (ppcImlGenContext->boundaryTracker->ContainsAddress(jumpAddressDest)) - ppcImlGenContext->emitInst().make_jump(); + if( jumpAddressDest >= ppcImlGenContext->functionRef->ppcAddress && jumpAddressDest < (ppcImlGenContext->functionRef->ppcAddress + ppcImlGenContext->functionRef->ppcSize) ) + { + // generate instruction + PPCRecompilerImlGen_generateNewInstruction_jump(ppcImlGenContext, NULL, jumpAddressDest); + } else - ppcImlGenContext->emitInst().make_macro(PPCREC_IML_MACRO_B_FAR, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch, IMLREG_INVALID); + { + // todo: Inline this jump destination if possible (in many cases it's a bunch of GPR/FPR store instructions + BLR) + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_B_FAR, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch); + } return true; } bool PPCRecompilerImlGen_BC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - PPCIMLGen_AssertIfNotLastSegmentInstruction(*ppcImlGenContext); - uint32 BO, BI, BD; PPC_OPC_TEMPL_B(opcode, BO, BI, BD); - Espresso::BOField boField(BO); - uint32 crRegister = BI/4; uint32 crBit = BI%4; uint32 jumpCondition = 0; @@ -649,10 +682,6 @@ bool PPCRecompilerImlGen_BC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) bool decrementerMustBeZero = (BO&2)!=0; // bit set -> branch if CTR = 0, bit not set -> branch if CTR != 0 bool ignoreCondition = (BO&16)!=0; - IMLReg regCRBit; - if (!ignoreCondition) - regCRBit = _GetRegCR(ppcImlGenContext, crRegister, crBit); - uint32 jumpAddressDest = BD; if( (opcode&PPC_OPC_AA) == 0 ) { @@ -661,15 +690,37 @@ bool PPCRecompilerImlGen_BC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) if( opcode&PPC_OPC_LK 
) { - if (useDecrementer) - return false; // conditional function calls are not supported if( ignoreCondition == false ) { - PPCBasicBlockInfo* currentBasicBlock = ppcImlGenContext->currentBasicBlock; - IMLSegment* blSeg = PPCIMLGen_CreateNewSegmentAsBranchTarget(*ppcImlGenContext, *currentBasicBlock); - ppcImlGenContext->emitInst().make_conditional_jump(regCRBit, conditionMustBeTrue); - blSeg->AppendInstruction()->make_macro(PPCREC_IML_MACRO_BL, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch, IMLREG_INVALID); + // generate jump condition + if( conditionMustBeTrue ) + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_GE; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_LE; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_NE; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_NSUMMARYOVERFLOW; + } + else + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_L; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_G; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_E; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_SUMMARYOVERFLOW; + } + // generate instruction + //PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, 0); + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction+4, jumpCondition, crRegister, crBit, !conditionMustBeTrue); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BL, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch); + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction+4); return true; } return false; @@ -679,11 +730,12 @@ bool 
PPCRecompilerImlGen_BC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { if( ignoreCondition == false ) return false; // not supported for the moment - IMLReg ctrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0+SPR_CTR); - IMLReg tmpBoolReg = _GetRegTemporaryS8(ppcImlGenContext, 1); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_SUB, ctrRegister, ctrRegister, 1); - ppcImlGenContext->emitInst().make_compare_s32(ctrRegister, 0, tmpBoolReg, decrementerMustBeZero ? IMLCondition::EQ : IMLCondition::NEQ); - ppcImlGenContext->emitInst().make_conditional_jump(tmpBoolReg, true); + uint32 ctrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0+SPR_CTR, false); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_SUB, ctrRegister, 1, 0, false, false, PPCREC_CR_REG_TEMP, PPCREC_CR_MODE_ARITHMETIC); + if( decrementerMustBeZero ) + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, jumpAddressDest, PPCREC_JUMP_CONDITION_E, PPCREC_CR_REG_TEMP, 0, false); + else + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, jumpAddressDest, PPCREC_JUMP_CONDITION_NE, PPCREC_CR_REG_TEMP, 0, false); return true; } else @@ -691,90 +743,219 @@ bool PPCRecompilerImlGen_BC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) if( ignoreCondition ) { // branch always, no condition and no decrementer - // not supported - return false; + debugBreakpoint(); + crRegister = PPC_REC_INVALID_REGISTER; // not necessary but lets optimizer know we dont care for cr register on this instruction } else { - if (ppcImlGenContext->boundaryTracker->ContainsAddress(jumpAddressDest)) + // generate jump condition + if( conditionMustBeTrue ) + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_GE; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_LE; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_NE; + else if( crBit == 3 ) + 
jumpCondition = PPCREC_JUMP_CONDITION_NSUMMARYOVERFLOW; + } + else + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_L; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_G; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_E; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_SUMMARYOVERFLOW; + } + + if (jumpAddressDest >= ppcImlGenContext->functionRef->ppcAddress && jumpAddressDest < (ppcImlGenContext->functionRef->ppcAddress + ppcImlGenContext->functionRef->ppcSize)) { // near jump - ppcImlGenContext->emitInst().make_conditional_jump(regCRBit, conditionMustBeTrue); + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, jumpAddressDest, jumpCondition, crRegister, crBit, conditionMustBeTrue); } else { // far jump - debug_printf("PPCRecompilerImlGen_BC(): Far jump not supported yet"); - return false; + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction + 4, jumpCondition, crRegister, crBit, !conditionMustBeTrue); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_B_FAR, ppcImlGenContext->ppcAddressOfCurrentInstruction, jumpAddressDest, ppcImlGenContext->cyclesSinceLastBranch); + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction + 4); } } } return true; } -// BCCTR or BCLR -bool PPCRecompilerImlGen_BCSPR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 sprReg) +bool PPCRecompilerImlGen_BCLR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - PPCIMLGen_AssertIfNotLastSegmentInstruction(*ppcImlGenContext); + uint32 BO, BI, BD; + PPC_OPC_TEMPL_XL(opcode, BO, BI, BD); - Espresso::BOField BO; - uint32 BI; - bool LK; - Espresso::decodeOp_BCSPR(opcode, BO, BI, LK); uint32 crRegister = BI/4; uint32 crBit = BI%4; - IMLReg regCRBit; - if (!BO.conditionIgnore()) - regCRBit = _GetRegCR(ppcImlGenContext, 
crRegister, crBit); + uint32 jumpCondition = 0; - IMLReg branchDestReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + sprReg); - if (LK) + bool conditionMustBeTrue = (BO&8)!=0; + bool useDecrementer = (BO&4)==0; // bit not set -> decrement + bool decrementerMustBeZero = (BO&2)!=0; // bit set -> branch if CTR = 0, bit not set -> branch if CTR != 0 + bool ignoreCondition = (BO&16)!=0; + bool saveLR = (opcode&PPC_OPC_LK)!=0; + // since we skip this instruction if the condition is true, we need to invert the logic + bool invertedConditionMustBeTrue = !conditionMustBeTrue; + if( useDecrementer ) { - if (sprReg == SPR_LR) - { - // if the branch target is LR, then preserve it in a temporary - cemu_assert_suspicious(); // this case needs testing - IMLReg tmpRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, tmpRegister, branchDestReg); - branchDestReg = tmpRegister; - } - IMLReg registerLR = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_LR); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, registerLR, ppcImlGenContext->ppcAddressOfCurrentInstruction + 4); - } - - if (!BO.decrementerIgnore()) - { - cemu_assert_unimplemented(); - return false; - } - else if (!BO.conditionIgnore()) - { - // no decrementer but CR check - cemu_assert_debug(ppcImlGenContext->currentBasicBlock->hasContinuedFlow); - cemu_assert_debug(!ppcImlGenContext->currentBasicBlock->hasBranchTarget); - PPCBasicBlockInfo* currentBasicBlock = ppcImlGenContext->currentBasicBlock; - IMLSegment* bctrSeg = PPCIMLGen_CreateNewSegmentAsBranchTarget(*ppcImlGenContext, *currentBasicBlock); - ppcImlGenContext->emitInst().make_conditional_jump(regCRBit, !BO.conditionInverted()); - bctrSeg->AppendInstruction()->make_macro(PPCREC_IML_MACRO_B_TO_REG, 0, 0, 0, branchDestReg); + cemu_assert_debug(false); + return false; // unsupported } else { - // branch always, 
no condition and no decrementer check - cemu_assert_debug(!ppcImlGenContext->currentBasicBlock->hasContinuedFlow); - cemu_assert_debug(!ppcImlGenContext->currentBasicBlock->hasBranchTarget); - ppcImlGenContext->emitInst().make_macro(PPCREC_IML_MACRO_B_TO_REG, 0, 0, 0, branchDestReg); + if( ignoreCondition ) + { + // store LR + if( saveLR ) + { + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BLRL, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, ppcImlGenContext->cyclesSinceLastBranch); + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction+4); + } + else + { + // branch always, no condition and no decrementer + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BLR, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, ppcImlGenContext->cyclesSinceLastBranch); + } + } + else + { + // store LR + if( saveLR ) + { + uint32 registerLR = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_SPR0+SPR_LR); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, registerLR, (ppcImlGenContext->ppcAddressOfCurrentInstruction+4)&0x7FFFFFFF, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + } + // generate jump condition + if( invertedConditionMustBeTrue ) + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_L; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_G; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_E; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_SUMMARYOVERFLOW; + } + else + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_GE; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_LE; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_NE; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_NSUMMARYOVERFLOW; + } + // jump if BCLR condition NOT met (jump to jumpmark of next 
instruction, essentially skipping current instruction) + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction+4, jumpCondition, crRegister, crBit, invertedConditionMustBeTrue); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BLR, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, ppcImlGenContext->cyclesSinceLastBranch); + } + } + return true; +} + +bool PPCRecompilerImlGen_BCCTR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + uint32 BO, BI, BD; + PPC_OPC_TEMPL_XL(opcode, BO, BI, BD); + + uint32 crRegister = BI/4; + uint32 crBit = BI%4; + + uint32 jumpCondition = 0; + + bool conditionMustBeTrue = (BO&8)!=0; + bool useDecrementer = (BO&4)==0; // bit not set -> decrement + bool decrementerMustBeZero = (BO&2)!=0; // bit set -> branch if CTR = 0, bit not set -> branch if CTR != 0 + bool ignoreCondition = (BO&16)!=0; + bool saveLR = (opcode&PPC_OPC_LK)!=0; + // since we skip this instruction if the condition is true, we need to invert the logic + bool invertedConditionMustBeTrue = !conditionMustBeTrue; + if( useDecrementer ) + { + assert_dbg(); + // if added, dont forget inverted logic + debug_printf("Rec: BCLR unsupported decrementer\n"); + return false; // unsupported + } + else + { + if( ignoreCondition ) + { + // store LR + if( saveLR ) + { + uint32 registerLR = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_SPR0+SPR_LR); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, registerLR, (ppcImlGenContext->ppcAddressOfCurrentInstruction+4)&0x7FFFFFFF, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BCTRL, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, ppcImlGenContext->cyclesSinceLastBranch); + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(ppcImlGenContext, 
ppcImlGenContext->ppcAddressOfCurrentInstruction+4); + } + else + { + // branch always, no condition and no decrementer + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BCTR, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, ppcImlGenContext->cyclesSinceLastBranch); + } + } + else + { + // store LR + if( saveLR ) + { + uint32 registerLR = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_SPR0+SPR_LR); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, registerLR, (ppcImlGenContext->ppcAddressOfCurrentInstruction+4)&0x7FFFFFFF, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + } + // generate jump condition + if( invertedConditionMustBeTrue ) + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_L; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_G; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_E; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_SUMMARYOVERFLOW; + } + else + { + if( crBit == 0 ) + jumpCondition = PPCREC_JUMP_CONDITION_GE; + else if( crBit == 1 ) + jumpCondition = PPCREC_JUMP_CONDITION_LE; + else if( crBit == 2 ) + jumpCondition = PPCREC_JUMP_CONDITION_NE; + else if( crBit == 3 ) + jumpCondition = PPCREC_JUMP_CONDITION_NSUMMARYOVERFLOW; + } + // jump if BCLR condition NOT met (jump to jumpmark of next instruction, essentially skipping current instruction) + PPCRecompilerImlGen_generateNewInstruction_conditionalJump(ppcImlGenContext, ppcImlGenContext->ppcAddressOfCurrentInstruction+4, jumpCondition, crRegister, crBit, invertedConditionMustBeTrue); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_BCTR, ppcImlGenContext->ppcAddressOfCurrentInstruction, 0, ppcImlGenContext->cyclesSinceLastBranch); + } } return true; } bool PPCRecompilerImlGen_ISYNC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { + // does not need to be translated return true; } bool 
PPCRecompilerImlGen_SYNC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { + // does not need to be translated return true; } @@ -782,12 +963,102 @@ bool PPCRecompilerImlGen_ADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regD, regA, regB); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + //hCPU->gpr[rD] = (int)hCPU->gpr[rA] + (int)hCPU->gpr[rB]; + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ADD, registerRD, registerRA, registerRB, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ADD, registerRD, registerRA, registerRB); + } + return true; +} + +bool PPCRecompilerImlGen_ADDC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rD, rA, rB; + PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); + //hCPU->gpr[rD] = (int)hCPU->gpr[rA] + (int)hCPU->gpr[rB]; -> Update carry + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ADD_UPDATE_CARRY, registerRD, registerRA, registerRB, 0, 
PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ADD_UPDATE_CARRY, registerRD, registerRA, registerRB); + return true; +} + +bool PPCRecompilerImlGen_ADDE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rD, rA, rB; + PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); + // hCPU->gpr[rD] = hCPU->gpr[rA] + hCPU->gpr[rB] + ca; + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY, registerRD, registerRB, registerRA, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY, registerRD, registerRB, registerRA); + return true; +} + +bool PPCRecompilerImlGen_ADDZE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rD, rA, rB; + PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); + PPC_ASSERT(rB == 0); + //uint32 a = hCPU->gpr[rA]; + //uint32 ca = hCPU->xer_ca; + //hCPU->gpr[rD] = a + ca; + + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + // move rA to rD + if( registerRA != registerRD ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, registerRD, registerRA); + } + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD_CARRY, registerRD, registerRD, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, 
PPCREC_IML_OP_ADD_CARRY, registerRD, registerRD); + } + return true; +} + +bool PPCRecompilerImlGen_ADDME(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rD, rA, rB; + PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); + PPC_ASSERT(rB == 0); + //uint32 a = hCPU->gpr[rA]; + //uint32 ca = hCPU->xer_ca; + //hCPU->gpr[rD] = a + ca + -1; + + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + // move rA to rD + if( registerRA != registerRD ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, registerRD, registerRA); + } + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD_CARRY_ME, registerRD, registerRD, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD_CARRY_ME, registerRD, registerRD); + } return true; } @@ -796,16 +1067,22 @@ bool PPCRecompilerImlGen_ADDI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - if (rA != 0) + //hCPU->gpr[rD] = (rA ? 
(int)hCPU->gpr[rA] : 0) + (int)imm; + if( rA != 0 ) { - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, regD, regA, imm); + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if rD is already loaded, else use new temporary register + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, registerRD, registerRA, imm); } else { - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regD, imm); + // rA not used, instruction is value assignment + // rD = imm + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, registerRD, imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); } + // never updates any cr return true; } @@ -814,88 +1091,49 @@ bool PPCRecompilerImlGen_ADDIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opco int rD, rA; uint32 imm; PPC_OPC_TEMPL_D_Shift16(opcode, rD, rA, imm); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - if (rA != 0) + if( rA != 0 ) { - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, regD, regA, (sint32)imm); + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if rD is already loaded, else use new temporary register + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, registerRD, registerRA, (sint32)imm); } else { - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regD, (sint32)imm); + // rA not used, instruction turns into simple value assignment + // rD = imm + uint32 
registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, registerRD, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); } + // never updates any cr return true; } -bool PPCRecompilerImlGen_ADDC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - // r = a + b -> update carry - sint32 rD, rA, rB; - PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regRA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regRB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regRD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r_r_carry(PPCREC_IML_OP_ADD, regRD, regRA, regRB, regCa); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regRD); - return true; -} - -bool PPCRecompilerImlGen_ADDIC_(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool updateCR0) +bool PPCRecompilerImlGen_ADDIC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r_s32_carry(PPCREC_IML_OP_ADD, regD, regA, (sint32)imm, regCa); - if(updateCR0) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + // rD = rA + imm; + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if rD is already loaded, else use new temporary register + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD_UPDATE_CARRY, registerRD, registerRA, imm); + // never updates any cr return true; } -bool 
PPCRecompilerImlGen_ADDE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +bool PPCRecompilerImlGen_ADDIC_(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // r = a + b + carry -> update carry - sint32 rD, rA, rB; - PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regRA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regRB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regRD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r_r_carry(PPCREC_IML_OP_ADD_WITH_CARRY, regRD, regRA, regRB, regCa); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regRD); - return true; -} - -bool PPCRecompilerImlGen_ADDZE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - // r = a + carry -> update carry - sint32 rD, rA, rB; - PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regRA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regRD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r_s32_carry(PPCREC_IML_OP_ADD_WITH_CARRY, regRD, regRA, 0, regCa); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regRD); - return true; -} - -bool PPCRecompilerImlGen_ADDME(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - // r = a + 0xFFFFFFFF + carry -> update carry - sint32 rD, rA, rB; - PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regRA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regRD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r_s32_carry(PPCREC_IML_OP_ADD_WITH_CARRY, regRD, regRA, -1, regCa); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regRD); + // this opcode is identical to ADDIC but additionally it updates CR0 + sint32 rD, rA; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, 
imm); + // rD = rA + imm; + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if rD is already loaded, else use new temporary register + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD_UPDATE_CARRY, registerRD, registerRA, imm, 0, PPCREC_CR_MODE_LOGICAL); return true; } @@ -903,79 +1141,74 @@ bool PPCRecompilerImlGen_SUBF(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - // rD = ~rA + rB + 1 - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_SUB, regD, regB, regA); - if ((opcode & PPC_OPC_RC)) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + // hCPU->gpr[rD] = ~hCPU->gpr[rA] + hCPU->gpr[rB] + 1; + // rD = rB - rA + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SUB, registerRD, registerRB, registerRA, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SUB, registerRD, registerRB, registerRA); return true; } bool PPCRecompilerImlGen_SUBFE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // d = ~a + b + ca; sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regTmp = 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regTmp, regA); - ppcImlGenContext->emitInst().make_r_r_r_carry(PPCREC_IML_OP_ADD_WITH_CARRY, regD, regTmp, regB, regCa); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + // hCPU->gpr[rD] = ~hCPU->gpr[rA] + hCPU->gpr[rB] + ca; + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY, registerRD, registerRB, registerRA, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY, registerRD, registerRB, registerRA); return true; } bool PPCRecompilerImlGen_SUBFZE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // d = ~a + ca; sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regTmp, regA); - ppcImlGenContext->emitInst().make_r_r_s32_carry(PPCREC_IML_OP_ADD_WITH_CARRY, regD, regTmp, 0, regCa); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + if( rB != 0 ) + debugBreakpoint(); + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 
registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY, registerRD, registerRA, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY, registerRD, registerRA); return true; } bool PPCRecompilerImlGen_SUBFC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // d = ~a + b + 1; sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regTmp, regA); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regCa, 1); // set input carry to simulate offset of 1 - ppcImlGenContext->emitInst().make_r_r_r_carry(PPCREC_IML_OP_ADD_WITH_CARRY, regD, regTmp, regB, regCa); - if ((opcode & PPC_OPC_RC)) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + // hCPU->gpr[rD] = ~hCPU->gpr[rA] + hCPU->gpr[rB] + 1; + // rD = rB - rA + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SUBFC, registerRD, registerRA, registerRB); + if (opcode & PPC_OPC_RC) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, registerRD, registerRD, 0, PPCREC_CR_MODE_LOGICAL); return 
true; } bool PPCRecompilerImlGen_SUBFIC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // d = ~a + imm + 1 sint32 rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regCa = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); - IMLReg regTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regTmp, regA); - ppcImlGenContext->emitInst().make_r_r_s32_carry(PPCREC_IML_OP_ADD, regD, regTmp, (sint32)imm + 1, regCa); + //uint32 a = hCPU->gpr[rA]; + //hCPU->gpr[rD] = ~a + imm + 1; + // cr0 is never affected + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_SUBFC, registerRD, registerRA, imm); return true; } @@ -984,9 +1217,10 @@ bool PPCRecompilerImlGen_MULLI(ppcImlGenContext_t* ppcImlGenContext, uint32 opco int rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_MULTIPLY_SIGNED, regD, regA, (sint32)imm); + // mulli instruction does not modify any flags + uint32 registerResult = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); + uint32 registerOperand = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_SIGNED, registerResult, registerOperand, (sint32)imm); return true; } @@ -994,16 +1228,18 @@ bool PPCRecompilerImlGen_MULLW(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 rD, rA, 
rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); + //hCPU->gpr[rD] = hCPU->gpr[rA] * hCPU->gpr[rB]; + uint32 registerResult = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); + uint32 registerOperand1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerOperand2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); if (opcode & PPC_OPC_OE) { return false; } - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_MULTIPLY_SIGNED, regD, regA, regB); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_SIGNED, registerResult, registerOperand1, registerOperand2, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_SIGNED, registerResult, registerOperand1, registerOperand2); return true; } @@ -1011,12 +1247,14 @@ bool PPCRecompilerImlGen_MULHW(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED, regD, regA, regB); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + //hCPU->gpr[rD] = ((sint64)(sint32)hCPU->gpr[rA] * (sint64)(sint32)hCPU->gpr[rB])>>32; + uint32 registerResult = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); + uint32 registerOperand1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerOperand2 = 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED, registerResult, registerOperand1, registerOperand2, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED, registerResult, registerOperand1, registerOperand2); return true; } @@ -1024,12 +1262,14 @@ bool PPCRecompilerImlGen_MULHWU(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED, regD, regA, regB); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + //hCPU->gpr[rD] = (hCPU->gpr[rA] * hCPU->gpr[rB])>>32; + uint32 registerResult = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); + uint32 registerOperand1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerOperand2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED, registerResult, registerOperand1, registerOperand2, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED, registerResult, registerOperand1, registerOperand2); return true; } @@ -1037,12 +1277,18 @@ bool PPCRecompilerImlGen_DIVW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regR = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg 
regB = _GetRegGPR(ppcImlGenContext, rB); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_DIVIDE_SIGNED, regR, regA, regB); + // hCPU->gpr[rD] = (sint32)a / (sint32)b; + uint32 registerResult = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); + uint32 registerOperand1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerOperand2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regR); + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_DIVIDE_SIGNED, registerResult, registerOperand1, registerOperand2, 0, PPCREC_CR_MODE_ARITHMETIC); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_DIVIDE_SIGNED, registerResult, registerOperand1, registerOperand2); + } return true; } @@ -1050,66 +1296,84 @@ bool PPCRecompilerImlGen_DIVWU(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_DIVIDE_UNSIGNED, regD, regA, regB); + // hCPU->gpr[rD] = (uint32)a / (uint32)b; + uint32 registerResult = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); + uint32 registerOperand1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerOperand2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_DIVIDE_UNSIGNED, registerResult, registerOperand1, registerOperand2, 0, PPCREC_CR_MODE_ARITHMETIC); + } + else + { + 
PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_DIVIDE_UNSIGNED, registerResult, registerOperand1, registerOperand2); + } return true; } bool PPCRecompilerImlGen_RLWINM(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - sint32 rS, rA, SH, MB, ME; + int rS, rA, SH, MB, ME; PPC_OPC_TEMPL_M(opcode, rS, rA, SH, MB, ME); uint32 mask = ppc_mask(MB, ME); + //uint32 v = ppc_word_rotl(hCPU->gpr[rS], SH); + //hCPU->gpr[rA] = v & mask; - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - if( ME == (31-SH) && MB == 0 ) + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // handle special forms of RLWINM + if( SH == 0 && SH == (ME-SH) && MB == 0 ) + { + // CLRRWI + // todo + } + else if( ME == (31-SH) && MB == 0 ) { // SLWI - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_LEFT_SHIFT, regA, regS, SH); + if(opcode&PPC_OPC_RC) + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_LEFT_SHIFT, registerRA, registerRS, SH, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_LEFT_SHIFT, registerRA, registerRS, SH); + return true; } else if( SH == (32-MB) && ME == 31 ) { // SRWI - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_U, regA, regS, MB); + if(opcode&PPC_OPC_RC) + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_RIGHT_SHIFT, registerRA, registerRS, MB, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_RIGHT_SHIFT, registerRA, registerRS, MB); + return true; + } + // general handler + if( registerRA != registerRS ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, 
registerRA, registerRS); + if( SH != 0 ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_LEFT_ROTATE, registerRA, SH, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + if(opcode&PPC_OPC_RC) + { + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_AND, registerRA, (sint32)mask, 0, false, false, 0, PPCREC_CR_MODE_LOGICAL); } else { - // general handler - if (rA != rS) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regA, regS); - if (SH != 0) - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_LEFT_ROTATE, regA, SH); - if (mask != 0xFFFFFFFF) - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regA, regA, (sint32)mask); + if( mask != 0xFFFFFFFF ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_AND, registerRA, (sint32)mask, 0, false, false, PPC_REC_INVALID_REGISTER, 0); } - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); return true; } bool PPCRecompilerImlGen_RLWIMI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - sint32 rS, rA, SH, MB, ME; + int rS, rA, SH, MB, ME; PPC_OPC_TEMPL_M(opcode, rS, rA, SH, MB, ME); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regR = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regTmp = _GetRegTemporary(ppcImlGenContext, 0); - uint32 mask = ppc_mask(MB, ME); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regTmp, regS); - if (SH) - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_LEFT_ROTATE, regTmp, SH); - if (mask != 0) - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regR, regR, (sint32)~mask); - if (mask != 0xFFFFFFFF) - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regTmp, regTmp, (sint32)mask); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_OR, regR, regR, regTmp); + + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRA = 
PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // pack RLWIMI parameters into single integer + uint32 vImm = MB|(ME<<8)|(SH<<16); + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_RLWIMI, registerRA, registerRS, (sint32)vImm, PPC_REC_INVALID_REGISTER, 0); if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regR); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, registerRA, registerRA, 0, PPCREC_CR_MODE_LOGICAL); return true; } @@ -1117,61 +1381,61 @@ bool PPCRecompilerImlGen_RLWNM(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 rS, rA, rB, MB, ME; PPC_OPC_TEMPL_M(opcode, rS, rA, rB, MB, ME); + // uint32 v = ppc_word_rotl(hCPU->gpr[rS], hCPU->gpr[rB]); uint32 mask = ppc_mask(MB, ME); - IMLReg regS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); - IMLReg regB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); - IMLReg regA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_LEFT_ROTATE, regA, regS, regB); - if( mask != 0xFFFFFFFF ) - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regA, regA, (sint32)mask); + // uint32 v = ppc_word_rotl(hCPU->gpr[rS], hCPU->gpr[rB]); + // hCPU->gpr[rA] = v & mask; + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_LEFT_ROTATE, registerRA, registerRS, registerRB); if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + { + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_AND, registerRA, 
(sint32)mask, 32, false, false, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + if( mask != 0xFFFFFFFF ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_AND, registerRA, (sint32)mask, 32, false, false, PPC_REC_INVALID_REGISTER, 0); + } return true; } bool PPCRecompilerImlGen_SRAW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // unlike SRAWI, for SRAW the shift range is 0-63 (masked to 6 bits) - // but only shifts up to register bitwidth minus one are well defined in IML so this requires special handling for shifts >= 32 sint32 rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); - IMLReg regB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); - IMLReg regA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - IMLReg regCarry = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_XER_CA); + //uint32 SH = hCPU->gpr[rB] & 0x3f; + //hCPU->gpr[rA] = hCPU->gpr[rS]; + //hCPU->xer_ca = 0; + //if (hCPU->gpr[rA] & 0x80000000) { + // uint32 ca = 0; + // for (uint32 i=0; i < SH; i++) { + // if (hCPU->gpr[rA] & 1) ca = 1; + // hCPU->gpr[rA] >>= 1; + // hCPU->gpr[rA] |= 0x80000000; + // } + // if (ca) hCPU->xer_ca = 1; + //} else { + // if (SH > 31) { + // hCPU->gpr[rA] = 0; + // } else { + // hCPU->gpr[rA] >>= SH; + // } + //} + //if (Opcode & PPC_OPC_RC) { + // // update cr0 flags + // ppc_update_cr0(hCPU, hCPU->gpr[rA]); + //} - IMLReg regTmpShiftAmount = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - IMLReg regTmpCondBool = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 1); - IMLReg regTmp1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 2); - IMLReg regTmp2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 3); - - // load masked shift factor into temporary register - 
ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regTmpShiftAmount, regB, 0x3F); - ppcImlGenContext->emitInst().make_compare_s32(regTmpShiftAmount, 31, regTmpCondBool, IMLCondition::UNSIGNED_GT); - ppcImlGenContext->emitInst().make_conditional_jump(regTmpCondBool, true); - - PPCIMLGen_CreateSegmentBranchedPath(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock, - [&](ppcImlGenContext_t& genCtx) - { - /* branch taken, shift size 32 or above */ - genCtx.emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_S, regA, regS, 31); // shift the sign bit into all the bits - genCtx.emitInst().make_compare_s32(regA, 0, regCarry, IMLCondition::NEQ); - }, - [&](ppcImlGenContext_t& genCtx) - { - /* branch not taken, shift size below 32 */ - genCtx.emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_S, regTmp1, regS, 31); // signMask = input >> 31 (arithmetic shift) - genCtx.emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regTmp2, 1); // shiftMask = ((1<emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_S, regTmp, regS, 31); // signMask = input >> 31 (arithmetic shift) - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_AND, regTmp, regTmp, regS); // testValue = input & signMask & ((1<emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regTmp, regTmp, ((1 << SH) - 1)); - ppcImlGenContext->emitInst().make_compare_s32(regTmp, 0, regCarry, IMLCondition::NEQ); // ca = (testValue != 0) - // do the actual shift - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_S, regA, regS, (sint32)SH); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_SRAW, registerRA, registerRS, (sint32)SH, 0, PPCREC_CR_MODE_LOGICAL); + else + 
PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_SRAW, registerRA, registerRS, (sint32)SH); return true; } @@ -1204,12 +1459,17 @@ bool PPCRecompilerImlGen_SLW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode int rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_SLW, regA, regS, regB); - if ((opcode & PPC_OPC_RC)) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if (opcode & PPC_OPC_RC) + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SLW, registerRA, registerRS, registerRB, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SLW, registerRA, registerRS, registerRB, PPC_REC_INVALID_REGISTER, 0); + } return true; } @@ -1217,24 +1477,37 @@ bool PPCRecompilerImlGen_SRW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode { int rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_SRW, regA, regS, regB); + + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); if (opcode & PPC_OPC_RC) - 
PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SRW, registerRA, registerRS, registerRB, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_SRW, registerRA, registerRS, registerRB, PPC_REC_INVALID_REGISTER, 0); + } return true; } + bool PPCRecompilerImlGen_EXTSH(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN_S16_TO_S32, regA, regS); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + PPC_ASSERT(rB==0); + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if ( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN_S16_TO_S32, registerRA, registerRS, 0, PPCREC_CR_MODE_ARITHMETIC); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN_S16_TO_S32, registerRA, registerRS); + } return true; } @@ -1242,11 +1515,16 @@ bool PPCRecompilerImlGen_EXTSB(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN_S8_TO_S32, regA, regS); - if ((opcode & PPC_OPC_RC)) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + 
if ( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN_S8_TO_S32, registerRA, registerRS, 0, PPCREC_CR_MODE_ARITHMETIC); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN_S8_TO_S32, registerRA, registerRS); + } return true; } @@ -1254,11 +1532,30 @@ bool PPCRecompilerImlGen_CNTLZW(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_CNTLZW, regA, regS); - if ((opcode & PPC_OPC_RC)) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + PPC_ASSERT(rB==0); + if( opcode&PPC_OPC_RC ) + { + return false; + } + uint32 registerRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); + uint32 registerRA = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_CNTLZW, registerRA, registerRS); + + //uint32 n=0; + //uint32 x=0x80000000; + //uint32 v=hCPU->gpr[rS]; + //while (!(v & x)) { + // n++; + // if (n==32) break; + // x>>=1; + //} + //hCPU->gpr[rA] = n; + //if (Opcode & PPC_OPC_RC) { + // // update cr0 flags + // ppc_update_cr0(hCPU, hCPU->gpr[rA]); + //} + + return true; } @@ -1266,124 +1563,438 @@ bool PPCRecompilerImlGen_NEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode { sint32 rD, rA, rB; PPC_OPC_TEMPL_XO(opcode, rD, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NEG, regD, regA); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regD); + PPC_ASSERT(rB == 0); + //hCPU->gpr[rD] = -((signed int)hCPU->gpr[rA]); + //if (Opcode & PPC_OPC_RC) { + // // update cr0 flags + // 
ppc_update_cr0(hCPU, hCPU->gpr[rD]); + //} + uint32 registerRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 registerRD = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NEG, registerRD, registerRA, 0, PPCREC_CR_MODE_ARITHMETIC); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NEG, registerRD, registerRA); + } return true; } -bool PPCRecompilerImlGen_LOAD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 bitWidth, bool signExtend, bool isBigEndian, bool updateAddrReg) +void PPCRecompilerImlGen_LWZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int rA, rD; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - IMLReg regMemAddr; - if (rA == 0) + if( rA == 0 ) { - if (updateAddrReg) - return false; // invalid instruction form - regMemAddr = _GetRegTemporary(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regMemAddr, 0); + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; } - else - { - if (updateAddrReg && rA == rD) - return false; // invalid instruction form - regMemAddr = _GetRegGPR(ppcImlGenContext, rA); - } - if (updateAddrReg) - { - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, regMemAddr, regMemAddr, (sint32)imm); - imm = 0; - } - IMLReg regDst = _GetRegGPR(ppcImlGenContext, rD); - ppcImlGenContext->emitInst().make_r_memory(regDst, regMemAddr, (sint32)imm, bitWidth, signExtend, isBigEndian); - return true; + // load memory gpr into register + uint32 gprRegister = 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, imm, 32, false, true); } -void PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 bitWidth, bool signExtend, bool isBigEndian, bool updateAddrReg) +void PPCRecompilerImlGen_LWZU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half + 
PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, 32, false, true); +} + +void PPCRecompilerImlGen_LHA(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new temporary register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, imm, 16, true, true); +} + +void PPCRecompilerImlGen_LHAU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + 
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new temporary register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, 16, true, true); +} + +void PPCRecompilerImlGen_LHZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + // note: Darksiders 2 has this instruction form but it is never executed. + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new temporary register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, imm, 16, false, true); +} + +void PPCRecompilerImlGen_LHZU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ 
+ sint32 rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new temporary register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, 16, false, true); +} + +void PPCRecompilerImlGen_LBZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = 
PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load byte + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, imm, 8, false, true); +} + +void PPCRecompilerImlGen_LBZU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load byte + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, 8, false, true); +} + +bool PPCRecompilerImlGen_LWZX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // if rA == rD, then the EA wont be stored to rA. 
We could set updateAddrReg to false in such cases but the end result is the same since the loaded value would overwrite rA sint32 rA, rD, rB; PPC_OPC_TEMPL_X(opcode, rD, rA, rB); - updateAddrReg = updateAddrReg && (rA != 0); - IMLReg regA = rA != 0 ? _GetRegGPR(ppcImlGenContext, rA) : IMLREG_INVALID; - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regDst = _GetRegGPR(ppcImlGenContext, rD); - if (updateAddrReg) + if( rA == 0 ) { - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regA, regA, regB); - // use single register addressing - regB = regA; - regA = IMLREG_INVALID; + return false; } - if(regA.IsValid()) - PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, regDst, regA, regB, bitWidth, signExtend, isBigEndian); - else - ppcImlGenContext->emitInst().make_r_memory(regDst, regB, 0, bitWidth, signExtend, isBigEndian); -} - -bool PPCRecompilerImlGen_STORE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 bitWidth, bool isBigEndian, bool updateAddrReg) -{ - int rA, rD; - uint32 imm; - PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - IMLReg regA; - if (rA != 0) - { - regA = _GetRegGPR(ppcImlGenContext, rA); - } - else - { - if (updateAddrReg) - return false; // invalid instruction form - regA = _GetRegTemporary(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regA, 0); - } - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); - if (updateAddrReg) - { - if (rD == rA) - { - // make sure to keep source data intact - regD = _GetRegTemporary(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regD, regA); - } - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, regA, regA, (sint32)imm); - imm = 0; - } - ppcImlGenContext->emitInst().make_memory_r(regD, regA, (sint32)imm, bitWidth, isBigEndian); + // hCPU->gpr[rD] = memory_readU8((rA?hCPU->gpr[rA]:0)+hCPU->gpr[rB]); + // load memory rA and rB into register + uint32 gprRegisterA = 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load word + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 32, false, true); return true; } -bool PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 bitWidth, bool isBigEndian, bool updateAddrReg) +bool PPCRecompilerImlGen_LWZUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - sint32 rA, rS, rB; - PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regA = rA != 0 ? 
_GetRegGPR(ppcImlGenContext, rA) : IMLREG_INVALID; - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regSrc = _GetRegGPR(ppcImlGenContext, rS); - if (updateAddrReg) + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if( rA == 0 ) { - if(rA == 0) - return false; // invalid instruction form - if (regSrc == regA) - { - // make sure to keep source data intact - regSrc = _GetRegTemporary(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regSrc, regA); - } - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regA, regA, regB); - // use single register addressing - regB = regA; - regA = IMLREG_INVALID; + return false; } - if (regA.IsInvalid()) - ppcImlGenContext->emitInst().make_memory_r(regSrc, regB, 0, bitWidth, isBigEndian); + // load memory rA and rB into register + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // add rB to rA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegisterA, gprRegisterB); + // load word + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterA, 0, 32, false, true); + return true; +} + +bool PPCRecompilerImlGen_LWBRX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + // load memory rA and rB into register + uint32 gprRegisterA = 0; + if( rA ) + gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, 
PPCREC_NAME_R0 + rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0 + rD); + if (destinationRegister == PPC_REC_INVALID_REGISTER) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0 + rD); // else just create new register + // load word + if( rA ) + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 32, false, false); else - PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext, regSrc, regA, regB, bitWidth, false, isBigEndian); + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterB, 0, 32, false, false); + return true; +} + +bool PPCRecompilerImlGen_LHAX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return true; + } + // load memory rA and rB into register + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = 
PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half word + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 16, true, true); + return true; +} + +bool PPCRecompilerImlGen_LHAUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return true; + } + // load memory rA and rB into register + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // add rB to rA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegisterA, gprRegisterB); + // load half word + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterA, 0, 16, true, true); + return true; +} + +bool PPCRecompilerImlGen_LHZX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + 
PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return true; + } + // load memory rA and rB into register + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half word + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 16, false, true); + return true; +} + +bool PPCRecompilerImlGen_LHZUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return true; + } + // load memory rA and rB into register + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == 
PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // add rB to rA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegisterA, gprRegisterB); + // load hald word + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterA, 0, 16, false, true); + return true; +} + +void PPCRecompilerImlGen_LHBRX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + // load memory rA and rB into register + uint32 gprRegisterA = rA != 0 ? PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false) : 0; + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0 + rD); + if (destinationRegister == PPC_REC_INVALID_REGISTER) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0 + rD); // else just create new register + // load half word (little-endian) + if (rA == 0) + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterB, 0, 16, false, false); + else + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 16, false, false); +} + +bool PPCRecompilerImlGen_LBZX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if( rA == 0 ) + { + // special case where rA is ignored and only rB is used + return false; + } + // hCPU->gpr[rD] = memory_readU8((rA?hCPU->gpr[rA]:0)+hCPU->gpr[rB]); + // load memory rA and rB into register + uint32 gprRegisterA = 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load byte + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 8, false, true); + return true; +} + +bool PPCRecompilerImlGen_LBZUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + if (rA == 0) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return true; + } + // load memory rA and rB into register + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0 + rD); + if (destinationRegister == PPC_REC_INVALID_REGISTER) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0 + rD); // else just create new register + // add rB to rA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegisterA, gprRegisterB); + // load byte + 
PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterA, 0, 8, false, true); + return true; +} + +bool PPCRecompilerImlGen_LWARX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rD, rB; + PPC_OPC_TEMPL_X(opcode, rD, rA, rB); + // load memory rA and rB into register + uint32 gprRegisterA = rA != 0?PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false):0; + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load word + if( rA != 0 ) + PPCRecompilerImlGen_generateNewInstruction_r_memory_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, PPC_REC_LOAD_LWARX_MARKER, false, true); + else + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegisterB, 0, PPC_REC_LOAD_LWARX_MARKER, false, true); return true; } @@ -1392,33 +2003,257 @@ void PPCRecompilerImlGen_LMW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode sint32 rD, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); - cemu_assert_debug(rA != 0); + //uint32 ea = (rA ? 
hCPU->gpr[rA] : 0) + imm; sint32 index = 0; - while (rD <= 31) + while( rD <= 31 ) { - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regD = _GetRegGPR(ppcImlGenContext, rD); + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register // load word - ppcImlGenContext->emitInst().make_r_memory(regD, regA, (sint32)imm + index * 4, 32, false, true); + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, imm+index*4, 32, false, true); // next rD++; index++; } } +void PPCRecompilerImlGen_STW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + // note: Darksiders 2 has this instruction form but it is never executed. 
+ //PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); // can be the same as gprRegister + // store word + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, imm, 32, true); +} + +void PPCRecompilerImlGen_STWU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // store&update instructions where rD==rA store the register contents without added imm, therefore we need to handle it differently + // get memory gpr register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); // can be the same as gprRegister + // add imm to memory register early if possible + if( rD != rA ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // store word + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, (rD==rA)?imm:0, 32, true); + // add imm 
to memory register late if we couldn't do it early + if( rD == rA ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); +} + +void PPCRecompilerImlGen_STH(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); // can be the same as gprRegister + // load half + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, imm, 16, true); +} + +void PPCRecompilerImlGen_STHU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // get memory gpr register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); // can be the same as gprRegister + // add imm to memory register early if 
possible + if( rD != rA ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // store word + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, (rD==rA)?imm:0, 16, true); + // add imm to memory register late if we couldn't do it early + if( rD == rA ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); +} + +void PPCRecompilerImlGen_STB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rS; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rS, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); // can be the same as gprRegister + // store byte + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, imm, 8, true); +} + +void PPCRecompilerImlGen_STBU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, rD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, rD, rA, imm); + if( rA == 0 ) + { + // special form where gpr is ignored and only imm is used + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_DEBUGBREAK, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->ppcAddressOfCurrentInstruction, ppcImlGenContext->cyclesSinceLastBranch); + return; + 
} + // get memory gpr register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rD, false); // can be the same as gprRegister + // add imm to memory register early if possible + if( rD != rA ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // store byte + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, (rD==rA)?imm:0, 8, true); + // add imm to memory register late if we couldn't do it early + if( rD == rA ) + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); +} + +// generic indexed store (STWX, STHX, STBX, STWUX. If bitReversed == true -> STHBRX) +bool PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 storeBitWidth, bool byteReversed = false) +{ + sint32 rA, rS, rB; + PPC_OPC_TEMPL_X(opcode, rS, rA, rB); + // prepare registers + uint32 gprRegisterA; + if(rA != 0) + gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 destinationRegister = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + // store word + if (rA == 0) + { + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, destinationRegister, gprRegisterB, 0, storeBitWidth, !byteReversed); + } + else + PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, storeBitWidth, false, !byteReversed); + return true; +} + +bool 
PPCRecompilerImlGen_STORE_INDEXED_UPDATE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, uint32 storeBitWidth) +{ + sint32 rA, rS, rB; + PPC_OPC_TEMPL_X(opcode, rS, rA, rB); + if( rA == 0 ) + { + // not supported + return false; + } + if( rS == rA || rS == rB ) + { + // prepare registers + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 destinationRegister = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + // store word + PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, storeBitWidth, false, true); + // update EA after store + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegisterA, gprRegisterB); + return true; + } + // prepare registers + uint32 gprRegisterA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 sourceRegister = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + // update EA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegisterA, gprRegisterB); + // store word + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegisterA, 0, storeBitWidth, true); + return true; +} + +bool PPCRecompilerImlGen_STWCX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rS, rB; + PPC_OPC_TEMPL_X(opcode, rS, rA, rB); + // prepare registers + uint32 gprRegisterA = rA!=0?PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false):0; + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 destinationRegister = 
PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + // store word + if( rA != 0 ) + PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, PPC_REC_STORE_STWCX_MARKER, false, true); + else + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, destinationRegister, gprRegisterB, 0, PPC_REC_STORE_STWCX_MARKER, true); + return true; +} + +bool PPCRecompilerImlGen_STWBRX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, rS, rB; + PPC_OPC_TEMPL_X(opcode, rS, rA, rB); + // prepare registers + uint32 gprRegisterA = rA!=0?PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false):0; + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + uint32 destinationRegister = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + // store word + if( rA != 0 ) + PPCRecompilerImlGen_generateNewInstruction_memory_r_indexed(ppcImlGenContext, destinationRegister, gprRegisterA, gprRegisterB, 32, false, false); + else + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, destinationRegister, gprRegisterB, 0, 32, false); + return true; +} + void PPCRecompilerImlGen_STMW(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rS, rA; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, rS, rA, imm); - cemu_assert_debug(rA != 0); sint32 index = 0; while( rS <= 31 ) { - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); // can be the same as gprRegister // store word - ppcImlGenContext->emitInst().make_memory_r(regS, regA, (sint32)imm + index * 4, 
32, true); + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, imm+index*4, 32, true); // next rS++; index++; @@ -1431,43 +2266,70 @@ bool PPCRecompilerImlGen_LSWI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod PPC_OPC_TEMPL_X(opcode, rD, rA, nb); if( nb == 0 ) nb = 32; - - if (rA == 0) + if( nb == 4 ) { - cemu_assert_unimplemented(); // special form where gpr is ignored and EA is 0 - return false; - } - - // potential optimization: On x86 unaligned access is allowed and we could handle the case nb==4 with a single memory read, and nb==2 with a memory read and shift - - IMLReg memReg = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regTmp = _GetRegTemporary(ppcImlGenContext, 0); - uint32 memOffset = 0; - while (nb > 0) - { - if (rD == rA) - return false; - cemu_assert(rD < 32); - IMLReg regDst = _GetRegGPR(ppcImlGenContext, rD); - // load bytes one-by-one - for (sint32 b = 0; b < 4; b++) + // if nb == 4 this instruction immitates LWZ + if( rA == 0 ) { - ppcImlGenContext->emitInst().make_r_memory(regTmp, memReg, memOffset + b, 8, false, false); - sint32 shiftAmount = (3 - b) * 8; - if(shiftAmount) - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_LEFT_SHIFT, regTmp, regTmp, shiftAmount); - if(b == 0) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regDst, regTmp); - else - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_OR, regDst, regDst, regTmp); - nb--; - if (nb == 0) - break; +#ifdef CEMU_DEBUG_ASSERT + assert_dbg(); // special form where gpr is ignored and only imm is used +#endif + return false; } - memOffset += 4; - rD++; + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + 
destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, 32, false, true); + return true; } - return true; + else if( nb == 2 ) + { + // if nb == 2 this instruction immitates a LHZ but the result is shifted left by 16 bits + if( rA == 0 ) + { +#ifdef CEMU_DEBUG_ASSERT + assert_dbg(); // special form where gpr is ignored and only imm is used +#endif + return false; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, 16, false, true); + // shift + PPCRecompilerImlGen_generateNewInstruction_r_r_s32(ppcImlGenContext, PPCREC_IML_OP_LEFT_SHIFT, destinationRegister, destinationRegister, 16); + return true; + } + else if( nb == 3 ) + { + // if nb == 3 this instruction loads a 3-byte big-endian and the result is shifted left by 8 bits + if( rA == 0 ) + { +#ifdef CEMU_DEBUG_ASSERT + assert_dbg(); // special form where gpr is ignored and only imm is used +#endif + return false; + } + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // check if destination register is already loaded + uint32 destinationRegister = PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, PPCREC_NAME_R0+rD); + if( destinationRegister == 
PPC_REC_INVALID_REGISTER ) + destinationRegister = PPCRecompilerImlGen_getAndLockFreeTemporaryGPR(ppcImlGenContext, PPCREC_NAME_R0+rD); // else just create new register + // load half + PPCRecompilerImlGen_generateNewInstruction_r_memory(ppcImlGenContext, destinationRegister, gprRegister, 0, PPC_REC_STORE_LSWI_3, false, true); + return true; + } + debug_printf("PPCRecompilerImlGen_LSWI(): Unsupported nb value %d\n", nb); + return false; } bool PPCRecompilerImlGen_STSWI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) @@ -1476,111 +2338,38 @@ bool PPCRecompilerImlGen_STSWI(ppcImlGenContext_t* ppcImlGenContext, uint32 opco PPC_OPC_TEMPL_X(opcode, rS, rA, nb); if( nb == 0 ) nb = 32; - - IMLReg regMem = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regTmp = _GetRegTemporary(ppcImlGenContext, 0); - uint32 memOffset = 0; - while (nb > 0) + if( nb == 4 ) { - if (rS == rA) - return false; - cemu_assert(rS < 32); - IMLReg regSrc = _GetRegGPR(ppcImlGenContext, rS); - // store bytes one-by-one - for (sint32 b = 0; b < 4; b++) - { - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regTmp, regSrc); - sint32 shiftAmount = (3 - b) * 8; - if (shiftAmount) - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_U, regTmp, regTmp, shiftAmount); - ppcImlGenContext->emitInst().make_memory_r(regTmp, regMem, memOffset + b, 8, false); - nb--; - if (nb == 0) - break; - } - memOffset += 4; - rS++; + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); // can be the same as gprRegister + // store word + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, 0, 32, true); + return true; } - return true; -} - -bool PPCRecompilerImlGen_LWARX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - sint32 rA, rD, rB; 
- PPC_OPC_TEMPL_X(opcode, rD, rA, rB); - - IMLReg regA = rA != 0 ? PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA) : IMLREG_INVALID; - IMLReg regB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rB); - IMLReg regD = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rD); - IMLReg regMemResEA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_CPU_MEMRES_EA); - IMLReg regMemResVal = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_CPU_MEMRES_VAL); - // calculate EA - if (regA.IsValid()) - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regMemResEA, regA, regB); - else - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regMemResEA, regB); - // load word - ppcImlGenContext->emitInst().make_r_memory(regD, regMemResEA, 0, 32, false, true); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regMemResVal, regD); - return true; -} - -bool PPCRecompilerImlGen_STWCX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) -{ - sint32 rA, rS, rB; - PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regA = rA != 0 ? 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA) : IMLREG_INVALID; - IMLReg regB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rB); - IMLReg regData = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rS); - IMLReg regTmpDataBE = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 2); - IMLReg regTmpCompareBE = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 3); - // calculate EA - IMLReg regCalcEA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY); - if (regA.IsValid()) - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regCalcEA, regA, regB); - else - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regCalcEA, regB); - // get CR bit regs and set LT, GT and SO immediately - IMLReg regCrLT = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT_INDEX_LT); - IMLReg regCrGT = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT_INDEX_GT); - IMLReg regCrEQ = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT_INDEX_EQ); - IMLReg regCrSO = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT_INDEX_SO); - IMLReg regXerSO = _GetRegCR(ppcImlGenContext, 0, Espresso::CR_BIT_INDEX_SO); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regCrLT, 0); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regCrGT, 0); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regCrSO, regXerSO); - // get regs for reservation address and value - IMLReg regMemResEA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_CPU_MEMRES_EA); - IMLReg regMemResVal = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_CPU_MEMRES_VAL); - // compare calculated EA with reservation - IMLReg regTmpBool = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 1); - ppcImlGenContext->emitInst().make_compare(regCalcEA, regMemResEA, regTmpBool, IMLCondition::EQ); - 
ppcImlGenContext->emitInst().make_conditional_jump(regTmpBool, true); - - PPCIMLGen_CreateSegmentBranchedPath(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock, - [&](ppcImlGenContext_t& genCtx) - { - /* branch taken, EA matching */ - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ENDIAN_SWAP, regTmpDataBE, regData); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ENDIAN_SWAP, regTmpCompareBE, regMemResVal); - ppcImlGenContext->emitInst().make_atomic_cmp_store(regMemResEA, regTmpCompareBE, regTmpDataBE, regCrEQ); - }, - [&](ppcImlGenContext_t& genCtx) - { - /* branch not taken, EA mismatching */ - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regCrEQ, 0); - } - ); - - // reset reservation - // I found contradictory information of whether the reservation is cleared in all cases, so unit testing would be required - // Most sources state that it is cleared on successful store. They don't explicitly mention what happens on failure - // "The PowerPC 600 series, part 7: Atomic memory access and cache coherency" states that it is always cleared - // There may also be different behavior between individual PPC architectures - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regMemResEA, 0); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regMemResVal, 0); - - return true; + else if( nb == 2 ) + { + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); // can be the same as gprRegister + // store half-word (shifted << 16) + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, 0, PPC_REC_STORE_STSWI_2, false); + return true; + } + else if( nb == 3 ) + { + // load memory gpr into register + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, 
PPCREC_NAME_R0+rA, false); + // load source register + uint32 sourceRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS, false); // can be the same as gprRegister + // store 3-byte-word (shifted << 8) + PPCRecompilerImlGen_generateNewInstruction_memory_r(ppcImlGenContext, sourceRegister, gprRegister, 0, PPC_REC_STORE_STSWI_3, false); + return true; + } + debug_printf("PPCRecompilerImlGen_STSWI(): Unsupported nb value %d\n", nb); + return false; } bool PPCRecompilerImlGen_DCBZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) @@ -1589,39 +2378,92 @@ bool PPCRecompilerImlGen_DCBZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod rA = (opcode>>16)&0x1F; rB = (opcode>>11)&0x1F; // prepare registers - IMLReg regA = rA!=0?PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA):IMLREG_INVALID; - IMLReg regB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); - // load zero into a temporary register - IMLReg regZero = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regZero, 0); - // prepare EA and align it to cacheline - IMLReg regMemResEA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 1); - if(rA != 0) - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, regMemResEA, regA, regB); + uint32 gprRegisterA = rA!=0?PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false):0; + uint32 gprRegisterB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // store + if( rA != 0 ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_DCBZ, gprRegisterA, gprRegisterB); else - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regMemResEA, regB); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regMemResEA, regMemResEA, ~31); - // zero out the cacheline - for(sint32 i = 0; i < 32; i += 4) 
- ppcImlGenContext->emitInst().make_memory_r(regZero, regMemResEA, i, 32, false); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_DCBZ, gprRegisterB, gprRegisterB); return true; } -bool PPCRecompilerImlGen_OR_NOR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool complementResult) +bool PPCRecompilerImlGen_OR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - if(rS == rB) // check for MR mnemonic - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regA, regS); + // check for MR mnemonic + if( rS == rB ) + { + // simple register copy + if( rA != rS ) // check if no-op + { + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + } + } + else + { + if( opcode&PPC_OPC_RC ) + { + // no effect but CR is updated + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprSourceReg, gprSourceReg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + // no-op + } + } + } else - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_OR, regA, regS, regB); - if(complementResult) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regA, regA); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + { + // rA = rS | rA + 
sint32 gprSource1Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprSource2Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( gprSource1Reg == gprDestReg || gprSource2Reg == gprDestReg ) + { + // make sure we don't overwrite rS or rA + if( gprSource1Reg == gprDestReg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource2Reg); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource1Reg); + } + if( opcode&PPC_OPC_RC ) + { + // fixme: merge CR update into OR instruction above + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + } + } + else + { + // rA = rS + if( gprDestReg != gprSource1Reg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSource1Reg); + } + // rA |= rB + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource2Reg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource2Reg); + } + } + } return true; } @@ -1629,33 +2471,151 @@ bool PPCRecompilerImlGen_ORC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode { sint32 rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - // rA = rS | ~rB; - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regTmp = _GetRegTemporary(ppcImlGenContext, 0); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regTmp, regB); - 
ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_OR, regA, regS, regTmp); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + // hCPU->gpr[rA] = hCPU->gpr[rS] | ~hCPU->gpr[rB]; + sint32 gprSource1Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprSource2Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ORC, gprDestReg, gprSource1Reg, gprSource2Reg, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r_r(ppcImlGenContext, PPCREC_IML_OP_ORC, gprDestReg, gprSource1Reg, gprSource2Reg); return true; } -bool PPCRecompilerImlGen_AND_NAND(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool complementResult) +bool PPCRecompilerImlGen_NOR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - if (regS == regB) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_ASSIGN, regA, regS); + //hCPU->gpr[rA] = ~(hCPU->gpr[rS] | hCPU->gpr[rB]); + // check for NOT mnemonic + if( rS == rB ) + { + // simple register copy with NOT + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_ARITHMETIC); 
+ } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg); + } + } else - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_AND, regA, regS, regB); - if (complementResult) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regA, regA); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + { + // rA = rS | rA + sint32 gprSource1Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprSource2Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( gprSource1Reg == gprDestReg || gprSource2Reg == gprDestReg ) + { + // make sure we don't overwrite rS or rA + if( gprSource1Reg == gprDestReg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource2Reg); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource1Reg); + } + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg); + if( opcode&PPC_OPC_RC ) + { + // fixme: merge CR update into OR instruction above + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + } + } + else + { + // rA = rS + if( gprDestReg != gprSource1Reg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSource1Reg); + } + // rA |= rB + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprDestReg, gprSource2Reg); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_ARITHMETIC); + } + else + { + 
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg); + } + } + } + return true; +} + +bool PPCRecompilerImlGen_AND(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rS, rA, rB; + PPC_OPC_TEMPL_X(opcode, rS, rA, rB); + // check for MR mnemonic + if( rS == rB ) + { + // simple register copy + if( rA != rS ) // check if no-op + { + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + } + } + else + { + cemu_assert_unimplemented(); // no-op -> verify this case + } + } + else + { + // rA = rS & rA + sint32 gprSource1Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprSource2Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( gprSource1Reg == gprDestReg || gprSource2Reg == gprDestReg ) + { + // make sure we don't overwrite rS or rA + if( gprSource1Reg == gprDestReg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprSource2Reg); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprSource1Reg); + } + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + } + } + else + { + // rA = rS + if( gprDestReg != gprSource1Reg ) + { + 
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSource1Reg); + } + // rA &= rB + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprSource2Reg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprSource2Reg); + } + } + } return true; } @@ -1663,101 +2623,277 @@ bool PPCRecompilerImlGen_ANDC(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod { sint32 rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - // rA = rS & ~rB; - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - IMLReg regTmp = _GetRegTemporary(ppcImlGenContext, 0); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regTmp, regB); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_AND, regA, regS, regTmp); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + //hCPU->gpr[rA] = hCPU->gpr[rS] & ~hCPU->gpr[rB]; + //if (Opcode & PPC_OPC_RC) { + if( rS == rB ) + { + // result is always 0 -> replace with XOR rA,rA + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprDestReg); + } + } + else if( rA == rB ) + { + // rB already in rA, therefore we complement rA first and then AND it with rS + sint32 gprRS = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = ~rA + 
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprRA, gprRA); + // rA &= rS + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprRA, gprRS, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprRA, gprRS); + } + } + else + { + // a & (~b) is the same as ~((~a) | b) + sint32 gprRA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + sint32 gprRB = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprRS = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + // move rS to rA (if required) + if( gprRA != gprRS ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprRA, gprRS); + } + // rS already in rA, therefore we complement rS first and then OR it with rB + // rA = ~rA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprRA, gprRA); + // rA |= rB + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_OR, gprRA, gprRB); + // rA = ~rA + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprRA, gprRA, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprRA, gprRA); + } + } return true; } -bool PPCRecompilerImlGen_XOR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool complementResult) +void PPCRecompilerImlGen_ANDI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rS, rA; + uint32 imm; + PPC_OPC_TEMPL_D_UImm(opcode, rS, rA, imm); + // ANDI. 
always sets cr0 flags + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = rS + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + // rA &= imm32 + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_AND, gprDestReg, (sint32)imm, 0, false, false, 0, PPCREC_CR_MODE_LOGICAL); +} + +void PPCRecompilerImlGen_ANDIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rS, rA; + uint32 imm; + PPC_OPC_TEMPL_D_Shift16(opcode, rS, rA, imm); + // ANDI. always sets cr0 flags + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = rS + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + // rA &= imm32 + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_AND, gprDestReg, (sint32)imm, 0, false, false, 0, PPCREC_CR_MODE_LOGICAL); +} + +bool PPCRecompilerImlGen_XOR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rS, rA, rB; PPC_OPC_TEMPL_X(opcode, rS, rA, rB); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); if( rS == rB ) { - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regA, 0); + // xor register with itself + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, 
PPCREC_IML_OP_XOR, gprDestReg, gprDestReg); + } } else { - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regB = _GetRegGPR(ppcImlGenContext, rB); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_XOR, regA, regS, regB); + // rA = rS ^ rA + sint32 gprSource1Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprSource2Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( gprSource1Reg == gprDestReg || gprSource2Reg == gprDestReg ) + { + // make sure we don't overwrite rS or rA + if( gprSource1Reg == gprDestReg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource2Reg); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource1Reg); + } + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_AND, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + } + } + else + { + // rA = rS + if( gprDestReg != gprSource1Reg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSource1Reg); + } + // rA ^= rB + if( opcode&PPC_OPC_RC ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource2Reg, 0, PPCREC_CR_MODE_LOGICAL); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource2Reg); + } + } } - if (complementResult) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NOT, regA, regA); - if (opcode & PPC_OPC_RC) - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); return true; } -void PPCRecompilerImlGen_ANDI_ANDIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isShifted) + +bool 
PPCRecompilerImlGen_EQV(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - sint32 rS, rA; - uint32 imm; - if (isShifted) + sint32 rS, rA, rB; + PPC_OPC_TEMPL_X(opcode, rS, rA, rB); + if( rS == rB ) { - PPC_OPC_TEMPL_D_Shift16(opcode, rS, rA, imm); + // xor register with itself, then invert + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprDestReg); + if( opcode&PPC_OPC_RC ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg); } else { - PPC_OPC_TEMPL_D_UImm(opcode, rS, rA, imm); + // rA = ~(rS ^ rA) + sint32 gprSource1Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprSource2Reg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + if( gprSource1Reg == gprDestReg || gprSource2Reg == gprDestReg ) + { + // make sure we don't overwrite rS or rA + if( gprSource1Reg == gprDestReg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource2Reg); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource1Reg); + } + } + else + { + // rA = rS + if( gprDestReg != gprSource1Reg ) + { + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSource1Reg); + } + // rA ^= rB + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_XOR, gprDestReg, gprSource2Reg); + } + if( opcode&PPC_OPC_RC ) + 
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg, 0, PPCREC_CR_MODE_LOGICAL); + else + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_NOT, gprDestReg, gprDestReg); } - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, regA, regS, (sint32)imm); - // ANDI/ANDIS always updates cr0 - PPCImlGen_UpdateCR0(ppcImlGenContext, regA); + return true; } -void PPCRecompilerImlGen_ORI_ORIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isShifted) +void PPCRecompilerImlGen_ORI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rS, rA; uint32 imm; - if (isShifted) - { - PPC_OPC_TEMPL_D_Shift16(opcode, rS, rA, imm); - } - else - { - PPC_OPC_TEMPL_D_UImm(opcode, rS, rA, imm); - } - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_OR, regA, regS, (sint32)imm); + PPC_OPC_TEMPL_D_UImm(opcode, rS, rA, imm); + // ORI does not set cr0 flags + //hCPU->gpr[rA] = hCPU->gpr[rS] | imm; + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = rS + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + // rA |= imm32 + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_OR, gprDestReg, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); } -void PPCRecompilerImlGen_XORI_XORIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isShifted) +void PPCRecompilerImlGen_ORIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rS, rA; uint32 imm; - if (isShifted) - { - 
PPC_OPC_TEMPL_D_Shift16(opcode, rS, rA, imm); - } - else - { - PPC_OPC_TEMPL_D_UImm(opcode, rS, rA, imm); - } - IMLReg regS = _GetRegGPR(ppcImlGenContext, rS); - IMLReg regA = _GetRegGPR(ppcImlGenContext, rA); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_XOR, regA, regS, (sint32)imm); + PPC_OPC_TEMPL_D_Shift16(opcode, rS, rA, imm); + // ORI does not set cr0 flags + //hCPU->gpr[rA] = hCPU->gpr[rS] | imm; + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = rS + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + // rA |= imm32 + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_OR, gprDestReg, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); +} + +void PPCRecompilerImlGen_XORI(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rS, rA; + uint32 imm; + PPC_OPC_TEMPL_D_UImm(opcode, rS, rA, imm); + //hCPU->gpr[rA] = hCPU->gpr[rS] ^ imm; + // XORI does not set cr0 flags + sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = rS + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + // rA |= imm32 + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_XOR, gprDestReg, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); +} + +void PPCRecompilerImlGen_XORIS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rS, rA; + uint32 imm; + PPC_OPC_TEMPL_D_Shift16(opcode, rS, rA, imm); + //hCPU->gpr[rA] = hCPU->gpr[rS] ^ imm; + // XORIS does not set cr0 flags + 
sint32 gprSourceReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rS); + sint32 gprDestReg = PPCRecompilerImlGen_loadOverwriteRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); + // rA = rS + if( gprDestReg != gprSourceReg ) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ASSIGN, gprDestReg, gprSourceReg); + // rA |= imm32 + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_XOR, gprDestReg, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); } bool PPCRecompilerImlGen_CROR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int crD, crA, crB; PPC_OPC_TEMPL_X(opcode, crD, crA, crB); - IMLReg regCrA = _GetRegCR(ppcImlGenContext, crA); - IMLReg regCrB = _GetRegCR(ppcImlGenContext, crB); - IMLReg regCrR = _GetRegCR(ppcImlGenContext, crD); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_OR, regCrR, regCrA, regCrB); + PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext, PPCREC_IML_OP_CR_OR, crD, crA, crB); return true; } @@ -1765,12 +2901,7 @@ bool PPCRecompilerImlGen_CRORC(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { int crD, crA, crB; PPC_OPC_TEMPL_X(opcode, crD, crA, crB); - IMLReg regCrA = _GetRegCR(ppcImlGenContext, crA); - IMLReg regCrB = _GetRegCR(ppcImlGenContext, crB); - IMLReg regCrR = _GetRegCR(ppcImlGenContext, crD); - IMLReg regTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_XOR, regTmp, regCrB, 1); // invert crB - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_OR, regCrR, regCrA, regTmp); + PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext, PPCREC_IML_OP_CR_ORC, crD, crA, crB); return true; } @@ -1778,10 +2909,7 @@ bool PPCRecompilerImlGen_CRAND(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { int crD, crA, crB; PPC_OPC_TEMPL_X(opcode, crD, crA, crB); - IMLReg regCrA = _GetRegCR(ppcImlGenContext, crA); - IMLReg regCrB = 
_GetRegCR(ppcImlGenContext, crB); - IMLReg regCrR = _GetRegCR(ppcImlGenContext, crD); - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_AND, regCrR, regCrA, regCrB); + PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext, PPCREC_IML_OP_CR_AND, crD, crA, crB); return true; } @@ -1789,12 +2917,7 @@ bool PPCRecompilerImlGen_CRANDC(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { int crD, crA, crB; PPC_OPC_TEMPL_X(opcode, crD, crA, crB); - IMLReg regCrA = _GetRegCR(ppcImlGenContext, crA); - IMLReg regCrB = _GetRegCR(ppcImlGenContext, crB); - IMLReg regCrR = _GetRegCR(ppcImlGenContext, crD); - IMLReg regTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_XOR, regTmp, regCrB, 1); // invert crB - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_AND, regCrR, regCrA, regTmp); + PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext, PPCREC_IML_OP_CR_ANDC, crD, crA, crB); return true; } @@ -1802,15 +2925,17 @@ bool PPCRecompilerImlGen_CRXOR(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { int crD, crA, crB; PPC_OPC_TEMPL_X(opcode, crD, crA, crB); - IMLReg regCrA = _GetRegCR(ppcImlGenContext, crA); - IMLReg regCrB = _GetRegCR(ppcImlGenContext, crB); - IMLReg regCrR = _GetRegCR(ppcImlGenContext, crD); - if (regCrA == regCrB) + if (crA == crB) { - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regCrR, 0); + // both operands equal, clear bit in crD + // PPC's assert() uses this to pass a parameter to OSPanic + PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext, PPCREC_IML_OP_CR_CLEAR, crD, 0, 0); return true; } - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_XOR, regCrR, regCrA, regCrB); + else + { + return false; + } return true; } @@ -1818,24 +2943,23 @@ bool PPCRecompilerImlGen_CREQV(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { int crD, crA, crB; PPC_OPC_TEMPL_X(opcode, crD, crA, crB); - IMLReg regCrA = 
_GetRegCR(ppcImlGenContext, crA); - IMLReg regCrB = _GetRegCR(ppcImlGenContext, crB); - IMLReg regCrR = _GetRegCR(ppcImlGenContext, crD); - if (regCrA == regCrB) + if (crA == crB) { - ppcImlGenContext->emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, regCrR, 1); + // both operands equal, set bit in crD + PPCRecompilerImlGen_generateNewInstruction_cr(ppcImlGenContext, PPCREC_IML_OP_CR_SET, crD, 0, 0); return true; } - IMLReg regTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_XOR, regTmp, regCrB, 1); // invert crB - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_XOR, regCrR, regCrA, regTmp); + else + { + return false; + } return true; } bool PPCRecompilerImlGen_HLE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { uint32 hleFuncId = opcode&0xFFFF; - ppcImlGenContext->emitInst().make_macro(PPCREC_IML_MACRO_HLE, ppcImlGenContext->ppcAddressOfCurrentInstruction, hleFuncId, 0, IMLREG_INVALID); + PPCRecompilerImlGen_generateNewInstruction_macro(ppcImlGenContext, PPCREC_IML_MACRO_HLE, ppcImlGenContext->ppcAddressOfCurrentInstruction, hleFuncId, 0); return true; } @@ -1846,6 +2970,12 @@ uint32 PPCRecompiler_iterateCurrentInstruction(ppcImlGenContext_t* ppcImlGenCont return v; } +uint32 PPCRecompiler_getInstructionByOffset(ppcImlGenContext_t* ppcImlGenContext, uint32 offset) +{ + uint32 v = CPU_swapEndianU32(*(ppcImlGenContext->currentInstruction + offset/4)); + return v; +} + uint32 PPCRecompiler_getCurrentInstruction(ppcImlGenContext_t* ppcImlGenContext) { uint32 v = CPU_swapEndianU32(*(ppcImlGenContext->currentInstruction)); @@ -1858,10 +2988,480 @@ uint32 PPCRecompiler_getPreviousInstruction(ppcImlGenContext_t* ppcImlGenContext return v; } -void PPCRecompilerIml_setSegmentPoint(IMLSegmentPoint* segmentPoint, IMLSegment* imlSegment, sint32 index) +char _tempOpcodename[32]; + +const char* PPCRecompiler_getOpcodeDebugName(PPCRecImlInstruction_t* iml) +{ + uint32 op = 
iml->operation; + if (op == PPCREC_IML_OP_ASSIGN) + return "MOV"; + else if (op == PPCREC_IML_OP_ADD) + return "ADD"; + else if (op == PPCREC_IML_OP_SUB) + return "SUB"; + else if (op == PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY) + return "ADDCSC"; + else if (op == PPCREC_IML_OP_OR) + return "OR"; + else if (op == PPCREC_IML_OP_AND) + return "AND"; + else if (op == PPCREC_IML_OP_XOR) + return "XOR"; + else if (op == PPCREC_IML_OP_LEFT_SHIFT) + return "LSH"; + else if (op == PPCREC_IML_OP_RIGHT_SHIFT) + return "RSH"; + else if (op == PPCREC_IML_OP_MULTIPLY_SIGNED) + return "MULS"; + else if (op == PPCREC_IML_OP_DIVIDE_SIGNED) + return "DIVS"; + + sprintf(_tempOpcodename, "OP0%02x_T%d", iml->operation, iml->type); + return _tempOpcodename; +} + +void PPCRecDebug_addRegisterParam(StringBuf& strOutput, sint32 virtualRegister, bool isLast = false) +{ + if (isLast) + { + if (virtualRegister < 10) + strOutput.addFmt("t{} ", virtualRegister); + else + strOutput.addFmt("t{}", virtualRegister); + return; + } + if (virtualRegister < 10) + strOutput.addFmt("t{} , ", virtualRegister); + else + strOutput.addFmt("t{}, ", virtualRegister); +} + +void PPCRecDebug_addS32Param(StringBuf& strOutput, sint32 val, bool isLast = false) +{ + if (isLast) + { + strOutput.addFmt("0x{:08x}", val); + return; + } + strOutput.addFmt("0x{:08x}, ", val); +} + +void PPCRecompilerDebug_printLivenessRangeInfo(StringBuf& currentLineText, PPCRecImlSegment_t* imlSegment, sint32 offset) +{ + // pad to 70 characters + sint32 index = currentLineText.getLen(); + while (index < 70) + { + debug_printf(" "); + index++; + } + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while (subrangeItr) + { + if (offset == subrangeItr->start.index) + { + if (false)//subrange->isDirtied && i == subrange->becomesDirtyAtIndex.index) + { + debug_printf("*%-2d", subrangeItr->range->virtualRegister); + } + else + { + debug_printf("|%-2d", subrangeItr->range->virtualRegister); + } + } + else if 
(false)//subrange->isDirtied && i == subrange->becomesDirtyAtIndex.index ) + { + debug_printf("* "); + } + else if (offset >= subrangeItr->start.index && offset < subrangeItr->end.index) + { + debug_printf("| "); + } + else + { + debug_printf(" "); + } + index += 3; + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } +} + +void PPCRecompiler_dumpIMLSegment(PPCRecImlSegment_t* imlSegment, sint32 segmentIndex, bool printLivenessRangeInfo) +{ + StringBuf strOutput(1024); + + strOutput.addFmt("SEGMENT 0x{:04x} 0x{:08x} PPC 0x{:08x} - 0x{:08x} Loop-depth {}", segmentIndex, imlSegment->ppcAddress, imlSegment->ppcAddrMin, imlSegment->ppcAddrMax, imlSegment->loopDepth); + if (imlSegment->isEnterable) + { + strOutput.addFmt(" ENTERABLE (0x{:08x})", imlSegment->enterPPCAddress); + } + else if( imlSegment->isJumpDestination ) + { + strOutput.addFmt(" JUMP-DEST (0x{:08x})", imlSegment->jumpDestinationPPCAddress); + } + + debug_printf("%s\n", strOutput.c_str()); + + strOutput.reset(); + strOutput.addFmt("SEGMENT NAME 0x{:016x}", (uintptr_t)imlSegment); + debug_printf("%s", strOutput.c_str()); + + if (printLivenessRangeInfo) + { + PPCRecompilerDebug_printLivenessRangeInfo(strOutput, imlSegment, RA_INTER_RANGE_START); + } + debug_printf("\n"); + + sint32 lineOffsetParameters = 18; + + for(sint32 i=0; iimlListCount; i++) + { + // don't log NOP instructions unless they have an associated PPC address + if(imlSegment->imlList[i].type == PPCREC_IML_TYPE_NO_OP && imlSegment->imlList[i].associatedPPCAddress == MPTR_NULL) + continue; + strOutput.reset(); + strOutput.addFmt("{:08x} ", imlSegment->imlList[i].associatedPPCAddress); + if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_NAME || imlSegment->imlList[i].type == PPCREC_IML_TYPE_NAME_R) + { + if(imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_NAME) + strOutput.add("LD_NAME"); + else + strOutput.add("ST_NAME"); + while ((sint32)strOutput.getLen() < lineOffsetParameters) + strOutput.add(" "); + + 
PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_name.registerIndex); + + strOutput.addFmt("name_{} (", imlSegment->imlList[i].op_r_name.registerIndex, imlSegment->imlList[i].op_r_name.name); + if( imlSegment->imlList[i].op_r_name.name >= PPCREC_NAME_R0 && imlSegment->imlList[i].op_r_name.name < (PPCREC_NAME_R0+999) ) + { + strOutput.addFmt("r{}", imlSegment->imlList[i].op_r_name.name-PPCREC_NAME_R0); + } + else if( imlSegment->imlList[i].op_r_name.name >= PPCREC_NAME_SPR0 && imlSegment->imlList[i].op_r_name.name < (PPCREC_NAME_SPR0+999) ) + { + strOutput.addFmt("spr{}", imlSegment->imlList[i].op_r_name.name-PPCREC_NAME_SPR0); + } + else + strOutput.add("ukn"); + strOutput.add(")"); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_R ) + { + strOutput.addFmt("{}", PPCRecompiler_getOpcodeDebugName(imlSegment->imlList+i)); + while ((sint32)strOutput.getLen() < lineOffsetParameters) + strOutput.add(" "); + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r.registerResult); + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r.registerA, true); + + if( imlSegment->imlList[i].crRegister != PPC_REC_INVALID_REGISTER ) + { + strOutput.addFmt(" -> CR{}", imlSegment->imlList[i].crRegister); + } + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_R_R ) + { + strOutput.addFmt("{}", PPCRecompiler_getOpcodeDebugName(imlSegment->imlList + i)); + while ((sint32)strOutput.getLen() < lineOffsetParameters) + strOutput.add(" "); + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r_r.registerResult); + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r_r.registerA); + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r_r.registerB, true); + if( imlSegment->imlList[i].crRegister != PPC_REC_INVALID_REGISTER ) + { + strOutput.addFmt(" -> CR{}", imlSegment->imlList[i].crRegister); + } + } + else if (imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_R_S32) 
+ { + strOutput.addFmt("{}", PPCRecompiler_getOpcodeDebugName(imlSegment->imlList + i)); + while ((sint32)strOutput.getLen() < lineOffsetParameters) + strOutput.add(" "); + + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r_s32.registerResult); + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_r_s32.registerA); + PPCRecDebug_addS32Param(strOutput, imlSegment->imlList[i].op_r_r_s32.immS32, true); + + if (imlSegment->imlList[i].crRegister != PPC_REC_INVALID_REGISTER) + { + strOutput.addFmt(" -> CR{}", imlSegment->imlList[i].crRegister); + } + } + else if (imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_S32) + { + strOutput.addFmt("{}", PPCRecompiler_getOpcodeDebugName(imlSegment->imlList + i)); + while ((sint32)strOutput.getLen() < lineOffsetParameters) + strOutput.add(" "); + + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_r_immS32.registerIndex); + PPCRecDebug_addS32Param(strOutput, imlSegment->imlList[i].op_r_immS32.immS32, true); + + if (imlSegment->imlList[i].crRegister != PPC_REC_INVALID_REGISTER) + { + strOutput.addFmt(" -> CR{}", imlSegment->imlList[i].crRegister); + } + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_JUMPMARK ) + { + strOutput.addFmt("jm_{:08x}:", imlSegment->imlList[i].op_jumpmark.address); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_PPC_ENTER ) + { + strOutput.addFmt("ppcEnter_{:08x}:", imlSegment->imlList[i].op_ppcEnter.ppcAddress); + } + else if(imlSegment->imlList[i].type == PPCREC_IML_TYPE_LOAD || imlSegment->imlList[i].type == PPCREC_IML_TYPE_STORE || + imlSegment->imlList[i].type == PPCREC_IML_TYPE_LOAD_INDEXED || imlSegment->imlList[i].type == PPCREC_IML_TYPE_STORE_INDEXED ) + { + if(imlSegment->imlList[i].type == PPCREC_IML_TYPE_LOAD || imlSegment->imlList[i].type == PPCREC_IML_TYPE_LOAD_INDEXED) + strOutput.add("LD_"); + else + strOutput.add("ST_"); + + if (imlSegment->imlList[i].op_storeLoad.flags2.signExtend) + strOutput.add("S"); + else 
+ strOutput.add("U"); + strOutput.addFmt("{}", imlSegment->imlList[i].op_storeLoad.copyWidth); + + while ((sint32)strOutput.getLen() < lineOffsetParameters) + strOutput.add(" "); + + PPCRecDebug_addRegisterParam(strOutput, imlSegment->imlList[i].op_storeLoad.registerData); + + if(imlSegment->imlList[i].type == PPCREC_IML_TYPE_LOAD_INDEXED || imlSegment->imlList[i].type == PPCREC_IML_TYPE_STORE_INDEXED) + strOutput.addFmt("[t{}+t{}]", imlSegment->imlList[i].op_storeLoad.registerMem, imlSegment->imlList[i].op_storeLoad.registerMem2); + else + strOutput.addFmt("[t{}+{}]", imlSegment->imlList[i].op_storeLoad.registerMem, imlSegment->imlList[i].op_storeLoad.immS32); + } + else if (imlSegment->imlList[i].type == PPCREC_IML_TYPE_MEM2MEM) + { + strOutput.addFmt("{} [t{}+{}] = [t{}+{}]", imlSegment->imlList[i].op_mem2mem.copyWidth, imlSegment->imlList[i].op_mem2mem.dst.registerMem, imlSegment->imlList[i].op_mem2mem.dst.immS32, imlSegment->imlList[i].op_mem2mem.src.registerMem, imlSegment->imlList[i].op_mem2mem.src.immS32); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_CJUMP ) + { + if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_E) + strOutput.add("JE"); + else if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_NE) + strOutput.add("JNE"); + else if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_G) + strOutput.add("JG"); + else if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_GE) + strOutput.add("JGE"); + else if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_L) + strOutput.add("JL"); + else if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_LE) + strOutput.add("JLE"); + else if (imlSegment->imlList[i].op_conditionalJump.condition == PPCREC_JUMP_CONDITION_NONE) + strOutput.add("JALW"); // jump always + else + cemu_assert_unimplemented(); + strOutput.addFmt(" jm_{:08x} 
(cr{})", imlSegment->imlList[i].op_conditionalJump.jumpmarkAddress, imlSegment->imlList[i].crRegister); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_NO_OP ) + { + strOutput.add("NOP"); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_MACRO ) + { + if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_BLR ) + { + strOutput.addFmt("MACRO BLR 0x{:08x} cycles (depr): {}", imlSegment->imlList[i].op_macro.param, (sint32)imlSegment->imlList[i].op_macro.paramU16); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_BLRL ) + { + strOutput.addFmt("MACRO BLRL 0x{:08x} cycles (depr): {}", imlSegment->imlList[i].op_macro.param, (sint32)imlSegment->imlList[i].op_macro.paramU16); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_BCTR ) + { + strOutput.addFmt("MACRO BCTR 0x{:08x} cycles (depr): {}", imlSegment->imlList[i].op_macro.param, (sint32)imlSegment->imlList[i].op_macro.paramU16); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_BCTRL ) + { + strOutput.addFmt("MACRO BCTRL 0x{:08x} cycles (depr): {}", imlSegment->imlList[i].op_macro.param, (sint32)imlSegment->imlList[i].op_macro.paramU16); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_BL ) + { + strOutput.addFmt("MACRO BL 0x{:08x} -> 0x{:08x} cycles (depr): {}", imlSegment->imlList[i].op_macro.param, imlSegment->imlList[i].op_macro.param2, (sint32)imlSegment->imlList[i].op_macro.paramU16); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_B_FAR ) + { + strOutput.addFmt("MACRO B_FAR 0x{:08x} -> 0x{:08x} cycles (depr): {}", imlSegment->imlList[i].op_macro.param, imlSegment->imlList[i].op_macro.param2, (sint32)imlSegment->imlList[i].op_macro.paramU16); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_LEAVE ) + { + strOutput.addFmt("MACRO LEAVE ppc: 0x{:08x}", imlSegment->imlList[i].op_macro.param); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_HLE ) + { + 
strOutput.addFmt("MACRO HLE ppcAddr: 0x{:08x} funcId: 0x{:08x}", imlSegment->imlList[i].op_macro.param, imlSegment->imlList[i].op_macro.param2); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_MFTB ) + { + strOutput.addFmt("MACRO MFTB ppcAddr: 0x{:08x} sprId: 0x{:08x}", imlSegment->imlList[i].op_macro.param, imlSegment->imlList[i].op_macro.param2); + } + else if( imlSegment->imlList[i].operation == PPCREC_IML_MACRO_COUNT_CYCLES ) + { + strOutput.addFmt("MACRO COUNT_CYCLES cycles: {}", imlSegment->imlList[i].op_macro.param); + } + else + { + strOutput.addFmt("MACRO ukn operation {}", imlSegment->imlList[i].operation); + } + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_R_NAME ) + { + strOutput.addFmt("fpr_t{} = name_{} (", imlSegment->imlList[i].op_r_name.registerIndex, imlSegment->imlList[i].op_r_name.name); + if( imlSegment->imlList[i].op_r_name.name >= PPCREC_NAME_FPR0 && imlSegment->imlList[i].op_r_name.name < (PPCREC_NAME_FPR0+999) ) + { + strOutput.addFmt("fpr{}", imlSegment->imlList[i].op_r_name.name-PPCREC_NAME_FPR0); + } + else if( imlSegment->imlList[i].op_r_name.name >= PPCREC_NAME_TEMPORARY_FPR0 && imlSegment->imlList[i].op_r_name.name < (PPCREC_NAME_TEMPORARY_FPR0+999) ) + { + strOutput.addFmt("tempFpr{}", imlSegment->imlList[i].op_r_name.name-PPCREC_NAME_TEMPORARY_FPR0); + } + else + strOutput.add("ukn"); + strOutput.add(")"); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_NAME_R ) + { + strOutput.addFmt("name_{} (", imlSegment->imlList[i].op_r_name.name); + if( imlSegment->imlList[i].op_r_name.name >= PPCREC_NAME_FPR0 && imlSegment->imlList[i].op_r_name.name < (PPCREC_NAME_FPR0+999) ) + { + strOutput.addFmt("fpr{}", imlSegment->imlList[i].op_r_name.name-PPCREC_NAME_FPR0); + } + else if( imlSegment->imlList[i].op_r_name.name >= PPCREC_NAME_TEMPORARY_FPR0 && imlSegment->imlList[i].op_r_name.name < (PPCREC_NAME_TEMPORARY_FPR0+999) ) + { + strOutput.addFmt("tempFpr{}", 
imlSegment->imlList[i].op_r_name.name-PPCREC_NAME_TEMPORARY_FPR0); + } + else + strOutput.add("ukn"); + strOutput.addFmt(") = fpr_t{}", imlSegment->imlList[i].op_r_name.registerIndex); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_LOAD ) + { + strOutput.addFmt("fpr_t{} = ", imlSegment->imlList[i].op_storeLoad.registerData); + if( imlSegment->imlList[i].op_storeLoad.flags2.signExtend ) + strOutput.add("S"); + else + strOutput.add("U"); + strOutput.addFmt("{} [t{}+{}] mode {}", imlSegment->imlList[i].op_storeLoad.copyWidth / 8, imlSegment->imlList[i].op_storeLoad.registerMem, imlSegment->imlList[i].op_storeLoad.immS32, imlSegment->imlList[i].op_storeLoad.mode); + if (imlSegment->imlList[i].op_storeLoad.flags2.notExpanded) + { + strOutput.addFmt(" "); + } + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_STORE ) + { + if( imlSegment->imlList[i].op_storeLoad.flags2.signExtend ) + strOutput.add("S"); + else + strOutput.add("U"); + strOutput.addFmt("{} [t{}+{}]", imlSegment->imlList[i].op_storeLoad.copyWidth/8, imlSegment->imlList[i].op_storeLoad.registerMem, imlSegment->imlList[i].op_storeLoad.immS32); + strOutput.addFmt("= fpr_t{} mode {}\n", imlSegment->imlList[i].op_storeLoad.registerData, imlSegment->imlList[i].op_storeLoad.mode); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_R_R ) + { + strOutput.addFmt("{:-6} ", PPCRecompiler_getOpcodeDebugName(&imlSegment->imlList[i])); + strOutput.addFmt("fpr{:02d}, fpr{:02d}", imlSegment->imlList[i].op_fpr_r_r.registerResult, imlSegment->imlList[i].op_fpr_r_r.registerOperand); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_R_R_R_R ) + { + strOutput.addFmt("{:-6} ", PPCRecompiler_getOpcodeDebugName(&imlSegment->imlList[i])); + strOutput.addFmt("fpr{:02d}, fpr{:02d}, fpr{:02d}, fpr{:02d}", imlSegment->imlList[i].op_fpr_r_r_r_r.registerResult, imlSegment->imlList[i].op_fpr_r_r_r_r.registerOperandA, imlSegment->imlList[i].op_fpr_r_r_r_r.registerOperandB, 
imlSegment->imlList[i].op_fpr_r_r_r_r.registerOperandC); + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_R_R_R ) + { + strOutput.addFmt("{:-6} ", PPCRecompiler_getOpcodeDebugName(&imlSegment->imlList[i])); + strOutput.addFmt("fpr{:02d}, fpr{:02d}, fpr{:02d}", imlSegment->imlList[i].op_fpr_r_r_r.registerResult, imlSegment->imlList[i].op_fpr_r_r_r.registerOperandA, imlSegment->imlList[i].op_fpr_r_r_r.registerOperandB); + } + else if (imlSegment->imlList[i].type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) + { + strOutput.addFmt("CYCLE_CHECK jm_{:08x}\n", imlSegment->imlList[i].op_conditionalJump.jumpmarkAddress); + } + else if (imlSegment->imlList[i].type == PPCREC_IML_TYPE_CONDITIONAL_R_S32) + { + strOutput.addFmt("t{} ", imlSegment->imlList[i].op_conditional_r_s32.registerIndex); + bool displayAsHex = false; + if (imlSegment->imlList[i].operation == PPCREC_IML_OP_ASSIGN) + { + displayAsHex = true; + strOutput.add("="); + } + else + strOutput.addFmt("(unknown operation CONDITIONAL_R_S32 {})", imlSegment->imlList[i].operation); + if (displayAsHex) + strOutput.addFmt(" 0x{:x}", imlSegment->imlList[i].op_conditional_r_s32.immS32); + else + strOutput.addFmt(" {}", imlSegment->imlList[i].op_conditional_r_s32.immS32); + strOutput.add(" (conditional)"); + if (imlSegment->imlList[i].crRegister != PPC_REC_INVALID_REGISTER) + { + strOutput.addFmt(" -> and update CR{}", imlSegment->imlList[i].crRegister); + } + } + else + { + strOutput.addFmt("Unknown iml type {}", imlSegment->imlList[i].type); + } + debug_printf("%s", strOutput.c_str()); + if (printLivenessRangeInfo) + { + PPCRecompilerDebug_printLivenessRangeInfo(strOutput, imlSegment, i); + } + debug_printf("\n"); + } + // all ranges + if (printLivenessRangeInfo) + { + debug_printf("Ranges-VirtReg "); + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while(subrangeItr) + { + debug_printf("v%-2d", subrangeItr->range->virtualRegister); + subrangeItr = 
subrangeItr->link_segmentSubrangesGPR.next; + } + debug_printf("\n"); + debug_printf("Ranges-PhysReg "); + subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while (subrangeItr) + { + debug_printf("p%-2d", subrangeItr->range->physicalRegister); + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + debug_printf("\n"); + } + // branch info + debug_printf("Links from: "); + for (sint32 i = 0; i < imlSegment->list_prevSegments.size(); i++) + { + if (i) + debug_printf(", "); + debug_printf("%p", (void*)imlSegment->list_prevSegments[i]); + } + debug_printf("\n"); + debug_printf("Links to: "); + if (imlSegment->nextSegmentBranchNotTaken) + debug_printf("%p (no branch), ", (void*)imlSegment->nextSegmentBranchNotTaken); + if (imlSegment->nextSegmentBranchTaken) + debug_printf("%p (branch)", (void*)imlSegment->nextSegmentBranchTaken); + debug_printf("\n"); +} + +void PPCRecompiler_dumpIML(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext) +{ + for(sint32 f=0; fsegmentListCount; f++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[f]; + PPCRecompiler_dumpIMLSegment(imlSegment, f); + debug_printf("\n"); + } +} + +void PPCRecompilerIml_setSegmentPoint(ppcRecompilerSegmentPoint_t* segmentPoint, PPCRecImlSegment_t* imlSegment, sint32 index) { segmentPoint->imlSegment = imlSegment; - segmentPoint->SetInstructionIndex(index); + segmentPoint->index = index; if (imlSegment->segmentPointList) imlSegment->segmentPointList->prev = segmentPoint; segmentPoint->prev = nullptr; @@ -1869,7 +3469,7 @@ void PPCRecompilerIml_setSegmentPoint(IMLSegmentPoint* segmentPoint, IMLSegment* imlSegment->segmentPointList = segmentPoint; } -void PPCRecompilerIml_removeSegmentPoint(IMLSegmentPoint* segmentPoint) +void PPCRecompilerIml_removeSegmentPoint(ppcRecompilerSegmentPoint_t* segmentPoint) { if (segmentPoint->prev) segmentPoint->prev->next = segmentPoint->next; @@ -1881,60 +3481,147 @@ void 
PPCRecompilerIml_removeSegmentPoint(IMLSegmentPoint* segmentPoint) /* * Insert multiple no-op instructions -* Warning: Can invalidate any previous instruction pointers from the same segment +* Warning: Can invalidate any previous instruction structs from the same segment */ -void PPCRecompiler_pushBackIMLInstructions(IMLSegment* imlSegment, sint32 index, sint32 shiftBackCount) +void PPCRecompiler_pushBackIMLInstructions(PPCRecImlSegment_t* imlSegment, sint32 index, sint32 shiftBackCount) { - cemu_assert_debug(index >= 0 && index <= imlSegment->imlList.size()); - - imlSegment->imlList.insert(imlSegment->imlList.begin() + index, shiftBackCount, {}); - - memset(imlSegment->imlList.data() + index, 0, sizeof(IMLInstruction) * shiftBackCount); + cemu_assert(index >= 0 && index <= imlSegment->imlListCount); + if (imlSegment->imlListCount + shiftBackCount > imlSegment->imlListSize) + { + sint32 newSize = imlSegment->imlListCount + shiftBackCount + std::max(2, imlSegment->imlListSize/2); + imlSegment->imlList = (PPCRecImlInstruction_t*)realloc(imlSegment->imlList, sizeof(PPCRecImlInstruction_t)*newSize); + imlSegment->imlListSize = newSize; + } + for (sint32 i = (sint32)imlSegment->imlListCount - 1; i >= index; i--) + { + memcpy(imlSegment->imlList + (i + shiftBackCount), imlSegment->imlList + i, sizeof(PPCRecImlInstruction_t)); + } // fill empty space with NOP instructions for (sint32 i = 0; i < shiftBackCount; i++) { imlSegment->imlList[index + i].type = PPCREC_IML_TYPE_NONE; } + imlSegment->imlListCount += shiftBackCount; - // update position of segment points if (imlSegment->segmentPointList) { - IMLSegmentPoint* segmentPoint = imlSegment->segmentPointList; + ppcRecompilerSegmentPoint_t* segmentPoint = imlSegment->segmentPointList; while (segmentPoint) { - segmentPoint->ShiftIfAfter(index, shiftBackCount); + if (segmentPoint->index != RA_INTER_RANGE_START && segmentPoint->index != RA_INTER_RANGE_END) + { + if (segmentPoint->index >= index) + segmentPoint->index += 
shiftBackCount; + } + // next segmentPoint = segmentPoint->next; } } } -IMLInstruction* PPCRecompiler_insertInstruction(IMLSegment* imlSegment, sint32 index) +/* +* Insert and return new instruction at index +* Warning: Can invalidate any previous instruction structs from the same segment +*/ +PPCRecImlInstruction_t* PPCRecompiler_insertInstruction(PPCRecImlSegment_t* imlSegment, sint32 index) { PPCRecompiler_pushBackIMLInstructions(imlSegment, index, 1); - return imlSegment->imlList.data() + index; + return imlSegment->imlList + index; } -IMLInstruction* PPCRecompiler_appendInstruction(IMLSegment* imlSegment) +/* +* Append and return new instruction at the end of the segment +* Warning: Can invalidate any previous instruction structs from the same segment +*/ +PPCRecImlInstruction_t* PPCRecompiler_appendInstruction(PPCRecImlSegment_t* imlSegment) { - size_t index = imlSegment->imlList.size(); - imlSegment->imlList.emplace_back(); - memset(imlSegment->imlList.data() + index, 0, sizeof(IMLInstruction)); - return imlSegment->imlList.data() + index; -} - -IMLSegment* PPCRecompilerIml_appendSegment(ppcImlGenContext_t* ppcImlGenContext) -{ - IMLSegment* segment = new IMLSegment(); - ppcImlGenContext->segmentList2.emplace_back(segment); - return segment; + sint32 index = imlSegment->imlListCount; + if (index >= imlSegment->imlListSize) + { + sint32 newSize = index+1; + imlSegment->imlList = (PPCRecImlInstruction_t*)realloc(imlSegment->imlList, sizeof(PPCRecImlInstruction_t)*newSize); + imlSegment->imlListSize = newSize; + } + imlSegment->imlListCount++; + memset(imlSegment->imlList + index, 0, sizeof(PPCRecImlInstruction_t)); + return imlSegment->imlList + index; } void PPCRecompilerIml_insertSegments(ppcImlGenContext_t* ppcImlGenContext, sint32 index, sint32 count) { - ppcImlGenContext->segmentList2.insert(ppcImlGenContext->segmentList2.begin() + index, count, nullptr); - for (sint32 i = 0; i < count; i++) - ppcImlGenContext->segmentList2[index + i] = new IMLSegment(); 
+ if( (ppcImlGenContext->segmentListCount+count) > ppcImlGenContext->segmentListSize ) + { + // allocate space for more segments + ppcImlGenContext->segmentListSize += count; + ppcImlGenContext->segmentList = (PPCRecImlSegment_t**)realloc(ppcImlGenContext->segmentList, ppcImlGenContext->segmentListSize*sizeof(PPCRecImlSegment_t*)); + } + for(sint32 i=(sint32)ppcImlGenContext->segmentListCount-1; i>=index; i--) + { + memcpy(ppcImlGenContext->segmentList+(i+count), ppcImlGenContext->segmentList+i, sizeof(PPCRecImlSegment_t*)); + } + ppcImlGenContext->segmentListCount += count; + for(sint32 i=0; isegmentList+index+i, 0x00, sizeof(PPCRecImlSegment_t*)); + ppcImlGenContext->segmentList[index+i] = (PPCRecImlSegment_t*)malloc(sizeof(PPCRecImlSegment_t)); + memset(ppcImlGenContext->segmentList[index+i], 0x00, sizeof(PPCRecImlSegment_t)); + ppcImlGenContext->segmentList[index + i]->list_prevSegments = std::vector(); + } +} + +/* + * Allocate and init a new iml instruction segment + */ +PPCRecImlSegment_t* PPCRecompiler_generateImlSegment(ppcImlGenContext_t* ppcImlGenContext) +{ + if( ppcImlGenContext->segmentListCount >= ppcImlGenContext->segmentListSize ) + { + // allocate space for more segments + ppcImlGenContext->segmentListSize *= 2; + ppcImlGenContext->segmentList = (PPCRecImlSegment_t**)realloc(ppcImlGenContext->segmentList, ppcImlGenContext->segmentListSize*sizeof(PPCRecImlSegment_t*)); + } + PPCRecImlSegment_t* ppcRecSegment = new PPCRecImlSegment_t(); + ppcImlGenContext->segmentList[ppcImlGenContext->segmentListCount] = ppcRecSegment; + ppcImlGenContext->segmentListCount++; + return ppcRecSegment; +} + +void PPCRecompiler_freeContext(ppcImlGenContext_t* ppcImlGenContext) +{ + if (ppcImlGenContext->imlList) + { + free(ppcImlGenContext->imlList); + ppcImlGenContext->imlList = nullptr; + } + for(sint32 i=0; isegmentListCount; i++) + { + free(ppcImlGenContext->segmentList[i]->imlList); + delete ppcImlGenContext->segmentList[i]; + } + ppcImlGenContext->segmentListCount 
= 0; + if (ppcImlGenContext->segmentList) + { + free(ppcImlGenContext->segmentList); + ppcImlGenContext->segmentList = nullptr; + } +} + +bool PPCRecompiler_isSuffixInstruction(PPCRecImlInstruction_t* iml) +{ + if (iml->type == PPCREC_IML_TYPE_MACRO && (iml->operation == PPCREC_IML_MACRO_BLR || iml->operation == PPCREC_IML_MACRO_BCTR) || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_BL || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_B_FAR || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_BLRL || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_BCTRL || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_LEAVE || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_HLE || + iml->type == PPCREC_IML_TYPE_MACRO && iml->operation == PPCREC_IML_MACRO_MFTB || + iml->type == PPCREC_IML_TYPE_PPC_ENTER || + iml->type == PPCREC_IML_TYPE_CJUMP || + iml->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) + return true; + return false; } bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) @@ -1956,18 +3643,15 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) switch (PPC_getBits(opcode, 25, 5)) { case 0: - if( !PPCRecompilerImlGen_PS_CMPU0(ppcImlGenContext, opcode) ) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_PS_CMPU0(ppcImlGenContext, opcode); ppcImlGenContext->hasFPUInstruction = true; break; case 1: - if( !PPCRecompilerImlGen_PS_CMPO0(ppcImlGenContext, opcode) ) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_PS_CMPO0(ppcImlGenContext, opcode); ppcImlGenContext->hasFPUInstruction = true; break; case 2: - if( !PPCRecompilerImlGen_PS_CMPU1(ppcImlGenContext, opcode) ) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_PS_CMPU1(ppcImlGenContext, opcode); ppcImlGenContext->hasFPUInstruction = true; break; default: @@ -2008,23 +3692,23 @@ 
bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 12: // PS_MULS0 - if (PPCRecompilerImlGen_PS_MULSX(ppcImlGenContext, opcode, false) == false) + case 12: // multiply scalar + if (PPCRecompilerImlGen_PS_MULS0(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 13: // PS_MULS1 - if (PPCRecompilerImlGen_PS_MULSX(ppcImlGenContext, opcode, true) == false) + case 13: // multiply scalar + if (PPCRecompilerImlGen_PS_MULS1(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 14: // PS_MADDS0 - if (PPCRecompilerImlGen_PS_MADDSX(ppcImlGenContext, opcode, false) == false) + case 14: // multiply add scalar + if (PPCRecompilerImlGen_PS_MADDS0(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 15: // PS_MADDS1 - if (PPCRecompilerImlGen_PS_MADDSX(ppcImlGenContext, opcode, true) == false) + case 15: // multiply add scalar + if (PPCRecompilerImlGen_PS_MADDS1(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; @@ -2091,22 +3775,22 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 28: // PS_MSUB - if (PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext, opcode, false) == false) + case 28: // multiply sub paired + if (PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 29: // PS_MADD + case 29: // multiply add paired if (PPCRecompilerImlGen_PS_MADD(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; 
ppcImlGenContext->hasFPUInstruction = true; break; - case 30: // PS_NMSUB - if (PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext, opcode, true) == false) + case 30: // negative multiply sub paired + if (PPCRecompilerImlGen_PS_NMSUB(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 31: // PS_NMADD + case 31: // negative multiply add paired if (PPCRecompilerImlGen_PS_NMADD(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; @@ -2120,23 +3804,20 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) PPCRecompilerImlGen_MULLI(ppcImlGenContext, opcode); break; case 8: // SUBFIC - if (!PPCRecompilerImlGen_SUBFIC(ppcImlGenContext, opcode)) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_SUBFIC(ppcImlGenContext, opcode); break; case 10: // CMPLI - if (!PPCRecompilerImlGen_CMPI(ppcImlGenContext, opcode, true)) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_CMPLI(ppcImlGenContext, opcode); break; case 11: // CMPI - if (!PPCRecompilerImlGen_CMPI(ppcImlGenContext, opcode, false)) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_CMPI(ppcImlGenContext, opcode); break; case 12: // ADDIC - if (PPCRecompilerImlGen_ADDIC_(ppcImlGenContext, opcode, false) == false) + if (PPCRecompilerImlGen_ADDIC(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 13: // ADDIC. 
- if (PPCRecompilerImlGen_ADDIC_(ppcImlGenContext, opcode, true) == false) + if (PPCRecompilerImlGen_ADDIC_(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 14: // ADDI @@ -2168,11 +3849,8 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) case 19: // opcode category 19 switch (PPC_getBits(opcode, 30, 10)) { - case 0: - PPCRecompilerImlGen_MCRF(ppcImlGenContext, opcode); - break; - case 16: // BCLR - if (PPCRecompilerImlGen_BCSPR(ppcImlGenContext, opcode, SPR_LR) == false) + case 16: + if (PPCRecompilerImlGen_BCLR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 129: @@ -2203,8 +3881,8 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_CROR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 528: // BCCTR - if (PPCRecompilerImlGen_BCSPR(ppcImlGenContext, opcode, SPR_CTR) == false) + case 528: + if (PPCRecompilerImlGen_BCCTR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; default: @@ -2224,34 +3902,37 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_RLWNM(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 24: // ORI - PPCRecompilerImlGen_ORI_ORIS(ppcImlGenContext, opcode, false); + case 24: + PPCRecompilerImlGen_ORI(ppcImlGenContext, opcode); break; - case 25: // ORIS - PPCRecompilerImlGen_ORI_ORIS(ppcImlGenContext, opcode, true); + case 25: + PPCRecompilerImlGen_ORIS(ppcImlGenContext, opcode); break; - case 26: // XORI - PPCRecompilerImlGen_XORI_XORIS(ppcImlGenContext, opcode, false); + case 26: + PPCRecompilerImlGen_XORI(ppcImlGenContext, opcode); break; - case 27: // XORIS - PPCRecompilerImlGen_XORI_XORIS(ppcImlGenContext, opcode, true); + case 27: + PPCRecompilerImlGen_XORIS(ppcImlGenContext, opcode); break; - case 28: // ANDI - 
PPCRecompilerImlGen_ANDI_ANDIS(ppcImlGenContext, opcode, false); + case 28: + PPCRecompilerImlGen_ANDI(ppcImlGenContext, opcode); break; - case 29: // ANDIS - PPCRecompilerImlGen_ANDI_ANDIS(ppcImlGenContext, opcode, true); + case 29: + PPCRecompilerImlGen_ANDIS(ppcImlGenContext, opcode); break; case 31: // opcode category switch (PPC_getBits(opcode, 30, 10)) { case 0: - PPCRecompilerImlGen_CMP(ppcImlGenContext, opcode, false); + PPCRecompilerImlGen_CMP(ppcImlGenContext, opcode); break; case 4: PPCRecompilerImlGen_TW(ppcImlGenContext, opcode); break; case 8: + // todo: Check if we can optimize this pattern: + // SUBFC + SUBFE + // SUBFC if (PPCRecompilerImlGen_SUBFC(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; @@ -2271,8 +3952,9 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_LWARX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 23: // LWZX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 32, false, true, false); + case 23: + if (PPCRecompilerImlGen_LWZX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; break; case 24: if (PPCRecompilerImlGen_SLW(ppcImlGenContext, opcode) == false) @@ -2282,12 +3964,12 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_CNTLZW(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 28: // AND - if (!PPCRecompilerImlGen_AND_NAND(ppcImlGenContext, opcode, false)) + case 28: + if (PPCRecompilerImlGen_AND(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 32: - PPCRecompilerImlGen_CMP(ppcImlGenContext, opcode, true); // CMPL + PPCRecompilerImlGen_CMPL(ppcImlGenContext, opcode); break; case 40: if (PPCRecompilerImlGen_SUBF(ppcImlGenContext, opcode) == false) @@ -2296,11 +3978,12 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) 
case 54: // DBCST - Generates no code break; - case 55: // LWZUX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 32, false, true, true); + case 55: + if (PPCRecompilerImlGen_LWZUX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; break; - case 60: // ANDC - if (!PPCRecompilerImlGen_ANDC(ppcImlGenContext, opcode)) + case 60: + if (PPCRecompilerImlGen_ANDC(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 75: @@ -2310,18 +3993,20 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) case 86: // DCBF -> No-Op break; - case 87: // LBZX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 8, false, true, false); + case 87: + if (PPCRecompilerImlGen_LBZX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; break; case 104: if (PPCRecompilerImlGen_NEG(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 119: // LBZUX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 8, false, true, true); + case 119: + if (PPCRecompilerImlGen_LBZUX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; break; - case 124: // NOR - if (!PPCRecompilerImlGen_OR_NOR(ppcImlGenContext, opcode, true)) + case 124: + if (PPCRecompilerImlGen_NOR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 136: @@ -2333,20 +4018,19 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) unsupportedInstructionFound = true; break; case 144: - if( !PPCRecompilerImlGen_MTCRF(ppcImlGenContext, opcode)) - unsupportedInstructionFound = true; + PPCRecompilerImlGen_MTCRF(ppcImlGenContext, opcode); break; case 150: - if (!PPCRecompilerImlGen_STWCX(ppcImlGenContext, opcode)) + if (PPCRecompilerImlGen_STWCX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 151: // STWX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 32, 
true, false)) + case 151: + if (PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 32) == false) unsupportedInstructionFound = true; break; - case 183: // STWUX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 32, true, true)) - unsupportedInstructionFound = true; + case 183: + if (PPCRecompilerImlGen_STORE_INDEXED_UPDATE(ppcImlGenContext, opcode, 32) == false) + unsupportedInstructionFound = true; break; case 200: if (PPCRecompilerImlGen_SUBFZE(ppcImlGenContext, opcode) == false) @@ -2356,8 +4040,8 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_ADDZE(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 215: // STBX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 8, true, false)) + case 215: + if (PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 8) == false) unsupportedInstructionFound = true; break; case 234: @@ -2368,56 +4052,59 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_MULLW(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 247: // STBUX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 8, true, true)) + case 247: + if (PPCRecompilerImlGen_STORE_INDEXED_UPDATE(ppcImlGenContext, opcode, 8) == false) unsupportedInstructionFound = true; break; case 266: if (PPCRecompilerImlGen_ADD(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 279: // LHZX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 16, false, true, false); - break; - case 284: // EQV (alias to NXOR) - if (!PPCRecompilerImlGen_XOR(ppcImlGenContext, opcode, true)) + case 279: + if (PPCRecompilerImlGen_LHZX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 311: // LHZUX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 16, false, true, true); + case 284: + 
PPCRecompilerImlGen_EQV(ppcImlGenContext, opcode); break; - case 316: // XOR - if (!PPCRecompilerImlGen_XOR(ppcImlGenContext, opcode, false)) + case 311: + if (PPCRecompilerImlGen_LHZUX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; + break; + case 316: + if (PPCRecompilerImlGen_XOR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 339: if (PPCRecompilerImlGen_MFSPR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 343: // LHAX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 16, true, true, false); + case 343: + if (PPCRecompilerImlGen_LHAX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; break; case 371: if (PPCRecompilerImlGen_MFTB(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 375: // LHAUX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 16, true, true, true); + case 375: + if (PPCRecompilerImlGen_LHAUX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; break; - case 407: // STHX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 16, true, false)) + case 407: + if (PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 16) == false) unsupportedInstructionFound = true; break; case 412: if (PPCRecompilerImlGen_ORC(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 439: // STHUX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 16, true, true)) + case 439: + if (PPCRecompilerImlGen_STORE_INDEXED_UPDATE(ppcImlGenContext, opcode, 16) == false) unsupportedInstructionFound = true; break; - case 444: // OR - if (!PPCRecompilerImlGen_OR_NOR(ppcImlGenContext, opcode, false)) + case 444: + if (PPCRecompilerImlGen_OR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 459: @@ -2427,19 +4114,17 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* 
ppcImlGenContext) if (PPCRecompilerImlGen_MTSPR(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 476: // NAND - if (!PPCRecompilerImlGen_AND_NAND(ppcImlGenContext, opcode, true)) - unsupportedInstructionFound = true; - break; case 491: if (PPCRecompilerImlGen_DIVW(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 534: // LWBRX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 32, false, false, false); + case 534: + if (PPCRecompilerImlGen_LWBRX(ppcImlGenContext, opcode) == false) + unsupportedInstructionFound = true; + ppcImlGenContext->hasFPUInstruction = true; break; - case 535: // LFSX - if (PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext, opcode, false, false) == false) + case 535: + if (PPCRecompilerImlGen_LFSX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; @@ -2447,8 +4132,8 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) if (PPCRecompilerImlGen_SRW(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 567: // LFSUX - if (PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext, opcode, true, false) == false) + case 567: + if (PPCRecompilerImlGen_LFSUX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; @@ -2459,42 +4144,38 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) case 598: PPCRecompilerImlGen_SYNC(ppcImlGenContext, opcode); break; - case 599: // LFDX - if (PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext, opcode, false, true) == false) + case 599: + if (PPCRecompilerImlGen_LFDX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 631: // LFDUX - if (PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext, opcode, true, true) == 
false) + case 631: + if (PPCRecompilerImlGen_LFDUX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 662: // STWBRX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 32, false, false)) + case 662: + if (PPCRecompilerImlGen_STWBRX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 663: // STFSX - if (PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext, opcode, false, false) == false) + case 663: + if (PPCRecompilerImlGen_STFSX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 695: // STFSUX - if (PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext, opcode, true, false) == false) + case 695: + if (PPCRecompilerImlGen_STFSUX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; case 725: if (PPCRecompilerImlGen_STSWI(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 727: // STFDX - if (PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext, opcode, false, true) == false) + case 727: + if (PPCRecompilerImlGen_STFDX(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; break; - case 759: // STFDUX - if (PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext, opcode, true, true) == false) - unsupportedInstructionFound = true; - break; - case 790: // LHBRX - PPCRecompilerImlGen_LOAD_INDEXED(ppcImlGenContext, opcode, 16, false, false, false); + case 790: + PPCRecompilerImlGen_LHBRX(ppcImlGenContext, opcode); break; case 792: if (PPCRecompilerImlGen_SRAW(ppcImlGenContext, opcode) == false) @@ -2505,7 +4186,7 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) unsupportedInstructionFound = true; break; case 918: // STHBRX - if (!PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, opcode, 16, false, true)) + if (PPCRecompilerImlGen_STORE_INDEXED(ppcImlGenContext, 
opcode, 16, true) == false) unsupportedInstructionFound = true; break; case 922: @@ -2529,61 +4210,47 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) break; } break; - case 32: // LWZ - if(!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 32, false, true, false)) - unsupportedInstructionFound = true; + case 32: + PPCRecompilerImlGen_LWZ(ppcImlGenContext, opcode); break; - case 33: // LWZU - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 32, false, true, true)) - unsupportedInstructionFound = true; + case 33: + PPCRecompilerImlGen_LWZU(ppcImlGenContext, opcode); break; - case 34: // LBZ - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 8, false, true, false)) - unsupportedInstructionFound = true; + case 34: + PPCRecompilerImlGen_LBZ(ppcImlGenContext, opcode); break; - case 35: // LBZU - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 8, false, true, true)) - unsupportedInstructionFound = true; + case 35: + PPCRecompilerImlGen_LBZU(ppcImlGenContext, opcode); break; - case 36: // STW - if(!PPCRecompilerImlGen_STORE(ppcImlGenContext, opcode, 32, true, false)) - unsupportedInstructionFound = true; + case 36: + PPCRecompilerImlGen_STW(ppcImlGenContext, opcode); break; - case 37: // STWU - if (!PPCRecompilerImlGen_STORE(ppcImlGenContext, opcode, 32, true, true)) - unsupportedInstructionFound = true; + case 37: + PPCRecompilerImlGen_STWU(ppcImlGenContext, opcode); break; - case 38: // STB - if (!PPCRecompilerImlGen_STORE(ppcImlGenContext, opcode, 8, true, false)) - unsupportedInstructionFound = true; + case 38: + PPCRecompilerImlGen_STB(ppcImlGenContext, opcode); break; - case 39: // STBU - if (!PPCRecompilerImlGen_STORE(ppcImlGenContext, opcode, 8, true, true)) - unsupportedInstructionFound = true; + case 39: + PPCRecompilerImlGen_STBU(ppcImlGenContext, opcode); break; - case 40: // LHZ - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 16, false, true, false)) - unsupportedInstructionFound = true; + case 
40: + PPCRecompilerImlGen_LHZ(ppcImlGenContext, opcode); break; - case 41: // LHZU - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 16, false, true, true)) - unsupportedInstructionFound = true; + case 41: + PPCRecompilerImlGen_LHZU(ppcImlGenContext, opcode); break; - case 42: // LHA - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 16, true, true, false)) - unsupportedInstructionFound = true; + case 42: + PPCRecompilerImlGen_LHA(ppcImlGenContext, opcode); break; - case 43: // LHAU - if (!PPCRecompilerImlGen_LOAD(ppcImlGenContext, opcode, 16, true, true, true)) - unsupportedInstructionFound = true; + case 43: + PPCRecompilerImlGen_LHAU(ppcImlGenContext, opcode); break; - case 44: // STH - if (!PPCRecompilerImlGen_STORE(ppcImlGenContext, opcode, 16, true, false)) - unsupportedInstructionFound = true; + case 44: + PPCRecompilerImlGen_STH(ppcImlGenContext, opcode); break; - case 45: // STHU - if (!PPCRecompilerImlGen_STORE(ppcImlGenContext, opcode, 16, true, true)) - unsupportedInstructionFound = true; + case 45: + PPCRecompilerImlGen_STHU(ppcImlGenContext, opcode); break; case 46: PPCRecompilerImlGen_LMW(ppcImlGenContext, opcode); @@ -2591,53 +4258,53 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) case 47: PPCRecompilerImlGen_STMW(ppcImlGenContext, opcode); break; - case 48: // LFS - if (PPCRecompilerImlGen_LFS_LFSU_LFD_LFDU(ppcImlGenContext, opcode, false, false) == false) + case 48: + if (PPCRecompilerImlGen_LFS(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 49: // LFSU - if (PPCRecompilerImlGen_LFS_LFSU_LFD_LFDU(ppcImlGenContext, opcode, true, false) == false) + case 49: + if (PPCRecompilerImlGen_LFSU(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 50: // LFD - if (PPCRecompilerImlGen_LFS_LFSU_LFD_LFDU(ppcImlGenContext, opcode, false, true) == false) + 
case 50: + if (PPCRecompilerImlGen_LFD(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 51: // LFDU - if (PPCRecompilerImlGen_LFS_LFSU_LFD_LFDU(ppcImlGenContext, opcode, true, true) == false) + case 51: + if (PPCRecompilerImlGen_LFDU(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 52: // STFS - if (PPCRecompilerImlGen_STFS_STFSU_STFD_STFDU(ppcImlGenContext, opcode, false, false) == false) + case 52: + if (PPCRecompilerImlGen_STFS(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 53: // STFSU - if (PPCRecompilerImlGen_STFS_STFSU_STFD_STFDU(ppcImlGenContext, opcode, true, false) == false) + case 53: + if (PPCRecompilerImlGen_STFSU(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 54: // STFD - if (PPCRecompilerImlGen_STFS_STFSU_STFD_STFDU(ppcImlGenContext, opcode, false, true) == false) + case 54: + if (PPCRecompilerImlGen_STFD(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; - case 55: // STFDU - if (PPCRecompilerImlGen_STFS_STFSU_STFD_STFDU(ppcImlGenContext, opcode, true, true) == false) + case 55: + if (PPCRecompilerImlGen_STFDU(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; case 56: - if (PPCRecompilerImlGen_PSQ_L(ppcImlGenContext, opcode, false) == false) + if (PPCRecompilerImlGen_PSQ_L(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; case 57: - if (PPCRecompilerImlGen_PSQ_L(ppcImlGenContext, opcode, true) == false) + if (PPCRecompilerImlGen_PSQ_LU(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; 
ppcImlGenContext->hasFPUInstruction = true; break; @@ -2690,12 +4357,12 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) } break; case 60: - if (PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext, opcode, false) == false) + if (PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; case 61: - if (PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext, opcode, true) == false) + if (PPCRecompilerImlGen_PSQ_STU(ppcImlGenContext, opcode) == false) unsupportedInstructionFound = true; ppcImlGenContext->hasFPUInstruction = true; break; @@ -2804,482 +4471,556 @@ bool PPCRecompiler_decodePPCInstruction(ppcImlGenContext_t* ppcImlGenContext) return unsupportedInstructionFound; } -// returns false if code flow is not interrupted -bool PPCRecompiler_CheckIfInstructionEndsSegment(PPCFunctionBoundaryTracker& boundaryTracker, uint32 instructionAddress, uint32 opcode, bool& makeNextInstEnterable, bool& continueDefaultPath, bool& hasBranchTarget, uint32& branchTarget) +bool PPCRecompiler_generateIntermediateCode(ppcImlGenContext_t& ppcImlGenContext, PPCRecFunction_t* ppcRecFunc, std::set& entryAddresses) { - hasBranchTarget = false; - branchTarget = 0xFFFFFFFF; - makeNextInstEnterable = false; - continueDefaultPath = false; - switch (Espresso::GetPrimaryOpcode(opcode)) - { - case Espresso::PrimaryOpcode::VIRTUAL_HLE: - { - makeNextInstEnterable = true; - hasBranchTarget = false; - continueDefaultPath = false; - return true; - } - case Espresso::PrimaryOpcode::BC: - { - uint32 BD, BI; - Espresso::BOField BO; - bool AA, LK; - Espresso::decodeOp_BC(opcode, BD, BO, BI, AA, LK); - if (!LK) - { - hasBranchTarget = true; - branchTarget = (AA ? 
BD : BD) + instructionAddress; - if (!boundaryTracker.ContainsAddress(branchTarget)) - hasBranchTarget = false; // far jump - } - makeNextInstEnterable = LK; - continueDefaultPath = true; - return true; - } - case Espresso::PrimaryOpcode::B: - { - uint32 LI; - bool AA, LK; - Espresso::decodeOp_B(opcode, LI, AA, LK); - if (!LK) - { - hasBranchTarget = true; - branchTarget = AA ? LI : LI + instructionAddress; - if (!boundaryTracker.ContainsAddress(branchTarget)) - hasBranchTarget = false; // far jump - } - makeNextInstEnterable = LK; - continueDefaultPath = false; - return true; - } - case Espresso::PrimaryOpcode::GROUP_19: - switch (Espresso::GetGroup19Opcode(opcode)) - { - case Espresso::Opcode19::BCLR: - case Espresso::Opcode19::BCCTR: - { - Espresso::BOField BO; - uint32 BI; - bool LK; - Espresso::decodeOp_BCSPR(opcode, BO, BI, LK); - continueDefaultPath = !BO.conditionIgnore() || !BO.decrementerIgnore(); // if branch is always taken then there is no continued path - makeNextInstEnterable = Espresso::DecodeLK(opcode); - return true; - } - default: - break; - } - break; - case Espresso::PrimaryOpcode::GROUP_31: - switch (Espresso::GetGroup31Opcode(opcode)) - { - default: - break; - } - break; - default: - break; - } - return false; -} - -void PPCRecompiler_DetermineBasicBlockRange(std::vector& basicBlockList, PPCFunctionBoundaryTracker& boundaryTracker, uint32 ppcStart, uint32 ppcEnd, const std::set& combinedBranchTargets, const std::set& entryAddresses) -{ - cemu_assert_debug(ppcStart <= ppcEnd); - - uint32 currentAddr = ppcStart; - - PPCBasicBlockInfo* curBlockInfo = &basicBlockList.emplace_back(currentAddr, entryAddresses); - - uint32 basicBlockStart = currentAddr; - while (currentAddr <= ppcEnd) - { - curBlockInfo->lastAddress = currentAddr; - uint32 opcode = memory_readU32(currentAddr); - bool nextInstIsEnterable = false; - bool hasBranchTarget = false; - bool hasContinuedFlow = false; - uint32 branchTarget = 0; - if 
(PPCRecompiler_CheckIfInstructionEndsSegment(boundaryTracker, currentAddr, opcode, nextInstIsEnterable, hasContinuedFlow, hasBranchTarget, branchTarget)) - { - curBlockInfo->hasBranchTarget = hasBranchTarget; - curBlockInfo->branchTarget = branchTarget; - curBlockInfo->hasContinuedFlow = hasContinuedFlow; - // start new basic block, except if this is the last instruction - if (currentAddr >= ppcEnd) - break; - curBlockInfo = &basicBlockList.emplace_back(currentAddr + 4, entryAddresses); - curBlockInfo->isEnterable = curBlockInfo->isEnterable || nextInstIsEnterable; - currentAddr += 4; - continue; - } - currentAddr += 4; - if (currentAddr <= ppcEnd) - { - if (combinedBranchTargets.find(currentAddr) != combinedBranchTargets.end()) - { - // instruction is branch target, start new basic block - curBlockInfo = &basicBlockList.emplace_back(currentAddr, entryAddresses); - } - } - - } -} - -std::vector PPCRecompiler_DetermineBasicBlockRange(PPCFunctionBoundaryTracker& boundaryTracker, const std::set& entryAddresses) -{ - cemu_assert(!entryAddresses.empty()); - std::vector basicBlockList; - - const std::set branchTargets = boundaryTracker.GetBranchTargets(); - auto funcRanges = boundaryTracker.GetRanges(); - - std::set combinedBranchTargets = branchTargets; - combinedBranchTargets.insert(entryAddresses.begin(), entryAddresses.end()); - - for (auto& funcRangeIt : funcRanges) - PPCRecompiler_DetermineBasicBlockRange(basicBlockList, boundaryTracker, funcRangeIt.startAddress, funcRangeIt.startAddress + funcRangeIt.length - 4, combinedBranchTargets, entryAddresses); - - // mark all segments that start at entryAddresses as enterable (debug code for verification, can be removed) - size_t numMarkedEnterable = 0; - for (auto& basicBlockIt : basicBlockList) - { - if (entryAddresses.find(basicBlockIt.startAddress) != entryAddresses.end()) - { - cemu_assert_debug(basicBlockIt.isEnterable); - numMarkedEnterable++; - } - } - cemu_assert_debug(numMarkedEnterable == entryAddresses.size()); 
- - // todo - inline BL, currently this is done in the instruction handler of BL but this will mean that instruction cycle increasing is ignored - - return basicBlockList; -} - -bool PPCIMLGen_FillBasicBlock(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo) -{ - ppcImlGenContext.currentOutputSegment = basicBlockInfo.GetSegmentForInstructionAppend(); - ppcImlGenContext.currentInstruction = (uint32*)(memory_base + basicBlockInfo.startAddress); - - uint32* firstCurrentInstruction = ppcImlGenContext.currentInstruction; - uint32* endCurrentInstruction = (uint32*)(memory_base + basicBlockInfo.lastAddress); - - while (ppcImlGenContext.currentInstruction <= endCurrentInstruction) - { - uint32 addressOfCurrentInstruction = (uint32)((uint8*)ppcImlGenContext.currentInstruction - memory_base); - ppcImlGenContext.ppcAddressOfCurrentInstruction = addressOfCurrentInstruction; - - if (PPCRecompiler_decodePPCInstruction(&ppcImlGenContext)) - { - cemuLog_logDebug(LogType::Force, "PPCRecompiler: Unsupported instruction at 0x{:08x}", addressOfCurrentInstruction); - ppcImlGenContext.currentOutputSegment = nullptr; - return false; - } - } - ppcImlGenContext.currentOutputSegment = nullptr; - return true; -} - -// returns split segment from which the continued segment is available via seg->GetBranchNotTaken() -IMLSegment* PPCIMLGen_CreateSplitSegmentAtEnd(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo) -{ - IMLSegment* writeSegment = basicBlockInfo.GetSegmentForInstructionAppend(); - - IMLSegment* continuedSegment = ppcImlGenContext.InsertSegment(ppcImlGenContext.GetSegmentIndex(writeSegment) + 1); - - continuedSegment->SetLinkBranchTaken(writeSegment->GetBranchTaken()); - continuedSegment->SetLinkBranchNotTaken(writeSegment->GetBranchNotTaken()); - - writeSegment->SetLinkBranchNotTaken(continuedSegment); - writeSegment->SetLinkBranchTaken(nullptr); - - if (ppcImlGenContext.currentOutputSegment == writeSegment) - 
ppcImlGenContext.currentOutputSegment = continuedSegment; - - cemu_assert_debug(basicBlockInfo.appendSegment == writeSegment); - basicBlockInfo.appendSegment = continuedSegment; - - return writeSegment; -} - -// generates a new segment and sets it as branch target for the current write segment. Returns the created segment -IMLSegment* PPCIMLGen_CreateNewSegmentAsBranchTarget(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo) -{ - IMLSegment* writeSegment = basicBlockInfo.GetSegmentForInstructionAppend(); - IMLSegment* branchTargetSegment = ppcImlGenContext.NewSegment(); - cemu_assert_debug(!writeSegment->GetBranchTaken()); // must not have a target already - writeSegment->SetLinkBranchTaken(branchTargetSegment); - return branchTargetSegment; -} - -// verify that current instruction is the last instruction of the active basic block -void PPCIMLGen_AssertIfNotLastSegmentInstruction(ppcImlGenContext_t& ppcImlGenContext) -{ - cemu_assert_debug(ppcImlGenContext.currentBasicBlock->lastAddress == ppcImlGenContext.ppcAddressOfCurrentInstruction); -} - -bool PPCRecompiler_IsBasicBlockATightFiniteLoop(IMLSegment* imlSegment, PPCBasicBlockInfo& basicBlockInfo) -{ - // if we detect a finite loop we can skip generating the cycle check - // currently we only check for BDNZ loops since thats reasonably safe to rely on - // however there are other forms of loops that can be classified as finite, - // but detecting those involves analyzing PPC code and we dont have the infrastructure for that (e.g. 
IML has CheckRegisterUsage but we dont have an equivalent for PPC code) - - // base criteria, must jump to beginning of same segment - if (imlSegment->nextSegmentBranchTaken != imlSegment) - return false; - - uint32 opcode = *(uint32be*)(memory_base + basicBlockInfo.lastAddress); - if (Espresso::GetPrimaryOpcode(opcode) != Espresso::PrimaryOpcode::BC) - return false; - uint32 BO, BI, BD; - PPC_OPC_TEMPL_B(opcode, BO, BI, BD); - Espresso::BOField boField(BO); - if(!boField.conditionIgnore() || boField.branchAlways()) - return false; - if(boField.decrementerIgnore()) - return false; - return true; -} - -void PPCRecompiler_HandleCycleCheckCount(ppcImlGenContext_t& ppcImlGenContext, PPCBasicBlockInfo& basicBlockInfo) -{ - IMLSegment* imlSegment = basicBlockInfo.GetFirstSegmentInChain(); - if (!basicBlockInfo.hasBranchTarget) - return; - if (basicBlockInfo.branchTarget > basicBlockInfo.startAddress) - return; - - if (PPCRecompiler_IsBasicBlockATightFiniteLoop(imlSegment, basicBlockInfo)) - return; - - // make the segment enterable so execution can return after passing a check - basicBlockInfo.GetFirstSegmentInChain()->SetEnterable(basicBlockInfo.startAddress); - - IMLSegment* splitSeg = PPCIMLGen_CreateSplitSegmentAtEnd(ppcImlGenContext, basicBlockInfo); - splitSeg->AppendInstruction()->make_cjump_cycle_check(); - - IMLSegment* exitSegment = ppcImlGenContext.NewSegment(); - splitSeg->SetLinkBranchTaken(exitSegment); - - exitSegment->AppendInstruction()->make_macro(PPCREC_IML_MACRO_LEAVE, basicBlockInfo.startAddress, 0, 0, IMLREG_INVALID); - - cemu_assert_debug(splitSeg->nextSegmentBranchNotTaken); - // let the IML optimizer and RA know that the original segment should be used during analysis for dead code elimination - exitSegment->SetNextSegmentForOverwriteHints(splitSeg->nextSegmentBranchNotTaken); -} - -void PPCRecompiler_SetSegmentsUncertainFlow(ppcImlGenContext_t& ppcImlGenContext) -{ - for (IMLSegment* segIt : ppcImlGenContext.segmentList2) - { - // handle empty 
segment - if (segIt->imlList.empty()) - { - cemu_assert_debug(segIt->GetBranchNotTaken()); - continue; - } - // check last instruction of segment - IMLInstruction* imlInstruction = segIt->GetLastInstruction(); - if (imlInstruction->type == PPCREC_IML_TYPE_MACRO) - { - auto macroType = imlInstruction->operation; - switch (macroType) - { - case PPCREC_IML_MACRO_B_TO_REG: - case PPCREC_IML_MACRO_BL: - case PPCREC_IML_MACRO_B_FAR: - case PPCREC_IML_MACRO_HLE: - case PPCREC_IML_MACRO_LEAVE: - segIt->nextSegmentIsUncertain = true; - break; - case PPCREC_IML_MACRO_DEBUGBREAK: - case PPCREC_IML_MACRO_COUNT_CYCLES: - break; - default: - cemu_assert_unimplemented(); - } - } - } -} - -bool PPCRecompiler_GenerateIML(ppcImlGenContext_t& ppcImlGenContext, PPCFunctionBoundaryTracker& boundaryTracker, std::set& entryAddresses) -{ - std::vector basicBlockList = PPCRecompiler_DetermineBasicBlockRange(boundaryTracker, entryAddresses); - - // create segments - std::unordered_map addrToBB; - ppcImlGenContext.segmentList2.resize(basicBlockList.size()); - for (size_t i = 0; i < basicBlockList.size(); i++) - { - PPCBasicBlockInfo& basicBlockInfo = basicBlockList[i]; - IMLSegment* seg = new IMLSegment(); - seg->ppcAddress = basicBlockInfo.startAddress; - if(basicBlockInfo.isEnterable) - seg->SetEnterable(basicBlockInfo.startAddress); - ppcImlGenContext.segmentList2[i] = seg; - cemu_assert_debug(addrToBB.find(basicBlockInfo.startAddress) == addrToBB.end()); - basicBlockInfo.SetInitialSegment(seg); - addrToBB.emplace(basicBlockInfo.startAddress, &basicBlockInfo); - } - // link segments - for (size_t i = 0; i < basicBlockList.size(); i++) - { - PPCBasicBlockInfo& bbInfo = basicBlockList[i]; - cemu_assert_debug(bbInfo.GetFirstSegmentInChain() == bbInfo.GetSegmentForInstructionAppend()); - IMLSegment* seg = ppcImlGenContext.segmentList2[i]; - if (bbInfo.hasBranchTarget) - { - PPCBasicBlockInfo* targetBB = addrToBB[bbInfo.branchTarget]; - cemu_assert_debug(targetBB); - 
IMLSegment_SetLinkBranchTaken(seg, targetBB->GetFirstSegmentInChain()); - } - if (bbInfo.hasContinuedFlow) - { - PPCBasicBlockInfo* targetBB = addrToBB[bbInfo.lastAddress + 4]; - if (!targetBB) - { - cemuLog_log(LogType::Recompiler, "Recompiler was unable to link segment [0x{:08x}-0x{:08x}] to 0x{:08x}", bbInfo.startAddress, bbInfo.lastAddress, bbInfo.lastAddress + 4); - return false; - } - cemu_assert_debug(targetBB); - IMLSegment_SetLinkBranchNotTaken(seg, targetBB->GetFirstSegmentInChain()); - } - } - // we assume that all unreachable segments are potentially enterable - // todo - mark them as such - - - // generate cycle counters - // in theory we could generate these as part of FillBasicBlock() but in the future we might use more complex logic to emit fewer operations - for (size_t i = 0; i < basicBlockList.size(); i++) - { - PPCBasicBlockInfo& basicBlockInfo = basicBlockList[i]; - IMLSegment* seg = basicBlockInfo.GetSegmentForInstructionAppend(); - - uint32 ppcInstructionCount = (basicBlockInfo.lastAddress - basicBlockInfo.startAddress + 4) / 4; - cemu_assert_debug(ppcInstructionCount > 0); - - PPCRecompiler_pushBackIMLInstructions(seg, 0, 1); - seg->imlList[0].type = PPCREC_IML_TYPE_MACRO; - seg->imlList[0].operation = PPCREC_IML_MACRO_COUNT_CYCLES; - seg->imlList[0].op_macro.param = ppcInstructionCount; - } - - // generate cycle check instructions - // note: Introduces new segments - for (size_t i = 0; i < basicBlockList.size(); i++) - { - PPCBasicBlockInfo& basicBlockInfo = basicBlockList[i]; - PPCRecompiler_HandleCycleCheckCount(ppcImlGenContext, basicBlockInfo); - } - - // fill in all the basic blocks - // note: This step introduces new segments as is necessary for some instructions - for (size_t i = 0; i < basicBlockList.size(); i++) - { - PPCBasicBlockInfo& basicBlockInfo = basicBlockList[i]; - ppcImlGenContext.currentBasicBlock = &basicBlockInfo; - if (!PPCIMLGen_FillBasicBlock(ppcImlGenContext, basicBlockInfo)) - return false; - 
ppcImlGenContext.currentBasicBlock = nullptr; - } - - // mark segments with unknown jump destination (e.g. BLR and most macros) - PPCRecompiler_SetSegmentsUncertainFlow(ppcImlGenContext); - - // debug - check segment graph -#ifdef CEMU_DEBUG_ASSERT - //for (size_t i = 0; i < basicBlockList.size(); i++) - //{ - // IMLSegment* seg = ppcImlGenContext.segmentList2[i]; - // if (seg->list_prevSegments.empty()) - // { - // cemu_assert_debug(seg->isEnterable); - // } - //} - // debug - check if suffix instructions are at the end of segments and if they are present for branching segments - for (size_t segIndex = 0; segIndex < ppcImlGenContext.segmentList2.size(); segIndex++) - { - IMLSegment* seg = ppcImlGenContext.segmentList2[segIndex]; - IMLSegment* nextSeg = (segIndex+1) < ppcImlGenContext.segmentList2.size() ? ppcImlGenContext.segmentList2[segIndex + 1] : nullptr; - - if (seg->imlList.size() > 0) - { - for (size_t f = 0; f < seg->imlList.size() - 1; f++) - { - if (seg->imlList[f].IsSuffixInstruction()) - { - debug_printf("---------------- SegmentDump (Suffix instruction at wrong pos in segment 0x%x):\n", (int)segIndex); - IMLDebug_Dump(&ppcImlGenContext); - DEBUG_BREAK; - } - } - } - if (seg->nextSegmentBranchTaken) - { - if (!seg->HasSuffixInstruction()) - { - debug_printf("---------------- SegmentDump (NoSuffixInstruction in segment 0x%x):\n", (int)segIndex); - IMLDebug_Dump(&ppcImlGenContext); - DEBUG_BREAK; - } - } - if (seg->nextSegmentBranchNotTaken) - { - // if branch not taken, flow must continue to next segment in sequence - cemu_assert_debug(seg->nextSegmentBranchNotTaken == nextSeg); - } - // more detailed checks based on actual suffix instruction - if (seg->imlList.size() > 0) - { - IMLInstruction* inst = seg->GetLastInstruction(); - if (inst->type == PPCREC_IML_TYPE_MACRO && inst->op_macro.param == PPCREC_IML_MACRO_B_FAR) - { - cemu_assert_debug(!seg->GetBranchTaken()); - cemu_assert_debug(!seg->GetBranchNotTaken()); - } - if (inst->type == 
PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) - { - cemu_assert_debug(seg->GetBranchTaken()); - cemu_assert_debug(seg->GetBranchNotTaken()); - } - if (inst->type == PPCREC_IML_TYPE_CONDITIONAL_JUMP) - { - if (!seg->GetBranchTaken() || !seg->GetBranchNotTaken()) - { - debug_printf("---------------- SegmentDump (Missing branch for conditional jump in segment 0x%x):\n", (int)segIndex); - IMLDebug_Dump(&ppcImlGenContext); - cemu_assert_error(); - } - } - } - segIndex++; - } -#endif - - - // todos: - // - basic block determination should look for the B(L) B(L) pattern. Or maybe just mark every bb without any input segments as an entry segment - - return true; -} - -bool PPCRecompiler_generateIntermediateCode(ppcImlGenContext_t& ppcImlGenContext, PPCRecFunction_t* ppcRecFunc, std::set& entryAddresses, PPCFunctionBoundaryTracker& boundaryTracker) -{ - ppcImlGenContext.boundaryTracker = &boundaryTracker; - if (!PPCRecompiler_GenerateIML(ppcImlGenContext, boundaryTracker, entryAddresses)) - return false; - - // set range - // todo - support non-continuous functions for the range tracking? 
+ //ppcImlGenContext_t ppcImlGenContext = { 0 }; + ppcImlGenContext.functionRef = ppcRecFunc; + // add entire range ppcRecRange_t recRange; recRange.ppcAddress = ppcRecFunc->ppcAddress; recRange.ppcSize = ppcRecFunc->ppcSize; ppcRecFunc->list_ranges.push_back(recRange); - + // process ppc instructions + ppcImlGenContext.currentInstruction = (uint32*)memory_getPointerFromVirtualOffset(ppcRecFunc->ppcAddress); + bool unsupportedInstructionFound = false; + sint32 numPPCInstructions = ppcRecFunc->ppcSize/4; + sint32 unsupportedInstructionCount = 0; + uint32 unsupportedInstructionLastOffset = 0; + uint32* firstCurrentInstruction = ppcImlGenContext.currentInstruction; + uint32* endCurrentInstruction = ppcImlGenContext.currentInstruction + numPPCInstructions; + while(ppcImlGenContext.currentInstruction < endCurrentInstruction) + { + uint32 addressOfCurrentInstruction = (uint32)((uint8*)ppcImlGenContext.currentInstruction - memory_base); + ppcImlGenContext.ppcAddressOfCurrentInstruction = addressOfCurrentInstruction; + ppcImlGenContext.cyclesSinceLastBranch++; + PPCRecompilerImlGen_generateNewInstruction_jumpmark(&ppcImlGenContext, addressOfCurrentInstruction); + + if (entryAddresses.find(addressOfCurrentInstruction) != entryAddresses.end()) + { + // add PPCEnter for addresses that are in entryAddresses + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(&ppcImlGenContext, addressOfCurrentInstruction); + } + else if(ppcImlGenContext.currentInstruction != firstCurrentInstruction) + { + // add PPCEnter mark if code is seemingly unreachable (for example if between two unconditional jump instructions without jump goal) + uint32 opcodeCurrent = PPCRecompiler_getCurrentInstruction(&ppcImlGenContext); + uint32 opcodePrevious = PPCRecompiler_getPreviousInstruction(&ppcImlGenContext); + if( ((opcodePrevious>>26) == 18) && ((opcodeCurrent>>26) == 18) ) + { + // between two B(L) instructions + // todo: for BL only if they are not inlineable + + bool canInlineFunction = false; + if 
((opcodePrevious & PPC_OPC_LK) && (opcodePrevious & PPC_OPC_AA) == 0) + { + uint32 li; + PPC_OPC_TEMPL_I(opcodePrevious, li); + sint32 inlineSize = 0; + if (PPCRecompiler_canInlineFunction(li + addressOfCurrentInstruction - 4, &inlineSize)) + canInlineFunction = true; + } + if( canInlineFunction == false && (opcodePrevious & PPC_OPC_LK) == false) + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(&ppcImlGenContext, addressOfCurrentInstruction); + } + if( ((opcodePrevious>>26) == 19) && PPC_getBits(opcodePrevious, 30, 10) == 528 ) + { + uint32 BO, BI, BD; + PPC_OPC_TEMPL_XL(opcodePrevious, BO, BI, BD); + if( (BO & 16) && (opcodePrevious&PPC_OPC_LK) == 0 ) + { + // after unconditional BCTR instruction + PPCRecompilerImlGen_generateNewInstruction_ppcEnter(&ppcImlGenContext, addressOfCurrentInstruction); + } + } + } + + unsupportedInstructionFound = PPCRecompiler_decodePPCInstruction(&ppcImlGenContext); + if( unsupportedInstructionFound ) + { + unsupportedInstructionCount++; + unsupportedInstructionLastOffset = ppcImlGenContext.ppcAddressOfCurrentInstruction; + unsupportedInstructionFound = false; + //break; + } + } + ppcImlGenContext.ppcAddressOfCurrentInstruction = 0; // reset current instruction offset (any future generated IML instruction will be assigned to ppc address 0) + if( unsupportedInstructionCount > 0 || unsupportedInstructionFound ) + { + // could not compile function + debug_printf("Failed recompile due to unknown instruction at 0x%08x\n", unsupportedInstructionLastOffset); + PPCRecompiler_freeContext(&ppcImlGenContext); + return false; + } + // optimize unused jumpmarks away + // first, flag all jumpmarks as unused + std::map map_jumpMarks; + for(sint32 i=0; isecond->op_jumpmark.flags &= ~PPCREC_IML_OP_FLAG_UNUSED; + } + } + // lastly, remove jumpmarks that still have the unused flag set + sint32 currentImlIndex = 0; + for(sint32 i=0; i end of segment after current instruction + // If we encounter a jumpmark -> end of segment before current 
instruction + // If we encounter ppc_enter -> end of segment before current instruction + if( ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_CJUMP || + (ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_MACRO && (ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_BLR || ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_BLRL || ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_BCTR || ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_BCTRL)) || + (ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_MACRO && (ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_BL)) || + (ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_MACRO && (ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_B_FAR)) || + (ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_MACRO && (ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_LEAVE)) || + (ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_MACRO && (ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_HLE)) || + (ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_MACRO && (ppcImlGenContext.imlList[segmentImlIndex].operation == PPCREC_IML_MACRO_MFTB)) ) + { + // segment ends after current instruction + PPCRecImlSegment_t* ppcRecSegment = PPCRecompiler_generateImlSegment(&ppcImlGenContext); + ppcRecSegment->startOffset = segmentStart; + ppcRecSegment->count = segmentImlIndex-segmentStart+1; + ppcRecSegment->ppcAddress = 0xFFFFFFFF; + segmentStart = segmentImlIndex+1; + } + else if( ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_JUMPMARK || + ppcImlGenContext.imlList[segmentImlIndex].type == PPCREC_IML_TYPE_PPC_ENTER ) + { + // segment ends before current instruction + if( segmentImlIndex > segmentStart ) + { + PPCRecImlSegment_t* ppcRecSegment = 
PPCRecompiler_generateImlSegment(&ppcImlGenContext); + ppcRecSegment->startOffset = segmentStart; + ppcRecSegment->count = segmentImlIndex-segmentStart; + ppcRecSegment->ppcAddress = 0xFFFFFFFF; + segmentStart = segmentImlIndex; + } + } + segmentImlIndex++; + } + if( segmentImlIndex != segmentStart ) + { + // final segment + PPCRecImlSegment_t* ppcRecSegment = PPCRecompiler_generateImlSegment(&ppcImlGenContext); + ppcRecSegment->startOffset = segmentStart; + ppcRecSegment->count = segmentImlIndex-segmentStart; + ppcRecSegment->ppcAddress = 0xFFFFFFFF; + segmentStart = segmentImlIndex; + } + // move iml instructions into the segments + for(sint32 s=0; sstartOffset; + uint32 imlCount = ppcImlGenContext.segmentList[s]->count; + if( imlCount > 0 ) + { + ppcImlGenContext.segmentList[s]->imlListSize = imlCount + 4; + ppcImlGenContext.segmentList[s]->imlList = (PPCRecImlInstruction_t*)malloc(sizeof(PPCRecImlInstruction_t)*ppcImlGenContext.segmentList[s]->imlListSize); + ppcImlGenContext.segmentList[s]->imlListCount = imlCount; + memcpy(ppcImlGenContext.segmentList[s]->imlList, ppcImlGenContext.imlList+imlStartIndex, sizeof(PPCRecImlInstruction_t)*imlCount); + } + else + { + // empty segments are allowed so we can handle multiple PPC entry addresses pointing to the same code + ppcImlGenContext.segmentList[s]->imlList = NULL; + ppcImlGenContext.segmentList[s]->imlListSize = 0; + ppcImlGenContext.segmentList[s]->imlListCount = 0; + } + ppcImlGenContext.segmentList[s]->startOffset = 9999999; + ppcImlGenContext.segmentList[s]->count = 9999999; + } + // clear segment-independent iml list + free(ppcImlGenContext.imlList); + ppcImlGenContext.imlList = NULL; + ppcImlGenContext.imlListCount = 999999; // set to high number to force crash in case old code still uses ppcImlGenContext.imlList + // calculate PPC address of each segment based on iml instructions inside that segment (we need this info to calculate how many cpu cycles each segment takes) + for(sint32 s=0; simlListCount; 
i++) + { + if( ppcImlGenContext.segmentList[s]->imlList[i].associatedPPCAddress == 0 ) + continue; + //if( ppcImlGenContext.segmentList[s]->imlList[i].type == PPCREC_IML_TYPE_JUMPMARK || ppcImlGenContext.segmentList[s]->imlList[i].type == PPCREC_IML_TYPE_NO_OP ) + // continue; // jumpmarks and no-op instructions must not affect segment ppc address range + segmentPPCAddrMin = std::min(ppcImlGenContext.segmentList[s]->imlList[i].associatedPPCAddress, segmentPPCAddrMin); + segmentPPCAddrMax = std::max(ppcImlGenContext.segmentList[s]->imlList[i].associatedPPCAddress, segmentPPCAddrMax); + } + if( segmentPPCAddrMin != 0xFFFFFFFF ) + { + ppcImlGenContext.segmentList[s]->ppcAddrMin = segmentPPCAddrMin; + ppcImlGenContext.segmentList[s]->ppcAddrMax = segmentPPCAddrMax; + } + else + { + ppcImlGenContext.segmentList[s]->ppcAddrMin = 0; + ppcImlGenContext.segmentList[s]->ppcAddrMax = 0; + } + } + // certain instructions can change the segment state + // ppcEnter instruction marks a segment as enterable (BL, BCTR, etc. instructions can enter at this location from outside) + // jumpmarks mark the segment as a jump destination (within the same function) + for(sint32 s=0; simlListCount > 0 ) + { + if( ppcImlGenContext.segmentList[s]->imlList[0].type == PPCREC_IML_TYPE_PPC_ENTER ) + { + // mark segment as enterable + if( ppcImlGenContext.segmentList[s]->isEnterable ) + assert_dbg(); // should not happen? 
+ ppcImlGenContext.segmentList[s]->isEnterable = true; + ppcImlGenContext.segmentList[s]->enterPPCAddress = ppcImlGenContext.segmentList[s]->imlList[0].op_ppcEnter.ppcAddress; + // remove ppc_enter instruction + ppcImlGenContext.segmentList[s]->imlList[0].type = PPCREC_IML_TYPE_NO_OP; + ppcImlGenContext.segmentList[s]->imlList[0].crRegister = PPC_REC_INVALID_REGISTER; + ppcImlGenContext.segmentList[s]->imlList[0].associatedPPCAddress = 0; + } + else if( ppcImlGenContext.segmentList[s]->imlList[0].type == PPCREC_IML_TYPE_JUMPMARK ) + { + // mark segment as jump destination + if( ppcImlGenContext.segmentList[s]->isJumpDestination ) + assert_dbg(); // should not happen? + ppcImlGenContext.segmentList[s]->isJumpDestination = true; + ppcImlGenContext.segmentList[s]->jumpDestinationPPCAddress = ppcImlGenContext.segmentList[s]->imlList[0].op_jumpmark.address; + // remove jumpmark instruction + ppcImlGenContext.segmentList[s]->imlList[0].type = PPCREC_IML_TYPE_NO_OP; + ppcImlGenContext.segmentList[s]->imlList[0].crRegister = PPC_REC_INVALID_REGISTER; + ppcImlGenContext.segmentList[s]->imlList[0].associatedPPCAddress = 0; + } + else + break; + } + } + // the first segment is always enterable as the recompiled functions entrypoint + ppcImlGenContext.segmentList[0]->isEnterable = true; + ppcImlGenContext.segmentList[0]->enterPPCAddress = ppcImlGenContext.functionRef->ppcAddress; + + // link segments for further inter-segment optimization + PPCRecompilerIML_linkSegments(&ppcImlGenContext); + + // optimization pass - replace segments with conditional MOVs if possible + for (sint32 s = 0; s < ppcImlGenContext.segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext.segmentList[s]; + if (imlSegment->nextSegmentBranchNotTaken == NULL || imlSegment->nextSegmentBranchTaken == NULL) + continue; // not a branching segment + PPCRecImlInstruction_t* lastInstruction = PPCRecompilerIML_getLastInstruction(imlSegment); + if (lastInstruction->type != 
PPCREC_IML_TYPE_CJUMP || lastInstruction->op_conditionalJump.crRegisterIndex != 0) + continue; + PPCRecImlSegment_t* conditionalSegment = imlSegment->nextSegmentBranchNotTaken; + PPCRecImlSegment_t* finalSegment = imlSegment->nextSegmentBranchTaken; + if(imlSegment->nextSegmentBranchTaken != imlSegment->nextSegmentBranchNotTaken->nextSegmentBranchNotTaken) + continue; + if (imlSegment->nextSegmentBranchNotTaken->imlListCount > 4) + continue; + if(conditionalSegment->list_prevSegments.size() != 1) + continue; // the reduced segment must not be the target of any other branch + if(conditionalSegment->isEnterable) + continue; + // check if the segment contains only iml instructions that can be turned into conditional moves (Value assignment, register assignment) + bool canReduceSegment = true; + for (sint32 f = 0; f < conditionalSegment->imlListCount; f++) + { + PPCRecImlInstruction_t* imlInstruction = conditionalSegment->imlList+f; + if( imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_ASSIGN) + continue; + // todo: Register to register copy + canReduceSegment = false; + break; + } + + if( canReduceSegment == false ) + continue; + + // remove the branch instruction + uint8 branchCond_crRegisterIndex = lastInstruction->op_conditionalJump.crRegisterIndex; + uint8 branchCond_crBitIndex = lastInstruction->op_conditionalJump.crBitIndex; + bool branchCond_bitMustBeSet = lastInstruction->op_conditionalJump.bitMustBeSet; + + PPCRecompilerImlGen_generateNewInstruction_noOp(&ppcImlGenContext, lastInstruction); + + // append conditional moves based on branch condition + for (sint32 f = 0; f < conditionalSegment->imlListCount; f++) + { + PPCRecImlInstruction_t* imlInstruction = conditionalSegment->imlList + f; + if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_ASSIGN) + PPCRecompilerImlGen_generateNewInstruction_conditional_r_s32(&ppcImlGenContext, 
PPCRecompiler_appendInstruction(imlSegment), PPCREC_IML_OP_ASSIGN, imlInstruction->op_r_immS32.registerIndex, imlInstruction->op_r_immS32.immS32, branchCond_crRegisterIndex, branchCond_crBitIndex, !branchCond_bitMustBeSet); + else + assert_dbg(); + } + // update segment links + // source segment: imlSegment, conditional/removed segment: conditionalSegment, final segment: finalSegment + PPCRecompilerIML_removeLink(imlSegment, conditionalSegment); + PPCRecompilerIML_removeLink(imlSegment, finalSegment); + PPCRecompilerIML_removeLink(conditionalSegment, finalSegment); + PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, finalSegment); + // remove all instructions from conditional segment + conditionalSegment->imlListCount = 0; + + // if possible, merge imlSegment with finalSegment + if (finalSegment->isEnterable == false && finalSegment->list_prevSegments.size() == 1) + { + // todo: Clean this up and move into separate function PPCRecompilerIML_mergeSegments() + PPCRecompilerIML_removeLink(imlSegment, finalSegment); + if (finalSegment->nextSegmentBranchNotTaken) + { + PPCRecImlSegment_t* tempSegment = finalSegment->nextSegmentBranchNotTaken; + PPCRecompilerIML_removeLink(finalSegment, tempSegment); + PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, tempSegment); + } + if (finalSegment->nextSegmentBranchTaken) + { + PPCRecImlSegment_t* tempSegment = finalSegment->nextSegmentBranchTaken; + PPCRecompilerIML_removeLink(finalSegment, tempSegment); + PPCRecompilerIml_setLinkBranchTaken(imlSegment, tempSegment); + } + // copy IML instructions + for (sint32 f = 0; f < finalSegment->imlListCount; f++) + { + memcpy(PPCRecompiler_appendInstruction(imlSegment), finalSegment->imlList + f, sizeof(PPCRecImlInstruction_t)); + } + finalSegment->imlListCount = 0; + + //PPCRecompiler_dumpIML(ppcRecFunc, &ppcImlGenContext); + } + + // todo: If possible, merge with the segment following conditionalSegment (merging is only possible if the segment is not an entry point or has no other 
jump sources) + } + + // insert cycle counter instruction in every segment that has a cycle count greater zero + for(sint32 s=0; sppcAddrMin == 0 ) + continue; + // count number of PPC instructions in segment + // note: This algorithm correctly counts inlined functions but it doesn't count NO-OP instructions like ISYNC + uint32 lastPPCInstAddr = 0; + uint32 ppcCount2 = 0; + for (sint32 i = 0; i < imlSegment->imlListCount; i++) + { + if (imlSegment->imlList[i].associatedPPCAddress == 0) + continue; + if (imlSegment->imlList[i].associatedPPCAddress == lastPPCInstAddr) + continue; + lastPPCInstAddr = imlSegment->imlList[i].associatedPPCAddress; + ppcCount2++; + } + //uint32 ppcCount = imlSegment->ppcAddrMax-imlSegment->ppcAddrMin+4; -> No longer works with inlined functions + uint32 cycleCount = ppcCount2;// ppcCount / 4; + if( cycleCount > 0 ) + { + PPCRecompiler_pushBackIMLInstructions(imlSegment, 0, 1); + imlSegment->imlList[0].type = PPCREC_IML_TYPE_MACRO; + imlSegment->imlList[0].crRegister = PPC_REC_INVALID_REGISTER; + imlSegment->imlList[0].operation = PPCREC_IML_MACRO_COUNT_CYCLES; + imlSegment->imlList[0].op_macro.param = cycleCount; + } + } + + // find segments that have a (conditional) jump instruction that points in reverse direction of code flow + // for these segments there is a risk that the recompiler could get trapped in an infinite busy loop. + // todo: We should do a loop-detection prepass where we flag segments that are actually in a loop. We can then use this information below to avoid generating the scheduler-exit code for segments that aren't actually in a loop despite them referencing an earlier segment (which could be an exit segment for example) + uint32 currentLoopEscapeJumpMarker = 0xFF000000; // start in an area where no valid code can be located + for(sint32 s=0; sppcAddrMin which isn't really reliable. (We already had a problem where function inlining would generate falsified segment ranges by omitting the branch instruction). 
Find a better solution (use jumpmark/enterable offsets?) + PPCRecImlSegment_t* imlSegment = ppcImlGenContext.segmentList[s]; + if( imlSegment->imlListCount == 0 ) + continue; + if (imlSegment->imlList[imlSegment->imlListCount - 1].type != PPCREC_IML_TYPE_CJUMP || imlSegment->imlList[imlSegment->imlListCount - 1].op_conditionalJump.jumpmarkAddress > imlSegment->ppcAddrMin) + continue; + if (imlSegment->imlList[imlSegment->imlListCount - 1].type != PPCREC_IML_TYPE_CJUMP || imlSegment->imlList[imlSegment->imlListCount - 1].op_conditionalJump.jumpAccordingToSegment) + continue; + // exclude non-infinite tight loops + if (PPCRecompilerImlAnalyzer_isTightFiniteLoop(imlSegment)) + continue; + // potential loop segment found, split this segment into four: + // P0: This segment checks if the remaining cycles counter is still above zero. If yes, it jumps to segment P2 (it's also the jump destination for other segments) + // P1: This segment consists only of a single ppc_leave instruction and is usually skipped. Register unload instructions are later inserted here. + // P2: This segment contains the iml instructions of the original segment + // PEntry: This segment is used to enter the function, it jumps to P0 + // All segments are considered to be part of the same PPC instruction range + // The first segment also retains the jump destination and enterable properties from the original segment. 
+ //debug_printf("--- Insert cycle counter check ---\n"); + //PPCRecompiler_dumpIML(ppcRecFunc, &ppcImlGenContext); + + PPCRecompilerIml_insertSegments(&ppcImlGenContext, s, 2); + imlSegment = NULL; + PPCRecImlSegment_t* imlSegmentP0 = ppcImlGenContext.segmentList[s+0]; + PPCRecImlSegment_t* imlSegmentP1 = ppcImlGenContext.segmentList[s+1]; + PPCRecImlSegment_t* imlSegmentP2 = ppcImlGenContext.segmentList[s+2]; + // create entry point segment + PPCRecompilerIml_insertSegments(&ppcImlGenContext, ppcImlGenContext.segmentListCount, 1); + PPCRecImlSegment_t* imlSegmentPEntry = ppcImlGenContext.segmentList[ppcImlGenContext.segmentListCount-1]; + // relink segments + PPCRecompilerIML_relinkInputSegment(imlSegmentP2, imlSegmentP0); + PPCRecompilerIml_setLinkBranchNotTaken(imlSegmentP0, imlSegmentP1); + PPCRecompilerIml_setLinkBranchTaken(imlSegmentP0, imlSegmentP2); + PPCRecompilerIml_setLinkBranchTaken(imlSegmentPEntry, imlSegmentP0); + // update segments + uint32 enterPPCAddress = imlSegmentP2->ppcAddrMin; + if (imlSegmentP2->isEnterable) + enterPPCAddress = imlSegmentP2->enterPPCAddress; + imlSegmentP0->ppcAddress = 0xFFFFFFFF; + imlSegmentP1->ppcAddress = 0xFFFFFFFF; + imlSegmentP2->ppcAddress = 0xFFFFFFFF; + cemu_assert_debug(imlSegmentP2->ppcAddrMin != 0); + // move segment properties from segment P2 to segment P0 + imlSegmentP0->isJumpDestination = imlSegmentP2->isJumpDestination; + imlSegmentP0->jumpDestinationPPCAddress = imlSegmentP2->jumpDestinationPPCAddress; + imlSegmentP0->isEnterable = false; + //imlSegmentP0->enterPPCAddress = imlSegmentP2->enterPPCAddress; + imlSegmentP0->ppcAddrMin = imlSegmentP2->ppcAddrMin; + imlSegmentP0->ppcAddrMax = imlSegmentP2->ppcAddrMax; + imlSegmentP2->isJumpDestination = false; + imlSegmentP2->jumpDestinationPPCAddress = 0; + imlSegmentP2->isEnterable = false; + imlSegmentP2->enterPPCAddress = 0; + imlSegmentP2->ppcAddrMin = 0; + imlSegmentP2->ppcAddrMax = 0; + // setup enterable segment + if( enterPPCAddress != 0 && 
enterPPCAddress != 0xFFFFFFFF ) + { + imlSegmentPEntry->isEnterable = true; + imlSegmentPEntry->ppcAddress = enterPPCAddress; + imlSegmentPEntry->enterPPCAddress = enterPPCAddress; + } + // assign new jumpmark to segment P2 + imlSegmentP2->isJumpDestination = true; + imlSegmentP2->jumpDestinationPPCAddress = currentLoopEscapeJumpMarker; + currentLoopEscapeJumpMarker++; + // create ppc_leave instruction in segment P1 + PPCRecompiler_pushBackIMLInstructions(imlSegmentP1, 0, 1); + imlSegmentP1->imlList[0].type = PPCREC_IML_TYPE_MACRO; + imlSegmentP1->imlList[0].operation = PPCREC_IML_MACRO_LEAVE; + imlSegmentP1->imlList[0].crRegister = PPC_REC_INVALID_REGISTER; + imlSegmentP1->imlList[0].op_macro.param = imlSegmentP0->ppcAddrMin; + imlSegmentP1->imlList[0].associatedPPCAddress = imlSegmentP0->ppcAddrMin; + // create cycle-based conditional instruction in segment P0 + PPCRecompiler_pushBackIMLInstructions(imlSegmentP0, 0, 1); + imlSegmentP0->imlList[0].type = PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK; + imlSegmentP0->imlList[0].operation = 0; + imlSegmentP0->imlList[0].crRegister = PPC_REC_INVALID_REGISTER; + imlSegmentP0->imlList[0].op_conditionalJump.jumpmarkAddress = imlSegmentP2->jumpDestinationPPCAddress; + imlSegmentP0->imlList[0].associatedPPCAddress = imlSegmentP0->ppcAddrMin; + // jump instruction for PEntry + PPCRecompiler_pushBackIMLInstructions(imlSegmentPEntry, 0, 1); + PPCRecompilerImlGen_generateNewInstruction_jumpSegment(&ppcImlGenContext, imlSegmentPEntry->imlList + 0); + + // skip the newly created segments + s += 2; + } + + // isolate entry points from function flow (enterable segments must not be the target of any other segment) + // this simplifies logic during register allocation + PPCRecompilerIML_isolateEnterableSegments(&ppcImlGenContext); + + // if GQRs can be predicted, optimize PSQ load/stores + PPCRecompiler_optimizePSQLoadAndStore(&ppcImlGenContext); + + // count number of used registers + uint32 numLoadedFPRRegisters = 0; + for(uint32 i=0; i<255; 
i++) + { + if( ppcImlGenContext.mappedFPRRegister[i] ) + numLoadedFPRRegisters++; + } + + // insert name store instructions at the end of each segment but before branch instructions + for(sint32 s=0; simlListCount == 0 ) + continue; // ignore empty segments + // analyze segment for register usage + PPCImlOptimizerUsedRegisters_t registersUsed; + for(sint32 i=0; iimlListCount; i++) + { + PPCRecompiler_checkRegisterUsage(&ppcImlGenContext, imlSegment->imlList+i, ®istersUsed); + //PPCRecompilerImlGen_findRegisterByMappedName(ppcImlGenContext, registersUsed.readGPR1); + sint32 accessedTempReg[5]; + // intermediate FPRs + accessedTempReg[0] = registersUsed.readFPR1; + accessedTempReg[1] = registersUsed.readFPR2; + accessedTempReg[2] = registersUsed.readFPR3; + accessedTempReg[3] = registersUsed.readFPR4; + accessedTempReg[4] = registersUsed.writtenFPR1; + for(sint32 f=0; f<5; f++) + { + if( accessedTempReg[f] == -1 ) + continue; + uint32 regName = ppcImlGenContext.mappedFPRRegister[accessedTempReg[f]]; + if( regName >= PPCREC_NAME_FPR0 && regName < PPCREC_NAME_FPR0+32 ) + { + imlSegment->ppcFPRUsed[regName - PPCREC_NAME_FPR0] = true; + } + } + } + } + + // merge certain float load+store patterns (must happen before FPR register remapping) + PPCRecompiler_optimizeDirectFloatCopies(&ppcImlGenContext); + // delay byte swapping for certain load+store patterns + PPCRecompiler_optimizeDirectIntegerCopies(&ppcImlGenContext); + + if (numLoadedFPRRegisters > 0) + { + if (PPCRecompiler_manageFPRRegisters(&ppcImlGenContext) == false) + { + PPCRecompiler_freeContext(&ppcImlGenContext); + return false; + } + } + + PPCRecompilerImm_allocateRegisters(&ppcImlGenContext); + + // remove redundant name load and store instructions + PPCRecompiler_reorderConditionModifyInstructions(&ppcImlGenContext); + PPCRecompiler_removeRedundantCRUpdates(&ppcImlGenContext); return true; } diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGenFPU.cpp 
b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGenFPU.cpp index 6e602b47..1efc41b8 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGenFPU.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGenFPU.cpp @@ -1,84 +1,183 @@ -#include "Cafe/HW/Espresso/EspressoISA.h" #include "../Interpreter/PPCInterpreterInternal.h" #include "PPCRecompiler.h" #include "PPCRecompilerIml.h" #include "Cafe/GameProfile/GameProfile.h" -#include "IML/IML.h" -ATTR_MS_ABI double frsqrte_espresso(double input); -ATTR_MS_ABI double fres_espresso(double input); - -IMLReg _GetRegCR(ppcImlGenContext_t* ppcImlGenContext, uint8 crReg, uint8 crBit); - -#define DefinePS0(name, regIndex) IMLReg name = _GetFPRRegPS0(ppcImlGenContext, regIndex); -#define DefinePS1(name, regIndex) IMLReg name = _GetFPRRegPS1(ppcImlGenContext, regIndex); -#define DefinePSX(name, regIndex, isPS1) IMLReg name = isPS1 ? _GetFPRRegPS1(ppcImlGenContext, regIndex) : _GetFPRRegPS0(ppcImlGenContext, regIndex); -#define DefineTempFPR(name, index) IMLReg name = _GetFPRTemp(ppcImlGenContext, index); - -IMLReg _GetFPRRegPS0(ppcImlGenContext_t* ppcImlGenContext, uint32 regIndex) +void PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory, sint32 immS32, uint32 mode, bool switchEndian, uint8 registerGQR = PPC_REC_INVALID_REGISTER) { - cemu_assert_debug(regIndex < 32); - return PPCRecompilerImlGen_LookupReg(ppcImlGenContext, PPCREC_NAME_FPR_HALF + regIndex * 2 + 0, IMLRegFormat::F64); + // load from memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_LOAD; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->operation = 0; + imlInstruction->op_storeLoad.registerData = registerDestination; + imlInstruction->op_storeLoad.registerMem = registerMemory; + imlInstruction->op_storeLoad.registerGQR = 
registerGQR; + imlInstruction->op_storeLoad.immS32 = immS32; + imlInstruction->op_storeLoad.mode = mode; + imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; } -IMLReg _GetFPRRegPS1(ppcImlGenContext_t* ppcImlGenContext, uint32 regIndex) +void PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory1, uint8 registerMemory2, uint32 mode, bool switchEndian, uint8 registerGQR = PPC_REC_INVALID_REGISTER) { - cemu_assert_debug(regIndex < 32); - return PPCRecompilerImlGen_LookupReg(ppcImlGenContext, PPCREC_NAME_FPR_HALF + regIndex * 2 + 1, IMLRegFormat::F64); + // load from memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_LOAD_INDEXED; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->operation = 0; + imlInstruction->op_storeLoad.registerData = registerDestination; + imlInstruction->op_storeLoad.registerMem = registerMemory1; + imlInstruction->op_storeLoad.registerMem2 = registerMemory2; + imlInstruction->op_storeLoad.registerGQR = registerGQR; + imlInstruction->op_storeLoad.immS32 = 0; + imlInstruction->op_storeLoad.mode = mode; + imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; } -IMLReg _GetFPRTemp(ppcImlGenContext_t* ppcImlGenContext, uint32 index) +void PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext_t* ppcImlGenContext, uint8 registerSource, uint8 registerMemory, sint32 immS32, uint32 mode, bool switchEndian, uint8 registerGQR = PPC_REC_INVALID_REGISTER) { - cemu_assert_debug(index < 4); - return PPCRecompilerImlGen_LookupReg(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0 + index, IMLRegFormat::F64); + // store to memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_STORE; + 
imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->operation = 0; + imlInstruction->op_storeLoad.registerData = registerSource; + imlInstruction->op_storeLoad.registerMem = registerMemory; + imlInstruction->op_storeLoad.registerGQR = registerGQR; + imlInstruction->op_storeLoad.immS32 = immS32; + imlInstruction->op_storeLoad.mode = mode; + imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; } -IMLReg _GetFPRReg(ppcImlGenContext_t* ppcImlGenContext, uint32 regIndex, bool selectPS1) +void PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext_t* ppcImlGenContext, uint8 registerSource, uint8 registerMemory1, uint8 registerMemory2, sint32 immS32, uint32 mode, bool switchEndian, uint8 registerGQR = 0) { - cemu_assert_debug(regIndex < 32); - return PPCRecompilerImlGen_LookupReg(ppcImlGenContext, PPCREC_NAME_FPR_HALF + regIndex * 2 + (selectPS1 ? 1 : 0), IMLRegFormat::F64); + // store to memory + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_STORE_INDEXED; + imlInstruction->crRegister = PPC_REC_INVALID_REGISTER; + imlInstruction->operation = 0; + imlInstruction->op_storeLoad.registerData = registerSource; + imlInstruction->op_storeLoad.registerMem = registerMemory1; + imlInstruction->op_storeLoad.registerMem2 = registerMemory2; + imlInstruction->op_storeLoad.registerGQR = registerGQR; + imlInstruction->op_storeLoad.immS32 = immS32; + imlInstruction->op_storeLoad.mode = mode; + imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian; } -void PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext_t* ppcImlGenContext, IMLReg fprRegister, bool flushDenormals=false) +void PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext_t* ppcImlGenContext, sint32 operation, uint8 registerResult, uint8 registerOperand, sint32 crRegister=PPC_REC_INVALID_REGISTER) { - 
ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, fprRegister); + // fpr OP fpr + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_R_R; + imlInstruction->operation = operation; + imlInstruction->op_fpr_r_r.registerResult = registerResult; + imlInstruction->op_fpr_r_r.registerOperand = registerOperand; + imlInstruction->crRegister = crRegister; + imlInstruction->op_fpr_r_r.flags = 0; +} + +void PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext_t* ppcImlGenContext, sint32 operation, uint8 registerResult, uint8 registerOperand1, uint8 registerOperand2, sint32 crRegister=PPC_REC_INVALID_REGISTER) +{ + // fpr = OP (fpr,fpr) + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_R_R_R; + imlInstruction->operation = operation; + imlInstruction->op_fpr_r_r_r.registerResult = registerResult; + imlInstruction->op_fpr_r_r_r.registerOperandA = registerOperand1; + imlInstruction->op_fpr_r_r_r.registerOperandB = registerOperand2; + imlInstruction->crRegister = crRegister; + imlInstruction->op_fpr_r_r_r.flags = 0; +} + +void PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext_t* ppcImlGenContext, sint32 operation, uint8 registerResult, uint8 registerOperandA, uint8 registerOperandB, uint8 registerOperandC, sint32 crRegister=PPC_REC_INVALID_REGISTER) +{ + // fpr = OP (fpr,fpr,fpr) + PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_R_R_R_R; + imlInstruction->operation = operation; + imlInstruction->op_fpr_r_r_r_r.registerResult = registerResult; + imlInstruction->op_fpr_r_r_r_r.registerOperandA = registerOperandA; + imlInstruction->op_fpr_r_r_r_r.registerOperandB = registerOperandB; + 
imlInstruction->op_fpr_r_r_r_r.registerOperandC = registerOperandC; + imlInstruction->crRegister = crRegister; + imlInstruction->op_fpr_r_r_r_r.flags = 0; +} + +void PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 operation, uint8 registerResult, sint32 crRegister) +{ + // OP (fpr) + if(imlInstruction == NULL) + imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext); + imlInstruction->type = PPCREC_IML_TYPE_FPR_R; + imlInstruction->operation = operation; + imlInstruction->op_fpr_r.registerResult = registerResult; + imlInstruction->crRegister = crRegister; +} + +/* + * Rounds the bottom double to single precision (if single precision accuracy is emulated) + */ +void PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext_t* ppcImlGenContext, uint32 fprRegister, bool flushDenormals=false) +{ + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL, PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, fprRegister); if( flushDenormals ) assert_dbg(); } -bool PPCRecompilerImlGen_LFS_LFSU_LFD_LFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate, bool isDouble) +/* + * Rounds pair of doubles to single precision (if single precision accuracy is emulated) + */ +void PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext_t* ppcImlGenContext, uint32 fprRegister, bool flushDenormals=false) +{ + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL, PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR, fprRegister); + if( flushDenormals ) + assert_dbg(); +} + +bool PPCRecompilerImlGen_LFS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rA, frD; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); - IMLReg gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - if (withUpdate) + // get memory gpr register index + uint32 gprRegister 
= PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( ppcImlGenContext->LSQE ) { - // add imm to memory register - cemu_assert_debug(rA != 0); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, gprRegister, gprRegister, (sint32)imm); - imm = 0; // set imm to 0 so we dont add it twice - } - DefinePS0(fpPs0, frD); - if (isDouble) - { - // LFD/LFDU - ppcImlGenContext->emitInst().make_fpr_r_memory(fpPs0, gprRegister, imm, PPCREC_FPR_LD_MODE_DOUBLE, true); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true); } else { - // LFS/LFSU - ppcImlGenContext->emitInst().make_fpr_r_memory(fpPs0, gprRegister, imm, PPCREC_FPR_LD_MODE_SINGLE, true); - if( ppcImlGenContext->LSQE ) - { - DefinePS1(fpPs1, frD); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fpPs1, fpPs0); - } + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true); } return true; } -bool PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate, bool isDouble) +bool PPCRecompilerImlGen_LFSU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, 
PPCREC_NAME_FPR0+frD); + if( ppcImlGenContext->LSQE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true); + } + return true; +} + +bool PPCRecompilerImlGen_LFSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rA, frD, rB; PPC_OPC_TEMPL_X(opcode, frD, rA, rB); @@ -88,53 +187,154 @@ bool PPCRecompilerImlGen_LFSX_LFSUX_LFDX_LFDUX(ppcImlGenContext_t* ppcImlGenCont return false; } // get memory gpr registers - IMLReg gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - IMLReg gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); - if (withUpdate) - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, gprRegister1, gprRegister1, gprRegister2); - DefinePS0(fpPs0, frD); - if (isDouble) + uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( ppcImlGenContext->LSQE ) { - if (withUpdate) - ppcImlGenContext->emitInst().make_fpr_r_memory(fpPs0, gprRegister1, 0, PPCREC_FPR_LD_MODE_DOUBLE, true); - else - ppcImlGenContext->emitInst().make_fpr_r_memory_indexed(fpPs0, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_DOUBLE, true); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true); } else { - if (withUpdate) - ppcImlGenContext->emitInst().make_fpr_r_memory( fpPs0, gprRegister1, 0, PPCREC_FPR_LD_MODE_SINGLE, true); - else - 
ppcImlGenContext->emitInst().make_fpr_r_memory_indexed( fpPs0, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_SINGLE, true); - if( ppcImlGenContext->LSQE ) - { - DefinePS1(fpPs1, frD); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fpPs1, fpPs0); - } + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true); } return true; } -bool PPCRecompilerImlGen_STFS_STFSU_STFD_STFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate, bool isDouble) +bool PPCRecompilerImlGen_LFSUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD, rB; + PPC_OPC_TEMPL_X(opcode, frD, rA, rB); + if( rA == 0 ) + { + debugBreakpoint(); + return false; + } + // get memory gpr registers + uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // add rB to rA (if rA != 0) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegister1, gprRegister2); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( ppcImlGenContext->LSQE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true); + } + return true; +} + +bool PPCRecompilerImlGen_LFD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rA, frD; uint32 imm; PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); - IMLReg gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - DefinePS0(fpPs0, frD); - if (withUpdate) + 
if( rA == 0 ) { - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, gprRegister, gprRegister, (sint32)imm); - imm = 0; + assert_dbg(); } - if (isDouble) - ppcImlGenContext->emitInst().make_fpr_memory_r(fpPs0, gprRegister, imm, PPCREC_FPR_ST_MODE_DOUBLE, true); - else - ppcImlGenContext->emitInst().make_fpr_memory_r(fpPs0, gprRegister, imm, PPCREC_FPR_ST_MODE_SINGLE, true); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true); return true; } -bool PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool hasUpdate, bool isDouble) +bool PPCRecompilerImlGen_LFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); + if( rA == 0 ) + { + assert_dbg(); + } + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // emit load iml + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_LFDX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD, rB; + PPC_OPC_TEMPL_X(opcode, frD, rA, rB); + if( rA == 0 ) + { + 
debugBreakpoint(); + return false; + } + // get memory gpr registers + uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_LFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD, rB; + PPC_OPC_TEMPL_X(opcode, frD, rA, rB); + if( rA == 0 ) + { + debugBreakpoint(); + return false; + } + // get memory gpr registers + uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // add rB to rA (if rA != 0) + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegister1, gprRegister2); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_STFS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + + 
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_STFSU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_STFSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 rA, frS, rB; PPC_OPC_TEMPL_X(opcode, frS, rA, rB); @@ -144,27 +344,103 @@ bool PPCRecompilerImlGen_STFSX_STFSUX_STFDX_STFDUX(ppcImlGenContext_t* ppcImlGen return false; } // get memory gpr registers - IMLReg gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - IMLReg gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); - if (hasUpdate) - { - ppcImlGenContext->emitInst().make_r_r_r(PPCREC_IML_OP_ADD, gprRegister1, gprRegister1, gprRegister2); - } - DefinePS0(fpPs0, frS); - auto mode = isDouble ? 
PPCREC_FPR_ST_MODE_DOUBLE : PPCREC_FPR_ST_MODE_SINGLE; + uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS); if( ppcImlGenContext->LSQE ) { - if (hasUpdate) - ppcImlGenContext->emitInst().make_fpr_memory_r(fpPs0, gprRegister1, 0, mode, true); - else - ppcImlGenContext->emitInst().make_fpr_memory_r_indexed(fpPs0, gprRegister1, gprRegister2, 0, mode, true); + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true); } else { - if (hasUpdate) - ppcImlGenContext->emitInst().make_fpr_memory_r(fpPs0, gprRegister1, 0, mode, true); - else - ppcImlGenContext->emitInst().make_fpr_memory_r_indexed(fpPs0, gprRegister1, gprRegister2, 0, mode, true); + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true); + } + return true; +} + + +bool PPCRecompilerImlGen_STFSUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frS, rB; + PPC_OPC_TEMPL_X(opcode, frS, rA, rB); + if( rA == 0 ) + { + debugBreakpoint(); + return false; + } + // get memory gpr registers + uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS); + // calculate EA in rA + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegister1, gprRegister2); + + 
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_STFD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); + if( rA == 0 ) + { + debugBreakpoint(); + return false; + } + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_STFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frD; + uint32 imm; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm); + if( rA == 0 ) + { + debugBreakpoint(); + return false; + } + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true); + return true; +} + +bool PPCRecompilerImlGen_STFDX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 rA, frS, rB; + PPC_OPC_TEMPL_X(opcode, frS, rA, rB); + if( rA == 0 ) + { + debugBreakpoint(); + return false; + } + // get memory gpr registers + uint32 gprRegister1 = 
PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS); + if( ppcImlGenContext->LSQE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true); + } + else + { + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true); } return true; } @@ -173,24 +449,26 @@ bool PPCRecompilerImlGen_STFIWX(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 rA, frS, rB; PPC_OPC_TEMPL_X(opcode, frS, rA, rB); - DefinePS0(fpPs0, frS); - IMLReg gprRegister1; - IMLReg gprRegister2; + // get memory gpr registers + uint32 gprRegister1; + uint32 gprRegister2; if( rA != 0 ) { - gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); + gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false); + gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); } else { // rA is not used - gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB); - gprRegister2 = IMLREG_INVALID; + gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false); + gprRegister2 = 0; } + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS); if( rA != 0 ) - ppcImlGenContext->emitInst().make_fpr_memory_r_indexed(fpPs0, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_UI32_FROM_PS0, true); + 
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_UI32_FROM_PS0, true); else - ppcImlGenContext->emitInst().make_fpr_memory_r(fpPs0, gprRegister1, 0, PPCREC_FPR_ST_MODE_UI32_FROM_PS0, true); + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_ST_MODE_UI32_FROM_PS0, true); return true; } @@ -199,10 +477,13 @@ bool PPCRecompilerImlGen_FADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); PPC_ASSERT(frC==0); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_ADD, fprD, fprA, fprB); + + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB); return true; } @@ -211,10 +492,13 @@ bool PPCRecompilerImlGen_FSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); PPC_ASSERT(frC==0); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_SUB, fprD, fprA, fprB); + + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // 
subtract bottom double of frB from bottom double of frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB); return true; } @@ -229,14 +513,15 @@ bool PPCRecompilerImlGen_FMUL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod frA = frC; frC = temp; } - DefinePS0(fprA, frA); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1 // multiply bottom double of frD with bottom double of frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprD, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC); return true; } @@ -245,25 +530,27 @@ bool PPCRecompilerImlGen_FDIV(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 frD, frA, frB, frC_unused; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC_unused); PPC_ASSERT(frB==0); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( frB == 
frD && frA != frB ) { - DefineTempFPR(fprTemp, 0); + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); // move frA to temporary register - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp, fprA); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterTemp, fprRegisterA); // divide bottom double of temporary register by bottom double of frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprTemp, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterTemp, fprRegisterB); // move result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprTemp); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp); return true; } // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); // copy ps0 + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA); // copy ps0 // divide bottom double of frD by bottom double of frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprD, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterD, fprRegisterB); return true; } @@ -271,37 +558,38 @@ bool PPCRecompilerImlGen_FMADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, 
PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // if frB is already in frD we need a temporary register to store the product of frA*frC if( frB == frD ) { - DefineTempFPR(fprTemp, 0); + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); // move frA to temporary register - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp, fprA); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterTemp, fprRegisterA); // multiply bottom double of temporary register with bottom double of frC - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterC); // add result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprD, fprTemp); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterTemp); return true; } // if frC == frD -> swap registers, we assume that frC != frD - if( frD == frC ) + if( fprRegisterD == fprRegisterC ) { // swap frA and frC - IMLReg temp = fprA; - fprA = fprC; - fprC = temp; + sint32 temp = fprRegisterA; + fprRegisterA = fprRegisterC; + fprRegisterC = temp; } // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); // always copy ps0 and ps1 + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); 
// always copy ps0 and ps1 // multiply bottom double of frD with bottom double of frC - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprD, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC); // add frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprD, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterB); return true; } @@ -309,34 +597,32 @@ bool PPCRecompilerImlGen_FMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opco { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // if frB is already in frD we need a temporary register to store the product of frA*frC if( frB == frD ) { - // if frB is already in frD we need a temporary register to store the product of frA*frC - DefineTempFPR(fprTemp, 0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp, fprA); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp, fprC); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprTemp, fprB); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprTemp); + // not implemented return false; } - if( frD == frC ) + // if frC == frD -> swap registers, we assume that frC != frD + if( fprRegisterD == fprRegisterC ) { // swap frA and frC 
- IMLReg temp = fprA; - fprA = fprC; - fprC = temp; + sint32 temp = fprRegisterA; + fprRegisterA = fprRegisterC; + fprRegisterC = temp; } - // move frA to frD - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); + // move frA to frD (if different register) + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1 // multiply bottom double of frD with bottom double of frC - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprD, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC); // sub frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprD, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterB); return true; } @@ -344,52 +630,51 @@ bool PPCRecompilerImlGen_FNMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); + + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // if frB is already in frD we need a temporary register to store the product of frA*frC if( frB == frD ) { - DefineTempFPR(fprTemp, 0); + // hCPU->fpr[frD].fpr = -(hCPU->fpr[frA].fpr * hCPU->fpr[frC].fpr - hCPU->fpr[frD].fpr); + uint32 fprRegisterTemp = 
PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + //// negate frB/frD + //PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterD, true); // move frA to temporary register - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp, fprA); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterTemp, fprRegisterA); // multiply bottom double of temporary register with bottom double of frC - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterC); // sub frB from temporary register - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprTemp, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterTemp, fprRegisterB); // negate result - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprTemp); + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterTemp); // move result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprTemp); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp); return true; } // if frC == frD -> swap registers, we assume that frC != frD - if( frD == frC ) + if( fprRegisterD == fprRegisterC ) { // swap frA and frC - IMLReg temp = fprA; - fprA = fprC; - fprC = temp; + sint32 temp = fprRegisterA; + fprRegisterA = fprRegisterC; + fprRegisterC = temp; } // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); + if( fprRegisterD != 
fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA); // always copy ps0 and ps1 // multiply bottom double of frD with bottom double of frC - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprD, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC); // sub frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprD, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterB); // negate result - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterD); return true; } -#define PSE_CopyResultToPs1() if( ppcImlGenContext->PSE ) \ - { \ - DefinePS1(fprDPS1, frD); \ - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDPS1, fprD); \ - } - bool PPCRecompilerImlGen_FMULS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 frD, frA, frB_unused, frC; @@ -402,18 +687,24 @@ bool PPCRecompilerImlGen_FMULS(ppcImlGenContext_t* ppcImlGenContext, uint32 opco frA = frC; frC = temp; } - DefinePS0(fprA, frA); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); + if( fprRegisterD != fprRegisterA ) + 
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1 + // multiply bottom double of frD with bottom double of frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprD, fprC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); // if paired single mode, copy frD ps0 to ps1 - PSE_CopyResultToPs1(); + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD); + } + return true; } @@ -422,31 +713,44 @@ bool PPCRecompilerImlGen_FDIVS(ppcImlGenContext_t* ppcImlGenContext, uint32 opco sint32 frD, frA, frB, frC_unused; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC_unused); PPC_ASSERT(frB==0); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); + /*hCPU->fpr[frD].fpr = (float)(hCPU->fpr[frA].fpr / hCPU->fpr[frB].fpr); + if( hCPU->PSE ) + hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0;*/ + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( frB == frD && frA != frB ) { - DefineTempFPR(fprTemp, 0); + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); // move frA to temporary register - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp, fprA); + 
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterTemp, fprRegisterA); // divide bottom double of temporary register by bottom double of frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprTemp, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterTemp, fprRegisterB); // move result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprTemp); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); - PSE_CopyResultToPs1(); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + // if paired single mode, copy frD ps0 to ps1 + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD); + } return true; } // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1 // subtract bottom double of frB from bottom double of frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprD, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterD, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); - PSE_CopyResultToPs1(); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + // if paired single mode, copy frD ps0 to ps1 + if( 
ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD); + } return true; } @@ -462,17 +766,22 @@ bool PPCRecompilerImlGen_FADDS(ppcImlGenContext_t* ppcImlGenContext, uint32 opco frA = frB; frB = temp; } - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // move frA to frD (if different register) - if( frD != frA ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprA); + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1 // add bottom double of frD and bottom double of frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprD, fprB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); - PSE_CopyResultToPs1(); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + // if paired single mode, copy frD ps0 to ps1 + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD); + } return true; } @@ -481,12 +790,20 @@ bool PPCRecompilerImlGen_FSUBS(ppcImlGenContext_t* ppcImlGenContext, uint32 opco int frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); PPC_ASSERT(frB==0); - DefinePS0(fprA, frA); - 
DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_SUB, fprD, fprA, fprB); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); - PSE_CopyResultToPs1(); + + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // subtract bottom + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB); + // adjust accuracy + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + // if paired single mode, copy frD ps0 to ps1 + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD); + } return true; } @@ -494,26 +811,34 @@ bool PPCRecompilerImlGen_FMADDS(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); + //FPRD(RD) = FPRD(RA) * FPRD(RC) + FPRD(RB); + //hCPU->fpr[frD].fpr = hCPU->fpr[frA].fpr * hCPU->fpr[frC].fpr + hCPU->fpr[frB].fpr; + //if( hCPU->PSE ) + // hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0; + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); 
+ uint32 fprRegisterTemp; // if none of the operand registers overlap with the result register then we can avoid the usage of a temporary register - IMLReg fprRegisterTemp; - if( frD != frA && frD != frB && frD != frC ) - fprRegisterTemp = fprD; + if( fprRegisterD != fprRegisterA && fprRegisterD != fprRegisterB && fprRegisterD != fprRegisterC ) + fprRegisterTemp = fprRegisterD; else - fprRegisterTemp = _GetFPRTemp(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprRegisterTemp, fprA, fprC); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprRegisterTemp, fprB); + fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterA, fprRegisterC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterTemp, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprRegisterTemp); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterTemp); // set result - if( fprD != fprRegisterTemp ) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprRegisterTemp); + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterTemp); + } + else if( fprRegisterD != fprRegisterTemp ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp); } - PSE_CopyResultToPs1(); return true; } @@ -521,27 +846,33 @@ bool PPCRecompilerImlGen_FMSUBS(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprA, 
frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); - - IMLReg fprRegisterTemp; + //hCPU->fpr[frD].fp0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 - hCPU->fpr[frB].fp0); + //if( hCPU->PSE ) + // hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0; + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + uint32 fprRegisterTemp; // if none of the operand registers overlap with the result register then we can avoid the usage of a temporary register - if( frD != frA && frD != frB && frD != frC ) - fprRegisterTemp = fprD; + if( fprRegisterD != fprRegisterA && fprRegisterD != fprRegisterB && fprRegisterD != fprRegisterC ) + fprRegisterTemp = fprRegisterD; else - fprRegisterTemp = _GetFPRTemp(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprRegisterTemp, fprA, fprC); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprRegisterTemp, fprB); + fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterA, fprRegisterC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterTemp, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprRegisterTemp); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterTemp); // set result - if( fprD != fprRegisterTemp ) - { - 
ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprRegisterTemp); + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterTemp); + } + else if( fprRegisterD != fprRegisterTemp ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp); } - PSE_CopyResultToPs1(); return true; } @@ -549,32 +880,49 @@ bool PPCRecompilerImlGen_FNMSUBS(ppcImlGenContext_t* ppcImlGenContext, uint32 op { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); - IMLReg fprRegisterTemp; + + //[FP1(RD) = ]FP0(RD) = -(FP0(RA) * FP0(RC) - FP0(RB)); + //hCPU->fpr[frD].fp0 = (float)-(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 - hCPU->fpr[frB].fp0); + //if( PPC_PSE ) + // hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0; + + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + uint32 fprRegisterTemp; // if none of the operand registers overlap with the result register then we can avoid the usage of a temporary register - if( frD != frA && frD != frB && frD != frC ) - fprRegisterTemp = fprD; + if( fprRegisterD != fprRegisterA && fprRegisterD != fprRegisterB && fprRegisterD != fprRegisterC ) + fprRegisterTemp = fprRegisterD; else - fprRegisterTemp = _GetFPRTemp(ppcImlGenContext, 0); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprRegisterTemp, fprA, fprC); - 
ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprRegisterTemp, fprB); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprRegisterTemp); + fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterA, fprRegisterC); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterTemp, fprRegisterB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterTemp); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprRegisterTemp); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterTemp); // set result - if( fprD != fprRegisterTemp ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprRegisterTemp); - PSE_CopyResultToPs1(); + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterTemp); + } + else if( fprRegisterD != fprRegisterTemp ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp); + } return true; } bool PPCRecompilerImlGen_FCMPO(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // Not implemented - return false; + sint32 crfD, frA, frB; + PPC_OPC_TEMPL_X(opcode, crfD, frA, frB); + crfD >>= 2; + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPO_BOTTOM, fprRegisterA, 
fprRegisterB, crfD); + return true; } bool PPCRecompilerImlGen_FCMPU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) @@ -582,21 +930,9 @@ bool PPCRecompilerImlGen_FCMPU(ppcImlGenContext_t* ppcImlGenContext, uint32 opco sint32 crfD, frA, frB; PPC_OPC_TEMPL_X(opcode, crfD, frA, frB); crfD >>= 2; - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - - IMLReg crBitRegLT = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_LT); - IMLReg crBitRegGT = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_GT); - IMLReg crBitRegEQ = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_EQ); - IMLReg crBitRegSO = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_SO); - - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegLT, IMLCondition::UNORDERED_LT); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegGT, IMLCondition::UNORDERED_GT); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegEQ, IMLCondition::UNORDERED_EQ); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegSO, IMLCondition::UNORDERED_U); - - // todo: set fpscr - + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPU_BOTTOM, fprRegisterA, fprRegisterB, crfD); return true; } @@ -604,9 +940,9 @@ bool PPCRecompilerImlGen_FMR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode { sint32 frD, rA, frB; PPC_OPC_TEMPL_X(opcode, frD, rA, frB); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprB); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, 
PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); return true; } @@ -615,11 +951,14 @@ bool PPCRecompilerImlGen_FABS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 frD, frA, frB; PPC_OPC_TEMPL_X(opcode, frD, frA, frB); PPC_ASSERT(frA==0); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - if( frD != frB ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprB); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_ABS, fprD); + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // move frB to frD (if different register) + if( fprRegisterD != fprRegisterB ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); + // abs frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_ABS_BOTTOM, fprRegisterD); return true; } @@ -628,11 +967,14 @@ bool PPCRecompilerImlGen_FNABS(ppcImlGenContext_t* ppcImlGenContext, uint32 opco sint32 frD, frA, frB; PPC_OPC_TEMPL_X(opcode, frD, frA, frB); PPC_ASSERT(frA==0); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - if( frD != frB ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprB); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATIVE_ABS, fprD); + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // move frB to frD (if different register) + if( fprRegisterD != fprRegisterB ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, 
PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); + // abs frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM, fprRegisterD); return true; } @@ -641,12 +983,12 @@ bool PPCRecompilerImlGen_FRES(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 frD, frA, frB; PPC_OPC_TEMPL_X(opcode, frD, frA, frB); PPC_ASSERT(frA==0); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_call_imm((uintptr_t)fres_espresso, fprB, IMLREG_INVALID, IMLREG_INVALID, fprD); + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); - PSE_CopyResultToPs1(); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -655,12 +997,17 @@ bool PPCRecompilerImlGen_FRSP(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod sint32 frD, frA, frB; PPC_OPC_TEMPL_X(opcode, frD, frA, frB); PPC_ASSERT(frA==0); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - if( fprD != fprB ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprB); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, fprD); - PSE_CopyResultToPs1(); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( fprRegisterD != fprRegisterB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, 
PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); + } + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, fprRegisterD); + if( ppcImlGenContext->PSE ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD); + } return true; } @@ -670,12 +1017,17 @@ bool PPCRecompilerImlGen_FNEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod PPC_OPC_TEMPL_X(opcode, frD, frA, frB); PPC_ASSERT(frA==0); if( opcode&PPC_OPC_RC ) + { return false; - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - if( frD != frB ) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprD, fprB); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprD); + } + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // move frB to frD (if different register) + if( fprRegisterD != fprRegisterB ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); + // negate frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterD); return true; } @@ -687,11 +1039,11 @@ bool PPCRecompilerImlGen_FSEL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcod { return false; } - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - DefinePS0(fprC, frC); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_fpr_r_r_r_r(PPCREC_IML_OP_FPR_SELECT, fprD, fprA, fprB, fprC); + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC 
= PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SELECT_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC); return true; } @@ -699,11 +1051,12 @@ bool PPCRecompilerImlGen_FRSQRTE(ppcImlGenContext_t* ppcImlGenContext, uint32 op { sint32 frD, frA, frB, frC; PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_call_imm((uintptr_t)frsqrte_espresso, fprB, IMLREG_INVALID, IMLREG_INVALID, fprD); + // hCPU->fpr[frD].fpr = 1.0 / sqrt(hCPU->fpr[frB].fpr); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT, fprRegisterD, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprD); + PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -711,242 +1064,65 @@ bool PPCRecompilerImlGen_FCTIWZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opc { sint32 frD, frA, frB; PPC_OPC_TEMPL_X(opcode, frD, frA, frB); - DefinePS0(fprB, frB); - DefinePS0(fprD, frD); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_FCTIWZ, fprD, fprB); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ, fprRegisterD, fprRegisterB); return true; } -bool 
PPCRecompiler_isUGQRValueKnown(ppcImlGenContext_t* ppcImlGenContext, sint32 gqrIndex, uint32& gqrValue); - -void PPCRecompilerImlGen_ClampInteger(ppcImlGenContext_t* ppcImlGenContext, IMLReg reg, sint32 clampMin, sint32 clampMax) -{ - IMLReg regTmpCondBool = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 1); - // min(reg, clampMax) - ppcImlGenContext->emitInst().make_compare_s32(reg, clampMax, regTmpCondBool, IMLCondition::SIGNED_GT); - ppcImlGenContext->emitInst().make_conditional_jump(regTmpCondBool, false); // condition needs to be inverted because we skip if the condition is true - PPCIMLGen_CreateSegmentBranchedPath(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock, - [&](ppcImlGenContext_t& genCtx) - { - /* branch not taken */ - genCtx.emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, reg, clampMax); - } - ); - // max(reg, clampMin) - ppcImlGenContext->emitInst().make_compare_s32(reg, clampMin, regTmpCondBool, IMLCondition::SIGNED_LT); - ppcImlGenContext->emitInst().make_conditional_jump(regTmpCondBool, false); - PPCIMLGen_CreateSegmentBranchedPath(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock, - [&](ppcImlGenContext_t& genCtx) - { - /* branch not taken */ - genCtx.emitInst().make_r_s32(PPCREC_IML_OP_ASSIGN, reg, clampMin); - } - ); -} - -void PPCRecompilerIMLGen_GetPSQScale(ppcImlGenContext_t* ppcImlGenContext, IMLReg gqrRegister, IMLReg fprRegScaleOut, bool isLoad) -{ - IMLReg gprTmp2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 2); - // extract scale factor and sign extend it - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_LEFT_SHIFT, gprTmp2, gqrRegister, 32 - ((isLoad ? 
24 : 8)+7)); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_S, gprTmp2, gprTmp2, (32-23)-7); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, gprTmp2, gprTmp2, 0x1FF<<23); - if (isLoad) - ppcImlGenContext->emitInst().make_r_r(PPCREC_IML_OP_NEG, gprTmp2, gprTmp2); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, gprTmp2, gprTmp2, 0x7F<<23); - // gprTmp2 now holds the scale float bits, bitcast to float - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_BITCAST_INT_TO_FLOAT, fprRegScaleOut, gprTmp2); -} - -void PPCRecompilerImlGen_EmitPSQLoadCase(ppcImlGenContext_t* ppcImlGenContext, sint32 gqrIndex, Espresso::PSQ_LOAD_TYPE loadType, bool readPS1, IMLReg gprA, sint32 imm, IMLReg fprDPS0, IMLReg fprDPS1) -{ - if (loadType == Espresso::PSQ_LOAD_TYPE::TYPE_F32) - { - ppcImlGenContext->emitInst().make_fpr_r_memory(fprDPS0, gprA, imm, PPCREC_FPR_LD_MODE_SINGLE, true); - if(readPS1) - { - ppcImlGenContext->emitInst().make_fpr_r_memory(fprDPS1, gprA, imm + 4, PPCREC_FPR_LD_MODE_SINGLE, true); - } - } - if (loadType == Espresso::PSQ_LOAD_TYPE::TYPE_U16 || loadType == Espresso::PSQ_LOAD_TYPE::TYPE_S16) - { - // get scale factor - IMLReg gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex); - IMLReg fprScaleReg = _GetFPRTemp(ppcImlGenContext, 2); - PPCRecompilerIMLGen_GetPSQScale(ppcImlGenContext, gqrRegister, fprScaleReg, true); - - bool isSigned = (loadType == Espresso::PSQ_LOAD_TYPE::TYPE_S16); - IMLReg gprTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - ppcImlGenContext->emitInst().make_r_memory(gprTmp, gprA, imm, 16, isSigned, true); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_INT_TO_FLOAT, fprDPS0, gprTmp); - - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDPS0, fprDPS0, fprScaleReg); - - if(readPS1) - { - ppcImlGenContext->emitInst().make_r_memory(gprTmp, gprA, imm + 2, 16, 
isSigned, true); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_INT_TO_FLOAT, fprDPS1, gprTmp); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDPS1, fprDPS1, fprScaleReg); - } - } - else if (loadType == Espresso::PSQ_LOAD_TYPE::TYPE_U8 || loadType == Espresso::PSQ_LOAD_TYPE::TYPE_S8) - { - // get scale factor - IMLReg gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex); - IMLReg fprScaleReg = _GetFPRTemp(ppcImlGenContext, 2); - PPCRecompilerIMLGen_GetPSQScale(ppcImlGenContext, gqrRegister, fprScaleReg, true); - - bool isSigned = (loadType == Espresso::PSQ_LOAD_TYPE::TYPE_S8); - IMLReg gprTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - ppcImlGenContext->emitInst().make_r_memory(gprTmp, gprA, imm, 8, isSigned, true); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_INT_TO_FLOAT, fprDPS0, gprTmp); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDPS0, fprDPS0, fprScaleReg); - if(readPS1) - { - ppcImlGenContext->emitInst().make_r_memory(gprTmp, gprA, imm + 1, 8, isSigned, true); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_INT_TO_FLOAT, fprDPS1, gprTmp); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDPS1, fprDPS1, fprScaleReg); - } - } -} - -// PSQ_L and PSQ_LU -bool PPCRecompilerImlGen_PSQ_L(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate) +bool PPCRecompilerImlGen_PSQ_L(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int rA, frD; uint32 immUnused; PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused); + sint32 gqrIndex = ((opcode >> 12) & 7); uint32 imm = opcode & 0xFFF; if (imm & 0x800) imm |= ~0xFFF; + bool readPS1 = (opcode & 0x8000) == false; - IMLReg gprA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - DefinePS0(fprDPS0, frD); - DefinePS1(fprDPS1, frD); - if (!readPS1) - { - // if PS1 is 
not explicitly read then set it to 1.0 - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_LOAD_ONE, fprDPS1); - } - if (withUpdate) - { - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, gprA, gprA, (sint32)imm); - imm = 0; - } - uint32 knownGQRValue = 0; - if ( !PPCRecompiler_isUGQRValueKnown(ppcImlGenContext, gqrIndex, knownGQRValue) ) - { - // generate complex dynamic handler when we dont know the GQR value ahead of time - IMLReg gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex); - IMLReg loadTypeReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - // extract the load type from the GQR register - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_RIGHT_SHIFT_U, loadTypeReg, gqrRegister, 16); - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, loadTypeReg, loadTypeReg, 0x7); - IMLSegment* caseSegment[6]; - sint32 compareValues[6] = {0, 4, 5, 6, 7}; - PPCIMLGen_CreateSegmentBranchedPathMultiple(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock, caseSegment, loadTypeReg, compareValues, 5, 0); - for (sint32 i=0; i<5; i++) - { - IMLRedirectInstOutput outputToCase(ppcImlGenContext, caseSegment[i]); // while this is in scope, instructions go to caseSegment[i] - PPCRecompilerImlGen_EmitPSQLoadCase(ppcImlGenContext, gqrIndex, static_cast(compareValues[i]), readPS1, gprA, imm, fprDPS0, fprDPS1); - // create the case jump instructions here because we need to add it last - caseSegment[i]->AppendInstruction()->make_jump(); - } - return true; - } - - Espresso::PSQ_LOAD_TYPE type = static_cast((knownGQRValue >> 0) & 0x7); - sint32 scale = (knownGQRValue >> 8) & 0x3F; - cemu_assert_debug(scale == 0); // known GQR values always use a scale of 0 (1.0f) - if (scale != 0) - return false; - - if (type == Espresso::PSQ_LOAD_TYPE::TYPE_UNUSED1 || - type == Espresso::PSQ_LOAD_TYPE::TYPE_UNUSED2 || - type == Espresso::PSQ_LOAD_TYPE::TYPE_UNUSED3) - { - 
return false; - } - - PPCRecompilerImlGen_EmitPSQLoadCase(ppcImlGenContext, gqrIndex, type, readPS1, gprA, imm, fprDPS0, fprDPS1); + // get gqr register + uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); + // psq load + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, readPS1 ? PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0, true, gqrRegister); return true; } -void PPCRecompilerImlGen_EmitPSQStoreCase(ppcImlGenContext_t* ppcImlGenContext, sint32 gqrIndex, Espresso::PSQ_LOAD_TYPE storeType, bool storePS1, IMLReg gprA, sint32 imm, IMLReg fprDPS0, IMLReg fprDPS1) +bool PPCRecompilerImlGen_PSQ_LU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - cemu_assert_debug(!storePS1 || fprDPS1.IsValid()); - if (storeType == Espresso::PSQ_LOAD_TYPE::TYPE_F32) - { - ppcImlGenContext->emitInst().make_fpr_memory_r(fprDPS0, gprA, imm, PPCREC_FPR_ST_MODE_SINGLE, true); - if(storePS1) - { - ppcImlGenContext->emitInst().make_fpr_memory_r(fprDPS1, gprA, imm + 4, PPCREC_FPR_ST_MODE_SINGLE, true); - } - } - else if (storeType == Espresso::PSQ_LOAD_TYPE::TYPE_U16 || storeType == Espresso::PSQ_LOAD_TYPE::TYPE_S16) - { - // get scale factor - IMLReg gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex); - IMLReg fprScaleReg = _GetFPRTemp(ppcImlGenContext, 2); - PPCRecompilerIMLGen_GetPSQScale(ppcImlGenContext, gqrRegister, fprScaleReg, false); + int rA, frD; + uint32 immUnused; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused); + if (rA == 0) + return false; - bool isSigned = (storeType == 
Espresso::PSQ_LOAD_TYPE::TYPE_S16); - IMLReg fprTmp = _GetFPRTemp(ppcImlGenContext, 0); + sint32 gqrIndex = ((opcode >> 12) & 7); + uint32 imm = opcode & 0xFFF; + if (imm & 0x800) + imm |= ~0xFFF; - IMLReg gprTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp, fprDPS0, fprScaleReg); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_FLOAT_TO_INT, gprTmp, fprTmp); - - if (isSigned) - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, -32768, 32767); - else - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, 0, 65535); - ppcImlGenContext->emitInst().make_memory_r(gprTmp, gprA, imm, 16, true); - if(storePS1) - { - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp, fprDPS1, fprScaleReg); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_FLOAT_TO_INT, gprTmp, fprTmp); - if (isSigned) - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, -32768, 32767); - else - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, 0, 65535); - ppcImlGenContext->emitInst().make_memory_r(gprTmp, gprA, imm + 2, 16, true); - } - } - else if (storeType == Espresso::PSQ_LOAD_TYPE::TYPE_U8 || storeType == Espresso::PSQ_LOAD_TYPE::TYPE_S8) - { - // get scale factor - IMLReg gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex); - IMLReg fprScaleReg = _GetFPRTemp(ppcImlGenContext, 2); - PPCRecompilerIMLGen_GetPSQScale(ppcImlGenContext, gqrRegister, fprScaleReg, false); - - bool isSigned = (storeType == Espresso::PSQ_LOAD_TYPE::TYPE_S8); - IMLReg fprTmp = _GetFPRTemp(ppcImlGenContext, 0); - IMLReg gprTmp = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp, fprDPS0, fprScaleReg); - 
ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_FLOAT_TO_INT, gprTmp, fprTmp); - if (isSigned) - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, -128, 127); - else - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, 0, 255); - ppcImlGenContext->emitInst().make_memory_r(gprTmp, gprA, imm, 8, true); - if(storePS1) - { - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp, fprDPS1, fprScaleReg); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_FLOAT_TO_INT, gprTmp, fprTmp); - if (isSigned) - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, -128, 127); - else - PPCRecompilerImlGen_ClampInteger(ppcImlGenContext, gprTmp, 0, 255); - ppcImlGenContext->emitInst().make_memory_r(gprTmp, gprA, imm + 1, 8, true); - } - } + bool readPS1 = (opcode & 0x8000) == false; + + // get gqr register + uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); + // paired load + PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, readPS1 ? 
PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0, true, gqrRegister); + return true; } -// PSQ_ST and PSQ_STU -bool PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withUpdate) +bool PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { int rA, frD; uint32 immUnused; @@ -955,133 +1131,181 @@ bool PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext_t* ppcImlGenContext, uint32 opc if (imm & 0x800) imm |= ~0xFFF; sint32 gqrIndex = ((opcode >> 12) & 7); + bool storePS1 = (opcode & 0x8000) == false; - IMLReg gprA = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA); - DefinePS0(fprDPS0, frD); - IMLReg fprDPS1 = storePS1 ? _GetFPRRegPS1(ppcImlGenContext, frD) : IMLREG_INVALID; - - if (withUpdate) - { - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_ADD, gprA, gprA, (sint32)imm); - imm = 0; - } - - uint32 gqrValue = 0; - if ( !PPCRecompiler_isUGQRValueKnown(ppcImlGenContext, gqrIndex, gqrValue) ) - { - // generate complex dynamic handler when we dont know the GQR value ahead of time - IMLReg gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex); - IMLReg loadTypeReg = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY + 0); - // extract the load type from the GQR register - ppcImlGenContext->emitInst().make_r_r_s32(PPCREC_IML_OP_AND, loadTypeReg, gqrRegister, 0x7); - - IMLSegment* caseSegment[5]; - sint32 compareValues[5] = {0, 4, 5, 6, 7}; - PPCIMLGen_CreateSegmentBranchedPathMultiple(*ppcImlGenContext, *ppcImlGenContext->currentBasicBlock, caseSegment, loadTypeReg, compareValues, 5, 0); - for (sint32 i=0; i<5; i++) - { - IMLRedirectInstOutput outputToCase(ppcImlGenContext, caseSegment[i]); // while this is in scope, instructions go to caseSegment[i] - PPCRecompilerImlGen_EmitPSQStoreCase(ppcImlGenContext, gqrIndex, static_cast(compareValues[i]), storePS1, gprA, imm, fprDPS0, fprDPS1); - 
ppcImlGenContext->emitInst().make_jump(); // finalize case - } - return true; - } - - Espresso::PSQ_LOAD_TYPE type = static_cast((gqrValue >> 16) & 0x7); - sint32 scale = (gqrValue >> 24) & 0x3F; - cemu_assert_debug(scale == 0); // known GQR values always use a scale of 0 (1.0f) - - if (type == Espresso::PSQ_LOAD_TYPE::TYPE_UNUSED1 || - type == Espresso::PSQ_LOAD_TYPE::TYPE_UNUSED2 || - type == Espresso::PSQ_LOAD_TYPE::TYPE_UNUSED3) - { - return false; - } - - PPCRecompilerImlGen_EmitPSQStoreCase(ppcImlGenContext, gqrIndex, type, storePS1, gprA, imm, fprDPS0, fprDPS1); + // get gqr register + uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); + // paired store + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, imm, storePS1 ? 
PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0, true, gqrRegister); return true; } -// PS_MULS0 and PS_MULS1 -bool PPCRecompilerImlGen_PS_MULSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isVariant1) +bool PPCRecompilerImlGen_PSQ_STU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + int rA, frD; + uint32 immUnused; + PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused); + if (rA == 0) + return false; + + uint32 imm = opcode & 0xFFF; + if (imm & 0x800) + imm |= ~0xFFF; + sint32 gqrIndex = ((opcode >> 12) & 7); + + bool storePS1 = (opcode & 0x8000) == false; + + // get gqr register + uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false); + // get memory gpr register index + uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false); + // add imm to memory register + PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0); + // get fpr register index + uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); + // paired store + PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, 0, storePS1 ? 
PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0, true, gqrRegister); + return true; +} + +bool PPCRecompilerImlGen_PS_MULS0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 frD, frA, frC; frC = (opcode>>6)&0x1F; frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePSX(fprC, frC, isVariant1); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - DefineTempFPR(fprTmp0, 0); - DefineTempFPR(fprTmp1, 1); - - // todo - optimize cases where a temporary is not necessary - // todo - round fprC to 25bit accuracy - - // copy ps0 and ps1 to temporary - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTmp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTmp1, fprAps1); - - // multiply - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp0, fprC); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp1, fprC); - - // copy back to result - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTmp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTmp1); - - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); - + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, 
PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC); + // if frD == frA we can multiply frD immediately and safe a copy instruction + if( frD == frA ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); + } + else + { + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); + } + // adjust accuracy + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } -// PS_MADDS0 and PS_MADDS1 -bool PPCRecompilerImlGen_PS_MADDSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool isVariant1) +bool PPCRecompilerImlGen_PS_MULS1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 frD, frA, frC; + frC = (opcode>>6)&0x1F; + frA = (opcode>>16)&0x1F; + frD = (opcode>>21)&0x1F; + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC); + // if frD == frA we can multiply frD immediately and safe a copy instruction + if( frD == frA ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, 
PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); + } + else + { + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); + } + // adjust accuracy + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + return true; +} + +bool PPCRecompilerImlGen_PS_MADDS0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 frD, frA, frB, frC; frC = (opcode>>6)&0x1F; frB = (opcode>>11)&0x1F; frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; + //float s0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 + hCPU->fpr[frB].fp0); + //float s1 = (float)(hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp0 + hCPU->fpr[frB].fp1); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC); + // if frD == frA and frD != frB we can multiply frD immediately and safe a copy instruction + if( frD == frA && frD != frB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); + // add frB + 
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB); + } + else + { + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // add frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); + } + // adjust accuracy + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + return true; +} - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePSX(fprC, frC, isVariant1); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - DefineTempFPR(fprTmp0, 0); - DefineTempFPR(fprTmp1, 1); - - // todo - round C to 25bit - // todo - optimize cases where a temporary is not necessary - - // copy ps0 and ps1 to temporary - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTmp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTmp1, fprAps1); - - // multiply - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp0, fprC); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTmp1, fprC); - - // add - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprTmp0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprTmp1, fprBps1); - - // copy back to result - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTmp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTmp1); - - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - 
PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); +bool PPCRecompilerImlGen_PS_MADDS1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 frD, frA, frB, frC; + frC = (opcode>>6)&0x1F; + frB = (opcode>>11)&0x1F; + frA = (opcode>>16)&0x1F; + frD = (opcode>>21)&0x1F; + //float s0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp1 + hCPU->fpr[frB].fp0); + //float s1 = (float)(hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp1 + hCPU->fpr[frB].fp1); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp1 in bottom and top half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC); + // if frD == frA and frD != frB we can multiply frD immediately and safe a copy instruction + if( frD == frA && frD != frB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); + // add frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB); + } + else + { + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // add frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB); + // copy 
result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); + } + // adjust accuracy + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1093,34 +1317,25 @@ bool PPCRecompilerImlGen_PS_ADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opc frD = (opcode>>21)&0x1F; //hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp0; //hCPU->fpr[frD].fp1 = hCPU->fpr[frA].fp1 + hCPU->fpr[frB].fp1; - - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); if( frD == frA ) { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB); } else if( frD == frB ) { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprAps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterA); } else { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps0); - 
ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterA); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB); } // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1132,20 +1347,13 @@ bool PPCRecompilerImlGen_PS_SUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opc frD = (opcode>>21)&0x1F; //hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0 - hCPU->fpr[frB].fp0; //hCPU->fpr[frD].fp1 = hCPU->fpr[frA].fp1 - hCPU->fpr[frB].fp1; - - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_SUB, fprDps0, fprAps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r_r(PPCREC_IML_OP_FPR_SUB, fprDps1, fprAps1, fprBps1); - + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterD, fprRegisterA, fprRegisterB); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1155,37 +1363,28 
@@ bool PPCRecompilerImlGen_PS_MUL(ppcImlGenContext_t* ppcImlGenContext, uint32 opc frC = (opcode >> 6) & 0x1F; frA = (opcode >> 16) & 0x1F; frD = (opcode >> 21) & 0x1F; - - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprCps0, frC); - DefinePS1(fprCps1, frC); - - DefineTempFPR(fprTemp0, 0); - DefineTempFPR(fprTemp1, 1); - - // todo: Optimize for when a temporary isnt necessary - // todo: Round to 25bit? - - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp1, fprCps1); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); + // we need a temporary register + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0 + 0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC); + // todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register + // if frD == frA we can multiply frD immediately and safe a copy instruction if (frD == frA) { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps1, fprTemp1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); } else { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp1, 
fprAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTemp1); + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); } // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1197,35 +1396,28 @@ bool PPCRecompilerImlGen_PS_DIV(ppcImlGenContext_t* ppcImlGenContext, uint32 opc frD = (opcode >> 21) & 0x1F; //hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0 / hCPU->fpr[frB].fp0; //hCPU->fpr[frD].fp1 = hCPU->fpr[frA].fp1 / hCPU->fpr[frB].fp1; - - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); + // todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register + // if frD == frA we can divide frD immediately and safe a copy instruction if (frD == frA) { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprDps1, fprBps1); + 
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_PAIR, fprRegisterD, fprRegisterB); } else { - DefineTempFPR(fprTemp0, 0); - DefineTempFPR(fprTemp1, 1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp1, fprAps1); + // we need a temporary register + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0 + 0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterA); // we divide temporary by frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprTemp0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_DIVIDE, fprTemp1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_PAIR, fprRegisterTemp, fprRegisterB); // copy result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTemp1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); } // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1239,61 +1431,33 @@ bool PPCRecompilerImlGen_PS_MADD(ppcImlGenContext_t* ppcImlGenContext, uint32 op //float s0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 + hCPU->fpr[frB].fp0); //float s1 = (float)(hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp1 + hCPU->fpr[frB].fp1); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - 
DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprCps0, frC); - DefinePS1(fprCps1, frC); - - if (frD != frA && frD != frB) + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC); + // todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register + // if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction + if( frD == frA && frD != frB ) { - if (frD == frC) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprCps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprCps1, fprAps1); - } - else - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps1, fprCps1); - } - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, 
fprRegisterD, fprRegisterTemp); + // add frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB); } else { - DefineTempFPR(fprTemp0, 0); - DefineTempFPR(fprTemp1, 1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp1, fprCps1); - if( frD == frA && frD != frB ) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps1, fprTemp1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprBps1); - } - else - { - // we multiply temporary by frA - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp1, fprAps1); - // add frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprTemp0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprTemp1, fprBps1); - // copy result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTemp1); - } + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // add frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); } // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, 
fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1305,54 +1469,81 @@ bool PPCRecompilerImlGen_PS_NMADD(ppcImlGenContext_t* ppcImlGenContext, uint32 o frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprCps0, frC); - DefinePS1(fprCps1, frC); - - DefineTempFPR(fprTemp0, 0); - DefineTempFPR(fprTemp1, 1); - - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp1, fprCps1); + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC); // todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register - // if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction + // if frD == frA and frD != frB we can multiply frD immediately and safe a copy instruction if( frD == frA && frD != frB ) { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps0, fprTemp0); - 
ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps1, fprTemp1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); + // add frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB); } else { // we multiply temporary by frA - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp1, fprAps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); // add frB - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprTemp0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprTemp1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB); // copy result to frD - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTemp1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); } - // negate - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprDps0); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprDps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_NEGATE_PAIR, fprRegisterD, fprRegisterD); // adjust accuracy //PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); // Splatoon requires that we emulate 
flush-to-denormals for this instruction - //ppcImlGenContext->emitInst().make_fpr_r(NULL,PPCREC_IML_OP_FPR_ROUND_FLDN_TO_SINGLE_PRECISION_PAIR, fprRegisterD, false); + //PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_ROUND_FLDN_TO_SINGLE_PRECISION_PAIR, fprRegisterD, false); return true; } -// PS_MSUB and PS_NMSUB -bool PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode, bool withNegative) +bool PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) +{ + sint32 frD, frA, frB, frC; + frC = (opcode>>6)&0x1F; + frB = (opcode>>11)&0x1F; + frA = (opcode>>16)&0x1F; + frD = (opcode>>21)&0x1F; + //hCPU->fpr[frD].fp0 = (hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 - hCPU->fpr[frB].fp0); + //hCPU->fpr[frD].fp1 = (hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp1 - hCPU->fpr[frB].fp1); + + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC); + // todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register + // if frD == frA and frD != frB we can multiply frD immediately and safe a copy instruction + if( frD == frA && frD != frB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, 
fprRegisterTemp); + // sub frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterD, fprRegisterB); + } + else + { + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // sub frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterTemp, fprRegisterB); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); + } + // adjust accuracy + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); + return true; +} + +bool PPCRecompilerImlGen_PS_NMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { sint32 frD, frA, frB, frC; frC = (opcode>>6)&0x1F; @@ -1360,64 +1551,35 @@ bool PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 op frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprCps0, frC); - DefinePS1(fprCps1, frC); - - if (frD != frA && frD != frB) + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // we need a temporary register to store frC.fp0 in low and high half + uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0); + 
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC); + // todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register + // if frD == frA and frD != frB we can multiply frD immediately and safe a copy instruction + if( frD == frA && frD != frB ) { - if (frD == frC) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprCps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprCps1, fprAps1); - } - else - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps1, fprCps1); - } - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprDps1, fprBps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp); + // sub frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterD, fprRegisterB); } else { - DefineTempFPR(fprTemp0, 0); - DefineTempFPR(fprTemp1, 1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprTemp1, fprCps1); - if( frD == frA && frD != frB ) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprDps1, fprTemp1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprDps0, fprBps0); - 
ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprDps1, fprBps1); - } - else - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_MULTIPLY, fprTemp1, fprAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprTemp0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_SUB, fprTemp1, fprBps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprTemp0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprTemp1); - } + // we multiply temporary by frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA); + // sub frB + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterTemp, fprRegisterB); + // copy result to frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp); } // negate result - if (withNegative) - { - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprDps0); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprDps1); - } + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_NEGATE_PAIR, fprRegisterD, fprRegisterD); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1428,27 +1590,18 @@ bool PPCRecompilerImlGen_PS_SUM0(ppcImlGenContext_t* ppcImlGenContext, uint32 op frB = (opcode>>11)&0x1F; frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprBps1, 
frB); - DefinePS1(fprCps1, frC); - - if( frD == frA ) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps1); - } - else - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps0, fprBps1); - } - if (fprDps1 != fprCps1) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprCps1); + //float s0 = (float)(hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp1); + //float s1 = (float)hCPU->fpr[frC].fp1; + //hCPU->fpr[frD].fp0 = s0; + //hCPU->fpr[frD].fp1 = s1; + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUM0, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1459,26 +1612,18 @@ bool PPCRecompilerImlGen_PS_SUM1(ppcImlGenContext_t* ppcImlGenContext, uint32 op frB = (opcode>>11)&0x1F; frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - DefinePS0(fprAps0, frA); - DefinePS1(fprBps1, frB); - DefinePS0(fprCps0, frC); - - if (frB != frD) - { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprBps1); - } - 
else - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ADD, fprDps1, fprAps0); - - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprCps0); - + //float s0 = (float)hCPU->fpr[frC].fp0; + //float s1 = (float)(hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp1); + //hCPU->fpr[frD].fp0 = s0; + //hCPU->fpr[frD].fp1 = s1; + // load registers + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUM1, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC); // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD); return true; } @@ -1487,20 +1632,12 @@ bool PPCRecompilerImlGen_PS_NEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opc sint32 frD, frB; frB = (opcode>>11)&0x1F; frD = (opcode>>21)&0x1F; - - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - if (frB != frD) - { - // copy - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprBps1); - } - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprDps0); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_NEGATE, fprDps1); + //hCPU->fpr[frD].fp0 = -hCPU->fpr[frB].fp0; + //hCPU->fpr[frD].fp1 = -hCPU->fpr[frB].fp1; + // load registers + uint32 fprRegisterB = 
PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_NEGATE_PAIR, fprRegisterD, fprRegisterB); return true; } @@ -1509,17 +1646,10 @@ bool PPCRecompilerImlGen_PS_ABS(ppcImlGenContext_t* ppcImlGenContext, uint32 opc sint32 frD, frB; frB = (opcode>>11)&0x1F; frD = (opcode>>21)&0x1F; - - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprBps1); - - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_ABS, fprDps0); - ppcImlGenContext->emitInst().make_fpr_r(PPCREC_IML_OP_FPR_ABS, fprDps1); + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ABS_PAIR, fprRegisterD, fprRegisterB); return true; } @@ -1531,16 +1661,11 @@ bool PPCRecompilerImlGen_PS_RES(ppcImlGenContext_t* ppcImlGenContext, uint32 opc //hCPU->fpr[frD].fp0 = (float)(1.0f / (float)hCPU->fpr[frB].fp0); //hCPU->fpr[frD].fp1 = (float)(1.0f / (float)hCPU->fpr[frB].fp1); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); - ppcImlGenContext->emitInst().make_call_imm((uintptr_t)fres_espresso, fprBps0, IMLREG_INVALID, 
IMLREG_INVALID, fprDps0); - ppcImlGenContext->emitInst().make_call_imm((uintptr_t)fres_espresso, fprBps1, IMLREG_INVALID, IMLREG_INVALID, fprDps1); - // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FRES_PAIR, fprRegisterD, fprRegisterB); return true; } @@ -1549,17 +1674,13 @@ bool PPCRecompilerImlGen_PS_RSQRTE(ppcImlGenContext_t* ppcImlGenContext, uint32 sint32 frD, frB; frB = (opcode>>11)&0x1F; frD = (opcode>>21)&0x1F; + //hCPU->fpr[frD].fp0 = (float)(1.0f / (float)sqrt(hCPU->fpr[frB].fp0)); + //hCPU->fpr[frD].fp1 = (float)(1.0f / (float)sqrt(hCPU->fpr[frB].fp1)); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - ppcImlGenContext->emitInst().make_call_imm((uintptr_t)frsqrte_espresso, fprBps0, IMLREG_INVALID, IMLREG_INVALID, fprDps0); - ppcImlGenContext->emitInst().make_call_imm((uintptr_t)frsqrte_espresso, fprBps1, IMLREG_INVALID, IMLREG_INVALID, fprDps1); - // adjust accuracy - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps0); - PPRecompilerImmGen_roundToSinglePrecision(ppcImlGenContext, fprDps1); + // load registers + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FRSQRTE_PAIR, fprRegisterD, fprRegisterB); return true; } @@ -1568,15 +1689,14 @@ bool PPCRecompilerImlGen_PS_MR(ppcImlGenContext_t* ppcImlGenContext, uint32 opco sint32 frD, frB; frB = (opcode>>11)&0x1F; frD = (opcode>>21)&0x1F; + //hCPU->fpr[frD].fp0 = hCPU->fpr[frB].fp0; + //hCPU->fpr[frD].fp1 = hCPU->fpr[frB].fp1; + // load registers if( frB != frD ) { - 
DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps0, fprBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, fprDps1, fprBps1); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterB); } return true; } @@ -1589,17 +1709,11 @@ bool PPCRecompilerImlGen_PS_SEL(ppcImlGenContext_t* ppcImlGenContext, uint32 opc frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - DefinePS0(fprAps0, frA); - DefinePS1(fprAps1, frA); - DefinePS0(fprBps0, frB); - DefinePS1(fprBps1, frB); - DefinePS0(fprCps0, frC); - DefinePS1(fprCps1, frC); - DefinePS0(fprDps0, frD); - DefinePS1(fprDps1, frD); - - ppcImlGenContext->emitInst().make_fpr_r_r_r_r(PPCREC_IML_OP_FPR_SELECT, fprDps0, fprAps0, fprBps0, fprCps0); - ppcImlGenContext->emitInst().make_fpr_r_r_r_r(PPCREC_IML_OP_FPR_SELECT, fprDps1, fprAps1, fprBps1, fprCps1); + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SELECT_PAIR, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC); return true; } @@ -1609,13 +1723,26 @@ bool PPCRecompilerImlGen_PS_MERGE00(ppcImlGenContext_t* ppcImlGenContext, uint32 frB = (opcode>>11)&0x1F; frA = (opcode>>16)&0x1F; frD = 
(opcode>>21)&0x1F; - DefinePS0(frpAps0, frA); - DefinePS0(frpBps0, frB); - DefinePS0(frpDps0, frD); - DefinePS1(frpDps1, frD); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps1, frpBps0); - if (frpDps0 != frpAps0) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps0, frpAps0); + //float s0 = (float)hCPU->fpr[frA].fp0; + //float s1 = (float)hCPU->fpr[frB].fp0; + //hCPU->fpr[frD].fp0 = s0; + //hCPU->fpr[frD].fp1 = s1; + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + // unpcklpd + if( frA == frB ) + { + // simply duplicate bottom into bottom and top of destination register + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA); + } + else + { + // copy bottom of frB to top first + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP, fprRegisterD, fprRegisterB); + // copy bottom of frA + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA); + } return true; } @@ -1625,14 +1752,17 @@ bool PPCRecompilerImlGen_PS_MERGE01(ppcImlGenContext_t* ppcImlGenContext, uint32 frB = (opcode>>11)&0x1F; frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - DefinePS0(frpAps0, frA); - DefinePS1(frpBps1, frB); - DefinePS0(frpDps0, frD); - DefinePS1(frpDps1, frD); + // hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0; + // hCPU->fpr[frD].fp1 = hCPU->fpr[frB].fp1; + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); 
+ uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + + if( fprRegisterD != fprRegisterB ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, fprRegisterD, fprRegisterB); + if( fprRegisterD != fprRegisterA ) + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA); - if (frpDps0 != frpAps0) - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps0, frpAps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps1, frpBps1); return true; } @@ -1643,22 +1773,33 @@ bool PPCRecompilerImlGen_PS_MERGE10(ppcImlGenContext_t* ppcImlGenContext, uint32 frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - DefinePS1(frpAps1, frA); - DefinePS0(frpBps0, frB); - DefinePS0(frpDps0, frD); - DefinePS1(frpDps1, frD); - - if (frD != frB) + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( frA == frB ) { - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps0, frpAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps1, frpBps0); + // swap bottom and top + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterA); + } + else if( frA == frD ) + { + // copy frB bottom to frD bottom + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); + // swap lower and upper half of frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, 
PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterD); + } + else if( frB == frD ) + { + // copy upper half of frA to upper half of frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, fprRegisterD, fprRegisterA); + // swap lower and upper half of frD + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterD); } else { - DefineTempFPR(frpTemp, 0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpTemp, frpBps0); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps0, frpAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps1, frpTemp); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterD); } return true; } @@ -1670,20 +1811,42 @@ bool PPCRecompilerImlGen_PS_MERGE11(ppcImlGenContext_t* ppcImlGenContext, uint32 frA = (opcode>>16)&0x1F; frD = (opcode>>21)&0x1F; - DefinePS1(frpAps1, frA); - DefinePS1(frpBps1, frB); - DefinePS0(frpDps0, frD); - DefinePS1(frpDps1, frD); - - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps0, frpAps1); - ppcImlGenContext->emitInst().make_fpr_r_r(PPCREC_IML_OP_FPR_ASSIGN, frpDps1, frpBps1); + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); + if( 
fprRegisterA == fprRegisterB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA); + } + else if( fprRegisterD != fprRegisterB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, fprRegisterD, fprRegisterB); + } + else if( fprRegisterD == fprRegisterB ) + { + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM, fprRegisterD, fprRegisterA); + } + else + { + debugBreakpoint(); + return false; + } return true; } bool PPCRecompilerImlGen_PS_CMPO0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) { - // Not implemented - return false; + sint32 crfD, frA, frB; + uint32 c=0; + frB = (opcode>>11)&0x1F; + frA = (opcode>>16)&0x1F; + crfD = (opcode>>23)&0x7; + + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPO_BOTTOM, fprRegisterA, fprRegisterB, crfD); + return true; } bool PPCRecompilerImlGen_PS_CMPU0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode) @@ -1692,21 +1855,9 @@ bool PPCRecompilerImlGen_PS_CMPU0(ppcImlGenContext_t* ppcImlGenContext, uint32 o frB = (opcode >> 11) & 0x1F; frA = (opcode >> 16) & 0x1F; crfD = (opcode >> 23) & 0x7; - - DefinePS0(fprA, frA); - DefinePS0(fprB, frB); - - IMLReg crBitRegLT = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_LT); - IMLReg crBitRegGT = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_GT); - IMLReg crBitRegEQ = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_EQ); - IMLReg 
crBitRegSO = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_SO); - - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegLT, IMLCondition::UNORDERED_LT); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegGT, IMLCondition::UNORDERED_GT); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegEQ, IMLCondition::UNORDERED_EQ); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegSO, IMLCondition::UNORDERED_U); - - // todo: set fpscr + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA); + uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPU_BOTTOM, fprRegisterA, fprRegisterB, crfD); return true; } @@ -1716,18 +1867,8 @@ bool PPCRecompilerImlGen_PS_CMPU1(ppcImlGenContext_t* ppcImlGenContext, uint32 o frB = (opcode >> 11) & 0x1F; frA = (opcode >> 16) & 0x1F; crfD = (opcode >> 23) & 0x7; - - DefinePS1(fprA, frA); - DefinePS1(fprB, frB); - - IMLReg crBitRegLT = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_LT); - IMLReg crBitRegGT = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_GT); - IMLReg crBitRegEQ = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_EQ); - IMLReg crBitRegSO = _GetRegCR(ppcImlGenContext, crfD, Espresso::CR_BIT::CR_BIT_INDEX_SO); - - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegLT, IMLCondition::UNORDERED_LT); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegGT, IMLCondition::UNORDERED_GT); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegEQ, IMLCondition::UNORDERED_EQ); - ppcImlGenContext->emitInst().make_fpr_compare(fprA, fprB, crBitRegSO, IMLCondition::UNORDERED_U); + uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA); + uint32 
fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frB); + PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPU_TOP, fprRegisterA, fprRegisterB, crfD); return true; -} +} \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlOptimizer.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlOptimizer.cpp new file mode 100644 index 00000000..45e27664 --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlOptimizer.cpp @@ -0,0 +1,2175 @@ +#include "../Interpreter/PPCInterpreterInternal.h" +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" + +void PPCRecompiler_checkRegisterUsage(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, PPCImlOptimizerUsedRegisters_t* registersUsed) +{ + registersUsed->readNamedReg1 = -1; + registersUsed->readNamedReg2 = -1; + registersUsed->readNamedReg3 = -1; + registersUsed->writtenNamedReg1 = -1; + registersUsed->readFPR1 = -1; + registersUsed->readFPR2 = -1; + registersUsed->readFPR3 = -1; + registersUsed->readFPR4 = -1; + registersUsed->writtenFPR1 = -1; + if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME ) + { + registersUsed->writtenNamedReg1 = imlInstruction->op_r_name.registerIndex; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R ) + { + registersUsed->readNamedReg1 = imlInstruction->op_r_name.registerIndex; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R ) + { + if (imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED || imlInstruction->operation == PPCREC_IML_OP_DCBZ) + { + // both operands are read only + registersUsed->readNamedReg1 = imlInstruction->op_r_r.registerResult; + registersUsed->readNamedReg2 = imlInstruction->op_r_r.registerA; + } + else if ( + imlInstruction->operation == PPCREC_IML_OP_OR || + imlInstruction->operation == 
PPCREC_IML_OP_AND || + imlInstruction->operation == PPCREC_IML_OP_XOR || + imlInstruction->operation == PPCREC_IML_OP_ADD || + imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY || + imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_ME || + imlInstruction->operation == PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY) + { + // result is read and written, operand is read + registersUsed->writtenNamedReg1 = imlInstruction->op_r_r.registerResult; + registersUsed->readNamedReg1 = imlInstruction->op_r_r.registerResult; + registersUsed->readNamedReg2 = imlInstruction->op_r_r.registerA; + } + else if ( + imlInstruction->operation == PPCREC_IML_OP_ASSIGN || + imlInstruction->operation == PPCREC_IML_OP_ENDIAN_SWAP || + imlInstruction->operation == PPCREC_IML_OP_CNTLZW || + imlInstruction->operation == PPCREC_IML_OP_NOT || + imlInstruction->operation == PPCREC_IML_OP_NEG || + imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32 || + imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32) + { + // result is written, operand is read + registersUsed->writtenNamedReg1 = imlInstruction->op_r_r.registerResult; + registersUsed->readNamedReg1 = imlInstruction->op_r_r.registerA; + } + else + cemu_assert_unimplemented(); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) + { + if (imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED || imlInstruction->operation == PPCREC_IML_OP_MTCRF) + { + // operand register is read only + registersUsed->readNamedReg1 = imlInstruction->op_r_immS32.registerIndex; + } + else if (imlInstruction->operation == PPCREC_IML_OP_ADD || + imlInstruction->operation == PPCREC_IML_OP_SUB || + imlInstruction->operation == PPCREC_IML_OP_AND || + imlInstruction->operation == PPCREC_IML_OP_OR || + imlInstruction->operation == PPCREC_IML_OP_XOR || + imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE) + { + // operand register is read and write + registersUsed->readNamedReg1 = 
imlInstruction->op_r_immS32.registerIndex; + registersUsed->writtenNamedReg1 = imlInstruction->op_r_immS32.registerIndex; + } + else + { + // operand register is write only + // todo - use explicit lists, avoid default cases + registersUsed->writtenNamedReg1 = imlInstruction->op_r_immS32.registerIndex; + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32) + { + if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN) + { + // result is written, but also considered read (in case the condition fails) + registersUsed->readNamedReg1 = imlInstruction->op_conditional_r_s32.registerIndex; + registersUsed->writtenNamedReg1 = imlInstruction->op_conditional_r_s32.registerIndex; + } + else + cemu_assert_unimplemented(); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_S32 ) + { + if( imlInstruction->operation == PPCREC_IML_OP_RLWIMI ) + { + // result and operand register are both read, result is written + registersUsed->writtenNamedReg1 = imlInstruction->op_r_r_s32.registerResult; + registersUsed->readNamedReg1 = imlInstruction->op_r_r_s32.registerResult; + registersUsed->readNamedReg2 = imlInstruction->op_r_r_s32.registerA; + } + else + { + // result is write only and operand is read only + registersUsed->writtenNamedReg1 = imlInstruction->op_r_r_s32.registerResult; + registersUsed->readNamedReg1 = imlInstruction->op_r_r_s32.registerA; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_R ) + { + // in all cases result is written and other operands are read only + registersUsed->writtenNamedReg1 = imlInstruction->op_r_r_r.registerResult; + registersUsed->readNamedReg1 = imlInstruction->op_r_r_r.registerA; + registersUsed->readNamedReg2 = imlInstruction->op_r_r_r.registerB; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK ) + { + // no effect on registers + } + else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP ) + { + // no effect on registers + } + else 
if( imlInstruction->type == PPCREC_IML_TYPE_MACRO ) + { + if( imlInstruction->operation == PPCREC_IML_MACRO_BL || imlInstruction->operation == PPCREC_IML_MACRO_B_FAR || imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BLRL || imlInstruction->operation == PPCREC_IML_MACRO_BCTR || imlInstruction->operation == PPCREC_IML_MACRO_BCTRL || imlInstruction->operation == PPCREC_IML_MACRO_LEAVE || imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK || imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES || imlInstruction->operation == PPCREC_IML_MACRO_HLE || imlInstruction->operation == PPCREC_IML_MACRO_MFTB ) + { + // no effect on registers + } + else + cemu_assert_unimplemented(); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD) + { + registersUsed->writtenNamedReg1 = imlInstruction->op_storeLoad.registerData; + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem; + } + else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM) + { + registersUsed->readNamedReg1 = imlInstruction->op_mem2mem.src.registerMem; + registersUsed->readNamedReg2 = imlInstruction->op_mem2mem.dst.registerMem; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED ) + { + registersUsed->writtenNamedReg1 = imlInstruction->op_storeLoad.registerData; + if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem; + if( imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem2; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_STORE ) + { + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerData; + if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg2 = 
imlInstruction->op_storeLoad.registerMem; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED ) + { + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerData; + if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem; + if( imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg3 = imlInstruction->op_storeLoad.registerMem2; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CR ) + { + // only affects cr register + } + else if( imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK ) + { + // no effect on registers + } + else if( imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER ) + { + // no op + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME ) + { + // fpr operation + registersUsed->writtenFPR1 = imlInstruction->op_r_name.registerIndex; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R ) + { + // fpr operation + registersUsed->readFPR1 = imlInstruction->op_r_name.registerIndex; + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD ) + { + // fpr load operation + registersUsed->writtenFPR1 = imlInstruction->op_storeLoad.registerData; + // address is in gpr register + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem; + // determine partially written result + switch (imlInstruction->op_storeLoad.mode) + { + case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0: + case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1: + cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER); + registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerGQR; + break; + case PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0: + // PS1 remains the same + registersUsed->readFPR4 = imlInstruction->op_storeLoad.registerData; + break; + case PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1: + 
case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S16_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U16_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U8_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S8_PS0: + break; + default: + cemu_assert_unimplemented(); + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED ) + { + // fpr load operation + registersUsed->writtenFPR1 = imlInstruction->op_storeLoad.registerData; + // address is in gpr registers + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem; + if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER) + registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem2; + // determine partially written result + switch (imlInstruction->op_storeLoad.mode) + { + case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0: + case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1: + cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER); + registersUsed->readNamedReg3 = imlInstruction->op_storeLoad.registerGQR; + break; + case PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0: + // PS1 remains the same + registersUsed->readFPR4 = imlInstruction->op_storeLoad.registerData; + break; + case PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S16_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U16_PS0: + case PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1: + case PPCREC_FPR_LD_MODE_PSQ_U8_PS0: + break; + default: + cemu_assert_unimplemented(); + } + } + else if( imlInstruction->type == 
PPCREC_IML_TYPE_FPR_STORE ) + { + // fpr store operation + registersUsed->readFPR1 = imlInstruction->op_storeLoad.registerData; + if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem; + // PSQ generic stores also access GQR + switch (imlInstruction->op_storeLoad.mode) + { + case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0: + case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1: + cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER); + registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerGQR; + break; + default: + break; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED ) + { + // fpr store operation + registersUsed->readFPR1 = imlInstruction->op_storeLoad.registerData; + // address is in gpr registers + if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem; + if( imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER ) + registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem2; + // PSQ generic stores also access GQR + switch (imlInstruction->op_storeLoad.mode) + { + case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0: + case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1: + cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER); + registersUsed->readNamedReg3 = imlInstruction->op_storeLoad.registerGQR; + break; + default: + break; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R ) + { + // fpr operation + if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP || + imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP || + imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED || + imlInstruction->operation == PPCREC_IML_OP_ASSIGN || + imlInstruction->operation == 
PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP || + imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_FRES_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_FRSQRTE_PAIR ) + { + // operand read, result written + registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand; + registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r.registerResult; + } + else if( + imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP || + imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP || + imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64 || + imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ || + imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT + ) + { + // operand read, result read and (partially) written + registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand; + registersUsed->readFPR4 = imlInstruction->op_fpr_r_r.registerResult; + registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r.registerResult; + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_PAIR || + imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_BOTTOM ) + { + // operand read, result read and written + registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand; + registersUsed->readFPR2 = 
imlInstruction->op_fpr_r_r.registerResult; + registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r.registerResult; + + } + else if(imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_TOP || + imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPO_BOTTOM) + { + // operand read, result read + registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand; + registersUsed->readFPR2 = imlInstruction->op_fpr_r_r.registerResult; + } + else + cemu_assert_unimplemented(); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R ) + { + // fpr operation + registersUsed->readFPR1 = imlInstruction->op_fpr_r_r_r.registerOperandA; + registersUsed->readFPR2 = imlInstruction->op_fpr_r_r_r.registerOperandB; + registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r_r.registerResult; + // handle partially written result + switch (imlInstruction->operation) + { + case PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM: + case PPCREC_IML_OP_FPR_ADD_BOTTOM: + case PPCREC_IML_OP_FPR_SUB_BOTTOM: + registersUsed->readFPR4 = imlInstruction->op_fpr_r_r_r.registerResult; + break; + case PPCREC_IML_OP_FPR_SUB_PAIR: + break; + default: + cemu_assert_unimplemented(); + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R ) + { + // fpr operation + registersUsed->readFPR1 = imlInstruction->op_fpr_r_r_r_r.registerOperandA; + registersUsed->readFPR2 = imlInstruction->op_fpr_r_r_r_r.registerOperandB; + registersUsed->readFPR3 = imlInstruction->op_fpr_r_r_r_r.registerOperandC; + registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r_r_r.registerResult; + // handle partially written result + switch (imlInstruction->operation) + { + case PPCREC_IML_OP_FPR_SELECT_BOTTOM: + registersUsed->readFPR4 = imlInstruction->op_fpr_r_r_r_r.registerResult; + break; + case PPCREC_IML_OP_FPR_SUM0: + case PPCREC_IML_OP_FPR_SUM1: + case PPCREC_IML_OP_FPR_SELECT_PAIR: + break; + default: + cemu_assert_unimplemented(); + } + } + else if( 
imlInstruction->type == PPCREC_IML_TYPE_FPR_R ) + { + // fpr operation + if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64 || + imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM || + imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR ) + { + registersUsed->readFPR1 = imlInstruction->op_fpr_r.registerResult; + registersUsed->writtenFPR1 = imlInstruction->op_fpr_r.registerResult; + } + else + cemu_assert_unimplemented(); + } + else + { + cemu_assert_unimplemented(); + } +} + +#define replaceRegister(__x,__r,__n) (((__x)==(__r))?(__n):(__x)) + +sint32 replaceRegisterMultiple(sint32 reg, sint32 match[4], sint32 replaced[4]) +{ + for (sint32 i = 0; i < 4; i++) + { + if(match[i] < 0) + continue; + if (reg == match[i]) + { + return replaced[i]; + } + } + return reg; +} + +void PPCRecompiler_replaceGPRRegisterUsageMultiple(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 gprRegisterSearched[4], sint32 gprRegisterReplaced[4]) +{ + if (imlInstruction->type == PPCREC_IML_TYPE_R_NAME) + { + imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_NAME_R) + { + imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_R) + { + imlInstruction->op_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_r_r.registerResult, gprRegisterSearched, gprRegisterReplaced); + imlInstruction->op_r_r.registerA = 
replaceRegisterMultiple(imlInstruction->op_r_r.registerA, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) + { + imlInstruction->op_r_immS32.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_immS32.registerIndex, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32) + { + imlInstruction->op_conditional_r_s32.registerIndex = replaceRegisterMultiple(imlInstruction->op_conditional_r_s32.registerIndex, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32) + { + // in all cases result is written and other operand is read only + imlInstruction->op_r_r_s32.registerResult = replaceRegisterMultiple(imlInstruction->op_r_r_s32.registerResult, gprRegisterSearched, gprRegisterReplaced); + imlInstruction->op_r_r_s32.registerA = replaceRegisterMultiple(imlInstruction->op_r_r_s32.registerA, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R) + { + // in all cases result is written and other operands are read only + imlInstruction->op_r_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_r_r_r.registerResult, gprRegisterSearched, gprRegisterReplaced); + imlInstruction->op_r_r_r.registerA = replaceRegisterMultiple(imlInstruction->op_r_r_r.registerA, gprRegisterSearched, gprRegisterReplaced); + imlInstruction->op_r_r_r.registerB = replaceRegisterMultiple(imlInstruction->op_r_r_r.registerB, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) + { + // no effect on registers + } + else if (imlInstruction->type == PPCREC_IML_TYPE_NO_OP) + { + // no effect on registers + } + else if (imlInstruction->type == PPCREC_IML_TYPE_MACRO) + { + if (imlInstruction->operation == PPCREC_IML_MACRO_BL || imlInstruction->operation == 
PPCREC_IML_MACRO_B_FAR || imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BLRL || imlInstruction->operation == PPCREC_IML_MACRO_BCTR || imlInstruction->operation == PPCREC_IML_MACRO_BCTRL || imlInstruction->operation == PPCREC_IML_MACRO_LEAVE || imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK || imlInstruction->operation == PPCREC_IML_MACRO_HLE || imlInstruction->operation == PPCREC_IML_MACRO_MFTB || imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES ) + { + // no effect on registers + } + else + { + cemu_assert_unimplemented(); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced); + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced); + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER) + imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_STORE) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced); + if 
(imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced); + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER) + imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CR) + { + // only affects cr register + } + else if (imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK) + { + // no effect on registers + } + else if (imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER) + { + // no op + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME) + { + + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R) + { + + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD) + { + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + } + if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED) + { + if 
(imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + } + if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced); + } + if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE) + { + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + } + if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED) + { + if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced); + } + if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced); + } + if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER) + { + imlInstruction->op_storeLoad.registerGQR = 
replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R) + { + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R) + { + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R) + { + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R) + { + } + else + { + cemu_assert_unimplemented(); + } +} + +void PPCRecompiler_replaceFPRRegisterUsageMultiple(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 fprRegisterSearched[4], sint32 fprRegisterReplaced[4]) +{ + if (imlInstruction->type == PPCREC_IML_TYPE_R_NAME) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_NAME_R) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_R) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK) + { + // no effect on registers + } + else if (imlInstruction->type == PPCREC_IML_TYPE_NO_OP) + { + // no effect on registers + } + else if (imlInstruction->type == PPCREC_IML_TYPE_MACRO) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_STORE) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CR) + { + // only affects cr register + 
} + else if (imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK) + { + // no effect on registers + } + else if (imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER) + { + // no op + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME) + { + imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R) + { + imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED) + { + imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R) + { + imlInstruction->op_fpr_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r.registerOperand = replaceRegisterMultiple(imlInstruction->op_fpr_r_r.registerOperand, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R) + { + 
imlInstruction->op_fpr_r_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r.registerOperandA = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r.registerOperandB = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R) + { + imlInstruction->op_fpr_r_r_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r_r.registerOperandA = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r_r.registerOperandB = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r_r.registerOperandC = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerOperandC, fprRegisterSearched, fprRegisterReplaced); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R) + { + imlInstruction->op_fpr_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + } + else + { + cemu_assert_unimplemented(); + } +} + +void PPCRecompiler_replaceFPRRegisterUsage(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 fprRegisterSearched, sint32 fprRegisterReplaced) +{ + if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_S32 ) + { + // 
not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_S32 ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_R ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK ) + { + // no effect on registers + } + else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP ) + { + // no effect on registers + } + else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD ) + { + // not affected + } + else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_STORE ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED ) + { + // not affected + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CR ) + { + // only affects cr register + } + else if( imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK ) + { + // no effect on registers + } + else if( imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER ) + { + // no op + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME ) + { + imlInstruction->op_r_name.registerIndex = replaceRegister(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R ) + { + imlInstruction->op_r_name.registerIndex = replaceRegister(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD ) + { + imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED ) + { + 
imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE ) + { + imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED ) + { + imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R ) + { + imlInstruction->op_fpr_r_r.registerResult = replaceRegister(imlInstruction->op_fpr_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r.registerOperand = replaceRegister(imlInstruction->op_fpr_r_r.registerOperand, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R ) + { + imlInstruction->op_fpr_r_r_r.registerResult = replaceRegister(imlInstruction->op_fpr_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r.registerOperandA = replaceRegister(imlInstruction->op_fpr_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r.registerOperandB = replaceRegister(imlInstruction->op_fpr_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R ) + { + imlInstruction->op_fpr_r_r_r_r.registerResult = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r_r.registerOperandA = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r_r.registerOperandB = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerOperandB, 
fprRegisterSearched, fprRegisterReplaced); + imlInstruction->op_fpr_r_r_r_r.registerOperandC = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerOperandC, fprRegisterSearched, fprRegisterReplaced); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R ) + { + imlInstruction->op_fpr_r.registerResult = replaceRegister(imlInstruction->op_fpr_r.registerResult, fprRegisterSearched, fprRegisterReplaced); + } + else + { + cemu_assert_unimplemented(); + } +} + +typedef struct +{ + struct + { + sint32 instructionIndex; + sint32 registerPreviousName; + sint32 registerNewName; + sint32 index; // new index + sint32 previousIndex; // previous index (always out of range) + bool nameMustBeMaintained; // must be stored before replacement and loaded after replacement ends + }replacedRegisterEntry[PPC_X64_GPR_USABLE_REGISTERS]; + sint32 count; +}replacedRegisterTracker_t; + +bool PPCRecompiler_checkIfGPRRegisterIsAccessed(PPCImlOptimizerUsedRegisters_t* registersUsed, sint32 gprRegister) +{ + if( registersUsed->readNamedReg1 == gprRegister ) + return true; + if( registersUsed->readNamedReg2 == gprRegister ) + return true; + if( registersUsed->readNamedReg3 == gprRegister ) + return true; + if( registersUsed->writtenNamedReg1 == gprRegister ) + return true; + return false; +} + +/* + * Returns index of register to replace + * If no register needs to be replaced, -1 is returned + */ +sint32 PPCRecompiler_getNextRegisterToReplace(PPCImlOptimizerUsedRegisters_t* registersUsed) +{ + // get index of register to replace + sint32 gprToReplace = -1; + if( registersUsed->readNamedReg1 >= PPC_X64_GPR_USABLE_REGISTERS ) + gprToReplace = registersUsed->readNamedReg1; + else if( registersUsed->readNamedReg2 >= PPC_X64_GPR_USABLE_REGISTERS ) + gprToReplace = registersUsed->readNamedReg2; + else if( registersUsed->readNamedReg3 >= PPC_X64_GPR_USABLE_REGISTERS ) + gprToReplace = registersUsed->readNamedReg3; + else if( registersUsed->writtenNamedReg1 >= PPC_X64_GPR_USABLE_REGISTERS ) + 
gprToReplace = registersUsed->writtenNamedReg1; + // return + return gprToReplace; +} + +bool PPCRecompiler_findAvailableRegisterDepr(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 imlIndexStart, replacedRegisterTracker_t* replacedRegisterTracker, sint32* registerIndex, sint32* registerName, bool* isUsed) +{ + PPCImlOptimizerUsedRegisters_t registersUsed; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+imlIndexStart, &registersUsed); + // mask all registers used by this instruction + uint32 instructionReservedRegisterMask = 0;//(1<<(PPC_X64_GPR_USABLE_REGISTERS+1))-1; + if( registersUsed.readNamedReg1 != -1 ) + instructionReservedRegisterMask |= (1<<(registersUsed.readNamedReg1)); + if( registersUsed.readNamedReg2 != -1 ) + instructionReservedRegisterMask |= (1<<(registersUsed.readNamedReg2)); + if( registersUsed.readNamedReg3 != -1 ) + instructionReservedRegisterMask |= (1<<(registersUsed.readNamedReg3)); + if( registersUsed.writtenNamedReg1 != -1 ) + instructionReservedRegisterMask |= (1<<(registersUsed.writtenNamedReg1)); + // mask all registers that are reserved for other replacements + uint32 replacementReservedRegisterMask = 0; + for(sint32 i=0; i<replacedRegisterTracker->count; i++) + { + replacementReservedRegisterMask |= (1<<replacedRegisterTracker->replacedRegisterEntry[i].index); + } + + // potential improvement: Scan ahead a few instructions and look for registers that are the least used (or ideally never used) + + // pick available register + const uint32 allRegisterMask = (1<<(PPC_X64_GPR_USABLE_REGISTERS+1))-1; // mask with set bit for every register + uint32 reservedRegisterMask = instructionReservedRegisterMask | replacementReservedRegisterMask; + cemu_assert(instructionReservedRegisterMask != allRegisterMask); // no usable register! 
(Need to store a register from the replacedRegisterTracker) + sint32 usedRegisterIndex = -1; + for(sint32 i=0; imappedRegister[i] != -1 ) + { + // register is reserved by segment -> In use + *isUsed = true; + *registerName = ppcImlGenContext->mappedRegister[i]; + } + else + { + *isUsed = false; + *registerName = -1; + } + *registerIndex = i; + return true; + } + } + return false; + +} + +bool PPCRecompiler_hasSuffixInstruction(PPCRecImlSegment_t* imlSegment) +{ + if( imlSegment->imlListCount == 0 ) + return false; + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+imlSegment->imlListCount-1; + if( imlInstruction->type == PPCREC_IML_TYPE_MACRO && (imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BCTR) || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_BL || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_B_FAR || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_BLRL || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_BCTRL || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_LEAVE || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_HLE || + imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_MFTB || + imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER || + imlInstruction->type == PPCREC_IML_TYPE_CJUMP || + imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK ) + return true; + return false; +} + +void PPCRecompiler_storeReplacedRegister(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, replacedRegisterTracker_t* replacedRegisterTracker, sint32 registerTrackerIndex, sint32* imlIndex) +{ + // store register + sint32 imlIndexEdit = *imlIndex; + 
PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndexEdit, 1); + // name_unusedRegister = unusedRegister + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+(imlIndexEdit+0); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_NAME_R; + imlInstructionItr->crRegister = PPC_REC_INVALID_REGISTER; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].index; + imlInstructionItr->op_r_name.name = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].registerNewName; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + imlIndexEdit++; + // load new register if required + if( replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].nameMustBeMaintained ) + { + PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndexEdit, 1); + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+(imlIndexEdit+0); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_R_NAME; + imlInstructionItr->crRegister = PPC_REC_INVALID_REGISTER; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].index; + imlInstructionItr->op_r_name.name = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].registerPreviousName;//ppcImlGenContext->mappedRegister[replacedRegisterTracker.replacedRegisterEntry[i].index]; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + imlIndexEdit += 1; + } + // move last entry to current one + memcpy(replacedRegisterTracker->replacedRegisterEntry+registerTrackerIndex, replacedRegisterTracker->replacedRegisterEntry+replacedRegisterTracker->count-1, 
sizeof(replacedRegisterTracker->replacedRegisterEntry[0])); + replacedRegisterTracker->count--; + *imlIndex = imlIndexEdit; +} + +bool PPCRecompiler_reduceNumberOfFPRRegisters(ppcImlGenContext_t* ppcImlGenContext) +{ + // only xmm0 to xmm14 may be used, xmm15 is reserved + // this method will reduce the number of fpr registers used + // inefficient algorithm for optimizing away excess registers + // we simply load, use and store excess registers into other unused registers when we need to + // first we remove all name load and store instructions that involve out-of-bounds registers + for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + sint32 imlIndex = 0; + while( imlIndex < imlSegment->imlListCount ) + { + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+imlIndex; + if( imlInstructionItr->type == PPCREC_IML_TYPE_FPR_R_NAME || imlInstructionItr->type == PPCREC_IML_TYPE_FPR_NAME_R ) + { + if( imlInstructionItr->op_r_name.registerIndex >= PPC_X64_FPR_USABLE_REGISTERS ) + { + // convert to NO-OP instruction + imlInstructionItr->type = PPCREC_IML_TYPE_NO_OP; + imlInstructionItr->associatedPPCAddress = 0; + } + } + imlIndex++; + } + } + // replace registers + for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + sint32 imlIndex = 0; + while( imlIndex < imlSegment->imlListCount ) + { + PPCImlOptimizerUsedRegisters_t registersUsed; + while( true ) + { + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+imlIndex, &registersUsed); + if( registersUsed.readFPR1 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.readFPR2 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.readFPR3 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.readFPR4 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.writtenFPR1 >= PPC_X64_FPR_USABLE_REGISTERS ) + { + // get index of register to replace + sint32 fprToReplace = -1; + if( registersUsed.readFPR1 
>= PPC_X64_FPR_USABLE_REGISTERS ) + fprToReplace = registersUsed.readFPR1; + else if( registersUsed.readFPR2 >= PPC_X64_FPR_USABLE_REGISTERS ) + fprToReplace = registersUsed.readFPR2; + else if (registersUsed.readFPR3 >= PPC_X64_FPR_USABLE_REGISTERS) + fprToReplace = registersUsed.readFPR3; + else if (registersUsed.readFPR4 >= PPC_X64_FPR_USABLE_REGISTERS) + fprToReplace = registersUsed.readFPR4; + else if( registersUsed.writtenFPR1 >= PPC_X64_FPR_USABLE_REGISTERS ) + fprToReplace = registersUsed.writtenFPR1; + // generate mask of useable registers + uint8 useableRegisterMask = 0x7F; // lowest bit is fpr register 0 + if( registersUsed.readFPR1 != -1 ) + useableRegisterMask &= ~(1<<(registersUsed.readFPR1)); + if( registersUsed.readFPR2 != -1 ) + useableRegisterMask &= ~(1<<(registersUsed.readFPR2)); + if (registersUsed.readFPR3 != -1) + useableRegisterMask &= ~(1 << (registersUsed.readFPR3)); + if (registersUsed.readFPR4 != -1) + useableRegisterMask &= ~(1 << (registersUsed.readFPR4)); + if( registersUsed.writtenFPR1 != -1 ) + useableRegisterMask &= ~(1<<(registersUsed.writtenFPR1)); + // get highest unused register index (0-6 range) + sint32 unusedRegisterIndex = -1; + for(sint32 f=0; fmappedFPRRegister[unusedRegisterIndex]; + bool replacedRegisterIsUsed = true; + if( unusedRegisterName >= PPCREC_NAME_FPR0 && unusedRegisterName < (PPCREC_NAME_FPR0+32) ) + { + replacedRegisterIsUsed = imlSegment->ppcFPRUsed[unusedRegisterName-PPCREC_NAME_FPR0]; + } + // replace registers that are out of range + PPCRecompiler_replaceFPRRegisterUsage(ppcImlGenContext, imlSegment->imlList+imlIndex, fprToReplace, unusedRegisterIndex); + // add load/store name after instruction + PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndex+1, 2); + // add load/store before current instruction + PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndex, 2); + // name_unusedRegister = unusedRegister + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+(imlIndex+0); + 
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + if( replacedRegisterIsUsed ) + { + imlInstructionItr->type = PPCREC_IML_TYPE_FPR_NAME_R; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex; + imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[unusedRegisterIndex]; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + } + else + imlInstructionItr->type = PPCREC_IML_TYPE_NO_OP; + imlInstructionItr = imlSegment->imlList+(imlIndex+1); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_FPR_R_NAME; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex; + imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[fprToReplace]; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + // name_gprToReplace = unusedRegister + imlInstructionItr = imlSegment->imlList+(imlIndex+3); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_FPR_NAME_R; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex; + imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[fprToReplace]; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + // unusedRegister = name_unusedRegister + imlInstructionItr = imlSegment->imlList+(imlIndex+4); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + if( replacedRegisterIsUsed ) + { + imlInstructionItr->type = PPCREC_IML_TYPE_FPR_R_NAME; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex; + imlInstructionItr->op_r_name.name = 
ppcImlGenContext->mappedFPRRegister[unusedRegisterIndex]; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + } + else + imlInstructionItr->type = PPCREC_IML_TYPE_NO_OP; + } + else + break; + } + imlIndex++; + } + } + return true; +} + +typedef struct +{ + bool isActive; + uint32 virtualReg; + sint32 lastUseIndex; +}ppcRecRegisterMapping_t; + +typedef struct +{ + ppcRecRegisterMapping_t currentMapping[PPC_X64_FPR_USABLE_REGISTERS]; + sint32 ppcRegToMapping[64]; + sint32 currentUseIndex; +}ppcRecManageRegisters_t; + +ppcRecRegisterMapping_t* PPCRecompiler_findAvailableRegisterDepr(ppcRecManageRegisters_t* rCtx, PPCImlOptimizerUsedRegisters_t* instructionUsedRegisters) +{ + // find free register + for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++) + { + if (rCtx->currentMapping[i].isActive == false) + { + rCtx->currentMapping[i].isActive = true; + rCtx->currentMapping[i].virtualReg = -1; + rCtx->currentMapping[i].lastUseIndex = rCtx->currentUseIndex; + return rCtx->currentMapping + i; + } + } + // all registers are used + return nullptr; +} + +ppcRecRegisterMapping_t* PPCRecompiler_findUnloadableRegister(ppcRecManageRegisters_t* rCtx, PPCImlOptimizerUsedRegisters_t* instructionUsedRegisters, uint32 unloadLockedMask) +{ + // find unloadable register (with lowest lastUseIndex) + sint32 unloadIndex = -1; + sint32 unloadIndexLastUse = 0x7FFFFFFF; + for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++) + { + if (rCtx->currentMapping[i].isActive == false) + continue; + if( (unloadLockedMask&(1<currentMapping[i].virtualReg; + bool isReserved = false; + for (sint32 f = 0; f < 4; f++) + { + if (virtualReg == (sint32)instructionUsedRegisters->fpr[f]) + { + isReserved = true; + break; + } + } + if (isReserved) + continue; + if (rCtx->currentMapping[i].lastUseIndex < unloadIndexLastUse) + { + unloadIndexLastUse = rCtx->currentMapping[i].lastUseIndex; + unloadIndex = i; + } + } + cemu_assert(unloadIndex != -1); + return 
rCtx->currentMapping + unloadIndex; +} + +bool PPCRecompiler_manageFPRRegistersForSegment(ppcImlGenContext_t* ppcImlGenContext, sint32 segmentIndex) +{ + ppcRecManageRegisters_t rCtx = { 0 }; + for (sint32 i = 0; i < 64; i++) + rCtx.ppcRegToMapping[i] = -1; + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[segmentIndex]; + sint32 idx = 0; + sint32 currentUseIndex = 0; + PPCImlOptimizerUsedRegisters_t registersUsed; + while (idx < imlSegment->imlListCount) + { + if ( PPCRecompiler_isSuffixInstruction(imlSegment->imlList + idx) ) + break; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList + idx, ®istersUsed); + sint32 fprMatch[4]; + sint32 fprReplace[4]; + fprMatch[0] = -1; + fprMatch[1] = -1; + fprMatch[2] = -1; + fprMatch[3] = -1; + fprReplace[0] = -1; + fprReplace[1] = -1; + fprReplace[2] = -1; + fprReplace[3] = -1; + // generate a mask of registers that we may not free + sint32 numReplacedOperands = 0; + uint32 unloadLockedMask = 0; + for (sint32 f = 0; f < 5; f++) + { + sint32 virtualFpr; + if (f == 0) + virtualFpr = registersUsed.readFPR1; + else if (f == 1) + virtualFpr = registersUsed.readFPR2; + else if (f == 2) + virtualFpr = registersUsed.readFPR3; + else if (f == 3) + virtualFpr = registersUsed.readFPR4; + else if (f == 4) + virtualFpr = registersUsed.writtenFPR1; + if( virtualFpr < 0 ) + continue; + cemu_assert_debug(virtualFpr < 64); + // check if this virtual FPR is already loaded in any real register + ppcRecRegisterMapping_t* regMapping; + if (rCtx.ppcRegToMapping[virtualFpr] == -1) + { + // not loaded + // find available register + while (true) + { + regMapping = PPCRecompiler_findAvailableRegisterDepr(&rCtx, ®istersUsed); + if (regMapping == NULL) + { + // unload least recently used register and try again + ppcRecRegisterMapping_t* unloadRegMapping = PPCRecompiler_findUnloadableRegister(&rCtx, ®istersUsed, unloadLockedMask); + // mark as locked + unloadLockedMask |= (1<<(unloadRegMapping- rCtx.currentMapping)); 
+ // create unload instruction + PPCRecompiler_pushBackIMLInstructions(imlSegment, idx, 1); + PPCRecImlInstruction_t* imlInstructionTemp = imlSegment->imlList + idx; + memset(imlInstructionTemp, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionTemp->type = PPCREC_IML_TYPE_FPR_NAME_R; + imlInstructionTemp->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionTemp->op_r_name.registerIndex = (uint8)(unloadRegMapping - rCtx.currentMapping); + imlInstructionTemp->op_r_name.name = ppcImlGenContext->mappedFPRRegister[unloadRegMapping->virtualReg]; + imlInstructionTemp->op_r_name.copyWidth = 32; + imlInstructionTemp->op_r_name.flags = 0; + idx++; + // update mapping + unloadRegMapping->isActive = false; + rCtx.ppcRegToMapping[unloadRegMapping->virtualReg] = -1; + } + else + break; + } + // create load instruction + PPCRecompiler_pushBackIMLInstructions(imlSegment, idx, 1); + PPCRecImlInstruction_t* imlInstructionTemp = imlSegment->imlList + idx; + memset(imlInstructionTemp, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionTemp->type = PPCREC_IML_TYPE_FPR_R_NAME; + imlInstructionTemp->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionTemp->op_r_name.registerIndex = (uint8)(regMapping-rCtx.currentMapping); + imlInstructionTemp->op_r_name.name = ppcImlGenContext->mappedFPRRegister[virtualFpr]; + imlInstructionTemp->op_r_name.copyWidth = 32; + imlInstructionTemp->op_r_name.flags = 0; + idx++; + // update mapping + regMapping->virtualReg = virtualFpr; + rCtx.ppcRegToMapping[virtualFpr] = (sint32)(regMapping - rCtx.currentMapping); + regMapping->lastUseIndex = rCtx.currentUseIndex; + rCtx.currentUseIndex++; + } + else + { + regMapping = rCtx.currentMapping + rCtx.ppcRegToMapping[virtualFpr]; + regMapping->lastUseIndex = rCtx.currentUseIndex; + rCtx.currentUseIndex++; + } + // replace FPR + bool entryFound = false; + for (sint32 t = 0; t < numReplacedOperands; t++) + { + if (fprMatch[t] == virtualFpr) + { + cemu_assert_debug(fprReplace[t] == (regMapping - 
rCtx.currentMapping)); + entryFound = true; + break; + } + } + if (entryFound == false) + { + cemu_assert_debug(numReplacedOperands != 4); + fprMatch[numReplacedOperands] = virtualFpr; + fprReplace[numReplacedOperands] = (sint32)(regMapping - rCtx.currentMapping); + numReplacedOperands++; + } + } + if (numReplacedOperands > 0) + { + PPCRecompiler_replaceFPRRegisterUsageMultiple(ppcImlGenContext, imlSegment->imlList + idx, fprMatch, fprReplace); + } + // next + idx++; + } + // count loaded registers + sint32 numLoadedRegisters = 0; + for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++) + { + if (rCtx.currentMapping[i].isActive) + numLoadedRegisters++; + } + // store all loaded registers + if (numLoadedRegisters > 0) + { + PPCRecompiler_pushBackIMLInstructions(imlSegment, idx, numLoadedRegisters); + for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++) + { + if (rCtx.currentMapping[i].isActive == false) + continue; + PPCRecImlInstruction_t* imlInstructionTemp = imlSegment->imlList + idx; + memset(imlInstructionTemp, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionTemp->type = PPCREC_IML_TYPE_FPR_NAME_R; + imlInstructionTemp->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionTemp->op_r_name.registerIndex = i; + imlInstructionTemp->op_r_name.name = ppcImlGenContext->mappedFPRRegister[rCtx.currentMapping[i].virtualReg]; + imlInstructionTemp->op_r_name.copyWidth = 32; + imlInstructionTemp->op_r_name.flags = 0; + idx++; + } + } + return true; +} + +bool PPCRecompiler_manageFPRRegisters(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + if (PPCRecompiler_manageFPRRegistersForSegment(ppcImlGenContext, s) == false) + return false; + } + return true; +} + + +/* + * Returns true if the loaded value is guaranteed to be overwritten + */ +bool PPCRecompiler_trackRedundantNameLoadInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* 
nameStoreInstruction, sint32 scanDepth) +{ + sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex; + for(sint32 i=startIndex; i<imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + //nameStoreInstruction->op_r_name.registerIndex + PPCImlOptimizerUsedRegisters_t registersUsed; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed); + if( registersUsed.readNamedReg1 == registerIndex || registersUsed.readNamedReg2 == registerIndex || registersUsed.readNamedReg3 == registerIndex ) + return false; + if( registersUsed.writtenNamedReg1 == registerIndex ) + return true; + } + // todo: Scan next segment(s) + return false; +} + +/* + * Returns true if the loaded value is guaranteed to be overwritten + */ +bool PPCRecompiler_trackRedundantFPRNameLoadInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth) +{ + sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex; + for(sint32 i=startIndex; i<imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + PPCImlOptimizerUsedRegisters_t registersUsed; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed); + if( registersUsed.readFPR1 == registerIndex || registersUsed.readFPR2 == registerIndex || registersUsed.readFPR3 == registerIndex || registersUsed.readFPR4 == registerIndex) + return false; + if( registersUsed.writtenFPR1 == registerIndex ) + return true; + } + // todo: Scan next segment(s) + return false; +} + +/* + * Returns true if the loaded name is never changed + */ +bool PPCRecompiler_trackRedundantNameStoreInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth) +{ + sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex; + 
for(sint32 i=startIndex; i>=0; i--) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + PPCImlOptimizerUsedRegisters_t registersUsed; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed); + if( registersUsed.writtenNamedReg1 == registerIndex ) + { + if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_NAME ) + return true; + return false; + } + } + return false; +} + +sint32 debugCallCounter1 = 0; + +/* + * Returns true if the name is overwritten in the current or any following segments + */ +bool PPCRecompiler_trackOverwrittenNameStoreInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth) +{ + //sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex; + uint32 name = nameStoreInstruction->op_r_name.name; + for(sint32 i=startIndex; i<imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_NAME ) + { + // name is loaded before being written + if( imlSegment->imlList[i].op_r_name.name == name ) + return false; + } + else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_NAME_R ) + { + // name is written before being loaded + if( imlSegment->imlList[i].op_r_name.name == name ) + return true; + } + } + if( scanDepth >= 2 ) + return false; + if( imlSegment->nextSegmentIsUncertain ) + return false; + if( imlSegment->nextSegmentBranchTaken && PPCRecompiler_trackOverwrittenNameStoreInstruction(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, 0, nameStoreInstruction, scanDepth+1) == false ) + return false; + if( imlSegment->nextSegmentBranchNotTaken && PPCRecompiler_trackOverwrittenNameStoreInstruction(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, nameStoreInstruction, scanDepth+1) == false ) + return false; + if( imlSegment->nextSegmentBranchTaken == NULL && 
imlSegment->nextSegmentBranchNotTaken == NULL ) + return false; + + return true; +} + +/* + * Returns true if the loaded FPR name is never changed + */ +bool PPCRecompiler_trackRedundantFPRNameStoreInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth) +{ + sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex; + for(sint32 i=startIndex; i>=0; i--) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + PPCImlOptimizerUsedRegisters_t registersUsed; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed); + if( registersUsed.writtenFPR1 == registerIndex ) + { + if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_R_NAME ) + return true; + return false; + } + } + // todo: Scan next segment(s) + return false; +} + +uint32 _PPCRecompiler_getCROverwriteMask(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, uint32 currentOverwriteMask, uint32 currentReadMask, uint32 scanDepth) +{ + // is any bit overwritten but not read? 
+ uint32 overwriteMask = imlSegment->crBitsWritten&~imlSegment->crBitsInput; + currentOverwriteMask |= overwriteMask; + // next segment + if( imlSegment->nextSegmentIsUncertain == false && scanDepth < 3 ) + { + uint32 nextSegmentOverwriteMask = 0; + if( imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchNotTaken ) + { + uint32 mask0 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, 0, 0, scanDepth+1); + uint32 mask1 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, scanDepth+1); + nextSegmentOverwriteMask = mask0&mask1; + } + else if( imlSegment->nextSegmentBranchNotTaken) + { + nextSegmentOverwriteMask = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, scanDepth+1); + } + nextSegmentOverwriteMask &= ~imlSegment->crBitsRead; + currentOverwriteMask |= nextSegmentOverwriteMask; + } + else if (imlSegment->nextSegmentIsUncertain) + { + if (ppcImlGenContext->segmentListCount >= 5) + { + return 7; // for more complex functions we assume that CR is not passed on + } + } + return currentOverwriteMask; +} + +/* + * Returns a mask of all CR bits that are overwritten (written but not read) in the segment and all it's following segments + * If the write state of a CR bit cannot be determined, it is returned as 0 (not overwritten) + */ +uint32 PPCRecompiler_getCROverwriteMask(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + if (imlSegment->nextSegmentIsUncertain) + { + return 0; + } + if( imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchNotTaken ) + { + uint32 mask0 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, 0, 0, 0); + uint32 mask1 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, 0); + return mask0&mask1; // only return bits that are overwritten in both branches + } + else if( 
imlSegment->nextSegmentBranchNotTaken ) + { + uint32 mask = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, 0); + return mask; + } + else + { + // not implemented + } + return 0; +} + +void PPCRecompiler_removeRedundantCRUpdates(ppcImlGenContext_t* ppcImlGenContext) +{ + for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + + for(sint32 i=0; i<imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP) + { + if (imlInstruction->op_conditionalJump.condition != PPCREC_JUMP_CONDITION_NONE) + { + uint32 crBitFlag = 1 << (imlInstruction->op_conditionalJump.crRegisterIndex * 4 + imlInstruction->op_conditionalJump.crBitIndex); + imlSegment->crBitsInput |= (crBitFlag&~imlSegment->crBitsWritten); // flag bits that have not already been written + imlSegment->crBitsRead |= (crBitFlag); + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32) + { + uint32 crBitFlag = 1 << (imlInstruction->op_conditional_r_s32.crRegisterIndex * 4 + imlInstruction->op_conditional_r_s32.crBitIndex); + imlSegment->crBitsInput |= (crBitFlag&~imlSegment->crBitsWritten); // flag bits that have not already been written + imlSegment->crBitsRead |= (crBitFlag); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MFCR) + { + imlSegment->crBitsRead |= 0xFFFFFFFF; + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MTCRF) + { + imlSegment->crBitsWritten |= ppc_MTCRFMaskToCRBitMask((uint32)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CR ) + { + if (imlInstruction->operation == PPCREC_IML_OP_CR_CLEAR || + imlInstruction->operation == PPCREC_IML_OP_CR_SET) + { + uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD); + imlSegment->crBitsWritten |= 
(crBitFlag & ~imlSegment->crBitsWritten); + } + else if (imlInstruction->operation == PPCREC_IML_OP_CR_OR || + imlInstruction->operation == PPCREC_IML_OP_CR_ORC || + imlInstruction->operation == PPCREC_IML_OP_CR_AND || + imlInstruction->operation == PPCREC_IML_OP_CR_ANDC) + { + uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD); + imlSegment->crBitsWritten |= (crBitFlag & ~imlSegment->crBitsWritten); + crBitFlag = 1 << (imlInstruction->op_cr.crA); + imlSegment->crBitsRead |= (crBitFlag & ~imlSegment->crBitsRead); + crBitFlag = 1 << (imlInstruction->op_cr.crB); + imlSegment->crBitsRead |= (crBitFlag & ~imlSegment->crBitsRead); + } + else + cemu_assert_unimplemented(); + } + else if( PPCRecompilerImlAnalyzer_canTypeWriteCR(imlInstruction) && imlInstruction->crRegister >= 0 && imlInstruction->crRegister <= 7 ) + { + imlSegment->crBitsWritten |= (0xF<<(imlInstruction->crRegister*4)); + } + else if( (imlInstruction->type == PPCREC_IML_TYPE_STORE || imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) && imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STWCX_MARKER ) + { + // overwrites CR0 + imlSegment->crBitsWritten |= (0xF<<0); + } + } + } + // flag instructions that write to CR where we can ignore individual CR bits + for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + for(sint32 i=0; i<imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + if( PPCRecompilerImlAnalyzer_canTypeWriteCR(imlInstruction) && imlInstruction->crRegister >= 0 && imlInstruction->crRegister <= 7 ) + { + uint32 crBitFlags = 0xF<<((uint32)imlInstruction->crRegister*4); + uint32 crOverwriteMask = PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment); + uint32 crIgnoreMask = crOverwriteMask & ~imlSegment->crBitsRead; + imlInstruction->crIgnoreMask = crIgnoreMask; + } + } + } +} + +bool PPCRecompiler_checkIfGPRIsModifiedInRange(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* 
imlSegment, sint32 startIndex, sint32 endIndex, sint32 vreg) +{ + PPCImlOptimizerUsedRegisters_t registersUsed; + for (sint32 i = startIndex; i <= endIndex; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlInstruction, &registersUsed); + if (registersUsed.writtenNamedReg1 == vreg) + return true; + } + return false; +} + +sint32 PPCRecompiler_scanBackwardsForReusableRegister(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* startSegment, sint32 startIndex, sint32 name) +{ + // current segment + sint32 currentIndex = startIndex; + PPCRecImlSegment_t* currentSegment = startSegment; + sint32 segmentIterateCount = 0; + sint32 foundRegister = -1; + while (true) + { + // stop scanning if segment is enterable + if (currentSegment->isEnterable) + return -1; + while (currentIndex >= 0) + { + if (currentSegment->imlList[currentIndex].type == PPCREC_IML_TYPE_NAME_R && currentSegment->imlList[currentIndex].op_r_name.name == name) + { + foundRegister = currentSegment->imlList[currentIndex].op_r_name.registerIndex; + break; + } + // previous instruction + currentIndex--; + } + if (foundRegister >= 0) + break; + // continue at previous segment (if there is only one) + if (segmentIterateCount >= 1) + return -1; + if (currentSegment->list_prevSegments.size() != 1) + return -1; + currentSegment = currentSegment->list_prevSegments[0]; + currentIndex = currentSegment->imlListCount - 1; + segmentIterateCount++; + } + // scan again to make sure the register is not modified inbetween + currentIndex = startIndex; + currentSegment = startSegment; + segmentIterateCount = 0; + PPCImlOptimizerUsedRegisters_t registersUsed; + while (true) + { + while (currentIndex >= 0) + { + // check if register is modified + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, currentSegment->imlList+currentIndex, &registersUsed); + if (registersUsed.writtenNamedReg1 == foundRegister) + return -1; + // check if end of scan 
reached + if (currentSegment->imlList[currentIndex].type == PPCREC_IML_TYPE_NAME_R && currentSegment->imlList[currentIndex].op_r_name.name == name) + { + //foundRegister = currentSegment->imlList[currentIndex].op_r_name.registerIndex; + return foundRegister; + } + // previous instruction + currentIndex--; + } + // continue at previous segment (if there is only one) + if (segmentIterateCount >= 1) + return -1; + if (currentSegment->list_prevSegments.size() != 1) + return -1; + currentSegment = currentSegment->list_prevSegments[0]; + currentIndex = currentSegment->imlListCount - 1; + segmentIterateCount++; + } + return -1; +} + +void PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 imlIndexLoad, sint32 fprIndex) +{ + PPCRecImlInstruction_t* imlInstructionLoad = imlSegment->imlList + imlIndexLoad; + if (imlInstructionLoad->op_storeLoad.flags2.notExpanded) + return; + + PPCImlOptimizerUsedRegisters_t registersUsed; + sint32 scanRangeEnd = std::min(imlIndexLoad + 25, imlSegment->imlListCount); // don't scan too far (saves performance and also the chances we can merge the load+store become low at high distances) + bool foundMatch = false; + sint32 lastStore = -1; + for (sint32 i = imlIndexLoad + 1; i < scanRangeEnd; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + if (PPCRecompiler_isSuffixInstruction(imlInstruction)) + { + break; + } + + // check if FPR is stored + if ((imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE && imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0) || + (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED && imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0)) + { + if (imlInstruction->op_storeLoad.registerData == fprIndex) + { + if (foundMatch == false) + { + // flag the load-single instruction as "don't expand" (leave single value as-is) + 
imlInstructionLoad->op_storeLoad.flags2.notExpanded = true; + } + // also set the flag for the store instruction + PPCRecImlInstruction_t* imlInstructionStore = imlInstruction; + imlInstructionStore->op_storeLoad.flags2.notExpanded = true; + + foundMatch = true; + lastStore = i + 1; + + continue; + } + } + + // check if FPR is overwritten (we can actually ignore read operations?) + PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlInstruction, &registersUsed); + if (registersUsed.writtenFPR1 == fprIndex) + break; + if (registersUsed.readFPR1 == fprIndex) + break; + if (registersUsed.readFPR2 == fprIndex) + break; + if (registersUsed.readFPR3 == fprIndex) + break; + if (registersUsed.readFPR4 == fprIndex) + break; + } + + if (foundMatch) + { + // insert expand instruction after store + PPCRecImlInstruction_t* newExpand = PPCRecompiler_insertInstruction(imlSegment, lastStore); + PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, newExpand, PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64, fprIndex); + } +} + +/* +* Scans for patterns: +* +* +* +* For these patterns the store and load is modified to work with un-extended values (float remains as float, no double conversion) +* The float->double extension is then executed later +* Advantages: +* Keeps denormals and other special float values intact +* Slightly improves performance +*/ +void PPCRecompiler_optimizeDirectFloatCopies(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + + for (sint32 i = 0; i < imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD && imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1) + { + PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext, imlSegment, i, 
imlInstruction->op_storeLoad.registerData); + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED && imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1) + { + PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext, imlSegment, i, imlInstruction->op_storeLoad.registerData); + } + } + } +} + +void PPCRecompiler_optimizeDirectIntegerCopiesScanForward(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 imlIndexLoad, sint32 gprIndex) +{ + PPCRecImlInstruction_t* imlInstructionLoad = imlSegment->imlList + imlIndexLoad; + if ( imlInstructionLoad->op_storeLoad.flags2.swapEndian == false ) + return; + bool foundMatch = false; + PPCImlOptimizerUsedRegisters_t registersUsed; + sint32 scanRangeEnd = std::min(imlIndexLoad + 25, imlSegment->imlListCount); // don't scan too far (saves performance and also the chances we can merge the load+store become low at high distances) + sint32 i = imlIndexLoad + 1; + for (; i < scanRangeEnd; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + if (PPCRecompiler_isSuffixInstruction(imlInstruction)) + { + break; + } + // check if GPR is stored + if ((imlInstruction->type == PPCREC_IML_TYPE_STORE && imlInstruction->op_storeLoad.copyWidth == 32 ) ) + { + if (imlInstruction->op_storeLoad.registerMem == gprIndex) + break; + if (imlInstruction->op_storeLoad.registerData == gprIndex) + { + PPCRecImlInstruction_t* imlInstructionStore = imlInstruction; + if (foundMatch == false) + { + // switch the endian swap flag for the load instruction + imlInstructionLoad->op_storeLoad.flags2.swapEndian = !imlInstructionLoad->op_storeLoad.flags2.swapEndian; + foundMatch = true; + } + // switch the endian swap flag for the store instruction + imlInstructionStore->op_storeLoad.flags2.swapEndian = !imlInstructionStore->op_storeLoad.flags2.swapEndian; + // keep scanning + continue; + } + } + // check if GPR is accessed + 
PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlInstruction, &registersUsed); + if (registersUsed.readNamedReg1 == gprIndex || + registersUsed.readNamedReg2 == gprIndex || + registersUsed.readNamedReg3 == gprIndex) + { + break; + } + if (registersUsed.writtenNamedReg1 == gprIndex) + return; // GPR overwritten, we don't need to byte swap anymore + } + if (foundMatch) + { + // insert expand instruction + PPCRecImlInstruction_t* newExpand = PPCRecompiler_insertInstruction(imlSegment, i); + PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, newExpand, PPCREC_IML_OP_ENDIAN_SWAP, gprIndex, gprIndex); + } +} + +/* +* Scans for patterns: +* +* +* +* For these patterns the store and load is modified to work with non-swapped values +* The big_endian->little_endian conversion is then executed later +* Advantages: +* Slightly improves performance +*/ +void PPCRecompiler_optimizeDirectIntegerCopies(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + + for (sint32 i = 0; i < imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + if (imlInstruction->type == PPCREC_IML_TYPE_LOAD && imlInstruction->op_storeLoad.copyWidth == 32 && imlInstruction->op_storeLoad.flags2.swapEndian ) + { + PPCRecompiler_optimizeDirectIntegerCopiesScanForward(ppcImlGenContext, imlSegment, i, imlInstruction->op_storeLoad.registerData); + } + } + } +} + +sint32 _getGQRIndexFromRegister(ppcImlGenContext_t* ppcImlGenContext, sint32 registerIndex) +{ + if (registerIndex == PPC_REC_INVALID_REGISTER) + return -1; + sint32 namedReg = ppcImlGenContext->mappedRegister[registerIndex]; + if (namedReg >= (PPCREC_NAME_SPR0 + SPR_UGQR0) && namedReg <= (PPCREC_NAME_SPR0 + SPR_UGQR7)) + { + return namedReg - (PPCREC_NAME_SPR0 + SPR_UGQR0); + } + return -1; +} + +bool PPCRecompiler_isUGQRValueKnown(ppcImlGenContext_t* 
ppcImlGenContext, sint32 gqrIndex, uint32& gqrValue) +{ + // UGQR 2 to 7 are initialized by the OS and we assume that games won't ever permanently touch those + // todo - hack - replace with more accurate solution + if (gqrIndex == 2) + gqrValue = 0x00040004; + else if (gqrIndex == 3) + gqrValue = 0x00050005; + else if (gqrIndex == 4) + gqrValue = 0x00060006; + else if (gqrIndex == 5) + gqrValue = 0x00070007; + else + return false; + return true; +} + +/* + * If value of GQR can be predicted for a given PSQ load or store instruction then replace it with an optimized version + */ +void PPCRecompiler_optimizePSQLoadAndStore(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + for (sint32 i = 0; i < imlSegment->imlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD || imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED) + { + if(imlInstruction->op_storeLoad.mode != PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0 && + imlInstruction->op_storeLoad.mode != PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 ) + continue; + // get GQR value + cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER); + sint32 gqrIndex = _getGQRIndexFromRegister(ppcImlGenContext, imlInstruction->op_storeLoad.registerGQR); + cemu_assert(gqrIndex >= 0); + if (ppcImlGenContext->tracking.modifiesGQR[gqrIndex]) + continue; + //uint32 gqrValue = ppcInterpreterCurrentInstance->sprNew.UGQR[gqrIndex]; + uint32 gqrValue; + if (!PPCRecompiler_isUGQRValueKnown(ppcImlGenContext, gqrIndex, gqrValue)) + continue; + + uint32 formatType = (gqrValue >> 16) & 7; + uint32 scale = (gqrValue >> 24) & 0x3F; + if (scale != 0) + continue; // only generic handler supports scale + if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0) + { + if (formatType == 0) + 
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0; + else if (formatType == 4) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U8_PS0; + else if (formatType == 5) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U16_PS0; + else if (formatType == 6) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S8_PS0; + else if (formatType == 7) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S16_PS0; + } + else if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1) + { + if (formatType == 0) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1; + else if (formatType == 4) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1; + else if (formatType == 5) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1; + else if (formatType == 6) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1; + else if (formatType == 7) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1; + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE || imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED) + { + if(imlInstruction->op_storeLoad.mode != PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0 && + imlInstruction->op_storeLoad.mode != PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1) + continue; + // get GQR value + cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER); + sint32 gqrIndex = _getGQRIndexFromRegister(ppcImlGenContext, imlInstruction->op_storeLoad.registerGQR); + cemu_assert(gqrIndex >= 0); + if (ppcImlGenContext->tracking.modifiesGQR[gqrIndex]) + continue; + uint32 gqrValue; + if(!PPCRecompiler_isUGQRValueKnown(ppcImlGenContext, gqrIndex, gqrValue)) + continue; + uint32 formatType = (gqrValue >> 16) & 7; + uint32 scale = (gqrValue >> 24) & 0x3F; + if (scale != 0) + continue; // only generic handler supports scale + if (imlInstruction->op_storeLoad.mode == 
PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0) + { + if (formatType == 0) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0; + else if (formatType == 4) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U8_PS0; + else if (formatType == 5) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U16_PS0; + else if (formatType == 6) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S8_PS0; + else if (formatType == 7) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S16_PS0; + } + else if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1) + { + if (formatType == 0) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1; + else if (formatType == 4) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1; + else if (formatType == 5) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1; + else if (formatType == 6) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1; + else if (formatType == 7) + imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1; + } + } + } + } +} + +/* + * Returns true if registerWrite overwrites any of the registers read by registerRead + */ +bool PPCRecompilerAnalyzer_checkForGPROverwrite(PPCImlOptimizerUsedRegisters_t* registerRead, PPCImlOptimizerUsedRegisters_t* registerWrite) +{ + if (registerWrite->writtenNamedReg1 < 0) + return false; + + if (registerWrite->writtenNamedReg1 == registerRead->readNamedReg1) + return true; + if (registerWrite->writtenNamedReg1 == registerRead->readNamedReg2) + return true; + if (registerWrite->writtenNamedReg1 == registerRead->readNamedReg3) + return true; + return false; +} + +void _reorderConditionModifyInstructions(PPCRecImlSegment_t* imlSegment) +{ + PPCRecImlInstruction_t* lastInstruction = PPCRecompilerIML_getLastInstruction(imlSegment); + // last instruction a conditional branch? 
+ if (lastInstruction == nullptr || lastInstruction->type != PPCREC_IML_TYPE_CJUMP) + return; + if (lastInstruction->op_conditionalJump.crRegisterIndex >= 8) + return; + // get CR bitmask of bit required for conditional jump + PPCRecCRTracking_t crTracking; + PPCRecompilerImlAnalyzer_getCRTracking(lastInstruction, &crTracking); + uint32 requiredCRBits = crTracking.readCRBits; + + // scan backwards until we find the instruction that sets the CR + sint32 crSetterInstructionIndex = -1; + sint32 unsafeInstructionIndex = -1; + for (sint32 i = imlSegment->imlListCount-2; i >= 0; i--) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i; + PPCRecompilerImlAnalyzer_getCRTracking(imlInstruction, &crTracking); + if (crTracking.readCRBits != 0) + return; // dont handle complex cases for now + if (crTracking.writtenCRBits != 0) + { + if ((crTracking.writtenCRBits&requiredCRBits) != 0) + { + crSetterInstructionIndex = i; + break; + } + else + { + return; // other CR bits overwritten (dont handle complex cases) + } + } + // is safe? 
(no risk of overwriting x64 eflags) + if ((imlInstruction->type == PPCREC_IML_TYPE_NAME_R || imlInstruction->type == PPCREC_IML_TYPE_R_NAME || imlInstruction->type == PPCREC_IML_TYPE_NO_OP) || + (imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R || imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME) || + (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)) || + (imlInstruction->type == PPCREC_IML_TYPE_R_R && (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)) ) + continue; + // not safe + //hasUnsafeInstructions = true; + if (unsafeInstructionIndex == -1) + unsafeInstructionIndex = i; + } + if (crSetterInstructionIndex < 0) + return; + if (unsafeInstructionIndex < 0) + return; // no danger of overwriting eflags, don't reorder + // check if we can move the CR setter instruction to after unsafeInstructionIndex + PPCRecCRTracking_t crTrackingSetter = crTracking; + PPCImlOptimizerUsedRegisters_t regTrackingCRSetter; + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList+crSetterInstructionIndex, &regTrackingCRSetter); + if (regTrackingCRSetter.writtenFPR1 >= 0 || regTrackingCRSetter.readFPR1 >= 0 || regTrackingCRSetter.readFPR2 >= 0 || regTrackingCRSetter.readFPR3 >= 0 || regTrackingCRSetter.readFPR4 >= 0) + return; // we don't handle FPR dependency yet so just ignore FPR instructions + PPCImlOptimizerUsedRegisters_t registerTracking; + if (regTrackingCRSetter.writtenNamedReg1 >= 0) + { + // CR setter does write GPR + for (sint32 i = crSetterInstructionIndex + 1; i <= unsafeInstructionIndex; i++) + { + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + i, &registerTracking); + // reads register written by CR setter? + if (PPCRecompilerAnalyzer_checkForGPROverwrite(&registerTracking, &regTrackingCRSetter)) + { + return; // cant move CR setter because of dependency + } + // writes register read by CR setter? 
+ if (PPCRecompilerAnalyzer_checkForGPROverwrite(&regTrackingCRSetter, &registerTracking)) + { + return; // cant move CR setter because of dependency + } + // overwrites register written by CR setter? + if (regTrackingCRSetter.writtenNamedReg1 == registerTracking.writtenNamedReg1) + return; + } + } + else + { + // CR setter does not write GPR + for (sint32 i = crSetterInstructionIndex + 1; i <= unsafeInstructionIndex; i++) + { + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + i, &registerTracking); + // writes register read by CR setter? + if (PPCRecompilerAnalyzer_checkForGPROverwrite(&regTrackingCRSetter, &registerTracking)) + { + return; // cant move CR setter because of dependency + } + } + } + + // move CR setter instruction +#ifdef CEMU_DEBUG_ASSERT + if ((unsafeInstructionIndex + 1) <= crSetterInstructionIndex) + assert_dbg(); +#endif + PPCRecImlInstruction_t* newCRSetterInstruction = PPCRecompiler_insertInstruction(imlSegment, unsafeInstructionIndex+1); + memcpy(newCRSetterInstruction, imlSegment->imlList + crSetterInstructionIndex, sizeof(PPCRecImlInstruction_t)); + PPCRecompilerImlGen_generateNewInstruction_noOp(NULL, imlSegment->imlList + crSetterInstructionIndex); +} + +/* + * Move instructions which update the condition flags closer to the instruction that consumes them + * On x64 this improves performance since we often can avoid storing CR in memory + */ +void PPCRecompiler_reorderConditionModifyInstructions(ppcImlGenContext_t* ppcImlGenContext) +{ + // check if this segment has a conditional branch + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + _reorderConditionModifyInstructions(imlSegment); + } +} diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.cpp new file mode 100644 index 00000000..d31c02d4 --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.cpp @@
-0,0 +1,399 @@ +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" +#include "PPCRecompilerImlRanges.h" +#include "util/helpers/MemoryPool.h" + +void PPCRecRARange_addLink_perVirtualGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange) +{ +#ifdef CEMU_DEBUG_ASSERT + if ((*root) && (*root)->range->virtualRegister != subrange->range->virtualRegister) + assert_dbg(); +#endif + subrange->link_sameVirtualRegisterGPR.next = *root; + if (*root) + (*root)->link_sameVirtualRegisterGPR.prev = subrange; + subrange->link_sameVirtualRegisterGPR.prev = nullptr; + *root = subrange; +} + +void PPCRecRARange_addLink_allSubrangesGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange) +{ + subrange->link_segmentSubrangesGPR.next = *root; + if (*root) + (*root)->link_segmentSubrangesGPR.prev = subrange; + subrange->link_segmentSubrangesGPR.prev = nullptr; + *root = subrange; +} + +void PPCRecRARange_removeLink_perVirtualGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange) +{ + raLivenessSubrange_t* tempPrev = subrange->link_sameVirtualRegisterGPR.prev; + if (subrange->link_sameVirtualRegisterGPR.prev) + subrange->link_sameVirtualRegisterGPR.prev->link_sameVirtualRegisterGPR.next = subrange->link_sameVirtualRegisterGPR.next; + else + (*root) = subrange->link_sameVirtualRegisterGPR.next; + if (subrange->link_sameVirtualRegisterGPR.next) + subrange->link_sameVirtualRegisterGPR.next->link_sameVirtualRegisterGPR.prev = tempPrev; +#ifdef CEMU_DEBUG_ASSERT + subrange->link_sameVirtualRegisterGPR.prev = (raLivenessSubrange_t*)1; + subrange->link_sameVirtualRegisterGPR.next = (raLivenessSubrange_t*)1; +#endif +} + +void PPCRecRARange_removeLink_allSubrangesGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange) +{ + raLivenessSubrange_t* tempPrev = subrange->link_segmentSubrangesGPR.prev; + if (subrange->link_segmentSubrangesGPR.prev) + subrange->link_segmentSubrangesGPR.prev->link_segmentSubrangesGPR.next = 
subrange->link_segmentSubrangesGPR.next; + else + (*root) = subrange->link_segmentSubrangesGPR.next; + if (subrange->link_segmentSubrangesGPR.next) + subrange->link_segmentSubrangesGPR.next->link_segmentSubrangesGPR.prev = tempPrev; +#ifdef CEMU_DEBUG_ASSERT + subrange->link_segmentSubrangesGPR.prev = (raLivenessSubrange_t*)1; + subrange->link_segmentSubrangesGPR.next = (raLivenessSubrange_t*)1; +#endif +} + +MemoryPoolPermanentObjects memPool_livenessRange(4096); +MemoryPoolPermanentObjects memPool_livenessSubrange(4096); + +raLivenessRange_t* PPCRecRA_createRangeBase(ppcImlGenContext_t* ppcImlGenContext, uint32 virtualRegister, uint32 name) +{ + raLivenessRange_t* livenessRange = memPool_livenessRange.acquireObj(); + livenessRange->list_subranges.resize(0); + livenessRange->virtualRegister = virtualRegister; + livenessRange->name = name; + livenessRange->physicalRegister = -1; + ppcImlGenContext->raInfo.list_ranges.push_back(livenessRange); + return livenessRange; +} + +raLivenessSubrange_t* PPCRecRA_createSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range, PPCRecImlSegment_t* imlSegment, sint32 startIndex, sint32 endIndex) +{ + raLivenessSubrange_t* livenessSubrange = memPool_livenessSubrange.acquireObj(); + livenessSubrange->list_locations.resize(0); + livenessSubrange->range = range; + livenessSubrange->imlSegment = imlSegment; + PPCRecompilerIml_setSegmentPoint(&livenessSubrange->start, imlSegment, startIndex); + PPCRecompilerIml_setSegmentPoint(&livenessSubrange->end, imlSegment, endIndex); + // default values + livenessSubrange->hasStore = false; + livenessSubrange->hasStoreDelayed = false; + livenessSubrange->lastIterationIndex = 0; + livenessSubrange->subrangeBranchNotTaken = nullptr; + livenessSubrange->subrangeBranchTaken = nullptr; + livenessSubrange->_noLoad = false; + // add to range + range->list_subranges.push_back(livenessSubrange); + // add to segment + 
PPCRecRARange_addLink_perVirtualGPR(&(imlSegment->raInfo.linkedList_perVirtualGPR[range->virtualRegister]), livenessSubrange); + PPCRecRARange_addLink_allSubrangesGPR(&imlSegment->raInfo.linkedList_allSubranges, livenessSubrange); + return livenessSubrange; +} + +void _unlinkSubrange(raLivenessSubrange_t* subrange) +{ + PPCRecImlSegment_t* imlSegment = subrange->imlSegment; + PPCRecRARange_removeLink_perVirtualGPR(&imlSegment->raInfo.linkedList_perVirtualGPR[subrange->range->virtualRegister], subrange); + PPCRecRARange_removeLink_allSubrangesGPR(&imlSegment->raInfo.linkedList_allSubranges, subrange); +} + +void PPCRecRA_deleteSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange) +{ + _unlinkSubrange(subrange); + subrange->range->list_subranges.erase(std::find(subrange->range->list_subranges.begin(), subrange->range->list_subranges.end(), subrange)); + subrange->list_locations.clear(); + PPCRecompilerIml_removeSegmentPoint(&subrange->start); + PPCRecompilerIml_removeSegmentPoint(&subrange->end); + memPool_livenessSubrange.releaseObj(subrange); +} + +void _PPCRecRA_deleteSubrangeNoUnlinkFromRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange) +{ + _unlinkSubrange(subrange); + PPCRecompilerIml_removeSegmentPoint(&subrange->start); + PPCRecompilerIml_removeSegmentPoint(&subrange->end); + memPool_livenessSubrange.releaseObj(subrange); +} + +void PPCRecRA_deleteRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range) +{ + for (auto& subrange : range->list_subranges) + { + _PPCRecRA_deleteSubrangeNoUnlinkFromRange(ppcImlGenContext, subrange); + } + ppcImlGenContext->raInfo.list_ranges.erase(std::find(ppcImlGenContext->raInfo.list_ranges.begin(), ppcImlGenContext->raInfo.list_ranges.end(), range)); + memPool_livenessRange.releaseObj(range); +} + +void PPCRecRA_deleteRangeNoUnlink(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range) +{ + for (auto& subrange : range->list_subranges) + { + 
_PPCRecRA_deleteSubrangeNoUnlinkFromRange(ppcImlGenContext, subrange); + } + memPool_livenessRange.releaseObj(range); +} + +void PPCRecRA_deleteAllRanges(ppcImlGenContext_t* ppcImlGenContext) +{ + for(auto& range : ppcImlGenContext->raInfo.list_ranges) + { + PPCRecRA_deleteRangeNoUnlink(ppcImlGenContext, range); + } + ppcImlGenContext->raInfo.list_ranges.clear(); +} + +void PPCRecRA_mergeRanges(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range, raLivenessRange_t* absorbedRange) +{ + cemu_assert_debug(range != absorbedRange); + cemu_assert_debug(range->virtualRegister == absorbedRange->virtualRegister); + // move all subranges from absorbedRange to range + for (auto& subrange : absorbedRange->list_subranges) + { + range->list_subranges.push_back(subrange); + subrange->range = range; + } + absorbedRange->list_subranges.clear(); + PPCRecRA_deleteRange(ppcImlGenContext, absorbedRange); +} + +void PPCRecRA_mergeSubranges(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange, raLivenessSubrange_t* absorbedSubrange) +{ +#ifdef CEMU_DEBUG_ASSERT + PPCRecRA_debugValidateSubrange(subrange); + PPCRecRA_debugValidateSubrange(absorbedSubrange); + if (subrange->imlSegment != absorbedSubrange->imlSegment) + assert_dbg(); + if (subrange->end.index > absorbedSubrange->start.index) + assert_dbg(); + if (subrange->subrangeBranchTaken || subrange->subrangeBranchNotTaken) + assert_dbg(); + if (subrange == absorbedSubrange) + assert_dbg(); +#endif + subrange->subrangeBranchTaken = absorbedSubrange->subrangeBranchTaken; + subrange->subrangeBranchNotTaken = absorbedSubrange->subrangeBranchNotTaken; + + // merge usage locations + for (auto& location : absorbedSubrange->list_locations) + { + subrange->list_locations.push_back(location); + } + absorbedSubrange->list_locations.clear(); + + subrange->end.index = absorbedSubrange->end.index; + + PPCRecRA_debugValidateSubrange(subrange); + + PPCRecRA_deleteSubrange(ppcImlGenContext, absorbedSubrange); +} + +// remove 
all inter-segment connections from the range and split it into local ranges (also removes empty ranges) +void PPCRecRA_explodeRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range) +{ + if (range->list_subranges.size() == 1) + assert_dbg(); + for (auto& subrange : range->list_subranges) + { + if (subrange->list_locations.empty()) + continue; + raLivenessRange_t* newRange = PPCRecRA_createRangeBase(ppcImlGenContext, range->virtualRegister, range->name); + raLivenessSubrange_t* newSubrange = PPCRecRA_createSubrange(ppcImlGenContext, newRange, subrange->imlSegment, subrange->list_locations.data()[0].index, subrange->list_locations.data()[subrange->list_locations.size() - 1].index + 1); + // copy locations + for (auto& location : subrange->list_locations) + { + newSubrange->list_locations.push_back(location); + } + } + // remove original range + PPCRecRA_deleteRange(ppcImlGenContext, range); +} + +#ifdef CEMU_DEBUG_ASSERT +void PPCRecRA_debugValidateSubrange(raLivenessSubrange_t* subrange) +{ + // validate subrange + if (subrange->subrangeBranchTaken && subrange->subrangeBranchTaken->imlSegment != subrange->imlSegment->nextSegmentBranchTaken) + assert_dbg(); + if (subrange->subrangeBranchNotTaken && subrange->subrangeBranchNotTaken->imlSegment != subrange->imlSegment->nextSegmentBranchNotTaken) + assert_dbg(); +} +#else +void PPCRecRA_debugValidateSubrange(raLivenessSubrange_t* subrange) {} +#endif + +// split subrange at the given index +// After the split there will be two ranges/subranges: +// head -> subrange is shortned to end at splitIndex +// tail -> a new subrange that reaches from splitIndex to the end of the original subrange +// if head has a physical register assigned it will not carry over to tail +// The return value is the tail subrange +// If trimToHole is true, the end of the head subrange and the start of the tail subrange will be moved to fit the locations +// Ranges that begin at RA_INTER_RANGE_START are allowed and can be split 
+raLivenessSubrange_t* PPCRecRA_splitLocalSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange, sint32 splitIndex, bool trimToHole) +{ + // validation +#ifdef CEMU_DEBUG_ASSERT + if (subrange->end.index == RA_INTER_RANGE_END || subrange->end.index == RA_INTER_RANGE_START) + assert_dbg(); + if (subrange->start.index >= splitIndex) + assert_dbg(); + if (subrange->end.index <= splitIndex) + assert_dbg(); +#endif + // create tail + raLivenessRange_t* tailRange = PPCRecRA_createRangeBase(ppcImlGenContext, subrange->range->virtualRegister, subrange->range->name); + raLivenessSubrange_t* tailSubrange = PPCRecRA_createSubrange(ppcImlGenContext, tailRange, subrange->imlSegment, splitIndex, subrange->end.index); + // copy locations + for (auto& location : subrange->list_locations) + { + if (location.index >= splitIndex) + tailSubrange->list_locations.push_back(location); + } + // remove tail locations from head + for (sint32 i = 0; i < subrange->list_locations.size(); i++) + { + raLivenessLocation_t* location = subrange->list_locations.data() + i; + if (location->index >= splitIndex) + { + subrange->list_locations.resize(i); + break; + } + } + // adjust start/end + if (trimToHole) + { + if (subrange->list_locations.empty()) + { + subrange->end.index = subrange->start.index+1; + } + else + { + subrange->end.index = subrange->list_locations.back().index + 1; + } + if (tailSubrange->list_locations.empty()) + { + assert_dbg(); // should not happen? 
(In this case we can just avoid generating a tail at all) + } + else + { + tailSubrange->start.index = tailSubrange->list_locations.front().index; + } + } + return tailSubrange; +} + +void PPCRecRA_updateOrAddSubrangeLocation(raLivenessSubrange_t* subrange, sint32 index, bool isRead, bool isWrite) +{ + if (subrange->list_locations.empty()) + { + subrange->list_locations.emplace_back(index, isRead, isWrite); + return; + } + raLivenessLocation_t* lastLocation = subrange->list_locations.data() + (subrange->list_locations.size() - 1); + cemu_assert_debug(lastLocation->index <= index); + if (lastLocation->index == index) + { + // update + lastLocation->isRead = lastLocation->isRead || isRead; + lastLocation->isWrite = lastLocation->isWrite || isWrite; + return; + } + // add new + subrange->list_locations.emplace_back(index, isRead, isWrite); +} + +sint32 PPCRecRARange_getReadWriteCost(PPCRecImlSegment_t* imlSegment) +{ + sint32 v = imlSegment->loopDepth + 1; + v *= 5; + return v*v; // 25, 100, 225, 400 +} + +// calculate cost of entire range +// ignores data flow and does not detect avoidable reads/stores +sint32 PPCRecRARange_estimateCost(raLivenessRange_t* range) +{ + sint32 cost = 0; + + // todo - this algorithm isn't accurate. If we have 10 parallel branches with a load each then the actual cost is still only that of one branch (plus minimal extra cost for generating more code). 
+ + // currently we calculate the cost based on the most expensive entry/exit point + + sint32 mostExpensiveRead = 0; + sint32 mostExpensiveWrite = 0; + sint32 readCount = 0; + sint32 writeCount = 0; + + for (auto& subrange : range->list_subranges) + { + if (subrange->start.index != RA_INTER_RANGE_START) + { + //cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment); + mostExpensiveRead = std::max(mostExpensiveRead, PPCRecRARange_getReadWriteCost(subrange->imlSegment)); + readCount++; + } + if (subrange->end.index != RA_INTER_RANGE_END) + { + //cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment); + mostExpensiveWrite = std::max(mostExpensiveWrite, PPCRecRARange_getReadWriteCost(subrange->imlSegment)); + writeCount++; + } + } + cost = mostExpensiveRead + mostExpensiveWrite; + cost = cost + (readCount + writeCount) / 10; + return cost; +} + +// calculate cost of range that it would have after calling PPCRecRA_explodeRange() on it +sint32 PPCRecRARange_estimateAdditionalCostAfterRangeExplode(raLivenessRange_t* range) +{ + sint32 cost = -PPCRecRARange_estimateCost(range); + for (auto& subrange : range->list_subranges) + { + if (subrange->list_locations.empty()) + continue; + cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment) * 2; // we assume a read and a store + } + return cost; +} + +sint32 PPCRecRARange_estimateAdditionalCostAfterSplit(raLivenessSubrange_t* subrange, sint32 splitIndex) +{ + // validation +#ifdef CEMU_DEBUG_ASSERT + if (subrange->end.index == RA_INTER_RANGE_END) + assert_dbg(); +#endif + + sint32 cost = 0; + // find split position in location list + if (subrange->list_locations.empty()) + { + assert_dbg(); // should not happen? 
+ return 0; + } + if (splitIndex <= subrange->list_locations.front().index) + return 0; + if (splitIndex > subrange->list_locations.back().index) + return 0; + + // todo - determine exact cost of split subranges + + cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment) * 2; // currently we assume that the additional region will require a read and a store + + //for (sint32 f = 0; f < subrange->list_locations.size(); f++) + //{ + // raLivenessLocation_t* location = subrange->list_locations.data() + f; + // if (location->index >= splitIndex) + // { + // ... + // return cost; + // } + //} + + return cost; +} diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.h b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.h new file mode 100644 index 00000000..01970bbf --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.h @@ -0,0 +1,27 @@ +#pragma once + +raLivenessRange_t* PPCRecRA_createRangeBase(ppcImlGenContext_t* ppcImlGenContext, uint32 virtualRegister, uint32 name); +raLivenessSubrange_t* PPCRecRA_createSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range, PPCRecImlSegment_t* imlSegment, sint32 startIndex, sint32 endIndex); +void PPCRecRA_deleteSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange); +void PPCRecRA_deleteRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range); +void PPCRecRA_deleteAllRanges(ppcImlGenContext_t* ppcImlGenContext); + +void PPCRecRA_mergeRanges(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range, raLivenessRange_t* absorbedRange); +void PPCRecRA_explodeRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range); + +void PPCRecRA_mergeSubranges(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange, raLivenessSubrange_t* absorbedSubrange); + +raLivenessSubrange_t* PPCRecRA_splitLocalSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange, sint32 splitIndex, bool trimToHole = false); 
+ +void PPCRecRA_updateOrAddSubrangeLocation(raLivenessSubrange_t* subrange, sint32 index, bool isRead, bool isWrite); +void PPCRecRA_debugValidateSubrange(raLivenessSubrange_t* subrange); + +// cost estimation +sint32 PPCRecRARange_getReadWriteCost(PPCRecImlSegment_t* imlSegment); +sint32 PPCRecRARange_estimateCost(raLivenessRange_t* range); +sint32 PPCRecRARange_estimateAdditionalCostAfterRangeExplode(raLivenessRange_t* range); +sint32 PPCRecRARange_estimateAdditionalCostAfterSplit(raLivenessSubrange_t* subrange, sint32 splitIndex); + +// special values to mark the index of ranges that reach across the segment border +#define RA_INTER_RANGE_START (-1) +#define RA_INTER_RANGE_END (0x70000000) diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator.cpp new file mode 100644 index 00000000..88d387e6 --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator.cpp @@ -0,0 +1,1012 @@ +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" +#include "PPCRecompilerImlRanges.h" + +void PPCRecompiler_replaceGPRRegisterUsageMultiple(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 gprRegisterSearched[4], sint32 gprRegisterReplaced[4]); + +bool PPCRecompiler_isSuffixInstruction(PPCRecImlInstruction_t* iml); + +uint32 recRACurrentIterationIndex = 0; + +uint32 PPCRecRA_getNextIterationIndex() +{ + recRACurrentIterationIndex++; + return recRACurrentIterationIndex; +} + +bool _detectLoop(PPCRecImlSegment_t* currentSegment, sint32 depth, uint32 iterationIndex, PPCRecImlSegment_t* imlSegmentLoopBase) +{ + if (currentSegment == imlSegmentLoopBase) + return true; + if (currentSegment->raInfo.lastIterationIndex == iterationIndex) + return currentSegment->raInfo.isPartOfProcessedLoop; + if (depth >= 9) + return false; + currentSegment->raInfo.lastIterationIndex = iterationIndex; + 
currentSegment->raInfo.isPartOfProcessedLoop = false; + + if (currentSegment->nextSegmentIsUncertain) + return false; + if (currentSegment->nextSegmentBranchNotTaken) + { + if (currentSegment->nextSegmentBranchNotTaken->momentaryIndex > currentSegment->momentaryIndex) + { + currentSegment->raInfo.isPartOfProcessedLoop = _detectLoop(currentSegment->nextSegmentBranchNotTaken, depth + 1, iterationIndex, imlSegmentLoopBase); + } + } + if (currentSegment->nextSegmentBranchTaken) + { + if (currentSegment->nextSegmentBranchTaken->momentaryIndex > currentSegment->momentaryIndex) + { + currentSegment->raInfo.isPartOfProcessedLoop = _detectLoop(currentSegment->nextSegmentBranchTaken, depth + 1, iterationIndex, imlSegmentLoopBase); + } + } + if (currentSegment->raInfo.isPartOfProcessedLoop) + currentSegment->loopDepth++; + return currentSegment->raInfo.isPartOfProcessedLoop; +} + +void PPCRecRA_detectLoop(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegmentLoopBase) +{ + uint32 iterationIndex = PPCRecRA_getNextIterationIndex(); + imlSegmentLoopBase->raInfo.lastIterationIndex = iterationIndex; + if (_detectLoop(imlSegmentLoopBase->nextSegmentBranchTaken, 0, iterationIndex, imlSegmentLoopBase)) + { + imlSegmentLoopBase->loopDepth++; + } +} + +void PPCRecRA_identifyLoop(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + if (imlSegment->nextSegmentIsUncertain) + return; + // check if this segment has a branch that links to itself (tight loop) + if (imlSegment->nextSegmentBranchTaken == imlSegment) + { + // segment loops over itself + imlSegment->loopDepth++; + return; + } + // check if this segment has a branch that goes backwards (potential complex loop) + if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->momentaryIndex < imlSegment->momentaryIndex) + { + PPCRecRA_detectLoop(ppcImlGenContext, imlSegment); + } +} + +typedef struct +{ + sint32 name; + sint32 virtualRegister; + sint32 physicalRegister; + bool 
isDirty; +}raRegisterState_t; + +const sint32 _raInfo_physicalGPRCount = PPC_X64_GPR_USABLE_REGISTERS; + +raRegisterState_t* PPCRecRA_getRegisterState(raRegisterState_t* regState, sint32 virtualRegister) +{ + for (sint32 i = 0; i < _raInfo_physicalGPRCount; i++) + { + if (regState[i].virtualRegister == virtualRegister) + { +#ifdef CEMU_DEBUG_ASSERT + if (regState[i].physicalRegister < 0) + assert_dbg(); +#endif + return regState + i; + } + } + return nullptr; +} + +raRegisterState_t* PPCRecRA_getFreePhysicalRegister(raRegisterState_t* regState) +{ + for (sint32 i = 0; i < _raInfo_physicalGPRCount; i++) + { + if (regState[i].physicalRegister < 0) + { + regState[i].physicalRegister = i; + return regState + i; + } + } + return nullptr; +} + +typedef struct +{ + uint16 registerIndex; + uint16 registerName; +}raLoadStoreInfo_t; + +void PPCRecRA_insertGPRLoadInstruction(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, sint32 registerIndex, sint32 registerName) +{ + PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, 1); + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + 0); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_R_NAME; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = registerIndex; + imlInstructionItr->op_r_name.name = registerName; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; +} + +void PPCRecRA_insertGPRLoadInstructions(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, raLoadStoreInfo_t* loadList, sint32 loadCount) +{ + PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, loadCount); + memset(imlSegment->imlList + (insertIndex + 0), 0x00, sizeof(PPCRecImlInstruction_t)*loadCount); + for (sint32 i = 0; i < loadCount; i++) + { + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + i); + imlInstructionItr->type = 
PPCREC_IML_TYPE_R_NAME; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = (uint8)loadList[i].registerIndex; + imlInstructionItr->op_r_name.name = (uint32)loadList[i].registerName; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + } +} + +void PPCRecRA_insertGPRStoreInstruction(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, sint32 registerIndex, sint32 registerName) +{ + PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, 1); + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + 0); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_NAME_R; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = registerIndex; + imlInstructionItr->op_r_name.name = registerName; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; +} + +void PPCRecRA_insertGPRStoreInstructions(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, raLoadStoreInfo_t* storeList, sint32 storeCount) +{ + PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, storeCount); + memset(imlSegment->imlList + (insertIndex + 0), 0x00, sizeof(PPCRecImlInstruction_t)*storeCount); + for (sint32 i = 0; i < storeCount; i++) + { + PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + i); + memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t)); + imlInstructionItr->type = PPCREC_IML_TYPE_NAME_R; + imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN; + imlInstructionItr->op_r_name.registerIndex = (uint8)storeList[i].registerIndex; + imlInstructionItr->op_r_name.name = (uint32)storeList[i].registerName; + imlInstructionItr->op_r_name.copyWidth = 32; + imlInstructionItr->op_r_name.flags = 0; + } +} + +#define SUBRANGE_LIST_SIZE (128) + +sint32 PPCRecRA_countInstructionsUntilNextUse(raLivenessSubrange_t* 
subrange, sint32 startIndex) +{ + for (sint32 i = 0; i < subrange->list_locations.size(); i++) + { + if (subrange->list_locations.data()[i].index >= startIndex) + return subrange->list_locations.data()[i].index - startIndex; + } + return INT_MAX; +} + +// count how many instructions there are until physRegister is used by any subrange (returns 0 if register is in use at startIndex, and INT_MAX if not used for the remainder of the segment) +sint32 PPCRecRA_countInstructionsUntilNextLocalPhysRegisterUse(PPCRecImlSegment_t* imlSegment, sint32 startIndex, sint32 physRegister) +{ + sint32 minDistance = INT_MAX; + // next + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while(subrangeItr) + { + if (subrangeItr->range->physicalRegister != physRegister) + { + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + continue; + } + if (startIndex >= subrangeItr->start.index && startIndex < subrangeItr->end.index) + return 0; + if (subrangeItr->start.index >= startIndex) + { + minDistance = std::min(minDistance, (subrangeItr->start.index - startIndex)); + } + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + return minDistance; +} + +typedef struct +{ + raLivenessSubrange_t* liveRangeList[64]; + sint32 liveRangesCount; +}raLiveRangeInfo_t; + +// return a bitmask that contains only registers that are not used by any colliding range +uint32 PPCRecRA_getAllowedRegisterMaskForFullRange(raLivenessRange_t* range) +{ + uint32 physRegisterMask = (1 << PPC_X64_GPR_USABLE_REGISTERS) - 1; + for (auto& subrange : range->list_subranges) + { + PPCRecImlSegment_t* imlSegment = subrange->imlSegment; + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while(subrangeItr) + { + if (subrange == subrangeItr) + { + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + continue; + } + + if (subrange->start.index < subrangeItr->end.index && subrange->end.index > subrangeItr->start.index || + 
(subrange->start.index == RA_INTER_RANGE_START && subrange->start.index == subrangeItr->start.index) || + (subrange->end.index == RA_INTER_RANGE_END && subrange->end.index == subrangeItr->end.index) ) + { + if(subrangeItr->range->physicalRegister >= 0) + physRegisterMask &= ~(1<<(subrangeItr->range->physicalRegister)); + } + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + } + return physRegisterMask; +} + +bool _livenessRangeStartCompare(raLivenessSubrange_t* lhs, raLivenessSubrange_t* rhs) { return lhs->start.index < rhs->start.index; } + +void _sortSegmentAllSubrangesLinkedList(PPCRecImlSegment_t* imlSegment) +{ + raLivenessSubrange_t* subrangeList[4096+1]; + sint32 count = 0; + // disassemble linked list + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while (subrangeItr) + { + if (count >= 4096) + assert_dbg(); + subrangeList[count] = subrangeItr; + count++; + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + if (count == 0) + { + imlSegment->raInfo.linkedList_allSubranges = nullptr; + return; + } + // sort + std::sort(subrangeList, subrangeList + count, _livenessRangeStartCompare); + //for (sint32 i1 = 0; i1 < count; i1++) + //{ + // for (sint32 i2 = i1+1; i2 < count; i2++) + // { + // if (subrangeList[i1]->start.index > subrangeList[i2]->start.index) + // { + // // swap + // raLivenessSubrange_t* temp = subrangeList[i1]; + // subrangeList[i1] = subrangeList[i2]; + // subrangeList[i2] = temp; + // } + // } + //} + // reassemble linked list + subrangeList[count] = nullptr; + imlSegment->raInfo.linkedList_allSubranges = subrangeList[0]; + subrangeList[0]->link_segmentSubrangesGPR.prev = nullptr; + subrangeList[0]->link_segmentSubrangesGPR.next = subrangeList[1]; + for (sint32 i = 1; i < count; i++) + { + subrangeList[i]->link_segmentSubrangesGPR.prev = subrangeList[i - 1]; + subrangeList[i]->link_segmentSubrangesGPR.next = subrangeList[i + 1]; + } + // validate list +#ifdef 
CEMU_DEBUG_ASSERT + sint32 count2 = 0; + subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + sint32 currentStartIndex = RA_INTER_RANGE_START; + while (subrangeItr) + { + count2++; + if (subrangeItr->start.index < currentStartIndex) + assert_dbg(); + currentStartIndex = subrangeItr->start.index; + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + if (count != count2) + assert_dbg(); +#endif +} + +bool PPCRecRA_assignSegmentRegisters(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + + // sort subranges ascending by start index + + //std::sort(imlSegment->raInfo.list_subranges.begin(), imlSegment->raInfo.list_subranges.end(), _sortSubrangesByStartIndexDepr); + _sortSegmentAllSubrangesLinkedList(imlSegment); + + raLiveRangeInfo_t liveInfo; + liveInfo.liveRangesCount = 0; + //sint32 subrangeIndex = 0; + //for (auto& subrange : imlSegment->raInfo.list_subranges) + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while(subrangeItr) + { + sint32 currentIndex = subrangeItr->start.index; + // validate subrange + PPCRecRA_debugValidateSubrange(subrangeItr); + // expire ranges + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f]; + if (liverange->end.index <= currentIndex && liverange->end.index != RA_INTER_RANGE_END) + { +#ifdef CEMU_DEBUG_ASSERT + if (liverange->subrangeBranchTaken || liverange->subrangeBranchNotTaken) + assert_dbg(); // infinite subranges should not expire +#endif + // remove entry + liveInfo.liveRangesCount--; + liveInfo.liveRangeList[f] = liveInfo.liveRangeList[liveInfo.liveRangesCount]; + f--; + } + } + // check if subrange already has register assigned + if (subrangeItr->range->physicalRegister >= 0) + { + // verify if register is actually available +#ifdef CEMU_DEBUG_ASSERT + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* liverangeItr = 
liveInfo.liveRangeList[f]; + if (liverangeItr->range->physicalRegister == subrangeItr->range->physicalRegister) + { + // this should never happen because we try to preventively avoid register conflicts + assert_dbg(); + } + } +#endif + // add to live ranges + liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr; + liveInfo.liveRangesCount++; + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + continue; + } + // find free register + uint32 physRegisterMask = (1<range->physicalRegister < 0) + assert_dbg(); + physRegisterMask &= ~(1<range->physicalRegister); + } + // check intersections with other ranges and determine allowed registers + uint32 allowedPhysRegisterMask = 0; + uint32 unusedRegisterMask = physRegisterMask; // mask of registers that are currently not used (does not include range checks) + if (physRegisterMask != 0) + { + allowedPhysRegisterMask = PPCRecRA_getAllowedRegisterMaskForFullRange(subrangeItr->range); + physRegisterMask &= allowedPhysRegisterMask; + } + if (physRegisterMask == 0) + { + struct + { + // estimated costs and chosen candidates for the different spill strategies + // hole cutting into a local range + struct + { + sint32 distance; + raLivenessSubrange_t* largestHoleSubrange; + sint32 cost; // additional cost of choosing this candidate + }localRangeHoleCutting; + // split current range (this is generally only a good choice when the current range is long but rarely used) + struct + { + sint32 cost; + sint32 physRegister; + sint32 distance; // size of hole + }availableRegisterHole; + // explode a inter-segment range (prefer ranges that are not read/written in this segment) + struct + { + raLivenessRange_t* range; + sint32 cost; + sint32 distance; // size of hole + // note: If we explode a range, we still have to check the size of the hole that becomes available, if too small then we need to add cost of splitting local subrange + }explodeRange; + // todo - add more strategies, make cost estimation smarter (for 
example, in some cases splitting can have reduced or no cost if read/store can be avoided due to data flow) + }spillStrategies; + // cant assign register + // there might be registers available, we just can't use them due to range conflicts + if (subrangeItr->end.index != RA_INTER_RANGE_END) + { + // range ends in current segment + + // Current algo looks like this: + // 1) Get the size of the largest possible hole that we can cut into any of the live local subranges + // 1.1) Check if the hole is large enough to hold the current subrange + // 2) If yes, cut hole and return false (full retry) + // 3) If no, try to reuse free register (need to determine how large the region is we can use) + // 4) If there is no free register or the range is extremely short go back to step 1+2 but additionally split the current subrange at where the hole ends + + cemu_assert_debug(currentIndex == subrangeItr->start.index); + + sint32 requiredSize = subrangeItr->end.index - subrangeItr->start.index; + // evaluate strategy: Cut hole into local subrange + spillStrategies.localRangeHoleCutting.distance = -1; + spillStrategies.localRangeHoleCutting.largestHoleSubrange = nullptr; + spillStrategies.localRangeHoleCutting.cost = INT_MAX; + if (currentIndex >= 0) + { + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* candidate = liveInfo.liveRangeList[f]; + if (candidate->end.index == RA_INTER_RANGE_END) + continue; + sint32 distance = PPCRecRA_countInstructionsUntilNextUse(candidate, currentIndex); + if (distance < 2) + continue; // not even worth the consideration + // calculate split cost of candidate + sint32 cost = PPCRecRARange_estimateAdditionalCostAfterSplit(candidate, currentIndex + distance); + // calculate additional split cost of currentRange if hole is not large enough + if (distance < requiredSize) + { + cost += PPCRecRARange_estimateAdditionalCostAfterSplit(subrangeItr, currentIndex + distance); + // we also slightly increase cost in relation to 
the remaining length (in order to make the algorithm prefer larger holes) + cost += (requiredSize - distance) / 10; + } + // compare cost with previous candidates + if (cost < spillStrategies.localRangeHoleCutting.cost) + { + spillStrategies.localRangeHoleCutting.cost = cost; + spillStrategies.localRangeHoleCutting.distance = distance; + spillStrategies.localRangeHoleCutting.largestHoleSubrange = candidate; + } + } + } + // evaluate strategy: Split current range to fit in available holes + spillStrategies.availableRegisterHole.cost = INT_MAX; + spillStrategies.availableRegisterHole.distance = -1; + spillStrategies.availableRegisterHole.physRegister = -1; + if (currentIndex >= 0) + { + if (unusedRegisterMask != 0) + { + for (sint32 t = 0; t < PPC_X64_GPR_USABLE_REGISTERS; t++) + { + if ((unusedRegisterMask&(1 << t)) == 0) + continue; + // get size of potential hole for this register + sint32 distance = PPCRecRA_countInstructionsUntilNextLocalPhysRegisterUse(imlSegment, currentIndex, t); + if (distance < 2) + continue; // not worth consideration + // calculate additional cost due to split + if (distance >= requiredSize) + assert_dbg(); // should not happen or else we would have selected this register + sint32 cost = PPCRecRARange_estimateAdditionalCostAfterSplit(subrangeItr, currentIndex + distance); + // add small additional cost for the remaining range (prefer larger holes) + cost += (requiredSize - distance) / 10; + if (cost < spillStrategies.availableRegisterHole.cost) + { + spillStrategies.availableRegisterHole.cost = cost; + spillStrategies.availableRegisterHole.distance = distance; + spillStrategies.availableRegisterHole.physRegister = t; + } + } + } + } + // evaluate strategy: Explode inter-segment ranges + spillStrategies.explodeRange.cost = INT_MAX; + spillStrategies.explodeRange.range = nullptr; + spillStrategies.explodeRange.distance = -1; + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* candidate = 
liveInfo.liveRangeList[f]; + if (candidate->end.index != RA_INTER_RANGE_END) + continue; + sint32 distance = PPCRecRA_countInstructionsUntilNextUse(liveInfo.liveRangeList[f], currentIndex); + if( distance < 2) + continue; + sint32 cost; + cost = PPCRecRARange_estimateAdditionalCostAfterRangeExplode(candidate->range); + // if the hole is not large enough, add cost of splitting current subrange + if (distance < requiredSize) + { + cost += PPCRecRARange_estimateAdditionalCostAfterSplit(subrangeItr, currentIndex + distance); + // add small additional cost for the remaining range (prefer larger holes) + cost += (requiredSize - distance) / 10; + } + // compare with current best candidate for this strategy + if (cost < spillStrategies.explodeRange.cost) + { + spillStrategies.explodeRange.cost = cost; + spillStrategies.explodeRange.distance = distance; + spillStrategies.explodeRange.range = candidate->range; + } + } + // choose strategy + if (spillStrategies.explodeRange.cost != INT_MAX && spillStrategies.explodeRange.cost <= spillStrategies.localRangeHoleCutting.cost && spillStrategies.explodeRange.cost <= spillStrategies.availableRegisterHole.cost) + { + // explode range + PPCRecRA_explodeRange(ppcImlGenContext, spillStrategies.explodeRange.range); + // split current subrange if necessary + if( requiredSize > spillStrategies.explodeRange.distance) + PPCRecRA_splitLocalSubrange(ppcImlGenContext, subrangeItr, currentIndex+spillStrategies.explodeRange.distance, true); + } + else if (spillStrategies.availableRegisterHole.cost != INT_MAX && spillStrategies.availableRegisterHole.cost <= spillStrategies.explodeRange.cost && spillStrategies.availableRegisterHole.cost <= spillStrategies.localRangeHoleCutting.cost) + { + // use available register + PPCRecRA_splitLocalSubrange(ppcImlGenContext, subrangeItr, currentIndex + spillStrategies.availableRegisterHole.distance, true); + } + else if (spillStrategies.localRangeHoleCutting.cost != INT_MAX && 
spillStrategies.localRangeHoleCutting.cost <= spillStrategies.explodeRange.cost && spillStrategies.localRangeHoleCutting.cost <= spillStrategies.availableRegisterHole.cost) + { + // cut hole + PPCRecRA_splitLocalSubrange(ppcImlGenContext, spillStrategies.localRangeHoleCutting.largestHoleSubrange, currentIndex + spillStrategies.localRangeHoleCutting.distance, true); + // split current subrange if necessary + if (requiredSize > spillStrategies.localRangeHoleCutting.distance) + PPCRecRA_splitLocalSubrange(ppcImlGenContext, subrangeItr, currentIndex + spillStrategies.localRangeHoleCutting.distance, true); + } + else if (subrangeItr->start.index == RA_INTER_RANGE_START) + { + // alternative strategy if we have no other choice: explode current range + PPCRecRA_explodeRange(ppcImlGenContext, subrangeItr->range); + } + else + assert_dbg(); + + return false; + } + else + { + // range exceeds segment border + // simple but bad solution -> explode the entire range (no longer allow it to cross segment boundaries) + // better solutions: 1) Depending on the situation, we can explode other ranges to resolve the conflict. 
Thus we should explode the range with the lowest extra cost + // 2) Or we explode the range only partially + // explode the range with the least cost + spillStrategies.explodeRange.cost = INT_MAX; + spillStrategies.explodeRange.range = nullptr; + spillStrategies.explodeRange.distance = -1; + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* candidate = liveInfo.liveRangeList[f]; + if (candidate->end.index != RA_INTER_RANGE_END) + continue; + // only select candidates that clash with current subrange + if (candidate->range->physicalRegister < 0 && candidate != subrangeItr) + continue; + + sint32 cost; + cost = PPCRecRARange_estimateAdditionalCostAfterRangeExplode(candidate->range); + // compare with current best candidate for this strategy + if (cost < spillStrategies.explodeRange.cost) + { + spillStrategies.explodeRange.cost = cost; + spillStrategies.explodeRange.distance = INT_MAX; + spillStrategies.explodeRange.range = candidate->range; + } + } + // add current range as a candidate too + sint32 ownCost; + ownCost = PPCRecRARange_estimateAdditionalCostAfterRangeExplode(subrangeItr->range); + if (ownCost < spillStrategies.explodeRange.cost) + { + spillStrategies.explodeRange.cost = ownCost; + spillStrategies.explodeRange.distance = INT_MAX; + spillStrategies.explodeRange.range = subrangeItr->range; + } + if (spillStrategies.explodeRange.cost == INT_MAX) + assert_dbg(); // should not happen + PPCRecRA_explodeRange(ppcImlGenContext, spillStrategies.explodeRange.range); + } + return false; + } + // assign register to range + sint32 registerIndex = -1; + for (sint32 f = 0; f < PPC_X64_GPR_USABLE_REGISTERS; f++) + { + if ((physRegisterMask&(1 << f)) != 0) + { + registerIndex = f; + break; + } + } + subrangeItr->range->physicalRegister = registerIndex; + // add to live ranges + liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr; + liveInfo.liveRangesCount++; + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; 
+ } + return true; +} + +void PPCRecRA_assignRegisters(ppcImlGenContext_t* ppcImlGenContext) +{ + // start with frequently executed segments first + sint32 maxLoopDepth = 0; + for (sint32 i = 0; i < ppcImlGenContext->segmentListCount; i++) + { + maxLoopDepth = std::max(maxLoopDepth, ppcImlGenContext->segmentList[i]->loopDepth); + } + while (true) + { + bool done = false; + for (sint32 d = maxLoopDepth; d >= 0; d--) + { + for (sint32 i = 0; i < ppcImlGenContext->segmentListCount; i++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[i]; + if (imlSegment->loopDepth != d) + continue; + done = PPCRecRA_assignSegmentRegisters(ppcImlGenContext, imlSegment); + if (done == false) + break; + } + if (done == false) + break; + } + if (done) + break; + } +} + +typedef struct +{ + raLivenessSubrange_t* subrangeList[SUBRANGE_LIST_SIZE]; + sint32 subrangeCount; + bool hasUndefinedEndings; +}subrangeEndingInfo_t; + +void _findSubrangeWriteEndings(raLivenessSubrange_t* subrange, uint32 iterationIndex, sint32 depth, subrangeEndingInfo_t* info) +{ + if (depth >= 30) + { + info->hasUndefinedEndings = true; + return; + } + if (subrange->lastIterationIndex == iterationIndex) + return; // already processed + subrange->lastIterationIndex = iterationIndex; + if (subrange->hasStoreDelayed) + return; // no need to traverse this subrange + PPCRecImlSegment_t* imlSegment = subrange->imlSegment; + if (subrange->end.index != RA_INTER_RANGE_END) + { + // ending segment + if (info->subrangeCount >= SUBRANGE_LIST_SIZE) + { + info->hasUndefinedEndings = true; + return; + } + else + { + info->subrangeList[info->subrangeCount] = subrange; + info->subrangeCount++; + } + return; + } + + // traverse next subranges in flow + if (imlSegment->nextSegmentBranchNotTaken) + { + if (subrange->subrangeBranchNotTaken == nullptr) + { + info->hasUndefinedEndings = true; + } + else + { + _findSubrangeWriteEndings(subrange->subrangeBranchNotTaken, iterationIndex, depth + 1, info); + } + } + if 
(imlSegment->nextSegmentBranchTaken) + { + if (subrange->subrangeBranchTaken == nullptr) + { + info->hasUndefinedEndings = true; + } + else + { + _findSubrangeWriteEndings(subrange->subrangeBranchTaken, iterationIndex, depth + 1, info); + } + } +} + +void _analyzeRangeDataFlow(raLivenessSubrange_t* subrange) +{ + if (subrange->end.index != RA_INTER_RANGE_END) + return; + // analyze data flow across segments (if this segment has writes) + if (subrange->hasStore) + { + subrangeEndingInfo_t writeEndingInfo; + writeEndingInfo.subrangeCount = 0; + writeEndingInfo.hasUndefinedEndings = false; + _findSubrangeWriteEndings(subrange, PPCRecRA_getNextIterationIndex(), 0, &writeEndingInfo); + if (writeEndingInfo.hasUndefinedEndings == false) + { + // get cost of delaying store into endings + sint32 delayStoreCost = 0; + bool alreadyStoredInAllEndings = true; + for (sint32 i = 0; i < writeEndingInfo.subrangeCount; i++) + { + raLivenessSubrange_t* subrangeItr = writeEndingInfo.subrangeList[i]; + if( subrangeItr->hasStore ) + continue; // this ending already stores, no extra cost + alreadyStoredInAllEndings = false; + sint32 storeCost = PPCRecRARange_getReadWriteCost(subrangeItr->imlSegment); + delayStoreCost = std::max(storeCost, delayStoreCost); + } + if (alreadyStoredInAllEndings) + { + subrange->hasStore = false; + subrange->hasStoreDelayed = true; + } + else if (delayStoreCost <= PPCRecRARange_getReadWriteCost(subrange->imlSegment)) + { + subrange->hasStore = false; + subrange->hasStoreDelayed = true; + for (sint32 i = 0; i < writeEndingInfo.subrangeCount; i++) + { + raLivenessSubrange_t* subrangeItr = writeEndingInfo.subrangeList[i]; + subrangeItr->hasStore = true; + } + } + } + } +} + +void PPCRecRA_generateSegmentInstructions(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + sint16 virtualReg2PhysReg[PPC_REC_MAX_VIRTUAL_GPR]; + for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) + virtualReg2PhysReg[i] = -1; + + raLiveRangeInfo_t liveInfo; + 
liveInfo.liveRangesCount = 0; + sint32 index = 0; + sint32 suffixInstructionCount = (imlSegment->imlListCount > 0 && PPCRecompiler_isSuffixInstruction(imlSegment->imlList + imlSegment->imlListCount - 1)) ? 1 : 0; + // load register ranges that are supplied from previous segments + raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + //for (auto& subrange : imlSegment->raInfo.list_subranges) + while(subrangeItr) + { + if (subrangeItr->start.index == RA_INTER_RANGE_START) + { + liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr; + liveInfo.liveRangesCount++; +#ifdef CEMU_DEBUG_ASSERT + // load GPR + if (subrangeItr->_noLoad == false) + { + assert_dbg(); + } + // update translation table + if (virtualReg2PhysReg[subrangeItr->range->virtualRegister] != -1) + assert_dbg(); +#endif + virtualReg2PhysReg[subrangeItr->range->virtualRegister] = subrangeItr->range->physicalRegister; + } + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + // process instructions + while(index < imlSegment->imlListCount+1) + { + // expire ranges + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f]; + if (liverange->end.index <= index) + { + // update translation table + if (virtualReg2PhysReg[liverange->range->virtualRegister] == -1) + assert_dbg(); + virtualReg2PhysReg[liverange->range->virtualRegister] = -1; + // store GPR + if (liverange->hasStore) + { + PPCRecRA_insertGPRStoreInstruction(imlSegment, std::min(index, imlSegment->imlListCount - suffixInstructionCount), liverange->range->physicalRegister, liverange->range->name); + index++; + } + // remove entry + liveInfo.liveRangesCount--; + liveInfo.liveRangeList[f] = liveInfo.liveRangeList[liveInfo.liveRangesCount]; + f--; + } + } + // load new ranges + subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + while(subrangeItr) + { + if (subrangeItr->start.index == index) + { + 
liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr; + liveInfo.liveRangesCount++; + // load GPR + if (subrangeItr->_noLoad == false) + { + PPCRecRA_insertGPRLoadInstruction(imlSegment, std::min(index, imlSegment->imlListCount - suffixInstructionCount), subrangeItr->range->physicalRegister, subrangeItr->range->name); + index++; + subrangeItr->start.index--; + } + // update translation table + cemu_assert_debug(virtualReg2PhysReg[subrangeItr->range->virtualRegister] == -1); + virtualReg2PhysReg[subrangeItr->range->virtualRegister] = subrangeItr->range->physicalRegister; + } + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + // replace registers + if (index < imlSegment->imlListCount) + { + PPCImlOptimizerUsedRegisters_t gprTracking; + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + index, &gprTracking); + + sint32 inputGpr[4]; + inputGpr[0] = gprTracking.gpr[0]; + inputGpr[1] = gprTracking.gpr[1]; + inputGpr[2] = gprTracking.gpr[2]; + inputGpr[3] = gprTracking.gpr[3]; + sint32 replaceGpr[4]; + for (sint32 f = 0; f < 4; f++) + { + sint32 virtualRegister = gprTracking.gpr[f]; + if (virtualRegister < 0) + { + replaceGpr[f] = -1; + continue; + } + if (virtualRegister >= PPC_REC_MAX_VIRTUAL_GPR) + assert_dbg(); + replaceGpr[f] = virtualReg2PhysReg[virtualRegister]; + cemu_assert_debug(replaceGpr[f] >= 0); + } + PPCRecompiler_replaceGPRRegisterUsageMultiple(ppcImlGenContext, imlSegment->imlList + index, inputGpr, replaceGpr); + } + // next iml instruction + index++; + } + // expire infinite subranges (subranges that cross the segment border) + sint32 storeLoadListLength = 0; + raLoadStoreInfo_t loadStoreList[PPC_REC_MAX_VIRTUAL_GPR]; + for (sint32 f = 0; f < liveInfo.liveRangesCount; f++) + { + raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f]; + if (liverange->end.index == RA_INTER_RANGE_END) + { + // update translation table + cemu_assert_debug(virtualReg2PhysReg[liverange->range->virtualRegister] != -1); + 
virtualReg2PhysReg[liverange->range->virtualRegister] = -1; + // store GPR + if (liverange->hasStore) + { + loadStoreList[storeLoadListLength].registerIndex = liverange->range->physicalRegister; + loadStoreList[storeLoadListLength].registerName = liverange->range->name; + storeLoadListLength++; + } + // remove entry + liveInfo.liveRangesCount--; + liveInfo.liveRangeList[f] = liveInfo.liveRangeList[liveInfo.liveRangesCount]; + f--; + } + else + { + cemu_assert_suspicious(); + } + } + if (storeLoadListLength > 0) + { + PPCRecRA_insertGPRStoreInstructions(imlSegment, imlSegment->imlListCount - suffixInstructionCount, loadStoreList, storeLoadListLength); + } + // load subranges for next segments + subrangeItr = imlSegment->raInfo.linkedList_allSubranges; + storeLoadListLength = 0; + while(subrangeItr) + { + if (subrangeItr->start.index == RA_INTER_RANGE_END) + { + liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr; + liveInfo.liveRangesCount++; + // load GPR + if (subrangeItr->_noLoad == false) + { + loadStoreList[storeLoadListLength].registerIndex = subrangeItr->range->physicalRegister; + loadStoreList[storeLoadListLength].registerName = subrangeItr->range->name; + storeLoadListLength++; + } + // update translation table + cemu_assert_debug(virtualReg2PhysReg[subrangeItr->range->virtualRegister] == -1); + virtualReg2PhysReg[subrangeItr->range->virtualRegister] = subrangeItr->range->physicalRegister; + } + // next + subrangeItr = subrangeItr->link_segmentSubrangesGPR.next; + } + if (storeLoadListLength > 0) + { + PPCRecRA_insertGPRLoadInstructions(imlSegment, imlSegment->imlListCount - suffixInstructionCount, loadStoreList, storeLoadListLength); + } +} + +void PPCRecRA_generateMoveInstructions(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + PPCRecRA_generateSegmentInstructions(ppcImlGenContext, imlSegment); + } +} + +void 
PPCRecRA_calculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext); +void PPCRecRA_processFlowAndCalculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext); +void PPCRecRA_analyzeRangeDataFlowV2(ppcImlGenContext_t* ppcImlGenContext); + +void PPCRecompilerImm_prepareForRegisterAllocation(ppcImlGenContext_t* ppcImlGenContext) +{ + // insert empty segments after every non-taken branch if the linked segment has more than one input + // this gives the register allocator more room to create efficient spill code + sint32 segmentIndex = 0; + while (segmentIndex < ppcImlGenContext->segmentListCount) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[segmentIndex]; + if (imlSegment->nextSegmentIsUncertain) + { + segmentIndex++; + continue; + } + if (imlSegment->nextSegmentBranchTaken == nullptr || imlSegment->nextSegmentBranchNotTaken == nullptr) + { + segmentIndex++; + continue; + } + if (imlSegment->nextSegmentBranchNotTaken->list_prevSegments.size() <= 1) + { + segmentIndex++; + continue; + } + if (imlSegment->nextSegmentBranchNotTaken->isEnterable) + { + segmentIndex++; + continue; + } + PPCRecompilerIml_insertSegments(ppcImlGenContext, segmentIndex + 1, 1); + PPCRecImlSegment_t* imlSegmentP0 = ppcImlGenContext->segmentList[segmentIndex + 0]; + PPCRecImlSegment_t* imlSegmentP1 = ppcImlGenContext->segmentList[segmentIndex + 1]; + PPCRecImlSegment_t* nextSegment = imlSegment->nextSegmentBranchNotTaken; + PPCRecompilerIML_removeLink(imlSegmentP0, nextSegment); + PPCRecompilerIml_setLinkBranchNotTaken(imlSegmentP1, nextSegment); + PPCRecompilerIml_setLinkBranchNotTaken(imlSegmentP0, imlSegmentP1); + segmentIndex++; + } + // detect loops + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + imlSegment->momentaryIndex = s; + } + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; 
+ PPCRecRA_identifyLoop(ppcImlGenContext, imlSegment); + } +} + +void PPCRecompilerImm_allocateRegisters(ppcImlGenContext_t* ppcImlGenContext) +{ + PPCRecompilerImm_prepareForRegisterAllocation(ppcImlGenContext); + + ppcImlGenContext->raInfo.list_ranges = std::vector(); + + // calculate liveness + PPCRecRA_calculateLivenessRangesV2(ppcImlGenContext); + PPCRecRA_processFlowAndCalculateLivenessRangesV2(ppcImlGenContext); + + PPCRecRA_assignRegisters(ppcImlGenContext); + + PPCRecRA_analyzeRangeDataFlowV2(ppcImlGenContext); + PPCRecRA_generateMoveInstructions(ppcImlGenContext); + + PPCRecRA_deleteAllRanges(ppcImlGenContext); +} \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator2.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator2.cpp new file mode 100644 index 00000000..abb47e92 --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator2.cpp @@ -0,0 +1,414 @@ +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" +#include "PPCRecompilerImlRanges.h" +#include + +bool _isRangeDefined(PPCRecImlSegment_t* imlSegment, sint32 vGPR) +{ + return (imlSegment->raDistances.reg[vGPR].usageStart != INT_MAX); +} + +void PPCRecRA_calculateSegmentMinMaxRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) + { + imlSegment->raDistances.reg[i].usageStart = INT_MAX; + imlSegment->raDistances.reg[i].usageEnd = INT_MIN; + } + // scan instructions for usage range + sint32 index = 0; + PPCImlOptimizerUsedRegisters_t gprTracking; + while (index < imlSegment->imlListCount) + { + // end loop at suffix instruction + if (PPCRecompiler_isSuffixInstruction(imlSegment->imlList + index)) + break; + // get accessed GPRs + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + index, &gprTracking); + for (sint32 t = 0; t < 4; t++) + { + sint32 virtualRegister = 
gprTracking.gpr[t]; + if (virtualRegister < 0) + continue; + cemu_assert_debug(virtualRegister < PPC_REC_MAX_VIRTUAL_GPR); + imlSegment->raDistances.reg[virtualRegister].usageStart = std::min(imlSegment->raDistances.reg[virtualRegister].usageStart, index); // index before/at instruction + imlSegment->raDistances.reg[virtualRegister].usageEnd = std::max(imlSegment->raDistances.reg[virtualRegister].usageEnd, index+1); // index after instruction + } + // next instruction + index++; + } +} + +void PPCRecRA_calculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext) +{ + // for each register calculate min/max index of usage range within each segment + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecRA_calculateSegmentMinMaxRanges(ppcImlGenContext, ppcImlGenContext->segmentList[s]); + } +} + +raLivenessSubrange_t* PPCRecRA_convertToMappedRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 vGPR, raLivenessRange_t* range) +{ + if (imlSegment->raDistances.isProcessed[vGPR]) + { + // return already existing segment + return imlSegment->raInfo.linkedList_perVirtualGPR[vGPR]; + } + imlSegment->raDistances.isProcessed[vGPR] = true; + if (_isRangeDefined(imlSegment, vGPR) == false) + return nullptr; + // create subrange + cemu_assert_debug(imlSegment->raInfo.linkedList_perVirtualGPR[vGPR] == nullptr); + raLivenessSubrange_t* subrange = PPCRecRA_createSubrange(ppcImlGenContext, range, imlSegment, imlSegment->raDistances.reg[vGPR].usageStart, imlSegment->raDistances.reg[vGPR].usageEnd); + // traverse forward + if (imlSegment->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_END) + { + if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_START) + { + subrange->subrangeBranchTaken = PPCRecRA_convertToMappedRanges(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, vGPR, range); + cemu_assert_debug(subrange->subrangeBranchTaken->start.index == 
RA_INTER_RANGE_START); + } + if (imlSegment->nextSegmentBranchNotTaken && imlSegment->nextSegmentBranchNotTaken->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_START) + { + subrange->subrangeBranchNotTaken = PPCRecRA_convertToMappedRanges(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, vGPR, range); + cemu_assert_debug(subrange->subrangeBranchNotTaken->start.index == RA_INTER_RANGE_START); + } + } + // traverse backward + if (imlSegment->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_START) + { + for (auto& it : imlSegment->list_prevSegments) + { + if (it->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_END) + PPCRecRA_convertToMappedRanges(ppcImlGenContext, it, vGPR, range); + } + } + // return subrange + return subrange; +} + +void PPCRecRA_createSegmentLivenessRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) + { + if( _isRangeDefined(imlSegment, i) == false ) + continue; + if( imlSegment->raDistances.isProcessed[i]) + continue; + raLivenessRange_t* range = PPCRecRA_createRangeBase(ppcImlGenContext, i, ppcImlGenContext->mappedRegister[i]); + PPCRecRA_convertToMappedRanges(ppcImlGenContext, imlSegment, i, range); + } + // create lookup table of ranges + raLivenessSubrange_t* vGPR2Subrange[PPC_REC_MAX_VIRTUAL_GPR]; + for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) + { + vGPR2Subrange[i] = imlSegment->raInfo.linkedList_perVirtualGPR[i]; +#ifdef CEMU_DEBUG_ASSERT + if (vGPR2Subrange[i] && vGPR2Subrange[i]->link_sameVirtualRegisterGPR.next != nullptr) + assert_dbg(); +#endif + } + // parse instructions and convert to locations + sint32 index = 0; + PPCImlOptimizerUsedRegisters_t gprTracking; + while (index < imlSegment->imlListCount) + { + // end loop at suffix instruction + if (PPCRecompiler_isSuffixInstruction(imlSegment->imlList + index)) + break; + // get accessed GPRs + PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + index, &gprTracking); + // 
handle accessed GPR + for (sint32 t = 0; t < 4; t++) + { + sint32 virtualRegister = gprTracking.gpr[t]; + if (virtualRegister < 0) + continue; + bool isWrite = (t == 3); + // add location + PPCRecRA_updateOrAddSubrangeLocation(vGPR2Subrange[virtualRegister], index, isWrite == false, isWrite); +#ifdef CEMU_DEBUG_ASSERT + if (index < vGPR2Subrange[virtualRegister]->start.index) + assert_dbg(); + if (index+1 > vGPR2Subrange[virtualRegister]->end.index) + assert_dbg(); +#endif + } + // next instruction + index++; + } +} + +void PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 vGPR) +{ + if (_isRangeDefined(imlSegment, vGPR) == false) + { + imlSegment->raDistances.reg[vGPR].usageStart = RA_INTER_RANGE_END; + imlSegment->raDistances.reg[vGPR].usageEnd = RA_INTER_RANGE_END; + return; + } + imlSegment->raDistances.reg[vGPR].usageEnd = RA_INTER_RANGE_END; +} + +void PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 vGPR) +{ + if (_isRangeDefined(imlSegment, vGPR) == false) + { + imlSegment->raDistances.reg[vGPR].usageStart = RA_INTER_RANGE_START; + imlSegment->raDistances.reg[vGPR].usageEnd = RA_INTER_RANGE_START; + } + else + { + imlSegment->raDistances.reg[vGPR].usageStart = RA_INTER_RANGE_START; + } + // propagate backwards + for (auto& it : imlSegment->list_prevSegments) + { + PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext, it, vGPR); + } +} + +void _PPCRecRA_connectRanges(ppcImlGenContext_t* ppcImlGenContext, sint32 vGPR, PPCRecImlSegment_t** route, sint32 routeDepth) +{ +#ifdef CEMU_DEBUG_ASSERT + if (routeDepth < 2) + assert_dbg(); +#endif + // extend starting range to end of segment + PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext, route[0], vGPR); + // extend all the connecting segments in both directions + for (sint32 i = 1; i < (routeDepth - 1); i++) + { + PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext, route[i], vGPR); 
+ PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, route[i], vGPR); + } + // extend the final segment towards the beginning + PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, route[routeDepth-1], vGPR); +} + +void _PPCRecRA_checkAndTryExtendRange(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* currentSegment, sint32 vGPR, sint32 distanceLeft, PPCRecImlSegment_t** route, sint32 routeDepth) +{ + if (routeDepth >= 64) + { + cemuLog_logDebug(LogType::Force, "Recompiler RA route maximum depth exceeded for function 0x{:08x}", ppcImlGenContext->functionRef->ppcAddress); + return; + } + route[routeDepth] = currentSegment; + if (currentSegment->raDistances.reg[vGPR].usageStart == INT_MAX) + { + // measure distance to end of segment + distanceLeft -= currentSegment->imlListCount; + if (distanceLeft > 0) + { + if (currentSegment->nextSegmentBranchNotTaken) + _PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchNotTaken, vGPR, distanceLeft, route, routeDepth + 1); + if (currentSegment->nextSegmentBranchTaken) + _PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchTaken, vGPR, distanceLeft, route, routeDepth + 1); + } + return; + } + else + { + // measure distance to range + if (currentSegment->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_END) + { + if (distanceLeft < currentSegment->imlListCount) + return; // range too far away + } + else if (currentSegment->raDistances.reg[vGPR].usageStart != RA_INTER_RANGE_START && currentSegment->raDistances.reg[vGPR].usageStart > distanceLeft) + return; // out of range + // found close range -> connect ranges + _PPCRecRA_connectRanges(ppcImlGenContext, vGPR, route, routeDepth + 1); + } +} + +void PPCRecRA_checkAndTryExtendRange(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* currentSegment, sint32 vGPR) +{ +#ifdef CEMU_DEBUG_ASSERT + if (currentSegment->raDistances.reg[vGPR].usageEnd < 0) + assert_dbg(); +#endif + // count 
instructions to end of initial segment + if (currentSegment->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_START) + assert_dbg(); + sint32 instructionsUntilEndOfSeg; + if (currentSegment->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_END) + instructionsUntilEndOfSeg = 0; + else + instructionsUntilEndOfSeg = currentSegment->imlListCount - currentSegment->raDistances.reg[vGPR].usageEnd; + +#ifdef CEMU_DEBUG_ASSERT + if (instructionsUntilEndOfSeg < 0) + assert_dbg(); +#endif + sint32 remainingScanDist = 45 - instructionsUntilEndOfSeg; + if (remainingScanDist <= 0) + return; // can't reach end + + // also dont forget: Extending is easier if we allow 'non symetric' branches. E.g. register range one enters one branch + PPCRecImlSegment_t* route[64]; + route[0] = currentSegment; + if (currentSegment->nextSegmentBranchNotTaken) + { + _PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchNotTaken, vGPR, remainingScanDist, route, 1); + } + if (currentSegment->nextSegmentBranchTaken) + { + _PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchTaken, vGPR, remainingScanDist, route, 1); + } +} + +void PPCRecRA_mergeCloseRangesForSegmentV2(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment) +{ + for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) // todo: Use dynamic maximum or list of used vGPRs so we can avoid parsing empty entries + { + if(imlSegment->raDistances.reg[i].usageStart == INT_MAX) + continue; // not used + // check and extend if possible + PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, imlSegment, i); + } +#ifdef CEMU_DEBUG_ASSERT + if (imlSegment->list_prevSegments.empty() == false && imlSegment->isEnterable) + assert_dbg(); + if ((imlSegment->nextSegmentBranchNotTaken != nullptr || imlSegment->nextSegmentBranchTaken != nullptr) && imlSegment->nextSegmentIsUncertain) + assert_dbg(); +#endif +} + +void PPCRecRA_followFlowAndExtendRanges(ppcImlGenContext_t* ppcImlGenContext, 
PPCRecImlSegment_t* imlSegment) +{ + std::vector list_segments; + list_segments.reserve(1000); + sint32 index = 0; + imlSegment->raRangeExtendProcessed = true; + list_segments.push_back(imlSegment); + while (index < list_segments.size()) + { + PPCRecImlSegment_t* currentSegment = list_segments[index]; + PPCRecRA_mergeCloseRangesForSegmentV2(ppcImlGenContext, currentSegment); + // follow flow + if (currentSegment->nextSegmentBranchNotTaken && currentSegment->nextSegmentBranchNotTaken->raRangeExtendProcessed == false) + { + currentSegment->nextSegmentBranchNotTaken->raRangeExtendProcessed = true; + list_segments.push_back(currentSegment->nextSegmentBranchNotTaken); + } + if (currentSegment->nextSegmentBranchTaken && currentSegment->nextSegmentBranchTaken->raRangeExtendProcessed == false) + { + currentSegment->nextSegmentBranchTaken->raRangeExtendProcessed = true; + list_segments.push_back(currentSegment->nextSegmentBranchTaken); + } + index++; + } +} + +void PPCRecRA_mergeCloseRangesV2(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + if (imlSegment->list_prevSegments.empty()) + { + if (imlSegment->raRangeExtendProcessed) + assert_dbg(); // should not happen + PPCRecRA_followFlowAndExtendRanges(ppcImlGenContext, imlSegment); + } + } +} + +void PPCRecRA_extendRangesOutOfLoopsV2(ppcImlGenContext_t* ppcImlGenContext) +{ + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + auto localLoopDepth = imlSegment->loopDepth; + if( localLoopDepth <= 0 ) + continue; // not inside a loop + // look for loop exit + bool hasLoopExit = false; + if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->loopDepth < localLoopDepth) + { + hasLoopExit = true; + } + if (imlSegment->nextSegmentBranchNotTaken && 
imlSegment->nextSegmentBranchNotTaken->loopDepth < localLoopDepth) + { + hasLoopExit = true; + } + if(hasLoopExit == false) + continue; + + // extend looping ranges into all exits (this allows the data flow analyzer to move stores out of the loop) + for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) // todo: Use dynamic maximum or list of used vGPRs so we can avoid parsing empty entries + { + if (imlSegment->raDistances.reg[i].usageEnd != RA_INTER_RANGE_END) + continue; // range not set or does not reach end of segment + if(imlSegment->nextSegmentBranchTaken) + PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, i); + if(imlSegment->nextSegmentBranchNotTaken) + PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, i); + } + } +} + +void PPCRecRA_processFlowAndCalculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext) +{ + // merge close ranges + PPCRecRA_mergeCloseRangesV2(ppcImlGenContext); + // extra pass to move register stores out of loops + PPCRecRA_extendRangesOutOfLoopsV2(ppcImlGenContext); + // calculate liveness ranges + for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + PPCRecRA_createSegmentLivenessRanges(ppcImlGenContext, imlSegment); + } +} + +void PPCRecRA_analyzeSubrangeDataDependencyV2(raLivenessSubrange_t* subrange) +{ + bool isRead = false; + bool isWritten = false; + bool isOverwritten = false; + for (auto& location : subrange->list_locations) + { + if (location.isRead) + { + isRead = true; + } + if (location.isWrite) + { + if (isRead == false) + isOverwritten = true; + isWritten = true; + } + } + subrange->_noLoad = isOverwritten; + subrange->hasStore = isWritten; + + if (subrange->start.index == RA_INTER_RANGE_START) + subrange->_noLoad = true; +} + +void _analyzeRangeDataFlow(raLivenessSubrange_t* subrange); + +void 
PPCRecRA_analyzeRangeDataFlowV2(ppcImlGenContext_t* ppcImlGenContext) +{ + // this function is called after _assignRegisters(), which means that all ranges are already final and wont change anymore + // first do a per-subrange pass + for (auto& range : ppcImlGenContext->raInfo.list_ranges) + { + for (auto& subrange : range->list_subranges) + { + PPCRecRA_analyzeSubrangeDataDependencyV2(subrange); + } + } + // then do a second pass where we scan along subrange flow + for (auto& range : ppcImlGenContext->raInfo.list_ranges) + { + for (auto& subrange : range->list_subranges) // todo - traversing this backwards should be faster and yield better results due to the nature of the algorithm + { + _analyzeRangeDataFlow(subrange); + } + } +} diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIntermediate.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIntermediate.cpp index 468af5b2..fcbe64be 100644 --- a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIntermediate.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIntermediate.cpp @@ -1,26 +1,173 @@ #include "PPCRecompiler.h" #include "PPCRecompilerIml.h" +PPCRecImlSegment_t* PPCRecompiler_getSegmentByPPCJumpAddress(ppcImlGenContext_t* ppcImlGenContext, uint32 ppcOffset) +{ + for(sint32 s=0; ssegmentListCount; s++) + { + if( ppcImlGenContext->segmentList[s]->isJumpDestination && ppcImlGenContext->segmentList[s]->jumpDestinationPPCAddress == ppcOffset ) + { + return ppcImlGenContext->segmentList[s]; + } + } + debug_printf("PPCRecompiler_getSegmentByPPCJumpAddress(): Unable to find segment (ppcOffset 0x%08x)\n", ppcOffset); + return NULL; +} + +void PPCRecompilerIml_setLinkBranchNotTaken(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst) +{ + // make sure segments aren't already linked + if (imlSegmentSrc->nextSegmentBranchNotTaken == imlSegmentDst) + return; + // add as next segment for source + if (imlSegmentSrc->nextSegmentBranchNotTaken != NULL) + assert_dbg(); + 
imlSegmentSrc->nextSegmentBranchNotTaken = imlSegmentDst; + // add as previous segment for destination + imlSegmentDst->list_prevSegments.push_back(imlSegmentSrc); +} + +void PPCRecompilerIml_setLinkBranchTaken(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst) +{ + // make sure segments aren't already linked + if (imlSegmentSrc->nextSegmentBranchTaken == imlSegmentDst) + return; + // add as next segment for source + if (imlSegmentSrc->nextSegmentBranchTaken != NULL) + assert_dbg(); + imlSegmentSrc->nextSegmentBranchTaken = imlSegmentDst; + // add as previous segment for destination + imlSegmentDst->list_prevSegments.push_back(imlSegmentSrc); +} + +void PPCRecompilerIML_removeLink(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst) +{ + if (imlSegmentSrc->nextSegmentBranchNotTaken == imlSegmentDst) + { + imlSegmentSrc->nextSegmentBranchNotTaken = NULL; + } + else if (imlSegmentSrc->nextSegmentBranchTaken == imlSegmentDst) + { + imlSegmentSrc->nextSegmentBranchTaken = NULL; + } + else + assert_dbg(); + + bool matchFound = false; + for (sint32 i = 0; i < imlSegmentDst->list_prevSegments.size(); i++) + { + if (imlSegmentDst->list_prevSegments[i] == imlSegmentSrc) + { + imlSegmentDst->list_prevSegments.erase(imlSegmentDst->list_prevSegments.begin()+i); + matchFound = true; + break; + } + } + if (matchFound == false) + assert_dbg(); +} + +/* + * Replaces all links to segment orig with linkts to segment new + */ +void PPCRecompilerIML_relinkInputSegment(PPCRecImlSegment_t* imlSegmentOrig, PPCRecImlSegment_t* imlSegmentNew) +{ + while (imlSegmentOrig->list_prevSegments.size() != 0) + { + PPCRecImlSegment_t* prevSegment = imlSegmentOrig->list_prevSegments[0]; + if (prevSegment->nextSegmentBranchNotTaken == imlSegmentOrig) + { + PPCRecompilerIML_removeLink(prevSegment, imlSegmentOrig); + PPCRecompilerIml_setLinkBranchNotTaken(prevSegment, imlSegmentNew); + } + else if (prevSegment->nextSegmentBranchTaken == imlSegmentOrig) + { + 
PPCRecompilerIML_removeLink(prevSegment, imlSegmentOrig); + PPCRecompilerIml_setLinkBranchTaken(prevSegment, imlSegmentNew); + } + else + { + assert_dbg(); + } + } +} + +void PPCRecompilerIML_linkSegments(ppcImlGenContext_t* ppcImlGenContext) +{ + for(sint32 s=0; ssegmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + + bool isLastSegment = (s+1)>=ppcImlGenContext->segmentListCount; + PPCRecImlSegment_t* nextSegment = isLastSegment?NULL:ppcImlGenContext->segmentList[s+1]; + // handle empty segment + if( imlSegment->imlListCount == 0 ) + { + if (isLastSegment == false) + PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, ppcImlGenContext->segmentList[s+1]); // continue execution to next segment + else + imlSegment->nextSegmentIsUncertain = true; + continue; + } + // check last instruction of segment + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+(imlSegment->imlListCount-1); + if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK ) + { + // find destination segment by ppc jump address + PPCRecImlSegment_t* jumpDestSegment = PPCRecompiler_getSegmentByPPCJumpAddress(ppcImlGenContext, imlInstruction->op_conditionalJump.jumpmarkAddress); + if( jumpDestSegment ) + { + if (imlInstruction->op_conditionalJump.condition != PPCREC_JUMP_CONDITION_NONE) + PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, nextSegment); + PPCRecompilerIml_setLinkBranchTaken(imlSegment, jumpDestSegment); + } + else + { + imlSegment->nextSegmentIsUncertain = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO ) + { + // currently we assume that the next segment is unknown for all macros + imlSegment->nextSegmentIsUncertain = true; + } + else + { + // all other instruction types do not branch + //imlSegment->nextSegment[0] = nextSegment; + PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, nextSegment); + //imlSegment->nextSegmentIsUncertain = true; + } + } +} + 
void PPCRecompilerIML_isolateEnterableSegments(ppcImlGenContext_t* ppcImlGenContext) { - size_t initialSegmentCount = ppcImlGenContext->segmentList2.size(); - for (size_t i = 0; i < initialSegmentCount; i++) + sint32 initialSegmentCount = ppcImlGenContext->segmentListCount; + for (sint32 i = 0; i < ppcImlGenContext->segmentListCount; i++) { - IMLSegment* imlSegment = ppcImlGenContext->segmentList2[i]; + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[i]; if (imlSegment->list_prevSegments.empty() == false && imlSegment->isEnterable) { // spawn new segment at end - PPCRecompilerIml_insertSegments(ppcImlGenContext, ppcImlGenContext->segmentList2.size(), 1); - IMLSegment* entrySegment = ppcImlGenContext->segmentList2[ppcImlGenContext->segmentList2.size()-1]; + PPCRecompilerIml_insertSegments(ppcImlGenContext, ppcImlGenContext->segmentListCount, 1); + PPCRecImlSegment_t* entrySegment = ppcImlGenContext->segmentList[ppcImlGenContext->segmentListCount-1]; entrySegment->isEnterable = true; entrySegment->enterPPCAddress = imlSegment->enterPPCAddress; // create jump instruction PPCRecompiler_pushBackIMLInstructions(entrySegment, 0, 1); - entrySegment->imlList.data()[0].make_jump(); - IMLSegment_SetLinkBranchTaken(entrySegment, imlSegment); + PPCRecompilerImlGen_generateNewInstruction_jumpSegment(ppcImlGenContext, entrySegment->imlList + 0); + PPCRecompilerIml_setLinkBranchTaken(entrySegment, imlSegment); // remove enterable flag from original segment imlSegment->isEnterable = false; imlSegment->enterPPCAddress = 0; } } -} \ No newline at end of file +} + +PPCRecImlInstruction_t* PPCRecompilerIML_getLastInstruction(PPCRecImlSegment_t* imlSegment) +{ + if (imlSegment->imlListCount == 0) + return nullptr; + return imlSegment->imlList + (imlSegment->imlListCount - 1); +} diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.cpp new file mode 100644 index 00000000..97b2c14c --- /dev/null +++ 
b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.cpp @@ -0,0 +1,2687 @@ +#include "Cafe/HW/Espresso/PPCState.h" +#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h" +#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterHelper.h" +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" +#include "Cafe/OS/libs/coreinit/coreinit_Time.h" +#include "util/MemMapper/MemMapper.h" +#include "Common/cpu_features.h" + +sint32 x64Gen_registerMap[12] = // virtual GPR to x64 register mapping +{ + REG_RAX, REG_RDX, REG_RBX, REG_RBP, REG_RSI, REG_RDI, REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_RCX +}; + +/* +* Remember current instruction output offset for reloc +* The instruction generated after this method has been called will be adjusted +*/ +void PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext_t* x64GenContext, uint8 type, void* extraInfo = nullptr) +{ + if( x64GenContext->relocateOffsetTableCount >= x64GenContext->relocateOffsetTableSize ) + { + x64GenContext->relocateOffsetTableSize = std::max(4, x64GenContext->relocateOffsetTableSize*2); + x64GenContext->relocateOffsetTable = (x64RelocEntry_t*)realloc(x64GenContext->relocateOffsetTable, sizeof(x64RelocEntry_t)*x64GenContext->relocateOffsetTableSize); + } + x64GenContext->relocateOffsetTable[x64GenContext->relocateOffsetTableCount].offset = x64GenContext->codeBufferIndex; + x64GenContext->relocateOffsetTable[x64GenContext->relocateOffsetTableCount].type = type; + x64GenContext->relocateOffsetTable[x64GenContext->relocateOffsetTableCount].extraInfo = extraInfo; + x64GenContext->relocateOffsetTableCount++; +} + +/* +* Overwrites the currently cached (in x64 cf) cr* register +* Should be called before each x64 instruction which overwrites the current status flags (with mappedCRRegister set to PPCREC_CR_TEMPORARY unless explicitly set by PPC instruction) +*/ +void PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* 
ppcImlGenContext, x64GenContext_t* x64GenContext, sint32 mappedCRRegister, sint32 crState) +{ + x64GenContext->activeCRRegister = mappedCRRegister; + x64GenContext->activeCRState = crState; +} + +/* +* Reset cached cr* register without storing it first +*/ +void PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext) +{ + x64GenContext->activeCRRegister = PPC_REC_INVALID_REGISTER; +} + +void PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext_t* x64GenContext, sint32 jumpInstructionOffset, sint32 destinationOffset) +{ + uint8* instructionData = x64GenContext->codeBuffer + jumpInstructionOffset; + if (instructionData[0] == 0x0F && (instructionData[1] >= 0x80 && instructionData[1] <= 0x8F)) + { + // far conditional jump + *(uint32*)(instructionData + 2) = (destinationOffset - (jumpInstructionOffset + 6)); + } + else if (instructionData[0] >= 0x70 && instructionData[0] <= 0x7F) + { + // short conditional jump + sint32 distance = (sint32)((destinationOffset - (jumpInstructionOffset + 2))); + cemu_assert_debug(distance >= -128 && distance <= 127); + *(uint8*)(instructionData + 1) = (uint8)distance; + } + else if (instructionData[0] == 0xE9) + { + *(uint32*)(instructionData + 1) = (destinationOffset - (jumpInstructionOffset + 5)); + } + else if (instructionData[0] == 0xEB) + { + sint32 distance = (sint32)((destinationOffset - (jumpInstructionOffset + 2))); + cemu_assert_debug(distance >= -128 && distance <= 127); + *(uint8*)(instructionData + 1) = (uint8)distance; + } + else + { + assert_dbg(); + } +} + +void PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + sint32 crRegister = imlInstruction->crRegister; + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, 
REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); // check for sign instead of _BELOW (CF) which is not set by TEST + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + // todo: Set CR SO if XER SO bit is set + PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, crRegister, PPCREC_CR_STATE_TYPE_LOGICAL); +} + +void* ATTR_MS_ABI PPCRecompiler_virtualHLE(PPCInterpreter_t* hCPU, uint32 hleFuncId) +{ + void* prevRSPTemp = hCPU->rspTemp; + if( hleFuncId == 0xFFD0 ) + { + hCPU->remainingCycles -= 500; // let subtract about 500 cycles for each HLE call + hCPU->gpr[3] = 0; + PPCInterpreter_nextInstruction(hCPU); + return hCPU; + } + else + { + auto hleCall = PPCInterpreter_getHLECall(hleFuncId); + cemu_assert(hleCall != nullptr); + hleCall(hCPU); + } + hCPU->rspTemp = prevRSPTemp; + return PPCInterpreter_getCurrentInstance(); +} + +void ATTR_MS_ABI PPCRecompiler_getTBL(PPCInterpreter_t* hCPU, uint32 gprIndex) +{ + uint64 coreTime = coreinit::OSGetSystemTime(); + hCPU->gpr[gprIndex] = (uint32)(coreTime&0xFFFFFFFF); +} + +void ATTR_MS_ABI PPCRecompiler_getTBU(PPCInterpreter_t* hCPU, uint32 gprIndex) +{ + uint64 coreTime = coreinit::OSGetSystemTime(); + hCPU->gpr[gprIndex] = (uint32)((coreTime>>32)&0xFFFFFFFF); +} + +bool PPCRecompilerX64Gen_imlInstruction_macro(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); 
+ if( imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BLRL ) + { + uint32 currentInstructionAddress = imlInstruction->op_macro.param; + // MOV EDX, [SPR_LR] + x64Emit_mov_reg64_mem32(x64GenContext, REG_RDX, REG_RSP, offsetof(PPCInterpreter_t, spr.LR)); + // if BLRL, then update SPR LR + if (imlInstruction->operation == PPCREC_IML_MACRO_BLRL) + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), currentInstructionAddress + 4); + // JMP [offset+RDX*(8/4)+R15] + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA4); + x64Gen_writeU8(x64GenContext, 0x57); + x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_BCTR || imlInstruction->operation == PPCREC_IML_MACRO_BCTRL ) + { + uint32 currentInstructionAddress = imlInstruction->op_macro.param; + // MOV EDX, [SPR_CTR] + x64Emit_mov_reg64_mem32(x64GenContext, REG_RDX, REG_RSP, offsetof(PPCInterpreter_t, spr.CTR)); + // if BCTRL, then update SPR LR + if (imlInstruction->operation == PPCREC_IML_MACRO_BCTRL) + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), currentInstructionAddress + 4); + // JMP [offset+RDX*(8/4)+R15] + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA4); + x64Gen_writeU8(x64GenContext, 0x57); + x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_BL ) + { + // MOV DWORD [SPR_LinkRegister], newLR + uint32 newLR = imlInstruction->op_macro.param + 4; + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), newLR); + // remember new instruction pointer in RDX + uint32 newIP 
= imlInstruction->op_macro.param2; + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, newIP); + // since RDX is constant we can use JMP [R15+const_offset] if jumpTableOffset+RDX*2 does not exceed the 2GB boundary + uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; + if (lookupOffset >= 0x80000000ULL) + { + // JMP [offset+RDX*(8/4)+R15] + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA4); + x64Gen_writeU8(x64GenContext, 0x57); + x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); + } + else + { + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA7); + x64Gen_writeU32(x64GenContext, (uint32)lookupOffset); + } + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_B_FAR ) + { + // remember new instruction pointer in RDX + uint32 newIP = imlInstruction->op_macro.param2; + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, newIP); + // Since RDX is constant we can use JMP [R15+const_offset] if jumpTableOffset+RDX*2 does not exceed the 2GB boundary + uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; + if (lookupOffset >= 0x80000000ULL) + { + // JMP [offset+RDX*(8/4)+R15] + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA4); + x64Gen_writeU8(x64GenContext, 0x57); + x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); + } + else + { + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA7); + x64Gen_writeU32(x64GenContext, (uint32)lookupOffset); + } + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_LEAVE ) + { + uint32 
currentInstructionAddress = imlInstruction->op_macro.param; + // remember PC value in REG_EDX + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, currentInstructionAddress); + + uint32 newIP = 0; // special value for recompiler exit + uint64 lookupOffset = (uint64)&(((PPCRecompilerInstanceData_t*)NULL)->ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL; + // JMP [R15+offset] + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0xFF); + x64Gen_writeU8(x64GenContext, 0xA7); + x64Gen_writeU32(x64GenContext, (uint32)lookupOffset); + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK ) + { + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, imlInstruction->op_macro.param2); + x64Gen_int3(x64GenContext); + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES ) + { + uint32 cycleCount = imlInstruction->op_macro.param; + x64Gen_sub_mem32reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), cycleCount); + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_HLE ) + { + uint32 ppcAddress = imlInstruction->op_macro.param; + uint32 funcId = imlInstruction->op_macro.param2; + //x64Gen_int3(x64GenContext); + // update instruction pointer + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer), ppcAddress); + //// save hCPU (RSP) + //x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)&ppcRecompilerX64_hCPUTemp); + //x64Emit_mov_mem64_reg64(x64GenContext, REG_RESV_TEMP, 0, REG_RSP); + // set parameters + x64Gen_mov_reg64_reg64(x64GenContext, REG_RCX, REG_RSP); + x64Gen_mov_reg64_imm64(x64GenContext, REG_RDX, funcId); + // restore stackpointer from executionContext/hCPU->rspTemp + x64Emit_mov_reg64_mem64(x64GenContext, REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp)); + //x64Emit_mov_reg64_mem64(x64GenContext, REG_RSP, REG_R14, 0); + //x64Gen_int3(x64GenContext); + // 
reserve space on stack for call parameters + x64Gen_sub_reg64_imm32(x64GenContext, REG_RSP, 8*11); // must be uneven number in order to retain stack 0x10 alignment + x64Gen_mov_reg64_imm64(x64GenContext, REG_RBP, 0); + // call HLE function + x64Gen_mov_reg64_imm64(x64GenContext, REG_RAX, (uint64)PPCRecompiler_virtualHLE); + x64Gen_call_reg64(x64GenContext, REG_RAX); + // restore RSP to hCPU (from RAX, result of PPCRecompiler_virtualHLE) + //x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)&ppcRecompilerX64_hCPUTemp); + //x64Emit_mov_reg64_mem64Reg64(x64GenContext, REG_RSP, REG_RESV_TEMP, 0); + x64Gen_mov_reg64_reg64(x64GenContext, REG_RSP, REG_RAX); + // MOV R15, ppcRecompilerInstanceData + x64Gen_mov_reg64_imm64(x64GenContext, REG_R15, (uint64)ppcRecompilerInstanceData); + // MOV R13, memory_base + x64Gen_mov_reg64_imm64(x64GenContext, REG_R13, (uint64)memory_base); + // check if cycles where decreased beyond zero, if yes -> leave recompiler + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), 31); // check if negative + sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NOT_CARRY, 0); + //x64Gen_int3(x64GenContext); + //x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, ppcAddress); + + x64Emit_mov_reg64_mem32(x64GenContext, REG_RDX, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer)); + // set EAX to 0 (we assume that ppcRecompilerDirectJumpTable[0] will be a recompiler escape function) + x64Gen_xor_reg32_reg32(x64GenContext, REG_RAX, REG_RAX); + // ADD RAX, R15 (R15 -> Pointer to ppcRecompilerInstanceData + x64Gen_add_reg64_reg64(x64GenContext, REG_RAX, REG_R15); + //// JMP [recompilerCallTable+EAX/4*8] + //x64Gen_int3(x64GenContext); + x64Gen_jmp_memReg64(x64GenContext, REG_RAX, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, 
x64GenContext->codeBufferIndex); + // check if instruction pointer was changed + // assign new instruction pointer to EAX + x64Emit_mov_reg64_mem32(x64GenContext, REG_RAX, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer)); + // remember instruction pointer in REG_EDX + x64Gen_mov_reg64_reg64(x64GenContext, REG_RDX, REG_RAX); + // EAX *= 2 + x64Gen_add_reg64_reg64(x64GenContext, REG_RAX, REG_RAX); + // ADD RAX, R15 (R15 -> Pointer to ppcRecompilerInstanceData + x64Gen_add_reg64_reg64(x64GenContext, REG_RAX, REG_R15); + // JMP [ppcRecompilerDirectJumpTable+RAX/4*8] + x64Gen_jmp_memReg64(x64GenContext, REG_RAX, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable)); + return true; + } + else if( imlInstruction->operation == PPCREC_IML_MACRO_MFTB ) + { + uint32 ppcAddress = imlInstruction->op_macro.param; + uint32 sprId = imlInstruction->op_macro.param2&0xFFFF; + uint32 gprIndex = (imlInstruction->op_macro.param2>>16)&0x1F; + // update instruction pointer + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer), ppcAddress); + // set parameters + x64Gen_mov_reg64_reg64(x64GenContext, REG_RCX, REG_RSP); + x64Gen_mov_reg64_imm64(x64GenContext, REG_RDX, gprIndex); + // restore stackpointer to original RSP + x64Emit_mov_reg64_mem64(x64GenContext, REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp)); + // push hCPU on stack + x64Gen_push_reg64(x64GenContext, REG_RCX); + // reserve space on stack for call parameters + x64Gen_sub_reg64_imm32(x64GenContext, REG_RSP, 8*11 + 8); + x64Gen_mov_reg64_imm64(x64GenContext, REG_RBP, 0); + // call HLE function + if( sprId == SPR_TBL ) + x64Gen_mov_reg64_imm64(x64GenContext, REG_RAX, (uint64)PPCRecompiler_getTBL); + else if( sprId == SPR_TBU ) + x64Gen_mov_reg64_imm64(x64GenContext, REG_RAX, (uint64)PPCRecompiler_getTBU); + else + assert_dbg(); + x64Gen_call_reg64(x64GenContext, REG_RAX); + // restore hCPU from stack + x64Gen_add_reg64_imm32(x64GenContext, 
REG_RSP, 8 * 11 + 8); + x64Gen_pop_reg64(x64GenContext, REG_RSP); + // MOV R15, ppcRecompilerInstanceData + x64Gen_mov_reg64_imm64(x64GenContext, REG_R15, (uint64)ppcRecompilerInstanceData); + // MOV R13, memory_base + x64Gen_mov_reg64_imm64(x64GenContext, REG_R13, (uint64)memory_base); + return true; + } + else + { + debug_printf("Unknown recompiler macro operation %d\n", imlInstruction->operation); + assert_dbg(); + } + return false; +} + +/* +* Load from memory +*/ +bool PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed) +{ + sint32 realRegisterData = tempToRealRegister(imlInstruction->op_storeLoad.registerData); + sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem); + sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER; + if( indexed ) + realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2); + if( false )//imlInstruction->op_storeLoad.flags & PPCREC_IML_OP_FLAG_FASTMEMACCESS ) + { + // load u8/u16/u32 via direct memory access + optional sign extend + assert_dbg(); // todo + } + else + { + if( indexed && realRegisterMem == realRegisterMem2 ) + { + return false; + } + if( indexed && realRegisterData == realRegisterMem2 ) + { + // for indexed memory access realRegisterData must not be the same register as the second memory register, + // this can easily be fixed by swapping the logic of realRegisterMem and realRegisterMem2 + sint32 temp = realRegisterMem; + realRegisterMem = realRegisterMem2; + realRegisterMem2 = temp; + } + + bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend; + bool switchEndian = imlInstruction->op_storeLoad.flags2.swapEndian; + if( imlInstruction->op_storeLoad.copyWidth == 32 ) + { + //if( indexed ) + // PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if (indexed) + { + 
x64Gen_lea_reg64Low32_reg64Low32PlusReg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem, realRegisterMem2); + } + if( g_CPUFeatures.x86.movbe && switchEndian ) + { + if (indexed) + { + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); + //if (indexed && realRegisterMem != realRegisterData) + // x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else + { + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + } + } + else + { + if (indexed) + { + x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); + //if (realRegisterMem != realRegisterData) + // x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + if (switchEndian) + x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData); + } + else + { + x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if (switchEndian) + x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData); + } + } + } + else if( imlInstruction->op_storeLoad.copyWidth == 16 ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); // todo: We can avoid this if MOVBE is available + if (indexed) + { + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + if( g_CPUFeatures.x86.movbe && switchEndian ) + { + x64Gen_movBEZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if( indexed && realRegisterMem != realRegisterData ) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else + { + 
x64Gen_movZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if( indexed && realRegisterMem != realRegisterData ) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + if( switchEndian ) + x64Gen_rol_reg64Low16_imm8(x64GenContext, realRegisterData, 8); + } + if( signExtend ) + x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, realRegisterData, realRegisterData); + else + x64Gen_movZeroExtend_reg64Low32_reg64Low16(x64GenContext, realRegisterData, realRegisterData); + } + else if( imlInstruction->op_storeLoad.copyWidth == 8 ) + { + if( indexed ) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + // todo: Optimize by using only MOVZX/MOVSX + if( indexed ) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + // todo: Use sign extend move from memory instead of separate sign-extend? 
+ if( signExtend ) + x64Gen_movSignExtend_reg64Low32_mem8Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + else + x64Emit_movZX_reg32_mem8(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if( indexed && realRegisterMem != realRegisterData ) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else if( imlInstruction->op_storeLoad.copyWidth == PPC_REC_LOAD_LWARX_MARKER ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->op_storeLoad.immS32 != 0 ) + assert_dbg(); // not supported + if( indexed ) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, reservedMemAddr), realRegisterMem); // remember EA for reservation + x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if( indexed && realRegisterMem != realRegisterData ) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + if( switchEndian ) + x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData); + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, reservedMemValue), realRegisterData); // remember value for reservation + // LWARX instruction costs extra cycles (this speeds up busy loops) + x64Gen_sub_mem32reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), 20); + } + else if( imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_LSWI_3 ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( switchEndian == false ) + assert_dbg(); + if( indexed ) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); // can be replaced with LEA 
temp, [memReg1+memReg2] (this way we can avoid the SUB instruction after the move) + if( g_CPUFeatures.x86.movbe ) + { + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if( indexed && realRegisterMem != realRegisterData ) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else + { + x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32); + if( indexed && realRegisterMem != realRegisterData ) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData); + } + x64Gen_and_reg64Low32_imm32(x64GenContext, realRegisterData, 0xFFFFFF00); + } + else + return false; + return true; + } + return false; +} + +/* +* Write to memory +*/ +bool PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed) +{ + sint32 realRegisterData = tempToRealRegister(imlInstruction->op_storeLoad.registerData); + sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem); + sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER; + if (indexed) + realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2); + + if (false)//imlInstruction->op_storeLoad.flags & PPCREC_IML_OP_FLAG_FASTMEMACCESS ) + { + // load u8/u16/u32 via direct memory access + optional sign extend + assert_dbg(); // todo + } + else + { + if (indexed && realRegisterMem == realRegisterMem2) + { + return false; + } + if (indexed && realRegisterData == realRegisterMem2) + { + // for indexed memory access realRegisterData must not be the same register as the second memory register, + // this can easily be fixed by swapping the logic of realRegisterMem and 
realRegisterMem2 + sint32 temp = realRegisterMem; + realRegisterMem = realRegisterMem2; + realRegisterMem2 = temp; + } + + bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend; + bool swapEndian = imlInstruction->op_storeLoad.flags2.swapEndian; + if (imlInstruction->op_storeLoad.copyWidth == 32) + { + if (indexed) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + uint32 valueRegister; + if ((swapEndian == false || g_CPUFeatures.x86.movbe) && realRegisterMem != realRegisterData) + { + valueRegister = realRegisterData; + } + else + { + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); + valueRegister = REG_RESV_TEMP; + } + if (g_CPUFeatures.x86.movbe == false && swapEndian) + x64Gen_bswap_reg64Lower32bit(x64GenContext, valueRegister); + if (indexed) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + if (g_CPUFeatures.x86.movbe && swapEndian) + x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, valueRegister); + else + x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, valueRegister); + if (indexed) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else if (imlInstruction->op_storeLoad.copyWidth == 16) + { + if (indexed || swapEndian) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); + if (swapEndian) + x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); + if (indexed) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); + if (indexed) + 
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + // todo: Optimize this, e.g. by using MOVBE + } + else if (imlInstruction->op_storeLoad.copyWidth == 8) + { + if (indexed) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if (indexed && realRegisterMem == realRegisterData) + { + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); + realRegisterData = REG_RESV_TEMP; + } + if (indexed) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, realRegisterData); + if (indexed) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else if (imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STWCX_MARKER) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if (imlInstruction->op_storeLoad.immS32 != 0) + assert_dbg(); // todo + // reset cr0 LT, GT and EQ + sint32 crRegister = 0; + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_LT), 0); + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_GT), 0); + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_EQ), 0); + // calculate effective address + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); + if (swapEndian) + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + if (indexed) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + // realRegisterMem now holds EA + x64Gen_cmp_reg64Low32_mem32reg64(x64GenContext, realRegisterMem, REG_RESV_HCPU, offsetof(PPCInterpreter_t, 
reservedMemAddr)); + sint32 jumpInstructionOffsetJumpToEnd = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NOT_EQUAL, 0); + // EA matches reservation + // backup EAX (since it's an explicit operand of CMPXCHG and will be overwritten) + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), REG_EAX); + // backup REG_RESV_MEMBASE + x64Emit_mov_mem64_reg64(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[2]), REG_RESV_MEMBASE); + // add mem register to REG_RESV_MEMBASE + x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem); + // load reserved value in EAX + x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemValue)); + // bswap EAX + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_EAX); + + //x64Gen_lock_cmpxchg_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, 0, REG_RESV_TEMP); + x64Gen_lock_cmpxchg_mem32Reg64_reg64(x64GenContext, REG_RESV_MEMBASE, 0, REG_RESV_TEMP); + + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_EQ)); + + // reset reservation + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, reservedMemAddr), 0); + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, reservedMemValue), 0); + + // restore EAX + x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0])); + // restore REG_RESV_MEMBASE + x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_MEMBASE, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[2])); + + // copy XER SO to CR0 SO + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.XER), 31); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RESV_HCPU, 
offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_SO)); + // end + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffsetJumpToEnd, x64GenContext->codeBufferIndex); + } + else if (imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STSWI_2) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 16); // store upper 2 bytes .. + x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); // .. as big-endian + if (indexed) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + + x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP); + if (indexed) + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else if (imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STSWI_3) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData); + if (indexed) + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 8); + x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32 + 2, REG_RESV_TEMP); + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 8); + x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32 + 1, REG_RESV_TEMP); + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 8); + x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32 + 0, REG_RESV_TEMP); + + if (indexed) + 
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); + } + else + return false; + return true; + } + return false; +} + +/* + * Copy byte/word/dword from memory to memory + */ +void PPCRecompilerX64Gen_imlInstruction_mem2mem(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + sint32 realSrcMemReg = tempToRealRegister(imlInstruction->op_mem2mem.src.registerMem); + sint32 realSrcMemImm = imlInstruction->op_mem2mem.src.immS32; + sint32 realDstMemReg = tempToRealRegister(imlInstruction->op_mem2mem.dst.registerMem); + sint32 realDstMemImm = imlInstruction->op_mem2mem.dst.immS32; + // PPCRecompilerX64Gen_crConditionFlags_forget() is not needed here, since MOVs don't affect eflags + if (imlInstruction->op_mem2mem.copyWidth == 32) + { + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, realSrcMemReg, realSrcMemImm); + x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realDstMemReg, realDstMemImm, REG_RESV_TEMP); + } + else + { + assert_dbg(); + } +} + +bool PPCRecompilerX64Gen_imlInstruction_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN) + { + // registerResult = registerA + if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER) + { + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + if (imlInstruction->crMode == PPCREC_CR_MODE_LOGICAL) + { + // since MOV doesn't set eflags we need another test instruction + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult)); + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, 
ppcImlGenContext, x64GenContext, imlInstruction); + } + else + { + assert_dbg(); + } + } + else + { + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + } + else if (imlInstruction->operation == PPCREC_IML_OP_ENDIAN_SWAP) + { + // registerResult = endianSwap32(registerA) + if (imlInstruction->op_r_r.registerA != imlInstruction->op_r_r.registerResult) + assert_dbg(); + x64Gen_bswap_reg64Lower32bit(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_ADD ) + { + // registerResult += registerA + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32 ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + x64Gen_movSignExtend_reg64Low32_reg64Low8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode == PPCREC_CR_MODE_ARITHMETIC ) + { + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult)); + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + else + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation\n"); + assert_dbg(); + } + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_OR || 
imlInstruction->operation == PPCREC_IML_OP_AND || imlInstruction->operation == PPCREC_IML_OP_XOR ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->operation == PPCREC_IML_OP_OR ) + { + // registerResult |= registerA + x64Gen_or_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_AND ) + { + // registerResult &= registerA + x64Gen_and_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + else + { + // registerResult ^= registerA + x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_NOT ) + { + // copy register content if different registers + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA ) + { + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + // NOT destination register + x64Gen_not_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult)); + // update cr bits + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + // NOT instruction does not update flags, so we have to generate an additional TEST instruction + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, 
tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult)); + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_CNTLZW ) + { + // count leading zeros + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + if( g_CPUFeatures.x86.lzcnt ) + { + x64Gen_lzcnt_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + else + { + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerA), tempToRealRegister(imlInstruction->op_r_r.registerA)); + sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); + x64Gen_bsr_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + x64Gen_neg_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult)); + x64Gen_add_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), 32-1); + sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex); + x64Gen_mov_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), 32); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED ) + { + 
// registerA CMP registerB (arithmetic compare) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->crRegister == PPC_REC_INVALID_REGISTER ) + { + return false; // a NO-OP instruction + } + if( imlInstruction->crRegister >= 8 ) + { + return false; + } + // update state of cr register + if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED ) + PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC); + else + PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC); + // create compare instruction + x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + // set cr bits + sint32 crRegister = imlInstruction->crRegister; + if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED ) + { + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_LESS, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + // todo: Also set summary overflow if xer bit is set + } + else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED ) + { + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 ) + 
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + // todo: Also set summary overflow if xer bit is set + } + else + assert_dbg(); + } + else if( imlInstruction->operation == PPCREC_IML_OP_NEG ) + { + // copy register content if different registers + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA ) + { + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + // NEG destination register + x64Gen_neg_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult)); + // update cr bits + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + // copy operand to result if different registers + if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA ) + { + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + // copy xer_ca to eflags carry + 
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // add carry bit + x64Gen_adc_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), 0); + // update xer carry + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca)); + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + // set cr bits + sint32 crRegister = imlInstruction->crRegister; + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); // check for sign instead of _BELOW (CF) which is not set by AND/OR + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + // todo: Use different version of PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction) + // todo: Also set summary overflow if xer bit is set + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_ME ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + // copy operand to result if different registers + if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA ) + { + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA)); + } + // copy xer_ca to eflags carry + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // add carry bit + x64Gen_adc_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), (uint32)-1); + // update xer carry + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, 
xer_ca)); + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + // set cr bits + sint32 crRegister = imlInstruction->crRegister; + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult)); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY ) + { + // registerResult = ~registerOperand1 + carry + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r.registerA); + // copy operand to result register + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + // execute NOT on result + x64Gen_not_reg64Low32(x64GenContext, rRegResult); + // copy xer_ca to eflags carry + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // add carry + x64Gen_adc_reg64Low32_imm32(x64GenContext, rRegResult, 0); + // update carry + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca)); + // update cr if requested + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode == PPCREC_CR_MODE_LOGICAL ) + { + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + else + { + assert_dbg(); + } + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32 ) + { + // registerResult = (uint32)(sint32)(sint16)registerA + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + 
x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), reg32ToReg16(tempToRealRegister(imlInstruction->op_r_r.registerA))); + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode == PPCREC_CR_MODE_ARITHMETIC ) + { + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult)); + // set cr bits + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + else + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation\n"); + assert_dbg(); + } + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_DCBZ ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA ) + { + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r.registerA)); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r.registerResult)); + x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, ~0x1F); + x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE); + for(sint32 f=0; f<0x20; f+=8) + x64Gen_mov_mem64Reg64_imm32(x64GenContext, REG_RESV_TEMP, f, 0); + } + else + { + // calculate effective address + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r.registerA)); + x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, ~0x1F); + x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE); + for(sint32 f=0; f<0x20; f+=8) + x64Gen_mov_mem64Reg64_imm32(x64GenContext, REG_RESV_TEMP, f, 0); + } + } + else + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation 0x%x\n", imlInstruction->operation); + 
return false; + } + return true; +} + +bool PPCRecompilerX64Gen_imlInstruction_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN ) + { + // registerResult = immS32 + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + x64Gen_mov_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_ADD ) + { + // registerResult += immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + assert_dbg(); + } + x64Gen_add_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_SUB ) + { + // registerResult -= immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if (imlInstruction->crRegister == PPCREC_CR_REG_TEMP) + { + // do nothing -> SUB is for BDNZ instruction + } + else if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + // update cr register + assert_dbg(); + } + x64Gen_sub_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_AND ) + { + // registerResult &= immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + x64Gen_and_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32); + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( 
imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + // set cr bits + sint32 crRegister = imlInstruction->crRegister; + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + // todo: Set CR SO if XER SO bit is set + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_OR ) + { + // registerResult |= immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + x64Gen_or_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_XOR ) + { + // registerResult ^= immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + x64Gen_xor_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE ) + { + // registerResult <<<= immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + if( (imlInstruction->op_r_immS32.immS32&0x80) ) + assert_dbg(); // should not happen + x64Gen_rol_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint8)imlInstruction->op_r_immS32.immS32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED ) + { + // registerResult CMP immS32 (arithmetic compare) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->crRegister == 
PPC_REC_INVALID_REGISTER ) + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_s32(): No-Op CMP found\n"); + return true; // a NO-OP instruction + } + if( imlInstruction->crRegister >= 8 ) + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_s32(): Unsupported CMP with crRegister = 8\n"); + return false; + } + // update state of cr register + if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED ) + PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC); + else + PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC); + // create compare instruction + x64Gen_cmp_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), imlInstruction->op_r_immS32.immS32); + // set cr bits + uint32 crRegister = imlInstruction->crRegister; + if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED ) + { + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_LESS, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED ) + { + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, REG_ESP, 
offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 ) + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + } + else + assert_dbg(); + // todo: Also set summary overflow if xer bit is set? + } + else if( imlInstruction->operation == PPCREC_IML_OP_MFCR ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + uint32 destRegister = tempToRealRegister(imlInstruction->op_r_immS32.registerIndex); + x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, destRegister, destRegister); + for(sint32 f=0; f<32; f++) + { + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+f, 0); + x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, destRegister, destRegister); + } + } + else if (imlInstruction->operation == PPCREC_IML_OP_MTCRF) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + uint32 srcRegister = tempToRealRegister(imlInstruction->op_r_immS32.registerIndex); + uint32 crBitMask = ppc_MTCRFMaskToCRBitMask((uint32)imlInstruction->op_r_immS32.immS32); + for (sint32 f = 0; f < 32; f++) + { + if(((crBitMask >> f) & 1) == 0) + continue; + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_ESP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8) * (f), 0); + x64Gen_test_reg64Low32_imm32(x64GenContext, srcRegister, 0x80000000>>f); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_NOT_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8) * (f)); + } + } + else + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_s32(): Unsupported operation 0x%x\n", 
imlInstruction->operation); + return false; + } + return true; +} + +bool PPCRecompilerX64Gen_imlInstruction_conditional_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN) + { + // registerResult = immS32 (conditional) + if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER) + { + assert_dbg(); + } + + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (uint32)imlInstruction->op_conditional_r_s32.immS32); + + uint8 crBitIndex = imlInstruction->op_conditional_r_s32.crRegisterIndex * 4 + imlInstruction->op_conditional_r_s32.crBitIndex; + if (imlInstruction->op_conditional_r_s32.crRegisterIndex == x64GenContext->activeCRRegister) + { + if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC) + { + if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_LT) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_CARRY : X86_CONDITION_NOT_CARRY, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_EQ) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_GT) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? 
X86_CONDITION_UNSIGNED_ABOVE : X86_CONDITION_UNSIGNED_BELOW_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + } + else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC) + { + if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_LT) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGNED_LESS : X86_CONDITION_SIGNED_GREATER_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_EQ) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_GT) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + } + else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_LOGICAL) + { + if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_LT) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGN : X86_CONDITION_NOT_SIGN, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_EQ) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? 
X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_GT) + { + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + } + } + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + crBitIndex * sizeof(uint8), 0); + if (imlInstruction->op_conditional_r_s32.bitMustBeSet) + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, X86_CONDITION_CARRY, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + else + x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, X86_CONDITION_NOT_CARRY, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP); + return true; + } + return false; +} + +bool PPCRecompilerX64Gen_imlInstruction_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + if( imlInstruction->operation == PPCREC_IML_OP_ADD || imlInstruction->operation == PPCREC_IML_OP_ADD_UPDATE_CARRY || imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY ) + { + // registerResult = registerOperand1 + registerOperand2 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + + bool addCarry = imlInstruction->operation == 
PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY; + if( (rRegResult == rRegOperand1) || (rRegResult == rRegOperand2) ) + { + // be careful not to overwrite the operand before we use it + if( rRegResult == rRegOperand1 ) + { + if( addCarry ) + { + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + else + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + else + { + if( addCarry ) + { + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + } + else + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + } + } + else + { + // copy operand1 to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); + // add operand2 + if( addCarry ) + { + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + else + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + // update carry + if( imlInstruction->operation == PPCREC_IML_OP_ADD_UPDATE_CARRY || imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY ) + { + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca)); + } + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + sint32 crRegister = imlInstruction->crRegister; + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + return true; + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SUB ) + { + // registerResult = registerOperand1 - registerOperand2 + 
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + if( rRegOperand1 == rRegOperand2 ) + { + // result = operand1 - operand1 -> 0 + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + } + else if( rRegResult == rRegOperand1 ) + { + // result = result - operand2 + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + else if ( rRegResult == rRegOperand2 ) + { + // result = operand1 - result + // NEG result + x64Gen_neg_reg64Low32(x64GenContext, rRegResult); + // ADD result, operand1 + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + } + else + { + // copy operand1 to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); + // sub operand2 + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + sint32 crRegister = imlInstruction->crRegister; + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + return true; + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY ) + { + // registerResult = registerOperand1 - registerOperand2 + carry + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + if( 
rRegOperand1 == rRegOperand2 ) + { + // copy xer_ca to eflags carry + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + x64Gen_cmc(x64GenContext); + // result = operand1 - operand1 -> 0 + x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + } + else if( rRegResult == rRegOperand1 ) + { + // copy inverted xer_ca to eflags carry + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + x64Gen_cmc(x64GenContext); + // result = result - operand2 + x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + else if ( rRegResult == rRegOperand2 ) + { + // result = operand1 - result + // NOT result + x64Gen_not_reg64Low32(x64GenContext, rRegResult); + // copy xer_ca to eflags carry + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // ADC result, operand1 + x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + } + else + { + // copy operand1 to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); + // copy xer_ca to eflags carry + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + x64Gen_cmc(x64GenContext); + // sub operand2 + x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + // update carry flag (todo: is this actually correct in all cases?) 
+ x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca)); + // update cr0 if requested + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + assert_dbg(); + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED ) + { + // registerResult = registerOperand1 * registerOperand2 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + if( (rRegResult == rRegOperand1) || (rRegResult == rRegOperand2) ) + { + // be careful not to overwrite the operand before we use it + if( rRegResult == rRegOperand1 ) + x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + else + x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + } + else + { + // copy operand1 to destination register before doing multiplication + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1); + // add operand2 + x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2); + } + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + // since IMUL instruction leaves relevant flags undefined, we have to use another TEST instruction to get the correct results + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, 
imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SUBFC ) + { + // registerResult = registerOperand2(rB) - registerOperand1(rA) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + // updates carry flag + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + return false; + } + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperandA = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperandB = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + // update carry flag + // carry flag is detected this way: + //if ((~a+b) < a) { + // return true; + //} + //if ((~a+b+1) < 1) { + // return true; + //} + // set carry to zero + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // ((~a+b)<~a) == true -> ca = 1 + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperandA); + x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperandB); + x64Gen_not_reg64Low32(x64GenContext, rRegOperandA); + x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperandA); + x64Gen_not_reg64Low32(x64GenContext, rRegOperandA); + sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0); + // reset carry flag + jump destination afterwards + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex); + // OR ((~a+b+1)<1) == true -> ca = 1 + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperandA); + // todo: Optimize by reusing result in REG_RESV_TEMP from above and only add 1 + x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, 
REG_RESV_TEMP, rRegOperandB); + x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1); + sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0); + // reset carry flag + jump destination afterwards + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex); + // do subtraction + if( rRegOperandB == rRegOperandA ) + { + // result = operandA - operandA -> 0 + x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + } + else if( rRegResult == rRegOperandB ) + { + // result = result - operandA + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperandA); + } + else if ( rRegResult == rRegOperandA ) + { + // result = operandB - result + // NEG result + x64Gen_neg_reg64Low32(x64GenContext, rRegResult); + // ADD result, operandB + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperandB); + } + else + { + // copy operand1 to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperandB); + // sub operand2 + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperandA); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SLW || imlInstruction->operation == PPCREC_IML_OP_SRW ) + { + // registerResult = registerOperand1(rA) >> registerOperand2(rB) (up to 63 bits) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + + if (g_CPUFeatures.x86.bmi2 && imlInstruction->operation == 
PPCREC_IML_OP_SRW) + { + // use BMI2 SHRX if available + x64Gen_shrx_reg64_reg64_reg64(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); + } + else if (g_CPUFeatures.x86.bmi2 && imlInstruction->operation == PPCREC_IML_OP_SLW) + { + // use BMI2 SHLX if available + x64Gen_shlx_reg64_reg64_reg64(x64GenContext, rRegResult, rRegOperand1, rRegOperand2); + x64Gen_and_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); // trim result to 32bit + } + else + { + // lazy and slow way to do shift by register without relying on ECX/CL or BMI2 + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); + for (sint32 b = 0; b < 6; b++) + { + x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1 << b)); + sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set + if (b == 5) + { + x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); + } + else + { + if (imlInstruction->operation == PPCREC_IML_OP_SLW) + x64Gen_shl_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1 << b)); + else + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1 << b)); + } + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex); + } + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); + } + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); 
+ sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + // todo: Use BMI2 rotate if available + // check if CL/ECX/RCX is available + if( rRegResult != REG_RCX && rRegOperand1 != REG_RCX && rRegOperand2 != REG_RCX ) + { + // swap operand 2 with RCX + x64Gen_xchg_reg64_reg64(x64GenContext, REG_RCX, rRegOperand2); + // move operand 1 to temp register + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); + // rotate + x64Gen_rol_reg64Low32_cl(x64GenContext, REG_RESV_TEMP); + // undo swap operand 2 with RCX + x64Gen_xchg_reg64_reg64(x64GenContext, REG_RCX, rRegOperand2); + // copy to result register + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); + } + else + { + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); + // lazy and slow way to do shift by register without relying on ECX/CL + for(sint32 b=0; b<5; b++) + { + x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1<codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set + x64Gen_rol_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<codeBufferIndex); + } + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); + } + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SRAW ) + { + // registerResult = (sint32)registerOperand1(rA) >> (sint32)registerOperand2(rB) (up to 63 bits) + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = 
tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + // save cr + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + return false; + } + // todo: Use BMI instructions if available? + // MOV registerResult, registerOperand (if different) + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1); + // reset carry + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // we use the same shift by register approach as in SLW/SRW, but we have to differentiate by signed/unsigned shift since it influences how the carry flag is set + x64Gen_test_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 0x80000000); + sint32 jumpInstructionJumpToSignedShift = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_EQUAL, 0); + //sint32 jumpInstructionJumpToEnd = x64GenContext->codeBufferIndex; + //x64Gen_jmpc(x64GenContext, X86_CONDITION_EQUAL, 0); + // unsigned shift (MSB of input register is not set) + for(sint32 b=0; b<6; b++) + { + x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1<codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set + if( b == 5 ) + { + x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<codeBufferIndex); + } + sint32 jumpInstructionJumpToEnd = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NONE, 0); + // signed shift + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionJumpToSignedShift, x64GenContext->codeBufferIndex); + for(sint32 b=0; b<6; b++) + { + // check if we need to shift by (1<codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set + // set ca if any non-zero bit is shifted out + x64Gen_test_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 
(1<<(1<codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if no bit is set + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionJumpToAfterCa, x64GenContext->codeBufferIndex); + // arithmetic shift + if( b == 5 ) + { + // copy sign bit into all bits + x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<codeBufferIndex); + } + // end + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionJumpToEnd, x64GenContext->codeBufferIndex); + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP); + // update CR if requested + // todo + } + else if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_DIVIDE_UNSIGNED ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), REG_EAX); + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]), REG_EDX); + // mov operand 2 to temp register + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2); + // mov operand1 to EAX + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_EAX, rRegOperand1); + // sign or zero extend EAX to EDX:EAX based on division sign mode + if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED ) + x64Gen_cdq(x64GenContext); + else + x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_EDX, REG_EDX); + // make sure we avoid division by zero + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); + 
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 3); + // divide + if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED ) + x64Gen_idiv_reg64Low32(x64GenContext, REG_RESV_TEMP); + else + x64Gen_div_reg64Low32(x64GenContext, REG_RESV_TEMP); + // result of division is now stored in EAX, move it to result register + if( rRegResult != REG_EAX ) + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_EAX); + // restore EAX / EDX + if( rRegResult != REG_RAX ) + x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0])); + if( rRegResult != REG_RDX ) + x64Emit_mov_reg64_mem32(x64GenContext, REG_EDX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1])); + // set cr bits if requested + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_ARITHMETIC ) + { + assert_dbg(); + } + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED || imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), REG_EAX); + x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]), REG_EDX); + // mov operand 2 to temp register + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2); + // mov operand1 to EAX + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, 
REG_EAX, rRegOperand1); + if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED ) + { + // zero extend EAX to EDX:EAX + x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_EDX, REG_EDX); + } + else + { + // sign extend EAX to EDX:EAX + x64Gen_cdq(x64GenContext); + } + // multiply + if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED ) + x64Gen_imul_reg64Low32(x64GenContext, REG_RESV_TEMP); + else + x64Gen_mul_reg64Low32(x64GenContext, REG_RESV_TEMP); + // result of multiplication is now stored in EDX:EAX, move it to result register + if( rRegResult != REG_EDX ) + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_EDX); + // restore EAX / EDX + if( rRegResult != REG_RAX ) + x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0])); + if( rRegResult != REG_RDX ) + x64Emit_mov_reg64_mem32(x64GenContext, REG_EDX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1])); + // set cr bits if requested + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_ORC ) + { + // registerResult = registerOperand1 | ~registerOperand2 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult); + sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA); + sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB); + + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2); + x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP); + if( rRegResult != rRegOperand1 ) + 
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1); + x64Gen_or_reg64Low32_reg64Low32(x64GenContext, rRegResult, REG_RESV_TEMP); + + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + return true; + } + } + else + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r_r(): Unsupported operation 0x%x\n", imlInstruction->operation); + return false; + } + return true; +} + +bool PPCRecompilerX64Gen_imlInstruction_r_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + if( imlInstruction->operation == PPCREC_IML_OP_ADD ) + { + // registerResult = registerOperand + immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult); + sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA); + uint32 immU32 = (uint32)imlInstruction->op_r_r_s32.immS32; + if( rRegResult != rRegOperand ) + { + // copy value to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand); + } + x64Gen_add_reg64Low32_imm32(x64GenContext, rRegResult, (uint32)immU32); + } + else if( imlInstruction->operation == PPCREC_IML_OP_ADD_UPDATE_CARRY ) + { + // registerResult = registerOperand + immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult); + sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA); + uint32 immU32 = 
(uint32)imlInstruction->op_r_r_s32.immS32; + if( rRegResult != rRegOperand ) + { + // copy value to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand); + } + x64Gen_add_reg64Low32_imm32(x64GenContext, rRegResult, (uint32)immU32); + // update carry flag + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca)); + // set cr bits if enabled + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL ) + { + assert_dbg(); + } + sint32 crRegister = imlInstruction->crRegister; + //x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); + //x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + //x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_SUBFC ) + { + // registerResult = immS32 - registerOperand + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult); + sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA); + sint32 immS32 = (sint32)imlInstruction->op_r_r_s32.immS32; + if( rRegResult != rRegOperand ) + { + // copy value to destination register before doing addition + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand); + } + // set carry to zero + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // 
((~a+b)<~a) == true -> ca = 1 + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand); + x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP); + x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (uint32)immS32); + x64Gen_not_reg64Low32(x64GenContext, rRegOperand); + x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperand); + x64Gen_not_reg64Low32(x64GenContext, rRegOperand); + sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0); + // reset carry flag + jump destination afterwards + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex); + // OR ((~a+b+1)<1) == true -> ca = 1 + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand); + // todo: Optimize by reusing result in REG_RESV_TEMP from above and only add 1 + x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP); + x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (uint32)immS32); + x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1); + sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0); + // reset carry flag + jump destination afterwards + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex); + // do actual computation of value, note: a - b is equivalent to a + ~b + 1 + x64Gen_not_reg64Low32(x64GenContext, rRegResult); + x64Gen_add_reg64Low32_imm32(x64GenContext, rRegResult, (uint32)immS32 + 1); + } + else if( imlInstruction->operation == PPCREC_IML_OP_RLWIMI ) + { + // registerResult = ((registerResult<<op_r_r_s32.immS32; + 
uint32 mb = (vImm>>0)&0xFF; + uint32 me = (vImm>>8)&0xFF; + uint32 sh = (vImm>>16)&0xFF; + uint32 mask = ppc_mask(mb, me); + // save cr + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // copy rS to temporary register + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r_s32.registerA)); + // rotate destination register + if( sh ) + x64Gen_rol_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (uint8)sh&0x1F); + // AND destination register with inverted mask + x64Gen_and_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), ~mask); + // AND temporary rS register with mask + x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, mask); + // OR result with temporary + x64Gen_or_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), REG_RESV_TEMP); + } + else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED ) + { + // registerResult = registerOperand * immS32 + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult); + sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA); + sint32 immS32 = (uint32)imlInstruction->op_r_r_s32.immS32; + x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (sint64)immS32); // todo: Optimize + if( rRegResult != rRegOperand ) + x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand); + x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, REG_RESV_TEMP); + } + else if( imlInstruction->operation == PPCREC_IML_OP_SRAW ) + { + // registerResult = registerOperand>>SH and set xer ca flag + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + uint32 sh = (uint32)imlInstruction->op_r_r_s32.immS32; + // MOV registerResult, registerOperand (if different) + if( 
imlInstruction->op_r_r_s32.registerA != imlInstruction->op_r_r_s32.registerResult ) + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerA)); + // todo: Detect if we don't need to update carry + // generic case + // TEST registerResult, (1<<(SH+1))-1 + uint32 caTestMask = 0; + if (sh >= 31) + caTestMask = 0x7FFFFFFF; + else + caTestMask = (1 << (sh)) - 1; + x64Gen_test_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), caTestMask); + // SETNE/NZ [ESP+XER_CA] + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_NOT_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, xer_ca)); + // SAR registerResult, SH + x64Gen_sar_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), sh); + // JNS (if sign not set) + sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_SIGN, 0); // todo: Can use 2-byte form of jump instruction here + // MOV BYTE [ESP+xer_ca], 0 + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0); + // jump destination + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex); + // CR update + if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER) + { + sint32 crRegister = imlInstruction->crRegister; + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerResult)); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_LT)); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_GT)); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, 
REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_EQ)); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT || + imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT ) + { + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + // MOV registerResult, registerOperand (if different) + if( imlInstruction->op_r_r_s32.registerA != imlInstruction->op_r_r_s32.registerResult ) + x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerA)); + // Shift + if( imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT ) + x64Gen_shl_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), imlInstruction->op_r_r_s32.immS32); + else + x64Gen_shr_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), imlInstruction->op_r_r_s32.immS32); + // CR update + if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER) + { + // since SHL/SHR only modifies the OF flag we need another TEST reg,reg here + x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerResult)); + PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction); + } + } + else + { + debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r_s32(): Unsupported operation 0x%x\n", imlInstruction->operation); + return false; + } + return true; +} + +bool PPCRecompilerX64Gen_imlInstruction_conditionalJump(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlSegment_t* imlSegment, PPCRecImlInstruction_t* imlInstruction) +{ + if( imlInstruction->op_conditionalJump.condition == PPCREC_JUMP_CONDITION_NONE ) + { + // jump always + if 
(imlInstruction->op_conditionalJump.jumpAccordingToSegment) + { + // jump to segment + if (imlSegment->nextSegmentBranchTaken == nullptr) + assert_dbg(); + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_SEGMENT, imlSegment->nextSegmentBranchTaken); + x64Gen_jmp_imm32(x64GenContext, 0); + } + else + { + // deprecated (jump to jumpmark) + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmp_imm32(x64GenContext, 0); + } + } + else + { + if (imlInstruction->op_conditionalJump.jumpAccordingToSegment) + assert_dbg(); + // generate jump update marker + if( imlInstruction->op_conditionalJump.crRegisterIndex == PPCREC_CR_TEMPORARY || imlInstruction->op_conditionalJump.crRegisterIndex >= 8 ) + { + // temporary cr is used, which means we use the currently active eflags + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + sint32 condition = imlInstruction->op_conditionalJump.condition; + if( condition == PPCREC_JUMP_CONDITION_E ) + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + else if( condition == PPCREC_JUMP_CONDITION_NE ) + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_EQUAL, 0); + else + assert_dbg(); + } + else + { + uint8 crBitIndex = imlInstruction->op_conditionalJump.crRegisterIndex*4 + imlInstruction->op_conditionalJump.crBitIndex; + if (imlInstruction->op_conditionalJump.crRegisterIndex == x64GenContext->activeCRRegister ) + { + if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC) + { + if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_LT) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? 
X86_CONDITION_CARRY : X86_CONDITION_NOT_CARRY, 0); + return true; + } + else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_EQ) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, 0); + return true; + } + else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_GT) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_UNSIGNED_ABOVE : X86_CONDITION_UNSIGNED_BELOW_EQUAL, 0); + return true; + } + } + else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC) + { + if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_LT) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGNED_LESS : X86_CONDITION_SIGNED_GREATER_EQUAL, 0); + return true; + } + else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_EQ) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? 
X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, 0); + return true; + } + else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_GT) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, 0); + return true; + } + } + else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_LOGICAL) + { + if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_LT) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGN : X86_CONDITION_NOT_SIGN, 0); + return true; + } + else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_EQ) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, 0); + return true; + } + else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_GT) + { + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? 
X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, 0); + return true; + } + } + } + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + crBitIndex * sizeof(uint8), 0); + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + if( imlInstruction->op_conditionalJump.bitMustBeSet ) + { + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_CARRY, 0); + } + else + { + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_CARRY, 0); + } + } + } + return true; +} + +bool PPCRecompilerX64Gen_imlInstruction_conditionalJumpCycleCheck(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + // some tests (all performed on a i7-4790K) + // 1) DEC [mem] + JNS has significantly worse performance than BT + JNC (probably due to additional memory write) + // 2) CMP [mem], 0 + JG has about equal (or slightly worse) performance than BT + JNC + + // BT + x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), 31); // check if negative + PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress); + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_CARRY, 0); + return true; +} + +/* +* PPC condition register operation +*/ +bool PPCRecompilerX64Gen_imlInstruction_cr(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); // while these instruction do not directly affect eflags, they change the CR bit + if (imlInstruction->operation == PPCREC_IML_OP_CR_CLEAR) + { + // clear cr bit + 
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crD, 0); + return true; + } + else if (imlInstruction->operation == PPCREC_IML_OP_CR_SET) + { + // set cr bit + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crD, 1); + return true; + } + else if(imlInstruction->operation == PPCREC_IML_OP_CR_OR || imlInstruction->operation == PPCREC_IML_OP_CR_ORC || + imlInstruction->operation == PPCREC_IML_OP_CR_AND || imlInstruction->operation == PPCREC_IML_OP_CR_ANDC ) + { + x64Emit_movZX_reg64_mem8(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crB); + if (imlInstruction->operation == PPCREC_IML_OP_CR_ORC || imlInstruction->operation == PPCREC_IML_OP_CR_ANDC) + { + return false; // untested + x64Gen_int3(x64GenContext); + x64Gen_xor_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1); // complement + } + if(imlInstruction->operation == PPCREC_IML_OP_CR_OR || imlInstruction->operation == PPCREC_IML_OP_CR_ORC) + x64Gen_or_reg64Low8_mem8Reg64(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crA); + else + x64Gen_and_reg64Low8_mem8Reg64(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crA); + + x64Gen_mov_mem8Reg64_reg64Low8(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crD); + + return true; + } + else + { + assert_dbg(); + } + return false; +} + + +void PPCRecompilerX64Gen_imlInstruction_ppcEnter(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + imlInstruction->op_ppcEnter.x64Offset = x64GenContext->codeBufferIndex; + // generate code + if( ppcImlGenContext->hasFPUInstruction ) + { + // old FPU unavailable 
code + //PPCRecompilerX86_crConditionFlags_saveBeforeOverwrite(PPCRecFunction, ppcImlGenContext, x64GenContext); + //// skip if FP bit in MSR is set + //// #define MSR_FP (1<<13) + //x64Gen_bt_mem8(x64GenContext, REG_ESP, offsetof(PPCInterpreter_t, msr), 13); + //uint32 jmpCodeOffset = x64GenContext->codeBufferIndex; + //x64Gen_jmpc(x64GenContext, X86_CONDITION_CARRY, 0); + //x64Gen_mov_reg32_imm32(x64GenContext, REG_EAX, imlInstruction->op_ppcEnter.ppcAddress&0x7FFFFFFF); + //PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X86_RELOC_MAKE_RELATIVE); + //x64Gen_jmp_imm32(x64GenContext, (uint32)PPCRecompiler_recompilerCallEscapeAndCallFPUUnavailable); + //// patch jump + //*(uint32*)(x64GenContext->codeBuffer+jmpCodeOffset+2) = x64GenContext->codeBufferIndex-jmpCodeOffset-6; + } +} + +void PPCRecompilerX64Gen_imlInstruction_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + uint32 name = imlInstruction->op_r_name.name; + if( name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0+32 ) + { + x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, gpr)+sizeof(uint32)*(name-PPCREC_NAME_R0)); + } + else if( name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0+999 ) + { + sint32 sprIndex = (name - PPCREC_NAME_SPR0); + if (sprIndex == SPR_LR) + x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr.LR)); + else if (sprIndex == SPR_CTR) + x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr.CTR)); + else if (sprIndex == SPR_XER) + x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr.XER)); + else if (sprIndex >= SPR_UGQR0 && sprIndex 
<= SPR_UGQR7) + { + sint32 memOffset = offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0); + x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, memOffset); + } + else + assert_dbg(); + //x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr)+sizeof(uint32)*(name-PPCREC_NAME_SPR0)); + } + else + assert_dbg(); +} + +void PPCRecompilerX64Gen_imlInstruction_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + uint32 name = imlInstruction->op_r_name.name; + if( name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0+32 ) + { + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, gpr)+sizeof(uint32)*(name-PPCREC_NAME_R0), tempToRealRegister(imlInstruction->op_r_name.registerIndex)); + } + else if( name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0+999 ) + { + uint32 sprIndex = (name - PPCREC_NAME_SPR0); + if (sprIndex == SPR_LR) + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), tempToRealRegister(imlInstruction->op_r_name.registerIndex)); + else if (sprIndex == SPR_CTR) + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.CTR), tempToRealRegister(imlInstruction->op_r_name.registerIndex)); + else if (sprIndex == SPR_XER) + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.XER), tempToRealRegister(imlInstruction->op_r_name.registerIndex)); + else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7) + { + sint32 memOffset = offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0); + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, memOffset, tempToRealRegister(imlInstruction->op_r_name.registerIndex)); + } + else + assert_dbg(); + } 
+ else + assert_dbg(); +} + +uint8* codeMemoryBlock = nullptr; +sint32 codeMemoryBlockIndex = 0; +sint32 codeMemoryBlockSize = 0; + +std::mutex mtx_allocExecutableMemory; + +uint8* PPCRecompilerX86_allocateExecutableMemory(sint32 size) +{ + std::lock_guard lck(mtx_allocExecutableMemory); + if( codeMemoryBlockIndex+size > codeMemoryBlockSize ) + { + // allocate new block + codeMemoryBlockSize = std::max(1024*1024*4, size+1024); // 4MB (or more if the function is larger than 4MB) + codeMemoryBlockIndex = 0; + codeMemoryBlock = (uint8*)MemMapper::AllocateMemory(nullptr, codeMemoryBlockSize, MemMapper::PAGE_PERMISSION::P_RWX); + } + uint8* codeMem = codeMemoryBlock + codeMemoryBlockIndex; + codeMemoryBlockIndex += size; + // pad to 4 byte alignment + while (codeMemoryBlockIndex & 3) + { + codeMemoryBlock[codeMemoryBlockIndex] = 0x90; + codeMemoryBlockIndex++; + } + return codeMem; +} + +void PPCRecompiler_dumpIML(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext); + +bool PPCRecompiler_generateX64Code(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext) +{ + x64GenContext_t x64GenContext = {0}; + x64GenContext.codeBufferSize = 1024; + x64GenContext.codeBuffer = (uint8*)malloc(x64GenContext.codeBufferSize); + x64GenContext.codeBufferIndex = 0; + x64GenContext.activeCRRegister = PPC_REC_INVALID_REGISTER; + + // generate iml instruction code + bool codeGenerationFailed = false; + for(sint32 s=0; ssegmentListCount; s++) + { + PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s]; + ppcImlGenContext->segmentList[s]->x64Offset = x64GenContext.codeBufferIndex; + for(sint32 i=0; iimlListCount; i++) + { + PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i; + + if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME ) + { + PPCRecompilerX64Gen_imlInstruction_r_name(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R ) + { + 
PPCRecompilerX64Gen_imlInstruction_name_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R ) + { + if( PPCRecompilerX64Gen_imlInstruction_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) + { + codeGenerationFailed = true; + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32) + { + if (PPCRecompilerX64Gen_imlInstruction_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) + { + codeGenerationFailed = true; + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32) + { + if (PPCRecompilerX64Gen_imlInstruction_conditional_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_S32 ) + { + if( PPCRecompilerX64Gen_imlInstruction_r_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_R ) + { + if( PPCRecompilerX64Gen_imlInstruction_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP ) + { + if( PPCRecompilerX64Gen_imlInstruction_conditionalJump(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlSegment, imlInstruction) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK ) + { + PPCRecompilerX64Gen_imlInstruction_conditionalJumpCycleCheck(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO ) + { + if( PPCRecompilerX64Gen_imlInstruction_macro(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) + { + codeGenerationFailed = true; + } + } + else if( 
imlInstruction->type == PPCREC_IML_TYPE_LOAD ) + { + if( PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED ) + { + if( PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_STORE ) + { + if( PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED ) + { + if( PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) + { + codeGenerationFailed = true; + } + } + else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM) + { + PPCRecompilerX64Gen_imlInstruction_mem2mem(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_CR ) + { + if( PPCRecompilerX64Gen_imlInstruction_cr(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK ) + { + // no op + } + else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP ) + { + // no op + } + else if( imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER ) + { + PPCRecompilerX64Gen_imlInstruction_ppcEnter(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME ) + { + PPCRecompilerX64Gen_imlInstruction_fpr_r_name(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R ) + { + 
PPCRecompilerX64Gen_imlInstruction_fpr_name_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD ) + { + if( PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED ) + { + if( PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE ) + { + if( PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED ) + { + if( PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false ) + { + codeGenerationFailed = true; + } + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R ) + { + PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R ) + { + PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R ) + { + PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R ) + { + PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction); + } + else + { + debug_printf("PPCRecompiler_generateX64Code(): Unsupported iml type 0x%x\n", imlInstruction->type); + assert_dbg(); + } + } + } + // handle failed 
code generation + if( codeGenerationFailed ) + { + free(x64GenContext.codeBuffer); + if (x64GenContext.relocateOffsetTable) + free(x64GenContext.relocateOffsetTable); + return false; + } + // allocate executable memory + uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.codeBufferIndex); + size_t baseAddress = (size_t)executableMemory; + // fix relocs + for(sint32 i=0; isegmentListCount; s++) + { + if (ppcImlGenContext->segmentList[s]->isJumpDestination && ppcImlGenContext->segmentList[s]->jumpDestinationPPCAddress == ppcOffset) + { + x64Offset = ppcImlGenContext->segmentList[s]->x64Offset; + break; + } + } + if (x64Offset == 0xFFFFFFFF) + { + debug_printf("Recompiler could not resolve jump (function at 0x%08x)\n", PPCRecFunction->ppcAddress); + // todo: Cleanup + return false; + } + } + else + { + PPCRecImlSegment_t* destSegment = (PPCRecImlSegment_t*)x64GenContext.relocateOffsetTable[i].extraInfo; + x64Offset = destSegment->x64Offset; + } + uint32 relocBase = x64GenContext.relocateOffsetTable[i].offset; + uint8* relocInstruction = x64GenContext.codeBuffer+relocBase; + if( relocInstruction[0] == 0x0F && (relocInstruction[1] >= 0x80 && relocInstruction[1] <= 0x8F) ) + { + // Jcc relativeImm32 + sint32 distanceNearJump = (sint32)((baseAddress + x64Offset) - (baseAddress + relocBase + 2)); + if (distanceNearJump >= -128 && distanceNearJump < 127) // disabled + { + // convert to near Jcc + *(uint8*)(relocInstruction + 0) = (uint8)(relocInstruction[1]-0x80 + 0x70); + // patch offset + *(uint8*)(relocInstruction + 1) = (uint8)distanceNearJump; + // replace unused 4 bytes with NOP instruction + relocInstruction[2] = 0x0F; + relocInstruction[3] = 0x1F; + relocInstruction[4] = 0x40; + relocInstruction[5] = 0x00; + } + else + { + // patch offset + *(uint32*)(relocInstruction + 2) = (uint32)((baseAddress + x64Offset) - (baseAddress + relocBase + 6)); + } + } + else if( relocInstruction[0] == 0xE9 ) + { + // JMP relativeImm32 + 
*(uint32*)(relocInstruction+1) = (uint32)((baseAddress+x64Offset)-(baseAddress+relocBase+5)); + } + else + assert_dbg(); + } + else + { + assert_dbg(); + } + } + + // copy code to executable memory + memcpy(executableMemory, x64GenContext.codeBuffer, x64GenContext.codeBufferIndex); + free(x64GenContext.codeBuffer); + x64GenContext.codeBuffer = nullptr; + if (x64GenContext.relocateOffsetTable) + free(x64GenContext.relocateOffsetTable); + // set code + PPCRecFunction->x86Code = executableMemory; + PPCRecFunction->x86Size = x64GenContext.codeBufferIndex; + return true; +} + +void PPCRecompilerX64Gen_generateEnterRecompilerCode() +{ + x64GenContext_t x64GenContext = {0}; + x64GenContext.codeBufferSize = 1024; + x64GenContext.codeBuffer = (uint8*)malloc(x64GenContext.codeBufferSize); + x64GenContext.codeBufferIndex = 0; + x64GenContext.activeCRRegister = PPC_REC_INVALID_REGISTER; + + // start of recompiler entry function + x64Gen_push_reg64(&x64GenContext, REG_RAX); + x64Gen_push_reg64(&x64GenContext, REG_RCX); + x64Gen_push_reg64(&x64GenContext, REG_RDX); + x64Gen_push_reg64(&x64GenContext, REG_RBX); + x64Gen_push_reg64(&x64GenContext, REG_RBP); + x64Gen_push_reg64(&x64GenContext, REG_RDI); + x64Gen_push_reg64(&x64GenContext, REG_RSI); + x64Gen_push_reg64(&x64GenContext, REG_R8); + x64Gen_push_reg64(&x64GenContext, REG_R9); + x64Gen_push_reg64(&x64GenContext, REG_R10); + x64Gen_push_reg64(&x64GenContext, REG_R11); + x64Gen_push_reg64(&x64GenContext, REG_R12); + x64Gen_push_reg64(&x64GenContext, REG_R13); + x64Gen_push_reg64(&x64GenContext, REG_R14); + x64Gen_push_reg64(&x64GenContext, REG_R15); + + // 000000007775EF04 | E8 00 00 00 00 call +0x00 + x64Gen_writeU8(&x64GenContext, 0xE8); + x64Gen_writeU8(&x64GenContext, 0x00); + x64Gen_writeU8(&x64GenContext, 0x00); + x64Gen_writeU8(&x64GenContext, 0x00); + x64Gen_writeU8(&x64GenContext, 0x00); + //000000007775EF09 | 48 83 04 24 05 add qword ptr ss:[rsp],5 + x64Gen_writeU8(&x64GenContext, 0x48); + 
x64Gen_writeU8(&x64GenContext, 0x83); + x64Gen_writeU8(&x64GenContext, 0x04); + x64Gen_writeU8(&x64GenContext, 0x24); + uint32 jmpPatchOffset = x64GenContext.codeBufferIndex; + x64Gen_writeU8(&x64GenContext, 0); // skip the distance until after the JMP + x64Emit_mov_mem64_reg64(&x64GenContext, REG_RDX, offsetof(PPCInterpreter_t, rspTemp), REG_RSP); + + + // MOV RSP, RDX (ppc interpreter instance) + x64Gen_mov_reg64_reg64(&x64GenContext, REG_RSP, REG_RDX); + // MOV R15, ppcRecompilerInstanceData + x64Gen_mov_reg64_imm64(&x64GenContext, REG_R15, (uint64)ppcRecompilerInstanceData); + // MOV R13, memory_base + x64Gen_mov_reg64_imm64(&x64GenContext, REG_R13, (uint64)memory_base); + + //JMP recFunc + x64Gen_jmp_reg64(&x64GenContext, REG_RCX); // call argument 1 + + x64GenContext.codeBuffer[jmpPatchOffset] = (x64GenContext.codeBufferIndex-(jmpPatchOffset-4)); + + //recompilerExit1: + x64Gen_pop_reg64(&x64GenContext, REG_R15); + x64Gen_pop_reg64(&x64GenContext, REG_R14); + x64Gen_pop_reg64(&x64GenContext, REG_R13); + x64Gen_pop_reg64(&x64GenContext, REG_R12); + x64Gen_pop_reg64(&x64GenContext, REG_R11); + x64Gen_pop_reg64(&x64GenContext, REG_R10); + x64Gen_pop_reg64(&x64GenContext, REG_R9); + x64Gen_pop_reg64(&x64GenContext, REG_R8); + x64Gen_pop_reg64(&x64GenContext, REG_RSI); + x64Gen_pop_reg64(&x64GenContext, REG_RDI); + x64Gen_pop_reg64(&x64GenContext, REG_RBP); + x64Gen_pop_reg64(&x64GenContext, REG_RBX); + x64Gen_pop_reg64(&x64GenContext, REG_RDX); + x64Gen_pop_reg64(&x64GenContext, REG_RCX); + x64Gen_pop_reg64(&x64GenContext, REG_RAX); + // RET + x64Gen_ret(&x64GenContext); + + uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.codeBufferIndex); + // copy code to executable memory + memcpy(executableMemory, x64GenContext.codeBuffer, x64GenContext.codeBufferIndex); + free(x64GenContext.codeBuffer); + PPCRecompiler_enterRecompilerCode = (void ATTR_MS_ABI (*)(uint64,uint64))executableMemory; +} + + +void* 
PPCRecompilerX64Gen_generateLeaveRecompilerCode() +{ + x64GenContext_t x64GenContext = {0}; + x64GenContext.codeBufferSize = 128; + x64GenContext.codeBuffer = (uint8*)malloc(x64GenContext.codeBufferSize); + x64GenContext.codeBufferIndex = 0; + x64GenContext.activeCRRegister = PPC_REC_INVALID_REGISTER; + + // update instruction pointer + // LR is in EDX + x64Emit_mov_mem32_reg32(&x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer), REG_EDX); + + // MOV RSP, [ppcRecompilerX64_rspTemp] + x64Emit_mov_reg64_mem64(&x64GenContext, REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp)); + + // RET + x64Gen_ret(&x64GenContext); + + uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.codeBufferIndex); + // copy code to executable memory + memcpy(executableMemory, x64GenContext.codeBuffer, x64GenContext.codeBufferIndex); + free(x64GenContext.codeBuffer); + return executableMemory; +} + +void PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions() +{ + PPCRecompilerX64Gen_generateEnterRecompilerCode(); + PPCRecompiler_leaveRecompilerCode_unvisited = (void ATTR_MS_ABI (*)())PPCRecompilerX64Gen_generateLeaveRecompilerCode(); + PPCRecompiler_leaveRecompilerCode_visited = (void ATTR_MS_ABI (*)())PPCRecompilerX64Gen_generateLeaveRecompilerCode(); + cemu_assert_debug(PPCRecompiler_leaveRecompilerCode_unvisited != PPCRecompiler_leaveRecompilerCode_visited); +} \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64.h b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.h similarity index 81% rename from src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64.h rename to src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.h index de415ca9..1d37a77e 100644 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64.h +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.h @@ -1,56 +1,104 @@ -#include "../PPCRecompiler.h" // todo - get rid of dependency - -#include "x86Emitter.h" - -struct 
x64RelocEntry_t +typedef struct { - x64RelocEntry_t(uint32 offset, void* extraInfo) : offset(offset), extraInfo(extraInfo) {}; - uint32 offset; + uint8 type; void* extraInfo; -}; +}x64RelocEntry_t; -struct x64GenContext_t +typedef struct { - IMLSegment* currentSegment{}; - x86Assembler64* emitter; - sint32 m_currentInstructionEmitIndex; - - x64GenContext_t() - { - emitter = new x86Assembler64(); - } - - ~x64GenContext_t() - { - delete emitter; - } - - IMLInstruction* GetNextInstruction(sint32 relativeIndex = 1) - { - sint32 index = m_currentInstructionEmitIndex + relativeIndex; - if(index < 0 || index >= (sint32)currentSegment->imlList.size()) - return nullptr; - return currentSegment->imlList.data() + index; - } - + uint8* codeBuffer; + sint32 codeBufferIndex; + sint32 codeBufferSize; + // cr state + sint32 activeCRRegister; // current x86 condition flags reflect this cr* register + sint32 activeCRState; // describes the way in which x86 flags map to the cr register (signed / unsigned) // relocate offsets - std::vector relocateOffsetTable2; -}; + x64RelocEntry_t* relocateOffsetTable; + sint32 relocateOffsetTableSize; + sint32 relocateOffsetTableCount; +}x64GenContext_t; + +// Some of these are defined by winnt.h and gnu headers +#undef REG_EAX +#undef REG_ECX +#undef REG_EDX +#undef REG_EBX +#undef REG_ESP +#undef REG_EBP +#undef REG_ESI +#undef REG_EDI +#undef REG_NONE +#undef REG_RAX +#undef REG_RCX +#undef REG_RDX +#undef REG_RBX +#undef REG_RSP +#undef REG_RBP +#undef REG_RSI +#undef REG_RDI +#undef REG_R8 +#undef REG_R9 +#undef REG_R10 +#undef REG_R11 +#undef REG_R12 +#undef REG_R13 +#undef REG_R14 +#undef REG_R15 + +#define REG_EAX 0 +#define REG_ECX 1 +#define REG_EDX 2 +#define REG_EBX 3 +#define REG_ESP 4 // reserved for low half of hCPU pointer +#define REG_EBP 5 +#define REG_ESI 6 +#define REG_EDI 7 +#define REG_NONE -1 + +#define REG_RAX 0 +#define REG_RCX 1 +#define REG_RDX 2 +#define REG_RBX 3 +#define REG_RSP 4 // reserved for hCPU pointer +#define 
REG_RBP 5 +#define REG_RSI 6 +#define REG_RDI 7 +#define REG_R8 8 +#define REG_R9 9 +#define REG_R10 10 +#define REG_R11 11 +#define REG_R12 12 +#define REG_R13 13 // reserved to hold pointer to memory base? (Not decided yet) +#define REG_R14 14 // reserved as temporary register +#define REG_R15 15 // reserved for pointer to ppcRecompilerInstanceData + +#define REG_AL 0 +#define REG_CL 1 +#define REG_DL 2 +#define REG_BL 3 +#define REG_AH 4 +#define REG_CH 5 +#define REG_DH 6 +#define REG_BH 7 // reserved registers -#define REG_RESV_TEMP (X86_REG_R14) -#define REG_RESV_HCPU (X86_REG_RSP) -#define REG_RESV_MEMBASE (X86_REG_R13) -#define REG_RESV_RECDATA (X86_REG_R15) +#define REG_RESV_TEMP (REG_R14) +#define REG_RESV_HCPU (REG_RSP) +#define REG_RESV_MEMBASE (REG_R13) +#define REG_RESV_RECDATA (REG_R15) // reserved floating-point registers #define REG_RESV_FPR_TEMP (15) -#define reg32ToReg16(__x) (__x) // deprecated -// deprecated condition flags +extern sint32 x64Gen_registerMap[12]; + +#define tempToRealRegister(__x) (x64Gen_registerMap[__x]) +#define tempToRealFPRRegister(__x) (__x) +#define reg32ToReg16(__x) (__x) + enum { X86_CONDITION_EQUAL, // or zero @@ -71,23 +119,36 @@ enum X86_CONDITION_NONE, // no condition, jump always }; -bool PPCRecompiler_generateX64Code(struct PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext); +#define PPCREC_CR_TEMPORARY (8) // never stored +#define PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC (0) // for signed arithmetic operations (ADD, CMPI) +#define PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC (1) // for unsigned arithmetic operations (ADD, CMPI) +#define PPCREC_CR_STATE_TYPE_LOGICAL (2) // for unsigned operations (CMPLI) + +#define X86_RELOC_MAKE_RELATIVE (0) // make code imm relative to instruction +#define X64_RELOC_LINK_TO_PPC (1) // translate from ppc address to x86 offset +#define X64_RELOC_LINK_TO_SEGMENT (2) // link to beginning of segment + +#define PPC_X64_GPR_USABLE_REGISTERS (16-4) +#define 
PPC_X64_FPR_USABLE_REGISTERS (16-1) // Use XMM0 - XMM14, XMM15 is the temp register + + +bool PPCRecompiler_generateX64Code(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext); + +void PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext); void PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext_t* x64GenContext, sint32 jumpInstructionOffset, sint32 destinationOffset); void PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions(); -void PPCRecompilerX64Gen_imlInstruction_fpr_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); -void PPCRecompilerX64Gen_imlInstruction_fpr_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); -bool PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, bool indexed); -bool PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction, bool indexed); +void PPCRecompilerX64Gen_imlInstruction_fpr_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction); +void PPCRecompilerX64Gen_imlInstruction_fpr_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction); +bool PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed); +bool 
PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed); -void PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); -void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); -void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); -void PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); - -void PPCRecompilerX64Gen_imlInstruction_fpr_compare(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, IMLInstruction* imlInstruction); +void PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction); +void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction); +void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction); +void PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction); // ASM gen void x64Gen_writeU8(x64GenContext_t* x64GenContext, uint8 v); @@ 
-135,6 +196,9 @@ void x64Gen_or_reg64Low8_mem8Reg64(x64GenContext_t* x64GenContext, sint32 dstReg void x64Gen_and_reg64Low8_mem8Reg64(x64GenContext_t* x64GenContext, sint32 dstRegister, sint32 memRegister64, sint32 memImmS32); void x64Gen_mov_mem8Reg64_reg64Low8(x64GenContext_t* x64GenContext, sint32 dstRegister, sint32 memRegister64, sint32 memImmS32); +void x64Gen_lock_cmpxchg_mem32Reg64PlusReg64_reg64(x64GenContext_t* x64GenContext, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32, sint32 srcRegister); +void x64Gen_lock_cmpxchg_mem32Reg64_reg64(x64GenContext_t* x64GenContext, sint32 memRegister64, sint32 memImmS32, sint32 srcRegister); + void x64Gen_add_reg64_reg64(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister); void x64Gen_add_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister); void x64Gen_add_reg64_imm32(x64GenContext_t* x64GenContext, sint32 srcRegister, uint32 immU32); @@ -143,6 +207,9 @@ void x64Gen_sub_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 des void x64Gen_sub_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegister, uint32 immU32); void x64Gen_sub_reg64_imm32(x64GenContext_t* x64GenContext, sint32 srcRegister, uint32 immU32); void x64Gen_sub_mem32reg64_imm32(x64GenContext_t* x64GenContext, sint32 memRegister, sint32 memImmS32, uint64 immU32); +void x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister); +void x64Gen_adc_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister); +void x64Gen_adc_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegister, uint32 immU32); void x64Gen_dec_mem32(x64GenContext_t* x64GenContext, sint32 memoryRegister, uint32 memoryImmU32); void x64Gen_imul_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 operandRegister); void x64Gen_idiv_reg64Low32(x64GenContext_t* x64GenContext, 
sint32 operandRegister); @@ -174,7 +241,9 @@ void x64Gen_not_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister); void x64Gen_neg_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister); void x64Gen_cdq(x64GenContext_t* x64GenContext); +void x64Gen_bswap_reg64(x64GenContext_t* x64GenContext, sint32 destRegister); void x64Gen_bswap_reg64Lower32bit(x64GenContext_t* x64GenContext, sint32 destRegister); +void x64Gen_bswap_reg64Lower16bit(x64GenContext_t* x64GenContext, sint32 destRegister); void x64Gen_lzcnt_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister); void x64Gen_bsr_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister); @@ -205,7 +274,6 @@ void x64Gen_movddup_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegi void x64Gen_movhlps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); void x64Gen_movsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); void x64Gen_movsd_memReg64_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32); -void x64Gen_movsd_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32); void x64Gen_movlpd_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32); void x64Gen_unpcklpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); void x64Gen_unpckhpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); @@ -231,7 +299,6 @@ void x64Gen_andps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegist void x64Gen_pcmpeqd_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, uint32 memReg, uint32 memImmS32); void x64Gen_cvttpd2dq_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, 
sint32 xmmRegisterSrc); void x64Gen_cvttsd2si_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 registerDest, sint32 xmmRegisterSrc); -void x64Gen_cvtsi2sd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 registerSrc); void x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); void x64Gen_cvtpd2ps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); void x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc); @@ -262,8 +329,4 @@ void x64Gen_movBEZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext_t* x64G void x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext_t* x64GenContext, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32, sint32 srcRegister); void x64Gen_shrx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); -void x64Gen_shrx_reg32_reg32_reg32(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); -void x64Gen_sarx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); -void x64Gen_sarx_reg32_reg32_reg32(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); -void x64Gen_shlx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); -void x64Gen_shlx_reg32_reg32_reg32(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); \ No newline at end of file +void x64Gen_shlx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB); \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64AVX.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64AVX.cpp similarity index 92% rename from 
src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64AVX.cpp rename to src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64AVX.cpp index b0ef8640..619c3985 100644 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64AVX.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64AVX.cpp @@ -1,4 +1,5 @@ -#include "BackendX64.h" +#include "PPCRecompiler.h" +#include "PPCRecompilerX64.h" void _x64Gen_writeMODRMDeprecated(x64GenContext_t* x64GenContext, sint32 dataRegister, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32); @@ -20,10 +21,11 @@ void _x64Gen_vex128_nds(x64GenContext_t* x64GenContext, uint8 opcodeMap, uint8 a x64Gen_writeU8(x64GenContext, opcode); } -#define VEX_PP_0F 0 +#define VEX_PP_0F 0 // guessed #define VEX_PP_66_0F 1 -#define VEX_PP_F3_0F 2 -#define VEX_PP_F2_0F 3 +#define VEX_PP_F3_0F 2 // guessed +#define VEX_PP_F2_0F 3 // guessed + void x64Gen_avx_VPUNPCKHQDQ_xmm_xmm_xmm(x64GenContext_t* x64GenContext, sint32 dstRegister, sint32 srcRegisterA, sint32 srcRegisterB) { diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64BMI.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64BMI.cpp similarity index 67% rename from src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64BMI.cpp rename to src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64BMI.cpp index bbb707e0..5a71e93d 100644 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64BMI.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64BMI.cpp @@ -1,4 +1,5 @@ -#include "BackendX64.h" +#include "PPCRecompiler.h" +#include "PPCRecompilerX64.h" void _x64Gen_writeMODRMDeprecated(x64GenContext_t* x64GenContext, sint32 dataRegister, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32); @@ -68,34 +69,6 @@ void x64Gen_shrx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 regist x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7)); } -void x64Gen_shrx_reg32_reg32_reg32(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 
registerA, sint32 registerB) -{ - x64Gen_writeU8(x64GenContext, 0xC4); - x64Gen_writeU8(x64GenContext, 0xE2 - ((registerDst >= 8) ? 0x80 : 0) - ((registerA >= 8) ? 0x20 : 0)); - x64Gen_writeU8(x64GenContext, 0x7B - registerB * 8); - x64Gen_writeU8(x64GenContext, 0xF7); - x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7)); -} - -void x64Gen_sarx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB) -{ - // SARX reg64, reg64, reg64 - x64Gen_writeU8(x64GenContext, 0xC4); - x64Gen_writeU8(x64GenContext, 0xE2 - ((registerDst >= 8) ? 0x80 : 0) - ((registerA >= 8) ? 0x20 : 0)); - x64Gen_writeU8(x64GenContext, 0xFA - registerB * 8); - x64Gen_writeU8(x64GenContext, 0xF7); - x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7)); -} - -void x64Gen_sarx_reg32_reg32_reg32(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB) -{ - x64Gen_writeU8(x64GenContext, 0xC4); - x64Gen_writeU8(x64GenContext, 0xE2 - ((registerDst >= 8) ? 0x80 : 0) - ((registerA >= 8) ? 0x20 : 0)); - x64Gen_writeU8(x64GenContext, 0x7A - registerB * 8); - x64Gen_writeU8(x64GenContext, 0xF7); - x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7)); -} - void x64Gen_shlx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB) { // SHLX reg64, reg64, reg64 @@ -104,13 +77,4 @@ void x64Gen_shlx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 regist x64Gen_writeU8(x64GenContext, 0xF9 - registerB * 8); x64Gen_writeU8(x64GenContext, 0xF7); x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7)); -} - -void x64Gen_shlx_reg32_reg32_reg32(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB) -{ - x64Gen_writeU8(x64GenContext, 0xC4); - x64Gen_writeU8(x64GenContext, 0xE2 - ((registerDst >= 8) ? 0x80 : 0) - ((registerA >= 8) ? 
0x20 : 0)); - x64Gen_writeU8(x64GenContext, 0x79 - registerB * 8); - x64Gen_writeU8(x64GenContext, 0xF7); - x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7)); } \ No newline at end of file diff --git a/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64FPU.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64FPU.cpp new file mode 100644 index 00000000..d83f67de --- /dev/null +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64FPU.cpp @@ -0,0 +1,1245 @@ +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" +#include "asm/x64util.h" +#include "Common/cpu_features.h" + +void PPCRecompilerX64Gen_imlInstruction_fpr_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + uint32 name = imlInstruction->op_r_name.name; + if( name >= PPCREC_NAME_FPR0 && name < (PPCREC_NAME_FPR0+32) ) + { + x64Gen_movupd_xmmReg_memReg128(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, fpr)+sizeof(FPR_t)*(name-PPCREC_NAME_FPR0)); + } + else if( name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0+8) ) + { + x64Gen_movupd_xmmReg_memReg128(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, temporaryFPR)+sizeof(FPR_t)*(name-PPCREC_NAME_TEMPORARY_FPR0)); + } + else + { + cemu_assert_debug(false); + } +} + +void PPCRecompilerX64Gen_imlInstruction_fpr_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + uint32 name = imlInstruction->op_r_name.name; + if( name >= PPCREC_NAME_FPR0 && name < (PPCREC_NAME_FPR0+32) ) + { + x64Gen_movupd_memReg128_xmmReg(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, 
fpr)+sizeof(FPR_t)*(name-PPCREC_NAME_FPR0)); + } + else if( name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0+8) ) + { + x64Gen_movupd_memReg128_xmmReg(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, temporaryFPR)+sizeof(FPR_t)*(name-PPCREC_NAME_TEMPORARY_FPR0)); + } + else + { + cemu_assert_debug(false); + } +} + +void PPCRecompilerX64Gen_imlInstr_gqr_generateScaleCode(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, sint32 registerXMM, bool isLoad, bool scalePS1, sint32 registerGQR) +{ + // load GQR + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, registerGQR); + // extract scale field and multiply by 16 to get array offset + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (isLoad?16:0)+8-4); + x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (0x3F<<4)); + // multiply xmm by scale + x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_TEMP, REG_RESV_RECDATA); + if (isLoad) + { + if(scalePS1) + x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_ld_scale_ps0_ps1)); + else + x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_ld_scale_ps0_1)); + } + else + { + if (scalePS1) + x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_st_scale_ps0_ps1)); + else + x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_st_scale_ps0_1)); + } +} + +// generate code for PSQ load for a particular type +// if scaleGQR is -1 then a scale of 1.0 is assumed (no scale) +void PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR = -1) +{ + if 
(mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1) + { + if (indexed) + { + assert_dbg(); + } + // optimized code for ps float load + x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memImmS32); + x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP); + x64Gen_rol_reg64_imm8(x64GenContext, REG_RESV_TEMP, 32); // swap upper and lower DWORD + x64Gen_movq_xmmReg_reg64(x64GenContext, registerXMM, REG_RESV_TEMP); + x64Gen_cvtps2pd_xmmReg_xmmReg(x64GenContext, registerXMM, registerXMM); + // note: floats are not scaled + } + else if (mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0) + { + if (indexed) + { + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, memRegEx); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, memReg); + if (g_CPUFeatures.x86.movbe) + { + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, memImmS32); + } + else + { + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, memImmS32); + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + } + } + else + { + if (g_CPUFeatures.x86.movbe) + { + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, memReg, memImmS32); + } + else + { + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, memReg, memImmS32); + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + } + } + if (g_CPUFeatures.x86.avx) + { + x64Gen_movd_xmmReg_reg64Low32(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP); + } + else + { + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR), REG_RESV_TEMP); + x64Gen_movddup_xmmReg_memReg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)); + } + x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_FPR_TEMP); + // load constant 1.0 into lower half and upper half of temp register + 
x64Gen_movddup_xmmReg_memReg64(x64GenContext, registerXMM, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble1_1)); + // overwrite lower half with single from memory + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, registerXMM, REG_RESV_FPR_TEMP); + // note: floats are not scaled + } + else + { + sint32 readSize; + bool isSigned = false; + if (mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1) + { + readSize = 16; + isSigned = true; + } + else if (mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1) + { + readSize = 16; + isSigned = false; + } + else if (mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1) + { + readSize = 8; + isSigned = true; + } + else if (mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1) + { + readSize = 8; + isSigned = false; + } + else + assert_dbg(); + + bool loadPS1 = (mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1); + for (sint32 wordIndex = 0; wordIndex < 2; wordIndex++) + { + if (indexed) + { + assert_dbg(); + } + // read from memory + if (wordIndex == 1 && loadPS1 == false) + { + // store constant 1 + x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR) + sizeof(uint32) * 1, 1); + } + else + { + uint32 memOffset = memImmS32 + wordIndex * (readSize / 8); + if (readSize == 16) + { + // half word + x64Gen_movZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memOffset); + x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); // endian swap + if (isSigned) + x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); + else + x64Gen_movZeroExtend_reg64Low32_reg64Low16(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); + } + else if 
(readSize == 8) + { + // byte + x64Emit_mov_reg64b_mem8(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memOffset); + if (isSigned) + x64Gen_movSignExtend_reg64Low32_reg64Low8(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); + else + x64Gen_movZeroExtend_reg64Low32_reg64Low8(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP); + } + // store + x64Emit_mov_mem32_reg32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR) + sizeof(uint32) * wordIndex, REG_RESV_TEMP); + } + } + // convert the two integers to doubles + x64Gen_cvtpi2pd_xmmReg_mem64Reg64(x64GenContext, registerXMM, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR)); + // scale + if (registerGQR >= 0) + PPCRecompilerX64Gen_imlInstr_gqr_generateScaleCode(ppcImlGenContext, x64GenContext, registerXMM, true, loadPS1, registerGQR); + } +} + +void PPCRecompilerX64Gen_imlInstr_psq_load_generic(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR) +{ + bool loadPS1 = (mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1); + // load GQR + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, registerGQR); + // extract load type field + x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 16); + x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7); + // jump cases + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 4); // type 4 -> u8 + sint32 jumpOffset_caseU8 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 5); // type 5 -> u16 + sint32 jumpOffset_caseU16 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 6); // type 4 -> s8 + sint32 jumpOffset_caseS8 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + 
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7); // type 5 -> s16 + sint32 jumpOffset_caseS16 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + // default case -> float + + // generate cases + uint32 jumpOffset_endOfFloat; + uint32 jumpOffset_endOfU8; + uint32 jumpOffset_endOfU16; + uint32 jumpOffset_endOfS8; + + PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfFloat = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU16, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_U16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfU8 = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS16, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_S16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfU16 = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU8, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? 
PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_U8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfS8 = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS8, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_S8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfFloat, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU8, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU16, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfS8, x64GenContext->codeBufferIndex); +} + +// load from memory +bool PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 realRegisterXMM = tempToRealFPRRegister(imlInstruction->op_storeLoad.registerData); + sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem); + sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER; + if( indexed ) + realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2); + uint8 mode = imlInstruction->op_storeLoad.mode; + + if( mode == PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1 ) + { + // load byte swapped single into temporary FPR + if( indexed ) + { + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, 
realRegisterMem); + if( g_CPUFeatures.x86.movbe ) + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); + else + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32); + } + else + { + if( g_CPUFeatures.x86.movbe ) + x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); + else + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32); + } + if( g_CPUFeatures.x86.movbe == false ) + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + if( g_CPUFeatures.x86.avx ) + { + x64Gen_movd_xmmReg_reg64Low32(x64GenContext, realRegisterXMM, REG_RESV_TEMP); + } + else + { + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR), REG_RESV_TEMP); + x64Gen_movddup_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)); + } + + if (imlInstruction->op_storeLoad.flags2.notExpanded) + { + // leave value as single + } + else + { + x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, realRegisterXMM); + x64Gen_movddup_xmmReg_xmmReg(x64GenContext, realRegisterXMM, realRegisterXMM); + } + } + else if( mode == PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0 ) + { + if( g_CPUFeatures.x86.avx ) + { + if( indexed ) + { + // calculate offset + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); + // load value + x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+0); + x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP); + x64Gen_movq_xmmReg_reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP); + 
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_FPR_TEMP); + } + else + { + x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+0); + x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP); + x64Gen_movq_xmmReg_reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP); + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_FPR_TEMP); + } + } + else + { + if( indexed ) + { + // calculate offset + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); + // load double low part to temporaryFPR + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+0); + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+4, REG_RESV_TEMP); + // calculate offset again + x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2); + // load double high part to temporaryFPR + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+4); + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+0, REG_RESV_TEMP); + // load double from temporaryFPR + x64Gen_movlpd_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)); + } + else + { + // load double low part to temporaryFPR + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+0); + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, 
temporaryFPR)+4, REG_RESV_TEMP); + // load double high part to temporaryFPR + x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+4); + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+0, REG_RESV_TEMP); + // load double from temporaryFPR + x64Gen_movlpd_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)); + } + } + } + else if (mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0 || + mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1 ) + { + PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed); + } + else if (mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 || + mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0) + { + PPCRecompilerX64Gen_imlInstr_psq_load_generic(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed, tempToRealRegister(imlInstruction->op_storeLoad.registerGQR)); + } + else + { + return false; + } + return true; +} + +void PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR = -1) +{ + bool storePS1 = (mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1 || + mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 || + 
mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 || + mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 || + mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1); + bool isFloat = mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0 || mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1; + if (registerGQR >= 0) + { + // move to temporary xmm and update registerXMM + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM); + registerXMM = REG_RESV_FPR_TEMP; + // apply scale + if(isFloat == false) + PPCRecompilerX64Gen_imlInstr_gqr_generateScaleCode(ppcImlGenContext, x64GenContext, registerXMM, false, storePS1, registerGQR); + } + if (mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0) + { + x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM); + if (g_CPUFeatures.x86.avx) + { + x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP); + } + else + { + x64Gen_movsd_memReg64_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)); + x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)); + } + if (g_CPUFeatures.x86.movbe == false) + x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP); + if (indexed) + { + cemu_assert_debug(memReg != memRegEx); + x64Gen_add_reg64Low32_reg64Low32(x64GenContext, memReg, memRegEx); + } + if (g_CPUFeatures.x86.movbe) + x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, memReg, memImmS32, REG_RESV_TEMP); + else + x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, memReg, memImmS32, REG_RESV_TEMP); + if (indexed) + { + x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, memReg, memRegEx); + } + return; + } + else if (mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1) + { + if (indexed) + assert_dbg(); // todo + x64Gen_cvtpd2ps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM); + x64Gen_movq_reg64_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP); + x64Gen_rol_reg64_imm8(x64GenContext, 
REG_RESV_TEMP, 32); // swap upper and lower DWORD + x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP); + x64Gen_mov_mem64Reg64PlusReg64_reg64(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memImmS32); + return; + } + // store as integer + // get limit from mode + sint32 clampMin, clampMax; + sint32 bitWriteSize; + if (mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0 || + mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 ) + { + clampMin = -128; + clampMax = 127; + bitWriteSize = 8; + } + else if (mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0 || + mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 ) + { + clampMin = 0; + clampMax = 255; + bitWriteSize = 8; + } + else if (mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0 || + mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 ) + { + clampMin = 0; + clampMax = 0xFFFF; + bitWriteSize = 16; + } + else if (mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0 || + mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1 ) + { + clampMin = -32768; + clampMax = 32767; + bitWriteSize = 16; + } + else + { + cemu_assert(false); + } + for (sint32 valueIndex = 0; valueIndex < (storePS1?2:1); valueIndex++) + { + // todo - multiply by GQR scale + if (valueIndex == 0) + { + // convert low half (PS0) to integer + x64Gen_cvttsd2si_reg64Low_xmmReg(x64GenContext, REG_RESV_TEMP, registerXMM); + } + else + { + // load top half (PS1) into bottom half of temporary register + x64Gen_movhlps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM); + // convert low half to integer + x64Gen_cvttsd2si_reg64Low_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP); + } + // max(i, -clampMin) + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMin); + sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_SIGNED_GREATER_EQUAL, 0); + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMin); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex); + // min(i, clampMax) + 
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMax); + sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_SIGNED_LESS_EQUAL, 0); + x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMax); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex); + // endian swap + if( bitWriteSize == 16) + x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); + // write to memory + if (indexed) + assert_dbg(); // unsupported + sint32 memOffset = memImmS32 + valueIndex * (bitWriteSize/8); + if (bitWriteSize == 8) + x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, memReg, memOffset, REG_RESV_TEMP); + else if (bitWriteSize == 16) + x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, memReg, memOffset, REG_RESV_TEMP); + } +} + +void PPCRecompilerX64Gen_imlInstr_psq_store_generic(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR) +{ + bool storePS1 = (mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1); + // load GQR + x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, registerGQR); + // extract store type field + x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7); + // jump cases + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 4); // type 4 -> u8 + sint32 jumpOffset_caseU8 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 5); // type 5 -> u16 + sint32 jumpOffset_caseU16 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 6); // type 4 -> s8 + sint32 jumpOffset_caseS8 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, 
X86_CONDITION_EQUAL, 0); + x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7); // type 5 -> s16 + sint32 jumpOffset_caseS16 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0); + // default case -> float + + // generate cases + uint32 jumpOffset_endOfFloat; + uint32 jumpOffset_endOfU8; + uint32 jumpOffset_endOfU16; + uint32 jumpOffset_endOfS8; + + PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfFloat = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU16, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_U16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfU8 = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS16, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_S16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfU16 = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU8, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? 
PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_U8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + jumpOffset_endOfS8 = x64GenContext->codeBufferIndex; + x64Gen_jmp_imm32(x64GenContext, 0); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS8, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_S8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR); + + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfFloat, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU8, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU16, x64GenContext->codeBufferIndex); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfS8, x64GenContext->codeBufferIndex); +} + +// store to memory +bool PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + sint32 realRegisterXMM = tempToRealFPRRegister(imlInstruction->op_storeLoad.registerData); + sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem); + sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER; + if( indexed ) + realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2); + uint8 mode = imlInstruction->op_storeLoad.mode; + if( mode == PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0 ) + { + if (imlInstruction->op_storeLoad.flags2.notExpanded) + { + // value is already in single format + if (g_CPUFeatures.x86.avx) + { + x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, realRegisterXMM); + } + else 
{
				// no AVX: spill through temporaryFPR to move the single's bits into a GPR
				x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
				x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
			}
		}
		else
		{
			// convert the double in PS0 down to a single before storing
			x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, realRegisterXMM);
			if (g_CPUFeatures.x86.avx)
			{
				x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP);
			}
			else
			{
				x64Gen_movsd_memReg64_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
				x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
			}
		}
		// byte swap in a register unless MOVBE can swap during the store itself
		if( g_CPUFeatures.x86.movbe == false )
			x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
		if( indexed )
		{
			if( realRegisterMem == realRegisterMem2 )
				assert_dbg();
			// temporarily form the effective address in realRegisterMem (undone below)
			x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
		}
		if( g_CPUFeatures.x86.movbe )
			x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
		else
			x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
		if( indexed )
		{
			x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
		}
	}
	else if( mode == PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0 )
	{
		if( indexed )
		{
			if( realRegisterMem == realRegisterMem2 )
				assert_dbg();
			x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
		}
		// spill PS0 to temporaryFPR, then write both 32-bit halves byte-swapped
		// (halves are written cross-wise: host-low 32 bits to guest offset +4, host-high to +0)
		x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
		// store double low part
		x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+0);
		x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
		x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+4, REG_RESV_TEMP);
		// store double high part
		x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+4);
		x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
		x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+0, REG_RESV_TEMP);
		if( indexed )
		{
			x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
		}
	}
	else if( mode == PPCREC_FPR_ST_MODE_UI32_FROM_PS0 )
	{
		// store the raw low 32 bits of PS0 (no float conversion), byte-swapped
		if( g_CPUFeatures.x86.avx )
		{
			x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, realRegisterXMM);
		}
		else
		{
			x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
			x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
		}
		x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
		if( indexed )
		{
			if( realRegisterMem == realRegisterMem2 )
				assert_dbg();
			x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
			x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
			x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
		}
		else
		{
			x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
		}
	}
	// fixed-type PSQ stores are delegated to the shared PSQ store emitter
	else if(mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 )
	{
		cemu_assert_debug(imlInstruction->op_storeLoad.flags2.notExpanded == false);
		PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed);
	}
	else if (mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1 ||
		mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0)
	{
		PPCRecompilerX64Gen_imlInstr_psq_store_generic(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed, tempToRealRegister(imlInstruction->op_storeLoad.registerGQR));
	}
	else
	{
		if( indexed )
			assert_dbg(); // todo
		debug_printf("PPCRecompilerX64Gen_imlInstruction_fpr_store(): Unsupported mode %d\n", mode);
		return false;
	}
	return true;
}

// swap the lower (PS0) and upper (PS1) double of an xmm register in place
void _swapPS0PS1(x64GenContext_t* x64GenContext, sint32 xmmReg)
{
	x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, xmmReg, xmmReg, 1);
}

// FPR op FPR
void PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
	PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
	if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		// VPUNPCKHQDQ
		if (imlInstruction->op_fpr_r_r.registerResult == imlInstruction->op_fpr_r_r.registerOperand)
		{
			// unpack top to bottom and top
			x64Gen_unpckhpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
		}
		//else if ( g_CPUFeatures.x86.avx )
		//{
		//	// unpack top to bottom and top with non-destructive destination
		//	// update: On Ivy Bridge this causes weird stalls?
		//	x64Gen_avx_VUNPCKHPD_xmm_xmm_xmm(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand, imlInstruction->op_fpr_r_r.registerOperand);
		//}
		else
		{
			// move top to bottom
			x64Gen_movhlps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
			// duplicate bottom
			x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerResult);
		}

	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM )
	{
		cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
		x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP )
	{
		cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
		x64Gen_unpcklpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED )
	{
		cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
		if( imlInstruction->op_fpr_r_r.registerResult != imlInstruction->op_fpr_r_r.registerOperand )
			x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
		_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP )
	{
		cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
		x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand, 2);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM )
	{
		cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
		// use unpckhpd here?
		x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand, 3);
		_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_PAIR )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_mulpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_BOTTOM )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_divsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if (imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_PAIR)
	{
		if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
		{
			assert_dbg();
		}
		x64Gen_divpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_BOTTOM )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_PAIR )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_addpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_PAIR )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_subpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_BOTTOM )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_subsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
	}
	else if( imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ )
	{
		if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		// truncating convert to integer, then zero-extend to 64 bit
		x64Gen_cvttsd2si_xmmReg_xmmReg(x64GenContext, REG_RESV_TEMP, imlInstruction->op_fpr_r_r.registerOperand);
		x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
		// move to FPR register
		x64Gen_movq_xmmReg_reg64(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_TEMP);
	}
	else if(imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_BOTTOM ||
		imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_TOP ||
		imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPO_BOTTOM )
	{
		if( imlInstruction->crRegister == PPC_REC_INVALID_REGISTER )
		{
			assert_dbg();
		}
		if (imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_BOTTOM)
			x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
		else if (imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_TOP)
		{
			// temporarily switch top/bottom of both operands and compare
			if (imlInstruction->op_fpr_r_r.registerResult == imlInstruction->op_fpr_r_r.registerOperand)
			{
				_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
				x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
				_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
			}
			else
			{
				_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
				_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerOperand);
				x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
				_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
				_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerOperand);
			}
		}
		else
			x64Gen_comisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
		// todo: handle FPSCR updates
		// update cr
		sint32 crRegister = imlInstruction->crRegister;
		// if the parity bit is set (NaN) we need to manually set CR LT, GT and EQ to 0 (comisd/ucomisd sets the respective flags to 1 in case of NaN)
		x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_PARITY, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_SO)); // unordered
		sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
		x64Gen_jmpc_near(x64GenContext, X86_CONDITION_PARITY, 0);
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); // same as X64_CONDITION_CARRY + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT)); + x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ)); + sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex); + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT), 0); + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT), 0); + x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ), 0); + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP ) + { + if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER ) + { + assert_dbg(); + } + // move register to XMM15 + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand); + + // call assembly routine to calculate accurate FRES result in XMM15 + x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_fres); + x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP); + + // copy result to bottom and top half of result register + x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP); + } + else if (imlInstruction->operation == 
PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // move register to XMM15 + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand); + + // call assembly routine to calculate accurate FRSQRTE result in XMM15 + x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_frsqrte); + x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP); + + // copy result to bottom of result register + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_PAIR ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // copy register + if( imlInstruction->op_fpr_r_r.registerResult != imlInstruction->op_fpr_r_r.registerOperand ) + { + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand); + } + // toggle sign bits + x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskPair)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_PAIR ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // copy register + if( imlInstruction->op_fpr_r_r.registerResult != imlInstruction->op_fpr_r_r.registerOperand ) + { + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand); + } + // set sign bit to 0 + x64Gen_andps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_andAbsMaskPair)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_FRES_PAIR || imlInstruction->operation == PPCREC_IML_OP_FPR_FRSQRTE_PAIR) + { + 
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // calculate bottom half of result + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand); + if(imlInstruction->operation == PPCREC_IML_OP_FPR_FRES_PAIR) + x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_fres); + else + x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_frsqrte); + x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP); // calculate fres result in xmm15 + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP); + + // calculate top half of result + // todo - this top to bottom copy can be optimized? + x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand, 3); + x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_FPR_TEMP, 1); // swap top and bottom + + x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP); // calculate fres result in xmm15 + + x64Gen_unpcklpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP); // copy bottom to top + } + else + { + assert_dbg(); + } +} + +/* + * FPR = op (fprA, fprB) + */ +void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + + if (imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM) + { + if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER) + { + assert_dbg(); + } + if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA) + { + x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + else if 
(imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB) + { + x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA); + } + else + { + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA); + x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + } + else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_BOTTOM) + { + // registerResult(fp0) = registerOperandA(fp0) + registerOperandB(fp0) + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // todo: Use AVX 3-operand VADDSD if available + if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA) + { + x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + else if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB) + { + x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA); + } + else + { + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA); + x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + } + else if (imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_PAIR) + { + // registerResult = registerOperandA - registerOperandB + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA ) + { + x64Gen_subpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, 
imlInstruction->op_fpr_r_r_r.registerOperandB); + } + else if (g_CPUFeatures.x86.avx) + { + x64Gen_avx_VSUBPD_xmm_xmm_xmm(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + else if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB ) + { + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandA); + x64Gen_subpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandB); + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, REG_RESV_FPR_TEMP); + } + else + { + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA); + x64Gen_subpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_BOTTOM ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA ) + { + x64Gen_subsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + else if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB ) + { + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandA); + x64Gen_subsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandB); + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, REG_RESV_FPR_TEMP); + } + else + { + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, 
imlInstruction->op_fpr_r_r_r.registerOperandA); + x64Gen_subsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB); + } + } + else + assert_dbg(); +} + +/* + * FPR = op (fprA, fprB, fprC) + */ +void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUM0 ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + + // todo: Investigate if there are other optimizations possible if the operand registers overlap + // generic case + // 1) move frA bottom to frTemp bottom and top + x64Gen_movddup_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandA); + // 2) add frB (both halfs, lower half is overwritten in the next step) + x64Gen_addpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandB); + // 3) Interleave top of frTemp and frC + x64Gen_unpckhpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandC); + // todo: We can optimize the REG_RESV_FPR_TEMP -> resultReg copy operation away when the result register does not overlap with any of the operand registers + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, REG_RESV_FPR_TEMP); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUM1 ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // todo: Investigate if there are other optimizations possible if the operand registers overlap + // 1) move frA bottom to frTemp bottom and top + x64Gen_movddup_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, 
imlInstruction->op_fpr_r_r_r_r.registerOperandA); + // 2) add frB (both halfs, lower half is overwritten in the next step) + x64Gen_addpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandB); + // 3) Copy bottom from frC + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandC); + //// 4) Swap bottom and top half + //x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_FPR_TEMP, 1); + // todo: We can optimize the REG_RESV_FPR_TEMP -> resultReg copy operation away when the result register does not overlap with any of the operand registers + x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, REG_RESV_FPR_TEMP); + + //float s0 = (float)hCPU->fpr[frC].fp0; + //float s1 = (float)(hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp1); + //hCPU->fpr[frD].fp0 = s0; + //hCPU->fpr[frD].fp1 = s1; + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SELECT_BOTTOM ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerOperandA, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0)); + sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0); + // select C + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC); + sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); + // select B + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex); + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB); + // end + 
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SELECT_PAIR ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // select bottom + x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerOperandA, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0)); + sint32 jumpInstructionOffset1_bottom = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0); + // select C bottom + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC); + sint32 jumpInstructionOffset2_bottom = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); + // select B bottom + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1_bottom, x64GenContext->codeBufferIndex); + x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB); + // end + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2_bottom, x64GenContext->codeBufferIndex); + // select top + x64Gen_movhlps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandA); // copy top to bottom (todo: May cause stall?) 
+ x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0)); + sint32 jumpInstructionOffset1_top = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0); + // select C top + //x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC); + x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC, 2); + sint32 jumpInstructionOffset2_top = x64GenContext->codeBufferIndex; + x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0); + // select B top + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1_top, x64GenContext->codeBufferIndex); + //x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB); + x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB, 2); + // end + PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2_top, x64GenContext->codeBufferIndex); + } + else + assert_dbg(); +} + +/* + * Single FPR operation + */ +void PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction) +{ + PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); + if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_BOTTOM ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // toggle sign bit + x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom)); + } + else 
if( imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_BOTTOM ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // mask out sign bit + x64Gen_andps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_andAbsMaskBottom)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // set sign bit + x64Gen_orps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom)); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // convert to 32bit single + x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult); + // convert back to 64bit double + x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult); + } + else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR ) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // convert to 32bit singles + x64Gen_cvtpd2ps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult); + // convert back to 64bit doubles + x64Gen_cvtps2pd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult); + } + else if (imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64) + { + cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER); + // convert bottom to 64bit double + x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, 
imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult); + // copy to top half + x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult); + } + else + { + cemu_assert_unimplemented(); + } +} diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64Gen.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64Gen.cpp similarity index 90% rename from src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64Gen.cpp rename to src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64Gen.cpp index efe929d0..19327f46 100644 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64Gen.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64Gen.cpp @@ -1,31 +1,62 @@ -#include "BackendX64.h" +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" // x86/x64 extension opcodes that could be useful: // ANDN // mulx, rorx, sarx, shlx, shrx // PDEP, PEXT +void x64Gen_checkBuffer(x64GenContext_t* x64GenContext) +{ + // todo +} + void x64Gen_writeU8(x64GenContext_t* x64GenContext, uint8 v) { - x64GenContext->emitter->_emitU8(v); + if( x64GenContext->codeBufferIndex+1 > x64GenContext->codeBufferSize ) + { + x64GenContext->codeBufferSize *= 2; + x64GenContext->codeBuffer = (uint8*)realloc(x64GenContext->codeBuffer, x64GenContext->codeBufferSize); + } + *(uint8*)(x64GenContext->codeBuffer+x64GenContext->codeBufferIndex) = v; + x64GenContext->codeBufferIndex++; } void x64Gen_writeU16(x64GenContext_t* x64GenContext, uint32 v) { - x64GenContext->emitter->_emitU16(v); + if( x64GenContext->codeBufferIndex+2 > x64GenContext->codeBufferSize ) + { + x64GenContext->codeBufferSize *= 2; + x64GenContext->codeBuffer = (uint8*)realloc(x64GenContext->codeBuffer, x64GenContext->codeBufferSize); + } + *(uint16*)(x64GenContext->codeBuffer+x64GenContext->codeBufferIndex) = v; + x64GenContext->codeBufferIndex += 2; } void x64Gen_writeU32(x64GenContext_t* x64GenContext, uint32 v) 
{ - x64GenContext->emitter->_emitU32(v); + if( x64GenContext->codeBufferIndex+4 > x64GenContext->codeBufferSize ) + { + x64GenContext->codeBufferSize *= 2; + x64GenContext->codeBuffer = (uint8*)realloc(x64GenContext->codeBuffer, x64GenContext->codeBufferSize); + } + *(uint32*)(x64GenContext->codeBuffer+x64GenContext->codeBufferIndex) = v; + x64GenContext->codeBufferIndex += 4; } void x64Gen_writeU64(x64GenContext_t* x64GenContext, uint64 v) { - x64GenContext->emitter->_emitU64(v); + if( x64GenContext->codeBufferIndex+8 > x64GenContext->codeBufferSize ) + { + x64GenContext->codeBufferSize *= 2; + x64GenContext->codeBuffer = (uint8*)realloc(x64GenContext->codeBuffer, x64GenContext->codeBufferSize); + } + *(uint64*)(x64GenContext->codeBuffer+x64GenContext->codeBufferIndex) = v; + x64GenContext->codeBufferIndex += 8; } -#include "X64Emit.hpp" +#include "x64Emit.hpp" void _x64Gen_writeMODRMDeprecated(x64GenContext_t* x64GenContext, sint32 dataRegister, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32) { @@ -36,7 +67,7 @@ void _x64Gen_writeMODRMDeprecated(x64GenContext_t* x64GenContext, sint32 dataReg forceUseOffset = true; } - if (memRegisterB64 == X86_REG_NONE) + if (memRegisterB64 == REG_NONE) { // memRegisterA64 + memImmS32 uint8 modRM = (dataRegister & 7) * 8 + (memRegisterA64 & 7); @@ -321,7 +352,7 @@ void x64Gen_mov_mem32Reg64_imm32(x64GenContext_t* x64GenContext, sint32 memRegis void x64Gen_mov_mem64Reg64_imm32(x64GenContext_t* x64GenContext, sint32 memRegister, uint32 memImmU32, uint32 dataImmU32) { // MOV QWORD [+], dataImmU32 - if( memRegister == X86_REG_R14 ) + if( memRegister == REG_R14 ) { sint32 memImmS32 = (sint32)memImmU32; if( memImmS32 == 0 ) @@ -353,7 +384,7 @@ void x64Gen_mov_mem64Reg64_imm32(x64GenContext_t* x64GenContext, sint32 memRegis void x64Gen_mov_mem8Reg64_imm8(x64GenContext_t* x64GenContext, sint32 memRegister, uint32 memImmU32, uint8 dataImmU8) { // MOV BYTE [+], dataImmU8 - if( memRegister == X86_REG_RSP ) + if( memRegister 
== REG_RSP ) { sint32 memImmS32 = (sint32)memImmU32; if( memImmS32 >= -128 && memImmS32 <= 127 ) @@ -594,7 +625,7 @@ void _x64_op_reg64Low_mem8Reg64(x64GenContext_t* x64GenContext, sint32 dstRegist if (memRegister64 >= 8) x64Gen_writeU8(x64GenContext, 0x41); x64Gen_writeU8(x64GenContext, opByte); - _x64Gen_writeMODRMDeprecated(x64GenContext, dstRegister, memRegister64, X86_REG_NONE, memImmS32); + _x64Gen_writeMODRMDeprecated(x64GenContext, dstRegister, memRegister64, REG_NONE, memImmS32); } void x64Gen_or_reg64Low8_mem8Reg64(x64GenContext_t* x64GenContext, sint32 dstRegister, sint32 memRegister64, sint32 memImmS32) @@ -612,6 +643,40 @@ void x64Gen_mov_mem8Reg64_reg64Low8(x64GenContext_t* x64GenContext, sint32 dstRe _x64_op_reg64Low_mem8Reg64(x64GenContext, dstRegister, memRegister64, memImmS32, 0x88); } +void x64Gen_lock_cmpxchg_mem32Reg64PlusReg64_reg64(x64GenContext_t* x64GenContext, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32, sint32 srcRegister) +{ + // LOCK CMPXCHG DWORD [ + + ], (low dword) + x64Gen_writeU8(x64GenContext, 0xF0); // LOCK prefix + + if( srcRegister >= 8 || memRegisterA64 >= 8|| memRegisterB64 >= 8 ) + x64Gen_writeU8(x64GenContext, 0x40+((srcRegister>=8)?4:0)+((memRegisterA64>=8)?1:0)+((memRegisterB64>=8)?2:0)); + + x64Gen_writeU8(x64GenContext, 0x0F); + x64Gen_writeU8(x64GenContext, 0xB1); + + _x64Gen_writeMODRMDeprecated(x64GenContext, srcRegister, memRegisterA64, memRegisterB64, memImmS32); +} + +void x64Gen_lock_cmpxchg_mem32Reg64_reg64(x64GenContext_t* x64GenContext, sint32 memRegister64, sint32 memImmS32, sint32 srcRegister) +{ + // LOCK CMPXCHG DWORD [ + ], (low dword) + x64Gen_writeU8(x64GenContext, 0xF0); // LOCK prefix + + if( srcRegister >= 8 || memRegister64 >= 8 ) + x64Gen_writeU8(x64GenContext, 0x40+((srcRegister>=8)?4:0)+((memRegister64>=8)?1:0)); + + x64Gen_writeU8(x64GenContext, 0x0F); + x64Gen_writeU8(x64GenContext, 0xB1); + + if( memImmS32 == 0 ) + { + x64Gen_writeU8(x64GenContext, 0x45+(srcRegister&7)*8); + 
x64Gen_writeU8(x64GenContext, 0x00); + } + else + assert_dbg(); +} + void x64Gen_add_reg64_reg64(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister) { // ADD , @@ -667,7 +732,7 @@ void x64Gen_add_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegis } else { - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special EAX short form x64Gen_writeU8(x64GenContext, 0x05); @@ -707,7 +772,7 @@ void x64Gen_sub_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegis } else { - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special EAX short form x64Gen_writeU8(x64GenContext, 0x2D); @@ -746,7 +811,7 @@ void x64Gen_sub_mem32reg64_imm32(x64GenContext_t* x64GenContext, sint32 memRegis { // SUB , sint32 immS32 = (sint32)immU32; - if( memRegister == X86_REG_RSP ) + if( memRegister == REG_RSP ) { if( memImmS32 >= 128 ) { @@ -778,11 +843,64 @@ void x64Gen_sub_mem32reg64_imm32(x64GenContext_t* x64GenContext, sint32 memRegis } } +void x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister) +{ + // SBB , + if( destRegister >= 8 && srcRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x45); + else if( srcRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x44); + else if( destRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0x19); + x64Gen_writeU8(x64GenContext, 0xC0+(srcRegister&7)*8+(destRegister&7)); +} + +void x64Gen_adc_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister) +{ + // ADC , + if( destRegister >= 8 && srcRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x45); + else if( srcRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x44); + else if( destRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x41); + x64Gen_writeU8(x64GenContext, 0x11); + x64Gen_writeU8(x64GenContext, 0xC0+(srcRegister&7)*8+(destRegister&7)); +} + +void x64Gen_adc_reg64Low32_imm32(x64GenContext_t* 
x64GenContext, sint32 srcRegister, uint32 immU32) +{ + sint32 immS32 = (sint32)immU32; + if( srcRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x41); + if( immS32 >= -128 && immS32 <= 127 ) + { + x64Gen_writeU8(x64GenContext, 0x83); + x64Gen_writeU8(x64GenContext, 0xD0+(srcRegister&7)); + x64Gen_writeU8(x64GenContext, (uint8)immS32); + } + else + { + if( srcRegister == REG_RAX ) + { + // special EAX short form + x64Gen_writeU8(x64GenContext, 0x15); + } + else + { + x64Gen_writeU8(x64GenContext, 0x81); + x64Gen_writeU8(x64GenContext, 0xD0+(srcRegister&7)); + } + x64Gen_writeU32(x64GenContext, immU32); + } +} + void x64Gen_dec_mem32(x64GenContext_t* x64GenContext, sint32 memoryRegister, uint32 memoryImmU32) { // DEC dword [+imm] sint32 memoryImmS32 = (sint32)memoryImmU32; - if (memoryRegister != X86_REG_RSP) + if (memoryRegister != REG_RSP) assert_dbg(); // not supported yet if (memoryImmS32 >= -128 && memoryImmS32 <= 127) { @@ -863,7 +981,7 @@ void x64Gen_and_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegis } else { - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special EAX short form x64Gen_writeU8(x64GenContext, 0x25); @@ -908,7 +1026,7 @@ void x64Gen_test_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegi sint32 immS32 = (sint32)immU32; if( srcRegister >= 8 ) x64Gen_writeU8(x64GenContext, 0x41); - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special EAX short form x64Gen_writeU8(x64GenContext, 0xA9); @@ -934,7 +1052,7 @@ void x64Gen_cmp_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegis } else { - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special RAX short form x64Gen_writeU8(x64GenContext, 0x3D); @@ -964,7 +1082,7 @@ void x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 des void x64Gen_cmp_reg64Low32_mem32reg64(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 memRegister, sint32 memImmS32) { // CMP , DWORD [+] - 
if( memRegister == X86_REG_RSP ) + if( memRegister == REG_RSP ) { if( memImmS32 >= -128 && memImmS32 <= 127 ) assert_dbg(); // todo -> Shorter instruction form @@ -994,7 +1112,7 @@ void x64Gen_or_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegist } else { - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special EAX short form x64Gen_writeU8(x64GenContext, 0x0D); @@ -1054,7 +1172,7 @@ void x64Gen_xor_reg64Low32_imm32(x64GenContext_t* x64GenContext, sint32 srcRegis } else { - if( srcRegister == X86_REG_RAX ) + if( srcRegister == REG_RAX ) { // special EAX short form x64Gen_writeU8(x64GenContext, 0x35); @@ -1208,6 +1326,16 @@ void x64Gen_cdq(x64GenContext_t* x64GenContext) x64Gen_writeU8(x64GenContext, 0x99); } +void x64Gen_bswap_reg64(x64GenContext_t* x64GenContext, sint32 destRegister) +{ + if( destRegister >= 8 ) + x64Gen_writeU8(x64GenContext, 0x41|8); + else + x64Gen_writeU8(x64GenContext, 0x40|8); + x64Gen_writeU8(x64GenContext, 0x0F); + x64Gen_writeU8(x64GenContext, 0xC8+(destRegister&7)); +} + void x64Gen_bswap_reg64Lower32bit(x64GenContext_t* x64GenContext, sint32 destRegister) { if( destRegister >= 8 ) @@ -1216,6 +1344,16 @@ void x64Gen_bswap_reg64Lower32bit(x64GenContext_t* x64GenContext, sint32 destReg x64Gen_writeU8(x64GenContext, 0xC8+(destRegister&7)); } +void x64Gen_bswap_reg64Lower16bit(x64GenContext_t* x64GenContext, sint32 destRegister) +{ + assert_dbg(); // do not use this instruction, it's result is always undefined. 
Instead use ROL , 8 + //x64Gen_writeU8(x64GenContext, 0x66); + //if( destRegister >= 8 ) + // x64Gen_writeU8(x64GenContext, 0x41); + //x64Gen_writeU8(x64GenContext, 0x0F); + //x64Gen_writeU8(x64GenContext, 0xC8+(destRegister&7)); +} + void x64Gen_lzcnt_reg64Low32_reg64Low32(x64GenContext_t* x64GenContext, sint32 destRegister, sint32 srcRegister) { // SSE4 @@ -1250,7 +1388,7 @@ void x64Gen_setcc_mem8(x64GenContext_t* x64GenContext, sint32 conditionType, sin { // SETcc [+imm] sint32 memoryImmS32 = (sint32)memoryImmU32; - if( memoryRegister != X86_REG_RSP ) + if( memoryRegister != REG_RSP ) assert_dbg(); // not supported if( memoryRegister >= 8 ) assert_dbg(); // not supported @@ -1489,7 +1627,7 @@ void x64Gen_bt_mem8(x64GenContext_t* x64GenContext, sint32 memoryRegister, uint3 { // BT [+imm], bitIndex (bit test) sint32 memoryImmS32 = (sint32)memoryImmU32; - if( memoryRegister != X86_REG_RSP ) + if( memoryRegister != REG_RSP ) assert_dbg(); // not supported yet if( memoryImmS32 >= -128 && memoryImmS32 <= 127 ) { @@ -1524,7 +1662,7 @@ void x64Gen_jmp_imm32(x64GenContext_t* x64GenContext, uint32 destImm32) void x64Gen_jmp_memReg64(x64GenContext_t* x64GenContext, sint32 memRegister, uint32 immU32) { - if( memRegister == X86_REG_NONE ) + if( memRegister == REG_NONE ) { assert_dbg(); } diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64GenFPU.cpp b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64GenFPU.cpp similarity index 92% rename from src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64GenFPU.cpp rename to src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64GenFPU.cpp index 4bbcc025..92289d68 100644 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/BackendX64GenFPU.cpp +++ b/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64GenFPU.cpp @@ -1,4 +1,6 @@ -#include "BackendX64.h" +#include "PPCRecompiler.h" +#include "PPCRecompilerIml.h" +#include "PPCRecompilerX64.h" void x64Gen_genSSEVEXPrefix2(x64GenContext_t* x64GenContext, sint32 xmmRegister1, sint32 
xmmRegister2, bool use64BitMode) { @@ -42,7 +44,7 @@ void x64Gen_movupd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRe // SSE2 // move two doubles from memory into xmm register // MOVUPD , [+] - if( memRegister == X86_REG_ESP ) + if( memRegister == REG_ESP ) { // todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range // 66 0F 10 84 E4 23 01 00 00 @@ -54,7 +56,7 @@ void x64Gen_movupd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRe x64Gen_writeU8(x64GenContext, 0xE4); x64Gen_writeU32(x64GenContext, memImmU32); } - else if( memRegister == X86_REG_NONE ) + else if( memRegister == REG_NONE ) { assert_dbg(); //x64Gen_writeU8(x64GenContext, 0x66); @@ -74,7 +76,7 @@ void x64Gen_movupd_memReg128_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRe // SSE2 // move two doubles from memory into xmm register // MOVUPD [+], - if( memRegister == X86_REG_ESP ) + if( memRegister == REG_ESP ) { // todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range x64Gen_writeU8(x64GenContext, 0x66); @@ -85,7 +87,7 @@ void x64Gen_movupd_memReg128_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRe x64Gen_writeU8(x64GenContext, 0xE4); x64Gen_writeU32(x64GenContext, memImmU32); } - else if( memRegister == X86_REG_NONE ) + else if( memRegister == REG_NONE ) { assert_dbg(); //x64Gen_writeU8(x64GenContext, 0x66); @@ -104,7 +106,7 @@ void x64Gen_movddup_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRe { // SSE3 // move one double from memory into lower and upper half of a xmm register - if( memRegister == X86_REG_RSP ) + if( memRegister == REG_RSP ) { // MOVDDUP , [+] // todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range @@ -117,7 +119,7 @@ void x64Gen_movddup_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRe x64Gen_writeU8(x64GenContext, 0xE4); x64Gen_writeU32(x64GenContext, memImmU32); } - else if( memRegister == X86_REG_R15 ) + else if( memRegister == REG_R15 ) { // MOVDDUP , [+] // todo: 
Short form of instruction if memImmU32 is 0 or in -128 to 127 range @@ -129,7 +131,7 @@ void x64Gen_movddup_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRe x64Gen_writeU8(x64GenContext, 0x87+(xmmRegister&7)*8); x64Gen_writeU32(x64GenContext, memImmU32); } - else if( memRegister == X86_REG_NONE ) + else if( memRegister == REG_NONE ) { // MOVDDUP , [] // 36 F2 0F 12 05 - 00 00 00 00 @@ -183,7 +185,7 @@ void x64Gen_movsd_memReg64_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegi { // SSE2 // move lower 64bits (double) of xmm register to memory location - if( memRegister == X86_REG_NONE ) + if( memRegister == REG_NONE ) { // MOVSD [], // F2 0F 11 05 - 45 23 01 00 @@ -195,7 +197,7 @@ void x64Gen_movsd_memReg64_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegi //x64Gen_writeU8(x64GenContext, 0x05+xmmRegister*8); //x64Gen_writeU32(x64GenContext, memImmU32); } - else if( memRegister == X86_REG_RSP ) + else if( memRegister == REG_RSP ) { // MOVSD [RSP+], // F2 0F 11 84 24 - 33 22 11 00 @@ -213,42 +215,11 @@ void x64Gen_movsd_memReg64_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegi } } -void x64Gen_movsd_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32) -{ - // SSE2 - if( memRegister == X86_REG_RSP ) - { - // MOVSD , [RSP+] - x64Gen_writeU8(x64GenContext, 0xF2); - x64Gen_genSSEVEXPrefix2(x64GenContext, 0, xmmRegister, false); - x64Gen_writeU8(x64GenContext, 0x0F); - x64Gen_writeU8(x64GenContext, 0x10); - x64Gen_writeU8(x64GenContext, 0x84+(xmmRegister&7)*8); - x64Gen_writeU8(x64GenContext, 0x24); - x64Gen_writeU32(x64GenContext, memImmU32); - } - else if( memRegister == 15 ) - { - // MOVSD , [R15+] - x64Gen_writeU8(x64GenContext, 0x36); - x64Gen_writeU8(x64GenContext, 0xF2); - x64Gen_genSSEVEXPrefix2(x64GenContext, memRegister, xmmRegister, false); - x64Gen_writeU8(x64GenContext, 0x0F); - x64Gen_writeU8(x64GenContext, 0x10); - x64Gen_writeU8(x64GenContext, 0x87+(xmmRegister&7)*8); - 
x64Gen_writeU32(x64GenContext, memImmU32); - } - else - { - assert_dbg(); - } -} - void x64Gen_movlpd_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32) { // SSE3 // move one double from memory into lower half of a xmm register, leave upper half unchanged(?) - if( memRegister == X86_REG_NONE ) + if( memRegister == REG_NONE ) { // MOVLPD , [] //x64Gen_writeU8(x64GenContext, 0x66); @@ -258,7 +229,7 @@ void x64Gen_movlpd_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmReg //x64Gen_writeU32(x64GenContext, memImmU32); assert_dbg(); } - else if( memRegister == X86_REG_RSP ) + else if( memRegister == REG_RSP ) { // MOVLPD , [+] // 66 0F 12 84 24 - 33 22 11 00 @@ -377,11 +348,11 @@ void x64Gen_mulpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegist void x64Gen_mulpd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32) { // SSE2 - if (memRegister == X86_REG_NONE) + if (memRegister == REG_NONE) { assert_dbg(); } - else if (memRegister == X86_REG_R14) + else if (memRegister == REG_R14) { x64Gen_writeU8(x64GenContext, 0x66); x64Gen_writeU8(x64GenContext, (xmmRegister < 8) ? 
0x41 : 0x45); @@ -433,7 +404,7 @@ void x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext_t* x64GenContext, sint32 xmmR { // SSE2 // compare bottom double with double from memory location - if( memoryReg == X86_REG_R15 ) + if( memoryReg == REG_R15 ) { x64Gen_writeU8(x64GenContext, 0x66); x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); @@ -461,7 +432,7 @@ void x64Gen_comiss_xmmReg_mem64Reg64(x64GenContext_t* x64GenContext, sint32 xmmR { // SSE2 // compare bottom float with float from memory location - if (memoryReg == X86_REG_R15) + if (memoryReg == REG_R15) { x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); x64Gen_writeU8(x64GenContext, 0x0F); @@ -477,7 +448,7 @@ void x64Gen_orps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmRe { // SSE2 // and xmm register with 128 bit value from memory - if( memReg == X86_REG_R15 ) + if( memReg == REG_R15 ) { x64Gen_genSSEVEXPrefix2(x64GenContext, memReg, xmmRegisterDest, false); x64Gen_writeU8(x64GenContext, 0x0F); @@ -493,7 +464,7 @@ void x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmR { // SSE2 // xor xmm register with 128 bit value from memory - if( memReg == X86_REG_R15 ) + if( memReg == REG_R15 ) { x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); // todo: should be x64Gen_genSSEVEXPrefix2() with memReg? x64Gen_writeU8(x64GenContext, 0x0F); @@ -508,11 +479,11 @@ void x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmR void x64Gen_andpd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32) { // SSE2 - if (memRegister == X86_REG_NONE) + if (memRegister == REG_NONE) { assert_dbg(); } - else if (memRegister == X86_REG_R14) + else if (memRegister == REG_R14) { x64Gen_writeU8(x64GenContext, 0x66); x64Gen_writeU8(x64GenContext, (xmmRegister < 8) ? 
0x41 : 0x45); @@ -531,7 +502,7 @@ void x64Gen_andps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmR { // SSE2 // and xmm register with 128 bit value from memory - if( memReg == X86_REG_R15 ) + if( memReg == REG_R15 ) { x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); // todo: should be x64Gen_genSSEVEXPrefix2() with memReg? x64Gen_writeU8(x64GenContext, 0x0F); @@ -557,7 +528,7 @@ void x64Gen_pcmpeqd_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xm { // SSE2 // doubleword integer compare - if( memReg == X86_REG_R15 ) + if( memReg == REG_R15 ) { x64Gen_writeU8(x64GenContext, 0x66); x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); @@ -592,16 +563,6 @@ void x64Gen_cvttsd2si_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 regis x64Gen_writeU8(x64GenContext, 0xC0+(registerDest&7)*8+(xmmRegisterSrc&7)); } -void x64Gen_cvtsi2sd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 registerSrc) -{ - // SSE2 - x64Gen_writeU8(x64GenContext, 0xF2); - x64Gen_genSSEVEXPrefix2(x64GenContext, registerSrc, xmmRegisterDest, false); - x64Gen_writeU8(x64GenContext, 0x0F); - x64Gen_writeU8(x64GenContext, 0x2A); - x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(registerSrc&7)); -} - void x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc) { // SSE2 @@ -649,7 +610,7 @@ void x64Gen_cvtpi2pd_xmmReg_mem64Reg64(x64GenContext_t* x64GenContext, sint32 xm { // SSE2 // converts two signed 32bit integers to two doubles - if( memReg == X86_REG_RSP ) + if( memReg == REG_RSP ) { x64Gen_writeU8(x64GenContext, 0x66); x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, false); @@ -723,7 +684,7 @@ void x64Gen_rcpss_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegist void x64Gen_mulss_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32) { // SSE2 - if( memRegister == X86_REG_NONE ) + if( 
memRegister == REG_NONE ) { assert_dbg(); } diff --git a/src/Cafe/HW/Espresso/Recompiler/BackendX64/X64Emit.hpp b/src/Cafe/HW/Espresso/Recompiler/x64Emit.hpp similarity index 99% rename from src/Cafe/HW/Espresso/Recompiler/BackendX64/X64Emit.hpp rename to src/Cafe/HW/Espresso/Recompiler/x64Emit.hpp index b4021931..e936f1d8 100644 --- a/src/Cafe/HW/Espresso/Recompiler/BackendX64/X64Emit.hpp +++ b/src/Cafe/HW/Espresso/Recompiler/x64Emit.hpp @@ -203,6 +203,7 @@ template void _x64Gen_writeMODRM_internal(x64GenContext_t* x64GenContext, TA opA, TB opB) { static_assert(TA::getType() == MODRM_OPR_TYPE::REG); + x64Gen_checkBuffer(x64GenContext); // REX prefix // 0100 WRXB if constexpr (TA::getType() == MODRM_OPR_TYPE::REG && TB::getType() == MODRM_OPR_TYPE::REG) diff --git a/src/Cafe/HW/Latte/Core/Latte.h b/src/Cafe/HW/Latte/Core/Latte.h index 2636467b..e5e9dd5c 100644 --- a/src/Cafe/HW/Latte/Core/Latte.h +++ b/src/Cafe/HW/Latte/Core/Latte.h @@ -47,6 +47,8 @@ struct LatteGPUState_t gx2GPUSharedArea_t* sharedArea; // quick reference to shared area MPTR sharedAreaAddr; // other + // todo: Currently we have the command buffer logic implemented as a FIFO ringbuffer. On real HW it's handled as a series of command buffers that are pushed individually. 
+ std::atomic lastSubmittedCommandBufferTimestamp; uint32 gx2InitCalled; // incremented every time GX2Init() is called // OpenGL control uint32 glVendor; // GLVENDOR_* @@ -73,6 +75,8 @@ struct LatteGPUState_t extern LatteGPUState_t LatteGPUState; +extern uint8* gxRingBufferReadPtr; // currently active read pointer (gx2 ring buffer or display list) + // texture #include "Cafe/HW/Latte/Core/LatteTexture.h" diff --git a/src/Cafe/HW/Latte/Core/LatteCommandProcessor.cpp b/src/Cafe/HW/Latte/Core/LatteCommandProcessor.cpp index 4385cf49..f592cc9e 100644 --- a/src/Cafe/HW/Latte/Core/LatteCommandProcessor.cpp +++ b/src/Cafe/HW/Latte/Core/LatteCommandProcessor.cpp @@ -13,7 +13,6 @@ #include "Cafe/HW/Latte/Core/LattePM4.h" #include "Cafe/OS/libs/coreinit/coreinit_Time.h" -#include "Cafe/OS/libs/TCL/TCL.h" // TCL currently handles the GPU command ringbuffer #include "Cafe/CafeSystem.h" @@ -29,6 +28,11 @@ typedef uint32be* LatteCMDPtr; #define LatteReadCMD() ((uint32)*(cmd++)) #define LatteSkipCMD(_nWords) cmd += (_nWords) +uint8* gxRingBufferReadPtr; // currently active read pointer (gx2 ring buffer or display list) +uint8* gx2CPParserDisplayListPtr; +uint8* gx2CPParserDisplayListStart; // used for debugging +uint8* gx2CPParserDisplayListEnd; + void LatteThread_HandleOSScreen(); void LatteThread_Exit(); @@ -151,12 +155,16 @@ void LatteCP_signalEnterWait() */ uint32 LatteCP_readU32Deprc() { + uint32 v; + uint8* gxRingBufferWritePtr; + sint32 readDistance; // no display list active while (true) { - uint32 cmdWord; - if ( TCL::TCLGPUReadRBWord(cmdWord) ) - return cmdWord; + gxRingBufferWritePtr = gx2WriteGatherPipe.writeGatherPtrGxBuffer[GX2::sGX2MainCoreIndex]; + readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr); + if (readDistance != 0) + break; g_renderer->NotifyLatteCommandProcessorIdle(); // let the renderer know in case it wants to flush any commands performanceMonitor.gpuTime_idleTime.beginMeasuring(); @@ -167,8 +175,56 @@ uint32 LatteCP_readU32Deprc() } 
LatteThread_HandleOSScreen(); // check if new frame was presented via OSScreen API - if ( TCL::TCLGPUReadRBWord(cmdWord) ) - return cmdWord; + readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr); + if (readDistance != 0) + break; + if (Latte_GetStopSignal()) + LatteThread_Exit(); + + // still no command data available, do some other tasks + LatteTiming_HandleTimedVsync(); + LatteAsyncCommands_checkAndExecute(); + std::this_thread::yield(); + performanceMonitor.gpuTime_idleTime.endMeasuring(); + } + v = *(uint32*)gxRingBufferReadPtr; + gxRingBufferReadPtr += 4; +#ifdef CEMU_DEBUG_ASSERT + if (v == 0xcdcdcdcd) + assert_dbg(); +#endif + v = _swapEndianU32(v); + return v; +} + +void LatteCP_waitForNWords(uint32 numWords) +{ + uint8* gxRingBufferWritePtr; + sint32 readDistance; + bool isFlushed = false; + sint32 waitDistance = numWords * sizeof(uint32be); + // no display list active + while (true) + { + gxRingBufferWritePtr = gx2WriteGatherPipe.writeGatherPtrGxBuffer[GX2::sGX2MainCoreIndex]; + readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr); + if (readDistance < 0) + return; // wrap around means there is at least one full command queued after this + if (readDistance >= waitDistance) + break; + g_renderer->NotifyLatteCommandProcessorIdle(); // let the renderer know in case it wants to flush any commands + performanceMonitor.gpuTime_idleTime.beginMeasuring(); + // no command data available, spin in a busy loop for a while then check again + for (sint32 busy = 0; busy < 80; busy++) + { + _mm_pause(); + } + readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr); + if (readDistance < 0) + return; // wrap around means there is at least one full command queued after this + if (readDistance >= waitDistance) + break; + if (Latte_GetStopSignal()) LatteThread_Exit(); @@ -178,7 +234,6 @@ uint32 LatteCP_readU32Deprc() std::this_thread::yield(); performanceMonitor.gpuTime_idleTime.endMeasuring(); } - UNREACHABLE; } template @@ -215,23 
+270,21 @@ void LatteCP_itIndirectBufferDepr(LatteCMDPtr cmd, uint32 nWords) cemu_assert_debug(nWords == 3); uint32 physicalAddress = LatteReadCMD(); uint32 physicalAddressHigh = LatteReadCMD(); // unused - uint32 sizeInU32s = LatteReadCMD(); + uint32 sizeInDWords = LatteReadCMD(); + uint32 displayListSize = sizeInDWords * 4; + DrawPassContext drawPassCtx; #ifdef LATTE_CP_LOGGING if (GetAsyncKeyState('A')) LatteCP_DebugPrintCmdBuffer(MEMPTR(physicalAddress), displayListSize); #endif - if (sizeInU32s > 0) - { - DrawPassContext drawPassCtx; - uint32be* buf = MEMPTR(physicalAddress).GetPtr(); - drawPassCtx.PushCurrentCommandQueuePos(buf, buf, buf + sizeInU32s); + uint32be* buf = MEMPTR(physicalAddress).GetPtr(); + drawPassCtx.PushCurrentCommandQueuePos(buf, buf, buf + sizeInDWords); - LatteCP_processCommandBuffer(drawPassCtx); - if (drawPassCtx.isWithinDrawPass()) - drawPassCtx.endDrawPass(); - } + LatteCP_processCommandBuffer(drawPassCtx); + if (drawPassCtx.isWithinDrawPass()) + drawPassCtx.endDrawPass(); } // pushes the command buffer to the stack @@ -241,12 +294,11 @@ void LatteCP_itIndirectBuffer(LatteCMDPtr cmd, uint32 nWords, DrawPassContext& d uint32 physicalAddress = LatteReadCMD(); uint32 physicalAddressHigh = LatteReadCMD(); // unused uint32 sizeInDWords = LatteReadCMD(); - if (sizeInDWords > 0) - { - uint32 displayListSize = sizeInDWords * 4; - uint32be* buf = MEMPTR(physicalAddress).GetPtr(); - drawPassCtx.PushCurrentCommandQueuePos(buf, buf, buf + sizeInDWords); - } + uint32 displayListSize = sizeInDWords * 4; + cemu_assert_debug(displayListSize >= 4); + + uint32be* buf = MEMPTR(physicalAddress).GetPtr(); + drawPassCtx.PushCurrentCommandQueuePos(buf, buf, buf + sizeInDWords); } LatteCMDPtr LatteCP_itStreamoutBufferUpdate(LatteCMDPtr cmd, uint32 nWords) @@ -513,55 +565,26 @@ LatteCMDPtr LatteCP_itMemWrite(LatteCMDPtr cmd, uint32 nWords) if (word1 == 0x40000) { // write U32 - stdx::atomic_ref atomicRef(*memPtr); - atomicRef.store(word2); + *memPtr = word2; 
} else if (word1 == 0x00000) { - // write U64 - // note: The U32s are swapped here, but needs verification. Also, it seems like the two U32 halves are written independently and the U64 as a whole is not atomic -> investiagte - stdx::atomic_ref atomicRef(*(uint64be*)memPtr); - atomicRef.store(((uint64le)word2 << 32) | word3); + // write U64 (as two U32) + // note: The U32s are swapped + memPtr[0] = word2; + memPtr[1] = word3; } else if (word1 == 0x20000) { // write U64 (little endian) - stdx::atomic_ref atomicRef(*(uint64le*)memPtr); - atomicRef.store(((uint64le)word3 << 32) | word2); + memPtr[0] = _swapEndianU32(word2); + memPtr[1] = _swapEndianU32(word3); } else cemu_assert_unimplemented(); return cmd; } -LatteCMDPtr LatteCP_itEventWriteEOP(LatteCMDPtr cmd, uint32 nWords) -{ - cemu_assert_debug(nWords == 5); - uint32 word0 = LatteReadCMD(); - uint32 word1 = LatteReadCMD(); - uint32 word2 = LatteReadCMD(); - uint32 word3 = LatteReadCMD(); // value low bits - uint32 word4 = LatteReadCMD(); // value high bits - - cemu_assert_debug(word2 == 0x40000000 || word2 == 0x42000000); - - if (word0 == 0x504 && (word2&0x40000000)) // todo - figure out the flags - { - stdx::atomic_ref atomicRef(*(uint64be*)memory_getPointerFromPhysicalOffset(word1)); - uint64 val = ((uint64)word4 << 32) | word3; - atomicRef.store(val); - } - else - { cemu_assert_unimplemented(); - } - bool triggerInterrupt = (word2 & 0x2000000) != 0; - if (triggerInterrupt) - { - // todo - timestamp interrupt - } - TCL::TCLGPUNotifyNewRetirementTimestamp(); - return cmd; -} LatteCMDPtr LatteCP_itMemSemaphore(LatteCMDPtr cmd, uint32 nWords) { @@ -760,6 +783,16 @@ LatteCMDPtr LatteCP_itDrawImmediate(LatteCMDPtr cmd, uint32 nWords, DrawPassCont drawPassCtx.executeDraw(count, false, _tempIndexArrayMPTR); return cmd; + +} + +LatteCMDPtr LatteCP_itHLEFifoWrapAround(LatteCMDPtr cmd, uint32 nWords) +{ + cemu_assert_debug(nWords == 1); + uint32 unused = LatteReadCMD(); + gxRingBufferReadPtr = 
gx2WriteGatherPipe.gxRingBuffer; + cmd = (LatteCMDPtr)gxRingBufferReadPtr; + return cmd; } LatteCMDPtr LatteCP_itHLESampleTimer(LatteCMDPtr cmd, uint32 nWords) @@ -786,6 +819,16 @@ LatteCMDPtr LatteCP_itHLESpecialState(LatteCMDPtr cmd, uint32 nWords) return cmd; } +LatteCMDPtr LatteCP_itHLESetRetirementTimestamp(LatteCMDPtr cmd, uint32 nWords) +{ + cemu_assert_debug(nWords == 2); + uint32 timestampHigh = (uint32)LatteReadCMD(); + uint32 timestampLow = (uint32)LatteReadCMD(); + uint64 timestamp = ((uint64)timestampHigh << 32ULL) | (uint64)timestampLow; + GX2::__GX2NotifyNewRetirementTimestamp(timestamp); + return cmd; +} + LatteCMDPtr LatteCP_itHLEBeginOcclusionQuery(LatteCMDPtr cmd, uint32 nWords) { cemu_assert_debug(nWords == 1); @@ -1102,10 +1145,9 @@ void LatteCP_processCommandBuffer(DrawPassContext& drawPassCtx) LatteCMDPtr cmd, cmdStart, cmdEnd; if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd)) break; - uint32 itHeader; while (cmd < cmdEnd) { - itHeader = LatteReadCMD(); + uint32 itHeader = LatteReadCMD(); uint32 itHeaderType = (itHeader >> 30) & 3; if (itHeaderType == 3) { @@ -1319,6 +1361,11 @@ void LatteCP_processCommandBuffer(DrawPassContext& drawPassCtx) LatteCP_itHLEEndOcclusionQuery(cmdData, nWords); break; } + case IT_HLE_SET_CB_RETIREMENT_TIMESTAMP: + { + LatteCP_itHLESetRetirementTimestamp(cmdData, nWords); + break; + } case IT_HLE_BOTTOM_OF_PIPE_CB: { LatteCP_itHLEBottomOfPipeCB(cmdData, nWords); @@ -1374,7 +1421,6 @@ void LatteCP_processCommandBuffer(DrawPassContext& drawPassCtx) void LatteCP_ProcessRingbuffer() { sint32 timerRecheck = 0; // estimates how much CP processing time has elapsed based on the executed commands, if the value exceeds CP_TIMER_RECHECK then _handleTimers() is called - uint32be tmpBuffer[128]; while (true) { uint32 itHeader = LatteCP_readU32Deprc(); @@ -1383,13 +1429,10 @@ void LatteCP_ProcessRingbuffer() { uint32 itCode = (itHeader >> 8) & 0xFF; uint32 nWords = ((itHeader >> 16) & 0x3FFF) + 1; - 
cemu_assert(nWords < 128); - for (sint32 i=0; i -#elif defined(__aarch64__) -#include #endif struct @@ -504,114 +502,6 @@ void LatteIndices_fastConvertU32_AVX2(const void* indexDataInput, void* indexDat indexMax = std::max(indexMax, _maxIndex); indexMin = std::min(indexMin, _minIndex); } -#elif defined(__aarch64__) - -void LatteIndices_fastConvertU16_NEON(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax) -{ - const uint16* indicesU16BE = (const uint16*)indexDataInput; - uint16* indexOutput = (uint16*)indexDataOutput; - sint32 count8 = count >> 3; - sint32 countRemaining = count & 7; - - if (count8) - { - uint16x8_t mMin = vdupq_n_u16(0xFFFF); - uint16x8_t mMax = vdupq_n_u16(0x0000); - uint16x8_t mTemp; - uint16x8_t* mRawIndices = (uint16x8_t*) indicesU16BE; - indicesU16BE += count8 * 8; - uint16x8_t* mOutputIndices = (uint16x8_t*) indexOutput; - indexOutput += count8 * 8; - - while (count8--) - { - mTemp = vld1q_u16((uint16*)mRawIndices); - mRawIndices++; - mTemp = vrev16q_u8(mTemp); - mMin = vminq_u16(mMin, mTemp); - mMax = vmaxq_u16(mMax, mTemp); - vst1q_u16((uint16*)mOutputIndices, mTemp); - mOutputIndices++; - } - - uint16* mMaxU16 = (uint16*)&mMax; - uint16* mMinU16 = (uint16*)&mMin; - - for (int i = 0; i < 8; ++i) { - indexMax = std::max(indexMax, (uint32)mMaxU16[i]); - indexMin = std::min(indexMin, (uint32)mMinU16[i]); - } - } - // process remaining indices - uint32 _minIndex = 0xFFFFFFFF; - uint32 _maxIndex = 0; - for (sint32 i = countRemaining; (--i) >= 0;) - { - uint16 idx = _swapEndianU16(*indicesU16BE); - *indexOutput = idx; - indexOutput++; - indicesU16BE++; - _maxIndex = std::max(_maxIndex, (uint32)idx); - _minIndex = std::min(_minIndex, (uint32)idx); - } - // update min/max - indexMax = std::max(indexMax, _maxIndex); - indexMin = std::min(indexMin, _minIndex); -} - -void LatteIndices_fastConvertU32_NEON(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& 
indexMax) -{ - const uint32* indicesU32BE = (const uint32*)indexDataInput; - uint32* indexOutput = (uint32*)indexDataOutput; - sint32 count8 = count >> 2; - sint32 countRemaining = count & 3; - - if (count8) - { - uint32x4_t mMin = vdupq_n_u32(0xFFFFFFFF); - uint32x4_t mMax = vdupq_n_u32(0x00000000); - uint32x4_t mTemp; - uint32x4_t* mRawIndices = (uint32x4_t*) indicesU32BE; - indicesU32BE += count8 * 4; - uint32x4_t* mOutputIndices = (uint32x4_t*) indexOutput; - indexOutput += count8 * 4; - - while (count8--) - { - mTemp = vld1q_u32((uint32*)mRawIndices); - mRawIndices++; - mTemp = vrev32q_u8(mTemp); - mMin = vminq_u32(mMin, mTemp); - mMax = vmaxq_u32(mMax, mTemp); - vst1q_u32((uint32*)mOutputIndices, mTemp); - mOutputIndices++; - } - - uint32* mMaxU32 = (uint32*)&mMax; - uint32* mMinU32 = (uint32*)&mMin; - - for (int i = 0; i < 4; ++i) { - indexMax = std::max(indexMax, mMaxU32[i]); - indexMin = std::min(indexMin, mMinU32[i]); - } - } - // process remaining indices - uint32 _minIndex = 0xFFFFFFFF; - uint32 _maxIndex = 0; - for (sint32 i = countRemaining; (--i) >= 0;) - { - uint32 idx = _swapEndianU32(*indicesU32BE); - *indexOutput = idx; - indexOutput++; - indicesU32BE++; - _maxIndex = std::max(_maxIndex, idx); - _minIndex = std::min(_minIndex, idx); - } - // update min/max - indexMax = std::max(indexMax, _maxIndex); - indexMin = std::min(indexMin, _minIndex); -} - #endif template @@ -798,31 +688,27 @@ void LatteIndices_decode(const void* indexData, LatteIndexType indexType, uint32 { if (indexType == LatteIndexType::U16_BE) { -#if defined(ARCH_X86_64) + #if defined(ARCH_X86_64) if (g_CPUFeatures.x86.avx2) LatteIndices_fastConvertU16_AVX2(indexData, indexOutputPtr, count, indexMin, indexMax); else if (g_CPUFeatures.x86.sse4_1 && g_CPUFeatures.x86.ssse3) LatteIndices_fastConvertU16_SSE41(indexData, indexOutputPtr, count, indexMin, indexMax); else LatteIndices_convertBE(indexData, indexOutputPtr, count, indexMin, indexMax); -#elif defined(__aarch64__) - 
LatteIndices_fastConvertU16_NEON(indexData, indexOutputPtr, count, indexMin, indexMax); -#else + #else LatteIndices_convertBE(indexData, indexOutputPtr, count, indexMin, indexMax); -#endif + #endif } else if (indexType == LatteIndexType::U32_BE) { -#if defined(ARCH_X86_64) + #if defined(ARCH_X86_64) if (g_CPUFeatures.x86.avx2) LatteIndices_fastConvertU32_AVX2(indexData, indexOutputPtr, count, indexMin, indexMax); else LatteIndices_convertBE(indexData, indexOutputPtr, count, indexMin, indexMax); -#elif defined(__aarch64__) - LatteIndices_fastConvertU32_NEON(indexData, indexOutputPtr, count, indexMin, indexMax); -#else + #else LatteIndices_convertBE(indexData, indexOutputPtr, count, indexMin, indexMax); -#endif + #endif } else if (indexType == LatteIndexType::U16_LE) { diff --git a/src/Cafe/HW/Latte/Core/LattePM4.h b/src/Cafe/HW/Latte/Core/LattePM4.h index 1f5d2129..8079a89c 100644 --- a/src/Cafe/HW/Latte/Core/LattePM4.h +++ b/src/Cafe/HW/Latte/Core/LattePM4.h @@ -14,7 +14,6 @@ #define IT_MEM_WRITE 0x3D #define IT_SURFACE_SYNC 0x43 #define IT_EVENT_WRITE 0x46 -#define IT_EVENT_WRITE_EOP 0x47 // end of pipe #define IT_LOAD_CONFIG_REG 0x60 #define IT_LOAD_CONTEXT_REG 0x61 @@ -48,12 +47,14 @@ #define IT_HLE_WAIT_FOR_FLIP 0xF1 #define IT_HLE_BOTTOM_OF_PIPE_CB 0xF2 #define IT_HLE_COPY_COLORBUFFER_TO_SCANBUFFER 0xF3 +#define IT_HLE_FIFO_WRAP_AROUND 0xF4 #define IT_HLE_CLEAR_COLOR_DEPTH_STENCIL 0xF5 #define IT_HLE_SAMPLE_TIMER 0xF7 #define IT_HLE_TRIGGER_SCANBUFFER_SWAP 0xF8 #define IT_HLE_SPECIAL_STATE 0xF9 #define IT_HLE_BEGIN_OCCLUSION_QUERY 0xFA #define IT_HLE_END_OCCLUSION_QUERY 0xFB +#define IT_HLE_SET_CB_RETIREMENT_TIMESTAMP 0xFD #define pm4HeaderType3(__itCode, __dataDWordCount) (0xC0000000|((uint32)(__itCode)<<8)|((uint32)((__dataDWordCount)-1)<<16)) #define pm4HeaderType2Filler() (0x80000000) diff --git a/src/Cafe/HW/Latte/Core/LatteShaderCache.cpp b/src/Cafe/HW/Latte/Core/LatteShaderCache.cpp index 86035973..0d427e34 100644 --- 
a/src/Cafe/HW/Latte/Core/LatteShaderCache.cpp +++ b/src/Cafe/HW/Latte/Core/LatteShaderCache.cpp @@ -209,7 +209,7 @@ class BootSoundPlayer try { - bootSndAudioDev = IAudioAPI::CreateDeviceFromConfig(IAudioAPI::AudioType::TV, sampleRate, nChannels, samplesPerBlock, bitsPerSample); + bootSndAudioDev = IAudioAPI::CreateDeviceFromConfig(true, sampleRate, nChannels, samplesPerBlock, bitsPerSample); if(!bootSndAudioDev) return; } diff --git a/src/Cafe/HW/Latte/Core/LatteThread.cpp b/src/Cafe/HW/Latte/Core/LatteThread.cpp index e42dce5f..92a7fdbb 100644 --- a/src/Cafe/HW/Latte/Core/LatteThread.cpp +++ b/src/Cafe/HW/Latte/Core/LatteThread.cpp @@ -207,6 +207,7 @@ int Latte_ThreadEntry() if (Latte_GetStopSignal()) LatteThread_Exit(); } + gxRingBufferReadPtr = gx2WriteGatherPipe.gxRingBuffer; LatteCP_ProcessRingbuffer(); cemu_assert_debug(false); // should never reach return 0; @@ -234,8 +235,6 @@ void Latte_Start() void Latte_Stop() { std::unique_lock _lock(sLatteThreadStateMutex); - if (!sLatteThreadRunning) - return; sLatteThreadRunning = false; _lock.unlock(); sLatteThread.join(); diff --git a/src/Cafe/HW/Latte/Renderer/RendererOuputShader.cpp b/src/Cafe/HW/Latte/Renderer/RendererOuputShader.cpp index afe53a16..3a00c36a 100644 --- a/src/Cafe/HW/Latte/Renderer/RendererOuputShader.cpp +++ b/src/Cafe/HW/Latte/Renderer/RendererOuputShader.cpp @@ -187,8 +187,8 @@ std::string RendererOutputShader::GetOpenGlVertexSource(bool render_upside_down) // vertex shader std::ostringstream vertex_source; vertex_source << - R"(#version 420 -layout(location = 0) smooth out vec2 passUV; + R"(#version 400 +out vec2 passUV; out gl_PerVertex { @@ -297,7 +297,7 @@ uniform vec2 nativeResolution; uniform vec2 outputResolution; #endif -layout(location = 0) smooth in vec2 passUV; +layout(location = 0) in vec2 passUV; layout(binding = 0) uniform sampler2D textureSrc; layout(location = 0) out vec4 colorOut0; )" + shaderSrc; diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VKRBase.h 
b/src/Cafe/HW/Latte/Renderer/Vulkan/VKRBase.h index 7dcd3ebc..9c7e03f3 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VKRBase.h +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VKRBase.h @@ -221,14 +221,11 @@ public: VKRObjectPipeline(); ~VKRObjectPipeline() override; - void SetPipeline(VkPipeline newPipeline); - VkPipeline GetPipeline() const { return m_pipeline; } + void setPipeline(VkPipeline newPipeline); - VkDescriptorSetLayout m_vertexDSL = VK_NULL_HANDLE, m_pixelDSL = VK_NULL_HANDLE, m_geometryDSL = VK_NULL_HANDLE; - VkPipelineLayout m_pipelineLayout = VK_NULL_HANDLE; - -private: - VkPipeline m_pipeline = VK_NULL_HANDLE; + VkPipeline pipeline = VK_NULL_HANDLE; + VkDescriptorSetLayout vertexDSL = VK_NULL_HANDLE, pixelDSL = VK_NULL_HANDLE, geometryDSL = VK_NULL_HANDLE; + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; }; class VKRObjectDescriptorSet : public VKRDestructibleObject diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VKRPipelineInfo.cpp b/src/Cafe/HW/Latte/Renderer/Vulkan/VKRPipelineInfo.cpp index b316b9c5..fd5a5b78 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VKRPipelineInfo.cpp +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VKRPipelineInfo.cpp @@ -26,6 +26,7 @@ PipelineInfo::PipelineInfo(uint64 minimalStateHash, uint64 pipelineHash, LatteFe // init VKRObjPipeline m_vkrObjPipeline = new VKRObjectPipeline(); + m_vkrObjPipeline->pipeline = VK_NULL_HANDLE; // track dependency with shaders if (vertexShaderVk) diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.cpp b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.cpp index 7555c03a..ba094a84 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.cpp +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.cpp @@ -558,8 +558,8 @@ void PipelineCompiler::InitRasterizerState(const LatteContextRegister& latteRegi rasterizerExt.flags = 0; rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterizer.pNext = &rasterizerExt; 
rasterizer.rasterizerDiscardEnable = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_DX_RASTERIZATION_KILL(); - rasterizer.pNext = VulkanRenderer::GetInstance()->m_featureControl.deviceExtensions.depth_clip_enable ? &rasterizerExt : nullptr; // GX2SetSpecialState(0, true) workaround if (!LatteGPUState.contextNew.PA_CL_VTE_CNTL.get_VPORT_X_OFFSET_ENA()) rasterizer.rasterizerDiscardEnable = false; @@ -730,7 +730,7 @@ void PipelineCompiler::InitDescriptorSetLayouts(VulkanRenderer* vkRenderer, Pipe { cemu_assert_debug(descriptorSetLayoutCount == 0); CreateDescriptorSetLayout(vkRenderer, vertexShader, descriptorSetLayout[descriptorSetLayoutCount], vkrPipelineInfo); - vkObjPipeline->m_vertexDSL = descriptorSetLayout[descriptorSetLayoutCount]; + vkObjPipeline->vertexDSL = descriptorSetLayout[descriptorSetLayoutCount]; descriptorSetLayoutCount++; } @@ -738,7 +738,7 @@ void PipelineCompiler::InitDescriptorSetLayouts(VulkanRenderer* vkRenderer, Pipe { cemu_assert_debug(descriptorSetLayoutCount == 1); CreateDescriptorSetLayout(vkRenderer, pixelShader, descriptorSetLayout[descriptorSetLayoutCount], vkrPipelineInfo); - vkObjPipeline->m_pixelDSL = descriptorSetLayout[descriptorSetLayoutCount]; + vkObjPipeline->pixelDSL = descriptorSetLayout[descriptorSetLayoutCount]; descriptorSetLayoutCount++; } else if (geometryShader) @@ -757,7 +757,7 @@ void PipelineCompiler::InitDescriptorSetLayouts(VulkanRenderer* vkRenderer, Pipe { cemu_assert_debug(descriptorSetLayoutCount == 2); CreateDescriptorSetLayout(vkRenderer, geometryShader, descriptorSetLayout[descriptorSetLayoutCount], vkrPipelineInfo); - vkObjPipeline->m_geometryDSL = descriptorSetLayout[descriptorSetLayoutCount]; + vkObjPipeline->geometryDSL = descriptorSetLayout[descriptorSetLayoutCount]; descriptorSetLayoutCount++; } } @@ -873,7 +873,7 @@ void PipelineCompiler::InitDynamicState(PipelineInfo* pipelineInfo, bool usesBle dynamicState.pDynamicStates = dynamicStates.data(); } -bool 
PipelineCompiler::InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const LatteContextRegister& latteRegister, VKRObjectRenderPass* renderPassObj, bool requireRobustBufferAccess) +bool PipelineCompiler::InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const LatteContextRegister& latteRegister, VKRObjectRenderPass* renderPassObj) { VulkanRenderer* vkRenderer = VulkanRenderer::GetInstance(); @@ -888,7 +888,6 @@ bool PipelineCompiler::InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const m_vkGeometryShader = pipelineInfo->geometryShaderVk; m_vkrObjPipeline = pipelineInfo->m_vkrObjPipeline; m_renderPassObj = renderPassObj; - m_requestRobustBufferAccess = requireRobustBufferAccess; // if required generate RECT emulation geometry shader if (!vkRenderer->m_featureControl.deviceExtensions.nv_fill_rectangle && isPrimitiveRect) @@ -919,7 +918,7 @@ bool PipelineCompiler::InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const pipelineLayoutInfo.pPushConstantRanges = nullptr; pipelineLayoutInfo.pushConstantRangeCount = 0; - VkResult result = vkCreatePipelineLayout(vkRenderer->m_logicalDevice, &pipelineLayoutInfo, nullptr, &m_pipelineLayout); + VkResult result = vkCreatePipelineLayout(vkRenderer->m_logicalDevice, &pipelineLayoutInfo, nullptr, &m_pipeline_layout); if (result != VK_SUCCESS) { cemuLog_log(LogType::Force, "Failed to create pipeline layout: {}", result); @@ -937,7 +936,7 @@ bool PipelineCompiler::InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const // ########################################################################################################################################## - pipelineInfo->m_vkrObjPipeline->m_pipelineLayout = m_pipelineLayout; + pipelineInfo->m_vkrObjPipeline->pipeline_layout = m_pipeline_layout; // increment ref counter for vkrObjPipeline and renderpass object to make sure they dont get released while we are using them m_vkrObjPipeline->incRef(); @@ -990,7 +989,7 @@ bool PipelineCompiler::Compile(bool forceCompile, 
bool isRenderThread, bool show pipelineInfo.pRasterizationState = &rasterizer; pipelineInfo.pMultisampleState = &multisampling; pipelineInfo.pColorBlendState = &colorBlending; - pipelineInfo.layout = m_pipelineLayout; + pipelineInfo.layout = m_pipeline_layout; pipelineInfo.renderPass = m_renderPassObj->m_renderPass; pipelineInfo.pDepthStencilState = &depthStencilState; pipelineInfo.subpass = 0; @@ -999,8 +998,6 @@ bool PipelineCompiler::Compile(bool forceCompile, bool isRenderThread, bool show if (!forceCompile) pipelineInfo.flags |= VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT; - void* prevStruct = nullptr; - VkPipelineCreationFeedbackCreateInfoEXT creationFeedbackInfo; VkPipelineCreationFeedbackEXT creationFeedback; std::vector creationStageFeedback(0); @@ -1018,25 +1015,9 @@ bool PipelineCompiler::Compile(bool forceCompile, bool isRenderThread, bool show creationFeedbackInfo.pPipelineCreationFeedback = &creationFeedback; creationFeedbackInfo.pPipelineStageCreationFeedbacks = creationStageFeedback.data(); creationFeedbackInfo.pipelineStageCreationFeedbackCount = pipelineInfo.stageCount; - creationFeedbackInfo.pNext = prevStruct; - prevStruct = &creationFeedbackInfo; + pipelineInfo.pNext = &creationFeedbackInfo; } - VkPipelineRobustnessCreateInfoEXT pipelineRobustnessCreateInfo{}; - if (vkRenderer->m_featureControl.deviceExtensions.pipeline_robustness && m_requestRobustBufferAccess) - { - // per-pipeline handling of robust buffer access, if the extension is not available then we fall back to device feature robustBufferAccess - pipelineRobustnessCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT; - pipelineRobustnessCreateInfo.pNext = prevStruct; - prevStruct = &pipelineRobustnessCreateInfo; - pipelineRobustnessCreateInfo.storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT; - pipelineRobustnessCreateInfo.uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT; - 
pipelineRobustnessCreateInfo.vertexInputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT; - pipelineRobustnessCreateInfo.images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT; - } - - pipelineInfo.pNext = prevStruct; - VkPipeline pipeline = VK_NULL_HANDLE; VkResult result; uint8 retryCount = 0; @@ -1056,7 +1037,7 @@ bool PipelineCompiler::Compile(bool forceCompile, bool isRenderThread, bool show } else if (result == VK_SUCCESS) { - m_vkrObjPipeline->SetPipeline(pipeline); + m_vkrObjPipeline->setPipeline(pipeline); } else { @@ -1094,31 +1075,3 @@ void PipelineCompiler::TrackAsCached(uint64 baseHash, uint64 pipelineStateHash) return; pipelineCache.AddCurrentStateToCache(baseHash, pipelineStateHash); } - -// calculate whether the pipeline requires robust buffer access -// if there is a potential risk for a shader to do out-of-bounds reads or writes we need to enable robust buffer access -// this can happen when: -// - Streamout is used with too small of a buffer (probably? Could also be some issue with how the streamout array index is calculated -> We can maybe fix this in the future) -// - The shader uses dynamic indices for uniform access. 
This will trigger the uniform mode to be FULL_CBANK -bool PipelineCompiler::CalcRobustBufferAccessRequirement(LatteDecompilerShader* vertexShader, LatteDecompilerShader* pixelShader, LatteDecompilerShader* geometryShader) -{ - bool requiresRobustBufferAcces = false; - if (vertexShader) - { - cemu_assert_debug(vertexShader->shaderType == LatteConst::ShaderType::Vertex); - requiresRobustBufferAcces |= vertexShader->hasStreamoutBufferWrite; - requiresRobustBufferAcces |= vertexShader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK; - } - if (geometryShader) - { - cemu_assert_debug(geometryShader->shaderType == LatteConst::ShaderType::Geometry); - requiresRobustBufferAcces |= geometryShader->hasStreamoutBufferWrite; - requiresRobustBufferAcces |= geometryShader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK; - } - if (pixelShader) - { - cemu_assert_debug(pixelShader->shaderType == LatteConst::ShaderType::Pixel); - requiresRobustBufferAcces |= pixelShader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK; - } - return requiresRobustBufferAcces; -} diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h index 7297049e..304a7b31 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h @@ -38,14 +38,11 @@ public: RendererShaderVk* m_vkPixelShader{}; RendererShaderVk* m_vkGeometryShader{}; - bool InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const LatteContextRegister& latteRegister, VKRObjectRenderPass* renderPassObj, bool requireRobustBufferAccess); + bool InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const LatteContextRegister& latteRegister, VKRObjectRenderPass* renderPassObj); void TrackAsCached(uint64 baseHash, uint64 pipelineStateHash); // stores pipeline to permanent cache if not yet cached. 
Must be called synchronously from render thread due to dependency on GPU state - static bool CalcRobustBufferAccessRequirement(LatteDecompilerShader* vertexShader, LatteDecompilerShader* pixelShader, LatteDecompilerShader* geometryShader); - - VkPipelineLayout m_pipelineLayout; + VkPipelineLayout m_pipeline_layout; VKRObjectRenderPass* m_renderPassObj{}; - bool m_requestRobustBufferAccess{false}; /* shader stages */ std::vector shaderStages; diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.cpp b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.cpp index 9f8f4491..123120d3 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.cpp +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.cpp @@ -277,9 +277,8 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span fileData) m_pipelineIsCachedLock.unlock(); // compile { - PipelineCompiler pipelineCompiler; - bool requiresRobustBufferAccess = PipelineCompiler::CalcRobustBufferAccessRequirement(vertexShader, pixelShader, geometryShader); - if (!pipelineCompiler.InitFromCurrentGPUState(pipelineInfo, *lcr, renderPass, requiresRobustBufferAccess)) + PipelineCompiler pp; + if (!pp.InitFromCurrentGPUState(pipelineInfo, *lcr, renderPass)) { s_spinlockSharedInternal.lock(); delete lcr; @@ -287,7 +286,8 @@ void VulkanPipelineStableCache::LoadPipelineFromCache(std::span fileData) s_spinlockSharedInternal.unlock(); return; } - pipelineCompiler.Compile(true, true, false); + pp.Compile(true, true, false); + // destroy pp early } // on success, calculate pipeline hash and flag as present in cache uint64 pipelineBaseHash = vertexShader->baseHash; diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.cpp b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.cpp index aed0db25..4ff2faac 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.cpp +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.cpp @@ -49,9 +49,7 @@ const std::vector 
kOptionalDeviceExtensions = VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME, VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME, VK_KHR_PRESENT_WAIT_EXTENSION_NAME, - VK_KHR_PRESENT_ID_EXTENSION_NAME, - VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME, - VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME + VK_KHR_PRESENT_ID_EXTENSION_NAME }; const std::vector kRequiredDeviceExtensions = @@ -84,6 +82,8 @@ VKAPI_ATTR VkBool32 VKAPI_CALL DebugUtilsCallback(VkDebugUtilsMessageSeverityFla if (strstr(pCallbackData->pMessage, "Number of currently valid sampler objects is not less than the maximum allowed")) return VK_FALSE; + assert_dbg(); + #endif cemuLog_log(LogType::Force, (char*)pCallbackData->pMessage); @@ -264,14 +264,6 @@ void VulkanRenderer::GetDeviceFeatures() pwf.pNext = prevStruct; prevStruct = &pwf; - VkPhysicalDevicePipelineRobustnessFeaturesEXT pprf{}; - if (m_featureControl.deviceExtensions.pipeline_robustness) - { - pprf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT; - pprf.pNext = prevStruct; - prevStruct = &pprf; - } - VkPhysicalDeviceFeatures2 physicalDeviceFeatures2{}; physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; physicalDeviceFeatures2.pNext = prevStruct; @@ -322,15 +314,7 @@ void VulkanRenderer::GetDeviceFeatures() cemuLog_log(LogType::Force, "VK_EXT_custom_border_color not supported. 
Cannot emulate arbitrary border color"); } } - if (!m_featureControl.deviceExtensions.depth_clip_enable) - { - cemuLog_log(LogType::Force, "VK_EXT_depth_clip_enable not supported"); - } - if (m_featureControl.deviceExtensions.pipeline_robustness) - { - if ( pprf.pipelineRobustness != VK_TRUE ) - m_featureControl.deviceExtensions.pipeline_robustness = false; - } + // get limits m_featureControl.limits.minUniformBufferOffsetAlignment = std::max(prop2.properties.limits.minUniformBufferOffsetAlignment, (VkDeviceSize)4); m_featureControl.limits.nonCoherentAtomSize = std::max(prop2.properties.limits.nonCoherentAtomSize, (VkDeviceSize)4); @@ -489,17 +473,11 @@ VulkanRenderer::VulkanRenderer() deviceFeatures.occlusionQueryPrecise = VK_TRUE; deviceFeatures.depthClamp = VK_TRUE; deviceFeatures.depthBiasClamp = VK_TRUE; - - if (m_featureControl.deviceExtensions.pipeline_robustness) + if (m_vendor == GfxVendor::AMD) { - deviceFeatures.robustBufferAccess = VK_FALSE; - } - else - { - cemuLog_log(LogType::Force, "VK_EXT_pipeline_robustness not supported. 
Falling back to robustBufferAccess"); deviceFeatures.robustBufferAccess = VK_TRUE; + cemuLog_log(LogType::Force, "Enable robust buffer access"); } - if (m_featureControl.mode.useTFEmulationViaSSBO) { deviceFeatures.vertexPipelineStoresAndAtomics = true; @@ -544,15 +522,6 @@ VulkanRenderer::VulkanRenderer() deviceExtensionFeatures = &presentWaitFeature; presentWaitFeature.presentWait = VK_TRUE; } - // enable VK_EXT_pipeline_robustness - VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeature{}; - if (m_featureControl.deviceExtensions.pipeline_robustness) - { - pipelineRobustnessFeature.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT; - pipelineRobustnessFeature.pNext = deviceExtensionFeatures; - deviceExtensionFeatures = &pipelineRobustnessFeature; - pipelineRobustnessFeature.pipelineRobustness = VK_TRUE; - } std::vector used_extensions; VkDeviceCreateInfo createInfo = CreateDeviceCreateInfo(queueCreateInfos, deviceFeatures, deviceExtensionFeatures, used_extensions); @@ -1149,15 +1118,10 @@ VkDeviceCreateInfo VulkanRenderer::CreateDeviceCreateInfo(const std::vector need reinit + VkAttachmentDescription colorAttachment = {}; + colorAttachment.format = m_mainSwapchainInfo->m_surfaceFormat.format; + colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT; + colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; + colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; + colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; + colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; + colorAttachment.initialLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; + colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; - VkAttachmentDescription colorAttachment = {}; - colorAttachment.format = m_mainSwapchainInfo->m_surfaceFormat.format; - colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT; - colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; - colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; - 
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; - colorAttachment.initialLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; - colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; + VkAttachmentReference colorAttachmentRef = {}; + colorAttachmentRef.attachment = 0; + colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + VkSubpassDescription subpass = {}; + subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; + subpass.colorAttachmentCount = 1; + subpass.pColorAttachments = &colorAttachmentRef; - VkAttachmentReference colorAttachmentRef = {}; - colorAttachmentRef.attachment = 0; - colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - VkSubpassDescription subpass = {}; - subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; - subpass.colorAttachmentCount = 1; - subpass.pColorAttachments = &colorAttachmentRef; - - VkRenderPassCreateInfo renderPassInfo = {}; - renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; - renderPassInfo.attachmentCount = 1; - renderPassInfo.pAttachments = &colorAttachment; - renderPassInfo.subpassCount = 1; - renderPassInfo.pSubpasses = &subpass; - const auto result = vkCreateRenderPass(m_logicalDevice, &renderPassInfo, nullptr, &m_imguiRenderPass); - if (result != VK_SUCCESS) - throw VkException(result, "can't create imgui renderpass"); + VkRenderPassCreateInfo renderPassInfo = {}; + renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + renderPassInfo.attachmentCount = 1; + renderPassInfo.pAttachments = &colorAttachment; + renderPassInfo.subpassCount = 1; + renderPassInfo.pSubpasses = &subpass; + const auto result = vkCreateRenderPass(m_logicalDevice, &renderPassInfo, nullptr, &m_imguiRenderPass); + if (result != VK_SUCCESS) + throw VkException(result, "can't create imgui renderpass"); + } ImGui_ImplVulkan_InitInfo info{}; info.Instance = m_instance; @@ -1669,9 +1633,6 @@ void 
VulkanRenderer::ImguiInit() info.ImageCount = info.MinImageCount; ImGui_ImplVulkan_Init(&info, m_imguiRenderPass); - - if (prevRenderPass != VK_NULL_HANDLE) - vkDestroyRenderPass(GetLogicalDevice(), prevRenderPass, nullptr); } void VulkanRenderer::Initialize() @@ -1684,10 +1645,10 @@ void VulkanRenderer::Initialize() void VulkanRenderer::Shutdown() { - SubmitCommandBuffer(); - WaitDeviceIdle(); DeleteFontTextures(); Renderer::Shutdown(); + SubmitCommandBuffer(); + WaitDeviceIdle(); if (m_imguiRenderPass != VK_NULL_HANDLE) { vkDestroyRenderPass(m_logicalDevice, m_imguiRenderPass, nullptr); @@ -4151,36 +4112,33 @@ VKRObjectFramebuffer::~VKRObjectFramebuffer() VKRObjectPipeline::VKRObjectPipeline() { + // todo } -void VKRObjectPipeline::SetPipeline(VkPipeline newPipeline) +void VKRObjectPipeline::setPipeline(VkPipeline newPipeline) { - if (m_pipeline == newPipeline) - return; - cemu_assert_debug(m_pipeline == VK_NULL_HANDLE); // replacing an already assigned pipeline is not intended - if(m_pipeline == VK_NULL_HANDLE && newPipeline != VK_NULL_HANDLE) + cemu_assert_debug(pipeline == VK_NULL_HANDLE); + pipeline = newPipeline; + if(newPipeline != VK_NULL_HANDLE) performanceMonitor.vk.numGraphicPipelines.increment(); - else if(m_pipeline != VK_NULL_HANDLE && newPipeline == VK_NULL_HANDLE) - performanceMonitor.vk.numGraphicPipelines.decrement(); - m_pipeline = newPipeline; } VKRObjectPipeline::~VKRObjectPipeline() { auto vkr = VulkanRenderer::GetInstance(); - if (m_pipeline != VK_NULL_HANDLE) + if (pipeline != VK_NULL_HANDLE) { - vkDestroyPipeline(vkr->GetLogicalDevice(), m_pipeline, nullptr); + vkDestroyPipeline(vkr->GetLogicalDevice(), pipeline, nullptr); performanceMonitor.vk.numGraphicPipelines.decrement(); } - if (m_vertexDSL != VK_NULL_HANDLE) - vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), m_vertexDSL, nullptr); - if (m_pixelDSL != VK_NULL_HANDLE) - vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), m_pixelDSL, nullptr); - if (m_geometryDSL != 
VK_NULL_HANDLE) - vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), m_geometryDSL, nullptr); - if (m_pipelineLayout != VK_NULL_HANDLE) - vkDestroyPipelineLayout(vkr->GetLogicalDevice(), m_pipelineLayout, nullptr); + if (vertexDSL != VK_NULL_HANDLE) + vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), vertexDSL, nullptr); + if (pixelDSL != VK_NULL_HANDLE) + vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), pixelDSL, nullptr); + if (geometryDSL != VK_NULL_HANDLE) + vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), geometryDSL, nullptr); + if (pipeline_layout != VK_NULL_HANDLE) + vkDestroyPipelineLayout(vkr->GetLogicalDevice(), pipeline_layout, nullptr); } VKRObjectDescriptorSet::VKRObjectDescriptorSet() diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h index 5cc0a6f1..f1450487 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h @@ -452,8 +452,6 @@ private: bool dynamic_rendering = false; // VK_KHR_dynamic_rendering bool shader_float_controls = false; // VK_KHR_shader_float_controls bool present_wait = false; // VK_KHR_present_wait - bool depth_clip_enable = false; // VK_EXT_depth_clip_enable - bool pipeline_robustness = false; // VK_EXT_pipeline_robustness }deviceExtensions; struct diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRendererCore.cpp b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRendererCore.cpp index 32ef7007..3e23b0aa 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRendererCore.cpp +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRendererCore.cpp @@ -298,8 +298,7 @@ PipelineInfo* VulkanRenderer::draw_createGraphicsPipeline(uint32 indexCount) // init pipeline compiler PipelineCompiler* pipelineCompiler = new PipelineCompiler(); - bool requiresRobustBufferAccess = PipelineCompiler::CalcRobustBufferAccessRequirement(vertexShader, pixelShader, geometryShader); - 
pipelineCompiler->InitFromCurrentGPUState(pipelineInfo, LatteGPUState.contextNew, vkFBO->GetRenderPassObj(), requiresRobustBufferAccess); + pipelineCompiler->InitFromCurrentGPUState(pipelineInfo, LatteGPUState.contextNew, vkFBO->GetRenderPassObj()); pipelineCompiler->TrackAsCached(vsBaseHash, pipelineHash); // use heuristics based on parameter patterns to determine if the current drawcall is essential (non-skipable) @@ -604,7 +603,7 @@ VkDescriptorSetInfo* VulkanRenderer::draw_getOrCreateDescriptorSet(PipelineInfo* const auto it = pipeline_info->vertex_ds_cache.find(stateHash); if (it != pipeline_info->vertex_ds_cache.cend()) return it->second; - descriptor_set_layout = pipeline_info->m_vkrObjPipeline->m_vertexDSL; + descriptor_set_layout = pipeline_info->m_vkrObjPipeline->vertexDSL; break; } case LatteConst::ShaderType::Pixel: @@ -612,7 +611,7 @@ VkDescriptorSetInfo* VulkanRenderer::draw_getOrCreateDescriptorSet(PipelineInfo* const auto it = pipeline_info->pixel_ds_cache.find(stateHash); if (it != pipeline_info->pixel_ds_cache.cend()) return it->second; - descriptor_set_layout = pipeline_info->m_vkrObjPipeline->m_pixelDSL; + descriptor_set_layout = pipeline_info->m_vkrObjPipeline->pixelDSL; break; } case LatteConst::ShaderType::Geometry: @@ -620,7 +619,7 @@ VkDescriptorSetInfo* VulkanRenderer::draw_getOrCreateDescriptorSet(PipelineInfo* const auto it = pipeline_info->geometry_ds_cache.find(stateHash); if (it != pipeline_info->geometry_ds_cache.cend()) return it->second; - descriptor_set_layout = pipeline_info->m_vkrObjPipeline->m_geometryDSL; + descriptor_set_layout = pipeline_info->m_vkrObjPipeline->geometryDSL; break; } default: @@ -1482,7 +1481,8 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 } auto vkObjPipeline = pipeline_info->m_vkrObjPipeline; - if (vkObjPipeline->GetPipeline() == VK_NULL_HANDLE) + + if (vkObjPipeline->pipeline == VK_NULL_HANDLE) { // invalid/uninitialized pipeline m_state.activeVertexDS = nullptr; @@ 
-1509,11 +1509,11 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 draw_setRenderPass(); - if (m_state.currentPipeline != vkObjPipeline->GetPipeline()) + if (m_state.currentPipeline != vkObjPipeline->pipeline) { - vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vkObjPipeline->GetPipeline()); + vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vkObjPipeline->pipeline); vkObjPipeline->flagForCurrentCommandBuffer(); - m_state.currentPipeline = vkObjPipeline->GetPipeline(); + m_state.currentPipeline = vkObjPipeline->pipeline; // depth bias if (pipeline_info->usesDepthBias) draw_updateDepthBias(true); @@ -1545,7 +1545,7 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 dsArray[1] = pixelDS->m_vkObjDescriptorSet->descriptorSet; vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, - vkObjPipeline->m_pipelineLayout, 0, 2, dsArray, numDynOffsetsVS + numDynOffsetsPS, + vkObjPipeline->pipeline_layout, 0, 2, dsArray, numDynOffsetsVS + numDynOffsetsPS, dynamicOffsets); } else if (vertexDS) @@ -1554,7 +1554,7 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX, dynamicOffsets, numDynOffsets, pipeline_info); vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, - vkObjPipeline->m_pipelineLayout, 0, 1, &vertexDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets, + vkObjPipeline->pipeline_layout, 0, 1, &vertexDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets, dynamicOffsets); } else if (pixelDS) @@ -1563,7 +1563,7 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT, dynamicOffsets, numDynOffsets, pipeline_info); 
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, - vkObjPipeline->m_pipelineLayout, 1, 1, &pixelDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets, + vkObjPipeline->pipeline_layout, 1, 1, &pixelDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets, dynamicOffsets); } if (geometryDS) @@ -1572,7 +1572,7 @@ void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY, dynamicOffsets, numDynOffsets, pipeline_info); vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, - vkObjPipeline->m_pipelineLayout, 2, 1, &geometryDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets, + vkObjPipeline->pipeline_layout, 2, 1, &geometryDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets, dynamicOffsets); } diff --git a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanSurfaceCopy.cpp b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanSurfaceCopy.cpp index e3e42012..f98eb452 100644 --- a/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanSurfaceCopy.cpp +++ b/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanSurfaceCopy.cpp @@ -357,7 +357,7 @@ CopySurfacePipelineInfo* VulkanRenderer::copySurface_getOrCreateGraphicsPipeline layoutInfo.bindingCount = (uint32_t)descriptorSetLayoutBindings.size(); layoutInfo.pBindings = descriptorSetLayoutBindings.data(); - if (vkCreateDescriptorSetLayout(m_logicalDevice, &layoutInfo, nullptr, &vkObjPipeline->m_pixelDSL) != VK_SUCCESS) + if (vkCreateDescriptorSetLayout(m_logicalDevice, &layoutInfo, nullptr, &vkObjPipeline->pixelDSL) != VK_SUCCESS) UnrecoverableError(fmt::format("Failed to create descriptor set layout for surface copy shader").c_str()); // ########################################################################################################################################## @@ -370,15 +370,15 @@ CopySurfacePipelineInfo* VulkanRenderer::copySurface_getOrCreateGraphicsPipeline 
VkPipelineLayoutCreateInfo pipelineLayoutInfo{}; pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipelineLayoutInfo.setLayoutCount = 1; - pipelineLayoutInfo.pSetLayouts = &vkObjPipeline->m_pixelDSL; + pipelineLayoutInfo.pSetLayouts = &vkObjPipeline->pixelDSL; pipelineLayoutInfo.pPushConstantRanges = &pushConstantRange; pipelineLayoutInfo.pushConstantRangeCount = 1; - VkResult result = vkCreatePipelineLayout(m_logicalDevice, &pipelineLayoutInfo, nullptr, &vkObjPipeline->m_pipelineLayout); + VkResult result = vkCreatePipelineLayout(m_logicalDevice, &pipelineLayoutInfo, nullptr, &vkObjPipeline->pipeline_layout); if (result != VK_SUCCESS) { cemuLog_log(LogType::Force, "Failed to create pipeline layout: {}", result); - vkObjPipeline->SetPipeline(VK_NULL_HANDLE); + vkObjPipeline->pipeline = VK_NULL_HANDLE; return copyPipeline; } @@ -425,7 +425,7 @@ CopySurfacePipelineInfo* VulkanRenderer::copySurface_getOrCreateGraphicsPipeline pipelineInfo.pRasterizationState = &rasterizer; pipelineInfo.pMultisampleState = &multisampling; pipelineInfo.pColorBlendState = state.destinationTexture->isDepth?nullptr:&colorBlending; - pipelineInfo.layout = vkObjPipeline->m_pipelineLayout; + pipelineInfo.layout = vkObjPipeline->pipeline_layout; pipelineInfo.renderPass = copyPipeline->vkObjRenderPass->m_renderPass; pipelineInfo.pDepthStencilState = &depthStencilState; pipelineInfo.subpass = 0; @@ -434,16 +434,17 @@ CopySurfacePipelineInfo* VulkanRenderer::copySurface_getOrCreateGraphicsPipeline copyPipeline->vkObjPipeline = vkObjPipeline; - VkPipeline pipeline = VK_NULL_HANDLE; - result = vkCreateGraphicsPipelines(m_logicalDevice, m_pipeline_cache, 1, &pipelineInfo, nullptr, &pipeline); + result = vkCreateGraphicsPipelines(m_logicalDevice, m_pipeline_cache, 1, &pipelineInfo, nullptr, ©Pipeline->vkObjPipeline->pipeline); if (result != VK_SUCCESS) { - copyPipeline->vkObjPipeline->SetPipeline(nullptr); cemuLog_log(LogType::Force, "Failed to create graphics pipeline for 
surface copy. Error {} Info:", (sint32)result); - cemu_assert_suspicious(); + cemu_assert_debug(false); + copyPipeline->vkObjPipeline->pipeline = VK_NULL_HANDLE; } - else - copyPipeline->vkObjPipeline->SetPipeline(pipeline); + //performanceMonitor.vk.numGraphicPipelines.increment(); + + //m_pipeline_cache_semaphore.notify(); + return copyPipeline; } @@ -521,7 +522,7 @@ VKRObjectDescriptorSet* VulkanRenderer::surfaceCopy_getOrCreateDescriptorSet(VkC allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; allocInfo.descriptorPool = m_descriptorPool; allocInfo.descriptorSetCount = 1; - allocInfo.pSetLayouts = &pipelineInfo->vkObjPipeline->m_pixelDSL; + allocInfo.pSetLayouts = &(pipelineInfo->vkObjPipeline->pixelDSL); if (vkAllocateDescriptorSets(m_logicalDevice, &allocInfo, &vkObjDescriptorSet->descriptorSet) != VK_SUCCESS) { @@ -643,7 +644,7 @@ void VulkanRenderer::surfaceCopy_viaDrawcall(LatteTextureVk* srcTextureVk, sint3 pushConstantData.srcTexelOffset[0] = 0; pushConstantData.srcTexelOffset[1] = 0; - vkCmdPushConstants(m_state.currentCommandBuffer, copySurfacePipelineInfo->vkObjPipeline->m_pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pushConstantData), &pushConstantData); + vkCmdPushConstants(m_state.currentCommandBuffer, copySurfacePipelineInfo->vkObjPipeline->pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pushConstantData), &pushConstantData); // draw VkRenderPassBeginInfo renderPassInfo{}; @@ -679,13 +680,13 @@ void VulkanRenderer::surfaceCopy_viaDrawcall(LatteTextureVk* srcTextureVk, sint3 vkCmdBeginRenderPass(m_state.currentCommandBuffer, &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE); - vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, copySurfacePipelineInfo->vkObjPipeline->GetPipeline()); + vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, copySurfacePipelineInfo->vkObjPipeline->pipeline); copySurfacePipelineInfo->vkObjPipeline->flagForCurrentCommandBuffer(); - 
m_state.currentPipeline = copySurfacePipelineInfo->vkObjPipeline->GetPipeline(); + m_state.currentPipeline = copySurfacePipelineInfo->vkObjPipeline->pipeline; vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, - copySurfacePipelineInfo->vkObjPipeline->m_pipelineLayout, 0, 1, &vkObjDescriptorSet->descriptorSet, 0, nullptr); + copySurfacePipelineInfo->vkObjPipeline->pipeline_layout, 0, 1, &vkObjDescriptorSet->descriptorSet, 0, nullptr); vkObjDescriptorSet->flagForCurrentCommandBuffer(); vkCmdDraw(m_state.currentCommandBuffer, 6, 1, 0, 0); diff --git a/src/Cafe/HW/MMU/MMU.cpp b/src/Cafe/HW/MMU/MMU.cpp index f88c3d0d..04ee8877 100644 --- a/src/Cafe/HW/MMU/MMU.cpp +++ b/src/Cafe/HW/MMU/MMU.cpp @@ -500,7 +500,7 @@ namespace MMU // todo - instead of passing the physical address to Read/WriteMMIO we should pass an interface id and a relative address? This would allow remapping the hardware address (tho we can just unregister + register at different addresses) - uint32 ReadMMIO_32(PAddr address) + uint16 ReadMMIO_32(PAddr address) { cemu_assert_debug((address & 0x3) == 0); auto itr = g_mmioHandlerR32->find(address); diff --git a/src/Cafe/HW/MMU/MMU.h b/src/Cafe/HW/MMU/MMU.h index a8367f88..794785fa 100644 --- a/src/Cafe/HW/MMU/MMU.h +++ b/src/Cafe/HW/MMU/MMU.h @@ -261,7 +261,7 @@ namespace MMU void WriteMMIO_32(PAddr address, uint32 value); void WriteMMIO_16(PAddr address, uint16 value); - uint32 ReadMMIO_32(PAddr address); + uint16 ReadMMIO_32(PAddr address); uint16 ReadMMIO_16(PAddr address); } diff --git a/src/Cafe/HW/SI/SI.cpp b/src/Cafe/HW/SI/SI.cpp index 026543d8..16cfb894 100644 --- a/src/Cafe/HW/SI/SI.cpp +++ b/src/Cafe/HW/SI/SI.cpp @@ -87,6 +87,7 @@ namespace HW_SI HWREG::SICOMCSR SI_COMCSR_R32(PAddr addr) { + //cemuLog_logDebug(LogType::Force, "Read SICOMCSR"); return g_si.registerState.sicomcsr; } diff --git a/src/Cafe/IOSU/PDM/iosu_pdm.cpp b/src/Cafe/IOSU/PDM/iosu_pdm.cpp index b9dda445..d94b1dbf 100644 --- 
a/src/Cafe/IOSU/PDM/iosu_pdm.cpp +++ b/src/Cafe/IOSU/PDM/iosu_pdm.cpp @@ -464,34 +464,5 @@ namespace iosu return static_cast(&sIOSUModuleNNPDM); } - - bool GameListStat::LastPlayDate::operator<(const LastPlayDate& b) const - { - const auto& a = *this; - - if(a.year < b.year) - return true; - if(a.year > b.year) - return false; - - // same year - if(a.month < b.month) - return true; - if(a.month > b.month) - return false; - - // same year and month - return a.day < b.day; - } - - bool GameListStat::LastPlayDate::operator==(const LastPlayDate& b) const - { - const auto& a = *this; - return a.year == b.year && - a.month == b.month && - a.day == b.day; - } - std::weak_ordering GameListStat::LastPlayDate::operator<=>(const LastPlayDate& b) const = default; - }; }; diff --git a/src/Cafe/IOSU/PDM/iosu_pdm.h b/src/Cafe/IOSU/PDM/iosu_pdm.h index 63f99a4a..0dd8a39d 100644 --- a/src/Cafe/IOSU/PDM/iosu_pdm.h +++ b/src/Cafe/IOSU/PDM/iosu_pdm.h @@ -21,15 +21,11 @@ namespace iosu /* Helper for UI game list */ struct GameListStat { - struct LastPlayDate + struct { uint32 year; // if 0 -> never played uint32 month; uint32 day; - - bool operator<(const LastPlayDate& b) const; - bool operator==(const LastPlayDate& b) const; - std::weak_ordering operator<=>(const LastPlayDate& b) const; }last_played; uint32 numMinutesPlayed; }; diff --git a/src/Cafe/IOSU/legacy/iosu_boss.cpp b/src/Cafe/IOSU/legacy/iosu_boss.cpp index 212d42a0..7ab25f68 100644 --- a/src/Cafe/IOSU/legacy/iosu_boss.cpp +++ b/src/Cafe/IOSU/legacy/iosu_boss.cpp @@ -502,7 +502,6 @@ namespace iosu curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, task_header_callback); curl_easy_setopt(curl, CURLOPT_HEADERDATA, &(*it)); curl_easy_setopt(curl, CURLOPT_TIMEOUT, 0x3C); - curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); if (IsNetworkServiceSSLDisabled(ActiveSettings::GetNetworkService())) { curl_easy_setopt(curl,CURLOPT_SSL_VERIFYPEER,0L); diff --git a/src/Cafe/IOSU/legacy/iosu_fpd.cpp 
b/src/Cafe/IOSU/legacy/iosu_fpd.cpp index a667e61c..28d248ae 100644 --- a/src/Cafe/IOSU/legacy/iosu_fpd.cpp +++ b/src/Cafe/IOSU/legacy/iosu_fpd.cpp @@ -132,7 +132,7 @@ namespace iosu void convertMultiByteStringToBigEndianWidechar(const char* input, uint16be* output, sint32 maxOutputLength) { - std::vector beStr = StringHelpers::FromUtf8(input); + std::basic_string beStr = StringHelpers::FromUtf8(input); if (beStr.size() >= maxOutputLength - 1) beStr.resize(maxOutputLength-1); for (size_t i = 0; i < beStr.size(); i++) @@ -723,7 +723,7 @@ namespace iosu { if(numVecIn != 0 || numVecOut != 1) return FPResult_InvalidIPCParam; - std::vector myComment; + std::basic_string myComment; if(g_fpd.nexFriendSession) { if(vecOut->size != MY_COMMENT_LENGTH * sizeof(uint16be)) @@ -735,8 +735,8 @@ namespace iosu g_fpd.nexFriendSession->getMyComment(myNexComment); myComment = StringHelpers::FromUtf8(myNexComment.commentString); } - myComment.insert(myComment.begin(), '\0'); - memcpy(vecOut->basePhys.GetPtr(), myComment.data(), MY_COMMENT_LENGTH * sizeof(uint16be)); + myComment.insert(0, 1, '\0'); + memcpy(vecOut->basePhys.GetPtr(), myComment.c_str(), MY_COMMENT_LENGTH * sizeof(uint16be)); return FPResult_Ok; } diff --git a/src/Cafe/OS/libs/TCL/TCL.cpp b/src/Cafe/OS/libs/TCL/TCL.cpp index 8b345b5e..d0c29d44 100644 --- a/src/Cafe/OS/libs/TCL/TCL.cpp +++ b/src/Cafe/OS/libs/TCL/TCL.cpp @@ -1,161 +1,28 @@ #include "Cafe/OS/common/OSCommon.h" #include "Cafe/OS/libs/TCL/TCL.h" -#include "HW/Latte/Core/LattePM4.h" - namespace TCL { - SysAllocator s_updateRetirementEvent; - uint64 s_currentRetireMarker = 0; - struct TCLStatePPC // mapped into PPC space + enum class TCL_SUBMISSION_FLAG : uint32 { - uint64be gpuRetireMarker; // written by GPU + SURFACE_SYNC = 0x400000, // submit surface sync packet before cmd + TRIGGER_INTERRUPT = 0x200000, // probably + UKN_20000000 = 0x20000000, }; - SysAllocator s_tclStatePPC; - - // called from GPU for timestamp EOP event - void 
TCLGPUNotifyNewRetirementTimestamp() + int TCLSubmitToRing(uint32be* cmd, uint32 cmdLen, uint32be* controlFlags, uint64* submissionTimestamp) { - // gpuRetireMarker is updated via event eop command - __OSLockScheduler(); - coreinit::OSSignalEventAllInternal(s_updateRetirementEvent.GetPtr()); - __OSUnlockScheduler(); - } + // todo - figure out all the bits of *controlFlags + // if submissionTimestamp != nullptr then set it to the timestamp of the submission. Note: We should make sure that uint64's are written atomically by the GPU command processor - int TCLTimestamp(TCLTimestampId id, uint64be* timestampOut) - { - if (id == TCLTimestampId::TIMESTAMP_LAST_BUFFER_RETIRED) - { - MEMPTR b; - // this is the timestamp of the last buffer that was retired by the GPU - stdx::atomic_ref retireTimestamp(s_tclStatePPC->gpuRetireMarker); - *timestampOut = retireTimestamp.load(); - return 0; - } - else - { - cemuLog_log(LogType::Force, "TCLTimestamp(): Unsupported timestamp ID {}", (uint32)id); - *timestampOut = 0; - return 0; - } - } + cemu_assert_debug(false); - int TCLWaitTimestamp(TCLTimestampId id, uint64 waitTs, uint64 timeout) - { - if (id == TCLTimestampId::TIMESTAMP_LAST_BUFFER_RETIRED) - { - while ( true ) - { - stdx::atomic_ref retireTimestamp(s_tclStatePPC->gpuRetireMarker); - uint64 currentTimestamp = retireTimestamp.load(); - if (currentTimestamp >= waitTs) - return 0; - coreinit::OSWaitEvent(s_updateRetirementEvent.GetPtr()); - } - } - else - { - cemuLog_log(LogType::Force, "TCLWaitTimestamp(): Unsupported timestamp ID {}", (uint32)id); - } - return 0; - } - - static constexpr uint32 TCL_RING_BUFFER_SIZE = 4096; // in U32s - - std::atomic tclRingBufferA[TCL_RING_BUFFER_SIZE]; - std::atomic tclRingBufferA_readIndex{0}; - uint32 tclRingBufferA_writeIndex{0}; - - // GPU code calls this to grab the next command word - bool TCLGPUReadRBWord(uint32& cmdWord) - { - if (tclRingBufferA_readIndex == tclRingBufferA_writeIndex) - return false; - cmdWord = 
tclRingBufferA[tclRingBufferA_readIndex]; - tclRingBufferA_readIndex = (tclRingBufferA_readIndex+1) % TCL_RING_BUFFER_SIZE; - return true; - } - - void TCLWaitForRBSpace(uint32be numU32s) - { - while ( true ) - { - uint32 distance = (tclRingBufferA_readIndex + TCL_RING_BUFFER_SIZE - tclRingBufferA_writeIndex) & (TCL_RING_BUFFER_SIZE - 1); - if (tclRingBufferA_writeIndex == tclRingBufferA_readIndex) // buffer completely empty - distance = TCL_RING_BUFFER_SIZE; - if (distance >= numU32s+1) // assume distance minus one, because we are never allowed to completely wrap around - break; - _mm_pause(); - } - } - - // this function assumes that TCLWaitForRBSpace was called and that there is enough space - void TCLWriteCmd(uint32be* cmd, uint32 cmdLen) - { - while (cmdLen > 0) - { - tclRingBufferA[tclRingBufferA_writeIndex] = *cmd; - tclRingBufferA_writeIndex++; - tclRingBufferA_writeIndex &= (TCL_RING_BUFFER_SIZE - 1); - cmd++; - cmdLen--; - } - } - - #define EVENT_TYPE_TS 5 - - void TCLSubmitRetireMarker(bool triggerEventInterrupt) - { - s_currentRetireMarker++; - uint32be cmd[6]; - cmd[0] = pm4HeaderType3(IT_EVENT_WRITE_EOP, 5); - cmd[1] = (4 | (EVENT_TYPE_TS << 8)); // event type (bits 8-15) and event index (bits 0-7). 
- cmd[2] = MEMPTR(&s_tclStatePPC->gpuRetireMarker).GetMPTR(); // address lower 32bits + data sel bits - cmd[3] = 0x40000000; // select 64bit write, lower 16 bits are the upper bits of the address - if (triggerEventInterrupt) - cmd[3] |= 0x2000000; // trigger interrupt after value has been written - cmd[4] = (uint32)s_currentRetireMarker; // data lower 32 bits - cmd[5] = (uint32)(s_currentRetireMarker>>32); // data higher 32 bits - TCLWriteCmd(cmd, 6); - } - - int TCLSubmitToRing(uint32be* cmd, uint32 cmdLen, betype* controlFlags, uint64be* timestampValueOut) - { - TCLSubmissionFlag flags = *controlFlags; - cemu_assert_debug(timestampValueOut); // handle case where this is null - - // make sure there is enough space to submit all commands at one - uint32 totalCommandLength = cmdLen; - totalCommandLength += 6; // space needed for TCLSubmitRetireMarker - - TCLWaitForRBSpace(totalCommandLength); - - // submit command buffer - TCLWriteCmd(cmd, cmdLen); - - // create new marker timestamp and tell GPU to write it to our variable after its done processing the command - if ((HAS_FLAG(flags, TCLSubmissionFlag::USE_RETIRED_MARKER))) - { - TCLSubmitRetireMarker(!HAS_FLAG(flags, TCLSubmissionFlag::NO_MARKER_INTERRUPT)); - *timestampValueOut = s_currentRetireMarker; // incremented before each submit - } - else - { - cemu_assert_unimplemented(); - } return 0; } void Initialize() { cafeExportRegister("TCL", TCLSubmitToRing, LogType::Placeholder); - cafeExportRegister("TCL", TCLTimestamp, LogType::Placeholder); - cafeExportRegister("TCL", TCLWaitTimestamp, LogType::Placeholder); - - s_currentRetireMarker = 0; - s_tclStatePPC->gpuRetireMarker = 0; - coreinit::OSInitEvent(s_updateRetirementEvent.GetPtr(), coreinit::OSEvent::EVENT_STATE::STATE_NOT_SIGNALED, coreinit::OSEvent::EVENT_MODE::MODE_AUTO); } } diff --git a/src/Cafe/OS/libs/TCL/TCL.h b/src/Cafe/OS/libs/TCL/TCL.h index 35f0a6bf..ab5358b0 100644 --- a/src/Cafe/OS/libs/TCL/TCL.h +++ b/src/Cafe/OS/libs/TCL/TCL.h @@ -1,25 +1,4 @@ 
namespace TCL { - enum class TCLTimestampId - { - TIMESTAMP_LAST_BUFFER_RETIRED = 1, - }; - - enum class TCLSubmissionFlag : uint32 - { - SURFACE_SYNC = 0x400000, // submit surface sync packet before cmd - NO_MARKER_INTERRUPT = 0x200000, - USE_RETIRED_MARKER = 0x20000000, // Controls whether the timer is updated before or after (retired) the cmd. Also controls which timestamp is returned for the submission. Before and after using separate counters - }; - - int TCLTimestamp(TCLTimestampId id, uint64be* timestampOut); - int TCLWaitTimestamp(TCLTimestampId id, uint64 waitTs, uint64 timeout); - int TCLSubmitToRing(uint32be* cmd, uint32 cmdLen, betype* controlFlags, uint64be* timestampValueOut); - - // called from Latte code - bool TCLGPUReadRBWord(uint32& cmdWord); - void TCLGPUNotifyNewRetirementTimestamp(); - void Initialize(); -} -ENABLE_BITMASK_OPERATORS(TCL::TCLSubmissionFlag); +} \ No newline at end of file diff --git a/src/Cafe/OS/libs/coreinit/coreinit_FS.cpp b/src/Cafe/OS/libs/coreinit/coreinit_FS.cpp index 12ddb8df..0fc8912f 100644 --- a/src/Cafe/OS/libs/coreinit/coreinit_FS.cpp +++ b/src/Cafe/OS/libs/coreinit/coreinit_FS.cpp @@ -742,8 +742,7 @@ namespace coreinit } __FSCmdSubmitResult(cmd, fsStatus); - // dont read from cmd after this point, since the game could already have modified it - __FSUpdateQueue(&client->fsCmdQueue); + __FSUpdateQueue(&cmd->fsClientBody->fsCmdQueue); osLib_returnFromFunction(hCPU, 0); } diff --git a/src/Cafe/OS/libs/coreinit/coreinit_OSScreen.cpp b/src/Cafe/OS/libs/coreinit/coreinit_OSScreen.cpp index 7b51629e..371ead3c 100644 --- a/src/Cafe/OS/libs/coreinit/coreinit_OSScreen.cpp +++ b/src/Cafe/OS/libs/coreinit/coreinit_OSScreen.cpp @@ -96,6 +96,7 @@ namespace coreinit { ppcDefineParamU32(screenIndex, 0); cemu_assert(screenIndex < 2); + cemuLog_logDebug(LogType::Force, "OSScreenFlipBuffersEx {}", screenIndex); LatteGPUState.osScreen.screen[screenIndex].flipRequestCount++; _updateCurrentDrawScreen(screenIndex); 
osLib_returnFromFunction(hCPU, 0); diff --git a/src/Cafe/OS/libs/coreinit/coreinit_Thread.cpp b/src/Cafe/OS/libs/coreinit/coreinit_Thread.cpp index 2f89000b..870d1850 100644 --- a/src/Cafe/OS/libs/coreinit/coreinit_Thread.cpp +++ b/src/Cafe/OS/libs/coreinit/coreinit_Thread.cpp @@ -25,11 +25,7 @@ void nnNfp_update(); namespace coreinit { -#ifdef __arm64__ - void __OSFiberThreadEntry(uint32, uint32); -#else void __OSFiberThreadEntry(void* thread); -#endif void __OSAddReadyThreadToRunQueue(OSThread_t* thread); void __OSRemoveThreadFromRunQueues(OSThread_t* thread); }; @@ -53,7 +49,7 @@ namespace coreinit struct OSHostThread { - OSHostThread(OSThread_t* thread) : m_thread(thread), m_fiber((void(*)(void*))__OSFiberThreadEntry, this, this) + OSHostThread(OSThread_t* thread) : m_thread(thread), m_fiber(__OSFiberThreadEntry, this, this) { } @@ -717,10 +713,7 @@ namespace coreinit thread->id = 0x8000; if (!thread->deallocatorFunc.IsNull()) - { __OSQueueThreadDeallocation(thread); - PPCCore_switchToSchedulerWithLock(); // make sure the deallocation function runs before we return - } __OSUnlockScheduler(); @@ -1311,14 +1304,8 @@ namespace coreinit __OSThreadStartTimeslice(hostThread->m_thread, &hostThread->ppcInstance); } -#ifdef __arm64__ - void __OSFiberThreadEntry(uint32 _high, uint32 _low) - { - uint64 _thread = (uint64) _high << 32 | _low; -#else void __OSFiberThreadEntry(void* _thread) { -#endif OSHostThread* hostThread = (OSHostThread*)_thread; #if defined(ARCH_X86_64) @@ -1528,7 +1515,7 @@ namespace coreinit } // queue thread deallocation to run after current thread finishes - // the termination threads run at a higher priority on the same core + // the termination threads run at a higher priority on the same threads void __OSQueueThreadDeallocation(OSThread_t* thread) { uint32 coreIndex = OSGetCoreId(); diff --git a/src/Cafe/OS/libs/dmae/dmae.cpp b/src/Cafe/OS/libs/dmae/dmae.cpp index c35fce2e..7c513784 100644 --- a/src/Cafe/OS/libs/dmae/dmae.cpp +++ 
b/src/Cafe/OS/libs/dmae/dmae.cpp @@ -36,16 +36,6 @@ void dmaeExport_DMAECopyMem(PPCInterpreter_t* hCPU) dstBuffer[i] = _swapEndianU32(srcBuffer[i]); } } - else if( hCPU->gpr[6] == DMAE_ENDIAN_16 ) - { - // swap per uint16 - uint16* srcBuffer = (uint16*)memory_getPointerFromVirtualOffset(hCPU->gpr[4]); - uint16* dstBuffer = (uint16*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); - for(uint32 i=0; igpr[5]*2; i++) - { - dstBuffer[i] = _swapEndianU16(srcBuffer[i]); - } - } else { cemuLog_logDebug(LogType::Force, "DMAECopyMem(): Unsupported endian swap\n"); diff --git a/src/Cafe/OS/libs/gx2/GX2.cpp b/src/Cafe/OS/libs/gx2/GX2.cpp index 1c3a8dcc..593d31fb 100644 --- a/src/Cafe/OS/libs/gx2/GX2.cpp +++ b/src/Cafe/OS/libs/gx2/GX2.cpp @@ -59,7 +59,7 @@ void gx2Export_GX2SwapScanBuffers(PPCInterpreter_t* hCPU) if (isPokken) GX2::GX2DrawDone(); - GX2::GX2ReserveCmdSpace(5+2); + GX2ReserveCmdSpace(5+2); uint64 tick64 = PPCInterpreter_getMainCoreCycleCounter() / 20ULL; lastSwapTime = tick64; @@ -86,16 +86,24 @@ void gx2Export_GX2SwapScanBuffers(PPCInterpreter_t* hCPU) GX2::GX2WaitForFlip(); } + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); osLib_returnFromFunction(hCPU, 0); } void gx2Export_GX2CopyColorBufferToScanBuffer(PPCInterpreter_t* hCPU) { cemuLog_log(LogType::GX2, "GX2CopyColorBufferToScanBuffer(0x{:08x},{})", hCPU->gpr[3], hCPU->gpr[4]); - GX2::GX2ReserveCmdSpace(10); + GX2ReserveCmdSpace(5); // todo: proper implementation + // hack: Avoid running to far ahead of GPU. 
Normally this would be guaranteed by the circular buffer model, which we currently dont fully emulate + if(GX2::GX2WriteGather_getReadWriteDistance() > 32*1024*1024 ) + { + debug_printf("Waiting for GPU to catch up...\n"); + PPCInterpreter_relinquishTimeslice(); // release current thread + return; + } GX2ColorBuffer* colorBuffer = (GX2ColorBuffer*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_HLE_COPY_COLORBUFFER_TO_SCANBUFFER, 9)); @@ -301,6 +309,81 @@ void gx2Export_GX2SetSemaphore(PPCInterpreter_t* hCPU) osLib_returnFromFunction(hCPU, 0); } +void gx2Export_GX2Flush(PPCInterpreter_t* hCPU) +{ + cemuLog_log(LogType::GX2, "GX2Flush()"); + _GX2SubmitToTCL(); + osLib_returnFromFunction(hCPU, 0); +} + +uint8* _GX2LastFlushPtr[PPC_CORE_COUNT] = {NULL}; + +uint64 _prevReturnedGPUTime = 0; + +uint64 Latte_GetTime() +{ + uint64 gpuTime = coreinit::OSGetSystemTime(); + gpuTime *= 20000ULL; + if (gpuTime <= _prevReturnedGPUTime) + gpuTime = _prevReturnedGPUTime + 1; // avoid ever returning identical timestamps + _prevReturnedGPUTime = gpuTime; + return gpuTime; +} + +void _GX2SubmitToTCL() +{ + uint32 coreIndex = PPCInterpreter_getCoreIndex(PPCInterpreter_getCurrentInstance()); + // do nothing if called from non-main GX2 core + if (GX2::sGX2MainCoreIndex != coreIndex) + { + cemuLog_logDebug(LogType::Force, "_GX2SubmitToTCL() called on non-main GX2 core"); + return; + } + if( gx2WriteGatherPipe.displayListStart[coreIndex] != MPTR_NULL ) + return; // quit if in display list + _GX2LastFlushPtr[coreIndex] = (gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex]); + // update last submitted CB timestamp + uint64 commandBufferTimestamp = Latte_GetTime(); + LatteGPUState.lastSubmittedCommandBufferTimestamp.store(commandBufferTimestamp); + cemuLog_log(LogType::GX2, "Submitting GX2 command buffer with timestamp {:016x}", commandBufferTimestamp); + // submit HLE packet to write retirement timestamp + 
gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_HLE_SET_CB_RETIREMENT_TIMESTAMP, 2)); + gx2WriteGather_submitU32AsBE((uint32)(commandBufferTimestamp>>32ULL)); + gx2WriteGather_submitU32AsBE((uint32)(commandBufferTimestamp&0xFFFFFFFFULL)); +} + +uint32 _GX2GetUnflushedBytes(uint32 coreIndex) +{ + uint32 unflushedBytes = 0; + if (_GX2LastFlushPtr[coreIndex] != NULL) + { + if (_GX2LastFlushPtr[coreIndex] > gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex]) + unflushedBytes = (uint32)(gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] - gx2WriteGatherPipe.gxRingBuffer + 4); // this isn't 100% correct since we ignore the bytes between the last flush address and the start of the wrap around + else + unflushedBytes = (uint32)(gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] - _GX2LastFlushPtr[coreIndex]); + } + else + unflushedBytes = (uint32)(gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] - gx2WriteGatherPipe.gxRingBuffer); + return unflushedBytes; +} + +/* + * Guarantees that the requested amount of space is available on the current command buffer + * If the space is not available, the current command buffer is pushed to the GPU and a new one is allocated + */ +void GX2ReserveCmdSpace(uint32 reservedFreeSpaceInU32) +{ + uint32 coreIndex = coreinit::OSGetCoreId(); + // if we are in a display list then do nothing + if( gx2WriteGatherPipe.displayListStart[coreIndex] != MPTR_NULL ) + return; + uint32 unflushedBytes = _GX2GetUnflushedBytes(coreIndex); + if( unflushedBytes >= 0x1000 ) + { + _GX2SubmitToTCL(); + } +} + void gx2_load() { osLib_addFunction("gx2", "GX2GetContextStateDisplayList", gx2Export_GX2GetContextStateDisplayList); @@ -362,6 +445,10 @@ void gx2_load() // semaphore osLib_addFunction("gx2", "GX2SetSemaphore", gx2Export_GX2SetSemaphore); + // command buffer + osLib_addFunction("gx2", "GX2Flush", gx2Export_GX2Flush); + + GX2::GX2Init_writeGather(); GX2::GX2MemInit(); GX2::GX2ResourceInit(); GX2::GX2CommandInit(); diff --git 
a/src/Cafe/OS/libs/gx2/GX2.h b/src/Cafe/OS/libs/gx2/GX2.h index 92452864..a22719f4 100644 --- a/src/Cafe/OS/libs/gx2/GX2.h +++ b/src/Cafe/OS/libs/gx2/GX2.h @@ -67,4 +67,10 @@ void gx2Export_GX2MarkScanBufferCopied(PPCInterpreter_t* hCPU); void gx2Export_GX2SetDefaultState(PPCInterpreter_t* hCPU); void gx2Export_GX2SetupContextStateEx(PPCInterpreter_t* hCPU); -void gx2Export_GX2SetContextState(PPCInterpreter_t* hCPU); \ No newline at end of file +void gx2Export_GX2SetContextState(PPCInterpreter_t* hCPU); + +// command buffer + +uint32 _GX2GetUnflushedBytes(uint32 coreIndex); +void _GX2SubmitToTCL(); +void GX2ReserveCmdSpace(uint32 reservedFreeSpaceInU32); \ No newline at end of file diff --git a/src/Cafe/OS/libs/gx2/GX2_Blit.cpp b/src/Cafe/OS/libs/gx2/GX2_Blit.cpp index 6e0db6aa..c8080f38 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Blit.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Blit.cpp @@ -82,88 +82,35 @@ namespace GX2 } } - void SubmitHLEClear(GX2ColorBuffer* colorBuffer, float colorRGBA[4], GX2DepthBuffer* depthBuffer, float depthClearValue, uint8 stencilClearValue, bool clearColor, bool clearDepth, bool clearStencil) - { - GX2ReserveCmdSpace(50); - uint32 hleClearFlags = 0; - if (clearColor) - hleClearFlags |= 1; - if (clearDepth) - hleClearFlags |= 2; - if (clearStencil) - hleClearFlags |= 4; - // color buffer - MPTR colorPhysAddr = MPTR_NULL; - uint32 colorFormat = 0; - uint32 colorTileMode = 0; - uint32 colorWidth = 0; - uint32 colorHeight = 0; - uint32 colorPitch = 0; - uint32 colorFirstSlice = 0; - uint32 colorNumSlices = 0; - if (colorBuffer != nullptr) - { - colorPhysAddr = memory_virtualToPhysical(colorBuffer->surface.imagePtr); - colorFormat = (uint32)colorBuffer->surface.format.value(); - colorTileMode = (uint32)colorBuffer->surface.tileMode.value(); - colorWidth = colorBuffer->surface.width; - colorHeight = colorBuffer->surface.height; - colorPitch = colorBuffer->surface.pitch; - colorFirstSlice = _swapEndianU32(colorBuffer->viewFirstSlice); - colorNumSlices = 
_swapEndianU32(colorBuffer->viewNumSlices); - } - // depth buffer - MPTR depthPhysAddr = MPTR_NULL; - uint32 depthFormat = 0; - uint32 depthTileMode = 0; - uint32 depthWidth = 0; - uint32 depthHeight = 0; - uint32 depthPitch = 0; - uint32 depthFirstSlice = 0; - uint32 depthNumSlices = 0; - if (depthBuffer != nullptr) - { - depthPhysAddr = memory_virtualToPhysical(depthBuffer->surface.imagePtr); - depthFormat = (uint32)depthBuffer->surface.format.value(); - depthTileMode = (uint32)depthBuffer->surface.tileMode.value(); - depthWidth = depthBuffer->surface.width; - depthHeight = depthBuffer->surface.height; - depthPitch = depthBuffer->surface.pitch; - depthFirstSlice = _swapEndianU32(depthBuffer->viewFirstSlice); - depthNumSlices = _swapEndianU32(depthBuffer->viewNumSlices); - } - gx2WriteGather_submit(pm4HeaderType3(IT_HLE_CLEAR_COLOR_DEPTH_STENCIL, 23), - hleClearFlags, - colorPhysAddr, - colorFormat, - colorTileMode, - colorWidth, - colorHeight, - colorPitch, - colorFirstSlice, - colorNumSlices, - depthPhysAddr, - depthFormat, - depthTileMode, - depthWidth, - depthHeight, - depthPitch, - depthFirstSlice, - depthNumSlices, - (uint32)(colorRGBA[0] * 255.0f), - (uint32)(colorRGBA[1] * 255.0f), - (uint32)(colorRGBA[2] * 255.0f), - (uint32)(colorRGBA[3] * 255.0f), - *(uint32*)&depthClearValue, - stencilClearValue&0xFF); - } - void GX2ClearColor(GX2ColorBuffer* colorBuffer, float r, float g, float b, float a) { + GX2ReserveCmdSpace(50); if ((colorBuffer->surface.resFlag & GX2_RESFLAG_USAGE_COLOR_BUFFER) != 0) { - float colorRGBA[4] = { r, g, b, a }; - SubmitHLEClear(colorBuffer, colorRGBA, nullptr, 0.0f, 0, true, false, false); + gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_HLE_CLEAR_COLOR_DEPTH_STENCIL, 23)); + gx2WriteGather_submitU32AsBE(1); // color (1) + gx2WriteGather_submitU32AsBE(memory_virtualToPhysical(colorBuffer->surface.imagePtr)); + gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.format.value()); + 
gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.tileMode.value()); + gx2WriteGather_submitU32AsBE(colorBuffer->surface.width); + gx2WriteGather_submitU32AsBE(colorBuffer->surface.height); + gx2WriteGather_submitU32AsBE(colorBuffer->surface.pitch); + gx2WriteGather_submitU32AsBE(_swapEndianU32(colorBuffer->viewFirstSlice)); + gx2WriteGather_submitU32AsBE(_swapEndianU32(colorBuffer->viewNumSlices)); + gx2WriteGather_submitU32AsBE(MPTR_NULL); + gx2WriteGather_submitU32AsBE(0); // depth buffer format + gx2WriteGather_submitU32AsBE(0); // tilemode for depth buffer + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE((uint32)(r * 255.0f)); + gx2WriteGather_submitU32AsBE((uint32)(g * 255.0f)); + gx2WriteGather_submitU32AsBE((uint32)(b * 255.0f)); + gx2WriteGather_submitU32AsBE((uint32)(a * 255.0f)); + gx2WriteGather_submitU32AsBE(0); // clear depth + gx2WriteGather_submitU32AsBE(0); // clear stencil } else { @@ -173,6 +120,7 @@ namespace GX2 void GX2ClearBuffersEx(GX2ColorBuffer* colorBuffer, GX2DepthBuffer* depthBuffer, float r, float g, float b, float a, float depthClearValue, uint8 stencilClearValue, GX2ClearFlags clearFlags) { + GX2ReserveCmdSpace(50); _updateDepthStencilClearRegs(depthClearValue, stencilClearValue, clearFlags); uint32 hleClearFlags = 0; @@ -182,13 +130,42 @@ namespace GX2 hleClearFlags |= 4; hleClearFlags |= 1; - float colorRGBA[4] = { r, g, b, a }; - SubmitHLEClear(colorBuffer, colorRGBA, depthBuffer, depthClearValue, stencilClearValue, true, (clearFlags & GX2ClearFlags::CLEAR_DEPTH) != 0, (clearFlags & GX2ClearFlags::CLEAR_STENCIL) != 0); + // send command to clear color, depth and stencil + if (_swapEndianU32(colorBuffer->viewFirstSlice) != 0) + debugBreakpoint(); + gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_HLE_CLEAR_COLOR_DEPTH_STENCIL, 23)); + 
gx2WriteGather_submitU32AsBE(hleClearFlags); // color (1), depth (2), stencil (4) + gx2WriteGather_submitU32AsBE(memory_virtualToPhysical(colorBuffer->surface.imagePtr)); + gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.format.value()); + gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.tileMode.value()); + gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.width); + gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.height); + gx2WriteGather_submitU32AsBE((uint32)colorBuffer->surface.pitch); + gx2WriteGather_submitU32AsBE(_swapEndianU32(colorBuffer->viewFirstSlice)); + gx2WriteGather_submitU32AsBE(_swapEndianU32(colorBuffer->viewNumSlices)); + gx2WriteGather_submitU32AsBE(memory_virtualToPhysical(depthBuffer->surface.imagePtr)); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.format.value()); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.tileMode.value()); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.width); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.height); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.pitch); + gx2WriteGather_submitU32AsBE(_swapEndianU32(depthBuffer->viewFirstSlice)); + gx2WriteGather_submitU32AsBE(_swapEndianU32(depthBuffer->viewNumSlices)); + + gx2WriteGather_submitU32AsBE((uint32)(r * 255.0f)); + gx2WriteGather_submitU32AsBE((uint32)(g * 255.0f)); + gx2WriteGather_submitU32AsBE((uint32)(b * 255.0f)); + gx2WriteGather_submitU32AsBE((uint32)(a * 255.0f)); + + gx2WriteGather_submitU32AsBE(*(uint32*)&depthClearValue); // clear depth + gx2WriteGather_submitU32AsBE(stencilClearValue&0xFF); // clear stencil } // always uses passed depthClearValue/stencilClearValue for clearing, even if clear flags dont specify value updates void GX2ClearDepthStencilEx(GX2DepthBuffer* depthBuffer, float depthClearValue, uint8 stencilClearValue, GX2ClearFlags clearFlags) { + GX2ReserveCmdSpace(50); + if (!depthBuffer && (depthBuffer->surface.width == 0 || 
depthBuffer->surface.height == 0)) { // Super Smash Bros tries to clear an uninitialized depth surface? @@ -198,8 +175,41 @@ namespace GX2 _updateDepthStencilClearRegs(depthClearValue, stencilClearValue, clearFlags); - float colorRGBA[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; - SubmitHLEClear(nullptr, colorRGBA, depthBuffer, depthClearValue, stencilClearValue, false, (clearFlags & GX2ClearFlags::CLEAR_DEPTH) != 0, (clearFlags & GX2ClearFlags::CLEAR_STENCIL) != 0); + uint32 hleClearFlags = 0; + if ((clearFlags & GX2ClearFlags::CLEAR_DEPTH) != 0) + hleClearFlags |= 2; + if ((clearFlags & GX2ClearFlags::CLEAR_STENCIL) != 0) + hleClearFlags |= 4; + + // send command to clear color, depth and stencil + if (hleClearFlags != 0) + { + gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_HLE_CLEAR_COLOR_DEPTH_STENCIL, 23)); + gx2WriteGather_submitU32AsBE(hleClearFlags); // color (1), depth (2), stencil (4) + gx2WriteGather_submitU32AsBE(MPTR_NULL); + gx2WriteGather_submitU32AsBE(0); // format for color buffer + gx2WriteGather_submitU32AsBE(0); // tilemode for color buffer + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(memory_virtualToPhysical(depthBuffer->surface.imagePtr)); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.format.value()); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.tileMode.value()); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.width); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.height); + gx2WriteGather_submitU32AsBE((uint32)depthBuffer->surface.pitch); + gx2WriteGather_submitU32AsBE(_swapEndianU32(depthBuffer->viewFirstSlice)); + gx2WriteGather_submitU32AsBE(_swapEndianU32(depthBuffer->viewNumSlices)); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + gx2WriteGather_submitU32AsBE(0); + + 
gx2WriteGather_submitU32AsBE(*(uint32*)&depthClearValue); // clear depth + gx2WriteGather_submitU32AsBE(stencilClearValue & 0xFF); // clear stencil + } } void GX2BlitInit() diff --git a/src/Cafe/OS/libs/gx2/GX2_Command.cpp b/src/Cafe/OS/libs/gx2/GX2_Command.cpp index d12bf210..ec96a4ff 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Command.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Command.cpp @@ -4,402 +4,178 @@ #include "Cafe/HW/Latte/Core/LattePM4.h" #include "Cafe/OS/libs/coreinit/coreinit.h" #include "Cafe/OS/libs/coreinit/coreinit_Thread.h" -#include "Cafe/OS/libs/TCL/TCL.h" #include "Cafe/HW/Latte/ISA/RegDefines.h" #include "GX2.h" #include "GX2_Command.h" #include "GX2_Shader.h" #include "GX2_Misc.h" -#include "OS/libs/coreinit/coreinit_MEM.h" -namespace GX2 -{ - GX2PerCoreCBState s_perCoreCBState[Espresso::CORE_COUNT]; -} +extern uint8* gxRingBufferReadPtr; + +GX2WriteGatherPipeState gx2WriteGatherPipe = { 0 }; void gx2WriteGather_submitU32AsBE(uint32 v) { uint32 coreIndex = PPCInterpreter_getCoreIndex(PPCInterpreter_getCurrentInstance()); - if (GX2::s_perCoreCBState[coreIndex].currentWritePtr == nullptr) + if (gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] == NULL) return; - *(uint32*)(GX2::s_perCoreCBState[coreIndex].currentWritePtr) = _swapEndianU32(v); - GX2::s_perCoreCBState[coreIndex].currentWritePtr++; - cemu_assert_debug(GX2::s_perCoreCBState[coreIndex].currentWritePtr <= (GX2::s_perCoreCBState[coreIndex].bufferPtr + GX2::s_perCoreCBState[coreIndex].bufferSizeInU32s)); + *(uint32*)(*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]) = _swapEndianU32(v); + (*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]) += 4; } void gx2WriteGather_submitU32AsLE(uint32 v) { uint32 coreIndex = PPCInterpreter_getCoreIndex(PPCInterpreter_getCurrentInstance()); - if (GX2::s_perCoreCBState[coreIndex].currentWritePtr == nullptr) + if (gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] == NULL) return; - *(uint32*)(GX2::s_perCoreCBState[coreIndex].currentWritePtr) = v; - 
GX2::s_perCoreCBState[coreIndex].currentWritePtr++; - cemu_assert_debug(GX2::s_perCoreCBState[coreIndex].currentWritePtr <= (GX2::s_perCoreCBState[coreIndex].bufferPtr + GX2::s_perCoreCBState[coreIndex].bufferSizeInU32s)); + *(uint32*)(*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]) = v; + (*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]) += 4; } void gx2WriteGather_submitU32AsLEArray(uint32* v, uint32 numValues) { uint32 coreIndex = PPCInterpreter_getCoreIndex(PPCInterpreter_getCurrentInstance()); - if (GX2::s_perCoreCBState[coreIndex].currentWritePtr == nullptr) + if (gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] == NULL) return; - memcpy_dwords(GX2::s_perCoreCBState[coreIndex].currentWritePtr, v, numValues); - GX2::s_perCoreCBState[coreIndex].currentWritePtr += numValues; - cemu_assert_debug(GX2::s_perCoreCBState[coreIndex].currentWritePtr <= (GX2::s_perCoreCBState[coreIndex].bufferPtr + GX2::s_perCoreCBState[coreIndex].bufferSizeInU32s)); + memcpy_dwords((*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]), v, numValues); + (*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]) += 4 * numValues; } namespace GX2 { + sint32 gx2WriteGatherCurrentMainCoreIndex = -1; + bool gx2WriteGatherInited = false; - struct GX2CommandState // mapped to PPC space since the GPU writes here + void GX2WriteGather_ResetToDefaultState() { - // command pool - MEMPTR commandPoolBase; - uint32 commandPoolSizeInU32s; - MEMPTR gpuCommandReadPtr; - // timestamp - uint64be lastSubmissionTime; - }; + gx2WriteGatherCurrentMainCoreIndex = -1; + gx2WriteGatherInited = false; + } - SysAllocator s_commandState; - GX2PerCoreCBState s_mainCoreLastCommandState; - bool s_cbBufferIsInternallyAllocated; - - void GX2Command_StartNewCommandBuffer(uint32 numU32s); - - // called from GX2Init. 
Allocates a 4MB memory chunk from which command buffers are suballocated from - void GX2Init_commandBufferPool(void* bufferBase, uint32 bufferSize) + void GX2Init_writeGather() // init write gather, make current core { - cemu_assert_debug(!s_commandState->commandPoolBase); // should not be allocated already - // setup command buffer pool. If not provided allocate a 4MB or custom size buffer - uint32 poolSize = bufferSize ? bufferSize : 0x400000; // 4MB (can be overwritten by custom GX2Init parameters?) - if (bufferBase) + if (gx2WriteGatherPipe.gxRingBuffer == NULL) + gx2WriteGatherPipe.gxRingBuffer = (uint8*)malloc(GX2_COMMAND_RING_BUFFER_SIZE); + if (gx2WriteGatherCurrentMainCoreIndex == sGX2MainCoreIndex) + return; // write gather already configured for same core + for (sint32 i = 0; i < PPC_CORE_COUNT; i++) { - s_commandState->commandPoolBase = (uint32be*)bufferBase; - s_cbBufferIsInternallyAllocated = false; - } - else - { - s_commandState->commandPoolBase = (uint32be*)coreinit::_weak_MEMAllocFromDefaultHeapEx(poolSize, 0x100); - s_cbBufferIsInternallyAllocated = true; - } - if (!s_commandState->commandPoolBase) - { - cemuLog_log(LogType::Force, "GX2: Failed to allocate command buffer pool"); - } - s_commandState->commandPoolSizeInU32s = poolSize / sizeof(uint32be); - s_commandState->gpuCommandReadPtr = s_commandState->commandPoolBase; - // init per-core command buffer state - for (uint32 i = 0; i < Espresso::CORE_COUNT; i++) - { - s_perCoreCBState[i].bufferPtr = nullptr; - s_perCoreCBState[i].bufferSizeInU32s = 0; - s_perCoreCBState[i].currentWritePtr = nullptr; - } - // start first command buffer for main core - GX2Command_StartNewCommandBuffer(0x100); - } - - void GX2Shutdown_commandBufferPool() - { - if (!s_commandState->commandPoolBase) - return; - if (s_cbBufferIsInternallyAllocated) - coreinit::_weak_MEMFreeToDefaultHeap(s_commandState->commandPoolBase.GetPtr()); - s_cbBufferIsInternallyAllocated = false; - s_commandState->commandPoolBase = nullptr; - 
s_commandState->commandPoolSizeInU32s = 0; - s_commandState->gpuCommandReadPtr = nullptr; - } - - // current position of where the GPU is reading from. Updated via a memory write command submitted to the GPU - uint32 GX2Command_GetPoolGPUReadIndex() - { - stdx::atomic_ref> _readPtr(s_commandState->gpuCommandReadPtr); - MEMPTR currentReadPtr = _readPtr.load(); - cemu_assert_debug(currentReadPtr); - return (uint32)(currentReadPtr.GetPtr() - s_commandState->commandPoolBase.GetPtr()); - } - - void GX2Command_WaitForNextBufferRetired() - { - uint64 retiredTimeStamp = GX2GetRetiredTimeStamp(); - retiredTimeStamp += 1; - // but cant be higher than the submission timestamp - stdx::atomic_ref _lastSubmissionTime(s_commandState->lastSubmissionTime); - uint64 submissionTimeStamp = _lastSubmissionTime.load(); - if (retiredTimeStamp > submissionTimeStamp) - retiredTimeStamp = submissionTimeStamp; - GX2WaitTimeStamp(retiredTimeStamp); - } - - void GX2Command_SetupCoreCommandBuffer(uint32be* buffer, uint32 sizeInU32s, bool isDisplayList) - { - uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - coreCBState.bufferPtr = buffer; - coreCBState.bufferSizeInU32s = sizeInU32s; - coreCBState.currentWritePtr = buffer; - coreCBState.isDisplayList = isDisplayList; - } - - void GX2Command_StartNewCommandBuffer(uint32 numU32s) - { - // On submission command buffers are padded to 32 byte alignment - // but nowhere is it guaranteed that internal command buffers have their size aligned to 32 byte (even on console, but testing is required) - // Thus the padding can write out of bounds but this seems to trigger only very rarely in partice. 
As a workaround we always pad the command buffer size to 32 bytes here - numU32s = (numU32s + 7) & ~0x7; - - uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - numU32s = std::max(numU32s, 0x100); - // grab space from command buffer pool and if necessary wait for it - uint32be* bufferPtr = nullptr; - uint32 bufferSizeInU32s = 0; - uint32 readIndex; - while (true) - { - // try to grab buffer data from first available spot: - // 1. At the current write location up to the end of the buffer (avoiding an overlap with the read location) - // 2. From the start of the buffer up to the read location - readIndex = GX2Command_GetPoolGPUReadIndex(); - uint32be* nextWritePos = coreCBState.bufferPtr ? coreCBState.bufferPtr + coreCBState.bufferSizeInU32s : s_commandState->commandPoolBase.GetPtr(); - uint32 writeIndex = nextWritePos - s_commandState->commandPoolBase; - uint32 poolSizeInU32s = s_commandState->commandPoolSizeInU32s; - // readIndex == writeIndex can mean either buffer full or buffer empty - // we could use GX2GetRetiredTimeStamp() == GX2GetLastSubmittedTimeStamp() to determine if the buffer is truly empty - // but this can have false negatives since the last submission timestamp is updated independently of the read index - // so instead we just avoid ever filling the buffer completely - cemu_assert_debug(readIndex < poolSizeInU32s); - cemu_assert_debug(writeIndex < poolSizeInU32s); - if (writeIndex < readIndex) + if (i == sGX2MainCoreIndex) { - // writeIndex has wrapped around - uint32 wordsAvailable = readIndex - writeIndex; - if (wordsAvailable > 0) - wordsAvailable--; // avoid writeIndex becoming equal to readIndex - if (wordsAvailable >= numU32s) - { - bufferPtr = s_commandState->commandPoolBase + writeIndex; - bufferSizeInU32s = wordsAvailable; - break; - } + gx2WriteGatherPipe.writeGatherPtrGxBuffer[i] = gx2WriteGatherPipe.gxRingBuffer; + gx2WriteGatherPipe.writeGatherPtrWrite[i] = 
&gx2WriteGatherPipe.writeGatherPtrGxBuffer[i]; } else { - uint32 wordsAvailable = poolSizeInU32s - writeIndex; - if (wordsAvailable > 0) - wordsAvailable--; // avoid writeIndex becoming equal to readIndex - if (wordsAvailable >= numU32s) - { - bufferPtr = nextWritePos; - bufferSizeInU32s = wordsAvailable; - break; - } - // not enough space at end of buffer, try to grab from the beginning of the buffer - wordsAvailable = readIndex; - if (wordsAvailable > 0) - wordsAvailable--; // avoid writeIndex becoming equal to readIndex - if (wordsAvailable >= numU32s) - { - bufferPtr = s_commandState->commandPoolBase; - bufferSizeInU32s = wordsAvailable; - break; - } + gx2WriteGatherPipe.writeGatherPtrGxBuffer[i] = NULL; + gx2WriteGatherPipe.writeGatherPtrWrite[i] = NULL; } - GX2Command_WaitForNextBufferRetired(); - } - cemu_assert_debug(bufferPtr); - bufferSizeInU32s = std::min(numU32s, 0x20000); // size cap -#ifdef CEMU_DEBUG_ASSERT - uint32 newWriteIndex = ((bufferPtr - s_commandState->commandPoolBase) + bufferSizeInU32s) % s_commandState->commandPoolSizeInU32s; - cemu_assert_debug(newWriteIndex != readIndex); -#endif - // setup buffer and make it the current write gather target - cemu_assert_debug(bufferPtr >= s_commandState->commandPoolBase && (bufferPtr + bufferSizeInU32s) <= s_commandState->commandPoolBase + s_commandState->commandPoolSizeInU32s); - GX2Command_SetupCoreCommandBuffer(bufferPtr, bufferSizeInU32s, false); - } - - void GX2Command_SubmitCommandBuffer(uint32be* buffer, uint32 sizeInU32s, MEMPTR* completionGPUReadPointer, bool triggerMarkerInterrupt) - { - uint32be cmd[10]; - uint32 cmdLen = 4; - cmd[0] = pm4HeaderType3(IT_INDIRECT_BUFFER_PRIV, 3); - cmd[1] = memory_virtualToPhysical(MEMPTR(buffer).GetMPTR()); - cmd[2] = 0x00000000; // address high bits - cmd[3] = sizeInU32s; - if (completionGPUReadPointer) - { - // append command to update completionGPUReadPointer after the GPU is done with the command buffer - cmd[4] = pm4HeaderType3(IT_MEM_WRITE, 4); - 
cmd[5] = memory_virtualToPhysical(MEMPTR(completionGPUReadPointer).GetMPTR()) | 2; - cmd[6] = 0x40000; - cmd[7] = MEMPTR(buffer + sizeInU32s).GetMPTR(); // value to write - cmd[8] = 0x00000000; - cmdLen = 9; - } - - betype submissionFlags{}; - if (!triggerMarkerInterrupt) - submissionFlags |= TCL::TCLSubmissionFlag::NO_MARKER_INTERRUPT; - submissionFlags |= TCL::TCLSubmissionFlag::USE_RETIRED_MARKER; - - TCL::TCLSubmitToRing(cmd, cmdLen, &submissionFlags, &s_commandState->lastSubmissionTime); - } - - void GX2Command_PadCurrentBuffer() - { - uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - if (!coreCBState.currentWritePtr) - return; - uint32 writeDistance = (uint32)(coreCBState.currentWritePtr - coreCBState.bufferPtr); - if ((writeDistance&7) != 0) - { - uint32 distanceToPad = 0x8 - (writeDistance & 0x7); - while (distanceToPad) - { - *coreCBState.currentWritePtr = pm4HeaderType2Filler(); - coreCBState.currentWritePtr++; - distanceToPad--; - } - } - } - - void GX2Command_Flush(uint32 numU32sForNextBuffer, bool triggerMarkerInterrupt) - { - uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - if (coreCBState.isDisplayList) - { - // display list - cemu_assert_debug((uint32)(coreCBState.currentWritePtr - coreCBState.bufferPtr) < coreCBState.bufferSizeInU32s); - cemuLog_logDebugOnce(LogType::Force, "GX2 flush called on display list"); - } - else - { - // command buffer - if (coreCBState.currentWritePtr != coreCBState.bufferPtr) - { - // pad the command buffer to 32 byte alignment - GX2Command_PadCurrentBuffer(); - // submit it to the GPU - uint32 bufferLength = (uint32)(coreCBState.currentWritePtr - coreCBState.bufferPtr); - cemu_assert_debug(bufferLength <= coreCBState.bufferSizeInU32s); - GX2Command_SubmitCommandBuffer(coreCBState.bufferPtr, bufferLength, &s_commandState->gpuCommandReadPtr, triggerMarkerInterrupt); - GX2Command_StartNewCommandBuffer(numU32sForNextBuffer); - } - 
else - { - // current buffer is empty so we dont need to queue it - if (numU32sForNextBuffer > s_commandState->commandPoolSizeInU32s) - GX2Command_StartNewCommandBuffer(numU32sForNextBuffer); - } - } - } - - void GX2Flush() - { - GX2Command_Flush(256, true); - } - - uint64 GX2GetLastSubmittedTimeStamp() - { - stdx::atomic_ref _lastSubmissionTime(s_commandState->lastSubmissionTime); - return _lastSubmissionTime.load(); - } - - uint64 GX2GetRetiredTimeStamp() - { - uint64be ts = 0; - TCL::TCLTimestamp(TCL::TCLTimestampId::TIMESTAMP_LAST_BUFFER_RETIRED, &ts); - return ts; - } - - bool GX2WaitTimeStamp(uint64 tsWait) - { - // handle GPU timeout here? But for now we timeout after 60 seconds - TCL::TCLWaitTimestamp(TCL::TCLTimestampId::TIMESTAMP_LAST_BUFFER_RETIRED, tsWait, Espresso::TIMER_CLOCK * 60); - return true; - } - - /* - * Guarantees that the requested amount of space is available on the current command buffer - * If the space is not available, the current command buffer is pushed to the GPU and a new one is allocated - */ - void GX2ReserveCmdSpace(uint32 reservedFreeSpaceInU32) - { - uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - if (coreCBState.currentWritePtr == nullptr) - return; - uint32 writeDistance = (uint32)(coreCBState.currentWritePtr - coreCBState.bufferPtr); - if (writeDistance + reservedFreeSpaceInU32 > coreCBState.bufferSizeInU32s) - { - GX2Command_Flush(reservedFreeSpaceInU32, true); + gx2WriteGatherPipe.displayListStart[i] = MPTR_NULL; + gx2WriteGatherPipe.writeGatherPtrDisplayList[i] = NULL; + gx2WriteGatherPipe.displayListMaxSize[i] = 0; } + gx2WriteGatherCurrentMainCoreIndex = sGX2MainCoreIndex; + gx2WriteGatherInited = true; } void GX2WriteGather_beginDisplayList(PPCInterpreter_t* hCPU, MPTR buffer, uint32 maxSize) { uint32 coreIndex = PPCInterpreter_getCoreIndex(hCPU); - if (coreIndex == sGX2MainCoreIndex) - { - GX2Command_PadCurrentBuffer(); - 
cemu_assert_debug(!s_perCoreCBState[coreIndex].isDisplayList); - s_mainCoreLastCommandState = s_perCoreCBState[coreIndex]; - } - GX2Command_SetupCoreCommandBuffer(MEMPTR(buffer), maxSize/4, true); + gx2WriteGatherPipe.displayListStart[coreIndex] = buffer; + gx2WriteGatherPipe.displayListMaxSize[coreIndex] = maxSize; + // set new write gather ptr + gx2WriteGatherPipe.writeGatherPtrDisplayList[coreIndex] = memory_getPointerFromVirtualOffset(gx2WriteGatherPipe.displayListStart[coreIndex]); + gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] = &gx2WriteGatherPipe.writeGatherPtrDisplayList[coreIndex]; } uint32 GX2WriteGather_getDisplayListWriteDistance(sint32 coreIndex) { - auto& coreCBState = s_perCoreCBState[coreIndex]; - cemu_assert_debug(coreCBState.isDisplayList); - if (coreCBState.currentWritePtr == nullptr) - return 0; - return (uint32)(coreCBState.currentWritePtr - coreCBState.bufferPtr) * 4; + return (uint32)(*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] - memory_getPointerFromVirtualOffset(gx2WriteGatherPipe.displayListStart[coreIndex])); + } + + uint32 GX2WriteGather_getFifoWriteDistance(uint32 coreIndex) + { + uint32 writeDistance = (uint32)(gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] - gx2WriteGatherPipe.gxRingBuffer); + return writeDistance; } uint32 GX2WriteGather_endDisplayList(PPCInterpreter_t* hCPU, MPTR buffer) { - uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - GX2Command_PadCurrentBuffer(); - uint32 finalWriteIndex = coreCBState.currentWritePtr - coreCBState.bufferPtr; - cemu_assert_debug(finalWriteIndex <= coreCBState.bufferSizeInU32s); - // if we are on the main GX2 core then restore the GPU command buffer - if (coreIndex == sGX2MainCoreIndex) + uint32 coreIndex = PPCInterpreter_getCoreIndex(hCPU); + if (gx2WriteGatherPipe.displayListStart[coreIndex] != MPTR_NULL) { - coreCBState = s_mainCoreLastCommandState; + uint32 currentWriteSize = 
GX2WriteGather_getDisplayListWriteDistance(coreIndex); + // pad to 32 byte + if (gx2WriteGatherPipe.displayListMaxSize[coreIndex] >= ((gx2WriteGatherPipe.displayListMaxSize[coreIndex] + 0x1F) & ~0x1F)) + { + while ((currentWriteSize & 0x1F) != 0) + { + gx2WriteGather_submitU32AsBE(pm4HeaderType2Filler()); + currentWriteSize += 4; + } + } + // get size of written data + currentWriteSize = GX2WriteGather_getDisplayListWriteDistance(coreIndex); + // disable current display list and restore write gather ptr + gx2WriteGatherPipe.displayListStart[coreIndex] = MPTR_NULL; + if (sGX2MainCoreIndex == coreIndex) + gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] = &gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex]; + else + gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] = NULL; + // return size of (written) display list + return currentWriteSize; } else { - coreCBState.bufferPtr = nullptr; - coreCBState.currentWritePtr = nullptr; - coreCBState.bufferSizeInU32s = 0; - coreCBState.isDisplayList = false; + // no active display list + // return a size of 0 + return 0; } - return finalWriteIndex * 4; } - bool GX2GetCurrentDisplayList(MEMPTR* displayListAddr, uint32be* displayListSize) + bool GX2GetCurrentDisplayList(betype* displayListAddr, uint32be* displayListSize) { uint32 coreIndex = coreinit::OSGetCoreId(); - auto& coreCBState = s_perCoreCBState[coreIndex]; - if (!coreCBState.isDisplayList) + if (gx2WriteGatherPipe.displayListStart[coreIndex] == MPTR_NULL) return false; + if (displayListAddr) - *displayListAddr = coreCBState.bufferPtr; + *displayListAddr = gx2WriteGatherPipe.displayListStart[coreIndex]; if (displayListSize) - *displayListSize = coreCBState.bufferSizeInU32s * sizeof(uint32be); + *displayListSize = gx2WriteGatherPipe.displayListMaxSize[coreIndex]; + return true; } - // returns true if we are writing to a display list bool GX2GetDisplayListWriteStatus() { + // returns true if we are writing to a display list uint32 coreIndex = coreinit::OSGetCoreId(); - 
return s_perCoreCBState[coreIndex].isDisplayList; + return gx2WriteGatherPipe.displayListStart[coreIndex] != MPTR_NULL; + } + + uint32 GX2WriteGather_getReadWriteDistance() + { + uint32 coreIndex = sGX2MainCoreIndex; + uint32 writeDistance = (uint32)(gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] + GX2_COMMAND_RING_BUFFER_SIZE - gxRingBufferReadPtr); + writeDistance %= GX2_COMMAND_RING_BUFFER_SIZE; + return writeDistance; + } + + void GX2WriteGather_checkAndInsertWrapAroundMark() + { + uint32 coreIndex = coreinit::OSGetCoreId(); + if (coreIndex != sGX2MainCoreIndex) // only if main gx2 core + return; + if (gx2WriteGatherPipe.displayListStart[coreIndex] != MPTR_NULL) + return; + uint32 writeDistance = GX2WriteGather_getFifoWriteDistance(coreIndex); + if (writeDistance >= (GX2_COMMAND_RING_BUFFER_SIZE * 3 / 5)) + { + gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_HLE_FIFO_WRAP_AROUND, 1)); + gx2WriteGather_submitU32AsBE(0); // empty word since we can't send commands with zero data words + gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] = gx2WriteGatherPipe.gxRingBuffer; + } } void GX2BeginDisplayList(MEMPTR displayListAddr, uint32 size) @@ -428,23 +204,28 @@ namespace GX2 memory_virtualToPhysical(addr), 0, // high address bits size / 4); + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); } void GX2DirectCallDisplayList(void* addr, uint32 size) { // this API submits to TCL directly and bypasses write-gatherer // its basically a way to manually submit a command buffer to the GPU - uint32 coreIndex = coreinit::OSGetCoreId(); - if (coreIndex != sGX2MainCoreIndex) - { - cemuLog_logDebugOnce(LogType::Force, "GX2DirectCallDisplayList() called on non-main GX2 core"); - } - if (!s_perCoreCBState[coreIndex].isDisplayList) - { - // make sure any preceeding commands are submitted first - GX2Command_Flush(0x100, false); - } - GX2Command_SubmitCommandBuffer(static_cast(addr), size / 4, nullptr, false); + // as such it also affects the submission and retire timestamps 
+ + uint32 coreIndex = PPCInterpreter_getCoreIndex(PPCInterpreter_getCurrentInstance()); + cemu_assert_debug(coreIndex == sGX2MainCoreIndex); + coreIndex = sGX2MainCoreIndex; // always submit to main queue which is owned by GX2 main core (TCLSubmitToRing does not need this workaround) + + uint32be* cmdStream = (uint32be*)(gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex]); + cmdStream[0] = pm4HeaderType3(IT_INDIRECT_BUFFER_PRIV, 3); + cmdStream[1] = memory_virtualToPhysical(MEMPTR(addr).GetMPTR()); + cmdStream[2] = 0; + cmdStream[3] = size / 4; + gx2WriteGatherPipe.writeGatherPtrGxBuffer[coreIndex] += 16; + + // update submission timestamp and retired timestamp + _GX2SubmitToTCL(); } void GX2CopyDisplayList(MEMPTR addr, uint32 size) @@ -507,12 +288,6 @@ namespace GX2 void GX2CommandInit() { - cafeExportRegister("gx2", GX2Flush, LogType::GX2); - - cafeExportRegister("gx2", GX2GetLastSubmittedTimeStamp, LogType::GX2); - cafeExportRegister("gx2", GX2GetRetiredTimeStamp, LogType::GX2); - cafeExportRegister("gx2", GX2WaitTimeStamp, LogType::GX2); - cafeExportRegister("gx2", GX2BeginDisplayList, LogType::GX2); cafeExportRegister("gx2", GX2BeginDisplayListEx, LogType::GX2); cafeExportRegister("gx2", GX2EndDisplayList, LogType::GX2); @@ -520,6 +295,7 @@ namespace GX2 cafeExportRegister("gx2", GX2GetCurrentDisplayList, LogType::GX2); cafeExportRegister("gx2", GX2GetDisplayListWriteStatus, LogType::GX2); + cafeExportRegister("gx2", GX2CallDisplayList, LogType::GX2); cafeExportRegister("gx2", GX2DirectCallDisplayList, LogType::GX2); cafeExportRegister("gx2", GX2CopyDisplayList, LogType::GX2); @@ -529,10 +305,7 @@ namespace GX2 void GX2CommandResetToDefaultState() { - s_commandState->commandPoolBase = nullptr; - s_commandState->commandPoolSizeInU32s = 0; - s_commandState->gpuCommandReadPtr = nullptr; - s_cbBufferIsInternallyAllocated = false; + GX2WriteGather_ResetToDefaultState(); } } diff --git a/src/Cafe/OS/libs/gx2/GX2_Command.h b/src/Cafe/OS/libs/gx2/GX2_Command.h 
index 00f5d427..51c04928 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Command.h +++ b/src/Cafe/OS/libs/gx2/GX2_Command.h @@ -2,19 +2,21 @@ #include "Cafe/HW/Latte/ISA/LatteReg.h" #include "Cafe/HW/Espresso/Const.h" -namespace GX2 +struct GX2WriteGatherPipeState { - struct GX2PerCoreCBState - { - uint32be* bufferPtr; - uint32 bufferSizeInU32s; - uint32be* currentWritePtr; - bool isDisplayList; - }; - - extern GX2PerCoreCBState s_perCoreCBState[Espresso::CORE_COUNT]; + uint8* gxRingBuffer; + // each core has it's own write gatherer and display list state (writing) + uint8* writeGatherPtrGxBuffer[Espresso::CORE_COUNT]; + uint8** writeGatherPtrWrite[Espresso::CORE_COUNT]; + uint8* writeGatherPtrDisplayList[Espresso::CORE_COUNT]; + MPTR displayListStart[Espresso::CORE_COUNT]; + uint32 displayListMaxSize[Espresso::CORE_COUNT]; }; +extern GX2WriteGatherPipeState gx2WriteGatherPipe; + +void GX2ReserveCmdSpace(uint32 reservedFreeSpaceInU32); // move to GX2 namespace eventually + void gx2WriteGather_submitU32AsBE(uint32 v); void gx2WriteGather_submitU32AsLE(uint32 v); void gx2WriteGather_submitU32AsLEArray(uint32* v, uint32 numValues); @@ -25,8 +27,7 @@ uint32 PPCInterpreter_getCurrentCoreIndex(); template inline void gx2WriteGather_submit_(uint32 coreIndex, uint32be* writePtr) { - GX2::s_perCoreCBState[coreIndex].currentWritePtr = writePtr; - cemu_assert_debug(GX2::s_perCoreCBState[coreIndex].currentWritePtr <= (GX2::s_perCoreCBState[coreIndex].bufferPtr + GX2::s_perCoreCBState[coreIndex].bufferSizeInU32s)); + (*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]) = (uint8*)writePtr; } template @@ -74,23 +75,17 @@ template inline void gx2WriteGather_submit(Targs... args) { uint32 coreIndex = PPCInterpreter_getCurrentCoreIndex(); - if (GX2::s_perCoreCBState[coreIndex].currentWritePtr == nullptr) - { - cemu_assert_suspicious(); // writing to command buffer without valid write pointer? 
+ if (gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex] == nullptr) return; - } - uint32be* writePtr = GX2::s_perCoreCBState[coreIndex].currentWritePtr; + + uint32be* writePtr = (uint32be*)(*gx2WriteGatherPipe.writeGatherPtrWrite[coreIndex]); gx2WriteGather_submit_(coreIndex, writePtr, std::forward(args)...); } namespace GX2 { - void GX2Command_Flush(uint32 numU32sForNextBuffer, bool triggerMarkerInterrupt = true); - void GX2ReserveCmdSpace(uint32 reservedFreeSpaceInU32); - - uint64 GX2GetLastSubmittedTimeStamp(); - uint64 GX2GetRetiredTimeStamp(); - bool GX2WaitTimeStamp(uint64 tsWait); + uint32 GX2WriteGather_getReadWriteDistance(); + void GX2WriteGather_checkAndInsertWrapAroundMark(); void GX2BeginDisplayList(MEMPTR displayListAddr, uint32 size); void GX2BeginDisplayListEx(MEMPTR displayListAddr, uint32 size, bool profiling); @@ -101,8 +96,7 @@ namespace GX2 bool GX2GetDisplayListWriteStatus(); + void GX2Init_writeGather(); void GX2CommandInit(); - void GX2Init_commandBufferPool(void* bufferBase, uint32 bufferSize); - void GX2Shutdown_commandBufferPool(); void GX2CommandResetToDefaultState(); } \ No newline at end of file diff --git a/src/Cafe/OS/libs/gx2/GX2_ContextState.cpp b/src/Cafe/OS/libs/gx2/GX2_ContextState.cpp index fb631a11..cf150b47 100644 --- a/src/Cafe/OS/libs/gx2/GX2_ContextState.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_ContextState.cpp @@ -168,7 +168,7 @@ uint32 _GX2Context_CalcStateSize() void _GX2Context_CreateLoadDL() { - GX2::GX2ReserveCmdSpace(3); + GX2ReserveCmdSpace(3); gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_CONTEXT_CONTROL, 2)); gx2WriteGather_submitU32AsBE(0x80000077); gx2WriteGather_submitU32AsBE(0x80000077); @@ -176,7 +176,7 @@ void _GX2Context_CreateLoadDL() void _GX2Context_WriteCmdDisableStateShadowing() { - GX2::GX2ReserveCmdSpace(3); + GX2ReserveCmdSpace(3); gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_CONTEXT_CONTROL, 2)); gx2WriteGather_submitU32AsBE(0x80000000); gx2WriteGather_submitU32AsBE(0x80000000); @@ -184,7 +184,7 @@ 
void _GX2Context_WriteCmdDisableStateShadowing() void _GX2Context_cmdLoad(void* gx2ukn, uint32 pm4Header, MPTR physAddrRegArea, uint32 waitForIdle, uint32 numRegOffsetEntries, GX2RegLoadPktEntry_t* regOffsetEntries) { - GX2::GX2ReserveCmdSpace(3 + numRegOffsetEntries*2); + GX2ReserveCmdSpace(3 + numRegOffsetEntries*2); gx2WriteGather_submitU32AsBE(pm4Header); gx2WriteGather_submitU32AsBE(physAddrRegArea); gx2WriteGather_submitU32AsBE(waitForIdle); @@ -199,6 +199,7 @@ void _GX2Context_cmdLoad(void* gx2ukn, uint32 pm4Header, MPTR physAddrRegArea, u void _GX2Context_WriteCmdRestoreState(GX2ContextState_t* gx2ContextState, uint32 ukn) { + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); MPTR physAddrContextState = memory_virtualToPhysical(memory_getVirtualOffsetFromPointer(gx2ContextState)); _GX2Context_CreateLoadDL(); __cmdStateLoad(NULL, IT_LOAD_CONFIG_REG, gx2ContextState->hwContext.areaConfigReg, 0x80000000, configReg_loadPktEntries); @@ -211,7 +212,7 @@ void _GX2Context_WriteCmdRestoreState(GX2ContextState_t* gx2ContextState, uint32 void GX2SetDefaultState() { - GX2::GX2ReserveCmdSpace(0x100); + GX2ReserveCmdSpace(0x100); Latte::LATTE_PA_CL_VTE_CNTL reg{}; reg.set_VPORT_X_OFFSET_ENA(true).set_VPORT_X_SCALE_ENA(true); @@ -375,6 +376,7 @@ void gx2Export_GX2SetContextState(PPCInterpreter_t* hCPU) osLib_returnFromFunction(hCPU, 0); } + void gx2Export_GX2GetContextStateDisplayList(PPCInterpreter_t* hCPU) { cemuLog_log(LogType::GX2, "GX2GetContextStateDisplayList(0x{:08x}, 0x{:08x}, 0x{:08x})", hCPU->gpr[3], hCPU->gpr[4], hCPU->gpr[5]); diff --git a/src/Cafe/OS/libs/gx2/GX2_Draw.cpp b/src/Cafe/OS/libs/gx2/GX2_Draw.cpp index 958978e1..053b787b 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Draw.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Draw.cpp @@ -52,6 +52,7 @@ namespace GX2 0, count, 0); + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); } void GX2DrawIndexedEx2(GX2PrimitiveMode2 primitiveMode, uint32 count, GX2IndexType indexType, void* indexData, uint32 baseVertex, uint32 
numInstances, uint32 baseInstance) @@ -84,6 +85,7 @@ namespace GX2 pm4HeaderType3(IT_SET_CTL_CONST, 2), 1, 0 // baseInstance ); + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); } void GX2DrawEx(GX2PrimitiveMode2 primitiveMode, uint32 count, uint32 baseVertex, uint32 numInstances) @@ -107,6 +109,7 @@ namespace GX2 count, 0 // DRAW_INITIATOR ); + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); } void GX2DrawIndexedImmediateEx(GX2PrimitiveMode2 primitiveMode, uint32 count, GX2IndexType indexType, void* indexData, uint32 baseVertex, uint32 numInstances) @@ -174,6 +177,7 @@ namespace GX2 } } + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); } struct GX2DispatchComputeParam diff --git a/src/Cafe/OS/libs/gx2/GX2_Event.cpp b/src/Cafe/OS/libs/gx2/GX2_Event.cpp index 645f0a79..9748e20b 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Event.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Event.cpp @@ -16,6 +16,18 @@ namespace GX2 SysAllocator g_vsyncThreadQueue; SysAllocator g_flipThreadQueue; + SysAllocator s_updateRetirementEvent; + std::atomic s_lastRetirementTimestamp = 0; + + // called from GPU code when a command buffer is retired + void __GX2NotifyNewRetirementTimestamp(uint64 tsRetire) + { + __OSLockScheduler(); + s_lastRetirementTimestamp = tsRetire; + coreinit::OSSignalEventAllInternal(s_updateRetirementEvent.GetPtr()); + __OSUnlockScheduler(); + } + void GX2SetGPUFence(uint32be* fencePtr, uint32 mask, uint32 compareOp, uint32 compareValue) { GX2ReserveCmdSpace(7); @@ -198,6 +210,16 @@ namespace GX2 osLib_returnFromFunction(hCPU, 0); } + uint64 GX2GetLastSubmittedTimeStamp() + { + return LatteGPUState.lastSubmittedCommandBufferTimestamp.load(); + } + + uint64 GX2GetRetiredTimeStamp() + { + return s_lastRetirementTimestamp; + } + void GX2WaitForVsync() { __OSLockScheduler(); @@ -214,6 +236,19 @@ namespace GX2 __OSUnlockScheduler(); } + bool GX2WaitTimeStamp(uint64 tsWait) + { + __OSLockScheduler(); + while (tsWait > s_lastRetirementTimestamp) + { + // GPU hasn't caught up yet + 
coreinit::OSWaitEventInternal(s_updateRetirementEvent.GetPtr()); + } + __OSUnlockScheduler(); + // return true to indicate no timeout + return true; + } + void GX2DrawDone() { // optional force full sync (texture readback and occlusion queries) @@ -228,10 +263,13 @@ namespace GX2 gx2WriteGather_submitU32AsBE(0x00000000); // unused } // flush pipeline - GX2Command_Flush(0x100, true); + if (_GX2GetUnflushedBytes(coreinit::OSGetCoreId()) > 0) + _GX2SubmitToTCL(); uint64 ts = GX2GetLastSubmittedTimeStamp(); GX2WaitTimeStamp(ts); + + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); } void GX2Init_event() @@ -256,19 +294,25 @@ namespace GX2 cafeExportRegister("gx2", GX2SetEventCallback, LogType::GX2); cafeExportRegister("gx2", GX2GetEventCallback, LogType::GX2); + cafeExportRegister("gx2", GX2GetLastSubmittedTimeStamp, LogType::GX2); + cafeExportRegister("gx2", GX2GetRetiredTimeStamp, LogType::GX2); + cafeExportRegister("gx2", GX2WaitForVsync, LogType::GX2); cafeExportRegister("gx2", GX2WaitForFlip, LogType::GX2); + cafeExportRegister("gx2", GX2WaitTimeStamp, LogType::GX2); cafeExportRegister("gx2", GX2DrawDone, LogType::GX2); coreinit::OSInitThreadQueue(g_vsyncThreadQueue.GetPtr()); coreinit::OSInitThreadQueue(g_flipThreadQueue.GetPtr()); + coreinit::OSInitEvent(s_updateRetirementEvent, coreinit::OSEvent::EVENT_STATE::STATE_NOT_SIGNALED, coreinit::OSEvent::EVENT_MODE::MODE_AUTO); coreinit::OSInitSemaphore(s_eventCbQueueSemaphore, 0); } void GX2EventResetToDefaultState() { s_callbackThreadLaunched = false; + s_lastRetirementTimestamp = 0; for(auto& it : s_eventCallback) { it.callbackFuncPtr = nullptr; diff --git a/src/Cafe/OS/libs/gx2/GX2_Misc.cpp b/src/Cafe/OS/libs/gx2/GX2_Misc.cpp index e7830cd8..3c7ea3f9 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Misc.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Misc.cpp @@ -81,68 +81,19 @@ namespace GX2 void _test_AddrLib(); - using GX2InitArg = uint32; - enum class GX2InitArgId : GX2InitArg - { - EndOfArgs = 0, - CommandPoolBase = 1, - 
CommandPoolSize = 2, - UknArg7 = 7, - UknArg8 = 8, - UknArg9 = 9, - UknArg11 = 11, - }; - - void GX2Init(betype* initArgStream) + void GX2Init(void* initSettings) { if (LatteGPUState.gx2InitCalled) { cemuLog_logDebug(LogType::Force, "GX2Init() called while already initialized"); return; } - // parse init params from the stream - MEMPTR commandPoolBase = nullptr; - uint32 commandPoolSize = 0; - if (initArgStream) - { - while (true) - { - GX2InitArgId paramId = static_cast((GX2InitArg)*initArgStream); - initArgStream++; - if (paramId == GX2InitArgId::EndOfArgs) - { - break; - } - else if (paramId == GX2InitArgId::CommandPoolBase) - { - commandPoolBase = MEMPTR(*initArgStream); - initArgStream++; - } - else if (paramId == GX2InitArgId::CommandPoolSize) - { - commandPoolSize = *initArgStream; - initArgStream++; - } - else if (paramId == GX2InitArgId::UknArg7 || - paramId == GX2InitArgId::UknArg8 || - paramId == GX2InitArgId::UknArg9 || - paramId == GX2InitArgId::UknArg11) - { - initArgStream++; - } - else - { - cemuLog_log(LogType::Force, "GX2Init: Unsupported init arg {}", (uint32)paramId); - } - } - } - // init main core uint32 coreIndex = coreinit::OSGetCoreId(); cemuLog_log(LogType::GX2, "GX2Init() on core {} by thread 0x{:08x}", coreIndex, MEMPTR(coreinit::OSGetCurrentThread()).GetMPTR()); sGX2MainCoreIndex = coreIndex; // init submodules GX2::GX2Init_event(); - GX2::GX2Init_commandBufferPool(commandPoolBase, commandPoolSize); + GX2::GX2Init_writeGather(); // init shared area if (LatteGPUState.sharedAreaAddr == MPTR_NULL) { @@ -161,21 +112,6 @@ namespace GX2 _test_AddrLib(); } - void GX2Shutdown() - { - if (!LatteGPUState.gx2InitCalled) - { - cemuLog_logDebug(LogType::Force, "GX2Shutdown() called while not initialized"); - return; - } - LatteGPUState.gx2InitCalled--; - if (LatteGPUState.gx2InitCalled != 0) - return; - GX2DrawDone(); - GX2Shutdown_commandBufferPool(); - cemuLog_log(LogType::Force, "GX2 shutdown"); - } - void _GX2DriverReset() { 
LatteGPUState.gx2InitCalled = 0; @@ -301,7 +237,6 @@ namespace GX2 void GX2MiscInit() { cafeExportRegister("gx2", GX2Init, LogType::GX2); - cafeExportRegister("gx2", GX2Shutdown, LogType::GX2); cafeExportRegister("gx2", GX2GetMainCoreId, LogType::GX2); cafeExportRegister("gx2", GX2ResetGPU, LogType::GX2); diff --git a/src/Cafe/OS/libs/gx2/GX2_RenderTarget.cpp b/src/Cafe/OS/libs/gx2/GX2_RenderTarget.cpp index 8abc3613..2a257a67 100644 --- a/src/Cafe/OS/libs/gx2/GX2_RenderTarget.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_RenderTarget.cpp @@ -135,7 +135,7 @@ void gx2Export_GX2InitDepthBufferRegs(PPCInterpreter_t* hCPU) void gx2Export_GX2SetColorBuffer(PPCInterpreter_t* hCPU) { cemuLog_log(LogType::GX2, "GX2SetColorBuffer(0x{:08x}, {})", hCPU->gpr[3], hCPU->gpr[4]); - GX2::GX2ReserveCmdSpace(20); + GX2ReserveCmdSpace(20); GX2ColorBuffer* colorBufferBE = (GX2ColorBuffer*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); @@ -198,13 +198,15 @@ void gx2Export_GX2SetColorBuffer(PPCInterpreter_t* hCPU) mmCB_COLOR0_INFO - 0xA000 + hCPU->gpr[4], colorBufferBE->reg_info); + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); + osLib_returnFromFunction(hCPU, 0); } void gx2Export_GX2SetDepthBuffer(PPCInterpreter_t* hCPU) { cemuLog_log(LogType::GX2, "GX2SetDepthBuffer(0x{:08x})", hCPU->gpr[3]); - GX2::GX2ReserveCmdSpace(20); + GX2ReserveCmdSpace(20); GX2DepthBuffer* depthBufferBE = (GX2DepthBuffer*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); @@ -262,6 +264,8 @@ void gx2Export_GX2SetDepthBuffer(PPCInterpreter_t* hCPU) gx2WriteGather_submitU32AsBE(mmDB_DEPTH_VIEW - 0xA000); gx2WriteGather_submitU32AsBE(db_view); + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); + osLib_returnFromFunction(hCPU, 0); } @@ -277,7 +281,7 @@ void gx2Export_GX2MarkScanBufferCopied(PPCInterpreter_t* hCPU) uint32 scanTarget = hCPU->gpr[3]; if( scanTarget == GX2_SCAN_TARGET_TV ) { - GX2::GX2ReserveCmdSpace(10); + GX2ReserveCmdSpace(10); uint32 physAddr = (MEMORY_TILINGAPERTURE_AREA_ADDR+0x200000); diff 
--git a/src/Cafe/OS/libs/gx2/GX2_Shader.cpp b/src/Cafe/OS/libs/gx2/GX2_Shader.cpp index 20a773e0..7a153737 100644 --- a/src/Cafe/OS/libs/gx2/GX2_Shader.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Shader.cpp @@ -303,27 +303,7 @@ namespace GX2 void GX2SetVertexShader(GX2VertexShader* vertexShader) { - uint32 numOutputIds = vertexShader->regs.vsOutIdTableSize; - numOutputIds = std::min(numOutputIds, 0xA); - uint32 vsSemanticTableSize = vertexShader->regs.semanticTableSize; - - uint32 reserveSize = 31; - if (vertexShader->shaderMode == GX2_SHADER_MODE::GEOMETRY_SHADER) - { - reserveSize += 7; - } - else - { - reserveSize += 18; - reserveSize += numOutputIds; - if (vertexShader->usesStreamOut != 0) - reserveSize += 2+12; - } - if (vsSemanticTableSize > 0) - { - reserveSize += 5 + vsSemanticTableSize; - } - GX2ReserveCmdSpace(reserveSize); + GX2ReserveCmdSpace(100); MPTR shaderProgramAddr; uint32 shaderProgramSize; @@ -381,6 +361,8 @@ namespace GX2 cemu_assert_debug(vertexShader->regs.SPI_VS_OUT_CONFIG.value().get_VS_PER_COMPONENT() == false); // not handled on the GPU side + uint32 numOutputIds = vertexShader->regs.vsOutIdTableSize; + numOutputIds = std::min(numOutputIds, 0xA); gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_SET_CONTEXT_REG, 1+numOutputIds)); gx2WriteGather_submitU32AsBE(Latte::REGADDR::SPI_VS_OUT_ID_0-0xA000); for(uint32 i=0; iregs.semanticTableSize; if (vsSemanticTableSize > 0) { gx2WriteGather_submit( diff --git a/src/Cafe/OS/libs/gx2/GX2_State.cpp b/src/Cafe/OS/libs/gx2/GX2_State.cpp index 795ff527..d9c0420f 100644 --- a/src/Cafe/OS/libs/gx2/GX2_State.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_State.cpp @@ -213,6 +213,7 @@ namespace GX2 void GX2SetViewportReg(GX2ViewportReg* viewportReg) { + GX2::GX2WriteGather_checkAndInsertWrapAroundMark(); GX2ReserveCmdSpace(2 + 6); gx2WriteGather_submit(pm4HeaderType3(IT_SET_CONTEXT_REG, 1 + 6), diff --git a/src/Cafe/OS/libs/gx2/GX2_Surface_Copy.cpp b/src/Cafe/OS/libs/gx2/GX2_Surface_Copy.cpp index ce85048e..fe785d61 100644 --- 
a/src/Cafe/OS/libs/gx2/GX2_Surface_Copy.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_Surface_Copy.cpp @@ -264,7 +264,7 @@ void gx2Surface_GX2CopySurface(GX2Surface* srcSurface, uint32 srcMip, uint32 src // send copy command to GPU if( srcHwTileMode > 0 && srcHwTileMode < 16 && dstHwTileMode > 0 && dstHwTileMode < 16 || requestGPURAMCopy ) { - GX2::GX2ReserveCmdSpace(1+13*2); + GX2ReserveCmdSpace(1+13*2); gx2WriteGather_submit(pm4HeaderType3(IT_HLE_COPY_SURFACE_NEW, 13*2), // src @@ -540,7 +540,7 @@ void gx2Export_GX2ResolveAAColorBuffer(PPCInterpreter_t* hCPU) uint32 dstDepth = std::max(surfOutDst.depth, 1); // send copy command to GPU - GX2::GX2ReserveCmdSpace(1 + 13 * 2); + GX2ReserveCmdSpace(1 + 13 * 2); gx2WriteGather_submit(pm4HeaderType3(IT_HLE_COPY_SURFACE_NEW, 13 * 2), // src (uint32)srcSurface->imagePtr, @@ -619,7 +619,7 @@ void gx2Export_GX2ConvertDepthBufferToTextureSurface(PPCInterpreter_t* hCPU) sint32 srcMip = 0; uint32 numSlices = std::max(_swapEndianU32(depthBuffer->viewNumSlices), 1); - GX2::GX2ReserveCmdSpace((1 + 13 * 2) * numSlices); + GX2ReserveCmdSpace((1 + 13 * 2) * numSlices); for (uint32 subSliceIndex = 0; subSliceIndex < numSlices; subSliceIndex++) { // send copy command to GPU diff --git a/src/Cafe/OS/libs/gx2/GX2_shader_legacy.cpp b/src/Cafe/OS/libs/gx2/GX2_shader_legacy.cpp index d91a8529..b0a5d2fa 100644 --- a/src/Cafe/OS/libs/gx2/GX2_shader_legacy.cpp +++ b/src/Cafe/OS/libs/gx2/GX2_shader_legacy.cpp @@ -11,14 +11,9 @@ void gx2Export_GX2SetPixelShader(PPCInterpreter_t* hCPU) { cemuLog_log(LogType::GX2, "GX2SetPixelShader(0x{:08x})", hCPU->gpr[3]); + GX2ReserveCmdSpace(100); + GX2PixelShader_t* pixelShader = (GX2PixelShader_t*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); - - uint32 numInputs = _swapEndianU32(pixelShader->regs[4]); - if( numInputs > 0x20 ) - numInputs = 0x20; - - GX2::GX2ReserveCmdSpace(26 + numInputs); - MPTR shaderProgramAddr; uint32 shaderProgramSize; @@ -49,6 +44,9 @@ void gx2Export_GX2SetPixelShader(PPCInterpreter_t* 
hCPU) _swapEndianU32(pixelShader->regs[2]), _swapEndianU32(pixelShader->regs[3])); // setup pixel shader extended inputs control + uint32 numInputs = _swapEndianU32(pixelShader->regs[4]); + if( numInputs > 0x20 ) + numInputs = 0x20; gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_SET_CONTEXT_REG, 1+numInputs)); gx2WriteGather_submitU32AsBE(mmSPI_PS_INPUT_CNTL_0-0xA000); for(uint32 i=0; igpr[3]); + GX2ReserveCmdSpace(100); GX2GeometryShader_t* geometryShader = (GX2GeometryShader_t*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); - uint32 numOutputIds = _swapEndianU32(geometryShader->regs[7]); - numOutputIds = std::min(numOutputIds, 0xA); - uint32 reserveSize = 38; // 38 fixed parameters - if (numOutputIds != 0) - reserveSize += 2 + numOutputIds; - if( _swapEndianU32(geometryShader->useStreamout) != 0 ) - reserveSize += 2 + 12; - - GX2::GX2ReserveCmdSpace(reserveSize); MPTR shaderProgramAddr; uint32 shaderProgramSize; @@ -138,7 +128,6 @@ void gx2Export_GX2SetGeometryShader(PPCInterpreter_t* hCPU) if( _swapEndianU32(geometryShader->useStreamout) != 0 ) { - // todo - IT_EVENT_WRITE packet here // stride 0 gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_SET_CONTEXT_REG, 2)); gx2WriteGather_submitU32AsBE(mmVGT_STRMOUT_VTX_STRIDE_0-0xA000); @@ -191,6 +180,8 @@ void gx2Export_GX2SetGeometryShader(PPCInterpreter_t* hCPU) gx2WriteGather_submitU32AsBE(_swapEndianU32(geometryShader->regs[3])); // GS outputs + uint32 numOutputIds = _swapEndianU32(geometryShader->regs[7]); + numOutputIds = std::min(numOutputIds, 0xA); if( numOutputIds != 0 ) { gx2WriteGather_submitU32AsBE(pm4HeaderType3(IT_SET_CONTEXT_REG, 1+numOutputIds)); @@ -263,7 +254,8 @@ void gx2Export_GX2SetComputeShader(PPCInterpreter_t* hCPU) shaderPtr = computeShader->rBuffer.GetVirtualAddr(); shaderSize = computeShader->rBuffer.GetSize(); } - GX2::GX2ReserveCmdSpace(0x11); + + GX2ReserveCmdSpace(0x11); gx2WriteGather_submit(pm4HeaderType3(IT_SET_CONTEXT_REG, 6), mmSQ_PGM_START_ES-0xA000, @@ -280,7 +272,7 @@ void 
gx2Export_GX2SetComputeShader(PPCInterpreter_t* hCPU) void _GX2SubmitUniformBlock(uint32 registerBase, uint32 index, MPTR virtualAddress, uint32 size) { - GX2::GX2ReserveCmdSpace(9); + GX2ReserveCmdSpace(9); gx2WriteGather_submit(pm4HeaderType3(IT_SET_RESOURCE, 8), registerBase + index * 7, memory_virtualToPhysical(virtualAddress), @@ -315,7 +307,7 @@ void gx2Export_GX2SetGeometryUniformBlock(PPCInterpreter_t* hCPU) void gx2Export_GX2RSetVertexUniformBlock(PPCInterpreter_t* hCPU) { - GX2::GX2ReserveCmdSpace(9); + GX2ReserveCmdSpace(9); GX2RBuffer* bufferPtr = (GX2RBuffer*)memory_getPointerFromVirtualOffset(hCPU->gpr[3]); uint32 index = hCPU->gpr[4]; @@ -328,7 +320,7 @@ void gx2Export_GX2RSetVertexUniformBlock(PPCInterpreter_t* hCPU) void gx2Export_GX2SetShaderModeEx(PPCInterpreter_t* hCPU) { - GX2::GX2ReserveCmdSpace(8+4); + GX2ReserveCmdSpace(8+4); uint32 mode = hCPU->gpr[3]; uint32 sqConfig = hCPU->gpr[3] == 0 ? 4 : 0; diff --git a/src/Cafe/OS/libs/nn_olv/nn_olv_DownloadCommunityTypes.cpp b/src/Cafe/OS/libs/nn_olv/nn_olv_DownloadCommunityTypes.cpp index 6e7632e9..db1885af 100644 --- a/src/Cafe/OS/libs/nn_olv/nn_olv_DownloadCommunityTypes.cpp +++ b/src/Cafe/OS/libs/nn_olv/nn_olv_DownloadCommunityTypes.cpp @@ -145,8 +145,7 @@ namespace nn if (name.size() != 0) { - auto name_utf16 = StringHelpers::FromUtf8(name); - name_utf16.resize(std::min(name_utf16.size(), 128)); + auto name_utf16 = StringHelpers::FromUtf8(name).substr(0, 128); if (name_utf16.size() != 0) { for (int i = 0; i < name_utf16.size(); i++) @@ -161,8 +160,7 @@ namespace nn if (description.size() != 0) { - auto description_utf16 = StringHelpers::FromUtf8(description); - description_utf16.resize(std::min(description_utf16.size(), 256)); + auto description_utf16 = StringHelpers::FromUtf8(description).substr(0, 256); if (description_utf16.size() != 0) { for (int i = 0; i < description_utf16.size(); i++) @@ -208,8 +206,7 @@ namespace nn if (screen_name.size() != 0) { - auto screen_name_utf16 = 
StringHelpers::FromUtf8(screen_name); - screen_name_utf16.resize(std::min(screen_name_utf16.size(), 32)); + auto screen_name_utf16 = StringHelpers::FromUtf8(screen_name).substr(0, 32); if (screen_name_utf16.size() != 0) { for (int i = 0; i < screen_name_utf16.size(); i++) diff --git a/src/Cafe/OS/libs/nn_olv/nn_olv_UploadCommunityTypes.cpp b/src/Cafe/OS/libs/nn_olv/nn_olv_UploadCommunityTypes.cpp index 21952ceb..6f3c43b9 100644 --- a/src/Cafe/OS/libs/nn_olv/nn_olv_UploadCommunityTypes.cpp +++ b/src/Cafe/OS/libs/nn_olv/nn_olv_UploadCommunityTypes.cpp @@ -250,8 +250,7 @@ namespace nn if (name.size() != 0) { - auto name_utf16 = StringHelpers::FromUtf8(name); - name_utf16.resize(std::min(name_utf16.size(), 128)); + auto name_utf16 = StringHelpers::FromUtf8(name).substr(0, 128); if (name_utf16.size() != 0) { for (int i = 0; i < name_utf16.size(); i++) @@ -266,8 +265,7 @@ namespace nn if (description.size() != 0) { - auto description_utf16 = StringHelpers::FromUtf8(description); - description_utf16.resize(std::min(description_utf16.size(), 256)); + auto description_utf16 = StringHelpers::FromUtf8(description).substr(0, 256); if (description_utf16.size() != 0) { for (int i = 0; i < description_utf16.size(); i++) diff --git a/src/Cafe/OS/libs/nn_olv/nn_olv_UploadFavoriteTypes.cpp b/src/Cafe/OS/libs/nn_olv/nn_olv_UploadFavoriteTypes.cpp index 912e7a11..1e2d40ab 100644 --- a/src/Cafe/OS/libs/nn_olv/nn_olv_UploadFavoriteTypes.cpp +++ b/src/Cafe/OS/libs/nn_olv/nn_olv_UploadFavoriteTypes.cpp @@ -1,6 +1,5 @@ #include "nn_olv_UploadFavoriteTypes.h" #include -#include namespace nn { @@ -116,8 +115,7 @@ namespace nn if (name.size() != 0) { - auto name_utf16 = StringHelpers::FromUtf8(name); - name_utf16.resize(std::min(name_utf16.size(), 128)); + auto name_utf16 = StringHelpers::FromUtf8(name).substr(0, 128); if (name_utf16.size() != 0) { for (int i = 0; i < name_utf16.size(); i++) @@ -132,8 +130,7 @@ namespace nn if (description.size() != 0) { - auto description_utf16 = 
StringHelpers::FromUtf8(description); - description_utf16.resize(std::min(description_utf16.size(), 256)); + auto description_utf16 = StringHelpers::FromUtf8(description).substr(0, 256); if (description_utf16.size() != 0) { for (int i = 0; i < description_utf16.size(); i++) diff --git a/src/Cafe/OS/libs/nsyshid/Backend.h b/src/Cafe/OS/libs/nsyshid/Backend.h index bfd7a235..67dad4fe 100644 --- a/src/Cafe/OS/libs/nsyshid/Backend.h +++ b/src/Cafe/OS/libs/nsyshid/Backend.h @@ -172,7 +172,7 @@ namespace nsyshid std::shared_ptr FindDevice(std::function&)> isWantedDevice); - std::shared_ptr FindDeviceById(uint16 vendorId, uint16 productId); + bool FindDeviceById(uint16 vendorId, uint16 productId); bool IsDeviceWhitelisted(uint16 vendorId, uint16 productId); diff --git a/src/Cafe/OS/libs/nsyshid/BackendEmulated.cpp b/src/Cafe/OS/libs/nsyshid/BackendEmulated.cpp index a5eb95c1..533d349e 100644 --- a/src/Cafe/OS/libs/nsyshid/BackendEmulated.cpp +++ b/src/Cafe/OS/libs/nsyshid/BackendEmulated.cpp @@ -4,7 +4,6 @@ #include "Infinity.h" #include "Skylander.h" #include "config/CemuConfig.h" -#include "SkylanderXbox360.h" namespace nsyshid::backend::emulated { @@ -29,13 +28,6 @@ namespace nsyshid::backend::emulated auto device = std::make_shared(); AttachDevice(device); } - else if (auto usb_portal = FindDeviceById(0x1430, 0x1F17)) - { - cemuLog_logDebug(LogType::Force, "Attaching Xbox 360 Portal"); - // Add Skylander Xbox 360 Portal - auto device = std::make_shared(usb_portal); - AttachDevice(device); - } if (GetConfig().emulated_usb_devices.emulate_infinity_base && !FindDeviceById(0x0E6F, 0x0129)) { cemuLog_logDebug(LogType::Force, "Attaching Emulated Base"); diff --git a/src/Cafe/OS/libs/nsyshid/Skylander.cpp b/src/Cafe/OS/libs/nsyshid/Skylander.cpp index 78337962..9fab17b6 100644 --- a/src/Cafe/OS/libs/nsyshid/Skylander.cpp +++ b/src/Cafe/OS/libs/nsyshid/Skylander.cpp @@ -6,8 +6,6 @@ #include "Backend.h" #include "Common/FileStream.h" -#include "audio/IAudioAPI.h" -#include 
"config/CemuConfig.h" namespace nsyshid { @@ -560,26 +558,6 @@ namespace nsyshid Device::WriteResult SkylanderPortalDevice::Write(WriteMessage* message) { - if (message->length != 64) { - cemu_assert_error(); - } - - if (!g_portalAudio) - { - // Portal audio is mono channel, 16 bit audio. - // Audio is unsigned 16 bit, supplied as 64 bytes which is 32 samples per block - g_portalAudio = IAudioAPI::CreateDeviceFromConfig(IAudioAPI::AudioType::Portal, 8000, 32, 16); - } - std::array mono_samples; - for (unsigned int i = 0; i < mono_samples.size(); ++i) - { - sint16 sample = static_cast(message->data[i * 2 + 1]) << 8 | static_cast(message->data[i * 2]); - mono_samples[i] = sample; - } - if (g_portalAudio) - { - g_portalAudio->FeedBlock(mono_samples.data()); - } message->bytesWritten = message->length; return Device::WriteResult::Success; } @@ -626,20 +604,20 @@ namespace nsyshid *(uint16be*)(currentWritePtr + 7) = 0x001D; // wDescriptorLength currentWritePtr = currentWritePtr + 9; // endpoint descriptor 1 - *(uint8*)(currentWritePtr + 0) = 7; // bLength - *(uint8*)(currentWritePtr + 1) = 0x05; // bDescriptorType - *(uint8*)(currentWritePtr + 2) = 0x81; // bEndpointAddress - *(uint8*)(currentWritePtr + 3) = 0x03; // bmAttributes + *(uint8*)(currentWritePtr + 0) = 7; // bLength + *(uint8*)(currentWritePtr + 1) = 0x05; // bDescriptorType + *(uint8*)(currentWritePtr + 2) = 0x81; // bEndpointAddress + *(uint8*)(currentWritePtr + 3) = 0x03; // bmAttributes *(uint16be*)(currentWritePtr + 4) = 0x0040; // wMaxPacketSize - *(uint8*)(currentWritePtr + 6) = 0x01; // bInterval + *(uint8*)(currentWritePtr + 6) = 0x01; // bInterval currentWritePtr = currentWritePtr + 7; // endpoint descriptor 2 - *(uint8*)(currentWritePtr + 0) = 7; // bLength - *(uint8*)(currentWritePtr + 1) = 0x05; // bDescriptorType - *(uint8*)(currentWritePtr + 2) = 0x02; // bEndpointAddress - *(uint8*)(currentWritePtr + 3) = 0x03; // bmAttributes + *(uint8*)(currentWritePtr + 0) = 7; // bLength + 
*(uint8*)(currentWritePtr + 1) = 0x05; // bDescriptorType + *(uint8*)(currentWritePtr + 2) = 0x02; // bEndpointAddress + *(uint8*)(currentWritePtr + 3) = 0x03; // bmAttributes *(uint16be*)(currentWritePtr + 4) = 0x0040; // wMaxPacketSize - *(uint8*)(currentWritePtr + 6) = 0x01; // bInterval + *(uint8*)(currentWritePtr + 6) = 0x01; // bInterval currentWritePtr = currentWritePtr + 7; cemu_assert_debug((currentWritePtr - configurationDescriptor) == 0x29); @@ -650,8 +628,8 @@ namespace nsyshid } bool SkylanderPortalDevice::SetIdle(uint8 ifIndex, - uint8 reportId, - uint8 duration) + uint8 reportId, + uint8 duration) { return true; } diff --git a/src/Cafe/OS/libs/nsyshid/SkylanderXbox360.cpp b/src/Cafe/OS/libs/nsyshid/SkylanderXbox360.cpp deleted file mode 100644 index eba8ff9e..00000000 --- a/src/Cafe/OS/libs/nsyshid/SkylanderXbox360.cpp +++ /dev/null @@ -1,160 +0,0 @@ -#include "SkylanderXbox360.h" - -namespace nsyshid -{ - SkylanderXbox360PortalLibusb::SkylanderXbox360PortalLibusb(std::shared_ptr usbPortal) - : Device(0x1430, 0x0150, 1, 2, 0) - { - m_IsOpened = false; - m_usbPortal = std::static_pointer_cast(usbPortal); - } - - bool SkylanderXbox360PortalLibusb::Open() - { - return m_usbPortal->Open(); - } - - void SkylanderXbox360PortalLibusb::Close() - { - return m_usbPortal->Close(); - } - - bool SkylanderXbox360PortalLibusb::IsOpened() - { - return m_usbPortal->IsOpened(); - } - - Device::ReadResult SkylanderXbox360PortalLibusb::Read(ReadMessage* message) - { - std::vector xboxData(std::min(32, message->length + sizeof(XBOX_DATA_HEADER))); - memcpy(xboxData.data(), XBOX_DATA_HEADER, sizeof(XBOX_DATA_HEADER)); - memcpy(xboxData.data() + sizeof(XBOX_DATA_HEADER), message->data, message->length - sizeof(XBOX_DATA_HEADER)); - - ReadMessage xboxMessage(xboxData.data(), xboxData.size(), 0); - auto result = m_usbPortal->Read(&xboxMessage); - - memcpy(message->data, xboxData.data() + sizeof(XBOX_DATA_HEADER), message->length); - message->bytesRead = 
xboxMessage.bytesRead; - - return result; - } - - // Use InterruptTransfer instead of ControlTransfer - bool SkylanderXbox360PortalLibusb::SetReport(ReportMessage* message) - { - if (message->data[0] == 'M' && message->data[1] == 0x01) // Enables Speaker - g72x_init_state(&m_state); - - std::vector xboxData(message->length + sizeof(XBOX_DATA_HEADER)); - memcpy(xboxData.data(), XBOX_DATA_HEADER, sizeof(XBOX_DATA_HEADER)); - memcpy(xboxData.data() + sizeof(XBOX_DATA_HEADER), message->data, message->length); - - WriteMessage xboxMessage(xboxData.data(), xboxData.size(), 0); - auto result = m_usbPortal->Write(&xboxMessage); - - memcpy(message->data, xboxData.data() + sizeof(XBOX_DATA_HEADER), message->length); - - return result == WriteResult::Success; - } - - Device::WriteResult SkylanderXbox360PortalLibusb::Write(WriteMessage* message) - { - std::vector audioData(message->data, message->data + message->length); - - std::vector xboxAudioData(audioData.size() / 4); - for (size_t i = 0; i < audioData.size(); i += 4) - { - int16_t sample1 = (static_cast(audioData[i + 1]) << 8) | audioData[i]; - int16_t sample2 = (static_cast(audioData[i + 3]) << 8) | audioData[i + 2]; - - uint8_t encoded1 = g721_encoder(sample1, &m_state) & 0x0F; - uint8_t encoded2 = g721_encoder(sample2, &m_state) & 0x0F; - - xboxAudioData[i / 4] = ((encoded2 << 4) | encoded1); - } - - std::vector xboxData(xboxAudioData.size() + sizeof(XBOX_AUDIO_DATA_HEADER)); - memcpy(xboxData.data(), XBOX_AUDIO_DATA_HEADER, sizeof(XBOX_AUDIO_DATA_HEADER)); - memcpy(xboxData.data() + sizeof(XBOX_AUDIO_DATA_HEADER), xboxAudioData.data(), xboxAudioData.size()); - - WriteMessage xboxMessage(xboxData.data(), xboxData.size(), 0); - auto result = m_usbPortal->Write(&xboxMessage); - - memcpy(message->data, xboxData.data() + sizeof(XBOX_AUDIO_DATA_HEADER), xboxAudioData.size()); - message->bytesWritten = xboxMessage.bytesWritten - sizeof(XBOX_AUDIO_DATA_HEADER); - return result; - } - - bool 
SkylanderXbox360PortalLibusb::GetDescriptor(uint8 descType, uint8 descIndex, uint16 lang, uint8* output, uint32 outputMaxLength) - { - uint8 configurationDescriptor[0x29]; - - uint8* currentWritePtr; - - // configuration descriptor - currentWritePtr = configurationDescriptor + 0; - *(uint8*)(currentWritePtr + 0) = 9; // bLength - *(uint8*)(currentWritePtr + 1) = 2; // bDescriptorType - *(uint16be*)(currentWritePtr + 2) = 0x0029; // wTotalLength - *(uint8*)(currentWritePtr + 4) = 1; // bNumInterfaces - *(uint8*)(currentWritePtr + 5) = 1; // bConfigurationValue - *(uint8*)(currentWritePtr + 6) = 0; // iConfiguration - *(uint8*)(currentWritePtr + 7) = 0x80; // bmAttributes - *(uint8*)(currentWritePtr + 8) = 0xFA; // MaxPower - currentWritePtr = currentWritePtr + 9; - // interface descriptor - *(uint8*)(currentWritePtr + 0) = 9; // bLength - *(uint8*)(currentWritePtr + 1) = 0x04; // bDescriptorType - *(uint8*)(currentWritePtr + 2) = 0; // bInterfaceNumber - *(uint8*)(currentWritePtr + 3) = 0; // bAlternateSetting - *(uint8*)(currentWritePtr + 4) = 2; // bNumEndpoints - *(uint8*)(currentWritePtr + 5) = 3; // bInterfaceClass - *(uint8*)(currentWritePtr + 6) = 0; // bInterfaceSubClass - *(uint8*)(currentWritePtr + 7) = 0; // bInterfaceProtocol - *(uint8*)(currentWritePtr + 8) = 0; // iInterface - currentWritePtr = currentWritePtr + 9; - // HID descriptor - *(uint8*)(currentWritePtr + 0) = 9; // bLength - *(uint8*)(currentWritePtr + 1) = 0x21; // bDescriptorType - *(uint16be*)(currentWritePtr + 2) = 0x0111; // bcdHID - *(uint8*)(currentWritePtr + 4) = 0x00; // bCountryCode - *(uint8*)(currentWritePtr + 5) = 0x01; // bNumDescriptors - *(uint8*)(currentWritePtr + 6) = 0x22; // bDescriptorType - *(uint16be*)(currentWritePtr + 7) = 0x001D; // wDescriptorLength - currentWritePtr = currentWritePtr + 9; - // endpoint descriptor 1 - *(uint8*)(currentWritePtr + 0) = 7; // bLength - *(uint8*)(currentWritePtr + 1) = 0x05; // bDescriptorType - *(uint8*)(currentWritePtr + 2) = 0x81; // 
bEndpointAddress - *(uint8*)(currentWritePtr + 3) = 0x03; // bmAttributes - *(uint16be*)(currentWritePtr + 4) = 0x0040; // wMaxPacketSize - *(uint8*)(currentWritePtr + 6) = 0x01; // bInterval - currentWritePtr = currentWritePtr + 7; - // endpoint descriptor 2 - *(uint8*)(currentWritePtr + 0) = 7; // bLength - *(uint8*)(currentWritePtr + 1) = 0x05; // bDescriptorType - *(uint8*)(currentWritePtr + 2) = 0x02; // bEndpointAddress - *(uint8*)(currentWritePtr + 3) = 0x03; // bmAttributes - *(uint16be*)(currentWritePtr + 4) = 0x0040; // wMaxPacketSize - *(uint8*)(currentWritePtr + 6) = 0x01; // bInterval - currentWritePtr = currentWritePtr + 7; - - cemu_assert_debug((currentWritePtr - configurationDescriptor) == 0x29); - - memcpy(output, configurationDescriptor, - std::min(outputMaxLength, sizeof(configurationDescriptor))); - return true; - } - - bool SkylanderXbox360PortalLibusb::SetIdle(uint8 ifIndex, - uint8 reportId, - uint8 duration) - { - return true; - } - - bool SkylanderXbox360PortalLibusb::SetProtocol(uint8 ifIndex, uint8 protocol) - { - return true; - } -} // namespace nsyshid \ No newline at end of file diff --git a/src/Cafe/OS/libs/nsyshid/SkylanderXbox360.h b/src/Cafe/OS/libs/nsyshid/SkylanderXbox360.h deleted file mode 100644 index 901c63f9..00000000 --- a/src/Cafe/OS/libs/nsyshid/SkylanderXbox360.h +++ /dev/null @@ -1,46 +0,0 @@ -#pragma once - -#include "nsyshid.h" -#include "BackendLibusb.h" -#include "g721/g721.h" - -namespace nsyshid -{ - class SkylanderXbox360PortalLibusb final : public Device { - public: - SkylanderXbox360PortalLibusb(std::shared_ptr usbPortal); - ~SkylanderXbox360PortalLibusb() = default; - - bool Open() override; - - void Close() override; - - bool IsOpened() override; - - ReadResult Read(ReadMessage* message) override; - - WriteResult Write(WriteMessage* message) override; - - bool GetDescriptor(uint8 descType, - uint8 descIndex, - uint16 lang, - uint8* output, - uint32 outputMaxLength) override; - - bool SetIdle(uint8 ifIndex, - 
uint8 reportId, - uint8 duration) override; - - bool SetProtocol(uint8 ifIndex, uint8 protocol) override; - - bool SetReport(ReportMessage* message) override; - - private: - std::shared_ptr m_usbPortal; - bool m_IsOpened; - struct g72x_state m_state; - }; - - constexpr uint8 XBOX_DATA_HEADER[] = { 0x0B, 0x14 }; - constexpr uint8 XBOX_AUDIO_DATA_HEADER[] = { 0x0B, 0x17 }; -} // namespace nsyshid \ No newline at end of file diff --git a/src/Cafe/OS/libs/nsyshid/Whitelist.cpp b/src/Cafe/OS/libs/nsyshid/Whitelist.cpp index 783384ec..f20e4c45 100644 --- a/src/Cafe/OS/libs/nsyshid/Whitelist.cpp +++ b/src/Cafe/OS/libs/nsyshid/Whitelist.cpp @@ -16,12 +16,8 @@ namespace nsyshid m_devices.emplace_back(0x0e6f, 0x0241); // skylanders portal m_devices.emplace_back(0x1430, 0x0150); - // skylanders 360 portal - m_devices.emplace_back(0x1430, 0x1F17); // disney infinity base m_devices.emplace_back(0x0e6f, 0x0129); - // kamen rider ride gate - m_devices.emplace_back(0x0e6f, 0x200A); } } diff --git a/src/Cafe/OS/libs/nsyshid/g721/g721.cpp b/src/Cafe/OS/libs/nsyshid/g721/g721.cpp deleted file mode 100644 index 995212c2..00000000 --- a/src/Cafe/OS/libs/nsyshid/g721/g721.cpp +++ /dev/null @@ -1,543 +0,0 @@ -/* - * This source code is a product of Sun Microsystems, Inc. and is provided - * for unrestricted use. Users may copy or modify this source code without - * charge. - * - * SUN SOURCE CODE IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING - * THE WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. - * - * Sun source code is provided with no support and without any obligation on - * the part of Sun Microsystems, Inc. to assist in its use, correction, - * modification or enhancement. - * - * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE - * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY THIS SOFTWARE - * OR ANY PART THEREOF. 
- * - * In no event will Sun Microsystems, Inc. be liable for any lost revenue - * or profits or other special, indirect and consequential damages, even if - * Sun has been advised of the possibility of such damages. - * - * Sun Microsystems, Inc. - * 2550 Garcia Avenue - * Mountain View, California 94043 - */ - -/* - * g721.c - * - * Description: - * - * g721_encoder(), g721_decoder() - * - * These routines comprise an implementation of the CCITT G.721 ADPCM - * coding algorithm. Essentially, this implementation is identical to - * the bit level description except for a few deviations which - * take advantage of work station attributes, such as hardware 2's - * complement arithmetic and large memory. Specifically, certain time - * consuming operations such as multiplications are replaced - * with lookup tables and software 2's complement operations are - * replaced with hardware 2's complement. - * - * The deviation from the bit level specification (lookup tables) - * preserves the bit level performance specifications. - * - * As outlined in the G.721 Recommendation, the algorithm is broken - * down into modules. Each section of code below is preceded by - * the name of the module which it is implementing. - * - */ -#include "g721.h" -#include - -static short qtab_721[7] = { -124, 80, 178, 246, 300, 349, 400 }; -/* - * Maps G.721 code word to reconstructed scale factor normalized log - * magnitude values. - */ -static short _dqlntab[16] = { -2048, 4, 135, 213, 273, 323, 373, 425, - 425, 373, 323, 273, 213, 135, 4, -2048 }; - -/* Maps G.721 code word to log of scale factor multiplier. */ -static short _witab[16] = { -12, 18, 41, 64, 112, 198, 355, 1122, - 1122, 355, 198, 112, 64, 41, 18, -12 }; -/* - * Maps G.721 code words to a set of values whose long and short - * term averages are computed and then compared to give an indication - * how stationary (steady state) the signal is. 
- */ -static short _fitab[16] = { 0, 0, 0, 0x200, 0x200, 0x200, 0x600, 0xE00, - 0xE00, 0x600, 0x200, 0x200, 0x200, 0, 0, 0 }; - -/* - * g721_encoder() - * - * Encodes the input value of linear PCM from sl and returns - * the resulting code. - */ -int g721_encoder(int sl, struct g72x_state* state_ptr) -{ - short sezi, se, sez; /* ACCUM */ - short d; /* SUBTA */ - short sr; /* ADDB */ - short y; /* MIX */ - short dqsez; /* ADDC */ - short dq, i; - - sl >>= 2; /* linearize input sample to 14-bit PCM */ - - sezi = predictor_zero(state_ptr); - sez = sezi >> 1; - se = (sezi + predictor_pole(state_ptr)) >> 1; /* estimated signal */ - - d = sl - se; /* estimation difference */ - - /* quantize the prediction difference */ - y = step_size(state_ptr); /* quantizer step size */ - i = quantize(d, y, qtab_721, 7); /* i = ADPCM code */ - - dq = reconstruct(i & 8, _dqlntab[i], y); /* quantized est diff */ - - sr = (dq < 0) ? se - (dq & 0x3FFF) : se + dq; /* reconst. signal */ - - dqsez = sr + sez - se; /* pole prediction diff. */ - - update(4, y, _witab[i] << 5, _fitab[i], dq, sr, dqsez, state_ptr); - - return (i); -} - -/* - * g721_decoder() - * - * Description: - * - * Decodes a 4-bit code of G.721 encoded data of i and - * returns the resulting linear PCM - */ -int g721_decoder(int i, struct g72x_state* state_ptr) -{ - short sezi, sei, sez, se; /* ACCUM */ - short y; /* MIX */ - short sr; /* ADDB */ - short dq; - short dqsez; - - i &= 0x0f; /* mask to get proper bits */ - sezi = predictor_zero(state_ptr); - sez = sezi >> 1; - sei = sezi + predictor_pole(state_ptr); - se = sei >> 1; /* se = estimated signal */ - - y = step_size(state_ptr); /* dynamic quantizer step size */ - - dq = reconstruct(i & 0x08, _dqlntab[i], y); /* quantized diff. */ - - sr = (dq < 0) ? (se - (dq & 0x3FFF)) : se + dq; /* reconst. signal */ - - dqsez = sr - se + sez; /* pole prediction diff. 
*/ - - update(4, y, _witab[i] << 5, _fitab[i], dq, sr, dqsez, state_ptr); - - return (sr << 2); -} - - -static short power2[15] = { 1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000 }; - -/* - * quan() - * - * quantizes the input val against the table of size short integers. - * It returns i if table[i - 1] <= val < table[i]. - * - * Using linear search for simple coding. - */ -static int quan(int val, short* table, int size) -{ - int i; - - for (i = 0; i < size; i++) - if (val < *table++) - break; - return (i); -} - -/* - * fmult() - * - * returns the integer product of the 14-bit integer "an" and - * "floating point" representation (4-bit exponent, 6-bit mantessa) "srn". - */ -static int fmult(int an, int srn) -{ - short anmag, anexp, anmant; - short wanexp, wanmant; - short retval; - - anmag = (an > 0) ? an : ((-an) & 0x1FFF); - anexp = quan(anmag, power2, 15) - 6; - anmant = (anmag == 0) ? 32 : (anexp >= 0) ? anmag >> anexp : anmag << -anexp; - wanexp = anexp + ((srn >> 6) & 0xF) - 13; - - wanmant = (anmant * (srn & 077) + 0x30) >> 4; - retval = (wanexp >= 0) ? ((wanmant << wanexp) & 0x7FFF) : (wanmant >> -wanexp); - - return (((an ^ srn) < 0) ? -retval : retval); -} - - - -/* - * update() - * - * updates the state variables for each output code - */ -void update(int code_size, /* distinguish 723_40 with others */ - int y, /* quantizer step size */ - int wi, /* scale factor multiplier */ - int fi, /* for long/short term energies */ - int dq, /* quantized prediction difference */ - int sr, /* reconstructed signal */ - int dqsez, /* difference from 2-pole predictor */ - struct g72x_state* state_ptr) /* coder state pointer */ -{ - int cnt; - short mag, exp; /* Adaptive predictor, FLOAT A */ - short a2p = 0; /* LIMC */ - short a1ul; /* UPA1 */ - short pks1; /* UPA2 */ - short fa1; - char tr; /* tone/transition detector */ - short ylint, thr2, dqthr; - short ylfrac, thr1; - short pk0; - - pk0 = (dqsez < 0) ? 
1 : 0; /* needed in updating predictor poles */ - - mag = dq & 0x7FFF; /* prediction difference magnitude */ - /* TRANS */ - ylint = state_ptr->yl >> 15; /* exponent part of yl */ - ylfrac = (state_ptr->yl >> 10) & 0x1F; /* fractional part of yl */ - thr1 = (32 + ylfrac) << ylint; /* threshold */ - thr2 = (ylint > 9) ? 31 << 10 : thr1; /* limit thr2 to 31 << 10 */ - dqthr = (thr2 + (thr2 >> 1)) >> 1; /* dqthr = 0.75 * thr2 */ - if (state_ptr->td == 0) /* signal supposed voice */ - tr = 0; - else if (mag <= dqthr) /* supposed data, but small mag */ - tr = 0; /* treated as voice */ - else /* signal is data (modem) */ - tr = 1; - - /* - * Quantizer scale factor adaptation. - */ - - /* FUNCTW & FILTD & DELAY */ - /* update non-steady state step size multiplier */ - state_ptr->yu = y + ((wi - y) >> 5); - - /* LIMB */ - if (state_ptr->yu < 544) /* 544 <= yu <= 5120 */ - state_ptr->yu = 544; - else if (state_ptr->yu > 5120) - state_ptr->yu = 5120; - - /* FILTE & DELAY */ - /* update steady state step size multiplier */ - state_ptr->yl += state_ptr->yu + ((-state_ptr->yl) >> 6); - - /* - * Adaptive predictor coefficients. - */ - if (tr == 1) { /* reset a's and b's for modem signal */ - state_ptr->a[0] = 0; - state_ptr->a[1] = 0; - state_ptr->b[0] = 0; - state_ptr->b[1] = 0; - state_ptr->b[2] = 0; - state_ptr->b[3] = 0; - state_ptr->b[4] = 0; - state_ptr->b[5] = 0; - } else { /* update a's and b's */ - pks1 = pk0 ^ state_ptr->pk[0]; /* UPA2 */ - - /* update predictor pole a[1] */ - a2p = state_ptr->a[1] - (state_ptr->a[1] >> 7); - if (dqsez != 0) { - fa1 = (pks1) ? 
state_ptr->a[0] : -state_ptr->a[0]; - if (fa1 < -8191) /* a2p = function of fa1 */ - a2p -= 0x100; - else if (fa1 > 8191) - a2p += 0xFF; - else - a2p += fa1 >> 5; - - if (pk0 ^ state_ptr->pk[1]) - /* LIMC */ - if (a2p <= -12160) - a2p = -12288; - else if (a2p >= 12416) - a2p = 12288; - else - a2p -= 0x80; - else if (a2p <= -12416) - a2p = -12288; - else if (a2p >= 12160) - a2p = 12288; - else - a2p += 0x80; - } - - /* TRIGB & DELAY */ - state_ptr->a[1] = a2p; - - /* UPA1 */ - /* update predictor pole a[0] */ - state_ptr->a[0] -= state_ptr->a[0] >> 8; - if (dqsez != 0) { - if (pks1 == 0) - state_ptr->a[0] += 192; - else - state_ptr->a[0] -= 192; - } - - /* LIMD */ - a1ul = 15360 - a2p; - if (state_ptr->a[0] < -a1ul) - state_ptr->a[0] = -a1ul; - else if (state_ptr->a[0] > a1ul) - state_ptr->a[0] = a1ul; - - /* UPB : update predictor zeros b[6] */ - for (cnt = 0; cnt < 6; cnt++) { - if (code_size == 5) /* for 40Kbps G.723 */ - state_ptr->b[cnt] -= state_ptr->b[cnt] >> 9; - else /* for G.721 and 24Kbps G.723 */ - state_ptr->b[cnt] -= state_ptr->b[cnt] >> 8; - if (dq & 0x7FFF) { /* XOR */ - if ((dq ^ state_ptr->dq[cnt]) >= 0) - state_ptr->b[cnt] += 128; - else - state_ptr->b[cnt] -= 128; - } - } - } - - for (cnt = 5; cnt > 0; cnt--) - state_ptr->dq[cnt] = state_ptr->dq[cnt - 1]; - /* FLOAT A : convert dq[0] to 4-bit exp, 6-bit mantissa f.p. */ - if (mag == 0) { - state_ptr->dq[0] = (dq >= 0) ? 0x20 : 0xFC20; - } else { - exp = quan(mag, power2, 15); - state_ptr->dq[0] = (dq >= 0) ? (exp << 6) + ((mag << 6) >> exp) - : (exp << 6) + ((mag << 6) >> exp) - 0x400; - } - - state_ptr->sr[1] = state_ptr->sr[0]; - /* FLOAT B : convert sr to 4-bit exp., 6-bit mantissa f.p. 
*/ - if (sr == 0) { - state_ptr->sr[0] = 0x20; - } else if (sr > 0) { - exp = quan(sr, power2, 15); - state_ptr->sr[0] = (exp << 6) + ((sr << 6) >> exp); - } else if (sr > -32768) { - mag = -sr; - exp = quan(mag, power2, 15); - state_ptr->sr[0] = (exp << 6) + ((mag << 6) >> exp) - 0x400; - } else - state_ptr->sr[0] = 0xFC20; - - /* DELAY A */ - state_ptr->pk[1] = state_ptr->pk[0]; - state_ptr->pk[0] = pk0; - - /* TONE */ - if (tr == 1) /* this sample has been treated as data */ - state_ptr->td = 0; /* next one will be treated as voice */ - else if (a2p < -11776) /* small sample-to-sample correlation */ - state_ptr->td = 1; /* signal may be data */ - else /* signal is voice */ - state_ptr->td = 0; - - /* - * Adaptation speed control. - */ - state_ptr->dms += (fi - state_ptr->dms) >> 5; /* FILTA */ - state_ptr->dml += (((fi << 2) - state_ptr->dml) >> 7); /* FILTB */ - - if (tr == 1) - state_ptr->ap = 256; - else if (y < 1536) /* SUBTC */ - state_ptr->ap += (0x200 - state_ptr->ap) >> 4; - else if (state_ptr->td == 1) - state_ptr->ap += (0x200 - state_ptr->ap) >> 4; - else if (abs((state_ptr->dms << 2) - state_ptr->dml) >= (state_ptr->dml >> 3)) - state_ptr->ap += (0x200 - state_ptr->ap) >> 4; - else - state_ptr->ap += (-state_ptr->ap) >> 4; -} - - -/* - * g72x_init_state() - * - * This routine initializes and/or resets the g72x_state structure - * pointed to by 'state_ptr'. - * All the initial state values are specified in the CCITT G.721 document. - */ -void g72x_init_state(struct g72x_state* state_ptr) -{ - int cnta; - - state_ptr->yl = 34816; - state_ptr->yu = 544; - state_ptr->dms = 0; - state_ptr->dml = 0; - state_ptr->ap = 0; - for (cnta = 0; cnta < 2; cnta++) { - state_ptr->a[cnta] = 0; - state_ptr->pk[cnta] = 0; - state_ptr->sr[cnta] = 32; - } - for (cnta = 0; cnta < 6; cnta++) { - state_ptr->b[cnta] = 0; - state_ptr->dq[cnta] = 32; - } - state_ptr->td = 0; -} - -/* - * predictor_zero() - * - * computes the estimated signal from 6-zero predictor. 
- * - */ -int predictor_zero(struct g72x_state* state_ptr) -{ - int i; - int sezi; - - sezi = fmult(state_ptr->b[0] >> 2, state_ptr->dq[0]); - for (i = 1; i < 6; i++) /* ACCUM */ - sezi += fmult(state_ptr->b[i] >> 2, state_ptr->dq[i]); - return (sezi); -} -/* - * predictor_pole() - * - * computes the estimated signal from 2-pole predictor. - * - */ -int predictor_pole(struct g72x_state* state_ptr) -{ - return (fmult(state_ptr->a[1] >> 2, state_ptr->sr[1]) + - fmult(state_ptr->a[0] >> 2, state_ptr->sr[0])); -} -/* - * step_size() - * - * computes the quantization step size of the adaptive quantizer. - * - */ -int step_size(struct g72x_state* state_ptr) -{ - int y; - int dif; - int al; - - if (state_ptr->ap >= 256) - return (state_ptr->yu); - else { - y = state_ptr->yl >> 6; - dif = state_ptr->yu - y; - al = state_ptr->ap >> 2; - if (dif > 0) - y += (dif * al) >> 6; - else if (dif < 0) - y += (dif * al + 0x3F) >> 6; - return (y); - } -} - -/* - * quantize() - * - * Given a raw sample, 'd', of the difference signal and a - * quantization step size scale factor, 'y', this routine returns the - * ADPCM codeword to which that sample gets quantized. The step - * size scale factor division operation is done in the log base 2 domain - * as a subtraction. - */ -int quantize(int d, /* Raw difference signal sample */ - int y, /* Step size multiplier */ - short* table, /* quantization table */ - int size) /* table size of short integers */ -{ - short dqm; /* Magnitude of 'd' */ - short exp; /* Integer part of base 2 log of 'd' */ - short mant; /* Fractional part of base 2 log */ - short dl; /* Log of magnitude of 'd' */ - short dln; /* Step size scale factor normalized log */ - int i; - - /* - * LOG - * - * Compute base 2 log of 'd', and store in 'dl'. - */ - dqm = abs(d); - exp = quan(dqm >> 1, power2, 15); - mant = ((dqm << 7) >> exp) & 0x7F; /* Fractional portion. */ - dl = (exp << 7) + mant; - - /* - * SUBTB - * - * "Divide" by step size multiplier. 
- */ - dln = dl - (y >> 2); - - /* - * QUAN - * - * Obtain codword i for 'd'. - */ - i = quan(dln, table, size); - if (d < 0) /* take 1's complement of i */ - return ((size << 1) + 1 - i); - else if (i == 0) /* take 1's complement of 0 */ - return ((size << 1) + 1); /* new in 1988 */ - else - return (i); -} -/* - * reconstruct() - * - * Returns reconstructed difference signal 'dq' obtained from - * codeword 'i' and quantization step size scale factor 'y'. - * Multiplication is performed in log base 2 domain as addition. - */ -int reconstruct(int sign, /* 0 for non-negative value */ - int dqln, /* G.72x codeword */ - int y) /* Step size multiplier */ -{ - short dql; /* Log of 'dq' magnitude */ - short dex; /* Integer part of log */ - short dqt; - short dq; /* Reconstructed difference signal sample */ - - dql = dqln + (y >> 2); /* ADDA */ - - if (dql < 0) { - return ((sign) ? -0x8000 : 0); - } else { /* ANTILOG */ - dex = (dql >> 7) & 15; - dqt = 128 + (dql & 127); - dq = (dqt << 7) >> (14 - dex); - return ((sign) ? (dq - 0x8000) : dq); - } -} \ No newline at end of file diff --git a/src/Cafe/OS/libs/nsyshid/g721/g721.h b/src/Cafe/OS/libs/nsyshid/g721/g721.h deleted file mode 100644 index c764e5fd..00000000 --- a/src/Cafe/OS/libs/nsyshid/g721/g721.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * This source code is a product of Sun Microsystems, Inc. and is provided - * for unrestricted use. Users may copy or modify this source code without - * charge. - * - * SUN SOURCE CODE IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING - * THE WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. - * - * Sun source code is provided with no support and without any obligation on - * the part of Sun Microsystems, Inc. to assist in its use, correction, - * modification or enhancement. - * - * SUN MICROSYSTEMS, INC. 
SHALL HAVE NO LIABILITY WITH RESPECT TO THE - * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY THIS SOFTWARE - * OR ANY PART THEREOF. - * - * In no event will Sun Microsystems, Inc. be liable for any lost revenue - * or profits or other special, indirect and consequential damages, even if - * Sun has been advised of the possibility of such damages. - * - * Sun Microsystems, Inc. - * 2550 Garcia Avenue - * Mountain View, California 94043 - */ - -/* - * g72x.h - * - * Header file for CCITT conversion routines. - * - */ -#ifndef _G72X_H -#define _G72X_H - -/* - * The following is the definition of the state structure - * used by the G.721/G.723 encoder and decoder to preserve their internal - * state between successive calls. The meanings of the majority - * of the state structure fields are explained in detail in the - * CCITT Recommendation G.721. The field names are essentially identical - * to variable names in the bit level description of the coding algorithm - * included in this Recommendation. - */ -struct g72x_state { - long yl; /* Locked or steady state step size multiplier. */ - short yu; /* Unlocked or non-steady state step size multiplier. */ - short dms; /* Short term energy estimate. */ - short dml; /* Long term energy estimate. */ - short ap; /* Linear weighting coefficient of 'yl' and 'yu'. */ - - short a[2]; /* Coefficients of pole portion of prediction filter. */ - short b[6]; /* Coefficients of zero portion of prediction filter. */ - short pk[2]; /* - * Signs of previous two samples of a partially - * reconstructed signal. - */ - short dq[6]; /* - * Previous 6 samples of the quantized difference - * signal represented in an internal floating point - * format. - */ - short sr[2]; /* - * Previous 2 samples of the quantized difference - * signal represented in an internal floating point - * format. - */ - char td; /* delayed tone detect, new in 1988 version */ -}; - -/* External function definitions. 
*/ - -void g72x_init_state(struct g72x_state*); -int g721_encoder(int sample, struct g72x_state* state_ptr); -int g721_decoder(int code, struct g72x_state* state_ptr); - - -int quantize(int d, int y, short* table, int size); -int reconstruct(int, int, int); -void - - update(int code_size, - int y, - int wi, - int fi, - int dq, - int sr, - int dqsez, - struct g72x_state* state_ptr); - -int predictor_zero(struct g72x_state* state_ptr); - -int predictor_pole(struct g72x_state* state_ptr); -int step_size(struct g72x_state* state_ptr); -#endif /* !_G72X_H */ - diff --git a/src/Cafe/OS/libs/nsyshid/nsyshid.cpp b/src/Cafe/OS/libs/nsyshid/nsyshid.cpp index 93ee8c3d..2fe6da07 100644 --- a/src/Cafe/OS/libs/nsyshid/nsyshid.cpp +++ b/src/Cafe/OS/libs/nsyshid/nsyshid.cpp @@ -256,17 +256,17 @@ namespace nsyshid device->m_productId); } - std::shared_ptr FindDeviceById(uint16 vendorId, uint16 productId) + bool FindDeviceById(uint16 vendorId, uint16 productId) { std::lock_guard lock(hidMutex); for (const auto& device : deviceList) { if (device->m_vendorId == vendorId && device->m_productId == productId) { - return device; + return true; } } - return nullptr; + return false; } void export_HIDAddClient(PPCInterpreter_t* hCPU) @@ -876,7 +876,7 @@ namespace nsyshid return nullptr; } - std::shared_ptr Backend::FindDeviceById(uint16 vendorId, uint16 productId) + bool Backend::FindDeviceById(uint16 vendorId, uint16 productId) { return nsyshid::FindDeviceById(vendorId, productId); } diff --git a/src/Cafe/OS/libs/snd_core/ax_out.cpp b/src/Cafe/OS/libs/snd_core/ax_out.cpp index fe32cfb4..a88807f2 100644 --- a/src/Cafe/OS/libs/snd_core/ax_out.cpp +++ b/src/Cafe/OS/libs/snd_core/ax_out.cpp @@ -404,7 +404,7 @@ namespace snd_core { try { - g_tvAudio = IAudioAPI::CreateDeviceFromConfig(IAudioAPI::AudioType::TV, 48000, snd_core::AX_SAMPLES_PER_3MS_48KHZ * AX_FRAMES_PER_GROUP, 16); + g_tvAudio = IAudioAPI::CreateDeviceFromConfig(true, 48000, snd_core::AX_SAMPLES_PER_3MS_48KHZ * AX_FRAMES_PER_GROUP, 
16); } catch (std::runtime_error& ex) { @@ -417,7 +417,7 @@ namespace snd_core { try { - g_padAudio = IAudioAPI::CreateDeviceFromConfig(IAudioAPI::AudioType::Gamepad, 48000, snd_core::AX_SAMPLES_PER_3MS_48KHZ * AX_FRAMES_PER_GROUP, 16); + g_padAudio = IAudioAPI::CreateDeviceFromConfig(false, 48000, snd_core::AX_SAMPLES_PER_3MS_48KHZ * AX_FRAMES_PER_GROUP, 16); if(g_padAudio) g_padVolume = g_padAudio->GetVolume(); } @@ -442,11 +442,6 @@ namespace snd_core g_padAudio->Stop(); g_padAudio.reset(); } - if (g_portalAudio) - { - g_portalAudio->Stop(); - g_portalAudio.reset(); - } } void AXOut_updateDevicePlayState(bool isPlaying) @@ -467,14 +462,6 @@ namespace snd_core else g_padAudio->Stop(); } - - if (g_portalAudio) - { - if (isPlaying) - g_portalAudio->Play(); - else - g_portalAudio->Stop(); - } } // called periodically to check for AX updates diff --git a/src/Cemu/Logging/CemuLogging.cpp b/src/Cemu/Logging/CemuLogging.cpp index f3575cc9..5cde2a7f 100644 --- a/src/Cemu/Logging/CemuLogging.cpp +++ b/src/Cemu/Logging/CemuLogging.cpp @@ -3,7 +3,6 @@ #include "util/helpers/helpers.h" #include "config/CemuConfig.h" #include "config/ActiveSettings.h" -#include "config/LaunchSettings.h" #include #include @@ -145,9 +144,6 @@ bool cemuLog_log(LogType type, std::string_view text) if (!cemuLog_isLoggingEnabled(type)) return false; - if (LaunchSettings::Verbose()) - std::cout << text << std::endl; - cemuLog_writeLineToLog(text); const auto it = std::find_if(g_logging_window_mapping.cbegin(), g_logging_window_mapping.cend(), diff --git a/src/Cemu/Logging/CemuLogging.h b/src/Cemu/Logging/CemuLogging.h index d729d364..5b2e5fa4 100644 --- a/src/Cemu/Logging/CemuLogging.h +++ b/src/Cemu/Logging/CemuLogging.h @@ -39,6 +39,7 @@ enum class LogType : sint32 NN_SL = 26, TextureReadback = 29, + ProcUi = 39, nlibcurl = 41, @@ -46,7 +47,6 @@ enum class LogType : sint32 NFC = 41, NTAG = 42, - Recompiler = 60, }; template <> diff --git a/src/Cemu/ncrypto/ncrypto.h b/src/Cemu/ncrypto/ncrypto.h 
index 1ed7e91b..5f399ad7 100644 --- a/src/Cemu/ncrypto/ncrypto.h +++ b/src/Cemu/ncrypto/ncrypto.h @@ -13,17 +13,10 @@ namespace NCrypto std::string base64Encode(const void* inputMem, size_t inputLen); std::vector base64Decode(std::string_view inputStr); - /* key and iv helper struct */ + /* key helper struct */ struct AesKey { - static constexpr size_t SIZE = 16; - uint8 b[SIZE]; - }; - - struct AesIv - { - static constexpr size_t SIZE = 16; - uint8 iv[SIZE]; + uint8 b[16]; }; /* ECC Certificate */ diff --git a/src/Common/CafeString.h b/src/Common/CafeString.h index 57fc72da..d902d721 100644 --- a/src/Common/CafeString.h +++ b/src/Common/CafeString.h @@ -51,15 +51,15 @@ class CafeWideString // fixed buffer size, null-terminated, PPC wchar_t (16bit b bool assignFromUTF8(std::string_view sv) { - std::vector beStr = StringHelpers::FromUtf8(sv); - if(beStr.size() > N-1) + std::basic_string beStr = StringHelpers::FromUtf8(sv); + if(beStr.length() > N-1) { memcpy(data, beStr.data(), (N-1)*sizeof(uint16be)); data[N-1] = 0; return false; } - memcpy(data, beStr.data(), beStr.size()*sizeof(uint16be)); - data[beStr.size()] = '\0'; + memcpy(data, beStr.data(), beStr.length()*sizeof(uint16be)); + data[beStr.length()] = '\0'; return true; } diff --git a/src/Common/precompiled.h b/src/Common/precompiled.h index 26fdfd28..bda75cef 100644 --- a/src/Common/precompiled.h +++ b/src/Common/precompiled.h @@ -310,8 +310,7 @@ inline uint64 __rdtsc() inline void _mm_mfence() { - asm volatile("" ::: "memory"); - std::atomic_thread_fence(std::memory_order_seq_cst); + } inline unsigned char _addcarry_u64(unsigned char carry, unsigned long long a, unsigned long long b, unsigned long long *result) @@ -386,6 +385,8 @@ template constexpr bool HAS_FLAG(T1 flags, T2 test_flag) { return (flags & (T1)test_flag) == (T1)test_flag; } template constexpr bool HAS_BIT(T1 value, T2 index) { return (value & ((T1)1 << index)) != 0; } +template +constexpr void SAFE_RELEASE(T& p) { if (p) { p->Release(); p = 
nullptr; } } template constexpr uint32_t ppcsizeof() { return (uint32_t) sizeof(T); } @@ -615,36 +616,4 @@ namespace stdx scope_exit& operator=(scope_exit) = delete; void release() { m_released = true;} }; - - // Xcode 16 doesn't have std::atomic_ref support and we provide a minimalist reimplementation as fallback -#ifdef __cpp_lib_atomic_ref - #include - template - using atomic_ref = std::atomic_ref; -#else - template - class atomic_ref - { - static_assert(std::is_trivially_copyable::value, "atomic_ref requires trivially copyable types"); - public: - using value_type = T; - - explicit atomic_ref(T& obj) noexcept : ptr_(std::addressof(obj)) {} - - T load(std::memory_order order = std::memory_order_seq_cst) const noexcept - { - auto aptr = reinterpret_cast*>(ptr_); - return aptr->load(order); - } - - void store(T desired, std::memory_order order = std::memory_order_seq_cst) const noexcept - { - auto aptr = reinterpret_cast*>(ptr_); - aptr->store(desired, order); - } - - private: - T* ptr_; - }; -#endif } diff --git a/src/asm/CMakeLists.txt b/src/asm/CMakeLists.txt new file mode 100644 index 00000000..19a7ddd8 --- /dev/null +++ b/src/asm/CMakeLists.txt @@ -0,0 +1,53 @@ +project(CemuAsm C) + +if (CMAKE_OSX_ARCHITECTURES) + set(CEMU_ASM_ARCHITECTURE ${CMAKE_OSX_ARCHITECTURES}) +else() + set(CEMU_ASM_ARCHITECTURE ${CMAKE_SYSTEM_PROCESSOR}) +endif() + +if (CEMU_ASM_ARCHITECTURE MATCHES "(x86)|(X86)|(amd64)|(AMD64)") + + if (WIN32) + + enable_language(C ASM_MASM) + + add_library(CemuAsm x64util_masm.asm) + set_source_files_properties(x64util_masm.asm PROPERTIES LANGUAGE ASM_MASM) + + # workaround for cr flag being passed to LINK.exe which considers it an input file and thus fails + # doesn't always seem to happen. 
The Windows CI builds were fine, but locally I would run into this problem + # possibly related to https://gitlab.kitware.com/cmake/cmake/-/issues/18889 + set(CMAKE_ASM_MASM_CREATE_STATIC_LIBRARY " /OUT: ") + + set_property(TARGET CemuAsm PROPERTY MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") + + else() + + # NASM + if (APPLE) + set(CMAKE_ASM_NASM_COMPILE_OBJECT " -g -Fdwarf -f macho64 --prefix _ -o ") + else() + set(CMAKE_ASM_NASM_COMPILE_OBJECT " -g -Fdwarf -f elf64 -o ") + endif() + set(CMAKE_ASM_NASM_LINK_EXECUTABLE "ld -fPIC -o ") + + enable_language(C ASM_NASM) + + add_library(CemuAsm x64util_nasm.asm) + set_source_files_properties(x64util_nasm.asm PROPERTIES LANGUAGE ASM_NASM) + + if (APPLE) + set_target_properties(CemuAsm PROPERTIES NASM_OBJ_FORMAT macho64) + else() + set_target_properties(CemuAsm PROPERTIES NASM_OBJ_FORMAT elf64) + endif() + set_target_properties(CemuAsm PROPERTIES LINKER_LANGUAGE C) + + endif() + +elseif(CEMU_ASM_ARCHITECTURE MATCHES "(aarch64)|(AARCH64)|(arm64)|(ARM64)") + add_library(CemuAsm stub.cpp) +else() + message(STATUS "CemuAsm - Unsupported arch: ${CEMU_ASM_ARCHITECTURE}") +endif() diff --git a/src/asm/stub.cpp b/src/asm/stub.cpp new file mode 100644 index 00000000..8d1c8b69 --- /dev/null +++ b/src/asm/stub.cpp @@ -0,0 +1 @@ + diff --git a/src/asm/x64util.h b/src/asm/x64util.h new file mode 100644 index 00000000..885c2f63 --- /dev/null +++ b/src/asm/x64util.h @@ -0,0 +1,20 @@ +#pragma once + +#if defined(ARCH_X86_64) + +extern "C" void recompiler_fres(); +extern "C" void recompiler_frsqrte(); + +#else + +// stubbed on non-x86 for now +static void recompiler_fres() +{ + cemu_assert_unimplemented(); +} +static void recompiler_frsqrte() +{ + cemu_assert_unimplemented(); +} + +#endif diff --git a/src/asm/x64util_masm.asm b/src/asm/x64util_masm.asm new file mode 100644 index 00000000..2587c786 --- /dev/null +++ b/src/asm/x64util_masm.asm @@ -0,0 +1,233 @@ +.code + +recompiler_fres PROC + ; store all modified registers +push rdx 
+push rcx +push rax +push r8 +lea r8,[asmFresLookupTable] +movq rdx, xmm15 +mov rcx,rdx +shr rcx,2Fh +mov rax,rdx +and ecx,1Fh +shr rax,25h +and eax,3FFh +imul eax,dword ptr [r8+rcx*8+4] +mov r8d,dword ptr [r8+rcx*8] +mov rcx,rdx +shr rcx,34h +inc eax +shr eax,1 +sub r8d,eax +and ecx,7FFh +jne fres_espresso_label3 +mov rax,7FF0000000000000h +or rdx,rax +movq xmm15, rdx +pop r8 +pop rax +pop rcx +pop rdx +ret +fres_espresso_label3: +cmp ecx,7FFh +jne fres_espresso_label4 +mov rax,0FFFFFFFFFFFFFh +test rax,rdx +jne fres_espresso_label1 +test rdx,rdx +jns fres_espresso_label2 +mov rax,8000000000000000h +movq xmm15, rax +pop r8 +pop rax +pop rcx +pop rdx +ret +fres_espresso_label2: +xorps xmm15,xmm15 +pop r8 +pop rax +pop rcx +pop rdx +ret +fres_espresso_label4: +mov eax,7FDh +sub eax,ecx +mov ecx,eax +mov rax,8000000000000000h +and rdx,rax +shl rcx,34h +mov eax,r8d +or rcx,rdx +shl rax,1Dh +add rcx,rax +movq xmm15, rcx +fres_espresso_label1: +pop r8 +pop rax +pop rcx +pop rdx +ret + +recompiler_fres ENDP + +asmFresLookupTable: +DD 07ff800h, 03e1h +DD 0783800h, 03a7h +DD 070ea00h, 0371h +DD 06a0800h, 0340h +DD 0638800h, 0313h +DD 05d6200h, 02eah +DD 0579000h, 02c4h +DD 0520800h, 02a0h +DD 04cc800h, 027fh +DD 047ca00h, 0261h +DD 0430800h, 0245h +DD 03e8000h, 022ah +DD 03a2c00h, 0212h +DD 0360800h, 01fbh +DD 0321400h, 01e5h +DD 02e4a00h, 01d1h +DD 02aa800h, 01beh +DD 0272c00h, 01ach +DD 023d600h, 019bh +DD 0209e00h, 018bh +DD 01d8800h, 017ch +DD 01a9000h, 016eh +DD 017ae00h, 015bh +DD 014f800h, 015bh +DD 0124400h, 0143h +DD 0fbe00h, 0143h +DD 0d3800h, 012dh +DD 0ade00h, 012dh +DD 088400h, 011ah +DD 065000h, 011ah +DD 041c00h, 0108h +DD 020c00h, 0106h + +recompiler_frsqrte PROC + ; store all modified registers +push rdx +push rcx +push rax +push r8 +push r9 +movq r8, xmm15 +mov rax,7FFFFFFFFFFFFFFFh +test rax,r8 +jne frsqrte_espresso_label1 +mov rax,0FFF0000000000000h +and r8,rax +mov rax,7FF0000000000000h +or r8,rax +movq xmm15, r8 +pop r9 +pop r8 +pop rax +pop rcx +pop 
rdx +ret +frsqrte_espresso_label1: +mov r9,r8 +shr r9,34h +and r9d,7FFh +cmp r9d,7FFh +jne frsqrte_espresso_label2 +mov rax,0FFFFFFFFFFFFFh +test rax,r8 +jne frsqrte_espresso_label3 +test r8,r8 +js frsqrte_espresso_label4 +xorps xmm15,xmm15 +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret +frsqrte_espresso_label2: +test r8,r8 +jns frsqrte_espresso_label5 +frsqrte_espresso_label4: +mov rax,7FF8000000000000h +movq xmm15, rax +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret +frsqrte_espresso_label5: +lea rdx,[asmFrsqrteLookupTable] +mov rax,r8 +shr rax,30h +mov rcx,r8 +shr rcx,25h +and eax,1Fh +and ecx,7FFh +imul ecx,dword ptr [rdx+rax*8+4] +mov eax,dword ptr [rdx+rax*8] +sub eax,ecx +lea ecx,[r9-3FDh] +shr ecx,1 +movsxd rdx,eax +mov eax,3FFh +sub eax,ecx +shl rdx,1Ah +mov ecx,eax +mov rax,8000000000000000h +and r8,rax +shl rcx,34h +or rcx,r8 +add rdx,rcx +movq xmm15, rdx +frsqrte_espresso_label3: +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret + +recompiler_frsqrte ENDP + +asmFrsqrteLookupTable: +DD 01a7e800h, 0568h +DD 017cb800h, 04f3h +DD 01552800h, 048dh +DD 0130c000h, 0435h +DD 010f2000h, 03e7h +DD 0eff000h, 03a2h +DD 0d2e000h, 0365h +DD 0b7c000h, 032eh +DD 09e5000h, 02fch +DD 0867000h, 02d0h +DD 06ff000h, 02a8h +DD 05ab800h, 0283h +DD 046a000h, 0261h +DD 0339800h, 0243h +DD 0218800h, 0226h +DD 0105800h, 020bh +DD 03ffa000h, 07a4h +DD 03c29000h, 0700h +DD 038aa000h, 0670h +DD 03572000h, 05f2h +DD 03279000h, 0584h +DD 02fb7000h, 0524h +DD 02d26000h, 04cch +DD 02ac0000h, 047eh +DD 02881000h, 043ah +DD 02665000h, 03fah +DD 02468000h, 03c2h +DD 02287000h, 038eh +DD 020c1000h, 035eh +DD 01f12000h, 0332h +DD 01d79000h, 030ah +DD 01bf4000h, 02e6h + + + +END \ No newline at end of file diff --git a/src/asm/x64util_nasm.asm b/src/asm/x64util_nasm.asm new file mode 100644 index 00000000..89878f6e --- /dev/null +++ b/src/asm/x64util_nasm.asm @@ -0,0 +1,237 @@ +DEFAULT REL + +SECTION .text + +global udiv128 +global recompiler_fres +global recompiler_frsqrte + +udiv128: + mov 
rax, rcx + div r8 + mov [r9], rdx + ret + +recompiler_fres: + ; store all modified registers +push rdx +push rcx +push rax +push r8 +lea r8,[asmFresLookupTable] +movq rdx, xmm15 +mov rcx,rdx +shr rcx,2Fh +mov rax,rdx +and ecx,1Fh +shr rax,25h +and eax,3FFh +imul eax,dword [r8+rcx*8+4] +mov r8d,dword [r8+rcx*8] +mov rcx,rdx +shr rcx,34h +inc eax +shr eax,1 +sub r8d,eax +and ecx,7FFh +jne fres_espresso_label3 +mov rax,7FF0000000000000h +or rdx,rax +movq xmm15, rdx +pop r8 +pop rax +pop rcx +pop rdx +ret +fres_espresso_label3: +cmp ecx,7FFh +jne fres_espresso_label4 +mov rax,0FFFFFFFFFFFFFh +test rax,rdx +jne fres_espresso_label1 +test rdx,rdx +jns fres_espresso_label2 +mov rax,8000000000000000h +movq xmm15, rax +pop r8 +pop rax +pop rcx +pop rdx +ret +fres_espresso_label2: +xorps xmm15,xmm15 +pop r8 +pop rax +pop rcx +pop rdx +ret +fres_espresso_label4: +mov eax,7FDh +sub eax,ecx +mov ecx,eax +mov rax,8000000000000000h +and rdx,rax +shl rcx,34h +mov eax,r8d +or rcx,rdx +shl rax,1Dh +add rcx,rax +movq xmm15, rcx +fres_espresso_label1: +pop r8 +pop rax +pop rcx +pop rdx +ret + +asmFresLookupTable: +DD 07ff800h, 03e1h +DD 0783800h, 03a7h +DD 070ea00h, 0371h +DD 06a0800h, 0340h +DD 0638800h, 0313h +DD 05d6200h, 02eah +DD 0579000h, 02c4h +DD 0520800h, 02a0h +DD 04cc800h, 027fh +DD 047ca00h, 0261h +DD 0430800h, 0245h +DD 03e8000h, 022ah +DD 03a2c00h, 0212h +DD 0360800h, 01fbh +DD 0321400h, 01e5h +DD 02e4a00h, 01d1h +DD 02aa800h, 01beh +DD 0272c00h, 01ach +DD 023d600h, 019bh +DD 0209e00h, 018bh +DD 01d8800h, 017ch +DD 01a9000h, 016eh +DD 017ae00h, 015bh +DD 014f800h, 015bh +DD 0124400h, 0143h +DD 0fbe00h, 0143h +DD 0d3800h, 012dh +DD 0ade00h, 012dh +DD 088400h, 011ah +DD 065000h, 011ah +DD 041c00h, 0108h +DD 020c00h, 0106h + +recompiler_frsqrte: + ; store all modified registers +push rdx +push rcx +push rax +push r8 +push r9 +movq r8, xmm15 +mov rax,7FFFFFFFFFFFFFFFh +test rax,r8 +jne frsqrte_espresso_label1 +mov rax,0FFF0000000000000h +and r8,rax +mov rax,7FF0000000000000h 
+or r8,rax +movq xmm15, r8 +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret +frsqrte_espresso_label1: +mov r9,r8 +shr r9,34h +and r9d,7FFh +cmp r9d,7FFh +jne frsqrte_espresso_label2 +mov rax,0FFFFFFFFFFFFFh +test rax,r8 +jne frsqrte_espresso_label3 +test r8,r8 +js frsqrte_espresso_label4 +xorps xmm15,xmm15 +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret +frsqrte_espresso_label2: +test r8,r8 +jns frsqrte_espresso_label5 +frsqrte_espresso_label4: +mov rax,7FF8000000000000h +movq xmm15, rax +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret +frsqrte_espresso_label5: +lea rdx,[asmFrsqrteLookupTable] +mov rax,r8 +shr rax,30h +mov rcx,r8 +shr rcx,25h +and eax,1Fh +and ecx,7FFh +imul ecx,dword [rdx+rax*8+4] +mov eax,dword [rdx+rax*8] +sub eax,ecx +lea ecx,[r9-3FDh] +shr ecx,1 +movsxd rdx,eax +mov eax,3FFh +sub eax,ecx +shl rdx,1Ah +mov ecx,eax +mov rax,8000000000000000h +and r8,rax +shl rcx,34h +or rcx,r8 +add rdx,rcx +movq xmm15, rdx +frsqrte_espresso_label3: +pop r9 +pop r8 +pop rax +pop rcx +pop rdx +ret + +asmFrsqrteLookupTable: +DD 01a7e800h, 0568h +DD 017cb800h, 04f3h +DD 01552800h, 048dh +DD 0130c000h, 0435h +DD 010f2000h, 03e7h +DD 0eff000h, 03a2h +DD 0d2e000h, 0365h +DD 0b7c000h, 032eh +DD 09e5000h, 02fch +DD 0867000h, 02d0h +DD 06ff000h, 02a8h +DD 05ab800h, 0283h +DD 046a000h, 0261h +DD 0339800h, 0243h +DD 0218800h, 0226h +DD 0105800h, 020bh +DD 03ffa000h, 07a4h +DD 03c29000h, 0700h +DD 038aa000h, 0670h +DD 03572000h, 05f2h +DD 03279000h, 0584h +DD 02fb7000h, 0524h +DD 02d26000h, 04cch +DD 02ac0000h, 047eh +DD 02881000h, 043ah +DD 02665000h, 03fah +DD 02468000h, 03c2h +DD 02287000h, 038eh +DD 020c1000h, 035eh +DD 01f12000h, 0332h +DD 01d79000h, 030ah +DD 01bf4000h, 02e6h diff --git a/src/audio/CubebAPI.cpp b/src/audio/CubebAPI.cpp index f6d5d516..f98fa601 100644 --- a/src/audio/CubebAPI.cpp +++ b/src/audio/CubebAPI.cpp @@ -183,17 +183,17 @@ void CubebAPI::Destroy() std::vector CubebAPI::GetDevices() { + cubeb_device_collection devices; + if 
(cubeb_enumerate_devices(s_context, CUBEB_DEVICE_TYPE_OUTPUT, &devices) != CUBEB_OK) + return {}; + std::vector result; + result.reserve(devices.count + 1); // Reserve space for the default device + // Add the default device to the list auto defaultDevice = std::make_shared(nullptr, "default", L"Default Device"); result.emplace_back(defaultDevice); - cubeb_device_collection devices; - if (cubeb_enumerate_devices(s_context, CUBEB_DEVICE_TYPE_OUTPUT, &devices) != CUBEB_OK) - return result; - - result.reserve(devices.count + 1); // The default device already occupies one element - for (size_t i = 0; i < devices.count; ++i) { // const auto& device = devices.device[i]; diff --git a/src/audio/CubebInputAPI.cpp b/src/audio/CubebInputAPI.cpp index a9faa9c8..c0fa73f4 100644 --- a/src/audio/CubebInputAPI.cpp +++ b/src/audio/CubebInputAPI.cpp @@ -175,17 +175,17 @@ void CubebInputAPI::Destroy() std::vector CubebInputAPI::GetDevices() { + cubeb_device_collection devices; + if (cubeb_enumerate_devices(s_context, CUBEB_DEVICE_TYPE_INPUT, &devices) != CUBEB_OK) + return {}; + std::vector result; + result.reserve(devices.count + 1); // Reserve space for the default device + // Add the default device to the list auto defaultDevice = std::make_shared(nullptr, "default", L"Default Device"); result.emplace_back(defaultDevice); - cubeb_device_collection devices; - if (cubeb_enumerate_devices(s_context, CUBEB_DEVICE_TYPE_INPUT, &devices) != CUBEB_OK) - return result; - - result.reserve(devices.count + 1); // The default device already occupies one element - for (size_t i = 0; i < devices.count; ++i) { // const auto& device = devices.device[i]; diff --git a/src/audio/DirectSoundAPI.cpp b/src/audio/DirectSoundAPI.cpp index 64042515..eabd3a7e 100644 --- a/src/audio/DirectSoundAPI.cpp +++ b/src/audio/DirectSoundAPI.cpp @@ -1,8 +1,9 @@ #include "DirectSoundAPI.h" +#include "gui/wxgui.h" + #include "util/helpers/helpers.h" #include "gui/guiWrapper.h" -#include #pragma comment(lib, 
"Dsound.lib") @@ -14,9 +15,12 @@ std::wstring DirectSoundAPI::DirectSoundDeviceDescription::GetIdentifier() const DirectSoundAPI::DirectSoundAPI(GUID* guid, sint32 samplerate, sint32 channels, sint32 samples_per_block, sint32 bits_per_sample) : IAudioAPI(samplerate, channels, samples_per_block, bits_per_sample) { - if (DirectSoundCreate8(guid, &m_direct_sound, nullptr) != DS_OK) + LPDIRECTSOUND8 direct_sound; + if (DirectSoundCreate8(guid, &direct_sound, nullptr) != DS_OK) throw std::runtime_error("can't create directsound device"); + m_direct_sound = decltype(m_direct_sound)(direct_sound); + if (FAILED(m_direct_sound->SetCooperativeLevel(gui_getWindowInfo().window_main.hwnd, DSSCL_PRIORITY))) throw std::runtime_error("can't set directsound priority"); @@ -26,7 +30,7 @@ DirectSoundAPI::DirectSoundAPI(GUID* guid, sint32 samplerate, sint32 channels, s bd.dwBufferBytes = kBufferCount * m_bytesPerBlock; // kBlockCount * (samples_per_block * channels * (bits_per_sample / 8)); bd.lpwfxFormat = (LPWAVEFORMATEX)&m_wfx; - Microsoft::WRL::ComPtr sound_buffer; + LPDIRECTSOUNDBUFFER sound_buffer; if (FAILED(m_direct_sound->CreateSoundBuffer(&bd, &sound_buffer, nullptr))) throw std::runtime_error("can't create directsound soundbuffer"); @@ -37,17 +41,27 @@ DirectSoundAPI::DirectSoundAPI(GUID* guid, sint32 samplerate, sint32 channels, s m_sound_buffer_size = caps.dwBufferBytes; - Microsoft::WRL::ComPtr notify8; + LPDIRECTSOUNDBUFFER8 sound_buffer8; + LPDIRECTSOUNDNOTIFY8 notify8; + sound_buffer->QueryInterface(IID_IDirectSoundBuffer8, (void**)&sound_buffer8); - if (FAILED(sound_buffer->QueryInterface(IID_IDirectSoundBuffer8, &m_sound_buffer))) + if (!sound_buffer8) { + sound_buffer->Release(); throw std::runtime_error("can't get directsound buffer interface"); } - if (FAILED(sound_buffer->QueryInterface(IID_IDirectSoundNotify8, &m_notify))) + m_sound_buffer = decltype(m_sound_buffer)(sound_buffer8); + + sound_buffer->QueryInterface(IID_IDirectSoundNotify8, (void**)¬ify8); + if 
(!notify8) { + sound_buffer->Release(); throw std::runtime_error("can't get directsound notify interface"); } + m_notify = decltype(m_notify)(notify8); + + sound_buffer->Release(); { // initialize sound buffer void *ptr1, *ptr2; @@ -141,6 +155,10 @@ DirectSoundAPI::~DirectSoundAPI() if(m_thread.joinable()) m_thread.join(); + m_notify.reset(); + m_sound_buffer.reset(); + m_direct_sound.reset(); + for(auto entry : m_notify_event) { if (entry) @@ -168,7 +186,7 @@ bool DirectSoundAPI::Stop() bool DirectSoundAPI::FeedBlock(sint16* data) { - std::lock_guard lock(m_mutex); + std::unique_lock lock(m_mutex); if (m_buffer.size() > kBlockCount) { cemuLog_logDebug(LogType::Force, "dropped direct sound block since too many buffers are queued"); diff --git a/src/audio/DirectSoundAPI.h b/src/audio/DirectSoundAPI.h index 52817fbe..c5ad0d6f 100644 --- a/src/audio/DirectSoundAPI.h +++ b/src/audio/DirectSoundAPI.h @@ -2,8 +2,8 @@ #define DIRECTSOUND_VERSION 0x0800 #include +//#include #include -#include #include "IAudioAPI.h" @@ -41,10 +41,15 @@ public: static std::vector GetInputDevices(); private: - Microsoft::WRL::ComPtr m_direct_sound; - //Microsoft::WRL::ComPtr m_direct_sound_capture; - Microsoft::WRL::ComPtr m_sound_buffer; - Microsoft::WRL::ComPtr m_notify; + struct DirectSoundDeleter + { + void operator()(IUnknown* ptr) const { if (ptr) ptr->Release(); } + }; + + std::unique_ptr m_direct_sound; + //std::unique_ptr m_direct_sound_capture; + std::unique_ptr m_sound_buffer; + std::unique_ptr m_notify; DWORD m_sound_buffer_size = 0; uint32_t m_offset = 0; diff --git a/src/audio/IAudioAPI.cpp b/src/audio/IAudioAPI.cpp index dc266eed..587526ab 100644 --- a/src/audio/IAudioAPI.cpp +++ b/src/audio/IAudioAPI.cpp @@ -13,14 +13,13 @@ std::shared_mutex g_audioMutex; AudioAPIPtr g_tvAudio; AudioAPIPtr g_padAudio; -AudioAPIPtr g_portalAudio; std::atomic_int32_t g_padVolume = 0; uint32 IAudioAPI::s_audioDelay = 2; std::array IAudioAPI::s_availableApis{}; IAudioAPI::IAudioAPI(uint32 
samplerate, uint32 channels, uint32 samples_per_block, uint32 bits_per_sample) - : m_samplerate(samplerate), m_channels(channels), m_samplesPerBlock(samples_per_block), m_bitsPerSample(bits_per_sample) + : m_samplerate(samplerate), m_channels(channels), m_samplesPerBlock(samples_per_block), m_bitsPerSample(bits_per_sample) { m_bytesPerBlock = samples_per_block * channels * (bits_per_sample / 8); InitWFX(m_samplerate, m_channels, m_bitsPerSample); @@ -81,7 +80,7 @@ void IAudioAPI::InitializeStatic() #if BOOST_OS_WINDOWS s_availableApis[DirectSound] = true; s_availableApis[XAudio2] = XAudio2API::InitializeStatic(); - if (!s_availableApis[XAudio2]) // don't try to initialize the older lib if the newer version is available + if(!s_availableApis[XAudio2]) // don't try to initialize the older lib if the newer version is available s_availableApis[XAudio27] = XAudio27API::InitializeStatic(); #endif #if HAS_CUBEB @@ -98,29 +97,30 @@ bool IAudioAPI::IsAudioAPIAvailable(AudioAPI api) return false; } -AudioAPIPtr IAudioAPI::CreateDeviceFromConfig(AudioType type, sint32 rate, sint32 samples_per_block, sint32 bits_per_sample) +AudioAPIPtr IAudioAPI::CreateDeviceFromConfig(bool TV, sint32 rate, sint32 samples_per_block, sint32 bits_per_sample) { - sint32 channels = CemuConfig::AudioChannelsToNChannels(AudioTypeToChannels(type)); - return CreateDeviceFromConfig(type, rate, channels, samples_per_block, bits_per_sample); + auto& config = GetConfig(); + sint32 channels = CemuConfig::AudioChannelsToNChannels(TV ? 
config.tv_channels : config.pad_channels); + return CreateDeviceFromConfig(TV, rate, channels, samples_per_block, bits_per_sample); } -AudioAPIPtr IAudioAPI::CreateDeviceFromConfig(AudioType type, sint32 rate, sint32 channels, sint32 samples_per_block, sint32 bits_per_sample) +AudioAPIPtr IAudioAPI::CreateDeviceFromConfig(bool TV, sint32 rate, sint32 channels, sint32 samples_per_block, sint32 bits_per_sample) { AudioAPIPtr audioAPIDev; auto& config = GetConfig(); const auto audio_api = (IAudioAPI::AudioAPI)config.audio_api; - auto selectedDevice = GetDeviceFromType(type); + auto& selectedDevice = TV ? config.tv_device : config.pad_device; - if (selectedDevice.empty()) + if(selectedDevice.empty()) return {}; IAudioAPI::DeviceDescriptionPtr device_description; if (IAudioAPI::IsAudioAPIAvailable(audio_api)) { auto devices = IAudioAPI::GetDevices(audio_api); - const auto it = std::find_if(devices.begin(), devices.end(), [&selectedDevice](const auto& d) { return d->GetIdentifier() == selectedDevice; }); + const auto it = std::find_if(devices.begin(), devices.end(), [&selectedDevice](const auto& d) {return d->GetIdentifier() == selectedDevice; }); if (it != devices.end()) device_description = *it; } @@ -128,8 +128,7 @@ AudioAPIPtr IAudioAPI::CreateDeviceFromConfig(AudioType type, sint32 rate, sint3 throw std::runtime_error("failed to find selected device while trying to create audio device"); audioAPIDev = CreateDevice(audio_api, device_description, rate, channels, samples_per_block, bits_per_sample); - audioAPIDev->SetVolume(GetVolumeFromType(type)); - + audioAPIDev->SetVolume(TV ? 
config.tv_volume : config.pad_volume); return audioAPIDev; } @@ -138,7 +137,7 @@ AudioAPIPtr IAudioAPI::CreateDevice(AudioAPI api, const DeviceDescriptionPtr& de if (!IsAudioAPIAvailable(api)) return {}; - switch (api) + switch(api) { #if BOOST_OS_WINDOWS case DirectSound: @@ -158,11 +157,11 @@ AudioAPIPtr IAudioAPI::CreateDevice(AudioAPI api, const DeviceDescriptionPtr& de } #endif #if HAS_CUBEB - case Cubeb: - { - const auto tmp = std::dynamic_pointer_cast(device); - return std::make_unique(tmp->GetDeviceId(), samplerate, channels, samples_per_block, bits_per_sample); - } + case Cubeb: + { + const auto tmp = std::dynamic_pointer_cast(device); + return std::make_unique(tmp->GetDeviceId(), samplerate, channels, samples_per_block, bits_per_sample); + } #endif default: throw std::runtime_error(fmt::format("invalid audio api: {}", api)); @@ -173,8 +172,8 @@ std::vector IAudioAPI::GetDevices(AudioAPI api) { if (!IsAudioAPIAvailable(api)) return {}; - - switch (api) + + switch(api) { #if BOOST_OS_WINDOWS case DirectSound: @@ -210,51 +209,3 @@ uint32 IAudioAPI::GetAudioDelay() const { return m_audioDelayOverride > 0 ? 
m_audioDelayOverride : s_audioDelay; } - -AudioChannels IAudioAPI::AudioTypeToChannels(AudioType type) -{ - auto& config = GetConfig(); - switch (type) - { - case TV: - return config.tv_channels; - case Gamepad: - return config.pad_channels; - case Portal: - return kMono; - default: - return kMono; - } -} - -std::wstring IAudioAPI::GetDeviceFromType(AudioType type) -{ - auto& config = GetConfig(); - switch (type) - { - case TV: - return config.tv_device; - case Gamepad: - return config.pad_device; - case Portal: - return config.portal_device; - default: - return L""; - } -} - -sint32 IAudioAPI::GetVolumeFromType(AudioType type) -{ - auto& config = GetConfig(); - switch (type) - { - case TV: - return config.tv_volume; - case Gamepad: - return config.pad_volume; - case Portal: - return config.portal_volume; - default: - return 0; - } -} diff --git a/src/audio/IAudioAPI.h b/src/audio/IAudioAPI.h index 34df421a..8fb510db 100644 --- a/src/audio/IAudioAPI.h +++ b/src/audio/IAudioAPI.h @@ -4,8 +4,6 @@ #include #endif -#include "config/CemuConfig.h" - class IAudioAPI { friend class GeneralSettings2; @@ -32,13 +30,6 @@ public: using DeviceDescriptionPtr = std::shared_ptr; - enum AudioType - { - TV = 0, - Gamepad, - Portal - }; - enum AudioAPI { DirectSound = 0, @@ -71,8 +62,8 @@ public: static void InitializeStatic(); static bool IsAudioAPIAvailable(AudioAPI api); - static std::unique_ptr CreateDeviceFromConfig(AudioType type, sint32 rate, sint32 samples_per_block, sint32 bits_per_sample); - static std::unique_ptr CreateDeviceFromConfig(AudioType type, sint32 rate, sint32 channels, sint32 samples_per_block, sint32 bits_per_sample); + static std::unique_ptr CreateDeviceFromConfig(bool TV, sint32 rate, sint32 samples_per_block, sint32 bits_per_sample); + static std::unique_ptr CreateDeviceFromConfig(bool TV, sint32 rate, sint32 channels, sint32 samples_per_block, sint32 bits_per_sample); static std::unique_ptr CreateDevice(AudioAPI api, const DeviceDescriptionPtr& device, 
sint32 samplerate, sint32 channels, sint32 samples_per_block, sint32 bits_per_sample); static std::vector GetDevices(AudioAPI api); @@ -93,9 +84,6 @@ protected: private: static uint32 s_audioDelay; void InitWFX(sint32 samplerate, sint32 channels, sint32 bits_per_sample); - static AudioChannels AudioTypeToChannels(AudioType type); - static std::wstring GetDeviceFromType(AudioType type); - static sint32 GetVolumeFromType(AudioType type); }; @@ -105,5 +93,3 @@ extern AudioAPIPtr g_tvAudio; extern AudioAPIPtr g_padAudio; extern std::atomic_int32_t g_padVolume; - -extern AudioAPIPtr g_portalAudio; diff --git a/src/audio/XAudio2API.cpp b/src/audio/XAudio2API.cpp index 6b25baed..c92fd451 100644 --- a/src/audio/XAudio2API.cpp +++ b/src/audio/XAudio2API.cpp @@ -2,7 +2,6 @@ //#if (_WIN32_WINNT >= 0x0602 /*_WIN32_WINNT_WIN8*/) -#include #include #ifndef XAUDIO2_DLL @@ -34,15 +33,17 @@ XAudio2API::XAudio2API(std::wstring device_id, uint32 samplerate, uint32 channel throw std::runtime_error("can't find XAudio2Create import"); HRESULT hres; - if (FAILED((hres = _XAudio2Create(&m_xaudio, 0, XAUDIO2_DEFAULT_PROCESSOR)))) + IXAudio2* xaudio; + if (FAILED((hres = _XAudio2Create(&xaudio, 0, XAUDIO2_DEFAULT_PROCESSOR)))) throw std::runtime_error(fmt::format("can't create xaudio device (hres: {:#x})", hres)); + m_xaudio = decltype(m_xaudio)(xaudio); IXAudio2MasteringVoice* mastering_voice; if (FAILED((hres = m_xaudio->CreateMasteringVoice(&mastering_voice, channels, samplerate, 0, m_device_id.empty() ? 
nullptr : m_device_id.c_str())))) throw std::runtime_error(fmt::format("can't create xaudio mastering voice (hres: {:#x})", hres)); - m_mastering_voice.reset(mastering_voice); + m_mastering_voice = decltype(m_mastering_voice)(mastering_voice); m_wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; m_wfx.Format.nChannels = channels; @@ -87,6 +88,12 @@ XAudio2API::XAudio2API(std::wstring device_id, uint32 samplerate, uint32 channel m_xaudio->StartEngine(); } +void XAudio2API::XAudioDeleter::operator()(IXAudio2* ptr) const +{ + if (ptr) + ptr->Release(); +} + void XAudio2API::VoiceDeleter::operator()(IXAudio2Voice* ptr) const { if (ptr) @@ -99,6 +106,10 @@ XAudio2API::~XAudio2API() m_xaudio->StopEngine(); XAudio2API::Stop(); + + m_source_voice.reset(); + m_mastering_voice.reset(); + m_xaudio.reset(); } void XAudio2API::SetVolume(sint32 volume) @@ -168,10 +179,10 @@ const std::vector& XAudio2API::RefreshDevices( try { - Microsoft::WRL::ComPtr wbem_locator; + struct IWbemLocator *wbem_locator = nullptr; - HRESULT hres = CoCreateInstance(__uuidof(WbemLocator), nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&wbem_locator)); - if (FAILED(hres)) + HRESULT hres = CoCreateInstance(__uuidof(WbemLocator), nullptr, CLSCTX_INPROC_SERVER, __uuidof(IWbemLocator), (LPVOID*)&wbem_locator); + if (FAILED(hres) || !wbem_locator) throw std::system_error(hres, std::system_category()); std::shared_ptr path(SysAllocString(LR"(\\.\root\cimv2)"), SysFreeString); @@ -180,19 +191,20 @@ const std::vector& XAudio2API::RefreshDevices( std::shared_ptr name_row(SysAllocString(L"Name"), SysFreeString); std::shared_ptr device_id_row(SysAllocString(L"DeviceID"), SysFreeString); - Microsoft::WRL::ComPtr wbem_services; + IWbemServices *wbem_services = nullptr; hres = wbem_locator->ConnectServer(path.get(), nullptr, nullptr, nullptr, 0, nullptr, nullptr, &wbem_services); + wbem_locator->Release(); // Free memory resources. 
+ if (FAILED(hres) || !wbem_services) + throw std::system_error(hres, std::system_category()); + + hres = CoSetProxyBlanket(wbem_services, RPC_C_AUTHN_WINNT, RPC_C_AUTHZ_NONE, nullptr, RPC_C_AUTHN_LEVEL_CALL, RPC_C_IMP_LEVEL_IMPERSONATE, nullptr, EOAC_NONE); if (FAILED(hres)) throw std::system_error(hres, std::system_category()); - hres = CoSetProxyBlanket(wbem_services.Get(), RPC_C_AUTHN_WINNT, RPC_C_AUTHZ_NONE, nullptr, RPC_C_AUTHN_LEVEL_CALL, RPC_C_IMP_LEVEL_IMPERSONATE, nullptr, EOAC_NONE); - if (FAILED(hres)) - throw std::system_error(hres, std::system_category()); - - Microsoft::WRL::ComPtr wbem_enum; + IEnumWbemClassObject* wbem_enum = nullptr; hres = wbem_services->ExecQuery(language.get(), query.get(), WBEM_FLAG_RETURN_WBEM_COMPLETE | WBEM_FLAG_FORWARD_ONLY, nullptr, &wbem_enum); - if (FAILED(hres)) + if (FAILED(hres) || !wbem_enum) throw std::system_error(hres, std::system_category()); ULONG returned; @@ -238,6 +250,11 @@ const std::vector& XAudio2API::RefreshDevices( auto default_device = std::make_shared(L"Primary Sound Driver", L""); s_devices.insert(s_devices.begin(), default_device); } + + wbem_enum->Release(); + + // Clean up + wbem_services->Release(); } catch (const std::system_error& ex) { diff --git a/src/audio/XAudio2API.h b/src/audio/XAudio2API.h index 5bb01b10..b5bb0296 100644 --- a/src/audio/XAudio2API.h +++ b/src/audio/XAudio2API.h @@ -4,7 +4,6 @@ #include #include #include -#include #include "IAudioAPI.h" @@ -51,6 +50,11 @@ private: static const std::vector& RefreshDevices(); + struct XAudioDeleter + { + void operator()(IXAudio2* ptr) const; + }; + struct VoiceDeleter { void operator()(IXAudio2Voice* ptr) const; @@ -59,7 +63,7 @@ private: static HMODULE s_xaudio_dll; static std::vector s_devices; - Microsoft::WRL::ComPtr m_xaudio; + std::unique_ptr m_xaudio; std::wstring m_device_id; std::unique_ptr m_mastering_voice; std::unique_ptr m_source_voice; diff --git a/src/config/ActiveSettings.cpp b/src/config/ActiveSettings.cpp index 
e552d164..f81f8336 100644 --- a/src/config/ActiveSettings.cpp +++ b/src/config/ActiveSettings.cpp @@ -165,11 +165,6 @@ bool ActiveSettings::DumpTexturesEnabled() return s_dump_textures; } -bool ActiveSettings::DumpRecompilerFunctionsEnabled() -{ - return s_dump_recompiler_functions; -} - bool ActiveSettings::DumpLibcurlRequestsEnabled() { return s_dump_libcurl_requests; @@ -185,11 +180,6 @@ void ActiveSettings::EnableDumpTextures(bool state) s_dump_textures = state; } -void ActiveSettings::EnableDumpRecompilerFunctions(bool state) -{ - s_dump_recompiler_functions = state; -} - void ActiveSettings::EnableDumpLibcurlRequests(bool state) { s_dump_libcurl_requests = state; diff --git a/src/config/ActiveSettings.h b/src/config/ActiveSettings.h index 0d7ecfec..e672fbee 100644 --- a/src/config/ActiveSettings.h +++ b/src/config/ActiveSettings.h @@ -109,11 +109,9 @@ public: // dump options [[nodiscard]] static bool DumpShadersEnabled(); [[nodiscard]] static bool DumpTexturesEnabled(); - [[nodiscard]] static bool DumpRecompilerFunctionsEnabled(); [[nodiscard]] static bool DumpLibcurlRequestsEnabled(); static void EnableDumpShaders(bool state); static void EnableDumpTextures(bool state); - static void EnableDumpRecompilerFunctions(bool state); static void EnableDumpLibcurlRequests(bool state); // hacks @@ -127,7 +125,6 @@ private: // dump options inline static bool s_dump_shaders = false; inline static bool s_dump_textures = false; - inline static bool s_dump_recompiler_functions = false; inline static bool s_dump_libcurl_requests = false; // timer speed diff --git a/src/config/CemuConfig.cpp b/src/config/CemuConfig.cpp index 809a5470..6bb7ac34 100644 --- a/src/config/CemuConfig.cpp +++ b/src/config/CemuConfig.cpp @@ -278,7 +278,6 @@ void CemuConfig::Load(XMLConfigParser& parser) tv_volume = audio.get("TVVolume", 20); pad_volume = audio.get("PadVolume", 0); input_volume = audio.get("InputVolume", 20); - portal_volume = audio.get("PortalVolume", 20); const auto tv = 
audio.get("TVDevice", ""); try @@ -310,16 +309,6 @@ void CemuConfig::Load(XMLConfigParser& parser) cemuLog_log(LogType::Force, "config load error: can't load input device: {}", input_device_name); } - const auto portal_device_name = audio.get("PortalDevice", ""); - try - { - portal_device = boost::nowide::widen(portal_device_name); - } - catch (const std::exception&) - { - cemuLog_log(LogType::Force, "config load error: can't load input device: {}", portal_device_name); - } - // account auto acc = parser.get("Account"); account.m_persistent_id = acc.get("PersistentId", account.m_persistent_id); @@ -522,11 +511,9 @@ void CemuConfig::Save(XMLConfigParser& parser) audio.set("TVVolume", tv_volume); audio.set("PadVolume", pad_volume); audio.set("InputVolume", input_volume); - audio.set("PortalVolume", portal_volume); audio.set("TVDevice", boost::nowide::narrow(tv_device).c_str()); audio.set("PadDevice", boost::nowide::narrow(pad_device).c_str()); audio.set("InputDevice", boost::nowide::narrow(input_device).c_str()); - audio.set("PortalDevice", boost::nowide::narrow(portal_device).c_str()); // account auto acc = config.set("Account"); diff --git a/src/config/CemuConfig.h b/src/config/CemuConfig.h index ca896eed..62665f6d 100644 --- a/src/config/CemuConfig.h +++ b/src/config/CemuConfig.h @@ -192,7 +192,7 @@ ENABLE_ENUM_ITERATORS(CrashDump, CrashDump::Disabled, CrashDump::Enabled); #endif template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const PrecompiledShaderOption c, FormatContext &ctx) const { string_view name; @@ -207,7 +207,7 @@ struct fmt::formatter : formatter { } }; template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const AccurateShaderMulOption c, FormatContext &ctx) const { string_view name; @@ -221,7 +221,7 @@ struct fmt::formatter : formatter { } }; template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto 
format(const CPUMode c, FormatContext &ctx) const { string_view name; @@ -238,7 +238,7 @@ struct fmt::formatter : formatter { } }; template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const CPUModeLegacy c, FormatContext &ctx) const { string_view name; @@ -255,7 +255,7 @@ struct fmt::formatter : formatter { } }; template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const CafeConsoleRegion v, FormatContext &ctx) const { string_view name; @@ -276,7 +276,7 @@ struct fmt::formatter : formatter { } }; template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const CafeConsoleLanguage v, FormatContext &ctx) { string_view name; @@ -302,7 +302,7 @@ struct fmt::formatter : formatter { #if BOOST_OS_WINDOWS template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const CrashDump v, FormatContext &ctx) { string_view name; @@ -319,7 +319,7 @@ struct fmt::formatter : formatter { }; #elif BOOST_OS_UNIX template <> -struct fmt::formatter : formatter { +struct fmt::formatter : formatter { template auto format(const CrashDump v, FormatContext &ctx) { string_view name; @@ -480,8 +480,8 @@ struct CemuConfig sint32 audio_api = 0; sint32 audio_delay = 2; AudioChannels tv_channels = kStereo, pad_channels = kStereo, input_channels = kMono; - sint32 tv_volume = 50, pad_volume = 0, input_volume = 50, portal_volume = 50; - std::wstring tv_device{ L"default" }, pad_device, input_device, portal_device; + sint32 tv_volume = 50, pad_volume = 0, input_volume = 50; + std::wstring tv_device{ L"default" }, pad_device, input_device; // account struct diff --git a/src/config/LaunchSettings.cpp b/src/config/LaunchSettings.cpp index fde20539..32a069c6 100644 --- a/src/config/LaunchSettings.cpp +++ b/src/config/LaunchSettings.cpp @@ -13,7 +13,6 @@ #include "util/crypto/aes128.h" #include 
"Cafe/Filesystem/FST/FST.h" -#include "util/helpers/StringHelpers.h" void requireConsole(); @@ -59,9 +58,6 @@ bool LaunchSettings::HandleCommandline(const std::vector& args) desc.add_options() ("help,h", "This help screen") ("version,v", "Displays the version of Cemu") -#if !BOOST_OS_WINDOWS - ("verbose", "Log to stdout") -#endif ("game,g", po::wvalue(), "Path of game to launch") ("title-id,t", po::value(), "Title ID of the title to be launched (overridden by --game)") @@ -72,16 +68,13 @@ bool LaunchSettings::HandleCommandline(const std::vector& args) ("account,a", po::value(), "Persistent id of account") - ("force-interpreter", po::value()->implicit_value(true), "Force interpreter CPU emulation, disables recompiler. Useful for debugging purposes where you want to get accurate memory accesses and stack traces.") - ("force-multicore-interpreter", po::value()->implicit_value(true), "Force multi-core interpreter CPU emulation, disables recompiler. Only useful for getting stack traces, but slightly faster than the single-core interpreter mode.") + ("force-interpreter", po::value()->implicit_value(true), "Force interpreter CPU emulation, disables recompiler") ("enable-gdbstub", po::value()->implicit_value(true), "Enable GDB stub to debug executables inside Cemu using an external debugger"); po::options_description hidden{ "Hidden options" }; hidden.add_options() ("nsight", po::value()->implicit_value(true), "NSight debugging options") - ("legacy", po::value()->implicit_value(true), "Intel legacy graphic mode") - ("ppcrec-lower-addr", po::value(), "For debugging: Lower address allowed for PPC recompilation") - ("ppcrec-upper-addr", po::value(), "For debugging: Upper address allowed for PPC recompilation"); + ("legacy", po::value()->implicit_value(true), "Intel legacy graphic mode"); po::options_description extractor{ "Extractor tool" }; extractor.add_options() @@ -128,9 +121,6 @@ bool LaunchSettings::HandleCommandline(const std::vector& args) return false; // exit in 
main } - if (vm.count("verbose")) - s_verbose = true; - if (vm.count("game")) { std::wstring tmp = vm["game"].as(); @@ -183,9 +173,6 @@ bool LaunchSettings::HandleCommandline(const std::vector& args) if(vm.count("force-interpreter")) s_force_interpreter = vm["force-interpreter"].as(); - - if(vm.count("force-multicore-interpreter")) - s_force_multicore_interpreter = vm["force-multicore-interpreter"].as(); if (vm.count("enable-gdbstub")) s_enable_gdbstub = vm["enable-gdbstub"].as(); @@ -199,20 +186,6 @@ bool LaunchSettings::HandleCommandline(const std::vector& args) if (vm.count("output")) log_path = vm["output"].as(); - // recompiler range limit for debugging - if (vm.count("ppcrec-lower-addr")) - { - uint32 addr = (uint32)StringHelpers::ToInt64(vm["ppcrec-lower-addr"].as()); - ppcRec_limitLowerAddr = addr; - } - if (vm.count("ppcrec-upper-addr")) - { - uint32 addr = (uint32)StringHelpers::ToInt64(vm["ppcrec-upper-addr"].as()); - ppcRec_limitUpperAddr = addr; - } - if(ppcRec_limitLowerAddr != 0 && ppcRec_limitUpperAddr != 0) - cemuLog_log(LogType::Force, "PPCRec range limited to 0x{:08x}-0x{:08x}", ppcRec_limitLowerAddr, ppcRec_limitUpperAddr); - if(!extract_path.empty()) { ExtractorTool(extract_path, output_path, log_path); diff --git a/src/config/LaunchSettings.h b/src/config/LaunchSettings.h index 13665cb7..b0f673a1 100644 --- a/src/config/LaunchSettings.h +++ b/src/config/LaunchSettings.h @@ -22,19 +22,13 @@ public: static std::optional RenderUpsideDownEnabled() { return s_render_upside_down; } static std::optional FullscreenEnabled() { return s_fullscreen; } - static bool Verbose() { return s_verbose; } - static bool GDBStubEnabled() { return s_enable_gdbstub; } static bool NSightModeEnabled() { return s_nsight_mode; } static bool ForceInterpreter() { return s_force_interpreter; }; - static bool ForceMultiCoreInterpreter() { return s_force_multicore_interpreter; } static std::optional GetPersistentId() { return s_persistent_id; } - static uint32 
GetPPCRecLowerAddr() { return ppcRec_limitLowerAddr; }; - static uint32 GetPPCRecUpperAddr() { return ppcRec_limitUpperAddr; }; - private: inline static std::optional s_load_game_file{}; inline static std::optional s_load_title_id{}; @@ -42,21 +36,14 @@ private: inline static std::optional s_render_upside_down{}; inline static std::optional s_fullscreen{}; - - inline static bool s_verbose = false; inline static bool s_enable_gdbstub = false; inline static bool s_nsight_mode = false; inline static bool s_force_interpreter = false; - inline static bool s_force_multicore_interpreter = false; inline static std::optional s_persistent_id{}; - // for recompiler debugging - inline static uint32 ppcRec_limitLowerAddr{}; - inline static uint32 ppcRec_limitUpperAddr{}; - static bool ExtractorTool(std::wstring_view wud_path, std::string_view output_path, std::wstring_view log_path); }; diff --git a/src/gui/DownloadGraphicPacksWindow.cpp b/src/gui/DownloadGraphicPacksWindow.cpp index f2a90959..9ea9e1dd 100644 --- a/src/gui/DownloadGraphicPacksWindow.cpp +++ b/src/gui/DownloadGraphicPacksWindow.cpp @@ -182,14 +182,14 @@ void DownloadGraphicPacksWindow::UpdateThread() if (checkGraphicPackDownloadedVersion(assetName, hasVersionFile)) { // already up to date - wxMessageBox(_("No updates available."), _("Graphic packs"), wxOK | wxCENTRE, this); + wxMessageBox(_("No updates available."), _("Graphic packs"), wxOK | wxCENTRE, this->GetParent()); m_threadState = ThreadFinished; return; } if (hasVersionFile) { // if a version file already exists (and graphic packs are installed) ask the user if he really wants to update - if (wxMessageBox(_("Updated graphic packs are available. Do you want to download and install them?"), _("Graphic packs"), wxYES_NO, this) != wxYES) + if (wxMessageBox(_("Updated graphic packs are available. 
Do you want to download and install them?"), _("Graphic packs"), wxYES_NO, this->GetParent()) != wxYES) { // cancel update m_threadState = ThreadFinished; @@ -336,7 +336,7 @@ int DownloadGraphicPacksWindow::ShowModal() { if(CafeSystem::IsTitleRunning()) { - wxMessageBox(_("Graphic packs cannot be updated while a game is running."), _("Graphic packs"), 5, this); + wxMessageBox(_("Graphic packs cannot be updated while a game is running."), _("Graphic packs"), 5, this->GetParent()); return wxID_CANCEL; } m_thread = std::thread(&DownloadGraphicPacksWindow::UpdateThread, this); diff --git a/src/gui/EmulatedUSBDevices/EmulatedUSBDeviceFrame.cpp b/src/gui/EmulatedUSBDevices/EmulatedUSBDeviceFrame.cpp index 53e3b995..c77ae081 100644 --- a/src/gui/EmulatedUSBDevices/EmulatedUSBDeviceFrame.cpp +++ b/src/gui/EmulatedUSBDevices/EmulatedUSBDeviceFrame.cpp @@ -90,9 +90,9 @@ wxPanel* EmulatedUSBDeviceFrame::AddSkylanderPage(wxNotebook* notebook) wxPanel* EmulatedUSBDeviceFrame::AddInfinityPage(wxNotebook* notebook) { auto* panel = new wxPanel(notebook); - auto* panelSizer = new wxBoxSizer(wxVERTICAL); + auto* panelSizer = new wxBoxSizer(wxBOTH); auto* box = new wxStaticBox(panel, wxID_ANY, _("Infinity Manager")); - auto* boxSizer = new wxStaticBoxSizer(box, wxVERTICAL); + auto* boxSizer = new wxStaticBoxSizer(box, wxBOTH); auto* row = new wxBoxSizer(wxHORIZONTAL); @@ -146,15 +146,17 @@ wxPanel* EmulatedUSBDeviceFrame::AddDimensionsPage(wxNotebook* notebook) auto* top_row = new wxBoxSizer(wxHORIZONTAL); auto* bottom_row = new wxBoxSizer(wxHORIZONTAL); + auto* dummy = new wxStaticText(box, wxID_ANY, ""); + top_row->Add(AddDimensionPanel(2, 0, box), 1, wxEXPAND | wxALL, 2); - top_row->Add(0, 0, 1, wxEXPAND | wxLEFT | wxRIGHT, 2); + top_row->Add(dummy, 1, wxEXPAND | wxLEFT | wxRIGHT, 2); top_row->Add(AddDimensionPanel(1, 1, box), 1, wxEXPAND | wxALL, 2); - top_row->Add(0, 0, 1, wxEXPAND | wxLEFT | wxRIGHT, 2); + top_row->Add(dummy, 1, wxEXPAND | wxLEFT | wxRIGHT, 2); 
top_row->Add(AddDimensionPanel(3, 2, box), 1, wxEXPAND | wxALL, 2); bottom_row->Add(AddDimensionPanel(2, 3, box), 1, wxEXPAND | wxALL, 2); bottom_row->Add(AddDimensionPanel(2, 4, box), 1, wxEXPAND | wxALL, 2); - bottom_row->Add(0, 0, 1, wxEXPAND | wxLEFT | wxRIGHT, 0); + bottom_row->Add(dummy, 1, wxEXPAND | wxLEFT | wxRIGHT, 0); bottom_row->Add(AddDimensionPanel(3, 5, box), 1, wxEXPAND | wxALL, 2); bottom_row->Add(AddDimensionPanel(3, 6, box), 1, wxEXPAND | wxALL, 2); @@ -283,7 +285,7 @@ void EmulatedUSBDeviceFrame::LoadSkylanderPath(uint8 slot, wxString path) std::unique_ptr skyFile(FileStream::openFile2(_utf8ToPath(path.utf8_string()), true)); if (!skyFile) { - wxMessageDialog open_error(this, "Error Opening File: " + path); + wxMessageDialog open_error(this, "Error Opening File: " + path.c_str()); open_error.ShowModal(); return; } @@ -831,4 +833,4 @@ uint8 MoveDimensionFigureDialog::GetNewIndex() const std::array, 7> EmulatedUSBDeviceFrame::GetCurrentMinifigs() { return m_dimSlots; -} +} \ No newline at end of file diff --git a/src/gui/GeneralSettings2.cpp b/src/gui/GeneralSettings2.cpp index 4c23aedc..9b763229 100644 --- a/src/gui/GeneralSettings2.cpp +++ b/src/gui/GeneralSettings2.cpp @@ -542,36 +542,6 @@ wxPanel* GeneralSettings2::AddAudioPage(wxNotebook* notebook) audio_panel_sizer->Add(box_sizer, 0, wxEXPAND | wxALL, 5); } - { - auto box = new wxStaticBox(audio_panel, wxID_ANY, _("Trap Team Portal")); - auto box_sizer = new wxStaticBoxSizer(box, wxVERTICAL); - - auto portal_audio_row = new wxFlexGridSizer(0, 3, 0, 0); - portal_audio_row->SetFlexibleDirection(wxBOTH); - portal_audio_row->SetNonFlexibleGrowMode(wxFLEX_GROWMODE_SPECIFIED); - - portal_audio_row->Add(new wxStaticText(box, wxID_ANY, _("Device")), 0, wxALIGN_CENTER_VERTICAL | wxALL, 5); - m_portal_device = new wxChoice(box, wxID_ANY, wxDefaultPosition); - m_portal_device->SetMinSize(wxSize(300, -1)); - m_portal_device->SetToolTip(_("Select the active audio output device for Wii U GamePad")); - 
portal_audio_row->Add(m_portal_device, 0, wxEXPAND | wxALL, 5); - portal_audio_row->AddSpacer(0); - - m_portal_device->Bind(wxEVT_CHOICE, &GeneralSettings2::OnAudioDeviceSelected, this); - - portal_audio_row->Add(new wxStaticText(box, wxID_ANY, _("Volume")), 0, wxALIGN_CENTER_VERTICAL | wxALL, 5); - m_portal_volume = new wxSlider(box, wxID_ANY, 100, 0, 100); - portal_audio_row->Add(m_portal_volume, 0, wxEXPAND | wxALL, 5); - auto audio_pad_volume_text = new wxStaticText(box, wxID_ANY, "100%"); - portal_audio_row->Add(audio_pad_volume_text, 0, wxALIGN_CENTER_VERTICAL | wxALL | wxALIGN_RIGHT, 5); - - m_portal_volume->Bind(wxEVT_SLIDER, &GeneralSettings2::OnSliderChangedPercent, this, wxID_ANY, wxID_ANY, new wxControlObject(audio_pad_volume_text)); - m_portal_volume->Bind(wxEVT_SLIDER, &GeneralSettings2::OnVolumeChanged, this); - - box_sizer->Add(portal_audio_row, 1, wxEXPAND, 5); - audio_panel_sizer->Add(box_sizer, 0, wxEXPAND | wxALL, 5); - } - audio_panel->SetSizerAndFit(audio_panel_sizer); return audio_panel; } @@ -1023,7 +993,6 @@ void GeneralSettings2::StoreConfig() config.tv_volume = m_tv_volume->GetValue(); config.pad_volume = m_pad_volume->GetValue(); config.input_volume = m_input_volume->GetValue(); - config.portal_volume = m_portal_volume->GetValue(); config.tv_device.clear(); const auto tv_device = m_tv_device->GetSelection(); @@ -1052,15 +1021,6 @@ void GeneralSettings2::StoreConfig() config.input_device = device_description->GetDescription()->GetIdentifier(); } - config.portal_device.clear(); - const auto portal_device = m_portal_device->GetSelection(); - if (portal_device != wxNOT_FOUND && portal_device != 0 && m_portal_device->HasClientObjectData()) - { - const auto* device_description = (wxDeviceDescription*)m_portal_device->GetClientObject(portal_device); - if (device_description) - config.portal_device = device_description->GetDescription()->GetIdentifier(); - } - // graphics config.graphic_api = (GraphicAPI)m_graphic_api->GetSelection(); @@ 
-1171,16 +1131,11 @@ void GeneralSettings2::OnVolumeChanged(wxCommandEvent& event) g_padVolume = event.GetInt(); } } - else if (event.GetEventObject() == m_tv_volume) + else { if (g_tvAudio) g_tvAudio->SetVolume(event.GetInt()); } - else - { - if(g_portalAudio) - g_portalAudio->SetVolume(event.GetInt()); - } } @@ -1240,12 +1195,10 @@ void GeneralSettings2::UpdateAudioDeviceList() m_tv_device->Clear(); m_pad_device->Clear(); m_input_device->Clear(); - m_portal_device->Clear(); m_tv_device->Append(_("Disabled")); m_pad_device->Append(_("Disabled")); m_input_device->Append(_("Disabled")); - m_portal_device->Append(_("Disabled")); const auto audio_api = (IAudioAPI::AudioAPI)GetConfig().audio_api; const auto devices = IAudioAPI::GetDevices(audio_api); @@ -1253,7 +1206,6 @@ void GeneralSettings2::UpdateAudioDeviceList() { m_tv_device->Append(device->GetName(), new wxDeviceDescription(device)); m_pad_device->Append(device->GetName(), new wxDeviceDescription(device)); - m_portal_device->Append(device->GetName(), new wxDeviceDescription(device)); } const auto input_audio_api = IAudioInputAPI::Cubeb; //(IAudioAPI::AudioAPI)GetConfig().input_audio_api; @@ -1273,8 +1225,6 @@ void GeneralSettings2::UpdateAudioDeviceList() m_input_device->SetSelection(0); - m_portal_device->SetSelection(0); - // todo reset global instance of audio device } @@ -1758,22 +1708,6 @@ void GeneralSettings2::ApplyConfig() else m_input_device->SetSelection(0); - SendSliderEvent(m_portal_volume, config.portal_volume); - if (!config.portal_device.empty() && m_portal_device->HasClientObjectData()) - { - for (uint32 i = 0; i < m_portal_device->GetCount(); ++i) - { - const auto device_description = (wxDeviceDescription*)m_portal_device->GetClientObject(i); - if (device_description && config.portal_device == device_description->GetDescription()->GetIdentifier()) - { - m_portal_device->SetSelection(i); - break; - } - } - } - else - m_portal_device->SetSelection(0); - // account UpdateOnlineAccounts(); 
m_active_account->SetSelection(0); @@ -1932,42 +1866,6 @@ void GeneralSettings2::UpdateAudioDevice() } } } - - // skylander portal audio device - { - const auto selection = m_portal_device->GetSelection(); - if (selection == wxNOT_FOUND) - { - cemu_assert_debug(false); - return; - } - - g_portalAudio.reset(); - - if (m_portal_device->HasClientObjectData()) - { - const auto description = (wxDeviceDescription*)m_portal_device->GetClientObject(selection); - if (description) - { - sint32 channels; - if (m_game_launched && g_portalAudio) - channels = g_portalAudio->GetChannels(); - else - channels = 1; - - try - { - g_portalAudio = IAudioAPI::CreateDevice((IAudioAPI::AudioAPI)config.audio_api, description->GetDescription(), 8000, 1, 32, 16); - g_portalAudio->SetVolume(m_portal_volume->GetValue()); - } - catch (std::runtime_error& ex) - { - cemuLog_log(LogType::Force, "can't initialize portal audio: {}", ex.what()); - } - } - } - - } } void GeneralSettings2::OnAudioDeviceSelected(wxCommandEvent& event) diff --git a/src/gui/GeneralSettings2.h b/src/gui/GeneralSettings2.h index 5e27d6e2..7fbfecc1 100644 --- a/src/gui/GeneralSettings2.h +++ b/src/gui/GeneralSettings2.h @@ -63,9 +63,9 @@ private: // Audio wxChoice* m_audio_api; wxSlider *m_audio_latency; - wxSlider *m_tv_volume, *m_pad_volume, *m_input_volume, *m_portal_volume; + wxSlider *m_tv_volume, *m_pad_volume, *m_input_volume; wxChoice *m_tv_channels, *m_pad_channels, *m_input_channels; - wxChoice *m_tv_device, *m_pad_device, *m_input_device, *m_portal_device; + wxChoice *m_tv_device, *m_pad_device, *m_input_device; // Account wxButton* m_create_account, * m_delete_account; diff --git a/src/gui/GettingStartedDialog.cpp b/src/gui/GettingStartedDialog.cpp index b613c38c..22426cf2 100644 --- a/src/gui/GettingStartedDialog.cpp +++ b/src/gui/GettingStartedDialog.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include "config/ActiveSettings.h" #include "gui/CemuApp.h" diff --git a/src/gui/MainWindow.cpp 
b/src/gui/MainWindow.cpp index bf880e48..4801706a 100644 --- a/src/gui/MainWindow.cpp +++ b/src/gui/MainWindow.cpp @@ -77,7 +77,6 @@ enum MAINFRAME_MENU_ID_FILE_INSTALL_UPDATE, MAINFRAME_MENU_ID_FILE_OPEN_CEMU_FOLDER, MAINFRAME_MENU_ID_FILE_OPEN_MLC_FOLDER, - MAINFRAME_MENU_ID_FILE_OPEN_SHADERCACHE_FOLDER, MAINFRAME_MENU_ID_FILE_EXIT, MAINFRAME_MENU_ID_FILE_END_EMULATION, MAINFRAME_MENU_ID_FILE_RECENT_0, @@ -91,7 +90,6 @@ enum MAINFRAME_MENU_ID_OPTIONS_GENERAL2, MAINFRAME_MENU_ID_OPTIONS_AUDIO, MAINFRAME_MENU_ID_OPTIONS_INPUT, - MAINFRAME_MENU_ID_OPTIONS_MAC_SETTINGS, // options -> account MAINFRAME_MENU_ID_OPTIONS_ACCOUNT_1 = 20350, MAINFRAME_MENU_ID_OPTIONS_ACCOUNT_12 = 20350 + 11, @@ -140,13 +138,11 @@ enum MAINFRAME_MENU_ID_DEBUG_VK_ACCURATE_BARRIERS, // debug->logging - MAINFRAME_MENU_ID_DEBUG_LOGGING_MESSAGE = 21499, MAINFRAME_MENU_ID_DEBUG_LOGGING0 = 21500, MAINFRAME_MENU_ID_DEBUG_ADVANCED_PPC_INFO = 21599, // debug->dump MAINFRAME_MENU_ID_DEBUG_DUMP_TEXTURES = 21600, MAINFRAME_MENU_ID_DEBUG_DUMP_SHADERS, - MAINFRAME_MENU_ID_DEBUG_DUMP_RECOMPILER_FUNCTIONS, MAINFRAME_MENU_ID_DEBUG_DUMP_RAM, MAINFRAME_MENU_ID_DEBUG_DUMP_FST, MAINFRAME_MENU_ID_DEBUG_DUMP_CURL_REQUESTS, @@ -173,7 +169,6 @@ EVT_MENU(MAINFRAME_MENU_ID_FILE_LOAD, MainWindow::OnFileMenu) EVT_MENU(MAINFRAME_MENU_ID_FILE_INSTALL_UPDATE, MainWindow::OnInstallUpdate) EVT_MENU(MAINFRAME_MENU_ID_FILE_OPEN_CEMU_FOLDER, MainWindow::OnOpenFolder) EVT_MENU(MAINFRAME_MENU_ID_FILE_OPEN_MLC_FOLDER, MainWindow::OnOpenFolder) -EVT_MENU(MAINFRAME_MENU_ID_FILE_OPEN_SHADERCACHE_FOLDER, MainWindow::OnOpenFolder) EVT_MENU(MAINFRAME_MENU_ID_FILE_EXIT, MainWindow::OnFileExit) EVT_MENU(MAINFRAME_MENU_ID_FILE_END_EMULATION, MainWindow::OnFileMenu) EVT_MENU_RANGE(MAINFRAME_MENU_ID_FILE_RECENT_0 + 0, MAINFRAME_MENU_ID_FILE_RECENT_LAST, MainWindow::OnFileMenu) @@ -189,7 +184,6 @@ EVT_MENU(MAINFRAME_MENU_ID_OPTIONS_GENERAL, MainWindow::OnOptionsInput) EVT_MENU(MAINFRAME_MENU_ID_OPTIONS_GENERAL2, MainWindow::OnOptionsInput) 
EVT_MENU(MAINFRAME_MENU_ID_OPTIONS_AUDIO, MainWindow::OnOptionsInput) EVT_MENU(MAINFRAME_MENU_ID_OPTIONS_INPUT, MainWindow::OnOptionsInput) -EVT_MENU(MAINFRAME_MENU_ID_OPTIONS_MAC_SETTINGS, MainWindow::OnOptionsInput) // tools menu EVT_MENU(MAINFRAME_MENU_ID_TOOLS_MEMORY_SEARCHER, MainWindow::OnToolsInput) EVT_MENU(MAINFRAME_MENU_ID_TOOLS_TITLE_MANAGER, MainWindow::OnToolsInput) @@ -210,9 +204,8 @@ EVT_MENU_RANGE(MAINFRAME_MENU_ID_NFC_RECENT_0 + 0, MAINFRAME_MENU_ID_NFC_RECENT_ EVT_MENU_RANGE(MAINFRAME_MENU_ID_DEBUG_LOGGING0 + 0, MAINFRAME_MENU_ID_DEBUG_LOGGING0 + 98, MainWindow::OnDebugLoggingToggleFlagGeneric) EVT_MENU(MAINFRAME_MENU_ID_DEBUG_ADVANCED_PPC_INFO, MainWindow::OnPPCInfoToggle) // debug -> dump menu -EVT_MENU(MAINFRAME_MENU_ID_DEBUG_DUMP_TEXTURES, MainWindow::OnDebugDumpGeneric) -EVT_MENU(MAINFRAME_MENU_ID_DEBUG_DUMP_SHADERS, MainWindow::OnDebugDumpGeneric) -EVT_MENU(MAINFRAME_MENU_ID_DEBUG_DUMP_RECOMPILER_FUNCTIONS, MainWindow::OnDebugDumpGeneric) +EVT_MENU(MAINFRAME_MENU_ID_DEBUG_DUMP_TEXTURES, MainWindow::OnDebugDumpUsedTextures) +EVT_MENU(MAINFRAME_MENU_ID_DEBUG_DUMP_SHADERS, MainWindow::OnDebugDumpUsedShaders) EVT_MENU(MAINFRAME_MENU_ID_DEBUG_DUMP_CURL_REQUESTS, MainWindow::OnDebugSetting) // debug -> Other options EVT_MENU(MAINFRAME_MENU_ID_DEBUG_RENDER_UPSIDE_DOWN, MainWindow::OnDebugSetting) @@ -289,13 +282,8 @@ private: }; MainWindow::MainWindow() - : wxFrame(nullptr, wxID_ANY, GetInitialWindowTitle(), wxDefaultPosition, wxSize(1280, 720), wxMINIMIZE_BOX | wxMAXIMIZE_BOX | wxSYSTEM_MENU | wxCAPTION | wxCLOSE_BOX | wxCLIP_CHILDREN | wxRESIZE_BORDER) + : wxFrame(nullptr, -1, GetInitialWindowTitle(), wxDefaultPosition, wxSize(1280, 720), wxMINIMIZE_BOX | wxMAXIMIZE_BOX | wxSYSTEM_MENU | wxCAPTION | wxCLOSE_BOX | wxCLIP_CHILDREN | wxRESIZE_BORDER) { -#ifdef __WXMAC__ - // Not necessary to set wxApp::s_macExitMenuItemId as automatically handled - wxApp::s_macAboutMenuItemId = MAINFRAME_MENU_ID_HELP_ABOUT; - wxApp::s_macPreferencesMenuItemId = 
MAINFRAME_MENU_ID_OPTIONS_MAC_SETTINGS; -#endif gui_initHandleContextFromWxWidgetsWindow(g_window_info.window_main, this); g_mainFrame = this; CafeSystem::SetImplementation(this); @@ -685,15 +673,10 @@ void MainWindow::OnFileMenu(wxCommandEvent& event) void MainWindow::OnOpenFolder(wxCommandEvent& event) { - const auto id = event.GetId(); - if(id == MAINFRAME_MENU_ID_FILE_OPEN_CEMU_FOLDER) + if(event.GetId() == MAINFRAME_MENU_ID_FILE_OPEN_CEMU_FOLDER) wxLaunchDefaultApplication(wxHelper::FromPath(ActiveSettings::GetUserDataPath())); - else if(id == MAINFRAME_MENU_ID_FILE_OPEN_MLC_FOLDER) + else if(event.GetId() == MAINFRAME_MENU_ID_FILE_OPEN_MLC_FOLDER) wxLaunchDefaultApplication(wxHelper::FromPath(ActiveSettings::GetMlcPath())); - else if (id == MAINFRAME_MENU_ID_FILE_OPEN_SHADERCACHE_FOLDER) - wxLaunchDefaultApplication(wxHelper::FromPath(ActiveSettings::GetCachePath("shaderCache"))); - - } void MainWindow::OnInstallUpdate(wxCommandEvent& event) @@ -919,7 +902,6 @@ void MainWindow::OnOptionsInput(wxCommandEvent& event) break; } - case MAINFRAME_MENU_ID_OPTIONS_MAC_SETTINGS: case MAINFRAME_MENU_ID_OPTIONS_GENERAL2: { OpenSettings(); @@ -1102,29 +1084,31 @@ void MainWindow::OnPPCInfoToggle(wxCommandEvent& event) g_config.Save(); } -void MainWindow::OnDebugDumpGeneric(wxCommandEvent& event) +void MainWindow::OnDebugDumpUsedTextures(wxCommandEvent& event) { - std::string dumpSubpath; - std::function setDumpState; - switch(event.GetId()) - { - case MAINFRAME_MENU_ID_DEBUG_DUMP_TEXTURES: - dumpSubpath = "dump/textures"; - setDumpState = ActiveSettings::EnableDumpTextures; - break; - case MAINFRAME_MENU_ID_DEBUG_DUMP_SHADERS: - dumpSubpath = "dump/shaders"; - setDumpState = ActiveSettings::EnableDumpShaders; - break; - case MAINFRAME_MENU_ID_DEBUG_DUMP_RECOMPILER_FUNCTIONS: - dumpSubpath = "dump/recompiler"; - setDumpState = ActiveSettings::EnableDumpRecompilerFunctions; - break; - default: - UNREACHABLE; - } const bool value = event.IsChecked(); - setDumpState(value); 
+ ActiveSettings::EnableDumpTextures(value); + if (value) + { + try + { + // create directory + const fs::path path(ActiveSettings::GetUserDataPath()); + fs::create_directories(path / "dump" / "textures"); + } + catch (const std::exception& ex) + { + SystemException sys(ex); + cemuLog_log(LogType::Force, "can't create texture dump folder: {}", ex.what()); + ActiveSettings::EnableDumpTextures(false); + } + } +} + +void MainWindow::OnDebugDumpUsedShaders(wxCommandEvent& event) +{ + const bool value = event.IsChecked(); + ActiveSettings::EnableDumpShaders(value); if (value) { try @@ -1457,23 +1441,15 @@ void MainWindow::OnKeyUp(wxKeyEvent& event) void MainWindow::OnKeyDown(wxKeyEvent& event) { -#if defined(__APPLE__) - // On macOS, allow Cmd+Q to quit the application - if (event.CmdDown() && event.GetKeyCode() == 'Q') - { - Close(true); - } -#else - // On Windows/Linux, only Alt+F4 is allowed for quitting - if (event.AltDown() && event.GetKeyCode() == WXK_F4) - { - Close(true); - } -#endif - else - { - event.Skip(); - } + if ((event.AltDown() && event.GetKeyCode() == WXK_F4) || + (event.CmdDown() && event.GetKeyCode() == 'Q')) + { + Close(true); + } + else + { + event.Skip(); + } } void MainWindow::OnChar(wxKeyEvent& event) @@ -1868,7 +1844,7 @@ public: auto versionString = formatWxString(_("Cemu\nVersion {0}\nCompiled on {1}\nOriginal authors: {2}"), BUILD_VERSION_STRING, BUILD_DATE, "Exzap, Petergov"); sizer->Add(new wxStaticText(parent, wxID_ANY, versionString), wxSizerFlags().Border(wxALL, 3).Border(wxTOP, 10)); - sizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://cemu.info", "https://cemu.info"), wxSizerFlags().Expand().Border(wxTOP | wxBOTTOM, 3)); + sizer->Add(new wxHyperlinkCtrl(parent, -1, "https://cemu.info", "https://cemu.info"), wxSizerFlags().Expand().Border(wxTOP | wxBOTTOM, 3)); sizer->AddSpacer(3); sizer->Add(new wxStaticLine(parent), wxSizerFlags().Expand().Border(wxRIGHT, 4)); @@ -1888,105 +1864,95 @@ public: // zLib { wxSizer* lineSizer = new 
wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "zLib ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://www.zlib.net", "https://www.zlib.net"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "zLib ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://www.zlib.net", "https://www.zlib.net"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // wxWidgets { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "wxWidgets ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://www.wxwidgets.org/", "https://www.wxwidgets.org/"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "wxWidgets ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://www.wxwidgets.org/", "https://www.wxwidgets.org/"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // OpenSSL { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "OpenSSL ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://www.openssl.org/", "https://www.openssl.org/"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "OpenSSL ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://www.openssl.org/", "https://www.openssl.org/"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // libcurl { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "libcurl ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://curl.haxx.se/libcurl/", "https://curl.haxx.se/libcurl/"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new 
wxStaticText(parent, -1, "libcurl ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://curl.haxx.se/libcurl/", "https://curl.haxx.se/libcurl/"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // imgui { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "imgui ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://github.com/ocornut/imgui", "https://github.com/ocornut/imgui"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "imgui ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://github.com/ocornut/imgui", "https://github.com/ocornut/imgui"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // fontawesome { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "fontawesome ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://github.com/FortAwesome/Font-Awesome", "https://github.com/FortAwesome/Font-Awesome"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "fontawesome ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://github.com/FortAwesome/Font-Awesome", "https://github.com/FortAwesome/Font-Awesome"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // boost { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "boost ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://www.boost.org", "https://www.boost.org"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "boost ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://www.boost.org", "https://www.boost.org"), 0); + lineSizer->Add(new 
wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // libusb { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "libusb ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://libusb.info", "https://libusb.info"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); - sizer->Add(lineSizer); - } -#if BOOST_OS_MACOS - // MoltenVK - { - wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, -1, "MoltenVK ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://github.com/KhronosGroup/MoltenVK", "https://github.com/KhronosGroup/MoltenVK"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "libusb ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://libusb.info", "https://libusb.info"), 0); lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } -#endif // icons { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "icons from "), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://icons8.com", "https://icons8.com"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "icons from "), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://icons8.com", "https://icons8.com"), 0); sizer->Add(lineSizer); } // Lato font (are we still using it?) 
{ wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "\"Lato\" font by tyPoland Lukasz Dziedzic (OFL, V1.1)"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "\"Lato\" font by tyPoland Lukasz Dziedzic (OFL, V1.1)"), 0); sizer->Add(lineSizer); } // SDL { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "SDL ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "https://github.com/libsdl-org/SDL", "https://github.com/libsdl-org/SDL"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, -1, "SDL ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "https://github.com/libsdl-org/SDL", "https://github.com/libsdl-org/SDL"), 0); + lineSizer->Add(new wxStaticText(parent, -1, ")"), 0); sizer->Add(lineSizer); } // IH264 { wxSizer* lineSizer = new wxBoxSizer(wxHORIZONTAL); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, "Modified ih264 from Android project ("), 0); - lineSizer->Add(new wxHyperlinkCtrl(parent, wxID_ANY, "Source", "https://cemu.info/oss/ih264d.zip"), 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, " "), 0); - wxHyperlinkCtrl* noticeLink = new wxHyperlinkCtrl(parent, wxID_ANY, "NOTICE", ""); + lineSizer->Add(new wxStaticText(parent, -1, "Modified ih264 from Android project ("), 0); + lineSizer->Add(new wxHyperlinkCtrl(parent, -1, "Source", "https://cemu.info/oss/ih264d.zip"), 0); + lineSizer->Add(new wxStaticText(parent, -1, " "), 0); + wxHyperlinkCtrl* noticeLink = new wxHyperlinkCtrl(parent, -1, "NOTICE", ""); noticeLink->Bind(wxEVT_LEFT_DOWN, [](wxMouseEvent& event) { fs::path tempPath = fs::temp_directory_path(); @@ -2020,7 +1986,7 @@ public: wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(tempPath)))); }); lineSizer->Add(noticeLink, 0); - lineSizer->Add(new wxStaticText(parent, wxID_ANY, ")"), 0); + lineSizer->Add(new wxStaticText(parent, 
-1, ")"), 0); sizer->Add(lineSizer); } } @@ -2054,7 +2020,7 @@ public: wxString& nameList = ((i % 2) == 0) ? nameListLeft : nameListRight; if (i >= 2) nameList.append("\n"); - nameList.append(wxString::FromUTF8(name)); + nameList.append(name); } gridSizer->Add(new wxStaticText(parent, wxID_ANY, nameListLeft), wxSizerFlags()); @@ -2133,7 +2099,6 @@ void MainWindow::RecreateMenu() m_fileMenu->Append(MAINFRAME_MENU_ID_FILE_OPEN_CEMU_FOLDER, _("&Open Cemu folder")); m_fileMenu->Append(MAINFRAME_MENU_ID_FILE_OPEN_MLC_FOLDER, _("&Open MLC folder")); - m_fileMenu->Append(MAINFRAME_MENU_ID_FILE_OPEN_SHADERCACHE_FOLDER, _("Open &shader cache folder")); m_fileMenu->AppendSeparator(); m_exitMenuItem = m_fileMenu->Append(MAINFRAME_MENU_ID_FILE_EXIT, _("&Exit")); @@ -2184,9 +2149,6 @@ void MainWindow::RecreateMenu() m_padViewMenuItem = optionsMenu->AppendCheckItem(MAINFRAME_MENU_ID_OPTIONS_SECOND_WINDOW_PADVIEW, _("&Separate GamePad view"), wxEmptyString); m_padViewMenuItem->Check(GetConfig().pad_open); optionsMenu->AppendSeparator(); - #if BOOST_OS_MACOS - optionsMenu->Append(MAINFRAME_MENU_ID_OPTIONS_MAC_SETTINGS, _("&Settings..." "\tCtrl-,")); - #endif optionsMenu->Append(MAINFRAME_MENU_ID_OPTIONS_GENERAL2, _("&General settings")); optionsMenu->Append(MAINFRAME_MENU_ID_OPTIONS_INPUT, _("&Input settings")); @@ -2235,7 +2197,7 @@ void MainWindow::RecreateMenu() debugLoggingMenu->AppendSeparator(); wxMenu* logCosModulesMenu = new wxMenu(); - logCosModulesMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_LOGGING_MESSAGE, _("&Options below are for experts. Leave off if unsure"), wxEmptyString)->Enable(false); + logCosModulesMenu->AppendCheckItem(0, _("&Options below are for experts. 
Leave off if unsure"), wxEmptyString)->Enable(false); logCosModulesMenu->AppendSeparator(); logCosModulesMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_LOGGING0 + stdx::to_underlying(LogType::CoreinitFile), _("coreinit File-Access API"), wxEmptyString)->Check(cemuLog_isLoggingEnabled(LogType::CoreinitFile)); logCosModulesMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_LOGGING0 + stdx::to_underlying(LogType::CoreinitThreadSync), _("coreinit Thread-Synchronization API"), wxEmptyString)->Check(cemuLog_isLoggingEnabled(LogType::CoreinitThreadSync)); @@ -2269,7 +2231,6 @@ void MainWindow::RecreateMenu() wxMenu* debugDumpMenu = new wxMenu; debugDumpMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_DUMP_TEXTURES, _("&Textures"), wxEmptyString)->Check(ActiveSettings::DumpTexturesEnabled()); debugDumpMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_DUMP_SHADERS, _("&Shaders"), wxEmptyString)->Check(ActiveSettings::DumpShadersEnabled()); - debugDumpMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_DUMP_RECOMPILER_FUNCTIONS, _("&Recompiled functions"), wxEmptyString)->Check(ActiveSettings::DumpRecompilerFunctionsEnabled()); debugDumpMenu->AppendCheckItem(MAINFRAME_MENU_ID_DEBUG_DUMP_CURL_REQUESTS, _("&nlibcurl HTTP/HTTPS requests"), wxEmptyString); // debug submenu wxMenu* debugMenu = new wxMenu(); diff --git a/src/gui/MainWindow.h b/src/gui/MainWindow.h index ddb9795d..beb86f98 100644 --- a/src/gui/MainWindow.h +++ b/src/gui/MainWindow.h @@ -107,7 +107,8 @@ public: void OnDebugSetting(wxCommandEvent& event); void OnDebugLoggingToggleFlagGeneric(wxCommandEvent& event); void OnPPCInfoToggle(wxCommandEvent& event); - void OnDebugDumpGeneric(wxCommandEvent& event); + void OnDebugDumpUsedTextures(wxCommandEvent& event); + void OnDebugDumpUsedShaders(wxCommandEvent& event); void OnLoggingWindow(wxCommandEvent& event); void OnGDBStubToggle(wxCommandEvent& event); void OnDebugViewPPCThreads(wxCommandEvent& event); diff --git a/src/gui/MemorySearcherTool.cpp b/src/gui/MemorySearcherTool.cpp index 
8506c591..fadebc44 100644 --- a/src/gui/MemorySearcherTool.cpp +++ b/src/gui/MemorySearcherTool.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include "config/ActiveSettings.h" #include "gui/helpers/wxHelpers.h" @@ -80,7 +79,7 @@ MemorySearcherTool::MemorySearcherTool(wxFrame* parent) m_gauge->Enable(false); m_textEntryTable = new wxStaticText(this, wxID_ANY, _("Results")); - m_listResults = new wxListView(this, LIST_RESULTS, wxDefaultPosition, wxDefaultSize, wxLC_REPORT | wxLC_SORT_ASCENDING); + m_listResults = new wxListCtrl(this, LIST_RESULTS, wxDefaultPosition, wxDefaultSize, wxLC_REPORT | wxLC_SORT_ASCENDING); m_listResults->Bind(wxEVT_LEFT_DCLICK, &MemorySearcherTool::OnResultListClick, this); { wxListItem col0; @@ -389,8 +388,14 @@ void MemorySearcherTool::OnEntryListRightClick(wxDataViewEvent& event) void MemorySearcherTool::OnResultListClick(wxMouseEvent& event) { - for (long selectedIndex = m_listResults->GetFirstSelected(); selectedIndex != wxNOT_FOUND; selectedIndex = m_listResults->GetNextSelected(selectedIndex)) + long selectedIndex = -1; + + while (true) { + selectedIndex = m_listResults->GetNextItem(selectedIndex, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); + if (selectedIndex == -1) + break; + long address = m_listResults->GetItemData(selectedIndex); auto currValue = m_listResults->GetItemText(selectedIndex, 1); @@ -679,7 +684,7 @@ void MemorySearcherTool::OnPopupClick(wxCommandEvent& event) if (event.GetId() == LIST_ENTRY_REMOVE) { const int row = m_listEntryTable->GetSelectedRow(); - if (row == wxNOT_FOUND) + if (row == -1) return; m_listEntryTable->DeleteItem(row); @@ -695,7 +700,7 @@ void MemorySearcherTool::OnItemEdited(wxDataViewEvent& event) else if (column == 3) { auto row = m_listEntryTable->GetSelectedRow(); - if (row == wxNOT_FOUND) + if (row == -1) return; auto addressText = std::string(m_listEntryTable->GetTextValue(row, 1).mbc_str()); diff --git a/src/gui/MemorySearcherTool.h b/src/gui/MemorySearcherTool.h index 
27e7d27d..78b5cb77 100644 --- a/src/gui/MemorySearcherTool.h +++ b/src/gui/MemorySearcherTool.h @@ -173,7 +173,7 @@ wxDECLARE_EVENT_TABLE(); wxComboBox* m_cbDataType; wxTextCtrl* m_textValue; wxButton *m_buttonStart, *m_buttonFilter; - wxListView* m_listResults; + wxListCtrl* m_listResults; wxDataViewListCtrl* m_listEntryTable; wxStaticText* m_textEntryTable; wxGauge* m_gauge; diff --git a/src/gui/TitleManager.cpp b/src/gui/TitleManager.cpp index 67f13e05..4a4f7f56 100644 --- a/src/gui/TitleManager.cpp +++ b/src/gui/TitleManager.cpp @@ -356,7 +356,7 @@ void TitleManager::OnTitleSearchComplete(wxCommandEvent& event) void TitleManager::OnSetStatusBarText(wxSetStatusBarTextEvent& event) { - m_status_bar->SetStatusText(event.GetText(), event.GetNumber()); + m_status_bar->SetStatusText(_(event.GetText()), event.GetNumber()); } void TitleManager::OnFilterChanged(wxCommandEvent& event) @@ -491,7 +491,7 @@ void TitleManager::OnSaveOpenDirectory(wxCommandEvent& event) if (!fs::exists(target) || !fs::is_directory(target)) return; - wxLaunchDefaultApplication(wxHelper::FromPath(target)); + wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(target)))); } void TitleManager::OnSaveDelete(wxCommandEvent& event) diff --git a/src/gui/components/wxDownloadManagerList.cpp b/src/gui/components/wxDownloadManagerList.cpp index d447310c..14bf5cbe 100644 --- a/src/gui/components/wxDownloadManagerList.cpp +++ b/src/gui/components/wxDownloadManagerList.cpp @@ -25,8 +25,9 @@ wxDEFINE_EVENT(wxEVT_REMOVE_ENTRY, wxCommandEvent); + wxDownloadManagerList::wxDownloadManagerList(wxWindow* parent, wxWindowID id) - : wxListView(parent, id, wxDefaultPosition, wxDefaultSize, wxLC_REPORT | wxLC_VIRTUAL) + : wxListCtrl(parent, id, wxDefaultPosition, wxDefaultSize, wxLC_REPORT | wxLC_VIRTUAL) { AddColumns(); @@ -47,13 +48,11 @@ wxDownloadManagerList::wxDownloadManagerList(wxWindow* parent, wxWindowID id) Bind(wxEVT_REMOVE_ITEM, &wxDownloadManagerList::OnRemoveItem, this); 
Bind(wxEVT_REMOVE_ENTRY, &wxDownloadManagerList::OnRemoveEntry, this); Bind(wxEVT_CLOSE_WINDOW, &wxDownloadManagerList::OnClose, this); - - ShowSortIndicator(ColumnName); } boost::optional wxDownloadManagerList::GetSelectedTitleEntry() const { - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) { const auto tmp = GetTitleEntry(selection); @@ -66,7 +65,7 @@ boost::optional wxDownloadManagerList: boost::optional wxDownloadManagerList::GetSelectedTitleEntry() { - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) { const auto tmp = GetTitleEntry(selection); @@ -219,7 +218,16 @@ void wxDownloadManagerList::OnColumnClick(wxListEvent& event) { const int column = event.GetColumn(); - SortEntries(column); + if (column == m_sort_by_column) + { + m_sort_less = !m_sort_less; + } + else + { + m_sort_by_column = column; + m_sort_less = true; + } + SortEntries(); event.Skip(); } @@ -316,7 +324,7 @@ void wxDownloadManagerList::OnContextMenu(wxContextMenuEvent& event) wxMenu menu; menu.Bind(wxEVT_COMMAND_MENU_SELECTED, &wxDownloadManagerList::OnContextMenuSelected, this); - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection == wxNOT_FOUND) return; @@ -371,8 +379,8 @@ void wxDownloadManagerList::OnContextMenuSelected(wxCommandEvent& event) // still doing work if (m_context_worker.valid() && !future_is_ready(m_context_worker)) return; - - const auto selection = GetFirstSelected(); + + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection == wxNOT_FOUND) return; @@ -613,31 +621,24 @@ bool wxDownloadManagerList::SortFunc(std::span sortColumnOrder, const Type_ #include -void wxDownloadManagerList::SortEntries(int column) +void 
wxDownloadManagerList::SortEntries() { boost::container::small_vector s_SortColumnOrder{ ColumnName, ColumnType, ColumnVersion, ColumnTitleId, ColumnProgress }; - bool ascending; - if (column == -1) + if (m_sort_by_column != -1) { - column = GetSortIndicator(); - if (column == -1) - column = ColumnName; - ascending = IsAscendingSortIndicator(); + // prioritize column by moving it to first position in the column sort order list + s_SortColumnOrder.erase(std::remove(s_SortColumnOrder.begin(), s_SortColumnOrder.end(), m_sort_by_column), s_SortColumnOrder.end()); + s_SortColumnOrder.insert(s_SortColumnOrder.begin(), m_sort_by_column); } - else - ascending = GetUpdatedAscendingSortIndicator(column); - - // prioritize column by moving it to first position in the column sort order list - s_SortColumnOrder.erase(std::remove(s_SortColumnOrder.begin(), s_SortColumnOrder.end(), column), s_SortColumnOrder.end()); - s_SortColumnOrder.insert(s_SortColumnOrder.begin(), column); std::sort(m_sorted_data.begin(), m_sorted_data.end(), - [this, &s_SortColumnOrder, ascending](const Type_t& v1, const Type_t& v2) -> bool { - return ascending ? SortFunc(s_SortColumnOrder, v1, v2) : SortFunc(s_SortColumnOrder, v2, v1); - }); - - ShowSortIndicator(column, ascending); + [this, &s_SortColumnOrder](const Type_t& v1, const Type_t& v2) -> bool + { + const bool result = SortFunc({ s_SortColumnOrder.data(), s_SortColumnOrder.size() }, v1, v2); + return m_sort_less ? result : !result; + }); + RefreshPage(); } diff --git a/src/gui/components/wxDownloadManagerList.h b/src/gui/components/wxDownloadManagerList.h index 4febb461..3a6b853a 100644 --- a/src/gui/components/wxDownloadManagerList.h +++ b/src/gui/components/wxDownloadManagerList.h @@ -9,7 +9,7 @@ #include #include -class wxDownloadManagerList : public wxListView +class wxDownloadManagerList : public wxListCtrl { friend class TitleManager; public: @@ -49,7 +49,7 @@ public: // error state? 
}; - void SortEntries(int column = -1); + void SortEntries(); void RefreshPage(); void Filter(const wxString& filter); void Filter2(bool showTitles, bool showUpdates, bool showInstalled); @@ -138,6 +138,9 @@ private: std::vector m_data; std::vector> m_sorted_data; + int m_sort_by_column = ItemColumn::ColumnName; + bool m_sort_less = true; + bool m_filterShowTitles = true; bool m_filterShowUpdates = true; bool m_filterShowInstalled = true; diff --git a/src/gui/components/wxGameList.cpp b/src/gui/components/wxGameList.cpp index e30b16f5..6cbb5859 100644 --- a/src/gui/components/wxGameList.cpp +++ b/src/gui/components/wxGameList.cpp @@ -6,7 +6,6 @@ #include -#include #include #include #include @@ -45,7 +44,6 @@ #include #include #include -#include #endif // public events @@ -84,58 +82,8 @@ std::list _getCachesPaths(const TitleId& titleId) return cachePaths; } -// Convert PNG to Apple icon image format -bool writeICNS(const fs::path& pngPath, const fs::path& icnsPath) { - // Read PNG file - std::ifstream pngFile(pngPath, std::ios::binary); - if (!pngFile) - return false; - - // Get PNG size - pngFile.seekg(0, std::ios::end); - uint32 pngSize = static_cast(pngFile.tellg()); - pngFile.seekg(0, std::ios::beg); - - // Calculate total file size (header + size + type + data) - uint32 totalSize = 8 + 8 + pngSize; - - // Create output file - std::ofstream icnsFile(icnsPath, std::ios::binary); - if (!icnsFile) - return false; - - // Write ICNS header - icnsFile.put(0x69); // 'i' - icnsFile.put(0x63); // 'c' - icnsFile.put(0x6e); // 'n' - icnsFile.put(0x73); // 's' - - // Write total file size (big endian) - icnsFile.put((totalSize >> 24) & 0xFF); - icnsFile.put((totalSize >> 16) & 0xFF); - icnsFile.put((totalSize >> 8) & 0xFF); - icnsFile.put(totalSize & 0xFF); - - // Write icon type (ic07 = 128x128 PNG) - icnsFile.put(0x69); // 'i' - icnsFile.put(0x63); // 'c' - icnsFile.put(0x30); // '0' - icnsFile.put(0x37); // '7' - - // Write PNG size (big endian) - icnsFile.put((pngSize 
>> 24) & 0xFF); - icnsFile.put((pngSize >> 16) & 0xFF); - icnsFile.put((pngSize >> 8) & 0xFF); - icnsFile.put(pngSize & 0xFF); - - // Copy PNG data - icnsFile << pngFile.rdbuf(); - - return true; -} - wxGameList::wxGameList(wxWindow* parent, wxWindowID id) - : wxListView(parent, id, wxDefaultPosition, wxDefaultSize, GetStyleFlags(Style::kList)), m_style(Style::kList) + : wxListCtrl(parent, id, wxDefaultPosition, wxDefaultSize, GetStyleFlags(Style::kList)), m_style(Style::kList) { const auto& config = GetConfig(); @@ -194,8 +142,6 @@ wxGameList::wxGameList(wxWindow* parent, wxWindowID id) // start async worker (for icon loading) m_async_worker_active = true; m_async_worker_thread = std::thread(&wxGameList::AsyncWorkerThread, this); - - ShowSortIndicator(ColumnName); } wxGameList::~wxGameList() @@ -397,7 +343,7 @@ void wxGameList::SetStyle(Style style, bool save) SetWindowStyleFlag(GetStyleFlags(m_style)); uint64 selected_title_id = 0; - auto selection = GetFirstSelected(); + auto selection = GetNextItem(wxNOT_FOUND, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) { selected_title_id = (uint64)GetItemData(selection); @@ -420,8 +366,8 @@ void wxGameList::SetStyle(Style style, bool save) if(selection != wxNOT_FOUND) { - Select(selection); - Focus(selection); + SetItemState(selection, wxLIST_STATE_SELECTED | wxLIST_STATE_FOCUSED, wxLIST_STATE_SELECTED | wxLIST_STATE_FOCUSED); + EnsureVisible(selection); } if(save) @@ -488,71 +434,44 @@ static inline int order_to_int(const std::weak_ordering &wo) return 0; } -std::weak_ordering wxGameList::SortComparator(uint64 titleId1, uint64 titleId2, SortData* sortData) +int wxGameList::SortComparator(uint64 titleId1, uint64 titleId2, SortData* sortData) { - auto titleLastPlayed = [](uint64_t id) - { - iosu::pdm::GameListStat playTimeStat{}; - iosu::pdm::GetStatForGamelist(id, playTimeStat); - return playTimeStat; - }; + const auto isFavoriteA = GetConfig().IsGameListFavorite(titleId1); + const auto isFavoriteB 
= GetConfig().IsGameListFavorite(titleId2); + const auto& name1 = GetNameByTitleId(titleId1); + const auto& name2 = GetNameByTitleId(titleId2); - auto titlePlayMinutes = [](uint64_t id) - { - iosu::pdm::GameListStat playTimeStat; - if (!iosu::pdm::GetStatForGamelist(id, playTimeStat)) - return 0u; - return playTimeStat.numMinutesPlayed; - }; - - auto titleRegion = [](uint64_t id) - { - return CafeTitleList::GetGameInfo(id).GetRegion(); - }; - - switch(sortData->column) - { - default: - case ColumnName: - { - const auto isFavoriteA = GetConfig().IsGameListFavorite(titleId1); - const auto isFavoriteB = GetConfig().IsGameListFavorite(titleId2); - const auto nameA = GetNameByTitleId(titleId1); - const auto nameB = GetNameByTitleId(titleId2); - return std::tie(isFavoriteB, nameA) <=> std::tie(isFavoriteA, nameB); - } - case ColumnGameStarted: - return titleLastPlayed(titleId1).last_played <=> titleLastPlayed(titleId2).last_played; - case ColumnGameTime: - return titlePlayMinutes(titleId1) <=> titlePlayMinutes(titleId2); - case ColumnRegion: - return titleRegion(titleId1) <=> titleRegion(titleId2); - case ColumnTitleID: - return titleId1 <=> titleId2; - } - // unreachable - cemu_assert_debug(false); - return std::weak_ordering::less; + if(sortData->dir > 0) + return order_to_int(std::tie(isFavoriteB, name1) <=> std::tie(isFavoriteA, name2)); + else + return order_to_int(std::tie(isFavoriteB, name2) <=> std::tie(isFavoriteA, name1)); } int wxGameList::SortFunction(wxIntPtr item1, wxIntPtr item2, wxIntPtr sortData) { const auto sort_data = (SortData*)sortData; - return sort_data->dir * order_to_int(sort_data->thisptr->SortComparator((uint64)item1, (uint64)item2, sort_data)); + const int dir = sort_data->dir; + + return sort_data->thisptr->SortComparator((uint64)item1, (uint64)item2, sort_data); } void wxGameList::SortEntries(int column) { - bool ascending; if (column == -1) - { - column = GetSortIndicator(); - if (column == -1) - column = ColumnName; - ascending = 
IsAscendingSortIndicator(); - } + column = s_last_column; else - ascending = GetUpdatedAscendingSortIndicator(column); + { + if (s_last_column == column) + { + s_last_column = 0; + s_direction = -1; + } + else + { + s_last_column = column; + s_direction = 1; + } + } switch (column) { @@ -560,11 +479,9 @@ void wxGameList::SortEntries(int column) case ColumnGameTime: case ColumnGameStarted: case ColumnRegion: - case ColumnTitleID: { - SortData data{this, ItemColumns{column}, ascending ? 1 : -1}; + SortData data{ this, column, s_direction }; SortItems(SortFunction, (wxIntPtr)&data); - ShowSortIndicator(column, ascending); break; } } @@ -576,20 +493,21 @@ void wxGameList::OnKeyDown(wxListEvent& event) if (m_style != Style::kList) return; - const auto keycode = event.GetKeyCode(); + const auto keycode = std::tolower(event.m_code); if (keycode == WXK_LEFT) { const auto item_count = GetItemCount(); if (item_count > 0) { - auto selection = (int)GetFirstSelected(); + auto selection = (int)GetNextItem(wxNOT_FOUND, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection == wxNOT_FOUND) selection = 0; else selection = std::max(0, selection - GetCountPerPage()); - Select(selection); - Focus(selection); + SetItemState(wxNOT_FOUND, 0, wxLIST_STATE_SELECTED); + SetItemState(selection, wxLIST_STATE_SELECTED | wxLIST_STATE_FOCUSED, wxLIST_STATE_SELECTED | wxLIST_STATE_FOCUSED); + EnsureVisible(selection); } } else if (keycode == WXK_RIGHT) @@ -597,14 +515,15 @@ void wxGameList::OnKeyDown(wxListEvent& event) const auto item_count = GetItemCount(); if (item_count > 0) { - auto selection = (int)GetFirstSelected(); + auto selection = (int)GetNextItem(wxNOT_FOUND, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection == wxNOT_FOUND) selection = 0; selection = std::min(item_count - 1, selection + GetCountPerPage()); - Select(selection); - Focus(selection); + SetItemState(wxNOT_FOUND, 0, wxLIST_STATE_SELECTED); + SetItemState(selection, wxLIST_STATE_SELECTED | wxLIST_STATE_FOCUSED, 
wxLIST_STATE_SELECTED | wxLIST_STATE_FOCUSED); + EnsureVisible(selection); } } } @@ -644,7 +563,7 @@ void wxGameList::OnContextMenu(wxContextMenuEvent& event) wxMenu menu; menu.Bind(wxEVT_COMMAND_MENU_SELECTED, &wxGameList::OnContextMenuSelected, this); - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) { const auto title_id = (uint64)GetItemData(selection); @@ -677,7 +596,9 @@ void wxGameList::OnContextMenu(wxContextMenuEvent& event) menu.Append(kContextMenuEditGameProfile, _("&Edit game profile")); menu.AppendSeparator(); +#if BOOST_OS_LINUX || BOOST_OS_WINDOWS menu.Append(kContextMenuCreateShortcut, _("&Create shortcut")); +#endif menu.AppendSeparator(); menu.Append(kContextMenuCopyTitleName, _("&Copy Title Name")); menu.Append(kContextMenuCopyTitleId, _("&Copy Title ID")); @@ -747,7 +668,7 @@ void wxGameList::OnContextMenuSelected(wxCommandEvent& event) { fs::path path(gameInfo.GetBase().GetPath()); _stripPathFilename(path); - wxLaunchDefaultApplication(wxHelper::FromPath(path)); + wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(path)))); break; } case kWikiPage: @@ -768,21 +689,21 @@ void wxGameList::OnContextMenuSelected(wxCommandEvent& event) case kContextMenuSaveFolder: { - wxLaunchDefaultApplication(wxHelper::FromPath(gameInfo.GetSaveFolder())); + wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(gameInfo.GetSaveFolder())))); break; } case kContextMenuUpdateFolder: { fs::path path(gameInfo.GetUpdate().GetPath()); _stripPathFilename(path); - wxLaunchDefaultApplication(wxHelper::FromPath(path)); + wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(path)))); break; } case kContextMenuDLCFolder: { fs::path path(gameInfo.GetAOC().front().GetPath()); _stripPathFilename(path); - wxLaunchDefaultApplication(wxHelper::FromPath(path)); + 
wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(path)))); break; } case kContextMenuRemoveCache: @@ -803,7 +724,9 @@ void wxGameList::OnContextMenuSelected(wxCommandEvent& event) } case kContextMenuCreateShortcut: { +#if BOOST_OS_LINUX || BOOST_OS_WINDOWS CreateShortcut(gameInfo); +#endif break; } case kContextMenuCopyTitleName: @@ -1081,7 +1004,7 @@ void wxGameList::OnClose(wxCloseEvent& event) int wxGameList::FindInsertPosition(TitleId titleId) { - SortData data{this, ItemColumns(GetSortIndicator()), IsAscendingSortIndicator()}; + SortData data{ this, s_last_column, s_direction }; const auto itemCount = GetItemCount(); if (itemCount == 0) return 0; @@ -1449,135 +1372,6 @@ void wxGameList::CreateShortcut(GameInfo2& gameInfo) } outputStream << desktopEntryString; } -#elif BOOST_OS_MACOS -void wxGameList::CreateShortcut(GameInfo2& gameInfo) -{ - const auto titleId = gameInfo.GetBaseTitleId(); - const auto titleName = wxString::FromUTF8(gameInfo.GetTitleName()); - auto exePath = ActiveSettings::GetExecutablePath(); - - const wxString appName = wxString::Format("%s.app", titleName); - wxFileDialog entryDialog(this, _("Choose shortcut location"), "~/Applications", appName, - "Application (*.app)|*.app", wxFD_SAVE | wxFD_CHANGE_DIR | wxFD_OVERWRITE_PROMPT); - const auto result = entryDialog.ShowModal(); - if (result == wxID_CANCEL) - return; - const auto output_path = entryDialog.GetPath(); - // Create .app folder - const fs::path appPath = output_path.utf8_string(); - if (!fs::create_directories(appPath)) - { - cemuLog_log(LogType::Force, "Failed to create app directory"); - return; - } - const fs::path infoPath = appPath / "Contents/Info.plist"; - const fs::path scriptPath = appPath / "Contents/MacOS/run.sh"; - const fs::path icnsPath = appPath / "Contents/Resources/shortcut.icns"; - if (!(fs::create_directories(scriptPath.parent_path()) && fs::create_directories(icnsPath.parent_path()))) - { - cemuLog_log(LogType::Force, "Failed to create 
app shortcut directories"); - return; - } - - std::optional iconPath; - // Obtain and convert icon - [&]() - { - int iconIndex, smallIconIndex; - - if (!QueryIconForTitle(titleId, iconIndex, smallIconIndex)) - { - cemuLog_log(LogType::Force, "Icon hasn't loaded"); - return; - } - const fs::path outIconDir = fs::temp_directory_path(); - - if (!fs::exists(outIconDir) && !fs::create_directories(outIconDir)) - { - cemuLog_log(LogType::Force, "Failed to create icon directory"); - return; - } - - iconPath = outIconDir / fmt::format("{:016x}.png", gameInfo.GetBaseTitleId()); - wxFileOutputStream pngFileStream(_pathToUtf8(iconPath.value())); - - auto image = m_image_list->GetIcon(iconIndex).ConvertToImage(); - wxPNGHandler pngHandler; - if (!pngHandler.SaveFile(&image, pngFileStream, false)) - { - iconPath = std::nullopt; - cemuLog_log(LogType::Force, "Icon failed to save"); - } - }(); - - std::string runCommand = fmt::format("#!/bin/zsh\n\n{0:?} --title-id {1:016x}", _pathToUtf8(exePath), titleId); - const std::string infoPlist = fmt::format( - "\n" - "\n" - "\n" - "\n" - " CFBundleDisplayName\n" - " {0}\n" - " CFBundleExecutable\n" - " run.sh\n" - " CFBundleIconFile\n" - " shortcut.icns\n" - " CFBundleName\n" - " {0}\n" - " CFBundlePackageType\n" - " APPL\n" - " CFBundleSignature\n" - " \?\?\?\?\n" - " LSApplicationCategoryType\n" - " public.app-category.games\n" - " CFBundleShortVersionString\n" - " {1}\n" - " CFBundleVersion\n" - " {1}\n" - "\n" - "\n", - gameInfo.GetTitleName(), - std::to_string(gameInfo.GetVersion()) - ); - // write Info.plist to infoPath - std::ofstream infoStream(infoPath); - std::ofstream scriptStream(scriptPath); - if (!infoStream.good() || !scriptStream.good()) - { - auto errorMsg = formatWxString(_("Failed to save app shortcut to {}"), output_path.utf8_string()); - wxMessageBox(errorMsg, _("Error"), wxOK | wxCENTRE | wxICON_ERROR); - return; - } - infoStream << infoPlist; - scriptStream << runCommand; - scriptStream.close(); - - // Set execute 
permissions for script - fs::permissions( - scriptPath, - fs::perms::owner_exec | fs::perms::group_exec | fs::perms::others_exec, - fs::perm_options::add - ); - - // Return if iconPath is empty - if (!iconPath) - { - cemuLog_log(LogType::Force, "Icon not found"); - return; - } - - // Convert icon to icns, only works for 128x128 PNG - // Alternatively, can run the command "sips -s format icns {iconPath} --out '{icnsPath}'" - // using std::system() to handle images of any size - if (!writeICNS(*iconPath, icnsPath)) - { - cemuLog_log(LogType::Force, "Failed to convert icon to icns"); - return; - } - - // Remove temp file - fs::remove(*iconPath); -} #elif BOOST_OS_WINDOWS void wxGameList::CreateShortcut(GameInfo2& gameInfo) { @@ -1589,11 +1383,9 @@ void wxGameList::CreateShortcut(GameInfo2& gameInfo) PWSTR userShortcutFolder; SHGetKnownFolderPath(FOLDERID_Programs, 0, NULL, &userShortcutFolder); const wxString shortcutName = wxString::Format("%s.lnk", titleName); - wxFileDialog shortcutDialog(this, _("Choose shortcut location"), userShortcutFolder, shortcutName, + wxFileDialog shortcutDialog(this, _("Choose shortcut location"), _pathToUtf8(userShortcutFolder), shortcutName, "Shortcut (*.lnk)|*.lnk", wxFD_SAVE | wxFD_CHANGE_DIR | wxFD_OVERWRITE_PROMPT); - CoTaskMemFree(userShortcutFolder); - const auto result = shortcutDialog.ShowModal(); if (result == wxID_CANCEL) return; @@ -1623,7 +1415,7 @@ void wxGameList::CreateShortcut(GameInfo2& gameInfo) } icon_path = folder / fmt::format("{:016x}.ico", titleId); - auto stream = wxFileOutputStream(icon_path->wstring()); + auto stream = wxFileOutputStream(_pathToUtf8(*icon_path)); auto image = bitmap.ConvertToImage(); wxICOHandler icohandler{}; if (!icohandler.SaveFile(&image, stream, false)) @@ -1633,8 +1425,8 @@ void wxGameList::CreateShortcut(GameInfo2& gameInfo) } } - Microsoft::WRL::ComPtr shellLink; - HRESULT hres = CoCreateInstance(__uuidof(ShellLink), nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&shellLink)); + 
IShellLinkW* shellLink; + HRESULT hres = CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_INPROC_SERVER, IID_IShellLink, reinterpret_cast(&shellLink)); if (SUCCEEDED(hres)) { const auto description = wxString::Format("Play %s on Cemu", titleName); @@ -1650,17 +1442,19 @@ void wxGameList::CreateShortcut(GameInfo2& gameInfo) else shellLink->SetIconLocation(exePath.wstring().c_str(), 0); - Microsoft::WRL::ComPtr shellLinkFile; + IPersistFile* shellLinkFile; // save the shortcut - hres = shellLink.As(&shellLinkFile); + hres = shellLink->QueryInterface(IID_IPersistFile, reinterpret_cast(&shellLinkFile)); if (SUCCEEDED(hres)) { hres = shellLinkFile->Save(outputPath.wc_str(), TRUE); + shellLinkFile->Release(); } + shellLink->Release(); } if (!SUCCEEDED(hres)) { auto errorMsg = formatWxString(_("Failed to save shortcut to {}"), outputPath); wxMessageBox(errorMsg, _("Error"), wxOK | wxCENTRE | wxICON_ERROR); } } -#endif +#endif \ No newline at end of file diff --git a/src/gui/components/wxGameList.h b/src/gui/components/wxGameList.h index 625f7976..b285d259 100644 --- a/src/gui/components/wxGameList.h +++ b/src/gui/components/wxGameList.h @@ -30,7 +30,7 @@ wxDECLARE_EVENT(wxEVT_OPEN_GRAPHIC_PACK, wxTitleIdEvent); wxDECLARE_EVENT(wxEVT_GAMELIST_BEGIN_UPDATE, wxCommandEvent); wxDECLARE_EVENT(wxEVT_GAMELIST_END_UPDATE, wxCommandEvent); -class wxGameList : public wxListView +class wxGameList : public wxListCtrl { friend class MainWindow; public: @@ -53,7 +53,9 @@ public: void ReloadGameEntries(bool cached = false); void DeleteCachedStrings(); +#if BOOST_OS_LINUX || BOOST_OS_WINDOWS void CreateShortcut(GameInfo2& gameInfo); +#endif long FindListItemByTitleId(uint64 title_id) const; void OnClose(wxCloseEvent& event); @@ -68,7 +70,7 @@ private: inline static const wxColour kSecondColor{ 0xFDF9F2 }; void UpdateItemColors(sint32 startIndex = 0); - enum ItemColumns : int + enum ItemColumns { ColumnHiddenName = 0, ColumnIcon, @@ -83,16 +85,18 @@ private: ColumnCounts, }; + int 
s_last_column = ColumnName; + int s_direction = 1; void SortEntries(int column = -1); struct SortData { wxGameList* thisptr; - ItemColumns column; + int column; int dir; }; int FindInsertPosition(TitleId titleId); - std::weak_ordering SortComparator(uint64 titleId1, uint64 titleId2, SortData* sortData); + int SortComparator(uint64 titleId1, uint64 titleId2, SortData* sortData); static int SortFunction(wxIntPtr item1, wxIntPtr item2, wxIntPtr sortData); wxTimer* m_tooltip_timer; diff --git a/src/gui/components/wxTitleManagerList.cpp b/src/gui/components/wxTitleManagerList.cpp index a5774b14..e8efb060 100644 --- a/src/gui/components/wxTitleManagerList.cpp +++ b/src/gui/components/wxTitleManagerList.cpp @@ -38,7 +38,7 @@ wxDEFINE_EVENT(wxEVT_TITLE_REMOVED, wxCommandEvent); wxDEFINE_EVENT(wxEVT_REMOVE_ENTRY, wxCommandEvent); wxTitleManagerList::wxTitleManagerList(wxWindow* parent, wxWindowID id) - : wxListView(parent, id, wxDefaultPosition, wxDefaultSize, wxLC_REPORT | wxLC_VIRTUAL) + : wxListCtrl(parent, id, wxDefaultPosition, wxDefaultSize, wxLC_REPORT | wxLC_VIRTUAL) { AddColumns(); @@ -64,8 +64,6 @@ wxTitleManagerList::wxTitleManagerList(wxWindow* parent, wxWindowID id) m_callbackIdTitleList = CafeTitleList::RegisterCallback([](CafeTitleListCallbackEvent* evt, void* ctx) { ((wxTitleManagerList*)ctx)->HandleTitleListCallback(evt); }, this); m_callbackIdSaveList = CafeSaveList::RegisterCallback([](CafeSaveListCallbackEvent* evt, void* ctx) { ((wxTitleManagerList*)ctx)->HandleSaveListCallback(evt); }, this); - - ShowSortIndicator(ColumnTitleId); } wxTitleManagerList::~wxTitleManagerList() @@ -76,7 +74,7 @@ wxTitleManagerList::~wxTitleManagerList() boost::optional wxTitleManagerList::GetSelectedTitleEntry() const { - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) { const auto tmp = GetTitleEntry(selection); @@ -89,7 +87,7 @@ boost::optional 
wxTitleManagerList::GetSe boost::optional wxTitleManagerList::GetSelectedTitleEntry() { - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) { const auto tmp = GetTitleEntry(selection); @@ -576,7 +574,7 @@ void wxTitleManagerList::OnConvertToCompressedFormat(uint64 titleId, uint64 righ } else { - progressDialog.Update(0, _("Collecting list of files...") + fmt::format(" ({})", writerContext.totalFileCount.load())); + progressDialog.Update(0, _("Collecting list of files..." + fmt::format(" ({})", writerContext.totalFileCount.load()))); } if (progressDialog.WasCancelled()) writerContext.cancelled.store(true); @@ -759,7 +757,7 @@ void wxTitleManagerList::OnContextMenu(wxContextMenuEvent& event) wxMenu menu; menu.Bind(wxEVT_COMMAND_MENU_SELECTED, &wxTitleManagerList::OnContextMenuSelected, this); - const auto selection = GetFirstSelected(); + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection == wxNOT_FOUND) return; @@ -857,8 +855,8 @@ void wxTitleManagerList::OnContextMenuSelected(wxCommandEvent& event) // still doing work if (m_context_worker.valid() && !future_is_ready(m_context_worker)) return; - - const auto selection = GetFirstSelected(); + + const auto selection = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection == wxNOT_FOUND) return; @@ -871,7 +869,7 @@ void wxTitleManagerList::OnContextMenuSelected(wxCommandEvent& event) case kContextMenuOpenDirectory: { const auto path = fs::is_directory(entry->path) ? 
entry->path : entry->path.parent_path(); - wxLaunchDefaultApplication(wxHelper::FromPath(path)); + wxLaunchDefaultBrowser(wxHelper::FromUtf8(fmt::format("file:{}", _pathToUtf8(path)))); } break; case kContextMenuDelete: @@ -1175,48 +1173,54 @@ bool wxTitleManagerList::SortFunc(int column, const Type_t& v1, const Type_t& v2 { if(entry1.version == entry2.version) return SortFunc(ColumnTitleId, v1, v2); - - return entry1.version < entry2.version; + + return std::underlying_type_t(entry1.version) < std::underlying_type_t(entry2.version); } else if (column == ColumnRegion) { if(entry1.region == entry2.region) return SortFunc(ColumnTitleId, v1, v2); - - return std::underlying_type_t(entry1.region) < std::underlying_type_t(entry2.region); + + return std::underlying_type_t(entry1.region) < std::underlying_type_t(entry2.region); } else if (column == ColumnFormat) { if(entry1.format == entry2.format) return SortFunc(ColumnType, v1, v2); - return std::underlying_type_t(entry1.format) < std::underlying_type_t(entry2.format); + return std::underlying_type_t(entry1.format) < std::underlying_type_t(entry2.format); } return false; } void wxTitleManagerList::SortEntries(int column) { - bool ascending; - if (column == -1) + if(column == -1) { - column = GetSortIndicator(); + column = m_last_column_sorted; + m_last_column_sorted = -1; if (column == -1) column = ColumnTitleId; - ascending = IsAscendingSortIndicator(); } - else - ascending = GetUpdatedAscendingSortIndicator(column); - + if (column != ColumnTitleId && column != ColumnName && column != ColumnType && column != ColumnVersion && column != ColumnRegion && column != ColumnFormat) return; + if (m_last_column_sorted != column) + { + m_last_column_sorted = column; + m_sort_less = true; + } + else + m_sort_less = !m_sort_less; + std::sort(m_sorted_data.begin(), m_sorted_data.end(), - [this, column, ascending](const Type_t& v1, const Type_t& v2) -> bool { - return ascending ? 
SortFunc(column, v1, v2) : SortFunc(column, v2, v1); - }); - - ShowSortIndicator(column, ascending); + [this, column](const Type_t& v1, const Type_t& v2) -> bool + { + const bool result = SortFunc(column, v1, v2); + return m_sort_less ? result : !result; + }); + RefreshPage(); } diff --git a/src/gui/components/wxTitleManagerList.h b/src/gui/components/wxTitleManagerList.h index c21145b7..2780a9ce 100644 --- a/src/gui/components/wxTitleManagerList.h +++ b/src/gui/components/wxTitleManagerList.h @@ -9,7 +9,7 @@ #include #include -class wxTitleManagerList : public wxListView +class wxTitleManagerList : public wxListCtrl { friend class TitleManager; public: @@ -127,6 +127,8 @@ private: std::vector m_data; std::vector> m_sorted_data; + int m_last_column_sorted = -1; + bool m_sort_less = true; using Type_t = std::reference_wrapper; bool SortFunc(int column, const Type_t& v1, const Type_t& v2); diff --git a/src/gui/debugger/BreakpointWindow.cpp b/src/gui/debugger/BreakpointWindow.cpp index c693477d..63b92626 100644 --- a/src/gui/debugger/BreakpointWindow.cpp +++ b/src/gui/debugger/BreakpointWindow.cpp @@ -202,14 +202,14 @@ void BreakpointWindow::OnLeftDClick(wxMouseEvent& event) auto it = debuggerState.breakpoints.begin(); std::advance(it, index); - const wxString dialogTitle = (*it)->bpType == DEBUGGER_BP_T_LOGGING ? _("Enter a new logging message") : _("Enter a new comment"); - const wxString dialogMessage = (*it)->bpType == DEBUGGER_BP_T_LOGGING ? 
_("Set logging message when code at address %08x is ran.\nUse placeholders like {r3} or {f3} to log register values") : _("Set comment for breakpoint at address %08x"); - wxTextEntryDialog set_comment_dialog(this, dialogMessage, dialogTitle, (*it)->comment); - if (set_comment_dialog.ShowModal() == wxID_OK) + wxTextEntryDialog set_value_dialog(this, _("Enter a new comment."), wxString::Format(_("Set comment for breakpoint at address %08x"), address), (*it)->comment); + if (set_value_dialog.ShowModal() == wxID_OK) { - (*it)->comment = set_comment_dialog.GetValue().ToStdWstring(); - m_breakpoints->SetItem(index, ColumnComment, set_comment_dialog.GetValue()); + (*it)->comment = set_value_dialog.GetValue().ToStdWstring(); + m_breakpoints->SetItem(index, ColumnComment, set_value_dialog.GetValue()); } + + return; } } @@ -230,8 +230,8 @@ void BreakpointWindow::OnRightDown(wxMouseEvent& event) } else { - m_breakpoints->Focus(index); - m_breakpoints->Select(index); + m_breakpoints->SetItemState(index, wxLIST_STATE_FOCUSED, wxLIST_STATE_FOCUSED); + m_breakpoints->SetItemState(index, wxLIST_STATE_SELECTED, wxLIST_STATE_SELECTED); wxMenu menu; menu.Append(MENU_ID_DELETE_BP, _("Delete breakpoint")); @@ -245,8 +245,8 @@ void BreakpointWindow::OnContextMenuClickSelected(wxCommandEvent& evt) { if (evt.GetId() == MENU_ID_DELETE_BP) { - long sel = m_breakpoints->GetFirstSelected(); - if (sel != wxNOT_FOUND) + long sel = m_breakpoints->GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); + if (sel != -1) { if (sel >= debuggerState.breakpoints.size()) return; diff --git a/src/gui/debugger/DisasmCtrl.cpp b/src/gui/debugger/DisasmCtrl.cpp index 1f4b739f..2f38d55e 100644 --- a/src/gui/debugger/DisasmCtrl.cpp +++ b/src/gui/debugger/DisasmCtrl.cpp @@ -538,7 +538,7 @@ void DisasmCtrl::OnKeyPressed(sint32 key_code, const wxPoint& position) auto optVirtualAddress = LinePixelPosToAddress(position.y); switch (key_code) { - case WXK_F9: + case WXK_F9: { if (optVirtualAddress) { @@ -549,7 
+549,7 @@ void DisasmCtrl::OnKeyPressed(sint32 key_code, const wxPoint& position) } return; } - case 'G': + case 'G': { if(IsKeyDown(WXK_CONTROL)) { @@ -615,7 +615,7 @@ void DisasmCtrl::OnMouseDClick(const wxPoint& position, uint32 line) { // double-clicked on disassembly (operation and operand data) wxString currentInstruction = wxEmptyString; - wxTextEntryDialog set_value_dialog(this, _("Enter a new instruction."), wxString::Format(_("Overwrite instruction at address %08x"), virtualAddress), currentInstruction); + wxTextEntryDialog set_value_dialog(this, _("Enter a new instruction."), _(wxString::Format("Overwrite instruction at address %08x", virtualAddress)), currentInstruction); if (set_value_dialog.ShowModal() == wxID_OK) { PPCAssemblerInOut ctx = { 0 }; @@ -637,7 +637,7 @@ void DisasmCtrl::OnMouseDClick(const wxPoint& position, uint32 line) if (comment && comment->type == RplDebugSymbolComment) old_comment = comment->comment; - wxTextEntryDialog set_value_dialog(this, _("Enter a new comment."), wxString::Format(_("Create comment at address %08x"), virtualAddress), old_comment); + wxTextEntryDialog set_value_dialog(this, _("Enter a new comment."), _(wxString::Format("Create comment at address %08x", virtualAddress)), old_comment); if (set_value_dialog.ShowModal() == wxID_OK) { rplDebugSymbol_createComment(virtualAddress, set_value_dialog.GetValue().wc_str()); @@ -686,7 +686,6 @@ void DisasmCtrl::OnContextMenu(const wxPoint& position, uint32 line) // show dialog wxMenu menu; menu.Append(IDContextMenu_ToggleBreakpoint, _("Toggle breakpoint")); - menu.Append(IDContextMenu_ToggleLoggingBreakpoint, _("Toggle logging point")); if(debugger_hasPatch(virtualAddress)) menu.Append(IDContextMenu_RestoreOriginalInstructions, _("Restore original instructions")); menu.AppendSeparator(); @@ -708,13 +707,6 @@ void DisasmCtrl::OnContextMenuEntryClicked(wxCommandEvent& event) wxPostEvent(this->m_parent, evt); break; } - case IDContextMenu_ToggleLoggingBreakpoint: - { - 
debugger_toggleLoggingBreakpoint(m_contextMenuAddress); - wxCommandEvent evt(wxEVT_BREAKPOINT_CHANGE); - wxPostEvent(this->m_parent, evt); - break; - } case IDContextMenu_RestoreOriginalInstructions: { debugger_removePatch(m_contextMenuAddress); @@ -850,4 +842,4 @@ void DisasmCtrl::GoToAddressDialog() } } } -} +} \ No newline at end of file diff --git a/src/gui/debugger/DisasmCtrl.h b/src/gui/debugger/DisasmCtrl.h index b526e8f9..5a67e49a 100644 --- a/src/gui/debugger/DisasmCtrl.h +++ b/src/gui/debugger/DisasmCtrl.h @@ -8,7 +8,6 @@ class DisasmCtrl : public TextList enum { IDContextMenu_ToggleBreakpoint = wxID_HIGHEST + 1, - IDContextMenu_ToggleLoggingBreakpoint, IDContextMenu_RestoreOriginalInstructions, IDContextMenu_CopyAddress, IDContextMenu_CopyUnrelocatedAddress, diff --git a/src/gui/debugger/DumpCtrl.cpp b/src/gui/debugger/DumpCtrl.cpp index fad93bd3..16fdd87d 100644 --- a/src/gui/debugger/DumpCtrl.cpp +++ b/src/gui/debugger/DumpCtrl.cpp @@ -196,7 +196,7 @@ void DumpCtrl::OnMouseDClick(const wxPoint& position, uint32 line) const uint32 offset = LineToOffset(line) + byte_index; const uint8 value = memory_readU8(offset); - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set byte at address %08x"), offset), wxString::Format("%02x", value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), _(wxString::Format("Set byte at address %08x", offset)), wxString::Format("%02x", value)); if (set_value_dialog.ShowModal() == wxID_OK) { const uint8 new_value = std::stoul(set_value_dialog.GetValue().ToStdString(), nullptr, 16); @@ -303,4 +303,4 @@ void DumpCtrl::OnKeyPressed(sint32 key_code, const wxPoint& position) wxSize DumpCtrl::DoGetBestSize() const { return TextList::DoGetBestSize(); -} +} \ No newline at end of file diff --git a/src/gui/debugger/ModuleWindow.cpp b/src/gui/debugger/ModuleWindow.cpp index 8183c40c..c24517bd 100644 --- a/src/gui/debugger/ModuleWindow.cpp +++ b/src/gui/debugger/ModuleWindow.cpp @@ 
-126,7 +126,7 @@ void ModuleWindow::OnGameLoaded() void ModuleWindow::OnLeftDClick(wxMouseEvent& event) { long selected = m_modules->GetFirstSelected(); - if (selected == wxNOT_FOUND) + if (selected == -1) return; const auto text = m_modules->GetItemText(selected, ColumnAddress); const auto address = std::stoul(text.ToStdString(), nullptr, 16); diff --git a/src/gui/debugger/RegisterCtrl.cpp b/src/gui/debugger/RegisterCtrl.cpp index 24cae60b..bcf6fb5a 100644 --- a/src/gui/debugger/RegisterCtrl.cpp +++ b/src/gui/debugger/RegisterCtrl.cpp @@ -201,7 +201,7 @@ void RegisterCtrl::OnMouseDClick(const wxPoint& position, uint32 line) if (position.x <= OFFSET_REGISTER + OFFSET_REGISTER_LABEL) { const uint32 register_value = debuggerState.debugSession.ppcSnapshot.gpr[register_index]; - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set R%d value"), register_index), wxString::Format("%08x", register_value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), _(wxString::Format("Set R%d value", register_index)), wxString::Format("%08x", register_value)); if (set_value_dialog.ShowModal() == wxID_OK) { const uint32 new_value = std::stoul(set_value_dialog.GetValue().ToStdString(), nullptr, 16); @@ -220,7 +220,7 @@ void RegisterCtrl::OnMouseDClick(const wxPoint& position, uint32 line) if (position.x <= OFFSET_REGISTER + OFFSET_FPR) { const double register_value = debuggerState.debugSession.ppcSnapshot.fpr[register_index].fp0; - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set FP0_%d value"), register_index), wxString::Format("%lf", register_value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), _(wxString::Format("Set FP0_%d value", register_index)), wxString::Format("%lf", register_value)); if (set_value_dialog.ShowModal() == wxID_OK) { const double new_value = std::stod(set_value_dialog.GetValue().ToStdString()); @@ -234,7 +234,7 @@ void 
RegisterCtrl::OnMouseDClick(const wxPoint& position, uint32 line) else { const double register_value = debuggerState.debugSession.ppcSnapshot.fpr[register_index].fp1; - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set FP1_%d value"), register_index), wxString::Format("%lf", register_value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), _(wxString::Format("Set FP1_%d value", register_index)), wxString::Format("%lf", register_value)); if (set_value_dialog.ShowModal() == wxID_OK) { const double new_value = std::stod(set_value_dialog.GetValue().ToStdString()); diff --git a/src/gui/debugger/RegisterWindow.cpp b/src/gui/debugger/RegisterWindow.cpp index b18d4e27..55c6386f 100644 --- a/src/gui/debugger/RegisterWindow.cpp +++ b/src/gui/debugger/RegisterWindow.cpp @@ -339,7 +339,7 @@ void RegisterWindow::OnMouseDClickEvent(wxMouseEvent& event) { const uint32 register_index = id - kRegisterValueR0; const uint32 register_value = debuggerState.debugSession.ppcSnapshot.gpr[register_index]; - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set R%d value"), register_index), wxString::Format("%08x", register_value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), _(wxString::Format("Set R%d value", register_index)), wxString::Format("%08x", register_value)); if (set_value_dialog.ShowModal() == wxID_OK) { const uint32 new_value = std::stoul(set_value_dialog.GetValue().ToStdString(), nullptr, 16); @@ -355,7 +355,7 @@ void RegisterWindow::OnMouseDClickEvent(wxMouseEvent& event) { const uint32 register_index = id - kRegisterValueFPR0_0; const double register_value = debuggerState.debugSession.ppcSnapshot.fpr[register_index].fp0; - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set FP0_%d value"), register_index), wxString::Format("%lf", register_value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), 
_(wxString::Format("Set FP0_%d value", register_index)), wxString::Format("%lf", register_value)); if (set_value_dialog.ShowModal() == wxID_OK) { const double new_value = std::stod(set_value_dialog.GetValue().ToStdString()); @@ -371,7 +371,7 @@ void RegisterWindow::OnMouseDClickEvent(wxMouseEvent& event) { const uint32 register_index = id - kRegisterValueFPR1_0; const double register_value = debuggerState.debugSession.ppcSnapshot.fpr[register_index].fp1; - wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), wxString::Format(_("Set FP1_%d value"), register_index), wxString::Format("%lf", register_value)); + wxTextEntryDialog set_value_dialog(this, _("Enter a new value."), _(wxString::Format("Set FP1_%d value", register_index)), wxString::Format("%lf", register_value)); if (set_value_dialog.ShowModal() == wxID_OK) { const double new_value = std::stod(set_value_dialog.GetValue().ToStdString()); diff --git a/src/gui/debugger/SymbolCtrl.cpp b/src/gui/debugger/SymbolCtrl.cpp index 57cc96f6..aa862987 100644 --- a/src/gui/debugger/SymbolCtrl.cpp +++ b/src/gui/debugger/SymbolCtrl.cpp @@ -2,7 +2,6 @@ #include "gui/guiWrapper.h" #include "Cafe/OS/RPL/rpl_symbol_storage.h" #include "Cafe/HW/Espresso/Debugger/Debugger.h" -#include enum ItemColumns { @@ -11,7 +10,8 @@ enum ItemColumns ColumnModule, }; -SymbolListCtrl::SymbolListCtrl(wxWindow* parent, const wxWindowID& id, const wxPoint& pos, const wxSize& size) : wxListView(parent, id, pos, size, wxLC_REPORT | wxLC_VIRTUAL) +SymbolListCtrl::SymbolListCtrl(wxWindow* parent, const wxWindowID& id, const wxPoint& pos, const wxSize& size) : + wxListCtrl(parent, id, pos, size, wxLC_REPORT | wxLC_VIRTUAL) { wxListItem col0; col0.SetId(ColumnName); @@ -106,8 +106,8 @@ wxString SymbolListCtrl::OnGetItemText(long item, long column) const void SymbolListCtrl::OnLeftDClick(wxListEvent& event) { - long selected = GetFirstSelected(); - if (selected == wxNOT_FOUND) + long selected = GetNextItem(-1, wxLIST_NEXT_ALL, 
wxLIST_STATE_SELECTED); + if (selected == -1) return; const auto text = GetItemText(selected, ColumnAddress); const auto address = std::stoul(text.ToStdString(), nullptr, 16); @@ -119,8 +119,8 @@ void SymbolListCtrl::OnLeftDClick(wxListEvent& event) void SymbolListCtrl::OnRightClick(wxListEvent& event) { - long selected = GetFirstSelected(); - if (selected == wxNOT_FOUND) + long selected = GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); + if (selected == -1) return; auto text = GetItemText(selected, ColumnAddress); text = "0x" + text; @@ -162,4 +162,4 @@ void SymbolListCtrl::ChangeListFilter(std::string filter) SetItemCount(visible_entries); if (visible_entries > 0) RefreshItems(GetTopItem(), std::min(visible_entries - 1, GetTopItem() + GetCountPerPage() + 1)); -} +} \ No newline at end of file diff --git a/src/gui/debugger/SymbolCtrl.h b/src/gui/debugger/SymbolCtrl.h index 8a0161bd..81ccd326 100644 --- a/src/gui/debugger/SymbolCtrl.h +++ b/src/gui/debugger/SymbolCtrl.h @@ -2,7 +2,7 @@ #include -class SymbolListCtrl : public wxListView +class SymbolListCtrl : public wxListCtrl { public: SymbolListCtrl(wxWindow* parent, const wxWindowID& id, const wxPoint& pos, const wxSize& size); diff --git a/src/gui/helpers/wxLogEvent.h b/src/gui/helpers/wxLogEvent.h index 23f6533d..8cef1d2d 100644 --- a/src/gui/helpers/wxLogEvent.h +++ b/src/gui/helpers/wxLogEvent.h @@ -12,7 +12,7 @@ public: : wxCommandEvent(EVT_LOG), m_filter(filter), m_message(message) { } wxLogEvent(const wxLogEvent& event) - : wxCommandEvent(event), m_filter(event.GetFilter()), m_message(event.GetMessage()) { } + : wxCommandEvent(event), m_filter(event.m_filter), m_message(event.m_message) { } wxEvent* Clone() const { return new wxLogEvent(*this); } diff --git a/src/gui/input/InputAPIAddWindow.cpp b/src/gui/input/InputAPIAddWindow.cpp index 6bfb589b..688ee14e 100644 --- a/src/gui/input/InputAPIAddWindow.cpp +++ b/src/gui/input/InputAPIAddWindow.cpp @@ -23,7 +23,7 @@ using wxControllerData = 
wxCustomData; InputAPIAddWindow::InputAPIAddWindow(wxWindow* parent, const wxPoint& position, const std::vector& controllers) - : wxDialog(parent, wxID_ANY, "Add input API", position, wxDefaultSize, wxCAPTION), m_controllers(controllers) + : wxDialog(parent, wxID_ANY, "Add input API", position, wxDefaultSize, 0), m_controllers(controllers) { this->SetSizeHints(wxDefaultSize, wxDefaultSize); diff --git a/src/gui/input/PairingDialog.cpp b/src/gui/input/PairingDialog.cpp index ecbfc110..350fce81 100644 --- a/src/gui/input/PairingDialog.cpp +++ b/src/gui/input/PairingDialog.cpp @@ -225,7 +225,7 @@ void PairingDialog::WorkerThread() BluetoothFindDeviceClose(deviceFind); } } -#elif defined(HAS_BLUEZ) +#elif BOOST_OS_LINUX void PairingDialog::WorkerThread() { constexpr static uint8_t LIAC_LAP[] = {0x00, 0x8b, 0x9e}; diff --git a/src/gui/input/panels/InputPanel.cpp b/src/gui/input/panels/InputPanel.cpp index 941bbd66..514461fd 100644 --- a/src/gui/input/panels/InputPanel.cpp +++ b/src/gui/input/panels/InputPanel.cpp @@ -174,7 +174,12 @@ void InputPanel::load_controller(const EmulatedControllerPtr& controller) continue; auto button_name = controller->get_mapping_name(mapping); - text->ChangeValue(button_name); +#if BOOST_OS_WINDOWS + text->SetLabelText(button_name); +#else + // SetLabelText doesn't seem to work here for some reason on wxGTK + text->ChangeValue(button_name); +#endif } } diff --git a/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.cpp b/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.cpp index 0669321c..f4e5b7af 100644 --- a/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.cpp +++ b/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.cpp @@ -9,7 +9,6 @@ #include #include -#include enum { @@ -43,7 +42,7 @@ DebugPPCThreadsWindow::DebugPPCThreadsWindow(wxFrame& parent) wxFrame::SetBackgroundColour(*wxWHITE); auto* sizer = new wxBoxSizer(wxVERTICAL); - m_thread_list = new wxListView(this, GPLIST_ID, wxPoint(0, 0), wxSize(930, 240), 
wxLC_REPORT); + m_thread_list = new wxListCtrl(this, GPLIST_ID, wxPoint(0, 0), wxSize(930, 240), wxLC_REPORT); m_thread_list->SetFont(wxFont(8, wxFONTFAMILY_MODERN, wxFONTSTYLE_NORMAL, wxFONTWEIGHT_NORMAL, false, "Courier New")); //wxSystemSettings::GetFont(wxSYS_OEM_FIXED_FONT)); @@ -170,7 +169,7 @@ void DebugPPCThreadsWindow::RefreshThreadList() wxWindowUpdateLocker lock(m_thread_list); long selected_thread = 0; - const int selection = m_thread_list->GetFirstSelected(); + const int selection = m_thread_list->GetNextItem(-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED); if (selection != wxNOT_FOUND) selected_thread = m_thread_list->GetItemData(selection); @@ -268,15 +267,12 @@ void DebugPPCThreadsWindow::RefreshThreadList() m_thread_list->SetItem(i, 12, tempStr); - if (selected_thread != 0 && selected_thread == (long)threadItrMPTR) - { - m_thread_list->Select(i); - m_thread_list->Focus(i); - } - } - srwlock_activeThreadList.UnlockWrite(); - __OSUnlockScheduler(); - } + if(selected_thread != 0 && selected_thread == (long)threadItrMPTR) + m_thread_list->SetItemState(i, wxLIST_STATE_FOCUSED | wxLIST_STATE_SELECTED, wxLIST_STATE_FOCUSED | wxLIST_STATE_SELECTED); + } + srwlock_activeThreadList.UnlockWrite(); + __OSUnlockScheduler(); + } m_thread_list->SetScrollPos(0, scrollPos, true); } @@ -440,11 +436,12 @@ void DebugPPCThreadsWindow::OnThreadListRightClick(wxMouseEvent& event) if (itemIndex == wxNOT_FOUND) return; // select item - m_thread_list->Focus(itemIndex); - long sel = m_thread_list->GetFirstSelected(); - if (sel != wxNOT_FOUND) - m_thread_list->Select(sel, false); - m_thread_list->Select(itemIndex); + m_thread_list->SetItemState(itemIndex, wxLIST_STATE_FOCUSED, wxLIST_STATE_FOCUSED); + long sel = m_thread_list->GetNextItem(-1, wxLIST_NEXT_ALL, + wxLIST_STATE_SELECTED); + if (sel != -1) + m_thread_list->SetItemState(sel, 0, wxLIST_STATE_SELECTED); + m_thread_list->SetItemState(itemIndex, wxLIST_STATE_SELECTED, wxLIST_STATE_SELECTED); // check if thread is still on 
the list of active threads MPTR threadMPTR = (MPTR)m_thread_list->GetItemData(itemIndex); __OSLockScheduler(); diff --git a/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.h b/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.h index f6dc9060..649780c5 100644 --- a/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.h +++ b/src/gui/windows/PPCThreadsViewer/DebugPPCThreadsWindow.h @@ -23,7 +23,7 @@ private: void PresentProfileResults(OSThread_t* thread, const std::unordered_map& samples); void DumpStackTrace(struct OSThread_t* thread); - wxListView* m_thread_list; + wxListCtrl* m_thread_list; wxCheckBox* m_auto_refresh; wxTimer* m_timer; @@ -32,4 +32,4 @@ private: wxDECLARE_EVENT_TABLE(); -}; +}; \ No newline at end of file diff --git a/src/gui/wxcomponents/checkedlistctrl.h b/src/gui/wxcomponents/checkedlistctrl.h index e09536f9..215c0004 100644 --- a/src/gui/wxcomponents/checkedlistctrl.h +++ b/src/gui/wxcomponents/checkedlistctrl.h @@ -72,7 +72,7 @@ DECLARE_EXPORTED_EVENT_TYPE(WXEXPORT, wxEVT_COMMAND_LIST_ITEM_UNCHECKED, -1); //! This is the class which performs all transactions with the server. //! It uses the wxSocket facilities. 
-class wxCheckedListCtrl : public wxListView +class wxCheckedListCtrl : public wxListCtrl { protected: @@ -85,18 +85,18 @@ protected: public: wxCheckedListCtrl() - : wxListView(), m_imageList(16, 16, TRUE) {} + : wxListCtrl(), m_imageList(16, 16, TRUE) {} - wxCheckedListCtrl(wxWindow *parent, wxWindowID id = wxID_ANY, + wxCheckedListCtrl(wxWindow *parent, wxWindowID id = -1, const wxPoint& pt = wxDefaultPosition, const wxSize& sz = wxDefaultSize, long style = wxCLC_CHECK_WHEN_SELECTING, const wxValidator& validator = wxDefaultValidator, const wxString& name = wxListCtrlNameStr) - : wxListView(), m_imageList(16, 16, TRUE) + : wxListCtrl(), m_imageList(16, 16, TRUE) { Create(parent, id, pt, sz, style, validator, name); } - bool Create(wxWindow *parent, wxWindowID id = wxID_ANY, + bool Create(wxWindow *parent, wxWindowID id = -1, const wxPoint& pt = wxDefaultPosition, const wxSize& sz = wxDefaultSize, long style = wxCLC_CHECK_WHEN_SELECTING, diff --git a/src/input/CMakeLists.txt b/src/input/CMakeLists.txt index af8b8181..004dc2ba 100644 --- a/src/input/CMakeLists.txt +++ b/src/input/CMakeLists.txt @@ -61,7 +61,7 @@ if(WIN32) ) endif() -if (SUPPORTS_WIIMOTE) +if (ENABLE_WIIMOTE) target_compile_definitions(CemuInput PUBLIC SUPPORTS_WIIMOTE) target_sources(CemuInput PRIVATE api/Wiimote/WiimoteControllerProvider.h @@ -70,17 +70,13 @@ if (SUPPORTS_WIIMOTE) api/Wiimote/NativeWiimoteController.h api/Wiimote/NativeWiimoteController.cpp api/Wiimote/WiimoteDevice.h + api/Wiimote/hidapi/HidapiWiimote.cpp + api/Wiimote/hidapi/HidapiWiimote.h ) - if (ENABLE_HIDAPI) + if (UNIX AND NOT APPLE) target_sources(CemuInput PRIVATE - api/Wiimote/hidapi/HidapiWiimote.cpp - api/Wiimote/hidapi/HidapiWiimote.h) - endif () - - if (ENABLE_BLUEZ) - target_sources(CemuInput PRIVATE - api/Wiimote/l2cap/L2CapWiimote.cpp - api/Wiimote/l2cap/L2CapWiimote.h) + api/Wiimote/l2cap/L2CapWiimote.cpp + api/Wiimote/l2cap/L2CapWiimote.h) endif() endif () @@ -99,7 +95,6 @@ target_link_libraries(CemuInput 
PRIVATE pugixml::pugixml SDL2::SDL2 ) - if (ENABLE_HIDAPI) target_link_libraries(CemuInput PRIVATE hidapi::hidapi) endif() @@ -108,6 +103,7 @@ if (ENABLE_WXWIDGETS) target_link_libraries(CemuInput PRIVATE wx::base wx::core) endif() -if (ENABLE_BLUEZ) + +if (UNIX AND NOT APPLE) target_link_libraries(CemuInput PRIVATE bluez::bluez) endif () \ No newline at end of file diff --git a/src/input/api/DirectInput/DirectInputController.cpp b/src/input/api/DirectInput/DirectInputController.cpp index 234696b6..dbb7c80c 100644 --- a/src/input/api/DirectInput/DirectInputController.cpp +++ b/src/input/api/DirectInput/DirectInputController.cpp @@ -15,6 +15,9 @@ DirectInputController::DirectInputController(const GUID& guid, std::string_view DirectInputController::~DirectInputController() { + if (m_effect) + m_effect->Release(); + if (m_device) { m_device->Unacquire(); @@ -36,8 +39,8 @@ DirectInputController::~DirectInputController() if (kGameCubeController == m_product_guid) should_release_device = false; - if (!should_release_device) - m_device.Detach(); + if (should_release_device) + m_device->Release(); } } @@ -101,6 +104,7 @@ bool DirectInputController::connect() // set data format if (FAILED(m_device->SetDataFormat(m_provider->get_data_format()))) { + SAFE_RELEASE(m_device); return false; } @@ -111,6 +115,7 @@ bool DirectInputController::connect() { if (FAILED(m_device->SetCooperativeLevel(hwndMainWindow, DISCL_BACKGROUND | DISCL_NONEXCLUSIVE))) { + SAFE_RELEASE(m_device); return false; } // rumble can only be used with exclusive access diff --git a/src/input/api/DirectInput/DirectInputController.h b/src/input/api/DirectInput/DirectInputController.h index d2c3dba2..2ec371a2 100644 --- a/src/input/api/DirectInput/DirectInputController.h +++ b/src/input/api/DirectInput/DirectInputController.h @@ -2,7 +2,6 @@ #include "input/api/DirectInput/DirectInputControllerProvider.h" #include "input/api/Controller.h" -#include class DirectInputController : public Controller { @@ -42,9 +41,9 
@@ private: GUID m_product_guid{}; std::shared_mutex m_mutex; - Microsoft::WRL::ComPtr m_device; - Microsoft::WRL::ComPtr m_effect; + LPDIRECTINPUTDEVICE8 m_device = nullptr; + LPDIRECTINPUTEFFECT m_effect = nullptr; std::array m_min_axis{}; std::array m_max_axis{}; -}; +}; \ No newline at end of file diff --git a/src/input/api/DirectInput/DirectInputControllerProvider.cpp b/src/input/api/DirectInput/DirectInputControllerProvider.cpp index 79f31354..063cb779 100644 --- a/src/input/api/DirectInput/DirectInputControllerProvider.cpp +++ b/src/input/api/DirectInput/DirectInputControllerProvider.cpp @@ -19,7 +19,7 @@ DirectInputControllerProvider::DirectInputControllerProvider() const auto r = DirectInput8Create(GetModuleHandle(nullptr), DIRECTINPUT_VERSION, IID_IDirectInput8, (void**)&m_dinput8, nullptr); - if (FAILED(r)) + if (FAILED(r) || !m_dinput8) { const auto error = GetLastError(); //FreeLibrary(m_module); @@ -29,6 +29,9 @@ DirectInputControllerProvider::DirectInputControllerProvider() DirectInputControllerProvider::~DirectInputControllerProvider() { + if (m_dinput8) + m_dinput8->Release(); + /*if (m_module) FreeLibrary(m_module); */ diff --git a/src/input/api/DirectInput/DirectInputControllerProvider.h b/src/input/api/DirectInput/DirectInputControllerProvider.h index de8c3216..5db883c0 100644 --- a/src/input/api/DirectInput/DirectInputControllerProvider.h +++ b/src/input/api/DirectInput/DirectInputControllerProvider.h @@ -4,7 +4,6 @@ #define DIRECTINPUT_VERSION 0x0800 #include -#include #include "input/api/ControllerProvider.h" @@ -23,7 +22,7 @@ public: std::vector> get_controllers() override; - IDirectInput8* get_dinput() const { return m_dinput8.Get(); } + IDirectInput8* get_dinput() const { return m_dinput8; } LPCDIDATAFORMAT get_data_format() const; private: @@ -32,7 +31,7 @@ private: decltype(&DirectInput8Create) m_DirectInput8Create; decltype(&GetdfDIJoystick) m_GetdfDIJoystick = nullptr; - Microsoft::WRL::ComPtr m_dinput8; + IDirectInput8* m_dinput8 = 
nullptr; }; -#endif +#endif \ No newline at end of file diff --git a/src/input/api/Wiimote/WiimoteControllerProvider.cpp b/src/input/api/Wiimote/WiimoteControllerProvider.cpp index c2454319..221d75a7 100644 --- a/src/input/api/Wiimote/WiimoteControllerProvider.cpp +++ b/src/input/api/Wiimote/WiimoteControllerProvider.cpp @@ -323,7 +323,6 @@ void WiimoteControllerProvider::reader_thread() break; case kExtensionClassicPro: cemuLog_logDebug(LogType::Force,"Extension Type Received: Classic Pro"); - new_state.m_extension = ClassicData{}; break; case kExtensionGuitar: cemuLog_logDebug(LogType::Force,"Extension Type Received: Guitar"); diff --git a/src/input/api/Wiimote/l2cap/L2CapWiimote.cpp b/src/input/api/Wiimote/l2cap/L2CapWiimote.cpp index a6bdf574..28a123f3 100644 --- a/src/input/api/Wiimote/l2cap/L2CapWiimote.cpp +++ b/src/input/api/Wiimote/l2cap/L2CapWiimote.cpp @@ -23,15 +23,15 @@ static bool AttemptSetNonBlock(int sockFd) return fcntl(sockFd, F_SETFL, fcntl(sockFd, F_GETFL) | O_NONBLOCK) == 0; } -L2CapWiimote::L2CapWiimote(int controlFd, int dataFd, bdaddr_t addr) - : m_controlFd(controlFd), m_dataFd(dataFd), m_addr(addr) +L2CapWiimote::L2CapWiimote(int recvFd, int sendFd, bdaddr_t addr) + : m_recvFd(recvFd), m_sendFd(sendFd), m_addr(addr) { } L2CapWiimote::~L2CapWiimote() { - close(m_dataFd); - close(m_controlFd); + close(m_recvFd); + close(m_sendFd); const auto& b = m_addr.b; cemuLog_logDebug(LogType::Force, "Wiimote at {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x} disconnected", b[5], b[4], b[3], b[2], b[1], b[0]); @@ -61,51 +61,51 @@ std::vector L2CapWiimote::get_devices() std::vector outDevices; for (const auto& addr : unconnected) { - // Control socket, PSM 0x11, needs to be open for the data socket to be opened - auto controlFd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP); - if (controlFd < 0) + // Socket for sending data to controller, PSM 0x11 + auto sendFd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP); + if (sendFd < 0) { - 
cemuLog_logDebug(LogType::Force, "Failed to open control socket: {}", strerror(errno)); + cemuLog_logDebug(LogType::Force, "Failed to open send socket: {}", strerror(errno)); continue; } - sockaddr_l2 controlAddr{}; - controlAddr.l2_family = AF_BLUETOOTH; - controlAddr.l2_psm = htobs(0x11); - controlAddr.l2_bdaddr = addr; + sockaddr_l2 sendAddr{}; + sendAddr.l2_family = AF_BLUETOOTH; + sendAddr.l2_psm = htobs(0x11); + sendAddr.l2_bdaddr = addr; - if (!AttemptConnect(controlFd, controlAddr) || !AttemptSetNonBlock(controlFd)) + if (!AttemptConnect(sendFd, sendAddr) || !AttemptSetNonBlock(sendFd)) { const auto& b = addr.b; - cemuLog_logDebug(LogType::Force, "Failed to connect control socket to '{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}': {}", + cemuLog_logDebug(LogType::Force, "Failed to connect send socket to '{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}': {}", b[5], b[4], b[3], b[2], b[1], b[0], strerror(errno)); - close(controlFd); + close(sendFd); continue; } - // Socket for sending and receiving data from controller, PSM 0x13 - auto dataFd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP); - if (dataFd < 0) + // Socket for receiving data from controller, PSM 0x13 + auto recvFd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP); + if (recvFd < 0) { - cemuLog_logDebug(LogType::Force, "Failed to open data socket: {}", strerror(errno)); - close(controlFd); + cemuLog_logDebug(LogType::Force, "Failed to open recv socket: {}", strerror(errno)); + close(sendFd); continue; } - sockaddr_l2 dataAddr{}; - dataAddr.l2_family = AF_BLUETOOTH; - dataAddr.l2_psm = htobs(0x13); - dataAddr.l2_bdaddr = addr; + sockaddr_l2 recvAddr{}; + recvAddr.l2_family = AF_BLUETOOTH; + recvAddr.l2_psm = htobs(0x13); + recvAddr.l2_bdaddr = addr; - if (!AttemptConnect(dataFd, dataAddr) || !AttemptSetNonBlock(dataFd)) + if (!AttemptConnect(recvFd, recvAddr) || !AttemptSetNonBlock(recvFd)) { const auto& b = addr.b; - cemuLog_logDebug(LogType::Force, "Failed to connect data socket to 
'{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}': {}", + cemuLog_logDebug(LogType::Force, "Failed to connect recv socket to '{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}': {}", b[5], b[4], b[3], b[2], b[1], b[0], strerror(errno)); - close(dataFd); - close(controlFd); + close(sendFd); + close(recvFd); continue; } - outDevices.emplace_back(std::make_shared(controlFd, dataFd, addr)); + outDevices.emplace_back(std::make_shared(sendFd, recvFd, addr)); s_addressMutex.lock(); s_addresses[addr] = true; @@ -123,13 +123,13 @@ bool L2CapWiimote::write_data(const std::vector& data) buffer[0] = 0xA2; std::memcpy(buffer + 1, data.data(), size); const auto outSize = size + 1; - return send(m_dataFd, buffer, outSize, 0) == outSize; + return send(m_sendFd, buffer, outSize, 0) == outSize; } std::optional> L2CapWiimote::read_data() { uint8 buffer[23]; - const auto nBytes = recv(m_dataFd, buffer, 23, 0); + const auto nBytes = recv(m_sendFd, buffer, 23, 0); if (nBytes < 0 && errno == EWOULDBLOCK) return std::vector{}; diff --git a/src/input/api/Wiimote/l2cap/L2CapWiimote.h b/src/input/api/Wiimote/l2cap/L2CapWiimote.h index 0b6c5c19..cc8d071b 100644 --- a/src/input/api/Wiimote/l2cap/L2CapWiimote.h +++ b/src/input/api/Wiimote/l2cap/L2CapWiimote.h @@ -5,7 +5,7 @@ class L2CapWiimote : public WiimoteDevice { public: - L2CapWiimote(int controlFd, int dataFd, bdaddr_t addr); + L2CapWiimote(int recvFd, int sendFd, bdaddr_t addr); ~L2CapWiimote() override; bool write_data(const std::vector& data) override; @@ -15,8 +15,8 @@ class L2CapWiimote : public WiimoteDevice static void AddCandidateAddress(bdaddr_t addr); static std::vector get_devices(); private: - int m_controlFd; - int m_dataFd; + int m_recvFd; + int m_sendFd; bdaddr_t m_addr; }; diff --git a/src/util/DXGIWrapper/DXGIWrapper.h b/src/util/DXGIWrapper/DXGIWrapper.h index 363733e7..54f2454d 100644 --- a/src/util/DXGIWrapper/DXGIWrapper.h +++ b/src/util/DXGIWrapper/DXGIWrapper.h @@ -1,7 +1,7 @@ #pragma once #include -#include +//#include class 
DXGIWrapper { @@ -23,28 +23,34 @@ public: throw std::runtime_error("can't find CreateDXGIFactory1 in dxgi module"); } - Microsoft::WRL::ComPtr dxgiFactory; + IDXGIFactory1* dxgiFactory = nullptr; pCreateDXGIFactory1(IID_PPV_ARGS(&dxgiFactory)); - Microsoft::WRL::ComPtr dxgiAdapter; + IDXGIAdapter1* tmpDxgiAdapter = nullptr; UINT adapterIndex = 0; - while (dxgiFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) != DXGI_ERROR_NOT_FOUND) + while (dxgiFactory->EnumAdapters1(adapterIndex, &tmpDxgiAdapter) != DXGI_ERROR_NOT_FOUND) { DXGI_ADAPTER_DESC1 desc; - dxgiAdapter->GetDesc1(&desc); + tmpDxgiAdapter->GetDesc1(&desc); if (deviceLUID == nullptr || memcmp(&desc.AdapterLuid, deviceLUID, sizeof(LUID)) == 0) { - if (FAILED(dxgiAdapter.As(&m_dxgiAdapter))) - { - Cleanup(); - throw std::runtime_error("can't create dxgi adapter"); - } + tmpDxgiAdapter->QueryInterface(IID_PPV_ARGS(&m_dxgiAdapter)); + tmpDxgiAdapter->Release(); break; } + tmpDxgiAdapter->Release(); ++adapterIndex; } + + dxgiFactory->Release(); + + if (!m_dxgiAdapter) + { + Cleanup(); + throw std::runtime_error("can't create dxgi adapter"); + } } ~DXGIWrapper() @@ -59,11 +65,15 @@ public: private: HMODULE m_moduleHandle = nullptr; - Microsoft::WRL::ComPtr m_dxgiAdapter; + IDXGIAdapter3* m_dxgiAdapter = nullptr; void Cleanup() { - m_dxgiAdapter.Reset(); + if (m_dxgiAdapter) + { + m_dxgiAdapter->Release(); + m_dxgiAdapter = nullptr; + } if (m_moduleHandle) { @@ -71,4 +81,4 @@ private: m_moduleHandle = nullptr; } } -}; +}; \ No newline at end of file diff --git a/src/util/Fiber/FiberUnix.cpp b/src/util/Fiber/FiberUnix.cpp index 36430449..0d527069 100644 --- a/src/util/Fiber/FiberUnix.cpp +++ b/src/util/Fiber/FiberUnix.cpp @@ -15,12 +15,7 @@ Fiber::Fiber(void(*FiberEntryPoint)(void* userParam), void* userParam, void* pri ctx->uc_stack.ss_sp = m_stackPtr; ctx->uc_stack.ss_size = stackSize; ctx->uc_link = &ctx[0]; -#ifdef __arm64__ - // https://www.man7.org/linux/man-pages/man3/makecontext.3.html#NOTES - 
makecontext(ctx, (void(*)())FiberEntryPoint, 2, (uint64) userParam >> 32, userParam); -#else makecontext(ctx, (void(*)())FiberEntryPoint, 1, userParam); -#endif this->m_implData = (void*)ctx; } diff --git a/src/util/MemMapper/MemMapperUnix.cpp b/src/util/MemMapper/MemMapperUnix.cpp index 8e800e53..0ade291d 100644 --- a/src/util/MemMapper/MemMapperUnix.cpp +++ b/src/util/MemMapper/MemMapperUnix.cpp @@ -45,11 +45,7 @@ namespace MemMapper void* r; if(fromReservation) { - uint64 page_size = sysconf(_SC_PAGESIZE); - void* page = baseAddr; - if ( (uint64) baseAddr % page_size != 0 ) - page = (void*) ((uint64)baseAddr & ~(page_size - 1)); - if( mprotect(page, size, GetProt(permissionFlags)) == 0 ) + if( mprotect(baseAddr, size, GetProt(permissionFlags)) == 0 ) r = baseAddr; else r = nullptr; diff --git a/src/util/helpers/StringBuf.h b/src/util/helpers/StringBuf.h index 6242fa4c..432fa7a1 100644 --- a/src/util/helpers/StringBuf.h +++ b/src/util/helpers/StringBuf.h @@ -44,9 +44,10 @@ public: void add(std::string_view appendedStr) { + size_t remainingLen = this->limit - this->length; size_t copyLen = appendedStr.size(); - if (this->length + copyLen + 1 >= this->limit) - _reserve(std::max(this->length + copyLen + 64, this->limit + this->limit / 2)); + if (remainingLen < copyLen) + copyLen = remainingLen; char* outputStart = (char*)(this->str + this->length); std::copy(appendedStr.data(), appendedStr.data() + copyLen, outputStart); length += copyLen; @@ -79,13 +80,6 @@ public: } private: - void _reserve(uint32 newLimit) - { - cemu_assert_debug(newLimit > length); - this->str = (uint8*)realloc(this->str, newLimit + 4); - this->limit = newLimit; - } - uint8* str; uint32 length; /* in bytes */ uint32 limit; /* in bytes */ diff --git a/src/util/helpers/StringHelpers.h b/src/util/helpers/StringHelpers.h index fb858f4d..54141808 100644 --- a/src/util/helpers/StringHelpers.h +++ b/src/util/helpers/StringHelpers.h @@ -2,92 +2,6 @@ #include "boost/nowide/convert.hpp" #include -// 
Definition for removed templates in Apple Clang 17 -#if defined(__apple_build_version__) && (__apple_build_version__ >= 17000000) -namespace std { - template<> - struct char_traits { - using char_type = uint16be; - using int_type = int; - using off_type = streamoff; - using pos_type = streampos; - using state_type = mbstate_t; - - static inline void constexpr assign(char_type& c1, const char_type& c2) noexcept { - c1 = c2; - } - - static inline constexpr bool eq(char_type c1, char_type c2) noexcept { - return c1 == c2; - } - - static inline constexpr bool lt(char_type c1, char_type c2) noexcept { - return c1 < c2; - } - - static constexpr int compare(const char_type* s1, const char_type* s2, size_t n) { - for (; n; --n, ++s1, ++s2) { - if (lt(*s1, *s2)) return -1; - if (lt(*s2, *s1)) return 1; - } - return 0; - } - - static constexpr size_t length(const char_type* s) { - size_t len = 0; - for (; !eq(*s, char_type(0)); ++s) ++len; - return len; - } - - static constexpr const char_type* find(const char_type* s, size_t n, const char_type& a) { - for (; n; --n) { - if (eq(*s, a)) - return s; - ++s; - } - return nullptr; - } - - static constexpr char_type* move(char_type* s1, const char_type* s2, size_t n) { - if (n == 0) return s1; - return static_cast(memmove(s1, s2, n * sizeof(char_type))); - } - - static constexpr char_type* copy(char_type* s1, const char_type* s2, size_t n) { - if (n == 0) return s1; - return static_cast(memcpy(s1, s2, n * sizeof(char_type))); - } - - static constexpr char_type* assign(char_type* s, size_t n, char_type a) { - char_type* r = s; - for (; n; --n, ++s) - assign(*s, a); - return r; - } - - static inline constexpr char_type to_char_type(int_type c) noexcept { - return char_type(c); - } - - static inline constexpr int_type to_int_type(char_type c) noexcept { - return int_type(c); - } - - static inline constexpr bool eq_int_type(int_type c1, int_type c2) noexcept { - return c1 == c2; - } - - static inline constexpr int_type eof() noexcept 
{ - return static_cast(EOF); - } - - static inline constexpr int_type not_eof(int_type c) noexcept { - return eq_int_type(c, eof()) ? ~eof() : c; - } - }; -} -#endif - // todo - move the Cafe/PPC specific parts to CafeString.h eventually namespace StringHelpers { @@ -111,9 +25,9 @@ namespace StringHelpers } // convert utf8 string to Wii U big-endian wchar_t string - static std::vector FromUtf8(std::string_view str) + static std::basic_string FromUtf8(std::string_view str) { - std::vector tmpStr; + std::basic_string tmpStr; std::wstring w = boost::nowide::widen(str.data(), str.size()); for (auto& c : w) tmpStr.push_back((uint16)c); diff --git a/src/util/highresolutiontimer/HighResolutionTimer.cpp b/src/util/highresolutiontimer/HighResolutionTimer.cpp index bb4a40ab..67ffa349 100644 --- a/src/util/highresolutiontimer/HighResolutionTimer.cpp +++ b/src/util/highresolutiontimer/HighResolutionTimer.cpp @@ -27,8 +27,6 @@ uint64 HighResolutionTimer::m_freq = []() -> uint64 { LARGE_INTEGER freq; QueryPerformanceFrequency(&freq); return (uint64)(freq.QuadPart); -#elif BOOST_OS_MACOS - return 1000000000; #else timespec pc; clock_getres(CLOCK_MONOTONIC_RAW, &pc); diff --git a/vcpkg.json b/vcpkg.json index c746e00c..0a46e32e 100644 --- a/vcpkg.json +++ b/vcpkg.json @@ -57,10 +57,6 @@ "libusb" ], "overrides": [ - { - "name": "glslang", - "version": "15.1.0" - }, { "name": "sdl2", "version": "2.30.3"