diff --git a/.ci/build-freebsd.sh b/.ci/build-freebsd.sh
index 8e1eafc39b..7f35412b58 100755
--- a/.ci/build-freebsd.sh
+++ b/.ci/build-freebsd.sh
@@ -1,23 +1,10 @@
#!/bin/sh -ex
-# Pull all the submodules except llvm
+# Pull all the submodules except some
# Note: Tried to use git submodule status, but it takes over 20 seconds
# shellcheck disable=SC2046
-git submodule -q update --init --depth 1 $(awk '/path/ && !/llvm/ && !/SPIRV/ { print $3 }' .gitmodules)
-
-# Prefer newer Clang than in base system (see also .ci/install-freebsd.sh)
-# libc++ isn't in llvm* packages, so download manually
-fetch https://github.com/llvm/llvm-project/releases/download/llvmorg-16.0.1/llvm-project-16.0.1.src.tar.xz
-tar xf llvm*.tar.xz
-export CC=clang16 CXX=clang++16
-cmake -B libcxx_build -G Ninja -S llvm*/libcxx \
- -DLLVM_CCACHE_BUILD=ON \
- -DLIBCXX_INCLUDE_BENCHMARKS=OFF \
- -DCMAKE_INSTALL_PREFIX:PATH=libcxx_prefix
-cmake --build libcxx_build
-cmake --install libcxx_build
-export CXXFLAGS="$CXXFLAGS -nostdinc++ -isystem$PWD/libcxx_prefix/include/c++/v1"
-export LDFLAGS="$LDFLAGS -nostdlib++ -L$PWD/libcxx_prefix/lib -l:libc++.a -lcxxrt"
+git config --global --add safe.directory .
+git submodule -q update --init --depth 1 $(awk '/path/ && !/llvm/ && !/opencv/ && !/libpng/ && !/libsdl-org/ && !/curl/ && !/zlib/ && !/libusb/ { print $3 }' .gitmodules)
CONFIGURE_ARGS="
-DWITH_LLVM=ON
@@ -27,8 +14,13 @@ CONFIGURE_ARGS="
-DUSE_SYSTEM_FFMPEG=ON
-DUSE_SYSTEM_CURL=ON
-DUSE_SYSTEM_LIBPNG=ON
+ -DUSE_SYSTEM_LIBUSB=ON
+ -DUSE_SYSTEM_OPENCV=ON
"
+# base Clang workaround (missing clang-scan-deps)
+CONFIGURE_ARGS="$CONFIGURE_ARGS -DCMAKE_CXX_SCAN_FOR_MODULES=OFF"
+
# shellcheck disable=SC2086
cmake -B build -G Ninja $CONFIGURE_ARGS
cmake --build build
diff --git a/.ci/build-linux-aarch64.sh b/.ci/build-linux-aarch64.sh
new file mode 100755
index 0000000000..1fe640809c
--- /dev/null
+++ b/.ci/build-linux-aarch64.sh
@@ -0,0 +1,55 @@
+#!/bin/sh -ex
+
+cd rpcs3 || exit 1
+
+shellcheck .ci/*.sh
+
+git config --global --add safe.directory '*'
+
+# Pull all the submodules except some
+# shellcheck disable=SC2046
+git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/libsdl-org/ && !/curl/ && !/zlib/ { print $3 }' .gitmodules)
+
+mkdir build && cd build || exit 1
+
+if [ "$COMPILER" = "gcc" ]; then
+ # These are set in the dockerfile
+ export CC="${GCC_BINARY}"
+ export CXX="${GXX_BINARY}"
+ export LINKER=gold
+else
+ export CC="${CLANG_BINARY}"
+ export CXX="${CLANGXX_BINARY}"
+ export LINKER="${LLD_BINARY}"
+fi
+
+export LINKER_FLAG="-fuse-ld=${LINKER}"
+
+cmake .. \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DUSE_NATIVE_INSTRUCTIONS=OFF \
+ -DUSE_PRECOMPILED_HEADERS=OFF \
+ -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DUSE_SYSTEM_CURL=ON \
+ -DUSE_SDL=ON \
+ -DUSE_SYSTEM_SDL=ON \
+ -DUSE_SYSTEM_FFMPEG=OFF \
+ -DUSE_SYSTEM_OPENCV=ON \
+ -DUSE_DISCORD_RPC=ON \
+ -DOpenGL_GL_PREFERENCE=LEGACY \
+ -DLLVM_DIR=/opt/llvm/lib/cmake/llvm \
+ -DSTATIC_LINK_LLVM=ON \
+ -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \
+ -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \
+ -G Ninja
+
+ninja; build_status=$?;
+
+cd ..
+
+# If it compiled successfully, let's deploy.
+if [ "$build_status" -eq 0 ]; then
+ .ci/deploy-linux.sh "aarch64"
+fi
diff --git a/.ci/build-linux.sh b/.ci/build-linux.sh
index af43e6b0ab..13a9e802f3 100755
--- a/.ci/build-linux.sh
+++ b/.ci/build-linux.sh
@@ -1,15 +1,15 @@
#!/bin/sh -ex
-if [ -z "$CIRRUS_CI" ]; then
- cd rpcs3 || exit 1
-fi
+cd rpcs3 || exit 1
+
+shellcheck .ci/*.sh
git config --global --add safe.directory '*'
-# Pull all the submodules except llvm
+# Pull all the submodules except some
# Note: Tried to use git submodule status, but it takes over 20 seconds
# shellcheck disable=SC2046
-git submodule -q update --init $(awk '/path/ && !/llvm/ && !/SPIRV/ { print $3 }' .gitmodules)
+git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/libsdl-org/ && !/curl/ && !/zlib/ { print $3 }' .gitmodules)
mkdir build && cd build || exit 1
@@ -30,7 +30,7 @@ else
export RANLIB=/usr/bin/llvm-ranlib-"$LLVMVER"
fi
-export CFLAGS="$CFLAGS -fuse-ld=${LINKER}"
+export LINKER_FLAG="-fuse-ld=${LINKER}"
cmake .. \
-DCMAKE_INSTALL_PREFIX=/usr \
@@ -38,27 +38,29 @@ cmake .. \
-DUSE_PRECOMPILED_HEADERS=OFF \
-DCMAKE_C_FLAGS="$CFLAGS" \
-DCMAKE_CXX_FLAGS="$CFLAGS" \
+ -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \
-DCMAKE_AR="$AR" \
-DCMAKE_RANLIB="$RANLIB" \
-DUSE_SYSTEM_CURL=ON \
-DUSE_SDL=ON \
+ -DUSE_SYSTEM_SDL=ON \
-DUSE_SYSTEM_FFMPEG=OFF \
+ -DUSE_SYSTEM_OPENCV=ON \
+ -DUSE_DISCORD_RPC=ON \
-DOpenGL_GL_PREFERENCE=LEGACY \
-DLLVM_DIR=/opt/llvm/lib/cmake/llvm \
-DSTATIC_LINK_LLVM=ON \
+ -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \
+ -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \
-G Ninja
ninja; build_status=$?;
cd ..
-shellcheck .ci/*.sh
-
# If it compiled successfully, let's deploy.
-# Azure and Cirrus publish PRs as artifacts only.
-{ [ "$CI_HAS_ARTIFACTS" = "true" ];
-} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false"
-
-if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then
- .ci/deploy-linux.sh
+if [ "$build_status" -eq 0 ]; then
+ .ci/deploy-linux.sh "x86_64"
fi
diff --git a/.ci/build-mac-arm64.sh b/.ci/build-mac-arm64.sh
new file mode 100755
index 0000000000..3c9c864031
--- /dev/null
+++ b/.ci/build-mac-arm64.sh
@@ -0,0 +1,121 @@
+#!/bin/sh -ex
+
+# shellcheck disable=SC2086
+export HOMEBREW_NO_AUTO_UPDATE=1
+export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1
+export HOMEBREW_NO_ENV_HINTS=1
+export HOMEBREW_NO_INSTALL_CLEANUP=1
+
+/opt/homebrew/bin/brew install -f --overwrite --quiet nasm ninja p7zip ccache pipenv gnutls freetype googletest #create-dmg
+/opt/homebrew/bin/brew install -f --quiet ffmpeg@5
+/opt/homebrew/bin/brew install --quiet "llvm@$LLVM_COMPILER_VER" glew cmake sdl3 vulkan-headers coreutils
+/opt/homebrew/bin/brew link -f --quiet "llvm@$LLVM_COMPILER_VER" ffmpeg@5
+
+# moltenvk based on commit for 1.3.0 release
+wget https://raw.githubusercontent.com/Homebrew/homebrew-core/7255441cbcafabaa8950f67c7ec55ff499dbb2d3/Formula/m/molten-vk.rb
+/opt/homebrew/bin/brew install -f --overwrite --formula --quiet ./molten-vk.rb
+export CXX=clang++
+export CC=clang
+
+export BREW_PATH;
+BREW_PATH="$(brew --prefix)"
+export BREW_BIN="/opt/homebrew/bin"
+export BREW_SBIN="/opt/homebrew/sbin"
+export CMAKE_EXTRA_OPTS='-DLLVM_TARGETS_TO_BUILD=arm64'
+
+export WORKDIR;
+WORKDIR="$(pwd)"
+
+# Get Qt
+if [ ! -d "/tmp/Qt/$QT_VER" ]; then
+ mkdir -p "/tmp/Qt"
+ git clone https://github.com/engnr/qt-downloader.git
+ cd qt-downloader
+ git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597
+ # nested Qt 6.9.1 URL workaround
+ # sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
+ # sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
+ # archived Qt 6.7.3 URL workaround
+ sed -i '' "s/official_releases/archive/g" qt-downloader
+ cd "/tmp/Qt"
+ arch -arm64 "$BREW_PATH/bin/pipenv" run pip3 uninstall py7zr requests semantic_version lxml
+ arch -arm64 "$BREW_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml --no-cache
+ mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64"
+ # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.1 workaround
+ arch -arm64 "$BREW_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64"
+fi
+
+cd "$WORKDIR"
+ditto "/tmp/Qt/$QT_VER" "qt-downloader/$QT_VER"
+
+export Qt6_DIR="$WORKDIR/qt-downloader/$QT_VER/clang_64/lib/cmake/Qt$QT_VER_MAIN"
+export SDL3_DIR="$BREW_PATH/opt/sdl3/lib/cmake/SDL3"
+
+export PATH="$BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH"
+export LDFLAGS="-L$BREW_PATH/lib $BREW_PATH/opt/ffmpeg@5/lib/libavcodec.dylib $BREW_PATH/opt/ffmpeg@5/lib/libavformat.dylib $BREW_PATH/opt/ffmpeg@5/lib/libavutil.dylib $BREW_PATH/opt/ffmpeg@5/lib/libswscale.dylib $BREW_PATH/opt/ffmpeg@5/lib/libswresample.dylib $BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++.1.dylib $BREW_PATH/lib/libSDL3.dylib $BREW_PATH/lib/libGLEW.dylib $BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/unwind/libunwind.1.dylib -Wl,-rpath,$BREW_PATH/lib"
+export CPPFLAGS="-I$BREW_PATH/include -I$BREW_PATH/include -no-pie -D__MAC_OS_X_VERSION_MIN_REQUIRED=140000"
+export CFLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000"
+export LIBRARY_PATH="$BREW_PATH/lib"
+export LD_LIBRARY_PATH="$BREW_PATH/lib"
+
+export VULKAN_SDK
+VULKAN_SDK="$BREW_PATH/opt/molten-vk"
+ln -s "$VULKAN_SDK/lib/libMoltenVK.dylib" "$VULKAN_SDK/lib/libvulkan.dylib" || true
+export VK_ICD_FILENAMES="$VULKAN_SDK/share/vulkan/icd.d/MoltenVK_icd.json"
+
+export LLVM_DIR
+LLVM_DIR="$BREW_PATH/opt/llvm@$LLVM_COMPILER_VER"
+# exclude ffmpeg, LLVM, opencv, and sdl from submodule update
+# shellcheck disable=SC2046
+git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/opencv/ && !/SDL/ { print $3 }' .gitmodules)
+
+# 3rdparty fixes
+sed -i '' "s/extern const double NSAppKitVersionNumber;/const double NSAppKitVersionNumber = 1343;/g" 3rdparty/hidapi/hidapi/mac/hid.c
+
+mkdir build && cd build || exit 1
+
+export MACOSX_DEPLOYMENT_TARGET=14.0
+
+"$BREW_PATH/bin/cmake" .. \
+ -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \
+ -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \
+ -DUSE_SDL=ON \
+ -DUSE_DISCORD_RPC=ON \
+ -DUSE_VULKAN=ON \
+ -DUSE_ALSA=OFF \
+ -DUSE_PULSE=OFF \
+ -DUSE_AUDIOUNIT=ON \
+ -DUSE_SYSTEM_FFMPEG=ON \
+ -DLLVM_CCACHE_BUILD=OFF \
+ -DLLVM_BUILD_RUNTIME=OFF \
+ -DLLVM_BUILD_TOOLS=OFF \
+ -DLLVM_INCLUDE_DOCS=OFF \
+ -DLLVM_INCLUDE_EXAMPLES=OFF \
+ -DLLVM_INCLUDE_TESTS=OFF \
+ -DLLVM_INCLUDE_TOOLS=OFF \
+ -DLLVM_INCLUDE_UTILS=OFF \
+ -DLLVM_USE_PERF=OFF \
+ -DLLVM_ENABLE_Z3_SOLVER=OFF \
+ -DUSE_NATIVE_INSTRUCTIONS=OFF \
+ -DUSE_SYSTEM_MVK=ON \
+ -DUSE_SYSTEM_FAUDIO=OFF \
+ -DUSE_SYSTEM_SDL=ON \
+ -DUSE_SYSTEM_OPENCV=ON \
+ "$CMAKE_EXTRA_OPTS" \
+ -DLLVM_TARGET_ARCH=arm64 \
+ -DCMAKE_OSX_ARCHITECTURES=arm64 \
+ -DCMAKE_IGNORE_PATH="$BREW_PATH/lib" \
+ -DCMAKE_IGNORE_PREFIX_PATH=/opt/homebrew/opt \
+ -DCMAKE_CXX_FLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" \
+ -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \
+ -DCMAKE_OSX_SYSROOT="$(xcrun --sdk macosx --show-sdk-path)" \
+ -G Ninja
+
+"$BREW_PATH/bin/ninja"; build_status=$?;
+
+cd ..
+
+# If it compiled successfully, let's deploy.
+if [ "$build_status" -eq 0 ]; then
+ .ci/deploy-mac-arm64.sh
+fi
diff --git a/.ci/build-mac.sh b/.ci/build-mac.sh
index fea4ce5e5e..1e6bae01a1 100755
--- a/.ci/build-mac.sh
+++ b/.ci/build-mac.sh
@@ -1,27 +1,26 @@
#!/bin/sh -ex
+# shellcheck disable=SC2086
export HOMEBREW_NO_AUTO_UPDATE=1
-brew install -f --overwrite nasm ninja git p7zip ccache pipenv #create-dmg
+export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1
+export HOMEBREW_NO_ENV_HINTS=1
+export HOMEBREW_NO_INSTALL_CLEANUP=1
#/usr/sbin/softwareupdate --install-rosetta --agree-to-license
arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
-arch -x86_64 /usr/local/bin/brew install -f --overwrite python@3.12 || arch -x86_64 /usr/local/bin/brew link --overwrite python@3.12
arch -x86_64 /usr/local/bin/brew update
-arch -x86_64 /usr/local/bin/brew uninstall -f --ignore-dependencies ffmpeg
-arch -x86_64 /usr/local/bin/brew install -f --build-from-source ffmpeg@5
-arch -x86_64 /usr/local/bin/brew reinstall -f --build-from-source gnutls freetype
-arch -x86_64 /usr/local/bin/brew install llvm@16 glew cmake sdl2 vulkan-headers coreutils
-arch -x86_64 /usr/local/bin/brew link -f llvm@16 ffmpeg@5
+arch -x86_64 /usr/local/bin/brew install -f --overwrite --quiet python || arch -x86_64 /usr/local/bin/brew link --overwrite python
+arch -x86_64 /usr/local/bin/brew install -f --overwrite --quiet nasm ninja p7zip ccache pipenv gnutls freetype #create-dmg
+arch -x86_64 /usr/local/bin/brew install -f --quiet ffmpeg@5
+arch -x86_64 /usr/local/bin/brew install --quiet "llvm@$LLVM_COMPILER_VER" glew cmake sdl3 vulkan-headers coreutils
+arch -x86_64 /usr/local/bin/brew link -f --quiet "llvm@$LLVM_COMPILER_VER" ffmpeg@5
-# moltenvk based on commit for 1.2.6 release
-wget https://raw.githubusercontent.com/Homebrew/homebrew-core/4ac0cfaca4c2505abe2fcbcc0ce5816572103a6c/Formula/m/molten-vk.rb
-arch -x86_64 /usr/local/bin/brew install -f --overwrite ./molten-vk.rb
-#export MACOSX_DEPLOYMENT_TARGET=12.0
+# moltenvk based on commit for 1.3.0 release
+wget https://raw.githubusercontent.com/Homebrew/homebrew-core/7255441cbcafabaa8950f67c7ec55ff499dbb2d3/Formula/m/molten-vk.rb
+arch -x86_64 /usr/local/bin/brew install -f --overwrite --formula --quiet ./molten-vk.rb
export CXX=clang++
export CC=clang
-export BREW_PATH;
-BREW_PATH="$(brew --prefix)"
export BREW_X64_PATH;
BREW_X64_PATH="$("/usr/local/bin/brew" --prefix)"
export BREW_BIN="/usr/local/bin"
@@ -37,21 +36,28 @@ if [ ! -d "/tmp/Qt/$QT_VER" ]; then
git clone https://github.com/engnr/qt-downloader.git
cd qt-downloader
git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597
+ # nested Qt 6.9.1 URL workaround
+ # sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
+ # sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
+ # archived Qt 6.7.3 URL workaround
+ sed -i '' "s/official_releases/archive/g" qt-downloader
cd "/tmp/Qt"
- "$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml
+ arch -x86_64 "$BREW_X64_PATH/bin/pipenv" --python "$BREW_X64_PATH/bin/python3" run pip3 install py7zr requests semantic_version lxml
mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64"
- "$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia
+ # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.1 workaround
+ arch -x86_64 "$BREW_X64_PATH/bin/pipenv" --python "$BREW_X64_PATH/bin/python3" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64"
fi
cd "$WORKDIR"
ditto "/tmp/Qt/$QT_VER" "qt-downloader/$QT_VER"
export Qt6_DIR="$WORKDIR/qt-downloader/$QT_VER/clang_64/lib/cmake/Qt$QT_VER_MAIN"
-export SDL2_DIR="$BREW_X64_PATH/opt/sdl2/lib/cmake/SDL2"
+export SDL3_DIR="$BREW_X64_PATH/opt/sdl3/lib/cmake/SDL3"
-export PATH="$BREW_X64_PATH/opt/llvm@16/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH"
+export PATH="$BREW_X64_PATH/opt/llvm@$LLVM_COMPILER_VER/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH"
export LDFLAGS="-L$BREW_X64_PATH/lib -Wl,-rpath,$BREW_X64_PATH/lib"
-export CPPFLAGS="-I$BREW_X64_PATH/include -msse -msse2 -mcx16 -no-pie"
+export CPPFLAGS="-I$BREW_X64_PATH/include -msse -msse2 -mcx16 -no-pie -D__MAC_OS_X_VERSION_MIN_REQUIRED=140000"
+export CFLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000"
export LIBRARY_PATH="$BREW_X64_PATH/lib"
export LD_LIBRARY_PATH="$BREW_X64_PATH/lib"
@@ -61,17 +67,21 @@ ln -s "$VULKAN_SDK/lib/libMoltenVK.dylib" "$VULKAN_SDK/lib/libvulkan.dylib"
export VK_ICD_FILENAMES="$VULKAN_SDK/share/vulkan/icd.d/MoltenVK_icd.json"
export LLVM_DIR
-LLVM_DIR="BREW_X64_PATH/opt/llvm@16"
-# exclude ffmpeg, SPIRV and LLVM, and sdl from submodule update
+LLVM_DIR="BREW_X64_PATH/opt/llvm@$LLVM_COMPILER_VER"
+# exclude ffmpeg, LLVM, opencv, and sdl from submodule update
# shellcheck disable=SC2046
-git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/SPIRV/ && !/SDL/ { print $3 }' .gitmodules)
+git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/opencv/ && !/SDL/ { print $3 }' .gitmodules)
# 3rdparty fixes
sed -i '' "s/extern const double NSAppKitVersionNumber;/const double NSAppKitVersionNumber = 1343;/g" 3rdparty/hidapi/hidapi/mac/hid.c
mkdir build && cd build || exit 1
+export MACOSX_DEPLOYMENT_TARGET=14.0
+
"$BREW_X64_PATH/bin/cmake" .. \
+ -DBUILD_RPCS3_TESTS=OFF \
+ -DRUN_RPCS3_TESTS=OFF \
-DUSE_SDL=ON \
-DUSE_DISCORD_RPC=ON \
-DUSE_VULKAN=ON \
@@ -93,19 +103,22 @@ mkdir build && cd build || exit 1
-DUSE_SYSTEM_MVK=ON \
-DUSE_SYSTEM_FAUDIO=OFF \
-DUSE_SYSTEM_SDL=ON \
- $CMAKE_EXTRA_OPTS \
+ -DUSE_SYSTEM_OPENCV=ON \
+ "$CMAKE_EXTRA_OPTS" \
-DLLVM_TARGET_ARCH=X86_64 \
-DCMAKE_OSX_ARCHITECTURES=x86_64 \
- -DCMAKE_IGNORE_PATH="$BREW_PATH/lib" \
+ -DCMAKE_IGNORE_PATH="$BREW_X64_PATH/lib" \
+ -DCMAKE_IGNORE_PREFIX_PATH=/usr/local/opt \
+ -DCMAKE_CXX_FLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" \
+ -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \
+ -DCMAKE_OSX_SYSROOT="$(xcrun --sdk macosx --show-sdk-path)" \
-G Ninja
-"$BREW_PATH/bin/ninja"; build_status=$?;
+"$BREW_X64_PATH/bin/ninja"; build_status=$?;
cd ..
-{ [ "$CI_HAS_ARTIFACTS" = "true" ];
-} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false"
-
-if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then
+# If it compiled successfully, let's deploy.
+if [ "$build_status" -eq 0 ]; then
.ci/deploy-mac.sh
fi
diff --git a/.ci/build-windows-clang.sh b/.ci/build-windows-clang.sh
new file mode 100644
index 0000000000..0880e7f5ed
--- /dev/null
+++ b/.ci/build-windows-clang.sh
@@ -0,0 +1,61 @@
+#!/bin/sh -ex
+
+git config --global --add safe.directory '*'
+
+# Pull all the submodules except some
+# Note: Tried to use git submodule status, but it takes over 20 seconds
+# shellcheck disable=SC2046
+git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/ffmpeg/ && !/curl/ && !/FAudio/ && !/zlib/ { print $3 }' .gitmodules)
+
+mkdir build && cd build || exit 1
+
+export CC="clang"
+export CXX="clang++"
+export LINKER=lld
+export LINKER_FLAG="-fuse-ld=${LINKER}"
+
+if [ -n "$LLVMVER" ]; then
+ export AR="llvm-ar-$LLVMVER"
+ export RANLIB="llvm-ranlib-$LLVMVER"
+else
+ export AR="llvm-ar"
+ export RANLIB="llvm-ranlib"
+fi
+
+cmake .. \
+ -DCMAKE_PREFIX_PATH=/clang64 \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DUSE_NATIVE_INSTRUCTIONS=OFF \
+ -DUSE_PRECOMPILED_HEADERS=OFF \
+ -DCMAKE_C_FLAGS="$CFLAGS" \
+ -DCMAKE_CXX_FLAGS="$CFLAGS" \
+ -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \
+ -DCMAKE_AR="$AR" \
+ -DCMAKE_RANLIB="$RANLIB" \
+ -DUSE_SYSTEM_CURL=ON \
+ -DUSE_FAUDIO=OFF \
+ -DUSE_SDL=ON \
+ -DUSE_SYSTEM_SDL=OFF \
+ -DUSE_SYSTEM_FFMPEG=ON \
+ -DUSE_SYSTEM_OPENCV=ON \
+ -DUSE_SYSTEM_OPENAL=OFF \
+ -DUSE_DISCORD_RPC=ON \
+ -DOpenGL_GL_PREFERENCE=LEGACY \
+ -DWITH_LLVM=ON \
+ -DLLVM_DIR=/clang64/lib/cmake/llvm \
+ -DVulkan_LIBRARY=/clang64/lib/libvulkan-1.dll.a \
+ -DSTATIC_LINK_LLVM=ON \
+ -DBUILD_RPCS3_TESTS=OFF \
+ -DRUN_RPCS3_TESTS=OFF \
+ -G Ninja
+
+ninja; build_status=$?;
+
+cd ..
+
+# If it compiled successfully, let's deploy.
+if [ "$build_status" -eq 0 ]; then
+ .ci/deploy-windows-clang.sh "x86_64"
+fi
diff --git a/.ci/deploy-linux.sh b/.ci/deploy-linux.sh
index f43b0b021a..f8c3d849c3 100755
--- a/.ci/deploy-linux.sh
+++ b/.ci/deploy-linux.sh
@@ -2,33 +2,51 @@
cd build || exit 1
+CPU_ARCH="${1:-x86_64}"
+
if [ "$DEPLOY_APPIMAGE" = "true" ]; then
DESTDIR=AppDir ninja install
- curl -fsSLo /usr/bin/linuxdeploy https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage
+ curl -fsSLo /usr/bin/linuxdeploy "https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-$CPU_ARCH.AppImage"
chmod +x /usr/bin/linuxdeploy
- curl -fsSLo /usr/bin/linuxdeploy-plugin-qt https://github.com/linuxdeploy/linuxdeploy-plugin-qt/releases/download/continuous/linuxdeploy-plugin-qt-x86_64.AppImage
+ curl -fsSLo /usr/bin/linuxdeploy-plugin-qt "https://github.com/linuxdeploy/linuxdeploy-plugin-qt/releases/download/continuous/linuxdeploy-plugin-qt-$CPU_ARCH.AppImage"
chmod +x /usr/bin/linuxdeploy-plugin-qt
- curl -fsSLo linuxdeploy-plugin-checkrt.sh https://github.com/linuxdeploy/linuxdeploy-plugin-checkrt/releases/download/continuous/linuxdeploy-plugin-checkrt-x86_64.sh
+ curl -fsSLo linuxdeploy-plugin-checkrt.sh https://github.com/darealshinji/linuxdeploy-plugin-checkrt/releases/download/continuous/linuxdeploy-plugin-checkrt.sh
chmod +x ./linuxdeploy-plugin-checkrt.sh
- EXTRA_QT_PLUGINS="svg;" APPIMAGE_EXTRACT_AND_RUN=1 linuxdeploy --appdir AppDir --plugin qt
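+ # Bundle the Qt Wayland platform/integration plugins (plus svg) in addition to the defaults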
+ export EXTRA_PLATFORM_PLUGINS="libqwayland-egl.so;libqwayland-generic.so"
+ export EXTRA_QT_PLUGINS="svg;wayland-decoration-client;wayland-graphics-integration-client;wayland-shell-integration;waylandcompositor"
+
+ APPIMAGE_EXTRACT_AND_RUN=1 linuxdeploy --appdir AppDir --plugin qt --plugin checkrt
# Remove libwayland-client because it has platform-dependent exports and breaks other OSes
rm -f ./AppDir/usr/lib/libwayland-client.so*
+ # Remove libvulkan because it causes issues with gamescope
+ rm -f ./AppDir/usr/lib/libvulkan.so*
+
+ # Remove unused Qt6 libraries
+ rm -f ./AppDir/usr/lib/libQt6VirtualKeyboard.so*
+ rm -f ./AppDir/usr/plugins/platforminputcontexts/libqtvirtualkeyboardplugin.so*
+
# Remove git directory containing local commit history file
rm -rf ./AppDir/usr/share/rpcs3/git
- ./linuxdeploy-plugin-checkrt.sh --appdir AppDir
+ curl -fsSLo /uruntime "https://github.com/VHSgunzo/uruntime/releases/download/v0.3.4/uruntime-appimage-dwarfs-$CPU_ARCH"
+ chmod +x /uruntime
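+ # Pack AppDir into a DwarFS-compressed AppImage with the uruntime runtime (replaces the old appimagetool step)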
+ /uruntime --appimage-mkdwarfs -f --set-owner 0 --set-group 0 --no-history --no-create-timestamp \
+ --compression zstd:level=22 -S26 -B32 --header /uruntime -i AppDir -o RPCS3.AppImage
- linuxdeploy --appimage-extract
- ./squashfs-root/plugins/linuxdeploy-plugin-appimage/usr/bin/appimagetool AppDir -g
+ APPIMAGE_SUFFIX="linux_${CPU_ARCH}"
+ if [ "$CPU_ARCH" = "x86_64" ]; then
+ # Preserve backwards compatibility; previous versions never included the full arch in the name.
+ APPIMAGE_SUFFIX="linux64"
+ fi
COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ../rpcs3/rpcs3_version.cpp)
COMM_COUNT="$(git rev-list --count HEAD)"
COMM_HASH="$(git rev-parse --short=8 HEAD)"
- RPCS3_APPIMAGE="rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_linux64.AppImage"
+ RPCS3_APPIMAGE="rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_${APPIMAGE_SUFFIX}.AppImage"
mv ./RPCS3*.AppImage "$RPCS3_APPIMAGE"
diff --git a/.ci/deploy-llvm.sh b/.ci/deploy-llvm.sh
new file mode 100644
index 0000000000..35e5d780af
--- /dev/null
+++ b/.ci/deploy-llvm.sh
@@ -0,0 +1,20 @@
+#!/bin/sh -ex
+
+# First let's print some info about our caches
+"$(cygpath -u "$CCACHE_BIN_DIR")"/ccache.exe --show-stats -v
+
+# BUILD_blablabla is CI specific, so we wrap it for portability
+ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY"
+BUILD="llvmlibs_mt.7z"
+
+# Package artifacts
+7z a -m0=LZMA2 -mx9 "$BUILD" ./build/lib/Release-x64/llvm_build
+
+# Generate sha256 hashes
+# Write to file for GitHub releases
+sha256sum "$BUILD" | awk '{ print $1 }' | tee "$BUILD.sha256"
+echo "$(cat "$BUILD.sha256");$(stat -c %s "$BUILD")B" > GitHubReleaseMessage.txt
+
+# Move files to publishing directory
+cp -- "$BUILD" "$ARTIFACT_DIR"
+cp -- "$BUILD.sha256" "$ARTIFACT_DIR"
diff --git a/.ci/deploy-mac-arm64.sh b/.ci/deploy-mac-arm64.sh
new file mode 100755
index 0000000000..e7de472378
--- /dev/null
+++ b/.ci/deploy-mac-arm64.sh
@@ -0,0 +1,74 @@
+#!/bin/sh -ex
+
+# shellcheck disable=SC2086
+cd build || exit 1
+
+# Gather explicit version number and number of commits
+COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ../rpcs3/rpcs3_version.cpp)
+COMM_COUNT=$(git rev-list --count HEAD)
+COMM_HASH=$(git rev-parse --short=8 HEAD)
+
+AVVER="${COMM_TAG}-${COMM_COUNT}"
+
+# AVVER is used for GitHub releases, it is the version number.
+echo "AVVER=$AVVER" >> ../.ci/ci-vars.env
+
+cd bin
+mkdir "rpcs3.app/Contents/lib/" || true
+
+cp "$(realpath /opt/homebrew/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++abi.1.0.dylib)" "rpcs3.app/Contents/Frameworks/libc++abi.1.dylib"
+cp "$(realpath /opt/homebrew/lib/libsharpyuv.0.dylib)" "rpcs3.app/Contents/lib/libsharpyuv.0.dylib"
+cp "$(realpath /opt/homebrew/lib/libintl.8.dylib)" "rpcs3.app/Contents/lib/libintl.8.dylib"
+
+rm -rf "rpcs3.app/Contents/Frameworks/QtPdf.framework" \
+"rpcs3.app/Contents/Frameworks/QtQml.framework" \
+"rpcs3.app/Contents/Frameworks/QtQmlModels.framework" \
+"rpcs3.app/Contents/Frameworks/QtQuick.framework" \
+"rpcs3.app/Contents/Frameworks/QtVirtualKeyboard.framework" \
+"rpcs3.app/Contents/Plugins/platforminputcontexts" \
+"rpcs3.app/Contents/Plugins/virtualkeyboard" \
+"rpcs3.app/Contents/Resources/git"
+
+../../.ci/optimize-mac.sh rpcs3.app
+
+# Hack: strip the Homebrew rpaths so the bundle only resolves its own copied libraries
+install_name_tool \
+-delete_rpath /opt/homebrew/lib \
+-delete_rpath /opt/homebrew/opt/llvm@$LLVM_COMPILER_VER/lib RPCS3.app/Contents/MacOS/rpcs3
+#-delete_rpath /opt/homebrew1/Cellar/sdl3/3.2.8/lib
+
+# Need to do this rename hack due to case insensitive filesystem
+mv rpcs3.app RPCS3_.app
+mv RPCS3_.app RPCS3.app
+
+# NOTE: "--deep" is deprecated
+codesign --deep -fs - RPCS3.app
+
+echo "[InternetShortcut]" > Quickstart.url
+echo "URL=https://rpcs3.net/quickstart" >> Quickstart.url
+echo "IconIndex=0" >> Quickstart.url
+
+#DMG_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos_arm64.dmg"
+#"$BREW_X64_PATH/bin/create-dmg" --volname RPCS3 \
+#--window-size 800 400 \
+#--icon-size 100 \
+#--icon rpcs3.app 200 190 \
+#--add-file Quickstart.url Quickstart.url 400 20 \
+#--hide-extension rpcs3.app \
+#--hide-extension Quickstart.url \
+#--app-drop-link 600 185 \
+#--skip-jenkins \
+#--format ULMO \
+#"$DMG_FILEPATH" \
+#RPCS3.app
+#FILESIZE=$(stat -f %z "$DMG_FILEPATH")
+#SHA256SUM=$(shasum -a 256 "$DMG_FILEPATH" | awk '{ print $1 }')
+
+ARCHIVE_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos_arm64.7z"
+"$BREW_PATH/bin/7z" a -mx9 "$ARCHIVE_FILEPATH" RPCS3.app Quickstart.url
+FILESIZE=$(stat -f %z "$ARCHIVE_FILEPATH")
+SHA256SUM=$(shasum -a 256 "$ARCHIVE_FILEPATH" | awk '{ print $1 }')
+
+cd ..
+echo "${SHA256SUM};${FILESIZE}B" > "$RELEASE_MESSAGE"
+cd bin
diff --git a/.ci/deploy-mac.sh b/.ci/deploy-mac.sh
index 20de7023ea..c293358748 100755
--- a/.ci/deploy-mac.sh
+++ b/.ci/deploy-mac.sh
@@ -1,5 +1,6 @@
#!/bin/sh -ex
+# shellcheck disable=SC2086
cd build || exit 1
# Gather explicit version number and number of commits
@@ -15,7 +16,7 @@ echo "AVVER=$AVVER" >> ../.ci/ci-vars.env
cd bin
mkdir "rpcs3.app/Contents/lib/"
-cp "/usr/local/opt/llvm@16/lib/c++/libc++abi.1.0.dylib" "rpcs3.app/Contents/lib/libc++abi.1.dylib"
+cp "/usr/local/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++abi.1.0.dylib" "rpcs3.app/Contents/lib/libc++abi.1.dylib"
cp "$(realpath /usr/local/lib/libsharpyuv.0.dylib)" "rpcs3.app/Contents/lib/libsharpyuv.0.dylib"
cp "$(realpath /usr/local/lib/libintl.8.dylib)" "rpcs3.app/Contents/lib/libintl.8.dylib"
@@ -28,10 +29,21 @@ rm -rf "rpcs3.app/Contents/Frameworks/QtPdf.framework" \
"rpcs3.app/Contents/Plugins/virtualkeyboard" \
"rpcs3.app/Contents/Resources/git"
+../../.ci/optimize-mac.sh rpcs3.app
+
# Need to do this rename hack due to case insensitive filesystem
mv rpcs3.app RPCS3_.app
mv RPCS3_.app RPCS3.app
+# Hack: strip the Homebrew rpaths so the bundle only resolves its own copied libraries
+install_name_tool \
+-delete_rpath /usr/local/lib \
+-delete_rpath /usr/local/opt/llvm@$LLVM_COMPILER_VER/lib RPCS3.app/Contents/MacOS/rpcs3
+#-delete_rpath /usr/local/Cellar/sdl3/3.2.8/lib
+
+# NOTE: "--deep" is deprecated
+codesign --deep -fs - RPCS3.app
+
echo "[InternetShortcut]" > Quickstart.url
echo "URL=https://rpcs3.net/quickstart" >> Quickstart.url
echo "IconIndex=0" >> Quickstart.url
diff --git a/.ci/deploy-windows-clang.sh b/.ci/deploy-windows-clang.sh
new file mode 100644
index 0000000000..d45cb45acf
--- /dev/null
+++ b/.ci/deploy-windows-clang.sh
@@ -0,0 +1,38 @@
+#!/bin/sh -ex
+
+# source ci-vars.env
+# shellcheck disable=SC1091
+. .ci/ci-vars.env
+
+cd build || exit 1
+
+CPU_ARCH="${1:-x86_64}"
+
+echo "Deploying rpcs3 windows clang $CPU_ARCH"
+
+# BUILD_blablabla is CI specific, so we wrap it for portability
+ARTIFACT_DIR=$(cygpath -u "$BUILD_ARTIFACTSTAGINGDIRECTORY")
+MSYS2_CLANG_BIN=$(cygpath -w /clang64/bin)
+MSYS2_USR_BIN=$(cygpath -w /usr/bin)
+
+echo "Installing dependencies of: ./bin/rpcs3.exe (MSYS2 dir is '$MSYS2_CLANG_BIN', usr dir is '$MSYS2_USR_BIN')"
+cmake -DMSYS2_CLANG_BIN="$MSYS2_CLANG_BIN" -DMSYS2_USR_BIN="$MSYS2_USR_BIN" -Dexe=./bin/rpcs3.exe -P ../buildfiles/cmake/CopyRuntimeDependencies.cmake
+
+# Prepare compatibility and SDL database for packaging
+mkdir ./bin/config
+mkdir ./bin/config/input_configs
+curl -fsSL 'https://raw.githubusercontent.com/gabomdq/SDL_GameControllerDB/master/gamecontrollerdb.txt' 1> ./bin/config/input_configs/gamecontrollerdb.txt
+curl -fsSL 'https://rpcs3.net/compatibility?api=v1&export' | iconv -t UTF-8 1> ./bin/GuiConfigs/compat_database.dat
+
+# Package artifacts
+7z a -m0=LZMA2 -mx9 "$BUILD" ./bin/*
+
+# Generate sha256 hashes
+# Write to file for GitHub releases
+sha256sum "$BUILD" | awk '{ print $1 }' | tee "$BUILD.sha256"
+echo "$(cat "$BUILD.sha256");$(stat -c %s "$BUILD")B" > GitHubReleaseMessage.txt
+
+# Move files to publishing directory
+mkdir -p "$ARTIFACT_DIR"
+cp -- "$BUILD" "$ARTIFACT_DIR"
+cp -- "$BUILD.sha256" "$ARTIFACT_DIR"
diff --git a/.ci/deploy-windows.sh b/.ci/deploy-windows.sh
index 7a7522f8d4..b885831511 100755
--- a/.ci/deploy-windows.sh
+++ b/.ci/deploy-windows.sh
@@ -1,11 +1,13 @@
#!/bin/sh -ex
-# BUILD_blablabla is Azure specific, so we wrap it for portability
+# First let's print some info about our caches
+"$(cygpath -u "$CCACHE_BIN_DIR")"/ccache.exe --show-stats -v
+
+# BUILD_blablabla is CI specific, so we wrap it for portability
ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY"
# Remove unnecessary files
rm -f ./bin/rpcs3.exp ./bin/rpcs3.lib ./bin/rpcs3.pdb ./bin/vc_redist.x64.exe
-rm -rf ./bin/git
# Prepare compatibility and SDL database for packaging
mkdir ./bin/config
diff --git a/.ci/docker.env b/.ci/docker.env
index 2b36fb34c0..eb70b68c18 100644
--- a/.ci/docker.env
+++ b/.ci/docker.env
@@ -1,5 +1,4 @@
-# Variables set by Azure Pipelines
-CI_HAS_ARTIFACTS
+# Variables set by CI
BUILD_REASON
BUILD_SOURCEVERSION
BUILD_ARTIFACTSTAGINGDIRECTORY
@@ -8,6 +7,7 @@ BUILD_SOURCEBRANCHNAME
APPDIR
ARTDIR
RELEASE_MESSAGE
+RUN_UNIT_TESTS
# Variables for build matrix
COMPILER
DEPLOY_APPIMAGE
diff --git a/.ci/export-azure-vars.sh b/.ci/export-azure-vars.sh
deleted file mode 100755
index 033dd41cc8..0000000000
--- a/.ci/export-azure-vars.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh -e
-
-# Export variables for later stages of the Azure pipeline
-# Values done in this manner will appear as environment variables
-# in later stages.
-
-# From pure-sh-bible
-# Setting 'IFS' tells 'read' where to split the string.
-while IFS='=' read -r key val; do
- # Skip over lines containing comments.
- [ "${key##\#*}" ] || continue
- echo "##vso[task.setvariable variable=$key]$val"
-done < ".ci/ci-vars.env"
diff --git a/.ci/export-cirrus-vars.sh b/.ci/export-cirrus-vars.sh
deleted file mode 100644
index 561e77e92f..0000000000
--- a/.ci/export-cirrus-vars.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh -e
-
-# Export variables for later stages of the Cirrus pipeline
-# Values done in this manner will appear as environment variables
-# in later stages.
-
-# From pure-sh-bible
-# Setting 'IFS' tells 'read' where to split the string.
-while IFS='=' read -r key val; do
- # Skip over lines containing comments.
- [ "${key##\#*}" ] || continue
- export "$key"="$val"
-done < ".ci/ci-vars.env"
diff --git a/.ci/generate-qt-ts.sh b/.ci/generate-qt-ts.sh
new file mode 100755
index 0000000000..a9fc139a93
--- /dev/null
+++ b/.ci/generate-qt-ts.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -ex
+
+mkdir -p ../translations
+
+LUPDATE_PATH=$(find /usr -name lupdate -type f 2>/dev/null | head -n 1)
+if [ -z "$LUPDATE_PATH" ]; then
+ echo "Error: lupdate not found!"
+ exit 1
+else
+ echo "lupdate found at: $LUPDATE_PATH"
+ $LUPDATE_PATH -recursive . -ts ../translations/rpcs3_template.ts
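+ # Strip the leading "../" from the filename attributes in the generated template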
+ sed -i 's|filename="\.\./|filename="./|g' ../translations/rpcs3_template.ts
+fi
\ No newline at end of file
diff --git a/.ci/get_keys-windows.sh b/.ci/get_keys-windows.sh
old mode 100644
new mode 100755
index 9ef56dda62..8384b4de5d
--- a/.ci/get_keys-windows.sh
+++ b/.ci/get_keys-windows.sh
@@ -1,4 +1,3 @@
#!/bin/sh -ex
-curl -fLo "./llvm.lock" "https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-16.0.1/llvmlibs_mt.7z.sha256"
-curl -fLo "./glslang.lock" "https://github.com/RPCS3/glslang/releases/download/custom-build-win/glslanglibs_mt.7z.sha256"
+curl -fLo "./llvm.lock" "https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-${LLVM_VER}/llvmlibs_mt.7z.sha256"
diff --git a/.ci/install-freebsd.sh b/.ci/install-freebsd.sh
index c6528ad923..04efc6c0e9 100755
--- a/.ci/install-freebsd.sh
+++ b/.ci/install-freebsd.sh
@@ -8,11 +8,11 @@ sed -i '' 's/quarterly/latest/' /etc/pkg/FreeBSD.conf
export ASSUME_ALWAYS_YES=true
pkg info # debug
-# Prefer newer Clang than in base system (see also .ci/build-freebsd.sh)
-pkg install llvm16
+# WITH_LLVM
+pkg install "llvm$LLVM_COMPILER_VER"
-# Mandatory dependencies (qt6-base is pulled via qt6-multimedia)
-pkg install git ccache cmake ninja qt6-multimedia qt6-svg glew openal-soft ffmpeg
+# Mandatory dependencies (qtX-base is pulled via qtX-multimedia)
+pkg install git ccache cmake ninja "qt$QT_VER_MAIN-multimedia" "qt$QT_VER_MAIN-svg" glew openal-soft ffmpeg
-# Optional dependencies (libevdev is pulled by qt6-base)
-pkg install pkgconf alsa-lib pulseaudio sdl2 evdev-proto vulkan-headers vulkan-loader
+# Optional dependencies (libevdev is pulled by qtX-base)
+pkg install pkgconf alsa-lib pulseaudio sdl3 evdev-proto vulkan-headers vulkan-loader opencv
diff --git a/.ci/optimize-mac.sh b/.ci/optimize-mac.sh
new file mode 100755
index 0000000000..5fea7877f4
--- /dev/null
+++ b/.ci/optimize-mac.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
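+# Thin every file in the given .app bundle down to the architecture of its main executable to shrink the bundle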
+file_path=$(find "$1/Contents/MacOS" -type f | head -n 1)
+
+if [ -z "$file_path" ]; then
+ echo "No executable file found in $1/Contents/MacOS" >&2
+ exit 1
+fi
+
+
+target_architecture="$(lipo "$file_path" -archs)"
+
+if [ -z "$target_architecture" ]; then
+ exit 1
+fi
+
+# shellcheck disable=SC3045
+find "$1" -type f -print0 | while IFS= read -r -d '' file; do
+ echo "Thinning $file -> $target_architecture"
+ lipo "$file" -thin "$target_architecture" -output "$file" || true
+done
diff --git a/.ci/setup-llvm.sh b/.ci/setup-llvm.sh
new file mode 100644
index 0000000000..a54901309e
--- /dev/null
+++ b/.ci/setup-llvm.sh
@@ -0,0 +1,63 @@
+#!/bin/sh -ex
+
+# Resource/dependency URLs
+CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.11.2/ccache-4.11.2-windows-x86_64.zip"
+
+DEP_URLS=" \
+ $CCACHE_URL"
+
+# CI doesn't make a cache dir if it doesn't exist, so we do it manually
+[ -d "$DEPS_CACHE_DIR" ] || mkdir "$DEPS_CACHE_DIR"
+
+# Pull the llvm submodule
+# shellcheck disable=SC2046
+git submodule -q update --init --depth=1 -- 3rdparty/llvm
+
+# Git bash doesn't have rev, so here it is
+rev()
+{
+ echo "$1" | awk '{ for(i = length($0); i != 0; --i) { a = a substr($0, i, 1); } } END { print a }'
+}
+
+# Usage: download_and_verify url checksum algo file
+# Check to see if a file is already cached, and the checksum matches. If not, download it.
+# Tries up to 3 times
+download_and_verify()
+{
+ url="$1"
+ correctChecksum="$2"
+ algo="$3"
+ fileName="$4"
+
+ for _ in 1 2 3; do
+ [ -e "$DEPS_CACHE_DIR/$fileName" ] || curl -fLo "$DEPS_CACHE_DIR/$fileName" "$url"
+ fileChecksum=$("${algo}sum" "$DEPS_CACHE_DIR/$fileName" | awk '{ print $1 }')
+ [ "$fileChecksum" = "$correctChecksum" ] && return 0
+ done
+
+ return 1;
+}
+
+# Some dependencies install here
+[ -d "./build/lib_ext/Release-x64" ] || mkdir -p "./build/lib_ext/Release-x64"
+
+for url in $DEP_URLS; do
+ # Get the filename from the URL and remove query strings (?arg=something).
+ fileName="$(rev "$(rev "$url" | cut -d'/' -f1)" | cut -d'?' -f1)"
+ [ -z "$fileName" ] && echo "Unable to parse url: $url" && exit 1
+
+ # shellcheck disable=SC1003
+ case "$url" in
+ *ccache*) checksum=$CCACHE_SHA; algo="sha256"; outDir="$CCACHE_BIN_DIR" ;;
+ *) echo "Unknown url resource: $url"; exit 1 ;;
+ esac
+
+ download_and_verify "$url" "$checksum" "$algo" "$fileName"
+ 7z x -y "$DEPS_CACHE_DIR/$fileName" -aos -o"$outDir"
+done
+
+# Setup ccache tool
+[ -d "$CCACHE_DIR" ] || mkdir -p "$(cygpath -u "$CCACHE_DIR")"
+CCACHE_SH_DIR=$(cygpath -u "$CCACHE_BIN_DIR")
+mv "$CCACHE_SH_DIR"/ccache-*/* "$CCACHE_SH_DIR"
+cp "$CCACHE_SH_DIR"/ccache.exe "$CCACHE_SH_DIR"/cl.exe
diff --git a/.ci/setup-windows-ci-vars.sh b/.ci/setup-windows-ci-vars.sh
new file mode 100644
index 0000000000..11373e0716
--- /dev/null
+++ b/.ci/setup-windows-ci-vars.sh
@@ -0,0 +1,39 @@
+#!/bin/sh -ex
+
+CPU_ARCH="${1:-win64}"
+COMPILER="${2:-msvc}"
+
+# These are CI specific, so we wrap them for portability
+REPO_NAME="$BUILD_REPOSITORY_NAME"
+REPO_BRANCH="$BUILD_SOURCEBRANCHNAME"
+PR_NUMBER="$BUILD_PR_NUMBER"
+
+# Gather explicit version number and number of commits
+COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp)
+COMM_COUNT=$(git rev-list --count HEAD)
+COMM_HASH=$(git rev-parse --short=8 HEAD)
+
+# Format the above into filenames
+if [ -n "$PR_NUMBER" ]; then
+ AVVER="${COMM_TAG}-${COMM_HASH}"
+ BUILD_RAW="rpcs3-v${AVVER}_${CPU_ARCH}_${COMPILER}"
+ BUILD="${BUILD_RAW}.7z"
+else
+ AVVER="${COMM_TAG}-${COMM_COUNT}"
+ BUILD_RAW="rpcs3-v${AVVER}-${COMM_HASH}_${CPU_ARCH}_${COMPILER}"
+ BUILD="${BUILD_RAW}.7z"
+fi
+
+# BRANCH is used for experimental build warnings for pr builds, used in main_window.cpp.
+# BUILD is the name of the release artifact
+# BUILD_RAW is just filename
+# AVVER is used for GitHub releases, it is the version number.
+BRANCH="${REPO_NAME}/${REPO_BRANCH}"
+
+# SC2129: group the echoes so they share a single append redirect
+{
+ echo "BRANCH=$BRANCH"
+ echo "BUILD=$BUILD"
+ echo "BUILD_RAW=$BUILD_RAW"
+ echo "AVVER=$AVVER"
+} >> .ci/ci-vars.env
diff --git a/.ci/setup-windows.sh b/.ci/setup-windows.sh
index a8e68bb5ee..11a68367b8 100755
--- a/.ci/setup-windows.sh
+++ b/.ci/setup-windows.sh
@@ -1,28 +1,22 @@
#!/bin/sh -ex
-# These are Azure specific, so we wrap them for portability
-REPO_NAME="$BUILD_REPOSITORY_NAME"
-REPO_BRANCH="$SYSTEM_PULLREQUEST_SOURCEBRANCH"
-PR_NUMBER="$SYSTEM_PULLREQUEST_PULLREQUESTID"
-
# Resource/dependency URLs
# Qt mirrors can be volatile and slow, so we list 2
#QT_HOST="http://mirrors.ocf.berkeley.edu/qt/"
QT_HOST="http://qt.mirror.constant.com/"
QT_URL_VER=$(echo "$QT_VER" | sed "s/\.//g")
QT_VER_MSVC_UP=$(echo "${QT_VER_MSVC}" | tr '[:lower:]' '[:upper:]')
-QT_PREFIX="online/qtsdkrepository/windows_x86/desktop/qt${QT_VER_MAIN}_${QT_URL_VER}/qt.qt${QT_VER_MAIN}.${QT_URL_VER}."
+QT_PREFIX="online/qtsdkrepository/windows_x86/desktop/qt${QT_VER_MAIN}_${QT_URL_VER}/qt${QT_VER_MAIN}_${QT_URL_VER}/qt.qt${QT_VER_MAIN}.${QT_URL_VER}."
QT_PREFIX_2="win64_${QT_VER_MSVC}_64/${QT_VER}-0-${QT_DATE}"
-QT_SUFFIX="-Windows-Windows_10_22H2-${QT_VER_MSVC_UP}-Windows-Windows_10_22H2-X86_64.7z"
+QT_SUFFIX="-Windows-Windows_11_23H2-${QT_VER_MSVC_UP}-Windows-Windows_11_23H2-X86_64.7z"
QT_BASE_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtbase${QT_SUFFIX}"
QT_DECL_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtdeclarative${QT_SUFFIX}"
QT_TOOL_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qttools${QT_SUFFIX}"
QT_MM_URL="${QT_HOST}${QT_PREFIX}addons.qtmultimedia.${QT_PREFIX_2}qtmultimedia${QT_SUFFIX}"
QT_SVG_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtsvg${QT_SUFFIX}"
-QT_5CMP_URL="${QT_HOST}${QT_PREFIX}qt5compat.${QT_PREFIX_2}qt5compat${QT_SUFFIX}"
-LLVMLIBS_URL='https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-16.0.1/llvmlibs_mt.7z'
-GLSLANG_URL='https://github.com/RPCS3/glslang/releases/latest/download/glslanglibs_mt.7z'
-VULKAN_SDK_URL="https://www.dropbox.com/scl/fi/sjjh0fc4ld281pjbl2xzu/VulkanSDK-1.3.268.0-Installer.exe?rlkey=f6wzc0lvms5vwkt2z3qabfv9d&dl=1"
+LLVMLIBS_URL="https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-${LLVM_VER}/llvmlibs_mt.7z"
+VULKAN_SDK_URL="https://www.dropbox.com/scl/fi/sjjh0fc4ld281pjbl2xzu/VulkanSDK-${VULKAN_VER}-Installer.exe?rlkey=f6wzc0lvms5vwkt2z3qabfv9d&dl=1"
+CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.11.2/ccache-4.11.2-windows-x86_64.zip"
DEP_URLS=" \
$QT_BASE_URL \
@@ -30,18 +24,17 @@ DEP_URLS=" \
$QT_TOOL_URL \
$QT_MM_URL \
$QT_SVG_URL \
- $QT_5CMP_URL \
$LLVMLIBS_URL \
- $GLSLANG_URL \
- $VULKAN_SDK_URL"
+ $VULKAN_SDK_URL \
+ $CCACHE_URL"
-# Azure pipelines doesn't make a cache dir if it doesn't exist, so we do it manually
-[ -d "$CACHE_DIR" ] || mkdir "$CACHE_DIR"
+# CI doesn't make a cache dir if it doesn't exist, so we do it manually
+[ -d "$DEPS_CACHE_DIR" ] || mkdir "$DEPS_CACHE_DIR"
# Pull all the submodules except llvm, since it is built separately and we just download that build
# Note: Tried to use git submodule status, but it takes over 20 seconds
# shellcheck disable=SC2046
-git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/FAudio/ && !/llvm/ && !/SPIRV/ { print $3 }' .gitmodules)
+git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/FAudio/ && !/llvm/ { print $3 }' .gitmodules)
# Git bash doesn't have rev, so here it is
rev()
@@ -60,17 +53,16 @@ download_and_verify()
fileName="$4"
for _ in 1 2 3; do
- [ -e "$CACHE_DIR/$fileName" ] || curl -fLo "$CACHE_DIR/$fileName" "$url"
- fileChecksum=$("${algo}sum" "$CACHE_DIR/$fileName" | awk '{ print $1 }')
+ [ -e "$DEPS_CACHE_DIR/$fileName" ] || curl -fLo "$DEPS_CACHE_DIR/$fileName" "$url"
+ fileChecksum=$("${algo}sum" "$DEPS_CACHE_DIR/$fileName" | awk '{ print $1 }')
[ "$fileChecksum" = "$correctChecksum" ] && return 0
- rm "$CACHE_DIR/$fileName"
done
return 1;
}
# Some dependencies install here
-[ -d "./lib" ] || mkdir "./lib"
+[ -d "./build/lib_ext/Release-x64" ] || mkdir -p "./build/lib_ext/Release-x64"
for url in $DEP_URLS; do
# Get the filename from the URL and remove query strings (?arg=something).
@@ -79,14 +71,14 @@ for url in $DEP_URLS; do
# shellcheck disable=SC1003
case "$url" in
- *qt*) checksum=$(curl -fL "${url}.sha1"); algo="sha1"; outDir='C:\Qt\' ;;
- *llvm*) checksum=$(curl -fL "${url}.sha256"); algo="sha256"; outDir="./3rdparty/llvm" ;;
- *glslang*) checksum=$(curl -fL "${url}.sha256"); algo="sha256"; outDir="./lib/Release-x64" ;;
+ *qt*) checksum=$(curl -fL "${url}.sha1"); algo="sha1"; outDir="$QTDIR/" ;;
+ *llvm*) checksum=$(curl -fL "${url}.sha256"); algo="sha256"; outDir="./build/lib_ext/Release-x64" ;;
+ *ccache*) checksum=$CCACHE_SHA; algo="sha256"; outDir="$CCACHE_BIN_DIR" ;;
*Vulkan*)
# Vulkan setup needs to be run in batch environment
# Need to subshell this or else it doesn't wait
download_and_verify "$url" "$VULKAN_SDK_SHA" "sha256" "$fileName"
- cp "$CACHE_DIR/$fileName" .
+ cp "$DEPS_CACHE_DIR/$fileName" .
_=$(echo "$fileName --accept-licenses --default-answer --confirm-command install" | cmd)
continue
;;
@@ -94,27 +86,11 @@ for url in $DEP_URLS; do
esac
download_and_verify "$url" "$checksum" "$algo" "$fileName"
- 7z x -y "$CACHE_DIR/$fileName" -aos -o"$outDir"
+ 7z x -y "$DEPS_CACHE_DIR/$fileName" -aos -o"$outDir"
done
-# Gather explicit version number and number of commits
-COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp)
-COMM_COUNT=$(git rev-list --count HEAD)
-COMM_HASH=$(git rev-parse --short=8 HEAD)
-
-# Format the above into filenames
-if [ -n "$PR_NUMBER" ]; then
- AVVER="${COMM_TAG}-${COMM_HASH}"
- BUILD="rpcs3-v${AVVER}_win64.7z"
-else
- AVVER="${COMM_TAG}-${COMM_COUNT}"
- BUILD="rpcs3-v${AVVER}-${COMM_HASH}_win64.7z"
-fi
-
-# BRANCH is used for experimental build warnings for pr builds, used in main_window.cpp.
-# BUILD is the name of the release artifact
-# AVVER is used for GitHub releases, it is the version number.
-BRANCH="${REPO_NAME}/${REPO_BRANCH}"
-echo "BRANCH=$BRANCH" > .ci/ci-vars.env
-echo "BUILD=$BUILD" >> .ci/ci-vars.env
-echo "AVVER=$AVVER" >> .ci/ci-vars.env
+# Setup ccache tool
+[ -d "$CCACHE_DIR" ] || mkdir -p "$(cygpath -u "$CCACHE_DIR")"
+CCACHE_SH_DIR=$(cygpath -u "$CCACHE_BIN_DIR")
+mv "$CCACHE_SH_DIR"/ccache-*/* "$CCACHE_SH_DIR"
+cp "$CCACHE_SH_DIR"/ccache.exe "$CCACHE_SH_DIR"/cl.exe
diff --git a/.cirrus.yml b/.cirrus.yml
deleted file mode 100644
index 4a1e574518..0000000000
--- a/.cirrus.yml
+++ /dev/null
@@ -1,150 +0,0 @@
-env:
- CIRRUS_CLONE_DEPTH: 0 # Unshallow clone to obtain proper GIT_VERSION
- BUILD_REPOSITORY_NAME: $CIRRUS_REPO_FULL_NAME
- SYSTEM_PULLREQUEST_SOURCEBRANCH: $CIRRUS_BRANCH
- SYSTEM_PULLREQUEST_PULLREQUESTID: $CIRRUS_PR
- BUILD_SOURCEVERSION: $CIRRUS_CHANGE_IN_REPO
- BUILD_SOURCEBRANCHNAME: $CIRRUS_BRANCH
- RPCS3_TOKEN: ENCRYPTED[!a4c3850e29ab150692286a74bec29819d25971a7ec431b86de2a35f7ed90c5b2ab3c93469f9298e30924d843599110e9!]
- QT_VER_MAIN: '6'
- QT_VER: '6.6.1'
-
-# windows_task:
-# matrix:
-# - name: Cirrus Windows
-# windows_container:
-# image: cirrusci/windowsservercore:visualstudio2019
-# cpu: 8
-# memory: 16G
-# env:
-# CIRRUS_SHELL: "bash"
-# COMPILER: msvc
-# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}\artifacts\
-# QT_VER_MSVC: 'msvc2019'
-# QT_DATE: '202311210527'
-# QTDIR: C:\Qt\${QT_VER}\${QT_VER_MSVC}_64
-# VULKAN_VER: '1.3.268.0'
-# VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5'
-# VULKAN_SDK: C:\VulkanSDK\${VULKAN_VER}
-# CACHE_DIR: "./cache"
-# UPLOAD_COMMIT_HASH: 7d09e3be30805911226241afbb14f8cdc2eb054e
-# UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-win"
-# deps_cache:
-# folder: "./cache"
-# #obj_cache:
-# # folder: "./tmp"
-# #obj2_cache:
-# # folder: "./rpcs3/x64"
-# setup_script:
-# - './.ci/get_keys-windows.sh'
-# - './.ci/setup-windows.sh'
-# rpcs3_script:
-# - export PATH=${PATH}:"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin"
-# - msbuild.exe rpcs3.sln //p:Configuration=Release //m
-# deploy_script:
-# - mkdir artifacts
-# - source './.ci/export-cirrus-vars.sh'
-# - './.ci/deploy-windows.sh'
-# artifacts:
-# name: Artifact
-# path: "*.7z*"
-# push_script: |
-# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ]; then
-# source './.ci/export-cirrus-vars.sh'
-# './.ci/github-upload.sh'
-# fi;
-
-# linux_task:
-# container:
-# image: rpcs3/rpcs3-ci-focal:1.5
-# cpu: 4
-# memory: 16G
-# env:
-# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}/artifacts
-# ARTDIR: ${CIRRUS_WORKING_DIR}/artifacts/
-# CCACHE_DIR: "/tmp/ccache_dir"
-# CCACHE_MAXSIZE: 300M
-# CI_HAS_ARTIFACTS: true
-# UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f
-# UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux"
-# DEPLOY_APPIMAGE: true
-# APPDIR: "./appdir"
-# RELEASE_MESSAGE: "../GitHubReleaseMessage.txt"
-# ccache_cache:
-# folder: "/tmp/ccache_dir"
-# matrix:
-# - name: Cirrus Linux GCC
-# env:
-# COMPILER: gcc
-# gcc_script:
-# - mkdir artifacts
-# - ".ci/build-linux.sh"
-# - name: Cirrus Linux Clang
-# env:
-# COMPILER: clang
-# clang_script:
-# - mkdir artifacts
-# - ".ci/build-linux.sh"
-# artifacts:
-# name: Artifact
-# path: "artifacts/*"
-# push_script: |
-# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ] && [ "$COMPILER" = "gcc" ]; then
-# COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp)
-# COMM_COUNT=$(git rev-list --count HEAD)
-# COMM_HASH=$(git rev-parse --short=8 HEAD)
-
-# export AVVER="${COMM_TAG}-${COMM_COUNT}"
-
-# .ci/github-upload.sh
-# fi;
-
-freebsd_task:
- matrix:
- - name: Cirrus FreeBSD
- freebsd_instance:
- image_family: freebsd-13-2
- cpu: 8
- memory: 8G
- env:
- CCACHE_MAXSIZE: 300M # 3x clean build, rounded
- CCACHE_DIR: /tmp/ccache_dir
- ccache_cache:
- folder: /tmp/ccache_dir
- install_script: "sh -ex ./.ci/install-freebsd.sh"
- script: "./.ci/build-freebsd.sh"
-
-# macos_task:
-# timeout_in: 12000m
-# homebrew_cache:
-# folder: /Users/admin/Library/Caches/Homebrew
-# qt_cache:
-# folder: /tmp/Qt
-# ccache_cache:
-# folder: /tmp/ccache_dir
-# matrix:
-# - name: Cirrus macOS
-# macos_instance:
-# image: ghcr.io/cirruslabs/macos-monterey-xcode:latest
-# mac_script:
-# - mkdir artifacts
-# - chmod +x ".ci/build-mac.sh"
-# - chmod +x ".ci/deploy-mac.sh"
-# - ".ci/build-mac.sh"
-# env:
-# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}/artifacts
-# ARTDIR: ${CIRRUS_WORKING_DIR}/artifacts/
-# CCACHE_DIR: "/tmp/ccache_dir"
-# CCACHE_MAXSIZE: 300M
-# CI_HAS_ARTIFACTS: true
-# UPLOAD_COMMIT_HASH: 51ae32f468089a8169aaf1567de355ff4a3e0842
-# UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-mac"
-# RELEASE_MESSAGE: "../GitHubReleaseMessage.txt"
-# artifacts:
-# name: Artifact
-# path: "artifacts/*"
-# push_script: |
-# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ]; then
-# source './.ci/export-cirrus-vars.sh'
-# .ci/github-upload.sh
-# fi;
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index a0453b46bc..d719944078 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -16,4 +16,4 @@ Submitting your test results for Commercial Games must be done on our forums. Pl
# Contributing
-Check the [Coding Style Guidelines](https://github.com/RPCS3/rpcs3/wiki/Coding-Style) and [Developer Information](https://github.com/RPCS3/rpcs3/wiki/Developer-Information). If you have any questions, hit us up on our [Discord Server](https://discord.me/RPCS3) in the **#development** channel.
+Check the [Coding Style Guidelines](https://github.com/RPCS3/rpcs3/wiki/Coding-Style) and [Developer Information](https://github.com/RPCS3/rpcs3/wiki/Developer-Information). If you have any questions, hit us up on our [Discord Server](https://discord.gg/rpcs3) in the **#development** channel.
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index 76a7ca9a61..f41f7323c4 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1,2 +1 @@
patreon: Nekotekina
-custom: https://rpcs3.net/alipay
diff --git a/.github/ISSUE_TEMPLATE/1-regression-report.yml b/.github/ISSUE_TEMPLATE/1-regression-report.yml
index 865f96e92b..7c8659b62e 100644
--- a/.github/ISSUE_TEMPLATE/1-regression-report.yml
+++ b/.github/ISSUE_TEMPLATE/1-regression-report.yml
@@ -7,7 +7,7 @@ body:
attributes:
value: |
# Summary
- Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead.
+ Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead.
- type: textarea
id: quick-summary
attributes:
@@ -50,7 +50,7 @@ body:
* Completely close RPCS3 and locate the log file.
RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 with notepad icon).
- * On Windows it will be in the RPCS3 directory near the executable
+ * On Windows it will be in the ```log``` folder inside your RPCS3 folder.
* On Linux it will be in ```~/.cache/rpcs3/```
* On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter.
- type: textarea
diff --git a/.github/ISSUE_TEMPLATE/2-bug-report.yml b/.github/ISSUE_TEMPLATE/2-bug-report.yml
index 00df05c66b..4a82a03008 100644
--- a/.github/ISSUE_TEMPLATE/2-bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/2-bug-report.yml
@@ -7,7 +7,7 @@ body:
attributes:
value: |
# Summary
- Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead.
+ Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead.
- type: textarea
id: quick-summary
attributes:
@@ -36,7 +36,7 @@ body:
* Completely close RPCS3 and locate the log file.
RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 with notepad icon).
- * On Windows it will be in the RPCS3 directory near the executable
+ * On Windows it will be in the ```log``` folder inside your RPCS3 folder.
* On Linux it will be in ```~/.cache/rpcs3/```
* On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter.
- type: textarea
diff --git a/.github/ISSUE_TEMPLATE/3-feature-request.yml b/.github/ISSUE_TEMPLATE/3-feature-request.yml
index 49a44b923c..153c44c41c 100644
--- a/.github/ISSUE_TEMPLATE/3-feature-request.yml
+++ b/.github/ISSUE_TEMPLATE/3-feature-request.yml
@@ -6,7 +6,7 @@ body:
- type: markdown
attributes:
value: |
- Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead.
+ Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead.
- type: textarea
id: quick-summary
attributes:
@@ -31,6 +31,6 @@ body:
* If this feature is something that a game is trying to use, upload a log file for it.
RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 with notepad icon).
- * On Windows it will be in the RPCS3 directory near the executable
+ * On Windows it will be in the ```log``` folder inside your RPCS3 folder.
* On Linux it will be in ```~/.cache/rpcs3/```
* On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter.
diff --git a/.github/ISSUE_TEMPLATE/4-advanced.md b/.github/ISSUE_TEMPLATE/4-advanced.md
index f0ab236298..78a9d96178 100644
--- a/.github/ISSUE_TEMPLATE/4-advanced.md
+++ b/.github/ISSUE_TEMPLATE/4-advanced.md
@@ -7,7 +7,7 @@ assignees: ''
---
-## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead.
+## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead.
You're using the advanced template. You're expected to know what to write in order to fill in all the required information for a proper report.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 137afb75a2..1dd094e1dc 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -4,7 +4,7 @@ contact_links:
url: https://rpcs3.net/quickstart
about: Everything you need to know to install and configure emulator, and add games
- name: Ask for help
- url: https://discord.me/RPCS3
+ url: https://discord.gg/rpcs3
about: If you have some questions or need help, please use our Discord server instead of GitHub
- name: Report game compatibility
url: https://forums.rpcs3.net/thread-196671.html
diff --git a/.github/PR-BUILD.md b/.github/PR-BUILD.md
deleted file mode 100644
index fb9a0aeb17..0000000000
--- a/.github/PR-BUILD.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## How to test a PR build
-
-Please take into account, that RPCS3 build usually takes some time (about 15 mins), so you can't access a build if a PR was just submitted.
-
-- Open a PR you want to test
-- Scroll to the very bottom and locate the **Checks** section
-- Click on **Show all checks**
- You are supposed to see something like this
- 
-- Click on __Details__ on either **Cirrus Linux GCC** or **Cirrus Windows**
-- Click **View more details on Cirrus CI** at the very bottom
- 
-- Click on the download button for **Artifact** on the **Artifacts** block
- 
-
-- Congratulations! You are now downloading an RPCS3 build for that specific PR.
-
-__Please note that PR builds are not supposed to be stable because they contain new changesets.__
diff --git a/.github/PULL_REQUEST_TEMPLATE/1-default.md b/.github/PULL_REQUEST_TEMPLATE/1-default.md
deleted file mode 100644
index 710d07ae2a..0000000000
--- a/.github/PULL_REQUEST_TEMPLATE/1-default.md
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-[How to test this PR](.github/PR-BUILD.md)
\ No newline at end of file
diff --git a/.github/workflows/llvm.yml b/.github/workflows/llvm.yml
new file mode 100644
index 0000000000..e3e3e76c50
--- /dev/null
+++ b/.github/workflows/llvm.yml
@@ -0,0 +1,72 @@
+name: Build LLVM
+
+defaults:
+ run:
+ shell: bash
+on:
+ workflow_dispatch:
+
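+# Manually dispatched only; a newer dispatch for the same ref cancels a run still in progress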
+concurrency:
+ group: ${{ github.ref }}-${{ github.event_name }}
+ cancel-in-progress: true
+
+env:
+ BUILD_ARTIFACTSTAGINGDIRECTORY: ${{ github.workspace }}/artifacts/
+
+jobs:
+ Windows_Build:
+ if: github.event_name == 'workflow_dispatch'
+ name: LLVM Windows (MSVC)
+ runs-on: windows-2025
+ env:
+ COMPILER: msvc
+ CCACHE_SHA: '1f39f3ad5aae3fe915e99ad1302633bc8f6718e58fa7c0de2b0ba7e080f0f08c'
+ CCACHE_BIN_DIR: 'C:\ccache_bin'
+ CCACHE_DIR: 'C:\ccache'
+ CCACHE_INODECACHE: 'true'
+ CCACHE_SLOPPINESS: 'time_macros'
+ DEPS_CACHE_DIR: ./dependency_cache
+ steps:
+
+ - name: Checkout repository
+ uses: actions/checkout@main
+ with:
+ fetch-depth: 0
+
+ - name: Restore Dependencies Cache
+ uses: actions/cache/restore@main
+ id: restore-dependencies-cache
+ with:
+ path: ${{ env.DEPS_CACHE_DIR }}
+ key: "${{ runner.os }}-${{ env.COMPILER }}-llvm-${{ env.CCACHE_SHA }}"
+ restore-keys: ${{ runner.os }}-${{ env.COMPILER }}-llvm
+
+ - name: Download and unpack dependencies
+ run: .ci/setup-llvm.sh
+
+ - name: Add msbuild to PATH
+ uses: microsoft/setup-msbuild@main
+
+ - name: Compile LLVM
+ shell: pwsh
+ run: msbuild 3rdparty\llvm\llvm_build.vcxproj /p:SolutionDir="$(pwd)/" /p:Configuration=Release /v:minimal /p:Platform=x64 /p:PreferredToolArchitecture=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets"
+
+ - name: Pack up build artifacts
+ run: |
+ mkdir -p "${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}"
+ .ci/deploy-llvm.sh
+
+ - name: Upload artifacts (7z)
+ uses: actions/upload-artifact@main
+ with:
+ name: LLVM for Windows (MSVC)
+ path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}
+ compression-level: 0
+ if-no-files-found: error
+
+ - name: Save Dependencies Cache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.DEPS_CACHE_DIR }}
+ key: ${{ steps.restore-dependencies-cache.outputs.cache-primary-key }}
diff --git a/.github/workflows/qt-ts.yml b/.github/workflows/qt-ts.yml
new file mode 100644
index 0000000000..4596be29e0
--- /dev/null
+++ b/.github/workflows/qt-ts.yml
@@ -0,0 +1,34 @@
+name: Generate Translation Template
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - master
+ paths:
+ - 'rpcs3/**'
+
+jobs:
+ Generate_Translation_Template:
+ name: Generate Translation Template
+ runs-on: ubuntu-24.04
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@main
+
+ - name: Install Qt Tools
+ run: |
+ sudo apt update
+ sudo apt install -y qt6-l10n-tools
+
+ - name: Generate .ts file using lupdate (Qt)
+ working-directory: rpcs3
+ run: |
+ ../.ci/generate-qt-ts.sh
+
+ - name: Upload translation template
+ uses: actions/upload-artifact@main
+ with:
+ name: RPCS3_Translation_Template
+ path: translations/rpcs3_template.ts
+ compression-level: 0
\ No newline at end of file
diff --git a/.github/workflows/rpcs3.yml b/.github/workflows/rpcs3.yml
new file mode 100644
index 0000000000..1981f78edb
--- /dev/null
+++ b/.github/workflows/rpcs3.yml
@@ -0,0 +1,444 @@
+name: Build RPCS3
+
+defaults:
+ run:
+ shell: bash
+on:
+ push:
+ branches:
+ - master # Only trigger push event on 'master' branch
+ pull_request:
+ workflow_dispatch:
+
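+# One active run per ref and trigger: pushing new commits to master or updating a PR cancels the run already in progress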
+concurrency:
+ group: ${{ github.ref }}-${{ github.event_name }}
+ cancel-in-progress: true
+
+env:
+ BUILD_REPOSITORY_NAME: ${{ github.repository }}
+ BUILD_SOURCEBRANCHNAME: ${{ github.ref_name }}
+ BUILD_PR_NUMBER: ${{ github.event.pull_request.number }}
+ BUILD_SOURCEVERSION: ${{ github.sha }}
+ BUILD_ARTIFACTSTAGINGDIRECTORY: ${{ github.workspace }}/artifacts/
+
+jobs:
+ Linux_Build:
+ # Only run push event on master branch of main repo, but run all PRs
+ if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master')
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - os: ubuntu-24.04
+ docker_img: "rpcs3/rpcs3-ci-jammy:1.6"
+ build_sh: "/rpcs3/.ci/build-linux.sh"
+ compiler: clang
+ UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f
+ UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux"
+ - os: ubuntu-24.04
+ docker_img: "rpcs3/rpcs3-ci-jammy:1.6"
+ build_sh: "/rpcs3/.ci/build-linux.sh"
+ compiler: gcc
+ - os: ubuntu-24.04-arm
+ docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.6"
+ build_sh: "/rpcs3/.ci/build-linux-aarch64.sh"
+ compiler: clang
+ UPLOAD_COMMIT_HASH: a1d35836e8d45bfc6f63c26f0a3e5d46ef622fe1
+ UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux-arm64"
+ - os: ubuntu-24.04-arm
+ docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.6"
+ build_sh: "/rpcs3/.ci/build-linux-aarch64.sh"
+ compiler: gcc
+ name: RPCS3 Linux ${{ matrix.os }} ${{ matrix.compiler }}
+ runs-on: ${{ matrix.os }}
+ env:
+ CCACHE_DIR: ${{ github.workspace }}/ccache
+ DEPLOY_APPIMAGE: true
+ APPDIR: "/rpcs3/build/appdir"
+ ARTDIR: "/root/artifacts"
+ RELEASE_MESSAGE: "/rpcs3/GitHubReleaseMessage.txt"
+ COMPILER: ${{ matrix.compiler }}
+ UPLOAD_COMMIT_HASH: ${{ matrix.UPLOAD_COMMIT_HASH }}
+ UPLOAD_REPO_FULL_NAME: ${{ matrix.UPLOAD_REPO_FULL_NAME }}
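+      # Expression evaluates to 'ON' only for pull requests, so unit tests are built and run for PRs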
+      RUN_UNIT_TESTS: ${{ github.event_name == 'pull_request' && 'ON' || 'OFF' }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@main
+ with:
+ fetch-depth: 0
+
+ - name: Restore build Ccache
+ uses: actions/cache/restore@main
+ id: restore-build-ccache
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-${{github.run_id}}
+ restore-keys: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-
+
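+      # Build inside the prebuilt CI image, bind-mounting the source tree, ccache and artifact directory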
+ - name: Docker setup and build
+ run: |
+ docker pull --quiet ${{ matrix.docker_img }}
+ docker run \
+ -v $PWD:/rpcs3 \
+ --env-file .ci/docker.env \
+ -v ${{ env.CCACHE_DIR }}:/root/.ccache \
+ -v ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}:${{ env.ARTDIR }} \
+ ${{ matrix.docker_img }} \
+ ${{ matrix.build_sh }}
+
+ - name: Upload artifacts
+ uses: actions/upload-artifact@main
+ with:
+ name: RPCS3 for Linux (${{ runner.arch }}, ${{ matrix.compiler }})
+ path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}/*.AppImage
+ compression-level: 0
+
+ - name: Deploy master build to GitHub Releases
+ if: |
+ github.event_name != 'pull_request' &&
+ github.repository == 'RPCS3/rpcs3' &&
+ github.ref == 'refs/heads/master' &&
+ matrix.compiler == 'clang'
+ env:
+ RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }}
+ run: |
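+          # Export AVVER (<version>-<commit count>) from rpcs3_version.cpp for .ci/github-upload.sh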
+ COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp)
+ COMM_COUNT=$(git rev-list --count HEAD)
+ COMM_HASH=$(git rev-parse --short=8 HEAD)
+ export AVVER="${COMM_TAG}-${COMM_COUNT}"
+ .ci/github-upload.sh
+
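+      # Ccache is only written back on pushes to master; pull requests restore the cache but never save it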
+ - name: Save build Ccache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }}
+
+ Mac_Build:
+ # Only run push event on master branch of main repo, but run all PRs
+ if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master')
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - name: Intel
+ build_sh: "arch -X86_64 .ci/build-mac.sh"
+ UPLOAD_COMMIT_HASH: 51ae32f468089a8169aaf1567de355ff4a3e0842
+ UPLOAD_REPO_FULL_NAME: rpcs3/rpcs3-binaries-mac
+ - name: Apple Silicon
+ build_sh: .ci/build-mac-arm64.sh
+ UPLOAD_COMMIT_HASH: 8e21bdbc40711a3fccd18fbf17b742348b0f4281
+ UPLOAD_REPO_FULL_NAME: rpcs3/rpcs3-binaries-mac-arm64
+ name: RPCS3 Mac ${{ matrix.name }}
+ runs-on: macos-14
+ env:
+ CCACHE_DIR: /tmp/ccache_dir
+ QT_VER: '6.7.3'
+ QT_VER_MAIN: '6'
+ LLVM_COMPILER_VER: '19'
+ RELEASE_MESSAGE: ../GitHubReleaseMessage.txt
+ UPLOAD_COMMIT_HASH: ${{ matrix.UPLOAD_COMMIT_HASH }}
+ UPLOAD_REPO_FULL_NAME: ${{ matrix.UPLOAD_REPO_FULL_NAME }}
+      RUN_UNIT_TESTS: ${{ github.event_name == 'pull_request' && 'ON' || 'OFF' }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@main
+ with:
+ fetch-depth: 0
+
+ - name: Restore Build Ccache
+ uses: actions/cache/restore@main
+ id: restore-build-ccache
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ runner.os }}-ccache-${{ matrix.name }}-${{github.run_id}}
+ restore-keys: ${{ runner.os }}-ccache-${{ matrix.name }}-
+
+ - name: Restore Qt Cache
+ uses: actions/cache/restore@main
+ id: restore-qt-cache
+ with:
+ path: /tmp/Qt
+ key: ${{ runner.os }}-qt-${{ matrix.name }}-${{ env.QT_VER }}
+ restore-keys: ${{ runner.os }}-qt-${{ matrix.name }}-${{ env.QT_VER }}
+
+ - name: Build
+ run: ${{ matrix.build_sh }}
+
+ - name: Upload artifacts
+ uses: actions/upload-artifact@main
+ with:
+ name: RPCS3 for Mac (${{ matrix.name }})
+ path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}
+ compression-level: 0
+
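+      # Load key=value pairs from .ci/ci-vars.env into the job environment for the following steps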
+ - name: Export Variables
+ run: |
+ while IFS='=' read -r key val; do
+ # Skip lines that are empty or start with '#'
+ [[ -z "$key" || "$key" =~ ^# ]] && continue
+ echo "$key=$val" >> "${{ github.env }}"
+ done < .ci/ci-vars.env
+
+ - name: Deploy master build to GitHub Releases
+ if: |
+ github.event_name != 'pull_request' &&
+ github.repository == 'RPCS3/rpcs3' &&
+ github.ref == 'refs/heads/master'
+ env:
+ RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }}
+ run: .ci/github-upload.sh
+
+ - name: Save Build Ccache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }}
+
+ - name: Save Qt Cache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: /tmp/Qt
+ key: ${{ steps.restore-qt-cache.outputs.cache-primary-key }}
+
+ Windows_Build:
+ # Only run push event on master branch of main repo, but run all PRs
+ if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master')
+ name: RPCS3 Windows
+ runs-on: windows-2025
+ env:
+ COMPILER: msvc
+ QT_VER_MAIN: '6'
+ QT_VER: '6.9.1'
+ QT_VER_MSVC: 'msvc2022'
+ QT_DATE: '202505291653'
+ LLVM_VER: '19.1.7'
+ VULKAN_VER: '1.3.268.0'
+ VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5'
+ CCACHE_SHA: '1f39f3ad5aae3fe915e99ad1302633bc8f6718e58fa7c0de2b0ba7e080f0f08c'
+ CCACHE_BIN_DIR: 'C:\ccache_bin'
+ CCACHE_DIR: 'C:\ccache'
+ CCACHE_INODECACHE: 'true'
+ CCACHE_SLOPPINESS: 'time_macros'
+ DEPS_CACHE_DIR: ./dependency_cache
+ UPLOAD_COMMIT_HASH: 7d09e3be30805911226241afbb14f8cdc2eb054e
+ UPLOAD_REPO_FULL_NAME: "RPCS3/rpcs3-binaries-win"
+ steps:
+
+ - name: Checkout repository
+ uses: actions/checkout@main
+ with:
+ fetch-depth: 0
+
+ - name: Setup NuGet
+ uses: nuget/setup-nuget@v2
+
+ - name: Restore NuGet packages
+ run: nuget restore rpcs3.sln
+
+ - name: Setup env
+ shell: pwsh
+ run: |
+ echo "QTDIR=C:\Qt\${{ env.QT_VER }}\${{ env.QT_VER_MSVC }}_64" >> ${{ github.env }}
+ echo "VULKAN_SDK=C:\VulkanSDK\${{ env.VULKAN_VER }}" >> ${{ github.env }}
+
+ - name: Get Cache Keys
+ run: .ci/get_keys-windows.sh
+
+ - name: Restore Build Ccache
+ uses: actions/cache/restore@main
+ id: restore-build-ccache
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: "${{ runner.os }}-ccache-${{ env.COMPILER }}-${{github.run_id}}"
+ restore-keys: ${{ runner.os }}-ccache-${{ env.COMPILER }}-
+
+ - name: Restore Dependencies Cache
+ uses: actions/cache/restore@main
+ id: restore-dependencies-cache
+ with:
+ path: ${{ env.DEPS_CACHE_DIR }}
+ key: "${{ runner.os }}-${{ env.COMPILER }}-${{ env.QT_VER }}-${{ env.VULKAN_SDK_SHA }}-${{ env.CCACHE_SHA }}-${{ hashFiles('llvm.lock') }}"
+ restore-keys: ${{ runner.os }}-${{ env.COMPILER }}-
+
+ - name: Download and unpack dependencies
+ run: |
+ .ci/setup-windows.sh
+ .ci/setup-windows-ci-vars.sh win64 msvc
+
+ - name: Export Variables
+ run: |
+ while IFS='=' read -r key val; do
+ # Skip lines that are empty or start with '#'
+ [[ -z "$key" || "$key" =~ ^# ]] && continue
+ echo "$key=$val" >> "${{ github.env }}"
+ done < .ci/ci-vars.env
+
+ - name: Add msbuild to PATH
+ uses: microsoft/setup-msbuild@main
+
+ - name: Compile RPCS3
+ shell: pwsh
+ run: msbuild rpcs3.sln /p:Configuration=Release /v:minimal /p:Platform=x64 /p:PreferredToolArchitecture=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets"
+
+ - name: Run Unit Tests
+ if: github.event_name == 'pull_request'
+ shell: pwsh
+ run: build\lib\Release-x64\rpcs3_test.exe
+
+ - name: Pack up build artifacts
+ run: |
+ mkdir -p "${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}"
+ .ci/deploy-windows.sh
+
+ - name: Upload artifacts (7z)
+ uses: actions/upload-artifact@main
+ with:
+ name: RPCS3 for Windows (MSVC)
+ path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}
+ compression-level: 0
+ if-no-files-found: error
+
+ - name: Deploy master build to GitHub Releases
+ if: |
+ github.event_name != 'pull_request' &&
+ github.repository == 'RPCS3/rpcs3' &&
+ github.ref == 'refs/heads/master'
+ env:
+ RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }}
+ run: .ci/github-upload.sh
+
+ - name: Save Build Ccache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }}
+
+ - name: Save Dependencies Cache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.DEPS_CACHE_DIR }}
+ key: ${{ steps.restore-dependencies-cache.outputs.cache-primary-key }}
+
+ Windows_Build_Clang:
+ # Only run push event on master branch of main repo, but run all PRs
+ if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master')
+ name: RPCS3 Windows Clang
+ runs-on: windows-2025
+ strategy:
+ matrix:
+ include:
+ - msys2: clang64
+ compiler: clang
+ arch: win64
+ env:
+ CCACHE_DIR: 'C:\ccache'
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@main
+ with:
+ fetch-depth: 0
+
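+      # Toolchain and third-party libraries come from the MSYS2 clang64 environment installed below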
+ - name: Setup msys2
+ uses: msys2/setup-msys2@v2
+ with:
+ msystem: ${{ matrix.msys2 }}
+ update: true
+ cache: true
+ install: |
+ mingw-w64-clang-x86_64-clang
+ mingw-w64-clang-x86_64-ccache
+ mingw-w64-clang-x86_64-cmake
+ mingw-w64-clang-x86_64-lld
+ mingw-w64-clang-x86_64-ninja
+ mingw-w64-clang-x86_64-llvm
+ mingw-w64-clang-x86_64-ffmpeg
+ mingw-w64-clang-x86_64-opencv
+ mingw-w64-clang-x86_64-glew
+ mingw-w64-clang-x86_64-vulkan
+ mingw-w64-clang-x86_64-vulkan-headers
+ mingw-w64-clang-x86_64-vulkan-loader
+ mingw-w64-clang-x86_64-gtest
+ mingw-w64-clang-x86_64-qt6-base
+ mingw-w64-clang-x86_64-qt6-declarative
+ mingw-w64-clang-x86_64-qt6-multimedia
+ mingw-w64-clang-x86_64-qt6-svg
+ base-devel
+ curl
+ git
+ p7zip
+
+ - name: Restore build Ccache
+ uses: actions/cache/restore@main
+ id: restore-build-ccache
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-${{ github.run_id }}
+ restore-keys: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-
+
+ - name: Build RPCS3
+ shell: msys2 {0}
+ run: |
+ export CCACHE_DIR=$(cygpath -u "$CCACHE_DIR")
+ echo "CCACHE_DIR=$CCACHE_DIR"
+ .ci/setup-windows-ci-vars.sh ${{ matrix.arch }} ${{ matrix.compiler }}
+ .ci/build-windows-clang.sh
+
+ - name: Save build Ccache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }}
+
+ - name: Upload artifacts
+ uses: actions/upload-artifact@main
+ with:
+ name: RPCS3 for Windows (${{ runner.arch }}, ${{ matrix.compiler }})
+ path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}
+ compression-level: 0
+ if-no-files-found: error
+
+ FreeBSD_Build:
+ # Only run push event on master branch of main repo, but run all PRs
+ if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master')
+ name: RPCS3 FreeBSD
+ runs-on: ubuntu-latest
+ timeout-minutes: 60
+ env:
+ CCACHE_DIR: ${{ github.workspace }}/ccache
+ QT_VER_MAIN: '6'
+ LLVM_COMPILER_VER: '19'
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@main
+ with:
+ fetch-depth: 0
+
+ - name: Restore Build Ccache
+ uses: actions/cache/restore@main
+ id: restore-build-ccache
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: FreeBSD-ccache-${{github.run_id}}
+ restore-keys: FreeBSD-ccache-
+
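+      # vmactions boots a FreeBSD VM on the Linux runner and runs the install and build scripts inside it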
+ - name: FreeBSD build
+ id: root
+ uses: vmactions/freebsd-vm@v1
+ with:
+ envs: 'QT_VER_MAIN LLVM_COMPILER_VER CCACHE_DIR'
+ usesh: true
+ run: .ci/install-freebsd.sh && .ci/build-freebsd.sh
+
+ - name: Save Build Ccache
+ if: github.ref == 'refs/heads/master'
+ uses: actions/cache/save@main
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }}
diff --git a/.gitignore b/.gitignore
index 88ca38462b..4688d5fa52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,6 +35,7 @@
/lib
/tmp
/ipch
+/packages
/rpcs3/Debug
/rpcs3/Release
@@ -55,9 +56,6 @@
/bin/GuiConfigs/*.dat
/bin/GuiConfigs/*.dat.*
-# Some data from git
-!/bin/git/
-
# Visual Studio Files
.vs/*
.vscode/*
@@ -117,28 +115,9 @@ CMakeLists.txt.user
# macOS
.DS_Store
-# 7zlib
-/3rdparty/7z/**/*.lib
-
# yaml-cpp
yaml-cpp.pc
-# libusb
-/3rdparty/libusb_cmake/config.h
-/3rdparty/libusb_cmake/libusb-1.0.pc
-
-# miniupnp
-/3rdparty/miniupnp/x64/*
-
-# llvm
-/3rdparty/llvm/llvm_build
-
-# legacy llvm
-/llvm_build
-
-# ssl certificate
-cacert.pem
-
_ReSharper.*/
CMakeUserPresets.json
diff --git a/.gitmodules b/.gitmodules
index 3801c6cc68..427c61ffbd 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -15,14 +15,6 @@
path = 3rdparty/glslang/glslang
url = ../../KhronosGroup/glslang.git
ignore = dirty
-[submodule "3rdparty/SPIRV-Tools"]
- path = 3rdparty/SPIRV/SPIRV-Tools
- url = ../../KhronosGroup/SPIRV-Tools.git
- ignore = dirty
-[submodule "3rdparty/SPIRV-Headers"]
- path = 3rdparty/SPIRV/SPIRV-Headers
- url = ../../KhronosGroup/SPIRV-Headers.git
- ignore = dirty
[submodule "3rdparty/zlib"]
path = 3rdparty/zlib/zlib
url = ../../madler/zlib
@@ -36,10 +28,6 @@
path = 3rdparty/pugixml
url = ../../zeux/pugixml.git
ignore = dirty
-[submodule "3rdparty/xxHash"]
- path = 3rdparty/xxHash
- url = ../../Cyan4973/xxHash.git
- ignore = dirty
[submodule "3rdparty/yaml-cpp"]
path = 3rdparty/yaml-cpp/yaml-cpp
url = ../../RPCS3/yaml-cpp.git
@@ -88,3 +76,35 @@
path = 3rdparty/rtmidi/rtmidi
url = ../../thestk/rtmidi
ignore = dirty
+[submodule "3rdparty/zstd/zstd"]
+ path = 3rdparty/zstd/zstd
+ url = ../../facebook/zstd
+ ignore = dirty
+[submodule "3rdparty/7zip/7zip"]
+ path = 3rdparty/7zip/7zip
+ url = ../../ip7z/7zip.git
+ ignore = dirty
+[submodule "3rdparty/OpenAL/openal-soft"]
+ path = 3rdparty/OpenAL/openal-soft
+ url = ../../kcat/openal-soft.git
+ ignore = dirty
+[submodule "3rdparty/stblib/stb"]
+ path = 3rdparty/stblib/stb
+ url = ../../nothings/stb.git
+ ignore = dirty
+[submodule "3rdparty/opencv/opencv"]
+ path = 3rdparty/opencv/opencv
+ url = ../../Megamouse/opencv_minimal.git
+ ignore = dirty
+[submodule "3rdparty/fusion/fusion"]
+ path = 3rdparty/fusion/fusion
+ url = ../../xioTechnologies/Fusion.git
+ ignore = dirty
+[submodule "3rdparty/discord-rpc/discord-rpc"]
+ path = 3rdparty/discord-rpc/discord-rpc
+ url = ../../Vestrel/discord-rpc
+ ignore = dirty
+[submodule "3rdparty/GPUOpen/VulkanMemoryAllocator"]
+ path = 3rdparty/GPUOpen/VulkanMemoryAllocator
+ url = ../../GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git
+ ignore = dirty
diff --git a/3rdparty/7z/7zlib.vcxproj.filters b/3rdparty/7z/7zlib.vcxproj.filters
deleted file mode 100644
index 9ff9afe990..0000000000
--- a/3rdparty/7z/7zlib.vcxproj.filters
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/3rdparty/7z/CMakeLists.txt b/3rdparty/7z/CMakeLists.txt
deleted file mode 100644
index fb77d6773c..0000000000
--- a/3rdparty/7z/CMakeLists.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-# 7z sdk
-if(WIN32)
- add_library(3rdparty_7z STATIC EXCLUDE_FROM_ALL
- src/7zAlloc.c
- src/7zArcIn.c
- src/7zBuf.c
- src/7zBuf2.c
- src/7zCrc.c
- src/7zCrcOpt.c
- src/7zDec.c
- src/7zFile.c
- src/7zStream.c
- src/Aes.c
- src/AesOpt.c
- src/Alloc.c
- src/Bcj2.c
- src/Bcj2Enc.c
- src/Blake2s.c
- src/Bra.c
- src/Bra86.c
- src/BraIA64.c
- src/BwtSort.c
- src/CpuArch.c
- src/Delta.c
- src/DllSecur.c
- src/HuffEnc.c
- src/LzFind.c
- src/LzFindMt.c
- src/LzFindOpt.c
- src/Lzma2Dec.c
- src/Lzma2DecMt.c
- src/Lzma2Enc.c
- src/Lzma86Dec.c
- src/Lzma86Enc.c
- src/LzmaDec.c
- src/LzmaEnc.c
- src/LzmaLib.c
- src/MtCoder.c
- src/MtDec.c
- src/Ppmd7.c
- src/Ppmd7aDec.c
- src/Ppmd7Dec.c
- src/Ppmd7Enc.c
- src/Ppmd8.c
- src/Ppmd8Dec.c
- src/Ppmd8Enc.c
- src/Sha1.c
- src/Sha1Opt.c
- src/Sha256.c
- src/Sha256Opt.c
- src/Sort.c
- src/SwapBytes.c
- src/Threads.c
- src/Xz.c
- src/XzCrc64.c
- src/XzCrc64Opt.c
- src/XzDec.c
- src/XzEnc.c
- src/XzIn.c)
- target_include_directories(3rdparty_7z INTERFACE
- $
- $)
-
- target_include_directories(3rdparty_7z INTERFACE 7z)
-
- set_property(TARGET 3rdparty_7z PROPERTY FOLDER "3rdparty/")
-
-else()
- add_library(3rdparty_7z INTERFACE)
-endif()
diff --git a/3rdparty/7z/src/7z.h b/3rdparty/7z/src/7z.h
deleted file mode 100644
index b42405cc22..0000000000
--- a/3rdparty/7z/src/7z.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/* 7z.h -- 7z interface
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_7Z_H
-#define ZIP7_INC_7Z_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define k7zStartHeaderSize 0x20
-#define k7zSignatureSize 6
-
-extern const Byte k7zSignature[k7zSignatureSize];
-
-typedef struct
-{
- const Byte *Data;
- size_t Size;
-} CSzData;
-
-/* CSzCoderInfo & CSzFolder support only default methods */
-
-typedef struct
-{
- size_t PropsOffset;
- UInt32 MethodID;
- Byte NumStreams;
- Byte PropsSize;
-} CSzCoderInfo;
-
-typedef struct
-{
- UInt32 InIndex;
- UInt32 OutIndex;
-} CSzBond;
-
-#define SZ_NUM_CODERS_IN_FOLDER_MAX 4
-#define SZ_NUM_BONDS_IN_FOLDER_MAX 3
-#define SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX 4
-
-typedef struct
-{
- UInt32 NumCoders;
- UInt32 NumBonds;
- UInt32 NumPackStreams;
- UInt32 UnpackStream;
- UInt32 PackStreams[SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX];
- CSzBond Bonds[SZ_NUM_BONDS_IN_FOLDER_MAX];
- CSzCoderInfo Coders[SZ_NUM_CODERS_IN_FOLDER_MAX];
-} CSzFolder;
-
-
-SRes SzGetNextFolderItem(CSzFolder *f, CSzData *sd);
-
-typedef struct
-{
- UInt32 Low;
- UInt32 High;
-} CNtfsFileTime;
-
-typedef struct
-{
- Byte *Defs; /* MSB 0 bit numbering */
- UInt32 *Vals;
-} CSzBitUi32s;
-
-typedef struct
-{
- Byte *Defs; /* MSB 0 bit numbering */
- // UInt64 *Vals;
- CNtfsFileTime *Vals;
-} CSzBitUi64s;
-
-#define SzBitArray_Check(p, i) (((p)[(i) >> 3] & (0x80 >> ((i) & 7))) != 0)
-
-#define SzBitWithVals_Check(p, i) ((p)->Defs && ((p)->Defs[(i) >> 3] & (0x80 >> ((i) & 7))) != 0)
-
-typedef struct
-{
- UInt32 NumPackStreams;
- UInt32 NumFolders;
-
- UInt64 *PackPositions; // NumPackStreams + 1
- CSzBitUi32s FolderCRCs; // NumFolders
-
- size_t *FoCodersOffsets; // NumFolders + 1
- UInt32 *FoStartPackStreamIndex; // NumFolders + 1
- UInt32 *FoToCoderUnpackSizes; // NumFolders + 1
- Byte *FoToMainUnpackSizeIndex; // NumFolders
- UInt64 *CoderUnpackSizes; // for all coders in all folders
-
- Byte *CodersData;
-
- UInt64 RangeLimit;
-} CSzAr;
-
-UInt64 SzAr_GetFolderUnpackSize(const CSzAr *p, UInt32 folderIndex);
-
-SRes SzAr_DecodeFolder(const CSzAr *p, UInt32 folderIndex,
- ILookInStreamPtr stream, UInt64 startPos,
- Byte *outBuffer, size_t outSize,
- ISzAllocPtr allocMain);
-
-typedef struct
-{
- CSzAr db;
-
- UInt64 startPosAfterHeader;
- UInt64 dataPos;
-
- UInt32 NumFiles;
-
- UInt64 *UnpackPositions; // NumFiles + 1
- // Byte *IsEmptyFiles;
- Byte *IsDirs;
- CSzBitUi32s CRCs;
-
- CSzBitUi32s Attribs;
- // CSzBitUi32s Parents;
- CSzBitUi64s MTime;
- CSzBitUi64s CTime;
-
- UInt32 *FolderToFile; // NumFolders + 1
- UInt32 *FileToFolder; // NumFiles
-
- size_t *FileNameOffsets; /* in 2-byte steps */
- Byte *FileNames; /* UTF-16-LE */
-} CSzArEx;
-
-#define SzArEx_IsDir(p, i) (SzBitArray_Check((p)->IsDirs, i))
-
-#define SzArEx_GetFileSize(p, i) ((p)->UnpackPositions[(i) + 1] - (p)->UnpackPositions[i])
-
-void SzArEx_Init(CSzArEx *p);
-void SzArEx_Free(CSzArEx *p, ISzAllocPtr alloc);
-UInt64 SzArEx_GetFolderStreamPos(const CSzArEx *p, UInt32 folderIndex, UInt32 indexInFolder);
-int SzArEx_GetFolderFullPackSize(const CSzArEx *p, UInt32 folderIndex, UInt64 *resSize);
-
-/*
-if dest == NULL, the return value specifies the required size of the buffer,
- in 16-bit characters, including the null-terminating character.
-if dest != NULL, the return value specifies the number of 16-bit characters that
- are written to the dest, including the null-terminating character. */
-
-size_t SzArEx_GetFileNameUtf16(const CSzArEx *p, size_t fileIndex, UInt16 *dest);
-
-/*
-size_t SzArEx_GetFullNameLen(const CSzArEx *p, size_t fileIndex);
-UInt16 *SzArEx_GetFullNameUtf16_Back(const CSzArEx *p, size_t fileIndex, UInt16 *dest);
-*/
-
-
-
-/*
- SzArEx_Extract extracts file from archive
-
- *outBuffer must be 0 before first call for each new archive.
-
- Extracting cache:
- If you need to decompress more than one file, you can send
- these values from previous call:
- *blockIndex,
- *outBuffer,
- *outBufferSize
- You can consider "*outBuffer" as cache of solid block. If your archive is solid,
- it will increase decompression speed.
-
- If you use external function, you can declare these 3 cache variables
- (blockIndex, outBuffer, outBufferSize) as static in that external function.
-
- Free *outBuffer and set *outBuffer to 0, if you want to flush cache.
-*/
-
-SRes SzArEx_Extract(
- const CSzArEx *db,
- ILookInStreamPtr inStream,
- UInt32 fileIndex, /* index of file */
- UInt32 *blockIndex, /* index of solid block */
- Byte **outBuffer, /* pointer to pointer to output buffer (allocated with allocMain) */
- size_t *outBufferSize, /* buffer size for output buffer */
- size_t *offset, /* offset of stream for required file in *outBuffer */
- size_t *outSizeProcessed, /* size of file in *outBuffer */
- ISzAllocPtr allocMain,
- ISzAllocPtr allocTemp);
-
-
-/*
-SzArEx_Open Errors:
-SZ_ERROR_NO_ARCHIVE
-SZ_ERROR_ARCHIVE
-SZ_ERROR_UNSUPPORTED
-SZ_ERROR_MEM
-SZ_ERROR_CRC
-SZ_ERROR_INPUT_EOF
-SZ_ERROR_FAIL
-*/
-
-SRes SzArEx_Open(CSzArEx *p, ILookInStreamPtr inStream,
- ISzAllocPtr allocMain, ISzAllocPtr allocTemp);
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/7zAlloc.c b/3rdparty/7z/src/7zAlloc.c
deleted file mode 100644
index 3a8e2cbf44..0000000000
--- a/3rdparty/7z/src/7zAlloc.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/* 7zAlloc.c -- Allocation functions for 7z processing
-2023-03-04 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <stdlib.h>
-
-#include "7zAlloc.h"
-
-/* #define SZ_ALLOC_DEBUG */
-/* use SZ_ALLOC_DEBUG to debug alloc/free operations */
-
-#ifdef SZ_ALLOC_DEBUG
-
-/*
-#ifdef _WIN32
-#include "7zWindows.h"
-#endif
-*/
-
-#include <stdio.h>
-static int g_allocCount = 0;
-static int g_allocCountTemp = 0;
-
-static void Print_Alloc(const char *s, size_t size, int *counter)
-{
- const unsigned size2 = (unsigned)size;
- fprintf(stderr, "\n%s count = %10d : %10u bytes; ", s, *counter, size2);
- (*counter)++;
-}
-static void Print_Free(const char *s, int *counter)
-{
- (*counter)--;
- fprintf(stderr, "\n%s count = %10d", s, *counter);
-}
-#endif
-
-void *SzAlloc(ISzAllocPtr p, size_t size)
-{
- UNUSED_VAR(p)
- if (size == 0)
- return 0;
- #ifdef SZ_ALLOC_DEBUG
- Print_Alloc("Alloc", size, &g_allocCount);
- #endif
- return malloc(size);
-}
-
-void SzFree(ISzAllocPtr p, void *address)
-{
- UNUSED_VAR(p)
- #ifdef SZ_ALLOC_DEBUG
- if (address)
- Print_Free("Free ", &g_allocCount);
- #endif
- free(address);
-}
-
-void *SzAllocTemp(ISzAllocPtr p, size_t size)
-{
- UNUSED_VAR(p)
- if (size == 0)
- return 0;
- #ifdef SZ_ALLOC_DEBUG
- Print_Alloc("Alloc_temp", size, &g_allocCountTemp);
- /*
- #ifdef _WIN32
- return HeapAlloc(GetProcessHeap(), 0, size);
- #endif
- */
- #endif
- return malloc(size);
-}
-
-void SzFreeTemp(ISzAllocPtr p, void *address)
-{
- UNUSED_VAR(p)
- #ifdef SZ_ALLOC_DEBUG
- if (address)
- Print_Free("Free_temp ", &g_allocCountTemp);
- /*
- #ifdef _WIN32
- HeapFree(GetProcessHeap(), 0, address);
- return;
- #endif
- */
- #endif
- free(address);
-}
diff --git a/3rdparty/7z/src/7zAlloc.h b/3rdparty/7z/src/7zAlloc.h
deleted file mode 100644
index bedd125c0a..0000000000
--- a/3rdparty/7z/src/7zAlloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* 7zAlloc.h -- Allocation functions
-2023-03-04 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_7Z_ALLOC_H
-#define ZIP7_INC_7Z_ALLOC_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-void *SzAlloc(ISzAllocPtr p, size_t size);
-void SzFree(ISzAllocPtr p, void *address);
-
-void *SzAllocTemp(ISzAllocPtr p, size_t size);
-void SzFreeTemp(ISzAllocPtr p, void *address);
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/7zArcIn.c b/3rdparty/7z/src/7zArcIn.c
deleted file mode 100644
index 951b7df45e..0000000000
--- a/3rdparty/7z/src/7zArcIn.c
+++ /dev/null
@@ -1,1786 +0,0 @@
-/* 7zArcIn.c -- 7z Input functions
-2023-05-11 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "7z.h"
-#include "7zBuf.h"
-#include "7zCrc.h"
-#include "CpuArch.h"
-
-#define MY_ALLOC(T, p, size, alloc) \
- { if ((p = (T *)ISzAlloc_Alloc(alloc, (size) * sizeof(T))) == NULL) return SZ_ERROR_MEM; }
-
-#define MY_ALLOC_ZE(T, p, size, alloc) \
- { if ((size) == 0) p = NULL; else MY_ALLOC(T, p, size, alloc) }
-
-#define MY_ALLOC_AND_CPY(to, size, from, alloc) \
- { MY_ALLOC(Byte, to, size, alloc); memcpy(to, from, size); }
-
-#define MY_ALLOC_ZE_AND_CPY(to, size, from, alloc) \
- { if ((size) == 0) to = NULL; else { MY_ALLOC_AND_CPY(to, size, from, alloc) } }
-
-#define k7zMajorVersion 0
-
-enum EIdEnum
-{
- k7zIdEnd,
- k7zIdHeader,
- k7zIdArchiveProperties,
- k7zIdAdditionalStreamsInfo,
- k7zIdMainStreamsInfo,
- k7zIdFilesInfo,
- k7zIdPackInfo,
- k7zIdUnpackInfo,
- k7zIdSubStreamsInfo,
- k7zIdSize,
- k7zIdCRC,
- k7zIdFolder,
- k7zIdCodersUnpackSize,
- k7zIdNumUnpackStream,
- k7zIdEmptyStream,
- k7zIdEmptyFile,
- k7zIdAnti,
- k7zIdName,
- k7zIdCTime,
- k7zIdATime,
- k7zIdMTime,
- k7zIdWinAttrib,
- k7zIdComment,
- k7zIdEncodedHeader,
- k7zIdStartPos,
- k7zIdDummy
- // k7zNtSecure,
- // k7zParent,
- // k7zIsReal
-};
-
-const Byte k7zSignature[k7zSignatureSize] = {'7', 'z', 0xBC, 0xAF, 0x27, 0x1C};
-
-#define SzBitUi32s_INIT(p) { (p)->Defs = NULL; (p)->Vals = NULL; }
-
-static SRes SzBitUi32s_Alloc(CSzBitUi32s *p, size_t num, ISzAllocPtr alloc)
-{
- if (num == 0)
- {
- p->Defs = NULL;
- p->Vals = NULL;
- }
- else
- {
- MY_ALLOC(Byte, p->Defs, (num + 7) >> 3, alloc)
- MY_ALLOC(UInt32, p->Vals, num, alloc)
- }
- return SZ_OK;
-}
-
-static void SzBitUi32s_Free(CSzBitUi32s *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->Defs); p->Defs = NULL;
- ISzAlloc_Free(alloc, p->Vals); p->Vals = NULL;
-}
-
-#define SzBitUi64s_INIT(p) { (p)->Defs = NULL; (p)->Vals = NULL; }
-
-static void SzBitUi64s_Free(CSzBitUi64s *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->Defs); p->Defs = NULL;
- ISzAlloc_Free(alloc, p->Vals); p->Vals = NULL;
-}
-
-
-static void SzAr_Init(CSzAr *p)
-{
- p->NumPackStreams = 0;
- p->NumFolders = 0;
-
- p->PackPositions = NULL;
- SzBitUi32s_INIT(&p->FolderCRCs)
-
- p->FoCodersOffsets = NULL;
- p->FoStartPackStreamIndex = NULL;
- p->FoToCoderUnpackSizes = NULL;
- p->FoToMainUnpackSizeIndex = NULL;
- p->CoderUnpackSizes = NULL;
-
- p->CodersData = NULL;
-
- p->RangeLimit = 0;
-}
-
-static void SzAr_Free(CSzAr *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->PackPositions);
- SzBitUi32s_Free(&p->FolderCRCs, alloc);
-
- ISzAlloc_Free(alloc, p->FoCodersOffsets);
- ISzAlloc_Free(alloc, p->FoStartPackStreamIndex);
- ISzAlloc_Free(alloc, p->FoToCoderUnpackSizes);
- ISzAlloc_Free(alloc, p->FoToMainUnpackSizeIndex);
- ISzAlloc_Free(alloc, p->CoderUnpackSizes);
-
- ISzAlloc_Free(alloc, p->CodersData);
-
- SzAr_Init(p);
-}
-
-
-void SzArEx_Init(CSzArEx *p)
-{
- SzAr_Init(&p->db);
-
- p->NumFiles = 0;
- p->dataPos = 0;
-
- p->UnpackPositions = NULL;
- p->IsDirs = NULL;
-
- p->FolderToFile = NULL;
- p->FileToFolder = NULL;
-
- p->FileNameOffsets = NULL;
- p->FileNames = NULL;
-
- SzBitUi32s_INIT(&p->CRCs)
- SzBitUi32s_INIT(&p->Attribs)
- // SzBitUi32s_INIT(&p->Parents)
- SzBitUi64s_INIT(&p->MTime)
- SzBitUi64s_INIT(&p->CTime)
-}
-
-void SzArEx_Free(CSzArEx *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->UnpackPositions);
- ISzAlloc_Free(alloc, p->IsDirs);
-
- ISzAlloc_Free(alloc, p->FolderToFile);
- ISzAlloc_Free(alloc, p->FileToFolder);
-
- ISzAlloc_Free(alloc, p->FileNameOffsets);
- ISzAlloc_Free(alloc, p->FileNames);
-
- SzBitUi32s_Free(&p->CRCs, alloc);
- SzBitUi32s_Free(&p->Attribs, alloc);
- // SzBitUi32s_Free(&p->Parents, alloc);
- SzBitUi64s_Free(&p->MTime, alloc);
- SzBitUi64s_Free(&p->CTime, alloc);
-
- SzAr_Free(&p->db, alloc);
- SzArEx_Init(p);
-}
-
-
-static int TestSignatureCandidate(const Byte *testBytes)
-{
- unsigned i;
- for (i = 0; i < k7zSignatureSize; i++)
- if (testBytes[i] != k7zSignature[i])
- return 0;
- return 1;
-}
-
-#define SzData_CLEAR(p) { (p)->Data = NULL; (p)->Size = 0; }
-
-#define SZ_READ_BYTE_SD_NOCHECK(_sd_, dest) \
- (_sd_)->Size--; dest = *(_sd_)->Data++;
-
-#define SZ_READ_BYTE_SD(_sd_, dest) \
- if ((_sd_)->Size == 0) return SZ_ERROR_ARCHIVE; \
- SZ_READ_BYTE_SD_NOCHECK(_sd_, dest)
-
-#define SZ_READ_BYTE(dest) SZ_READ_BYTE_SD(sd, dest)
-
-#define SZ_READ_BYTE_2(dest) \
- if (sd.Size == 0) return SZ_ERROR_ARCHIVE; \
- sd.Size--; dest = *sd.Data++;
-
-#define SKIP_DATA(sd, size) { sd->Size -= (size_t)(size); sd->Data += (size_t)(size); }
-#define SKIP_DATA2(sd, size) { sd.Size -= (size_t)(size); sd.Data += (size_t)(size); }
-
-#define SZ_READ_32(dest) if (sd.Size < 4) return SZ_ERROR_ARCHIVE; \
- dest = GetUi32(sd.Data); SKIP_DATA2(sd, 4);
-
-static Z7_NO_INLINE SRes ReadNumber(CSzData *sd, UInt64 *value)
-{
- Byte firstByte, mask;
- unsigned i;
- UInt32 v;
-
- SZ_READ_BYTE(firstByte)
- if ((firstByte & 0x80) == 0)
- {
- *value = firstByte;
- return SZ_OK;
- }
- SZ_READ_BYTE(v)
- if ((firstByte & 0x40) == 0)
- {
- *value = (((UInt32)firstByte & 0x3F) << 8) | v;
- return SZ_OK;
- }
- SZ_READ_BYTE(mask)
- *value = v | ((UInt32)mask << 8);
- mask = 0x20;
- for (i = 2; i < 8; i++)
- {
- Byte b;
- if ((firstByte & mask) == 0)
- {
- const UInt64 highPart = (unsigned)firstByte & (unsigned)(mask - 1);
- *value |= (highPart << (8 * i));
- return SZ_OK;
- }
- SZ_READ_BYTE(b)
- *value |= ((UInt64)b << (8 * i));
- mask >>= 1;
- }
- return SZ_OK;
-}
-
-
-static Z7_NO_INLINE SRes SzReadNumber32(CSzData *sd, UInt32 *value)
-{
- Byte firstByte;
- UInt64 value64;
- if (sd->Size == 0)
- return SZ_ERROR_ARCHIVE;
- firstByte = *sd->Data;
- if ((firstByte & 0x80) == 0)
- {
- *value = firstByte;
- sd->Data++;
- sd->Size--;
- return SZ_OK;
- }
- RINOK(ReadNumber(sd, &value64))
- if (value64 >= (UInt32)0x80000000 - 1)
- return SZ_ERROR_UNSUPPORTED;
- if (value64 >= ((UInt64)(1) << ((sizeof(size_t) - 1) * 8 + 4)))
- return SZ_ERROR_UNSUPPORTED;
- *value = (UInt32)value64;
- return SZ_OK;
-}
-
-#define ReadID(sd, value) ReadNumber(sd, value)
-
-static SRes SkipData(CSzData *sd)
-{
- UInt64 size;
- RINOK(ReadNumber(sd, &size))
- if (size > sd->Size)
- return SZ_ERROR_ARCHIVE;
- SKIP_DATA(sd, size)
- return SZ_OK;
-}
-
-static SRes WaitId(CSzData *sd, UInt32 id)
-{
- for (;;)
- {
- UInt64 type;
- RINOK(ReadID(sd, &type))
- if (type == id)
- return SZ_OK;
- if (type == k7zIdEnd)
- return SZ_ERROR_ARCHIVE;
- RINOK(SkipData(sd))
- }
-}
-
-static SRes RememberBitVector(CSzData *sd, UInt32 numItems, const Byte **v)
-{
- const UInt32 numBytes = (numItems + 7) >> 3;
- if (numBytes > sd->Size)
- return SZ_ERROR_ARCHIVE;
- *v = sd->Data;
- SKIP_DATA(sd, numBytes)
- return SZ_OK;
-}
-
-static UInt32 CountDefinedBits(const Byte *bits, UInt32 numItems)
-{
- Byte b = 0;
- unsigned m = 0;
- UInt32 sum = 0;
- for (; numItems != 0; numItems--)
- {
- if (m == 0)
- {
- b = *bits++;
- m = 8;
- }
- m--;
- sum += ((b >> m) & 1);
- }
- return sum;
-}
-
-static Z7_NO_INLINE SRes ReadBitVector(CSzData *sd, UInt32 numItems, Byte **v, ISzAllocPtr alloc)
-{
- Byte allAreDefined;
- Byte *v2;
- const UInt32 numBytes = (numItems + 7) >> 3;
- *v = NULL;
- SZ_READ_BYTE(allAreDefined)
- if (numBytes == 0)
- return SZ_OK;
- if (allAreDefined == 0)
- {
- if (numBytes > sd->Size)
- return SZ_ERROR_ARCHIVE;
- MY_ALLOC_AND_CPY(*v, numBytes, sd->Data, alloc)
- SKIP_DATA(sd, numBytes)
- return SZ_OK;
- }
- MY_ALLOC(Byte, *v, numBytes, alloc)
- v2 = *v;
- memset(v2, 0xFF, (size_t)numBytes);
- {
- const unsigned numBits = (unsigned)numItems & 7;
- if (numBits != 0)
- v2[(size_t)numBytes - 1] = (Byte)((((UInt32)1 << numBits) - 1) << (8 - numBits));
- }
- return SZ_OK;
-}
-
-static Z7_NO_INLINE SRes ReadUi32s(CSzData *sd2, UInt32 numItems, CSzBitUi32s *crcs, ISzAllocPtr alloc)
-{
- UInt32 i;
- CSzData sd;
- UInt32 *vals;
- const Byte *defs;
- MY_ALLOC_ZE(UInt32, crcs->Vals, numItems, alloc)
- sd = *sd2;
- defs = crcs->Defs;
- vals = crcs->Vals;
- for (i = 0; i < numItems; i++)
- if (SzBitArray_Check(defs, i))
- {
- SZ_READ_32(vals[i])
- }
- else
- vals[i] = 0;
- *sd2 = sd;
- return SZ_OK;
-}
-
-static SRes ReadBitUi32s(CSzData *sd, UInt32 numItems, CSzBitUi32s *crcs, ISzAllocPtr alloc)
-{
- SzBitUi32s_Free(crcs, alloc);
- RINOK(ReadBitVector(sd, numItems, &crcs->Defs, alloc))
- return ReadUi32s(sd, numItems, crcs, alloc);
-}
-
-static SRes SkipBitUi32s(CSzData *sd, UInt32 numItems)
-{
- Byte allAreDefined;
- UInt32 numDefined = numItems;
- SZ_READ_BYTE(allAreDefined)
- if (!allAreDefined)
- {
- const size_t numBytes = (numItems + 7) >> 3;
- if (numBytes > sd->Size)
- return SZ_ERROR_ARCHIVE;
- numDefined = CountDefinedBits(sd->Data, numItems);
- SKIP_DATA(sd, numBytes)
- }
- if (numDefined > (sd->Size >> 2))
- return SZ_ERROR_ARCHIVE;
- SKIP_DATA(sd, (size_t)numDefined * 4)
- return SZ_OK;
-}
-
-static SRes ReadPackInfo(CSzAr *p, CSzData *sd, ISzAllocPtr alloc)
-{
- RINOK(SzReadNumber32(sd, &p->NumPackStreams))
-
- RINOK(WaitId(sd, k7zIdSize))
- MY_ALLOC(UInt64, p->PackPositions, (size_t)p->NumPackStreams + 1, alloc)
- {
- UInt64 sum = 0;
- UInt32 i;
- const UInt32 numPackStreams = p->NumPackStreams;
- for (i = 0; i < numPackStreams; i++)
- {
- UInt64 packSize;
- p->PackPositions[i] = sum;
- RINOK(ReadNumber(sd, &packSize))
- sum += packSize;
- if (sum < packSize)
- return SZ_ERROR_ARCHIVE;
- }
- p->PackPositions[i] = sum;
- }
-
- for (;;)
- {
- UInt64 type;
- RINOK(ReadID(sd, &type))
- if (type == k7zIdEnd)
- return SZ_OK;
- if (type == k7zIdCRC)
- {
- /* CRC of packed streams is unused now */
- RINOK(SkipBitUi32s(sd, p->NumPackStreams))
- continue;
- }
- RINOK(SkipData(sd))
- }
-}
-
-/*
-static SRes SzReadSwitch(CSzData *sd)
-{
- Byte external;
- RINOK(SzReadByte(sd, &external));
- return (external == 0) ? SZ_OK: SZ_ERROR_UNSUPPORTED;
-}
-*/
-
-#define k_NumCodersStreams_in_Folder_MAX (SZ_NUM_BONDS_IN_FOLDER_MAX + SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX)
-
-SRes SzGetNextFolderItem(CSzFolder *f, CSzData *sd)
-{
- UInt32 numCoders, i;
- UInt32 numInStreams = 0;
- const Byte *dataStart = sd->Data;
-
- f->NumCoders = 0;
- f->NumBonds = 0;
- f->NumPackStreams = 0;
- f->UnpackStream = 0;
-
- RINOK(SzReadNumber32(sd, &numCoders))
- if (numCoders == 0 || numCoders > SZ_NUM_CODERS_IN_FOLDER_MAX)
- return SZ_ERROR_UNSUPPORTED;
-
- for (i = 0; i < numCoders; i++)
- {
- Byte mainByte;
- CSzCoderInfo *coder = f->Coders + i;
- unsigned idSize, j;
- UInt64 id;
-
- SZ_READ_BYTE(mainByte)
- if ((mainByte & 0xC0) != 0)
- return SZ_ERROR_UNSUPPORTED;
-
- idSize = (unsigned)(mainByte & 0xF);
- if (idSize > sizeof(id))
- return SZ_ERROR_UNSUPPORTED;
- if (idSize > sd->Size)
- return SZ_ERROR_ARCHIVE;
- id = 0;
- for (j = 0; j < idSize; j++)
- {
- id = ((id << 8) | *sd->Data);
- sd->Data++;
- sd->Size--;
- }
- if (id > (UInt32)0xFFFFFFFF)
- return SZ_ERROR_UNSUPPORTED;
- coder->MethodID = (UInt32)id;
-
- coder->NumStreams = 1;
- coder->PropsOffset = 0;
- coder->PropsSize = 0;
-
- if ((mainByte & 0x10) != 0)
- {
- UInt32 numStreams;
-
- RINOK(SzReadNumber32(sd, &numStreams))
- if (numStreams > k_NumCodersStreams_in_Folder_MAX)
- return SZ_ERROR_UNSUPPORTED;
- coder->NumStreams = (Byte)numStreams;
-
- RINOK(SzReadNumber32(sd, &numStreams))
- if (numStreams != 1)
- return SZ_ERROR_UNSUPPORTED;
- }
-
- numInStreams += coder->NumStreams;
-
- if (numInStreams > k_NumCodersStreams_in_Folder_MAX)
- return SZ_ERROR_UNSUPPORTED;
-
- if ((mainByte & 0x20) != 0)
- {
- UInt32 propsSize = 0;
- RINOK(SzReadNumber32(sd, &propsSize))
- if (propsSize > sd->Size)
- return SZ_ERROR_ARCHIVE;
- if (propsSize >= 0x80)
- return SZ_ERROR_UNSUPPORTED;
- coder->PropsOffset = (size_t)(sd->Data - dataStart);
- coder->PropsSize = (Byte)propsSize;
- sd->Data += (size_t)propsSize;
- sd->Size -= (size_t)propsSize;
- }
- }
-
- /*
- if (numInStreams == 1 && numCoders == 1)
- {
- f->NumPackStreams = 1;
- f->PackStreams[0] = 0;
- }
- else
- */
- {
- Byte streamUsed[k_NumCodersStreams_in_Folder_MAX];
- UInt32 numBonds, numPackStreams;
-
- numBonds = numCoders - 1;
- if (numInStreams < numBonds)
- return SZ_ERROR_ARCHIVE;
- if (numBonds > SZ_NUM_BONDS_IN_FOLDER_MAX)
- return SZ_ERROR_UNSUPPORTED;
- f->NumBonds = numBonds;
-
- numPackStreams = numInStreams - numBonds;
- if (numPackStreams > SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX)
- return SZ_ERROR_UNSUPPORTED;
- f->NumPackStreams = numPackStreams;
-
- for (i = 0; i < numInStreams; i++)
- streamUsed[i] = False;
-
- if (numBonds != 0)
- {
- Byte coderUsed[SZ_NUM_CODERS_IN_FOLDER_MAX];
-
- for (i = 0; i < numCoders; i++)
- coderUsed[i] = False;
-
- for (i = 0; i < numBonds; i++)
- {
- CSzBond *bp = f->Bonds + i;
-
- RINOK(SzReadNumber32(sd, &bp->InIndex))
- if (bp->InIndex >= numInStreams || streamUsed[bp->InIndex])
- return SZ_ERROR_ARCHIVE;
- streamUsed[bp->InIndex] = True;
-
- RINOK(SzReadNumber32(sd, &bp->OutIndex))
- if (bp->OutIndex >= numCoders || coderUsed[bp->OutIndex])
- return SZ_ERROR_ARCHIVE;
- coderUsed[bp->OutIndex] = True;
- }
-
- for (i = 0; i < numCoders; i++)
- if (!coderUsed[i])
- {
- f->UnpackStream = i;
- break;
- }
-
- if (i == numCoders)
- return SZ_ERROR_ARCHIVE;
- }
-
- if (numPackStreams == 1)
- {
- for (i = 0; i < numInStreams; i++)
- if (!streamUsed[i])
- break;
- if (i == numInStreams)
- return SZ_ERROR_ARCHIVE;
- f->PackStreams[0] = i;
- }
- else
- for (i = 0; i < numPackStreams; i++)
- {
- UInt32 index;
- RINOK(SzReadNumber32(sd, &index))
- if (index >= numInStreams || streamUsed[index])
- return SZ_ERROR_ARCHIVE;
- streamUsed[index] = True;
- f->PackStreams[i] = index;
- }
- }
-
- f->NumCoders = numCoders;
-
- return SZ_OK;
-}
-
-
-static Z7_NO_INLINE SRes SkipNumbers(CSzData *sd2, UInt32 num)
-{
- CSzData sd;
- sd = *sd2;
- for (; num != 0; num--)
- {
- Byte firstByte, mask;
- unsigned i;
- SZ_READ_BYTE_2(firstByte)
- if ((firstByte & 0x80) == 0)
- continue;
- if ((firstByte & 0x40) == 0)
- {
- if (sd.Size == 0)
- return SZ_ERROR_ARCHIVE;
- sd.Size--;
- sd.Data++;
- continue;
- }
- mask = 0x20;
- for (i = 2; i < 8 && (firstByte & mask) != 0; i++)
- mask >>= 1;
- if (i > sd.Size)
- return SZ_ERROR_ARCHIVE;
- SKIP_DATA2(sd, i)
- }
- *sd2 = sd;
- return SZ_OK;
-}
-
-
-#define k_Scan_NumCoders_MAX 64
-#define k_Scan_NumCodersStreams_in_Folder_MAX 64
-
-
-static SRes ReadUnpackInfo(CSzAr *p,
- CSzData *sd2,
- UInt32 numFoldersMax,
- const CBuf *tempBufs, UInt32 numTempBufs,
- ISzAllocPtr alloc)
-{
- CSzData sd;
-
- UInt32 fo, numFolders, numCodersOutStreams, packStreamIndex;
- const Byte *startBufPtr;
- Byte external;
-
- RINOK(WaitId(sd2, k7zIdFolder))
-
- RINOK(SzReadNumber32(sd2, &numFolders))
- if (numFolders > numFoldersMax)
- return SZ_ERROR_UNSUPPORTED;
- p->NumFolders = numFolders;
-
- SZ_READ_BYTE_SD(sd2, external)
- if (external == 0)
- sd = *sd2;
- else
- {
- UInt32 index;
- RINOK(SzReadNumber32(sd2, &index))
- if (index >= numTempBufs)
- return SZ_ERROR_ARCHIVE;
- sd.Data = tempBufs[index].data;
- sd.Size = tempBufs[index].size;
- }
-
- MY_ALLOC(size_t, p->FoCodersOffsets, (size_t)numFolders + 1, alloc)
- MY_ALLOC(UInt32, p->FoStartPackStreamIndex, (size_t)numFolders + 1, alloc)
- MY_ALLOC(UInt32, p->FoToCoderUnpackSizes, (size_t)numFolders + 1, alloc)
- MY_ALLOC_ZE(Byte, p->FoToMainUnpackSizeIndex, (size_t)numFolders, alloc)
-
- startBufPtr = sd.Data;
-
- packStreamIndex = 0;
- numCodersOutStreams = 0;
-
- for (fo = 0; fo < numFolders; fo++)
- {
- UInt32 numCoders, ci, numInStreams = 0;
-
- p->FoCodersOffsets[fo] = (size_t)(sd.Data - startBufPtr);
-
- RINOK(SzReadNumber32(&sd, &numCoders))
- if (numCoders == 0 || numCoders > k_Scan_NumCoders_MAX)
- return SZ_ERROR_UNSUPPORTED;
-
- for (ci = 0; ci < numCoders; ci++)
- {
- Byte mainByte;
- unsigned idSize;
- UInt32 coderInStreams;
-
- SZ_READ_BYTE_2(mainByte)
- if ((mainByte & 0xC0) != 0)
- return SZ_ERROR_UNSUPPORTED;
- idSize = (mainByte & 0xF);
- if (idSize > 8)
- return SZ_ERROR_UNSUPPORTED;
- if (idSize > sd.Size)
- return SZ_ERROR_ARCHIVE;
- SKIP_DATA2(sd, idSize)
-
- coderInStreams = 1;
-
- if ((mainByte & 0x10) != 0)
- {
- UInt32 coderOutStreams;
- RINOK(SzReadNumber32(&sd, &coderInStreams))
- RINOK(SzReadNumber32(&sd, &coderOutStreams))
- if (coderInStreams > k_Scan_NumCodersStreams_in_Folder_MAX || coderOutStreams != 1)
- return SZ_ERROR_UNSUPPORTED;
- }
-
- numInStreams += coderInStreams;
-
- if ((mainByte & 0x20) != 0)
- {
- UInt32 propsSize;
- RINOK(SzReadNumber32(&sd, &propsSize))
- if (propsSize > sd.Size)
- return SZ_ERROR_ARCHIVE;
- SKIP_DATA2(sd, propsSize)
- }
- }
-
- {
- UInt32 indexOfMainStream = 0;
- UInt32 numPackStreams = 1;
-
- if (numCoders != 1 || numInStreams != 1)
- {
- Byte streamUsed[k_Scan_NumCodersStreams_in_Folder_MAX];
- Byte coderUsed[k_Scan_NumCoders_MAX];
-
- UInt32 i;
- const UInt32 numBonds = numCoders - 1;
- if (numInStreams < numBonds)
- return SZ_ERROR_ARCHIVE;
-
- if (numInStreams > k_Scan_NumCodersStreams_in_Folder_MAX)
- return SZ_ERROR_UNSUPPORTED;
-
- for (i = 0; i < numInStreams; i++)
- streamUsed[i] = False;
- for (i = 0; i < numCoders; i++)
- coderUsed[i] = False;
-
- for (i = 0; i < numBonds; i++)
- {
- UInt32 index;
-
- RINOK(SzReadNumber32(&sd, &index))
- if (index >= numInStreams || streamUsed[index])
- return SZ_ERROR_ARCHIVE;
- streamUsed[index] = True;
-
- RINOK(SzReadNumber32(&sd, &index))
- if (index >= numCoders || coderUsed[index])
- return SZ_ERROR_ARCHIVE;
- coderUsed[index] = True;
- }
-
- numPackStreams = numInStreams - numBonds;
-
- if (numPackStreams != 1)
- for (i = 0; i < numPackStreams; i++)
- {
- UInt32 index;
- RINOK(SzReadNumber32(&sd, &index))
- if (index >= numInStreams || streamUsed[index])
- return SZ_ERROR_ARCHIVE;
- streamUsed[index] = True;
- }
-
- for (i = 0; i < numCoders; i++)
- if (!coderUsed[i])
- {
- indexOfMainStream = i;
- break;
- }
-
- if (i == numCoders)
- return SZ_ERROR_ARCHIVE;
- }
-
- p->FoStartPackStreamIndex[fo] = packStreamIndex;
- p->FoToCoderUnpackSizes[fo] = numCodersOutStreams;
- p->FoToMainUnpackSizeIndex[fo] = (Byte)indexOfMainStream;
- numCodersOutStreams += numCoders;
- if (numCodersOutStreams < numCoders)
- return SZ_ERROR_UNSUPPORTED;
- if (numPackStreams > p->NumPackStreams - packStreamIndex)
- return SZ_ERROR_ARCHIVE;
- packStreamIndex += numPackStreams;
- }
- }
-
- p->FoToCoderUnpackSizes[fo] = numCodersOutStreams;
-
- {
- const size_t dataSize = (size_t)(sd.Data - startBufPtr);
- p->FoStartPackStreamIndex[fo] = packStreamIndex;
- p->FoCodersOffsets[fo] = dataSize;
- MY_ALLOC_ZE_AND_CPY(p->CodersData, dataSize, startBufPtr, alloc)
- }
-
- if (external != 0)
- {
- if (sd.Size != 0)
- return SZ_ERROR_ARCHIVE;
- sd = *sd2;
- }
-
- RINOK(WaitId(&sd, k7zIdCodersUnpackSize))
-
- MY_ALLOC_ZE(UInt64, p->CoderUnpackSizes, (size_t)numCodersOutStreams, alloc)
- {
- UInt32 i;
- for (i = 0; i < numCodersOutStreams; i++)
- {
- RINOK(ReadNumber(&sd, p->CoderUnpackSizes + i))
- }
- }
-
- for (;;)
- {
- UInt64 type;
- RINOK(ReadID(&sd, &type))
- if (type == k7zIdEnd)
- {
- *sd2 = sd;
- return SZ_OK;
- }
- if (type == k7zIdCRC)
- {
- RINOK(ReadBitUi32s(&sd, numFolders, &p->FolderCRCs, alloc))
- continue;
- }
- RINOK(SkipData(&sd))
- }
-}
-
-
-UInt64 SzAr_GetFolderUnpackSize(const CSzAr *p, UInt32 folderIndex)
-{
- return p->CoderUnpackSizes[p->FoToCoderUnpackSizes[folderIndex] + p->FoToMainUnpackSizeIndex[folderIndex]];
-}
-
-
-typedef struct
-{
- UInt32 NumTotalSubStreams;
- UInt32 NumSubDigests;
- CSzData sdNumSubStreams;
- CSzData sdSizes;
- CSzData sdCRCs;
-} CSubStreamInfo;
-
-
-static SRes ReadSubStreamsInfo(CSzAr *p, CSzData *sd, CSubStreamInfo *ssi)
-{
- UInt64 type = 0;
- UInt32 numSubDigests = 0;
- const UInt32 numFolders = p->NumFolders;
- UInt32 numUnpackStreams = numFolders;
- UInt32 numUnpackSizesInData = 0;
-
- for (;;)
- {
- RINOK(ReadID(sd, &type))
- if (type == k7zIdNumUnpackStream)
- {
- UInt32 i;
- ssi->sdNumSubStreams.Data = sd->Data;
- numUnpackStreams = 0;
- numSubDigests = 0;
- for (i = 0; i < numFolders; i++)
- {
- UInt32 numStreams;
- RINOK(SzReadNumber32(sd, &numStreams))
- if (numUnpackStreams > numUnpackStreams + numStreams)
- return SZ_ERROR_UNSUPPORTED;
- numUnpackStreams += numStreams;
- if (numStreams != 0)
- numUnpackSizesInData += (numStreams - 1);
- if (numStreams != 1 || !SzBitWithVals_Check(&p->FolderCRCs, i))
- numSubDigests += numStreams;
- }
- ssi->sdNumSubStreams.Size = (size_t)(sd->Data - ssi->sdNumSubStreams.Data);
- continue;
- }
- if (type == k7zIdCRC || type == k7zIdSize || type == k7zIdEnd)
- break;
- RINOK(SkipData(sd))
- }
-
- if (!ssi->sdNumSubStreams.Data)
- {
- numSubDigests = numFolders;
- if (p->FolderCRCs.Defs)
- numSubDigests = numFolders - CountDefinedBits(p->FolderCRCs.Defs, numFolders);
- }
-
- ssi->NumTotalSubStreams = numUnpackStreams;
- ssi->NumSubDigests = numSubDigests;
-
- if (type == k7zIdSize)
- {
- ssi->sdSizes.Data = sd->Data;
- RINOK(SkipNumbers(sd, numUnpackSizesInData))
- ssi->sdSizes.Size = (size_t)(sd->Data - ssi->sdSizes.Data);
- RINOK(ReadID(sd, &type))
- }
-
- for (;;)
- {
- if (type == k7zIdEnd)
- return SZ_OK;
- if (type == k7zIdCRC)
- {
- ssi->sdCRCs.Data = sd->Data;
- RINOK(SkipBitUi32s(sd, numSubDigests))
- ssi->sdCRCs.Size = (size_t)(sd->Data - ssi->sdCRCs.Data);
- }
- else
- {
- RINOK(SkipData(sd))
- }
- RINOK(ReadID(sd, &type))
- }
-}
-
-static SRes SzReadStreamsInfo(CSzAr *p,
- CSzData *sd,
- UInt32 numFoldersMax, const CBuf *tempBufs, UInt32 numTempBufs,
- UInt64 *dataOffset,
- CSubStreamInfo *ssi,
- ISzAllocPtr alloc)
-{
- UInt64 type;
-
- SzData_CLEAR(&ssi->sdSizes)
- SzData_CLEAR(&ssi->sdCRCs)
- SzData_CLEAR(&ssi->sdNumSubStreams)
-
- *dataOffset = 0;
- RINOK(ReadID(sd, &type))
- if (type == k7zIdPackInfo)
- {
- RINOK(ReadNumber(sd, dataOffset))
- if (*dataOffset > p->RangeLimit)
- return SZ_ERROR_ARCHIVE;
- RINOK(ReadPackInfo(p, sd, alloc))
- if (p->PackPositions[p->NumPackStreams] > p->RangeLimit - *dataOffset)
- return SZ_ERROR_ARCHIVE;
- RINOK(ReadID(sd, &type))
- }
- if (type == k7zIdUnpackInfo)
- {
- RINOK(ReadUnpackInfo(p, sd, numFoldersMax, tempBufs, numTempBufs, alloc))
- RINOK(ReadID(sd, &type))
- }
- if (type == k7zIdSubStreamsInfo)
- {
- RINOK(ReadSubStreamsInfo(p, sd, ssi))
- RINOK(ReadID(sd, &type))
- }
- else
- {
- ssi->NumTotalSubStreams = p->NumFolders;
- // ssi->NumSubDigests = 0;
- }
-
- return (type == k7zIdEnd ? SZ_OK : SZ_ERROR_UNSUPPORTED);
-}
-
-static SRes SzReadAndDecodePackedStreams(
- ILookInStreamPtr inStream,
- CSzData *sd,
- CBuf *tempBufs,
- UInt32 numFoldersMax,
- UInt64 baseOffset,
- CSzAr *p,
- ISzAllocPtr allocTemp)
-{
- UInt64 dataStartPos;
- UInt32 fo;
- CSubStreamInfo ssi;
-
- RINOK(SzReadStreamsInfo(p, sd, numFoldersMax, NULL, 0, &dataStartPos, &ssi, allocTemp))
-
- dataStartPos += baseOffset;
- if (p->NumFolders == 0)
- return SZ_ERROR_ARCHIVE;
-
- for (fo = 0; fo < p->NumFolders; fo++)
- Buf_Init(tempBufs + fo);
-
- for (fo = 0; fo < p->NumFolders; fo++)
- {
- CBuf *tempBuf = tempBufs + fo;
- const UInt64 unpackSize = SzAr_GetFolderUnpackSize(p, fo);
- if ((size_t)unpackSize != unpackSize)
- return SZ_ERROR_MEM;
- if (!Buf_Create(tempBuf, (size_t)unpackSize, allocTemp))
- return SZ_ERROR_MEM;
- }
-
- for (fo = 0; fo < p->NumFolders; fo++)
- {
- const CBuf *tempBuf = tempBufs + fo;
- RINOK(LookInStream_SeekTo(inStream, dataStartPos))
- RINOK(SzAr_DecodeFolder(p, fo, inStream, dataStartPos, tempBuf->data, tempBuf->size, allocTemp))
- }
-
- return SZ_OK;
-}
-
-static SRes SzReadFileNames(const Byte *data, size_t size, UInt32 numFiles, size_t *offsets)
-{
- size_t pos = 0;
- *offsets++ = 0;
- if (numFiles == 0)
- return (size == 0) ? SZ_OK : SZ_ERROR_ARCHIVE;
- if (size < 2)
- return SZ_ERROR_ARCHIVE;
- if (data[size - 2] != 0 || data[size - 1] != 0)
- return SZ_ERROR_ARCHIVE;
- do
- {
- const Byte *p;
- if (pos == size)
- return SZ_ERROR_ARCHIVE;
- for (p = data + pos;
- #ifdef _WIN32
- *(const UInt16 *)(const void *)p != 0
- #else
- p[0] != 0 || p[1] != 0
- #endif
- ; p += 2);
- pos = (size_t)(p - data) + 2;
- *offsets++ = (pos >> 1);
- }
- while (--numFiles);
- return (pos == size) ? SZ_OK : SZ_ERROR_ARCHIVE;
-}
-
-static Z7_NO_INLINE SRes ReadTime(CSzBitUi64s *p, UInt32 num,
- CSzData *sd2,
- const CBuf *tempBufs, UInt32 numTempBufs,
- ISzAllocPtr alloc)
-{
- CSzData sd;
- UInt32 i;
- CNtfsFileTime *vals;
- Byte *defs;
- Byte external;
-
- RINOK(ReadBitVector(sd2, num, &p->Defs, alloc))
-
- SZ_READ_BYTE_SD(sd2, external)
- if (external == 0)
- sd = *sd2;
- else
- {
- UInt32 index;
- RINOK(SzReadNumber32(sd2, &index))
- if (index >= numTempBufs)
- return SZ_ERROR_ARCHIVE;
- sd.Data = tempBufs[index].data;
- sd.Size = tempBufs[index].size;
- }
-
- MY_ALLOC_ZE(CNtfsFileTime, p->Vals, num, alloc)
- vals = p->Vals;
- defs = p->Defs;
- for (i = 0; i < num; i++)
- if (SzBitArray_Check(defs, i))
- {
- if (sd.Size < 8)
- return SZ_ERROR_ARCHIVE;
- vals[i].Low = GetUi32(sd.Data);
- vals[i].High = GetUi32(sd.Data + 4);
- SKIP_DATA2(sd, 8)
- }
- else
- vals[i].High = vals[i].Low = 0;
-
- if (external == 0)
- *sd2 = sd;
-
- return SZ_OK;
-}
-
-
-#define NUM_ADDITIONAL_STREAMS_MAX 8
-
-
-static SRes SzReadHeader2(
- CSzArEx *p, /* allocMain */
- CSzData *sd,
- ILookInStreamPtr inStream,
- CBuf *tempBufs, UInt32 *numTempBufs,
- ISzAllocPtr allocMain,
- ISzAllocPtr allocTemp
- )
-{
- CSubStreamInfo ssi;
-
-{
- UInt64 type;
-
- SzData_CLEAR(&ssi.sdSizes)
- SzData_CLEAR(&ssi.sdCRCs)
- SzData_CLEAR(&ssi.sdNumSubStreams)
-
- ssi.NumSubDigests = 0;
- ssi.NumTotalSubStreams = 0;
-
- RINOK(ReadID(sd, &type))
-
- if (type == k7zIdArchiveProperties)
- {
- for (;;)
- {
- UInt64 type2;
- RINOK(ReadID(sd, &type2))
- if (type2 == k7zIdEnd)
- break;
- RINOK(SkipData(sd))
- }
- RINOK(ReadID(sd, &type))
- }
-
- if (type == k7zIdAdditionalStreamsInfo)
- {
- CSzAr tempAr;
- SRes res;
-
- SzAr_Init(&tempAr);
- tempAr.RangeLimit = p->db.RangeLimit;
-
- res = SzReadAndDecodePackedStreams(inStream, sd, tempBufs, NUM_ADDITIONAL_STREAMS_MAX,
- p->startPosAfterHeader, &tempAr, allocTemp);
- *numTempBufs = tempAr.NumFolders;
- SzAr_Free(&tempAr, allocTemp);
-
- if (res != SZ_OK)
- return res;
- RINOK(ReadID(sd, &type))
- }
-
- if (type == k7zIdMainStreamsInfo)
- {
- RINOK(SzReadStreamsInfo(&p->db, sd, (UInt32)1 << 30, tempBufs, *numTempBufs,
- &p->dataPos, &ssi, allocMain))
- p->dataPos += p->startPosAfterHeader;
- RINOK(ReadID(sd, &type))
- }
-
- if (type == k7zIdEnd)
- {
- return SZ_OK;
- }
-
- if (type != k7zIdFilesInfo)
- return SZ_ERROR_ARCHIVE;
-}
-
-{
- UInt32 numFiles = 0;
- UInt32 numEmptyStreams = 0;
- const Byte *emptyStreams = NULL;
- const Byte *emptyFiles = NULL;
-
- RINOK(SzReadNumber32(sd, &numFiles))
- p->NumFiles = numFiles;
-
- for (;;)
- {
- UInt64 type;
- UInt64 size;
- RINOK(ReadID(sd, &type))
- if (type == k7zIdEnd)
- break;
- RINOK(ReadNumber(sd, &size))
- if (size > sd->Size)
- return SZ_ERROR_ARCHIVE;
-
- if (type >= ((UInt32)1 << 8))
- {
- SKIP_DATA(sd, size)
- }
- else switch ((unsigned)type)
- {
- case k7zIdName:
- {
- size_t namesSize;
- const Byte *namesData;
- Byte external;
-
- SZ_READ_BYTE(external)
- if (external == 0)
- {
- namesSize = (size_t)size - 1;
- namesData = sd->Data;
- }
- else
- {
- UInt32 index;
- RINOK(SzReadNumber32(sd, &index))
- if (index >= *numTempBufs)
- return SZ_ERROR_ARCHIVE;
- namesData = (tempBufs)[index].data;
- namesSize = (tempBufs)[index].size;
- }
-
- if ((namesSize & 1) != 0)
- return SZ_ERROR_ARCHIVE;
- MY_ALLOC(size_t, p->FileNameOffsets, numFiles + 1, allocMain)
- MY_ALLOC_ZE_AND_CPY(p->FileNames, namesSize, namesData, allocMain)
- RINOK(SzReadFileNames(p->FileNames, namesSize, numFiles, p->FileNameOffsets))
- if (external == 0)
- {
- SKIP_DATA(sd, namesSize)
- }
- break;
- }
- case k7zIdEmptyStream:
- {
- RINOK(RememberBitVector(sd, numFiles, &emptyStreams))
- numEmptyStreams = CountDefinedBits(emptyStreams, numFiles);
- emptyFiles = NULL;
- break;
- }
- case k7zIdEmptyFile:
- {
- RINOK(RememberBitVector(sd, numEmptyStreams, &emptyFiles))
- break;
- }
- case k7zIdWinAttrib:
- {
- Byte external;
- CSzData sdSwitch;
- CSzData *sdPtr;
- SzBitUi32s_Free(&p->Attribs, allocMain);
- RINOK(ReadBitVector(sd, numFiles, &p->Attribs.Defs, allocMain))
-
- SZ_READ_BYTE(external)
- if (external == 0)
- sdPtr = sd;
- else
- {
- UInt32 index;
- RINOK(SzReadNumber32(sd, &index))
- if (index >= *numTempBufs)
- return SZ_ERROR_ARCHIVE;
- sdSwitch.Data = (tempBufs)[index].data;
- sdSwitch.Size = (tempBufs)[index].size;
- sdPtr = &sdSwitch;
- }
- RINOK(ReadUi32s(sdPtr, numFiles, &p->Attribs, allocMain))
- break;
- }
- /*
- case k7zParent:
- {
- SzBitUi32s_Free(&p->Parents, allocMain);
- RINOK(ReadBitVector(sd, numFiles, &p->Parents.Defs, allocMain));
- RINOK(SzReadSwitch(sd));
- RINOK(ReadUi32s(sd, numFiles, &p->Parents, allocMain));
- break;
- }
- */
- case k7zIdMTime: RINOK(ReadTime(&p->MTime, numFiles, sd, tempBufs, *numTempBufs, allocMain)) break;
- case k7zIdCTime: RINOK(ReadTime(&p->CTime, numFiles, sd, tempBufs, *numTempBufs, allocMain)) break;
- default:
- {
- SKIP_DATA(sd, size)
- }
- }
- }
-
- if (numFiles - numEmptyStreams != ssi.NumTotalSubStreams)
- return SZ_ERROR_ARCHIVE;
-
- for (;;)
- {
- UInt64 type;
- RINOK(ReadID(sd, &type))
- if (type == k7zIdEnd)
- break;
- RINOK(SkipData(sd))
- }
-
- {
- UInt32 i;
- UInt32 emptyFileIndex = 0;
- UInt32 folderIndex = 0;
- UInt32 remSubStreams = 0;
- UInt32 numSubStreams = 0;
- UInt64 unpackPos = 0;
- const Byte *digestsDefs = NULL;
- const Byte *digestsVals = NULL;
- UInt32 digestIndex = 0;
- Byte isDirMask = 0;
- Byte crcMask = 0;
- Byte mask = 0x80;
-
- MY_ALLOC(UInt32, p->FolderToFile, p->db.NumFolders + 1, allocMain)
- MY_ALLOC_ZE(UInt32, p->FileToFolder, p->NumFiles, allocMain)
- MY_ALLOC(UInt64, p->UnpackPositions, p->NumFiles + 1, allocMain)
- MY_ALLOC_ZE(Byte, p->IsDirs, (p->NumFiles + 7) >> 3, allocMain)
-
- RINOK(SzBitUi32s_Alloc(&p->CRCs, p->NumFiles, allocMain))
-
- if (ssi.sdCRCs.Size != 0)
- {
- Byte allDigestsDefined = 0;
- SZ_READ_BYTE_SD_NOCHECK(&ssi.sdCRCs, allDigestsDefined)
- if (allDigestsDefined)
- digestsVals = ssi.sdCRCs.Data;
- else
- {
- const size_t numBytes = (ssi.NumSubDigests + 7) >> 3;
- digestsDefs = ssi.sdCRCs.Data;
- digestsVals = digestsDefs + numBytes;
- }
- }
-
- for (i = 0; i < numFiles; i++, mask >>= 1)
- {
- if (mask == 0)
- {
- const UInt32 byteIndex = (i - 1) >> 3;
- p->IsDirs[byteIndex] = isDirMask;
- p->CRCs.Defs[byteIndex] = crcMask;
- isDirMask = 0;
- crcMask = 0;
- mask = 0x80;
- }
-
- p->UnpackPositions[i] = unpackPos;
- p->CRCs.Vals[i] = 0;
-
- if (emptyStreams && SzBitArray_Check(emptyStreams, i))
- {
- if (emptyFiles)
- {
- if (!SzBitArray_Check(emptyFiles, emptyFileIndex))
- isDirMask |= mask;
- emptyFileIndex++;
- }
- else
- isDirMask |= mask;
- if (remSubStreams == 0)
- {
- p->FileToFolder[i] = (UInt32)-1;
- continue;
- }
- }
-
- if (remSubStreams == 0)
- {
- for (;;)
- {
- if (folderIndex >= p->db.NumFolders)
- return SZ_ERROR_ARCHIVE;
- p->FolderToFile[folderIndex] = i;
- numSubStreams = 1;
- if (ssi.sdNumSubStreams.Data)
- {
- RINOK(SzReadNumber32(&ssi.sdNumSubStreams, &numSubStreams))
- }
- remSubStreams = numSubStreams;
- if (numSubStreams != 0)
- break;
- {
- const UInt64 folderUnpackSize = SzAr_GetFolderUnpackSize(&p->db, folderIndex);
- unpackPos += folderUnpackSize;
- if (unpackPos < folderUnpackSize)
- return SZ_ERROR_ARCHIVE;
- }
- folderIndex++;
- }
- }
-
- p->FileToFolder[i] = folderIndex;
-
- if (emptyStreams && SzBitArray_Check(emptyStreams, i))
- continue;
-
- if (--remSubStreams == 0)
- {
- const UInt64 folderUnpackSize = SzAr_GetFolderUnpackSize(&p->db, folderIndex);
- const UInt64 startFolderUnpackPos = p->UnpackPositions[p->FolderToFile[folderIndex]];
- if (folderUnpackSize < unpackPos - startFolderUnpackPos)
- return SZ_ERROR_ARCHIVE;
- unpackPos = startFolderUnpackPos + folderUnpackSize;
- if (unpackPos < folderUnpackSize)
- return SZ_ERROR_ARCHIVE;
-
- if (numSubStreams == 1 && SzBitWithVals_Check(&p->db.FolderCRCs, folderIndex))
- {
- p->CRCs.Vals[i] = p->db.FolderCRCs.Vals[folderIndex];
- crcMask |= mask;
- }
- folderIndex++;
- }
- else
- {
- UInt64 v;
- RINOK(ReadNumber(&ssi.sdSizes, &v))
- unpackPos += v;
- if (unpackPos < v)
- return SZ_ERROR_ARCHIVE;
- }
- if ((crcMask & mask) == 0 && digestsVals)
- {
- if (!digestsDefs || SzBitArray_Check(digestsDefs, digestIndex))
- {
- p->CRCs.Vals[i] = GetUi32(digestsVals);
- digestsVals += 4;
- crcMask |= mask;
- }
- digestIndex++;
- }
- }
-
- if (mask != 0x80)
- {
- const UInt32 byteIndex = (i - 1) >> 3;
- p->IsDirs[byteIndex] = isDirMask;
- p->CRCs.Defs[byteIndex] = crcMask;
- }
-
- p->UnpackPositions[i] = unpackPos;
-
- if (remSubStreams != 0)
- return SZ_ERROR_ARCHIVE;
-
- for (;;)
- {
- p->FolderToFile[folderIndex] = i;
- if (folderIndex >= p->db.NumFolders)
- break;
- if (!ssi.sdNumSubStreams.Data)
- return SZ_ERROR_ARCHIVE;
- RINOK(SzReadNumber32(&ssi.sdNumSubStreams, &numSubStreams))
- if (numSubStreams != 0)
- return SZ_ERROR_ARCHIVE;
- /*
- {
- UInt64 folderUnpackSize = SzAr_GetFolderUnpackSize(&p->db, folderIndex);
- unpackPos += folderUnpackSize;
- if (unpackPos < folderUnpackSize)
- return SZ_ERROR_ARCHIVE;
- }
- */
- folderIndex++;
- }
-
- if (ssi.sdNumSubStreams.Data && ssi.sdNumSubStreams.Size != 0)
- return SZ_ERROR_ARCHIVE;
- }
-}
- return SZ_OK;
-}
-
-
-static SRes SzReadHeader(
- CSzArEx *p,
- CSzData *sd,
- ILookInStreamPtr inStream,
- ISzAllocPtr allocMain,
- ISzAllocPtr allocTemp)
-{
- UInt32 i;
- UInt32 numTempBufs = 0;
- SRes res;
- CBuf tempBufs[NUM_ADDITIONAL_STREAMS_MAX];
-
- for (i = 0; i < NUM_ADDITIONAL_STREAMS_MAX; i++)
- Buf_Init(tempBufs + i);
-
- res = SzReadHeader2(p, sd, inStream,
- tempBufs, &numTempBufs,
- allocMain, allocTemp);
-
- for (i = 0; i < NUM_ADDITIONAL_STREAMS_MAX; i++)
- Buf_Free(tempBufs + i, allocTemp);
-
- RINOK(res)
-
- if (sd->Size != 0)
- return SZ_ERROR_FAIL;
-
- return res;
-}
-
-static SRes SzArEx_Open2(
- CSzArEx *p,
- ILookInStreamPtr inStream,
- ISzAllocPtr allocMain,
- ISzAllocPtr allocTemp)
-{
- Byte header[k7zStartHeaderSize];
- Int64 startArcPos;
- UInt64 nextHeaderOffset, nextHeaderSize;
- size_t nextHeaderSizeT;
- UInt32 nextHeaderCRC;
- CBuf buf;
- SRes res;
-
- startArcPos = 0;
- RINOK(ILookInStream_Seek(inStream, &startArcPos, SZ_SEEK_CUR))
-
- RINOK(LookInStream_Read2(inStream, header, k7zStartHeaderSize, SZ_ERROR_NO_ARCHIVE))
-
- if (!TestSignatureCandidate(header))
- return SZ_ERROR_NO_ARCHIVE;
- if (header[6] != k7zMajorVersion)
- return SZ_ERROR_UNSUPPORTED;
-
- nextHeaderOffset = GetUi64(header + 12);
- nextHeaderSize = GetUi64(header + 20);
- nextHeaderCRC = GetUi32(header + 28);
-
- p->startPosAfterHeader = (UInt64)startArcPos + k7zStartHeaderSize;
-
- if (CrcCalc(header + 12, 20) != GetUi32(header + 8))
- return SZ_ERROR_CRC;
-
- p->db.RangeLimit = nextHeaderOffset;
-
- nextHeaderSizeT = (size_t)nextHeaderSize;
- if (nextHeaderSizeT != nextHeaderSize)
- return SZ_ERROR_MEM;
- if (nextHeaderSizeT == 0)
- return SZ_OK;
- if (nextHeaderOffset > nextHeaderOffset + nextHeaderSize ||
- nextHeaderOffset > nextHeaderOffset + nextHeaderSize + k7zStartHeaderSize)
- return SZ_ERROR_NO_ARCHIVE;
-
- {
- Int64 pos = 0;
- RINOK(ILookInStream_Seek(inStream, &pos, SZ_SEEK_END))
- if ((UInt64)pos < (UInt64)startArcPos + nextHeaderOffset ||
- (UInt64)pos < (UInt64)startArcPos + k7zStartHeaderSize + nextHeaderOffset ||
- (UInt64)pos < (UInt64)startArcPos + k7zStartHeaderSize + nextHeaderOffset + nextHeaderSize)
- return SZ_ERROR_INPUT_EOF;
- }
-
- RINOK(LookInStream_SeekTo(inStream, (UInt64)startArcPos + k7zStartHeaderSize + nextHeaderOffset))
-
- if (!Buf_Create(&buf, nextHeaderSizeT, allocTemp))
- return SZ_ERROR_MEM;
-
- res = LookInStream_Read(inStream, buf.data, nextHeaderSizeT);
-
- if (res == SZ_OK)
- {
- res = SZ_ERROR_ARCHIVE;
- if (CrcCalc(buf.data, nextHeaderSizeT) == nextHeaderCRC)
- {
- CSzData sd;
- UInt64 type;
- sd.Data = buf.data;
- sd.Size = buf.size;
-
- res = ReadID(&sd, &type);
-
- if (res == SZ_OK && type == k7zIdEncodedHeader)
- {
- CSzAr tempAr;
- CBuf tempBuf;
- Buf_Init(&tempBuf);
-
- SzAr_Init(&tempAr);
- tempAr.RangeLimit = p->db.RangeLimit;
-
- res = SzReadAndDecodePackedStreams(inStream, &sd, &tempBuf, 1, p->startPosAfterHeader, &tempAr, allocTemp);
- SzAr_Free(&tempAr, allocTemp);
-
- if (res != SZ_OK)
- {
- Buf_Free(&tempBuf, allocTemp);
- }
- else
- {
- Buf_Free(&buf, allocTemp);
- buf.data = tempBuf.data;
- buf.size = tempBuf.size;
- sd.Data = buf.data;
- sd.Size = buf.size;
- res = ReadID(&sd, &type);
- }
- }
-
- if (res == SZ_OK)
- {
- if (type == k7zIdHeader)
- {
- /*
- CSzData sd2;
- unsigned ttt;
- for (ttt = 0; ttt < 40000; ttt++)
- {
- SzArEx_Free(p, allocMain);
- sd2 = sd;
- res = SzReadHeader(p, &sd2, inStream, allocMain, allocTemp);
- if (res != SZ_OK)
- break;
- }
- */
- res = SzReadHeader(p, &sd, inStream, allocMain, allocTemp);
- }
- else
- res = SZ_ERROR_UNSUPPORTED;
- }
- }
- }
-
- Buf_Free(&buf, allocTemp);
- return res;
-}
-
-
-SRes SzArEx_Open(CSzArEx *p, ILookInStreamPtr inStream,
- ISzAllocPtr allocMain, ISzAllocPtr allocTemp)
-{
- const SRes res = SzArEx_Open2(p, inStream, allocMain, allocTemp);
- if (res != SZ_OK)
- SzArEx_Free(p, allocMain);
- return res;
-}
-
-
-SRes SzArEx_Extract(
- const CSzArEx *p,
- ILookInStreamPtr inStream,
- UInt32 fileIndex,
- UInt32 *blockIndex,
- Byte **tempBuf,
- size_t *outBufferSize,
- size_t *offset,
- size_t *outSizeProcessed,
- ISzAllocPtr allocMain,
- ISzAllocPtr allocTemp)
-{
- const UInt32 folderIndex = p->FileToFolder[fileIndex];
- SRes res = SZ_OK;
-
- *offset = 0;
- *outSizeProcessed = 0;
-
- if (folderIndex == (UInt32)-1)
- {
- ISzAlloc_Free(allocMain, *tempBuf);
- *blockIndex = folderIndex;
- *tempBuf = NULL;
- *outBufferSize = 0;
- return SZ_OK;
- }
-
- if (*tempBuf == NULL || *blockIndex != folderIndex)
- {
- const UInt64 unpackSizeSpec = SzAr_GetFolderUnpackSize(&p->db, folderIndex);
- /*
- UInt64 unpackSizeSpec =
- p->UnpackPositions[p->FolderToFile[(size_t)folderIndex + 1]] -
- p->UnpackPositions[p->FolderToFile[folderIndex]];
- */
- const size_t unpackSize = (size_t)unpackSizeSpec;
-
- if (unpackSize != unpackSizeSpec)
- return SZ_ERROR_MEM;
- *blockIndex = folderIndex;
- ISzAlloc_Free(allocMain, *tempBuf);
- *tempBuf = NULL;
-
- if (res == SZ_OK)
- {
- *outBufferSize = unpackSize;
- if (unpackSize != 0)
- {
- *tempBuf = (Byte *)ISzAlloc_Alloc(allocMain, unpackSize);
- if (*tempBuf == NULL)
- res = SZ_ERROR_MEM;
- }
-
- if (res == SZ_OK)
- {
- res = SzAr_DecodeFolder(&p->db, folderIndex,
- inStream, p->dataPos, *tempBuf, unpackSize, allocTemp);
- }
- }
- }
-
- if (res == SZ_OK)
- {
- const UInt64 unpackPos = p->UnpackPositions[fileIndex];
- *offset = (size_t)(unpackPos - p->UnpackPositions[p->FolderToFile[folderIndex]]);
- *outSizeProcessed = (size_t)(p->UnpackPositions[(size_t)fileIndex + 1] - unpackPos);
- if (*offset + *outSizeProcessed > *outBufferSize)
- return SZ_ERROR_FAIL;
- if (SzBitWithVals_Check(&p->CRCs, fileIndex))
- if (CrcCalc(*tempBuf + *offset, *outSizeProcessed) != p->CRCs.Vals[fileIndex])
- res = SZ_ERROR_CRC;
- }
-
- return res;
-}
-
-
-size_t SzArEx_GetFileNameUtf16(const CSzArEx *p, size_t fileIndex, UInt16 *dest)
-{
- const size_t offs = p->FileNameOffsets[fileIndex];
- const size_t len = p->FileNameOffsets[fileIndex + 1] - offs;
- if (dest != 0)
- {
- size_t i;
- const Byte *src = p->FileNames + offs * 2;
- for (i = 0; i < len; i++)
- dest[i] = GetUi16(src + i * 2);
- }
- return len;
-}
-
-/*
-size_t SzArEx_GetFullNameLen(const CSzArEx *p, size_t fileIndex)
-{
- size_t len;
- if (!p->FileNameOffsets)
- return 1;
- len = 0;
- for (;;)
- {
- UInt32 parent = (UInt32)(Int32)-1;
- len += p->FileNameOffsets[fileIndex + 1] - p->FileNameOffsets[fileIndex];
- if SzBitWithVals_Check(&p->Parents, fileIndex)
- parent = p->Parents.Vals[fileIndex];
- if (parent == (UInt32)(Int32)-1)
- return len;
- fileIndex = parent;
- }
-}
-
-UInt16 *SzArEx_GetFullNameUtf16_Back(const CSzArEx *p, size_t fileIndex, UInt16 *dest)
-{
- BoolInt needSlash;
- if (!p->FileNameOffsets)
- {
- *(--dest) = 0;
- return dest;
- }
- needSlash = False;
- for (;;)
- {
- UInt32 parent = (UInt32)(Int32)-1;
- size_t curLen = p->FileNameOffsets[fileIndex + 1] - p->FileNameOffsets[fileIndex];
- SzArEx_GetFileNameUtf16(p, fileIndex, dest - curLen);
- if (needSlash)
- *(dest - 1) = '/';
- needSlash = True;
- dest -= curLen;
-
- if SzBitWithVals_Check(&p->Parents, fileIndex)
- parent = p->Parents.Vals[fileIndex];
- if (parent == (UInt32)(Int32)-1)
- return dest;
- fileIndex = parent;
- }
-}
-*/
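For reference, the archive-reader API removed above (SzArEx_Open / SzArEx_Extract) is normally driven as sketched below. This is a minimal sketch modeled on the usage pattern of the 7-Zip C SDK's 7zMain.c, not code from this patch; the buffer size, the ExtractFirstFile name, and the single-allocator setup are illustrative assumptions.

/* Sketch: open a .7z archive and extract its first entry into memory. */
#include "7z.h"
#include "7zAlloc.h"   /* SzAlloc / SzFree */
#include "7zCrc.h"
#include "7zFile.h"

static const ISzAlloc g_Alloc = { SzAlloc, SzFree };
#define kInputBufSize ((size_t)1 << 18)

int ExtractFirstFile(const char *path)   /* hypothetical helper name */
{
  CFileInStream archiveStream;
  CLookToRead2 lookStream;
  CSzArEx db;
  SRes res;

  if (InFile_Open(&archiveStream.file, path) != 0)
    return 1;
  FileInStream_CreateVTable(&archiveStream);
  LookToRead2_CreateVTable(&lookStream, False);
  lookStream.buf = (Byte *)ISzAlloc_Alloc(&g_Alloc, kInputBufSize);
  lookStream.bufSize = kInputBufSize;
  lookStream.realStream = &archiveStream.vt;
  lookStream.pos = lookStream.size = 0;

  CrcGenerateTable();            /* required once before header CRC checks */
  SzArEx_Init(&db);
  res = SzArEx_Open(&db, &lookStream.vt, &g_Alloc, &g_Alloc);
  if (res == SZ_OK && db.NumFiles != 0)
  {
    UInt32 blockIndex = 0xFFFFFFFF;   /* cache of the last decoded folder */
    Byte *outBuffer = NULL;
    size_t outBufferSize = 0, offset = 0, outSizeProcessed = 0;
    res = SzArEx_Extract(&db, &lookStream.vt, 0 /* fileIndex */,
        &blockIndex, &outBuffer, &outBufferSize,
        &offset, &outSizeProcessed, &g_Alloc, &g_Alloc);
    /* on success the file's bytes are at outBuffer + offset, length outSizeProcessed */
    ISzAlloc_Free(&g_Alloc, outBuffer);
  }
  SzArEx_Free(&db, &g_Alloc);
  ISzAlloc_Free(&g_Alloc, lookStream.buf);
  File_Close(&archiveStream.file);
  return (res == SZ_OK) ? 0 : 1;
}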
diff --git a/3rdparty/7z/src/7zBuf.c b/3rdparty/7z/src/7zBuf.c
deleted file mode 100644
index 438bba68bd..0000000000
--- a/3rdparty/7z/src/7zBuf.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* 7zBuf.c -- Byte Buffer
-2017-04-03 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "7zBuf.h"
-
-void Buf_Init(CBuf *p)
-{
- p->data = 0;
- p->size = 0;
-}
-
-int Buf_Create(CBuf *p, size_t size, ISzAllocPtr alloc)
-{
- p->size = 0;
- if (size == 0)
- {
- p->data = 0;
- return 1;
- }
- p->data = (Byte *)ISzAlloc_Alloc(alloc, size);
- if (p->data)
- {
- p->size = size;
- return 1;
- }
- return 0;
-}
-
-void Buf_Free(CBuf *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->data);
- p->data = 0;
- p->size = 0;
-}
diff --git a/3rdparty/7z/src/7zBuf.h b/3rdparty/7z/src/7zBuf.h
deleted file mode 100644
index ca34c19097..0000000000
--- a/3rdparty/7z/src/7zBuf.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* 7zBuf.h -- Byte Buffer
-2023-03-04 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_7Z_BUF_H
-#define ZIP7_INC_7Z_BUF_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-typedef struct
-{
- Byte *data;
- size_t size;
-} CBuf;
-
-void Buf_Init(CBuf *p);
-int Buf_Create(CBuf *p, size_t size, ISzAllocPtr alloc);
-void Buf_Free(CBuf *p, ISzAllocPtr alloc);
-
-typedef struct
-{
- Byte *data;
- size_t size;
- size_t pos;
-} CDynBuf;
-
-void DynBuf_Construct(CDynBuf *p);
-void DynBuf_SeekToBeg(CDynBuf *p);
-int DynBuf_Write(CDynBuf *p, const Byte *buf, size_t size, ISzAllocPtr alloc);
-void DynBuf_Free(CDynBuf *p, ISzAllocPtr alloc);
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/7zBuf2.c b/3rdparty/7z/src/7zBuf2.c
deleted file mode 100644
index 49b4343b67..0000000000
--- a/3rdparty/7z/src/7zBuf2.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/* 7zBuf2.c -- Byte Buffer
-2017-04-03 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "7zBuf.h"
-
-void DynBuf_Construct(CDynBuf *p)
-{
- p->data = 0;
- p->size = 0;
- p->pos = 0;
-}
-
-void DynBuf_SeekToBeg(CDynBuf *p)
-{
- p->pos = 0;
-}
-
-int DynBuf_Write(CDynBuf *p, const Byte *buf, size_t size, ISzAllocPtr alloc)
-{
- if (size > p->size - p->pos)
- {
- size_t newSize = p->pos + size;
- Byte *data;
- newSize += newSize / 4;
- data = (Byte *)ISzAlloc_Alloc(alloc, newSize);
- if (!data)
- return 0;
- p->size = newSize;
- if (p->pos != 0)
- memcpy(data, p->data, p->pos);
- ISzAlloc_Free(alloc, p->data);
- p->data = data;
- }
- if (size != 0)
- {
- memcpy(p->data + p->pos, buf, size);
- p->pos += size;
- }
- return 1;
-}
-
-void DynBuf_Free(CDynBuf *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->data);
- p->data = 0;
- p->size = 0;
- p->pos = 0;
-}
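The CDynBuf helpers deleted above form a small append-only growable buffer (it regrows to 1.25x of the needed size). A hedged usage sketch, assuming SzAlloc/SzFree from 7zAlloc.h as the allocator and a hypothetical DynBufDemo wrapper:

#include "7zAlloc.h"   /* SzAlloc / SzFree */
#include "7zBuf.h"

static const ISzAlloc g_Alloc = { SzAlloc, SzFree };

int DynBufDemo(void)
{
  CDynBuf b;
  DynBuf_Construct(&b);
  /* DynBuf_Write returns 1 on success, 0 on allocation failure */
  if (!DynBuf_Write(&b, (const Byte *)"hello ", 6, &g_Alloc) ||
      !DynBuf_Write(&b, (const Byte *)"world", 5, &g_Alloc))
  {
    DynBuf_Free(&b, &g_Alloc);
    return 1;
  }
  /* b.data now holds the 11 concatenated bytes; b.pos == 11 */
  DynBuf_Free(&b, &g_Alloc);
  return 0;
}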
diff --git a/3rdparty/7z/src/7zCrc.c b/3rdparty/7z/src/7zCrc.c
deleted file mode 100644
index 087189902c..0000000000
--- a/3rdparty/7z/src/7zCrc.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/* 7zCrc.c -- CRC32 calculation and init
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "7zCrc.h"
-#include "CpuArch.h"
-
-#define kCrcPoly 0xEDB88320
-
-#ifdef MY_CPU_LE
- #define CRC_NUM_TABLES 8
-#else
- #define CRC_NUM_TABLES 9
-
- UInt32 Z7_FASTCALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
- UInt32 Z7_FASTCALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
-#endif
-
-#ifndef MY_CPU_BE
- UInt32 Z7_FASTCALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
- UInt32 Z7_FASTCALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
-#endif
-
-/*
-extern
-CRC_FUNC g_CrcUpdateT4;
-CRC_FUNC g_CrcUpdateT4;
-*/
-extern
-CRC_FUNC g_CrcUpdateT8;
-CRC_FUNC g_CrcUpdateT8;
-extern
-CRC_FUNC g_CrcUpdateT0_32;
-CRC_FUNC g_CrcUpdateT0_32;
-extern
-CRC_FUNC g_CrcUpdateT0_64;
-CRC_FUNC g_CrcUpdateT0_64;
-extern
-CRC_FUNC g_CrcUpdate;
-CRC_FUNC g_CrcUpdate;
-
-UInt32 g_CrcTable[256 * CRC_NUM_TABLES];
-
-UInt32 Z7_FASTCALL CrcUpdate(UInt32 v, const void *data, size_t size)
-{
- return g_CrcUpdate(v, data, size, g_CrcTable);
-}
-
-UInt32 Z7_FASTCALL CrcCalc(const void *data, size_t size)
-{
- return g_CrcUpdate(CRC_INIT_VAL, data, size, g_CrcTable) ^ CRC_INIT_VAL;
-}
-
-#if CRC_NUM_TABLES < 4 \
- || (CRC_NUM_TABLES == 4 && defined(MY_CPU_BE)) \
- || (!defined(MY_CPU_LE) && !defined(MY_CPU_BE))
-#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
-UInt32 Z7_FASTCALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table);
-UInt32 Z7_FASTCALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- const Byte *pEnd = p + size;
- for (; p != pEnd; p++)
- v = CRC_UPDATE_BYTE_2(v, *p);
- return v;
-}
-#endif
-
-/* ---------- hardware CRC ---------- */
-
-#ifdef MY_CPU_LE
-
-#if defined(MY_CPU_ARM_OR_ARM64)
-
-// #pragma message("ARM*")
-
- #if defined(_MSC_VER)
- #if defined(MY_CPU_ARM64)
- #if (_MSC_VER >= 1910)
- #ifndef __clang__
- #define USE_ARM64_CRC
- #include <intrin.h>
- #endif
- #endif
- #endif
- #elif (defined(__clang__) && (__clang_major__ >= 3)) \
- || (defined(__GNUC__) && (__GNUC__ > 4))
- #if !defined(__ARM_FEATURE_CRC32)
- #define __ARM_FEATURE_CRC32 1
- #if defined(__clang__)
- #if defined(MY_CPU_ARM64)
- #define ATTRIB_CRC __attribute__((__target__("crc")))
- #else
- #define ATTRIB_CRC __attribute__((__target__("armv8-a,crc")))
- #endif
- #else
- #if defined(MY_CPU_ARM64)
- #define ATTRIB_CRC __attribute__((__target__("+crc")))
- #else
- #define ATTRIB_CRC __attribute__((__target__("arch=armv8-a+crc")))
- #endif
- #endif
- #endif
- #if defined(__ARM_FEATURE_CRC32)
- #define USE_ARM64_CRC
- #include <arm_acle.h>
- #endif
- #endif
-
-#else
-
-// no hardware CRC
-
-// #define USE_CRC_EMU
-
-#ifdef USE_CRC_EMU
-
-#pragma message("ARM64 CRC emulation")
-
-Z7_FORCE_INLINE
-UInt32 __crc32b(UInt32 v, UInt32 data)
-{
- const UInt32 *table = g_CrcTable;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data);
- return v;
-}
-
-Z7_FORCE_INLINE
-UInt32 __crc32w(UInt32 v, UInt32 data)
-{
- const UInt32 *table = g_CrcTable;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- return v;
-}
-
-Z7_FORCE_INLINE
-UInt32 __crc32d(UInt32 v, UInt64 data)
-{
- const UInt32 *table = g_CrcTable;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- v = CRC_UPDATE_BYTE_2(v, (Byte)data); data >>= 8;
- return v;
-}
-
-#endif // USE_CRC_EMU
-
-#endif // defined(MY_CPU_ARM64) && defined(MY_CPU_LE)
-
-
-
-#if defined(USE_ARM64_CRC) || defined(USE_CRC_EMU)
-
-#define T0_32_UNROLL_BYTES (4 * 4)
-#define T0_64_UNROLL_BYTES (4 * 8)
-
-#ifndef ATTRIB_CRC
-#define ATTRIB_CRC
-#endif
-// #pragma message("USE ARM HW CRC")
-
-ATTRIB_CRC
-UInt32 Z7_FASTCALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, const UInt32 *table);
-ATTRIB_CRC
-UInt32 Z7_FASTCALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- UNUSED_VAR(table);
-
- for (; size != 0 && ((unsigned)(ptrdiff_t)p & (T0_32_UNROLL_BYTES - 1)) != 0; size--)
- v = __crc32b(v, *p++);
-
- if (size >= T0_32_UNROLL_BYTES)
- {
- const Byte *lim = p + size;
- size &= (T0_32_UNROLL_BYTES - 1);
- lim -= size;
- do
- {
- v = __crc32w(v, *(const UInt32 *)(const void *)(p));
- v = __crc32w(v, *(const UInt32 *)(const void *)(p + 4)); p += 2 * 4;
- v = __crc32w(v, *(const UInt32 *)(const void *)(p));
- v = __crc32w(v, *(const UInt32 *)(const void *)(p + 4)); p += 2 * 4;
- }
- while (p != lim);
- }
-
- for (; size != 0; size--)
- v = __crc32b(v, *p++);
-
- return v;
-}
-
-ATTRIB_CRC
-UInt32 Z7_FASTCALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, const UInt32 *table);
-ATTRIB_CRC
-UInt32 Z7_FASTCALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- UNUSED_VAR(table);
-
- for (; size != 0 && ((unsigned)(ptrdiff_t)p & (T0_64_UNROLL_BYTES - 1)) != 0; size--)
- v = __crc32b(v, *p++);
-
- if (size >= T0_64_UNROLL_BYTES)
- {
- const Byte *lim = p + size;
- size &= (T0_64_UNROLL_BYTES - 1);
- lim -= size;
- do
- {
- v = __crc32d(v, *(const UInt64 *)(const void *)(p));
- v = __crc32d(v, *(const UInt64 *)(const void *)(p + 8)); p += 2 * 8;
- v = __crc32d(v, *(const UInt64 *)(const void *)(p));
- v = __crc32d(v, *(const UInt64 *)(const void *)(p + 8)); p += 2 * 8;
- }
- while (p != lim);
- }
-
- for (; size != 0; size--)
- v = __crc32b(v, *p++);
-
- return v;
-}
-
-#undef T0_32_UNROLL_BYTES
-#undef T0_64_UNROLL_BYTES
-
-#endif // defined(USE_ARM64_CRC) || defined(USE_CRC_EMU)
-
-#endif // MY_CPU_LE
-
-
-
-
-void Z7_FASTCALL CrcGenerateTable(void)
-{
- UInt32 i;
- for (i = 0; i < 256; i++)
- {
- UInt32 r = i;
- unsigned j;
- for (j = 0; j < 8; j++)
- r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1)));
- g_CrcTable[i] = r;
- }
- for (i = 256; i < 256 * CRC_NUM_TABLES; i++)
- {
- const UInt32 r = g_CrcTable[(size_t)i - 256];
- g_CrcTable[i] = g_CrcTable[r & 0xFF] ^ (r >> 8);
- }
-
- #if CRC_NUM_TABLES < 4
- g_CrcUpdate = CrcUpdateT1;
- #elif defined(MY_CPU_LE)
- // g_CrcUpdateT4 = CrcUpdateT4;
- #if CRC_NUM_TABLES < 8
- g_CrcUpdate = CrcUpdateT4;
- #else // CRC_NUM_TABLES >= 8
- g_CrcUpdateT8 = CrcUpdateT8;
- /*
- #ifdef MY_CPU_X86_OR_AMD64
- if (!CPU_Is_InOrder())
- #endif
- */
- g_CrcUpdate = CrcUpdateT8;
- #endif
- #else
- {
- #ifndef MY_CPU_BE
- UInt32 k = 0x01020304;
- const Byte *p = (const Byte *)&k;
- if (p[0] == 4 && p[1] == 3)
- {
- #if CRC_NUM_TABLES < 8
- // g_CrcUpdateT4 = CrcUpdateT4;
- g_CrcUpdate = CrcUpdateT4;
- #else // CRC_NUM_TABLES >= 8
- g_CrcUpdateT8 = CrcUpdateT8;
- g_CrcUpdate = CrcUpdateT8;
- #endif
- }
- else if (p[0] != 1 || p[1] != 2)
- g_CrcUpdate = CrcUpdateT1;
- else
- #endif // MY_CPU_BE
- {
- for (i = 256 * CRC_NUM_TABLES - 1; i >= 256; i--)
- {
- const UInt32 x = g_CrcTable[(size_t)i - 256];
- g_CrcTable[i] = Z7_BSWAP32(x);
- }
- #if CRC_NUM_TABLES <= 4
- g_CrcUpdate = CrcUpdateT1;
- #elif CRC_NUM_TABLES <= 8
- // g_CrcUpdateT4 = CrcUpdateT1_BeT4;
- g_CrcUpdate = CrcUpdateT1_BeT4;
- #else // CRC_NUM_TABLES > 8
- g_CrcUpdateT8 = CrcUpdateT1_BeT8;
- g_CrcUpdate = CrcUpdateT1_BeT8;
- #endif
- }
- }
- #endif // CRC_NUM_TABLES < 4
-
- #ifdef MY_CPU_LE
- #ifdef USE_ARM64_CRC
- if (CPU_IsSupported_CRC32())
- {
- g_CrcUpdateT0_32 = CrcUpdateT0_32;
- g_CrcUpdateT0_64 = CrcUpdateT0_64;
- g_CrcUpdate =
- #if defined(MY_CPU_ARM)
- CrcUpdateT0_32;
- #else
- CrcUpdateT0_64;
- #endif
- }
- #endif
-
- #ifdef USE_CRC_EMU
- g_CrcUpdateT0_32 = CrcUpdateT0_32;
- g_CrcUpdateT0_64 = CrcUpdateT0_64;
- g_CrcUpdate = CrcUpdateT0_64;
- #endif
- #endif
-}
-
-#undef kCrcPoly
-#undef CRC64_NUM_TABLES
-#undef CRC_UPDATE_BYTE_2
diff --git a/3rdparty/7z/src/7zCrc.h b/3rdparty/7z/src/7zCrc.h
deleted file mode 100644
index ef1c1afab5..0000000000
--- a/3rdparty/7z/src/7zCrc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* 7zCrc.h -- CRC32 calculation
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_7Z_CRC_H
-#define ZIP7_INC_7Z_CRC_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-extern UInt32 g_CrcTable[];
-
-/* Call CrcGenerateTable one time before other CRC functions */
-void Z7_FASTCALL CrcGenerateTable(void);
-
-#define CRC_INIT_VAL 0xFFFFFFFF
-#define CRC_GET_DIGEST(crc) ((crc) ^ CRC_INIT_VAL)
-#define CRC_UPDATE_BYTE(crc, b) (g_CrcTable[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
-
-UInt32 Z7_FASTCALL CrcUpdate(UInt32 crc, const void *data, size_t size);
-UInt32 Z7_FASTCALL CrcCalc(const void *data, size_t size);
-
-typedef UInt32 (Z7_FASTCALL *CRC_FUNC)(UInt32 v, const void *data, size_t size, const UInt32 *table);
-
-EXTERN_C_END
-
-#endif
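As the header notes, CrcGenerateTable must run once before any other CRC call. A minimal sketch follows; the main function is illustrative, and 0xCBF43926 is the standard CRC-32 check value for the ASCII digits "123456789":

#include <stdio.h>
#include "7zCrc.h"

int main(void)
{
  static const Byte data[9] = { '1','2','3','4','5','6','7','8','9' };
  UInt32 crc;
  CrcGenerateTable();                 /* fills g_CrcTable and selects an update routine */
  crc = CrcCalc(data, sizeof(data));
  printf("%08X\n", crc);              /* prints CBF43926 */
  return 0;
}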
diff --git a/3rdparty/7z/src/7zCrcOpt.c b/3rdparty/7z/src/7zCrcOpt.c
deleted file mode 100644
index f34913635e..0000000000
--- a/3rdparty/7z/src/7zCrcOpt.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/* 7zCrcOpt.c -- CRC32 calculation
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "CpuArch.h"
-
-#ifndef MY_CPU_BE
-
-#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
-
-UInt32 Z7_FASTCALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
-UInt32 Z7_FASTCALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
- v = CRC_UPDATE_BYTE_2(v, *p);
- for (; size >= 4; size -= 4, p += 4)
- {
- v ^= *(const UInt32 *)(const void *)p;
- v =
- (table + 0x300)[((v ) & 0xFF)]
- ^ (table + 0x200)[((v >> 8) & 0xFF)]
- ^ (table + 0x100)[((v >> 16) & 0xFF)]
- ^ (table + 0x000)[((v >> 24))];
- }
- for (; size > 0; size--, p++)
- v = CRC_UPDATE_BYTE_2(v, *p);
- return v;
-}
-
-UInt32 Z7_FASTCALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
-UInt32 Z7_FASTCALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- for (; size > 0 && ((unsigned)(ptrdiff_t)p & 7) != 0; size--, p++)
- v = CRC_UPDATE_BYTE_2(v, *p);
- for (; size >= 8; size -= 8, p += 8)
- {
- UInt32 d;
- v ^= *(const UInt32 *)(const void *)p;
- v =
- (table + 0x700)[((v ) & 0xFF)]
- ^ (table + 0x600)[((v >> 8) & 0xFF)]
- ^ (table + 0x500)[((v >> 16) & 0xFF)]
- ^ (table + 0x400)[((v >> 24))];
- d = *((const UInt32 *)(const void *)p + 1);
- v ^=
- (table + 0x300)[((d ) & 0xFF)]
- ^ (table + 0x200)[((d >> 8) & 0xFF)]
- ^ (table + 0x100)[((d >> 16) & 0xFF)]
- ^ (table + 0x000)[((d >> 24))];
- }
- for (; size > 0; size--, p++)
- v = CRC_UPDATE_BYTE_2(v, *p);
- return v;
-}
-
-#endif
-
-
-#ifndef MY_CPU_LE
-
-#define CRC_UINT32_SWAP(v) Z7_BSWAP32(v)
-
-#define CRC_UPDATE_BYTE_2_BE(crc, b) (table[(((crc) >> 24) ^ (b))] ^ ((crc) << 8))
-
-UInt32 Z7_FASTCALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- table += 0x100;
- v = CRC_UINT32_SWAP(v);
- for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
- v = CRC_UPDATE_BYTE_2_BE(v, *p);
- for (; size >= 4; size -= 4, p += 4)
- {
- v ^= *(const UInt32 *)(const void *)p;
- v =
- (table + 0x000)[((v ) & 0xFF)]
- ^ (table + 0x100)[((v >> 8) & 0xFF)]
- ^ (table + 0x200)[((v >> 16) & 0xFF)]
- ^ (table + 0x300)[((v >> 24))];
- }
- for (; size > 0; size--, p++)
- v = CRC_UPDATE_BYTE_2_BE(v, *p);
- return CRC_UINT32_SWAP(v);
-}
-
-UInt32 Z7_FASTCALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
-{
- const Byte *p = (const Byte *)data;
- table += 0x100;
- v = CRC_UINT32_SWAP(v);
- for (; size > 0 && ((unsigned)(ptrdiff_t)p & 7) != 0; size--, p++)
- v = CRC_UPDATE_BYTE_2_BE(v, *p);
- for (; size >= 8; size -= 8, p += 8)
- {
- UInt32 d;
- v ^= *(const UInt32 *)(const void *)p;
- v =
- (table + 0x400)[((v ) & 0xFF)]
- ^ (table + 0x500)[((v >> 8) & 0xFF)]
- ^ (table + 0x600)[((v >> 16) & 0xFF)]
- ^ (table + 0x700)[((v >> 24))];
- d = *((const UInt32 *)(const void *)p + 1);
- v ^=
- (table + 0x000)[((d ) & 0xFF)]
- ^ (table + 0x100)[((d >> 8) & 0xFF)]
- ^ (table + 0x200)[((d >> 16) & 0xFF)]
- ^ (table + 0x300)[((d >> 24))];
- }
- for (; size > 0; size--, p++)
- v = CRC_UPDATE_BYTE_2_BE(v, *p);
- return CRC_UINT32_SWAP(v);
-}
-
-#endif
diff --git a/3rdparty/7z/src/7zDec.c b/3rdparty/7z/src/7zDec.c
deleted file mode 100644
index 19cf81ebc5..0000000000
--- a/3rdparty/7z/src/7zDec.c
+++ /dev/null
@@ -1,648 +0,0 @@
-/* 7zDec.c -- Decoding from 7z folder
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-/* #define Z7_PPMD_SUPPORT */
-
-#include "7z.h"
-#include "7zCrc.h"
-
-#include "Bcj2.h"
-#include "Bra.h"
-#include "CpuArch.h"
-#include "Delta.h"
-#include "LzmaDec.h"
-#include "Lzma2Dec.h"
-#ifdef Z7_PPMD_SUPPORT
-#include "Ppmd7.h"
-#endif
-
-#define k_Copy 0
-#ifndef Z7_NO_METHOD_LZMA2
-#define k_LZMA2 0x21
-#endif
-#define k_LZMA 0x30101
-#define k_BCJ2 0x303011B
-
-#if !defined(Z7_NO_METHODS_FILTERS)
-#define Z7_USE_BRANCH_FILTER
-#endif
-
-#if !defined(Z7_NO_METHODS_FILTERS) || \
- defined(Z7_USE_NATIVE_BRANCH_FILTER) && defined(MY_CPU_ARM64)
-#define Z7_USE_FILTER_ARM64
-#ifndef Z7_USE_BRANCH_FILTER
-#define Z7_USE_BRANCH_FILTER
-#endif
-#define k_ARM64 0xa
-#endif
-
-#if !defined(Z7_NO_METHODS_FILTERS) || \
- defined(Z7_USE_NATIVE_BRANCH_FILTER) && defined(MY_CPU_ARMT)
-#define Z7_USE_FILTER_ARMT
-#ifndef Z7_USE_BRANCH_FILTER
-#define Z7_USE_BRANCH_FILTER
-#endif
-#define k_ARMT 0x3030701
-#endif
-
-#ifndef Z7_NO_METHODS_FILTERS
-#define k_Delta 3
-#define k_BCJ 0x3030103
-#define k_PPC 0x3030205
-#define k_IA64 0x3030401
-#define k_ARM 0x3030501
-#define k_SPARC 0x3030805
-#endif
-
-#ifdef Z7_PPMD_SUPPORT
-
-#define k_PPMD 0x30401
-
-typedef struct
-{
- IByteIn vt;
- const Byte *cur;
- const Byte *end;
- const Byte *begin;
- UInt64 processed;
- BoolInt extra;
- SRes res;
- ILookInStreamPtr inStream;
-} CByteInToLook;
-
-static Byte ReadByte(IByteInPtr pp)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CByteInToLook)
- if (p->cur != p->end)
- return *p->cur++;
- if (p->res == SZ_OK)
- {
- size_t size = (size_t)(p->cur - p->begin);
- p->processed += size;
- p->res = ILookInStream_Skip(p->inStream, size);
- size = (1 << 25);
- p->res = ILookInStream_Look(p->inStream, (const void **)&p->begin, &size);
- p->cur = p->begin;
- p->end = p->begin + size;
- if (size != 0)
- return *p->cur++;
- }
- p->extra = True;
- return 0;
-}
-
-static SRes SzDecodePpmd(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStreamPtr inStream,
- Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain)
-{
- CPpmd7 ppmd;
- CByteInToLook s;
- SRes res = SZ_OK;
-
- s.vt.Read = ReadByte;
- s.inStream = inStream;
- s.begin = s.end = s.cur = NULL;
- s.extra = False;
- s.res = SZ_OK;
- s.processed = 0;
-
- if (propsSize != 5)
- return SZ_ERROR_UNSUPPORTED;
-
- {
- unsigned order = props[0];
- UInt32 memSize = GetUi32(props + 1);
- if (order < PPMD7_MIN_ORDER ||
- order > PPMD7_MAX_ORDER ||
- memSize < PPMD7_MIN_MEM_SIZE ||
- memSize > PPMD7_MAX_MEM_SIZE)
- return SZ_ERROR_UNSUPPORTED;
- Ppmd7_Construct(&ppmd);
- if (!Ppmd7_Alloc(&ppmd, memSize, allocMain))
- return SZ_ERROR_MEM;
- Ppmd7_Init(&ppmd, order);
- }
- {
- ppmd.rc.dec.Stream = &s.vt;
- if (!Ppmd7z_RangeDec_Init(&ppmd.rc.dec))
- res = SZ_ERROR_DATA;
- else if (!s.extra)
- {
- Byte *buf = outBuffer;
- const Byte *lim = buf + outSize;
- for (; buf != lim; buf++)
- {
- int sym = Ppmd7z_DecodeSymbol(&ppmd);
- if (s.extra || sym < 0)
- break;
- *buf = (Byte)sym;
- }
- if (buf != lim)
- res = SZ_ERROR_DATA;
- else if (!Ppmd7z_RangeDec_IsFinishedOK(&ppmd.rc.dec))
- {
- /* if (Ppmd7z_DecodeSymbol(&ppmd) != PPMD7_SYM_END || !Ppmd7z_RangeDec_IsFinishedOK(&ppmd.rc.dec)) */
- res = SZ_ERROR_DATA;
- }
- }
- if (s.extra)
- res = (s.res != SZ_OK ? s.res : SZ_ERROR_DATA);
- else if (s.processed + (size_t)(s.cur - s.begin) != inSize)
- res = SZ_ERROR_DATA;
- }
- Ppmd7_Free(&ppmd, allocMain);
- return res;
-}
-
-#endif
-
-
-static SRes SzDecodeLzma(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStreamPtr inStream,
- Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain)
-{
- CLzmaDec state;
- SRes res = SZ_OK;
-
- LzmaDec_CONSTRUCT(&state)
- RINOK(LzmaDec_AllocateProbs(&state, props, propsSize, allocMain))
- state.dic = outBuffer;
- state.dicBufSize = outSize;
- LzmaDec_Init(&state);
-
- for (;;)
- {
- const void *inBuf = NULL;
- size_t lookahead = (1 << 18);
- if (lookahead > inSize)
- lookahead = (size_t)inSize;
- res = ILookInStream_Look(inStream, &inBuf, &lookahead);
- if (res != SZ_OK)
- break;
-
- {
- SizeT inProcessed = (SizeT)lookahead, dicPos = state.dicPos;
- ELzmaStatus status;
- res = LzmaDec_DecodeToDic(&state, outSize, (const Byte *)inBuf, &inProcessed, LZMA_FINISH_END, &status);
- lookahead -= inProcessed;
- inSize -= inProcessed;
- if (res != SZ_OK)
- break;
-
- if (status == LZMA_STATUS_FINISHED_WITH_MARK)
- {
- if (outSize != state.dicPos || inSize != 0)
- res = SZ_ERROR_DATA;
- break;
- }
-
- if (outSize == state.dicPos && inSize == 0 && status == LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK)
- break;
-
- if (inProcessed == 0 && dicPos == state.dicPos)
- {
- res = SZ_ERROR_DATA;
- break;
- }
-
- res = ILookInStream_Skip(inStream, inProcessed);
- if (res != SZ_OK)
- break;
- }
- }
-
- LzmaDec_FreeProbs(&state, allocMain);
- return res;
-}
-
-
-#ifndef Z7_NO_METHOD_LZMA2
-
-static SRes SzDecodeLzma2(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStreamPtr inStream,
- Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain)
-{
- CLzma2Dec state;
- SRes res = SZ_OK;
-
- Lzma2Dec_CONSTRUCT(&state)
- if (propsSize != 1)
- return SZ_ERROR_DATA;
- RINOK(Lzma2Dec_AllocateProbs(&state, props[0], allocMain))
- state.decoder.dic = outBuffer;
- state.decoder.dicBufSize = outSize;
- Lzma2Dec_Init(&state);
-
- for (;;)
- {
- const void *inBuf = NULL;
- size_t lookahead = (1 << 18);
- if (lookahead > inSize)
- lookahead = (size_t)inSize;
- res = ILookInStream_Look(inStream, &inBuf, &lookahead);
- if (res != SZ_OK)
- break;
-
- {
- SizeT inProcessed = (SizeT)lookahead, dicPos = state.decoder.dicPos;
- ELzmaStatus status;
- res = Lzma2Dec_DecodeToDic(&state, outSize, (const Byte *)inBuf, &inProcessed, LZMA_FINISH_END, &status);
- lookahead -= inProcessed;
- inSize -= inProcessed;
- if (res != SZ_OK)
- break;
-
- if (status == LZMA_STATUS_FINISHED_WITH_MARK)
- {
- if (outSize != state.decoder.dicPos || inSize != 0)
- res = SZ_ERROR_DATA;
- break;
- }
-
- if (inProcessed == 0 && dicPos == state.decoder.dicPos)
- {
- res = SZ_ERROR_DATA;
- break;
- }
-
- res = ILookInStream_Skip(inStream, inProcessed);
- if (res != SZ_OK)
- break;
- }
- }
-
- Lzma2Dec_FreeProbs(&state, allocMain);
- return res;
-}
-
-#endif
-
-
-static SRes SzDecodeCopy(UInt64 inSize, ILookInStreamPtr inStream, Byte *outBuffer)
-{
- while (inSize > 0)
- {
- const void *inBuf;
- size_t curSize = (1 << 18);
- if (curSize > inSize)
- curSize = (size_t)inSize;
- RINOK(ILookInStream_Look(inStream, &inBuf, &curSize))
- if (curSize == 0)
- return SZ_ERROR_INPUT_EOF;
- memcpy(outBuffer, inBuf, curSize);
- outBuffer += curSize;
- inSize -= curSize;
- RINOK(ILookInStream_Skip(inStream, curSize))
- }
- return SZ_OK;
-}
-
-static BoolInt IS_MAIN_METHOD(UInt32 m)
-{
- switch (m)
- {
- case k_Copy:
- case k_LZMA:
- #ifndef Z7_NO_METHOD_LZMA2
- case k_LZMA2:
- #endif
- #ifdef Z7_PPMD_SUPPORT
- case k_PPMD:
- #endif
- return True;
- }
- return False;
-}
-
-static BoolInt IS_SUPPORTED_CODER(const CSzCoderInfo *c)
-{
- return
- c->NumStreams == 1
- /* && c->MethodID <= (UInt32)0xFFFFFFFF */
- && IS_MAIN_METHOD((UInt32)c->MethodID);
-}
-
-#define IS_BCJ2(c) ((c)->MethodID == k_BCJ2 && (c)->NumStreams == 4)
-
-static SRes CheckSupportedFolder(const CSzFolder *f)
-{
- if (f->NumCoders < 1 || f->NumCoders > 4)
- return SZ_ERROR_UNSUPPORTED;
- if (!IS_SUPPORTED_CODER(&f->Coders[0]))
- return SZ_ERROR_UNSUPPORTED;
- if (f->NumCoders == 1)
- {
- if (f->NumPackStreams != 1 || f->PackStreams[0] != 0 || f->NumBonds != 0)
- return SZ_ERROR_UNSUPPORTED;
- return SZ_OK;
- }
-
-
- #if defined(Z7_USE_BRANCH_FILTER)
-
- if (f->NumCoders == 2)
- {
- const CSzCoderInfo *c = &f->Coders[1];
- if (
- /* c->MethodID > (UInt32)0xFFFFFFFF || */
- c->NumStreams != 1
- || f->NumPackStreams != 1
- || f->PackStreams[0] != 0
- || f->NumBonds != 1
- || f->Bonds[0].InIndex != 1
- || f->Bonds[0].OutIndex != 0)
- return SZ_ERROR_UNSUPPORTED;
- switch ((UInt32)c->MethodID)
- {
- #if !defined(Z7_NO_METHODS_FILTERS)
- case k_Delta:
- case k_BCJ:
- case k_PPC:
- case k_IA64:
- case k_SPARC:
- case k_ARM:
- #endif
- #ifdef Z7_USE_FILTER_ARM64
- case k_ARM64:
- #endif
- #ifdef Z7_USE_FILTER_ARMT
- case k_ARMT:
- #endif
- break;
- default:
- return SZ_ERROR_UNSUPPORTED;
- }
- return SZ_OK;
- }
-
- #endif
-
-
- if (f->NumCoders == 4)
- {
- if (!IS_SUPPORTED_CODER(&f->Coders[1])
- || !IS_SUPPORTED_CODER(&f->Coders[2])
- || !IS_BCJ2(&f->Coders[3]))
- return SZ_ERROR_UNSUPPORTED;
- if (f->NumPackStreams != 4
- || f->PackStreams[0] != 2
- || f->PackStreams[1] != 6
- || f->PackStreams[2] != 1
- || f->PackStreams[3] != 0
- || f->NumBonds != 3
- || f->Bonds[0].InIndex != 5 || f->Bonds[0].OutIndex != 0
- || f->Bonds[1].InIndex != 4 || f->Bonds[1].OutIndex != 1
- || f->Bonds[2].InIndex != 3 || f->Bonds[2].OutIndex != 2)
- return SZ_ERROR_UNSUPPORTED;
- return SZ_OK;
- }
-
- return SZ_ERROR_UNSUPPORTED;
-}
-
-
-
-
-
-
-static SRes SzFolder_Decode2(const CSzFolder *folder,
- const Byte *propsData,
- const UInt64 *unpackSizes,
- const UInt64 *packPositions,
- ILookInStreamPtr inStream, UInt64 startPos,
- Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain,
- Byte *tempBuf[])
-{
- UInt32 ci;
- SizeT tempSizes[3] = { 0, 0, 0};
- SizeT tempSize3 = 0;
- Byte *tempBuf3 = 0;
-
- RINOK(CheckSupportedFolder(folder))
-
- for (ci = 0; ci < folder->NumCoders; ci++)
- {
- const CSzCoderInfo *coder = &folder->Coders[ci];
-
- if (IS_MAIN_METHOD((UInt32)coder->MethodID))
- {
- UInt32 si = 0;
- UInt64 offset;
- UInt64 inSize;
- Byte *outBufCur = outBuffer;
- SizeT outSizeCur = outSize;
- if (folder->NumCoders == 4)
- {
- const UInt32 indices[] = { 3, 2, 0 };
- const UInt64 unpackSize = unpackSizes[ci];
- si = indices[ci];
- if (ci < 2)
- {
- Byte *temp;
- outSizeCur = (SizeT)unpackSize;
- if (outSizeCur != unpackSize)
- return SZ_ERROR_MEM;
- temp = (Byte *)ISzAlloc_Alloc(allocMain, outSizeCur);
- if (!temp && outSizeCur != 0)
- return SZ_ERROR_MEM;
- outBufCur = tempBuf[1 - ci] = temp;
- tempSizes[1 - ci] = outSizeCur;
- }
- else if (ci == 2)
- {
- if (unpackSize > outSize) /* check it */
- return SZ_ERROR_PARAM;
- tempBuf3 = outBufCur = outBuffer + (outSize - (size_t)unpackSize);
- tempSize3 = outSizeCur = (SizeT)unpackSize;
- }
- else
- return SZ_ERROR_UNSUPPORTED;
- }
- offset = packPositions[si];
- inSize = packPositions[(size_t)si + 1] - offset;
- RINOK(LookInStream_SeekTo(inStream, startPos + offset))
-
- if (coder->MethodID == k_Copy)
- {
- if (inSize != outSizeCur) /* check it */
- return SZ_ERROR_DATA;
- RINOK(SzDecodeCopy(inSize, inStream, outBufCur))
- }
- else if (coder->MethodID == k_LZMA)
- {
- RINOK(SzDecodeLzma(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain))
- }
- #ifndef Z7_NO_METHOD_LZMA2
- else if (coder->MethodID == k_LZMA2)
- {
- RINOK(SzDecodeLzma2(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain))
- }
- #endif
- #ifdef Z7_PPMD_SUPPORT
- else if (coder->MethodID == k_PPMD)
- {
- RINOK(SzDecodePpmd(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain))
- }
- #endif
- else
- return SZ_ERROR_UNSUPPORTED;
- }
- else if (coder->MethodID == k_BCJ2)
- {
- const UInt64 offset = packPositions[1];
- const UInt64 s3Size = packPositions[2] - offset;
-
- if (ci != 3)
- return SZ_ERROR_UNSUPPORTED;
-
- tempSizes[2] = (SizeT)s3Size;
- if (tempSizes[2] != s3Size)
- return SZ_ERROR_MEM;
- tempBuf[2] = (Byte *)ISzAlloc_Alloc(allocMain, tempSizes[2]);
- if (!tempBuf[2] && tempSizes[2] != 0)
- return SZ_ERROR_MEM;
-
- RINOK(LookInStream_SeekTo(inStream, startPos + offset))
- RINOK(SzDecodeCopy(s3Size, inStream, tempBuf[2]))
-
- if ((tempSizes[0] & 3) != 0 ||
- (tempSizes[1] & 3) != 0 ||
- tempSize3 + tempSizes[0] + tempSizes[1] != outSize)
- return SZ_ERROR_DATA;
-
- {
- CBcj2Dec p;
-
- p.bufs[0] = tempBuf3; p.lims[0] = tempBuf3 + tempSize3;
- p.bufs[1] = tempBuf[0]; p.lims[1] = tempBuf[0] + tempSizes[0];
- p.bufs[2] = tempBuf[1]; p.lims[2] = tempBuf[1] + tempSizes[1];
- p.bufs[3] = tempBuf[2]; p.lims[3] = tempBuf[2] + tempSizes[2];
-
- p.dest = outBuffer;
- p.destLim = outBuffer + outSize;
-
- Bcj2Dec_Init(&p);
- RINOK(Bcj2Dec_Decode(&p))
-
- {
- unsigned i;
- for (i = 0; i < 4; i++)
- if (p.bufs[i] != p.lims[i])
- return SZ_ERROR_DATA;
- if (p.dest != p.destLim || !Bcj2Dec_IsMaybeFinished(&p))
- return SZ_ERROR_DATA;
- }
- }
- }
- #if defined(Z7_USE_BRANCH_FILTER)
- else if (ci == 1)
- {
- #if !defined(Z7_NO_METHODS_FILTERS)
- if (coder->MethodID == k_Delta)
- {
- if (coder->PropsSize != 1)
- return SZ_ERROR_UNSUPPORTED;
- {
- Byte state[DELTA_STATE_SIZE];
- Delta_Init(state);
- Delta_Decode(state, (unsigned)(propsData[coder->PropsOffset]) + 1, outBuffer, outSize);
- }
- continue;
- }
- #endif
-
- #ifdef Z7_USE_FILTER_ARM64
- if (coder->MethodID == k_ARM64)
- {
- UInt32 pc = 0;
- if (coder->PropsSize == 4)
- pc = GetUi32(propsData + coder->PropsOffset);
- else if (coder->PropsSize != 0)
- return SZ_ERROR_UNSUPPORTED;
- z7_BranchConv_ARM64_Dec(outBuffer, outSize, pc);
- continue;
- }
- #endif
-
- #if !defined(Z7_NO_METHODS_FILTERS) || defined(Z7_USE_FILTER_ARMT)
- {
- if (coder->PropsSize != 0)
- return SZ_ERROR_UNSUPPORTED;
- #define CASE_BRA_CONV(isa) case k_ ## isa: Z7_BRANCH_CONV_DEC(isa)(outBuffer, outSize, 0); break; // pc = 0;
- switch (coder->MethodID)
- {
- #if !defined(Z7_NO_METHODS_FILTERS)
- case k_BCJ:
- {
- UInt32 state = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
- z7_BranchConvSt_X86_Dec(outBuffer, outSize, 0, &state); // pc = 0
- break;
- }
- CASE_BRA_CONV(PPC)
- CASE_BRA_CONV(IA64)
- CASE_BRA_CONV(SPARC)
- CASE_BRA_CONV(ARM)
- #endif
- #if !defined(Z7_NO_METHODS_FILTERS) || defined(Z7_USE_FILTER_ARMT)
- CASE_BRA_CONV(ARMT)
- #endif
- default:
- return SZ_ERROR_UNSUPPORTED;
- }
- continue;
- }
- #endif
- } // (c == 1)
- #endif
- else
- return SZ_ERROR_UNSUPPORTED;
- }
-
- return SZ_OK;
-}
-
-
-SRes SzAr_DecodeFolder(const CSzAr *p, UInt32 folderIndex,
- ILookInStreamPtr inStream, UInt64 startPos,
- Byte *outBuffer, size_t outSize,
- ISzAllocPtr allocMain)
-{
- SRes res;
- CSzFolder folder;
- CSzData sd;
-
- const Byte *data = p->CodersData + p->FoCodersOffsets[folderIndex];
- sd.Data = data;
- sd.Size = p->FoCodersOffsets[(size_t)folderIndex + 1] - p->FoCodersOffsets[folderIndex];
-
- res = SzGetNextFolderItem(&folder, &sd);
-
- if (res != SZ_OK)
- return res;
-
- if (sd.Size != 0
- || folder.UnpackStream != p->FoToMainUnpackSizeIndex[folderIndex]
- || outSize != SzAr_GetFolderUnpackSize(p, folderIndex))
- return SZ_ERROR_FAIL;
- {
- unsigned i;
- Byte *tempBuf[3] = { 0, 0, 0};
-
- res = SzFolder_Decode2(&folder, data,
- &p->CoderUnpackSizes[p->FoToCoderUnpackSizes[folderIndex]],
- p->PackPositions + p->FoStartPackStreamIndex[folderIndex],
- inStream, startPos,
- outBuffer, (SizeT)outSize, allocMain, tempBuf);
-
- for (i = 0; i < 3; i++)
- ISzAlloc_Free(allocMain, tempBuf[i]);
-
- if (res == SZ_OK)
- if (SzBitWithVals_Check(&p->FolderCRCs, folderIndex))
- if (CrcCalc(outBuffer, outSize) != p->FolderCRCs.Vals[folderIndex])
- res = SZ_ERROR_CRC;
-
- return res;
- }
-}
diff --git a/3rdparty/7z/src/7zFile.c b/3rdparty/7z/src/7zFile.c
deleted file mode 100644
index 96d4edd204..0000000000
--- a/3rdparty/7z/src/7zFile.c
+++ /dev/null
@@ -1,443 +0,0 @@
-/* 7zFile.c -- File IO
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "7zFile.h"
-
-#ifndef USE_WINDOWS_FILE
-
- #include <errno.h>
-
- #ifndef USE_FOPEN
- #include <stdio.h>
- #include <fcntl.h>
- #ifdef _WIN32
- #include <io.h>
- typedef int ssize_t;
- typedef int off_t;
- #else
- #include <unistd.h>
- #endif
- #endif
-
-#else
-
-/*
- ReadFile and WriteFile functions in Windows have BUG:
- If you Read or Write 64MB or more (probably min_failure_size = 64MB - 32KB + 1)
- from/to Network file, it returns ERROR_NO_SYSTEM_RESOURCES
- (Insufficient system resources exist to complete the requested service).
- Probably in some version of Windows there are problems with other sizes:
- for 32 MB (maybe also for 16 MB).
- And message can be "Network connection was lost"
-*/
-
-#endif
-
-#define kChunkSizeMax (1 << 22)
-
-void File_Construct(CSzFile *p)
-{
- #ifdef USE_WINDOWS_FILE
- p->handle = INVALID_HANDLE_VALUE;
- #elif defined(USE_FOPEN)
- p->file = NULL;
- #else
- p->fd = -1;
- #endif
-}
-
-#if !defined(UNDER_CE) || !defined(USE_WINDOWS_FILE)
-
-static WRes File_Open(CSzFile *p, const char *name, int writeMode)
-{
- #ifdef USE_WINDOWS_FILE
-
- p->handle = CreateFileA(name,
- writeMode ? GENERIC_WRITE : GENERIC_READ,
- FILE_SHARE_READ, NULL,
- writeMode ? CREATE_ALWAYS : OPEN_EXISTING,
- FILE_ATTRIBUTE_NORMAL, NULL);
- return (p->handle != INVALID_HANDLE_VALUE) ? 0 : GetLastError();
-
- #elif defined(USE_FOPEN)
-
- p->file = fopen(name, writeMode ? "wb+" : "rb");
- return (p->file != 0) ? 0 :
- #ifdef UNDER_CE
- 2; /* ENOENT */
- #else
- errno;
- #endif
-
- #else
-
- int flags = (writeMode ? (O_CREAT | O_EXCL | O_WRONLY) : O_RDONLY);
- #ifdef O_BINARY
- flags |= O_BINARY;
- #endif
- p->fd = open(name, flags, 0666);
- return (p->fd != -1) ? 0 : errno;
-
- #endif
-}
-
-WRes InFile_Open(CSzFile *p, const char *name) { return File_Open(p, name, 0); }
-
-WRes OutFile_Open(CSzFile *p, const char *name)
-{
- #if defined(USE_WINDOWS_FILE) || defined(USE_FOPEN)
- return File_Open(p, name, 1);
- #else
- p->fd = creat(name, 0666);
- return (p->fd != -1) ? 0 : errno;
- #endif
-}
-
-#endif
-
-
-#ifdef USE_WINDOWS_FILE
-static WRes File_OpenW(CSzFile *p, const WCHAR *name, int writeMode)
-{
- p->handle = CreateFileW(name,
- writeMode ? GENERIC_WRITE : GENERIC_READ,
- FILE_SHARE_READ, NULL,
- writeMode ? CREATE_ALWAYS : OPEN_EXISTING,
- FILE_ATTRIBUTE_NORMAL, NULL);
- return (p->handle != INVALID_HANDLE_VALUE) ? 0 : GetLastError();
-}
-WRes InFile_OpenW(CSzFile *p, const WCHAR *name) { return File_OpenW(p, name, 0); }
-WRes OutFile_OpenW(CSzFile *p, const WCHAR *name) { return File_OpenW(p, name, 1); }
-#endif
-
-WRes File_Close(CSzFile *p)
-{
- #ifdef USE_WINDOWS_FILE
-
- if (p->handle != INVALID_HANDLE_VALUE)
- {
- if (!CloseHandle(p->handle))
- return GetLastError();
- p->handle = INVALID_HANDLE_VALUE;
- }
-
- #elif defined(USE_FOPEN)
-
- if (p->file != NULL)
- {
- int res = fclose(p->file);
- if (res != 0)
- {
- if (res == EOF)
- return errno;
- return res;
- }
- p->file = NULL;
- }
-
- #else
-
- if (p->fd != -1)
- {
- if (close(p->fd) != 0)
- return errno;
- p->fd = -1;
- }
-
- #endif
-
- return 0;
-}
-
-
-WRes File_Read(CSzFile *p, void *data, size_t *size)
-{
- size_t originalSize = *size;
- *size = 0;
- if (originalSize == 0)
- return 0;
-
- #ifdef USE_WINDOWS_FILE
-
- do
- {
- const DWORD curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : (DWORD)originalSize;
- DWORD processed = 0;
- const BOOL res = ReadFile(p->handle, data, curSize, &processed, NULL);
- data = (void *)((Byte *)data + processed);
- originalSize -= processed;
- *size += processed;
- if (!res)
- return GetLastError();
- // debug : we can break here for partial reading mode
- if (processed == 0)
- break;
- }
- while (originalSize > 0);
-
- #elif defined(USE_FOPEN)
-
- do
- {
- const size_t curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : originalSize;
- const size_t processed = fread(data, 1, curSize, p->file);
- data = (void *)((Byte *)data + (size_t)processed);
- originalSize -= processed;
- *size += processed;
- if (processed != curSize)
- return ferror(p->file);
- // debug : we can break here for partial reading mode
- if (processed == 0)
- break;
- }
- while (originalSize > 0);
-
- #else
-
- do
- {
- const size_t curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : originalSize;
- const ssize_t processed = read(p->fd, data, curSize);
- if (processed == -1)
- return errno;
- if (processed == 0)
- break;
- data = (void *)((Byte *)data + (size_t)processed);
- originalSize -= (size_t)processed;
- *size += (size_t)processed;
- // debug : we can break here for partial reading mode
- // break;
- }
- while (originalSize > 0);
-
- #endif
-
- return 0;
-}
-
-
-WRes File_Write(CSzFile *p, const void *data, size_t *size)
-{
- size_t originalSize = *size;
- *size = 0;
- if (originalSize == 0)
- return 0;
-
- #ifdef USE_WINDOWS_FILE
-
- do
- {
- const DWORD curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : (DWORD)originalSize;
- DWORD processed = 0;
- const BOOL res = WriteFile(p->handle, data, curSize, &processed, NULL);
- data = (const void *)((const Byte *)data + processed);
- originalSize -= processed;
- *size += processed;
- if (!res)
- return GetLastError();
- if (processed == 0)
- break;
- }
- while (originalSize > 0);
-
- #elif defined(USE_FOPEN)
-
- do
- {
- const size_t curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : originalSize;
- const size_t processed = fwrite(data, 1, curSize, p->file);
- data = (void *)((Byte *)data + (size_t)processed);
- originalSize -= processed;
- *size += processed;
- if (processed != curSize)
- return ferror(p->file);
- if (processed == 0)
- break;
- }
- while (originalSize > 0);
-
- #else
-
- do
- {
- const size_t curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : originalSize;
- const ssize_t processed = write(p->fd, data, curSize);
- if (processed == -1)
- return errno;
- if (processed == 0)
- break;
- data = (const void *)((const Byte *)data + (size_t)processed);
- originalSize -= (size_t)processed;
- *size += (size_t)processed;
- }
- while (originalSize > 0);
-
- #endif
-
- return 0;
-}
-
-
-WRes File_Seek(CSzFile *p, Int64 *pos, ESzSeek origin)
-{
- #ifdef USE_WINDOWS_FILE
-
- DWORD moveMethod;
- UInt32 low = (UInt32)*pos;
- LONG high = (LONG)((UInt64)*pos >> 16 >> 16); /* for case when UInt64 is 32-bit only */
- // (int) to eliminate clang warning
- switch ((int)origin)
- {
- case SZ_SEEK_SET: moveMethod = FILE_BEGIN; break;
- case SZ_SEEK_CUR: moveMethod = FILE_CURRENT; break;
- case SZ_SEEK_END: moveMethod = FILE_END; break;
- default: return ERROR_INVALID_PARAMETER;
- }
- low = SetFilePointer(p->handle, (LONG)low, &high, moveMethod);
- if (low == (UInt32)0xFFFFFFFF)
- {
- WRes res = GetLastError();
- if (res != NO_ERROR)
- return res;
- }
- *pos = ((Int64)high << 32) | low;
- return 0;
-
- #else
-
- int moveMethod; // = origin;
-
- switch ((int)origin)
- {
- case SZ_SEEK_SET: moveMethod = SEEK_SET; break;
- case SZ_SEEK_CUR: moveMethod = SEEK_CUR; break;
- case SZ_SEEK_END: moveMethod = SEEK_END; break;
- default: return EINVAL;
- }
-
- #if defined(USE_FOPEN)
- {
- int res = fseek(p->file, (long)*pos, moveMethod);
- if (res == -1)
- return errno;
- *pos = ftell(p->file);
- if (*pos == -1)
- return errno;
- return 0;
- }
- #else
- {
- off_t res = lseek(p->fd, (off_t)*pos, moveMethod);
- if (res == -1)
- return errno;
- *pos = res;
- return 0;
- }
-
- #endif // USE_FOPEN
- #endif // USE_WINDOWS_FILE
-}
-
-
-WRes File_GetLength(CSzFile *p, UInt64 *length)
-{
- #ifdef USE_WINDOWS_FILE
-
- DWORD sizeHigh;
- DWORD sizeLow = GetFileSize(p->handle, &sizeHigh);
- if (sizeLow == 0xFFFFFFFF)
- {
- DWORD res = GetLastError();
- if (res != NO_ERROR)
- return res;
- }
- *length = (((UInt64)sizeHigh) << 32) + sizeLow;
- return 0;
-
- #elif defined(USE_FOPEN)
-
- long pos = ftell(p->file);
- int res = fseek(p->file, 0, SEEK_END);
- *length = ftell(p->file);
- fseek(p->file, pos, SEEK_SET);
- return res;
-
- #else
-
- off_t pos;
- *length = 0;
- pos = lseek(p->fd, 0, SEEK_CUR);
- if (pos != -1)
- {
- const off_t len2 = lseek(p->fd, 0, SEEK_END);
- const off_t res2 = lseek(p->fd, pos, SEEK_SET);
- if (len2 != -1)
- {
- *length = (UInt64)len2;
- if (res2 != -1)
- return 0;
- }
- }
- return errno;
-
- #endif
-}
-
-
-/* ---------- FileSeqInStream ---------- */
-
-static SRes FileSeqInStream_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileSeqInStream)
- const WRes wres = File_Read(&p->file, buf, size);
- p->wres = wres;
- return (wres == 0) ? SZ_OK : SZ_ERROR_READ;
-}
-
-void FileSeqInStream_CreateVTable(CFileSeqInStream *p)
-{
- p->vt.Read = FileSeqInStream_Read;
-}
-
-
-/* ---------- FileInStream ---------- */
-
-static SRes FileInStream_Read(ISeekInStreamPtr pp, void *buf, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileInStream)
- const WRes wres = File_Read(&p->file, buf, size);
- p->wres = wres;
- return (wres == 0) ? SZ_OK : SZ_ERROR_READ;
-}
-
-static SRes FileInStream_Seek(ISeekInStreamPtr pp, Int64 *pos, ESzSeek origin)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileInStream)
- const WRes wres = File_Seek(&p->file, pos, origin);
- p->wres = wres;
- return (wres == 0) ? SZ_OK : SZ_ERROR_READ;
-}
-
-void FileInStream_CreateVTable(CFileInStream *p)
-{
- p->vt.Read = FileInStream_Read;
- p->vt.Seek = FileInStream_Seek;
-}
-
-
-/* ---------- FileOutStream ---------- */
-
-static size_t FileOutStream_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileOutStream)
- const WRes wres = File_Write(&p->file, data, &size);
- p->wres = wres;
- return size;
-}
-
-void FileOutStream_CreateVTable(CFileOutStream *p)
-{
- p->vt.Write = FileOutStream_Write;
-}
diff --git a/3rdparty/7z/src/7zFile.h b/3rdparty/7z/src/7zFile.h
deleted file mode 100644
index d7c871a734..0000000000
--- a/3rdparty/7z/src/7zFile.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* 7zFile.h -- File IO
-2023-03-05 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_FILE_H
-#define ZIP7_INC_FILE_H
-
-#ifdef _WIN32
-#define USE_WINDOWS_FILE
-// #include <windows.h>
-#endif
-
-#ifdef USE_WINDOWS_FILE
-#include "7zWindows.h"
-
-#else
-// note: USE_FOPEN mode is limited to 32-bit file size
-// #define USE_FOPEN
-// #include <stdio.h>
-#endif
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-/* ---------- File ---------- */
-
-typedef struct
-{
- #ifdef USE_WINDOWS_FILE
- HANDLE handle;
- #elif defined(USE_FOPEN)
- FILE *file;
- #else
- int fd;
- #endif
-} CSzFile;
-
-void File_Construct(CSzFile *p);
-#if !defined(UNDER_CE) || !defined(USE_WINDOWS_FILE)
-WRes InFile_Open(CSzFile *p, const char *name);
-WRes OutFile_Open(CSzFile *p, const char *name);
-#endif
-#ifdef USE_WINDOWS_FILE
-WRes InFile_OpenW(CSzFile *p, const WCHAR *name);
-WRes OutFile_OpenW(CSzFile *p, const WCHAR *name);
-#endif
-WRes File_Close(CSzFile *p);
-
-/* reads max(*size, remain file's size) bytes */
-WRes File_Read(CSzFile *p, void *data, size_t *size);
-
-/* writes *size bytes */
-WRes File_Write(CSzFile *p, const void *data, size_t *size);
-
-WRes File_Seek(CSzFile *p, Int64 *pos, ESzSeek origin);
-WRes File_GetLength(CSzFile *p, UInt64 *length);
-
-
-/* ---------- FileInStream ---------- */
-
-typedef struct
-{
- ISeqInStream vt;
- CSzFile file;
- WRes wres;
-} CFileSeqInStream;
-
-void FileSeqInStream_CreateVTable(CFileSeqInStream *p);
-
-
-typedef struct
-{
- ISeekInStream vt;
- CSzFile file;
- WRes wres;
-} CFileInStream;
-
-void FileInStream_CreateVTable(CFileInStream *p);
-
-
-typedef struct
-{
- ISeqOutStream vt;
- CSzFile file;
- WRes wres;
-} CFileOutStream;
-
-void FileOutStream_CreateVTable(CFileOutStream *p);
-
-EXTERN_C_END
-
-#endif
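The CSzFile wrapper above hides the three backends (WinAPI handle, FILE*, or POSIX fd) behind one API, and File_Read updates *size to the byte count actually read. A brief sketch, assuming a hypothetical ReadSome helper and a file name supplied by the caller:

#include "7zFile.h"

int ReadSome(const char *name)
{
  CSzFile f;
  Byte buf[4096];
  size_t size = sizeof(buf);
  WRes wres;

  File_Construct(&f);
  if (InFile_Open(&f, name) != 0)
    return 1;
  wres = File_Read(&f, buf, &size);   /* size now holds the bytes actually read */
  File_Close(&f);
  return (wres == 0) ? 0 : 1;
}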
diff --git a/3rdparty/7z/src/7zStream.c b/3rdparty/7z/src/7zStream.c
deleted file mode 100644
index 92f821e2cd..0000000000
--- a/3rdparty/7z/src/7zStream.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/* 7zStream.c -- 7z Stream functions
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "7zTypes.h"
-
-
-SRes SeqInStream_ReadMax(ISeqInStreamPtr stream, void *buf, size_t *processedSize)
-{
- size_t size = *processedSize;
- *processedSize = 0;
- while (size != 0)
- {
- size_t cur = size;
- const SRes res = ISeqInStream_Read(stream, buf, &cur);
- *processedSize += cur;
- buf = (void *)((Byte *)buf + cur);
- size -= cur;
- if (res != SZ_OK)
- return res;
- if (cur == 0)
- return SZ_OK;
- }
- return SZ_OK;
-}
-
-/*
-SRes SeqInStream_Read2(ISeqInStreamPtr stream, void *buf, size_t size, SRes errorType)
-{
- while (size != 0)
- {
- size_t processed = size;
- RINOK(ISeqInStream_Read(stream, buf, &processed))
- if (processed == 0)
- return errorType;
- buf = (void *)((Byte *)buf + processed);
- size -= processed;
- }
- return SZ_OK;
-}
-
-SRes SeqInStream_Read(ISeqInStreamPtr stream, void *buf, size_t size)
-{
- return SeqInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
-}
-*/
-
-
-SRes SeqInStream_ReadByte(ISeqInStreamPtr stream, Byte *buf)
-{
- size_t processed = 1;
- RINOK(ISeqInStream_Read(stream, buf, &processed))
- return (processed == 1) ? SZ_OK : SZ_ERROR_INPUT_EOF;
-}
-
-
-
-SRes LookInStream_SeekTo(ILookInStreamPtr stream, UInt64 offset)
-{
- Int64 t = (Int64)offset;
- return ILookInStream_Seek(stream, &t, SZ_SEEK_SET);
-}
-
-SRes LookInStream_LookRead(ILookInStreamPtr stream, void *buf, size_t *size)
-{
- const void *lookBuf;
- if (*size == 0)
- return SZ_OK;
- RINOK(ILookInStream_Look(stream, &lookBuf, size))
- memcpy(buf, lookBuf, *size);
- return ILookInStream_Skip(stream, *size);
-}
-
-SRes LookInStream_Read2(ILookInStreamPtr stream, void *buf, size_t size, SRes errorType)
-{
- while (size != 0)
- {
- size_t processed = size;
- RINOK(ILookInStream_Read(stream, buf, &processed))
- if (processed == 0)
- return errorType;
- buf = (void *)((Byte *)buf + processed);
- size -= processed;
- }
- return SZ_OK;
-}
-
-SRes LookInStream_Read(ILookInStreamPtr stream, void *buf, size_t size)
-{
- return LookInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
-}
-
-
-
-#define GET_LookToRead2 Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLookToRead2)
-
-static SRes LookToRead2_Look_Lookahead(ILookInStreamPtr pp, const void **buf, size_t *size)
-{
- SRes res = SZ_OK;
- GET_LookToRead2
- size_t size2 = p->size - p->pos;
- if (size2 == 0 && *size != 0)
- {
- p->pos = 0;
- p->size = 0;
- size2 = p->bufSize;
- res = ISeekInStream_Read(p->realStream, p->buf, &size2);
- p->size = size2;
- }
- if (*size > size2)
- *size = size2;
- *buf = p->buf + p->pos;
- return res;
-}
-
-static SRes LookToRead2_Look_Exact(ILookInStreamPtr pp, const void **buf, size_t *size)
-{
- SRes res = SZ_OK;
- GET_LookToRead2
- size_t size2 = p->size - p->pos;
- if (size2 == 0 && *size != 0)
- {
- p->pos = 0;
- p->size = 0;
- if (*size > p->bufSize)
- *size = p->bufSize;
- res = ISeekInStream_Read(p->realStream, p->buf, size);
- size2 = p->size = *size;
- }
- if (*size > size2)
- *size = size2;
- *buf = p->buf + p->pos;
- return res;
-}
-
-static SRes LookToRead2_Skip(ILookInStreamPtr pp, size_t offset)
-{
- GET_LookToRead2
- p->pos += offset;
- return SZ_OK;
-}
-
-static SRes LookToRead2_Read(ILookInStreamPtr pp, void *buf, size_t *size)
-{
- GET_LookToRead2
- size_t rem = p->size - p->pos;
- if (rem == 0)
- return ISeekInStream_Read(p->realStream, buf, size);
- if (rem > *size)
- rem = *size;
- memcpy(buf, p->buf + p->pos, rem);
- p->pos += rem;
- *size = rem;
- return SZ_OK;
-}
-
-static SRes LookToRead2_Seek(ILookInStreamPtr pp, Int64 *pos, ESzSeek origin)
-{
- GET_LookToRead2
- p->pos = p->size = 0;
- return ISeekInStream_Seek(p->realStream, pos, origin);
-}
-
-void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead)
-{
- p->vt.Look = lookahead ?
- LookToRead2_Look_Lookahead :
- LookToRead2_Look_Exact;
- p->vt.Skip = LookToRead2_Skip;
- p->vt.Read = LookToRead2_Read;
- p->vt.Seek = LookToRead2_Seek;
-}
-
-
-
-static SRes SecToLook_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSecToLook)
- return LookInStream_LookRead(p->realStream, buf, size);
-}
-
-void SecToLook_CreateVTable(CSecToLook *p)
-{
- p->vt.Read = SecToLook_Read;
-}
-
-static SRes SecToRead_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSecToRead)
- return ILookInStream_Read(p->realStream, buf, size);
-}
-
-void SecToRead_CreateVTable(CSecToRead *p)
-{
- p->vt.Read = SecToRead_Read;
-}
diff --git a/3rdparty/7z/src/7zTypes.h b/3rdparty/7z/src/7zTypes.h
deleted file mode 100644
index 0eb9513cae..0000000000
--- a/3rdparty/7z/src/7zTypes.h
+++ /dev/null
@@ -1,597 +0,0 @@
-/* 7zTypes.h -- Basic types
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_7Z_TYPES_H
-#define ZIP7_7Z_TYPES_H
-
-#ifdef _WIN32
-/* #include <windows.h> */
-#else
-#include <errno.h>
-#endif
-
-#include <stddef.h>
-
-#ifndef EXTERN_C_BEGIN
-#ifdef __cplusplus
-#define EXTERN_C_BEGIN extern "C" {
-#define EXTERN_C_END }
-#else
-#define EXTERN_C_BEGIN
-#define EXTERN_C_END
-#endif
-#endif
-
-EXTERN_C_BEGIN
-
-#define SZ_OK 0
-
-#define SZ_ERROR_DATA 1
-#define SZ_ERROR_MEM 2
-#define SZ_ERROR_CRC 3
-#define SZ_ERROR_UNSUPPORTED 4
-#define SZ_ERROR_PARAM 5
-#define SZ_ERROR_INPUT_EOF 6
-#define SZ_ERROR_OUTPUT_EOF 7
-#define SZ_ERROR_READ 8
-#define SZ_ERROR_WRITE 9
-#define SZ_ERROR_PROGRESS 10
-#define SZ_ERROR_FAIL 11
-#define SZ_ERROR_THREAD 12
-
-#define SZ_ERROR_ARCHIVE 16
-#define SZ_ERROR_NO_ARCHIVE 17
-
-typedef int SRes;
-
-
-#ifdef _MSC_VER
- #if _MSC_VER > 1200
- #define MY_ALIGN(n) __declspec(align(n))
- #else
- #define MY_ALIGN(n)
- #endif
-#else
- /*
- // C11/C++11:
- #include <stdalign.h>
- #define MY_ALIGN(n) alignas(n)
- */
- #define MY_ALIGN(n) __attribute__ ((aligned(n)))
-#endif
-
-
-#ifdef _WIN32
-
-/* typedef DWORD WRes; */
-typedef unsigned WRes;
-#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)
-
-// #define MY_HRES_ERROR_INTERNAL_ERROR MY_SRes_HRESULT_FROM_WRes(ERROR_INTERNAL_ERROR)
-
-#else // _WIN32
-
-// #define ENV_HAVE_LSTAT
-typedef int WRes;
-
-// (FACILITY_ERRNO = 0x800) is 7zip's FACILITY constant to represent (errno) errors in HRESULT
-#define MY_FACILITY_ERRNO 0x800
-#define MY_FACILITY_WIN32 7
-#define MY_FACILITY_WRes MY_FACILITY_ERRNO
-
-#define MY_HRESULT_FROM_errno_CONST_ERROR(x) ((HRESULT)( \
- ( (HRESULT)(x) & 0x0000FFFF) \
- | (MY_FACILITY_WRes << 16) \
- | (HRESULT)0x80000000 ))
-
-#define MY_SRes_HRESULT_FROM_WRes(x) \
- ((HRESULT)(x) <= 0 ? ((HRESULT)(x)) : MY_HRESULT_FROM_errno_CONST_ERROR(x))
-
-// we call macro HRESULT_FROM_WIN32 for system errors (WRes) that are (errno)
-#define HRESULT_FROM_WIN32(x) MY_SRes_HRESULT_FROM_WRes(x)
-
-/*
-#define ERROR_FILE_NOT_FOUND 2L
-#define ERROR_ACCESS_DENIED 5L
-#define ERROR_NO_MORE_FILES 18L
-#define ERROR_LOCK_VIOLATION 33L
-#define ERROR_FILE_EXISTS 80L
-#define ERROR_DISK_FULL 112L
-#define ERROR_NEGATIVE_SEEK 131L
-#define ERROR_ALREADY_EXISTS 183L
-#define ERROR_DIRECTORY 267L
-#define ERROR_TOO_MANY_POSTS 298L
-
-#define ERROR_INTERNAL_ERROR 1359L
-#define ERROR_INVALID_REPARSE_DATA 4392L
-#define ERROR_REPARSE_TAG_INVALID 4393L
-#define ERROR_REPARSE_TAG_MISMATCH 4394L
-*/
-
-// we use errno equivalents for some WIN32 errors:
-
-#define ERROR_INVALID_PARAMETER EINVAL
-#define ERROR_INVALID_FUNCTION EINVAL
-#define ERROR_ALREADY_EXISTS EEXIST
-#define ERROR_FILE_EXISTS EEXIST
-#define ERROR_PATH_NOT_FOUND ENOENT
-#define ERROR_FILE_NOT_FOUND ENOENT
-#define ERROR_DISK_FULL ENOSPC
-// #define ERROR_INVALID_HANDLE EBADF
-
-// we use FACILITY_WIN32 for errors that has no errno equivalent
-// Too many posts were made to a semaphore.
-#define ERROR_TOO_MANY_POSTS ((HRESULT)0x8007012AL)
-#define ERROR_INVALID_REPARSE_DATA ((HRESULT)0x80071128L)
-#define ERROR_REPARSE_TAG_INVALID ((HRESULT)0x80071129L)
-
-// if (MY_FACILITY_WRes != FACILITY_WIN32),
-// we use FACILITY_WIN32 for COM errors:
-#define E_OUTOFMEMORY ((HRESULT)0x8007000EL)
-#define E_INVALIDARG ((HRESULT)0x80070057L)
-#define MY_E_ERROR_NEGATIVE_SEEK ((HRESULT)0x80070083L)
-
-/*
-// we can use FACILITY_ERRNO for some COM errors, that have errno equivalents:
-#define E_OUTOFMEMORY MY_HRESULT_FROM_errno_CONST_ERROR(ENOMEM)
-#define E_INVALIDARG MY_HRESULT_FROM_errno_CONST_ERROR(EINVAL)
-#define MY_E_ERROR_NEGATIVE_SEEK MY_HRESULT_FROM_errno_CONST_ERROR(EINVAL)
-*/
-
-#define TEXT(quote) quote
-
-#define FILE_ATTRIBUTE_READONLY 0x0001
-#define FILE_ATTRIBUTE_HIDDEN 0x0002
-#define FILE_ATTRIBUTE_SYSTEM 0x0004
-#define FILE_ATTRIBUTE_DIRECTORY 0x0010
-#define FILE_ATTRIBUTE_ARCHIVE 0x0020
-#define FILE_ATTRIBUTE_DEVICE 0x0040
-#define FILE_ATTRIBUTE_NORMAL 0x0080
-#define FILE_ATTRIBUTE_TEMPORARY 0x0100
-#define FILE_ATTRIBUTE_SPARSE_FILE 0x0200
-#define FILE_ATTRIBUTE_REPARSE_POINT 0x0400
-#define FILE_ATTRIBUTE_COMPRESSED 0x0800
-#define FILE_ATTRIBUTE_OFFLINE 0x1000
-#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x2000
-#define FILE_ATTRIBUTE_ENCRYPTED 0x4000
-
-#define FILE_ATTRIBUTE_UNIX_EXTENSION 0x8000 /* trick for Unix */
-
-#endif
-
-
-#ifndef RINOK
-#define RINOK(x) { const int _result_ = (x); if (_result_ != 0) return _result_; }
-#endif
-
-#ifndef RINOK_WRes
-#define RINOK_WRes(x) { const WRes _result_ = (x); if (_result_ != 0) return _result_; }
-#endif
-
-typedef unsigned char Byte;
-typedef short Int16;
-typedef unsigned short UInt16;
-
-#ifdef Z7_DECL_Int32_AS_long
-typedef long Int32;
-typedef unsigned long UInt32;
-#else
-typedef int Int32;
-typedef unsigned int UInt32;
-#endif
-
-
-#ifndef _WIN32
-
-typedef int INT;
-typedef Int32 INT32;
-typedef unsigned int UINT;
-typedef UInt32 UINT32;
-typedef INT32 LONG; // LONG, ULONG and DWORD must be 32-bit for _WIN32 compatibility
-typedef UINT32 ULONG;
-
-#undef DWORD
-typedef UINT32 DWORD;
-
-#define VOID void
-
-#define HRESULT LONG
-
-typedef void *LPVOID;
-// typedef void VOID;
-// typedef ULONG_PTR DWORD_PTR, *PDWORD_PTR;
-// gcc / clang on Unix : sizeof(long==sizeof(void*) in 32 or 64 bits)
-typedef long INT_PTR;
-typedef unsigned long UINT_PTR;
-typedef long LONG_PTR;
-typedef unsigned long DWORD_PTR;
-
-typedef size_t SIZE_T;
-
-#endif // _WIN32
-
-
-#define MY_HRES_ERROR_INTERNAL_ERROR ((HRESULT)0x8007054FL)
-
-
-#ifdef Z7_DECL_Int64_AS_long
-
-typedef long Int64;
-typedef unsigned long UInt64;
-
-#else
-
-#if (defined(_MSC_VER) || defined(__BORLANDC__)) && !defined(__clang__)
-typedef __int64 Int64;
-typedef unsigned __int64 UInt64;
-#else
-#if defined(__clang__) || defined(__GNUC__)
-#include <stdint.h>
-typedef int64_t Int64;
-typedef uint64_t UInt64;
-#else
-typedef long long int Int64;
-typedef unsigned long long int UInt64;
-// #define UINT64_CONST(n) n ## ULL
-#endif
-#endif
-
-#endif
-
-#define UINT64_CONST(n) n
-
-
-#ifdef Z7_DECL_SizeT_AS_unsigned_int
-typedef unsigned int SizeT;
-#else
-typedef size_t SizeT;
-#endif
-
-/*
-#if (defined(_MSC_VER) && _MSC_VER <= 1200)
-typedef size_t MY_uintptr_t;
-#else
-#include <stdint.h>
-typedef uintptr_t MY_uintptr_t;
-#endif
-*/
-
-typedef int BoolInt;
-/* typedef BoolInt Bool; */
-#define True 1
-#define False 0
-
-
-#ifdef _WIN32
-#define Z7_STDCALL __stdcall
-#else
-#define Z7_STDCALL
-#endif
-
-#ifdef _MSC_VER
-
-#if _MSC_VER >= 1300
-#define Z7_NO_INLINE __declspec(noinline)
-#else
-#define Z7_NO_INLINE
-#endif
-
-#define Z7_FORCE_INLINE __forceinline
-
-#define Z7_CDECL __cdecl
-#define Z7_FASTCALL __fastcall
-
-#else // _MSC_VER
-
-#if (defined(__GNUC__) && (__GNUC__ >= 4)) \
- || (defined(__clang__) && (__clang_major__ >= 4)) \
- || defined(__INTEL_COMPILER) \
- || defined(__xlC__)
-#define Z7_NO_INLINE __attribute__((noinline))
-#define Z7_FORCE_INLINE __attribute__((always_inline)) inline
-#else
-#define Z7_NO_INLINE
-#define Z7_FORCE_INLINE
-#endif
-
-#define Z7_CDECL
-
-#if defined(_M_IX86) \
- || defined(__i386__)
-// #define Z7_FASTCALL __attribute__((fastcall))
-// #define Z7_FASTCALL __attribute__((cdecl))
-#define Z7_FASTCALL
-#elif defined(MY_CPU_AMD64)
-// #define Z7_FASTCALL __attribute__((ms_abi))
-#define Z7_FASTCALL
-#else
-#define Z7_FASTCALL
-#endif
-
-#endif // _MSC_VER
-
-
-/* The following interfaces use first parameter as pointer to structure */
-
-// #define Z7_C_IFACE_CONST_QUAL
-#define Z7_C_IFACE_CONST_QUAL const
-
-#define Z7_C_IFACE_DECL(a) \
- struct a ## _; \
- typedef Z7_C_IFACE_CONST_QUAL struct a ## _ * a ## Ptr; \
- typedef struct a ## _ a; \
- struct a ## _
-
-
-Z7_C_IFACE_DECL (IByteIn)
-{
- Byte (*Read)(IByteInPtr p); /* reads one byte, returns 0 in case of EOF or error */
-};
-#define IByteIn_Read(p) (p)->Read(p)
-
-
-Z7_C_IFACE_DECL (IByteOut)
-{
- void (*Write)(IByteOutPtr p, Byte b);
-};
-#define IByteOut_Write(p, b) (p)->Write(p, b)
-
-
-Z7_C_IFACE_DECL (ISeqInStream)
-{
- SRes (*Read)(ISeqInStreamPtr p, void *buf, size_t *size);
- /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
- (output(*size) < input(*size)) is allowed */
-};
-#define ISeqInStream_Read(p, buf, size) (p)->Read(p, buf, size)
-
-/* try to read as much as avail in stream and limited by (*processedSize) */
-SRes SeqInStream_ReadMax(ISeqInStreamPtr stream, void *buf, size_t *processedSize);
-/* it can return SZ_ERROR_INPUT_EOF */
-// SRes SeqInStream_Read(ISeqInStreamPtr stream, void *buf, size_t size);
-// SRes SeqInStream_Read2(ISeqInStreamPtr stream, void *buf, size_t size, SRes errorType);
-SRes SeqInStream_ReadByte(ISeqInStreamPtr stream, Byte *buf);
-
-
-Z7_C_IFACE_DECL (ISeqOutStream)
-{
- size_t (*Write)(ISeqOutStreamPtr p, const void *buf, size_t size);
- /* Returns: result - the number of actually written bytes.
- (result < size) means error */
-};
-#define ISeqOutStream_Write(p, buf, size) (p)->Write(p, buf, size)
-
-typedef enum
-{
- SZ_SEEK_SET = 0,
- SZ_SEEK_CUR = 1,
- SZ_SEEK_END = 2
-} ESzSeek;
-
-
-Z7_C_IFACE_DECL (ISeekInStream)
-{
- SRes (*Read)(ISeekInStreamPtr p, void *buf, size_t *size); /* same as ISeqInStream::Read */
- SRes (*Seek)(ISeekInStreamPtr p, Int64 *pos, ESzSeek origin);
-};
-#define ISeekInStream_Read(p, buf, size) (p)->Read(p, buf, size)
-#define ISeekInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)
-
-
-Z7_C_IFACE_DECL (ILookInStream)
-{
- SRes (*Look)(ILookInStreamPtr p, const void **buf, size_t *size);
- /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
- (output(*size) > input(*size)) is not allowed
- (output(*size) < input(*size)) is allowed */
- SRes (*Skip)(ILookInStreamPtr p, size_t offset);
- /* offset must be <= output(*size) of Look */
- SRes (*Read)(ILookInStreamPtr p, void *buf, size_t *size);
- /* reads directly (without buffer). It's same as ISeqInStream::Read */
- SRes (*Seek)(ILookInStreamPtr p, Int64 *pos, ESzSeek origin);
-};
-
-#define ILookInStream_Look(p, buf, size) (p)->Look(p, buf, size)
-#define ILookInStream_Skip(p, offset) (p)->Skip(p, offset)
-#define ILookInStream_Read(p, buf, size) (p)->Read(p, buf, size)
-#define ILookInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)
-
-
-SRes LookInStream_LookRead(ILookInStreamPtr stream, void *buf, size_t *size);
-SRes LookInStream_SeekTo(ILookInStreamPtr stream, UInt64 offset);
-
-/* reads via ILookInStream::Read */
-SRes LookInStream_Read2(ILookInStreamPtr stream, void *buf, size_t size, SRes errorType);
-SRes LookInStream_Read(ILookInStreamPtr stream, void *buf, size_t size);
-
-
-typedef struct
-{
- ILookInStream vt;
- ISeekInStreamPtr realStream;
-
- size_t pos;
- size_t size; /* it's data size */
-
- /* the following variables must be set outside */
- Byte *buf;
- size_t bufSize;
-} CLookToRead2;
-
-void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead);
-
-#define LookToRead2_INIT(p) { (p)->pos = (p)->size = 0; }
-
-
-typedef struct
-{
- ISeqInStream vt;
- ILookInStreamPtr realStream;
-} CSecToLook;
-
-void SecToLook_CreateVTable(CSecToLook *p);
-
-
-
-typedef struct
-{
- ISeqInStream vt;
- ILookInStreamPtr realStream;
-} CSecToRead;
-
-void SecToRead_CreateVTable(CSecToRead *p);
-
-
-Z7_C_IFACE_DECL (ICompressProgress)
-{
- SRes (*Progress)(ICompressProgressPtr p, UInt64 inSize, UInt64 outSize);
- /* Returns: result. (result != SZ_OK) means break.
- Value (UInt64)(Int64)-1 for size means unknown value. */
-};
-
-#define ICompressProgress_Progress(p, inSize, outSize) (p)->Progress(p, inSize, outSize)
-
-
-
-typedef struct ISzAlloc ISzAlloc;
-typedef const ISzAlloc * ISzAllocPtr;
-
-struct ISzAlloc
-{
- void *(*Alloc)(ISzAllocPtr p, size_t size);
- void (*Free)(ISzAllocPtr p, void *address); /* address can be 0 */
-};
-
-#define ISzAlloc_Alloc(p, size) (p)->Alloc(p, size)
-#define ISzAlloc_Free(p, a) (p)->Free(p, a)
-
-/* deprecated */
-#define IAlloc_Alloc(p, size) ISzAlloc_Alloc(p, size)
-#define IAlloc_Free(p, a) ISzAlloc_Free(p, a)
-
-
-
-
-
-#ifndef MY_offsetof
- #ifdef offsetof
- #define MY_offsetof(type, m) offsetof(type, m)
- /*
- #define MY_offsetof(type, m) FIELD_OFFSET(type, m)
- */
- #else
- #define MY_offsetof(type, m) ((size_t)&(((type *)0)->m))
- #endif
-#endif
-
-
-
-#ifndef Z7_container_of
-
-/*
-#define Z7_container_of(ptr, type, m) container_of(ptr, type, m)
-#define Z7_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m)
-#define Z7_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m)))
-#define Z7_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m))))
-*/
-
-/*
- GCC shows warning: "perhaps the 'offsetof' macro was used incorrectly"
- GCC 3.4.4 : classes with constructor
- GCC 4.8.1 : classes with non-public variable members"
-*/
-
-#define Z7_container_of(ptr, type, m) \
- ((type *)(void *)((char *)(void *) \
- (1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
-
-#define Z7_container_of_CONST(ptr, type, m) \
- ((const type *)(const void *)((const char *)(const void *) \
- (1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
-
-/*
-#define Z7_container_of_NON_CONST_FROM_CONST(ptr, type, m) \
- ((type *)(void *)(const void *)((const char *)(const void *) \
- (1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
-*/
-
-#endif
-
-#define Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(void *)(ptr))
-
-// #define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
-#define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_container_of(ptr, type, m)
-// #define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_container_of_NON_CONST_FROM_CONST(ptr, type, m)
-
-#define Z7_CONTAINER_FROM_VTBL_CONST(ptr, type, m) Z7_container_of_CONST(ptr, type, m)
-
-#define Z7_CONTAINER_FROM_VTBL_CLS(ptr, type, m) Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
-/*
-#define Z7_CONTAINER_FROM_VTBL_CLS(ptr, type, m) Z7_CONTAINER_FROM_VTBL(ptr, type, m)
-*/
-#if defined (__clang__) || defined(__GNUC__)
-#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#define Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL \
- _Pragma("GCC diagnostic pop")
-#else
-#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL
-#define Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL
-#endif
-
-#define Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR(ptr, type, m, p) \
- Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL \
- type *p = Z7_CONTAINER_FROM_VTBL(ptr, type, m); \
- Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL
-
-#define Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(type) \
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR(pp, type, vt, p)
-
-
-// #define ZIP7_DECLARE_HANDLE(name) typedef void *name;
-#define Z7_DECLARE_HANDLE(name) struct name##_dummy{int unused;}; typedef struct name##_dummy *name;
-
-
-#define Z7_memset_0_ARRAY(a) memset((a), 0, sizeof(a))
-
-#ifndef Z7_ARRAY_SIZE
-#define Z7_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-#endif
-
-
-#ifdef _WIN32
-
-#define CHAR_PATH_SEPARATOR '\\'
-#define WCHAR_PATH_SEPARATOR L'\\'
-#define STRING_PATH_SEPARATOR "\\"
-#define WSTRING_PATH_SEPARATOR L"\\"
-
-#else
-
-#define CHAR_PATH_SEPARATOR '/'
-#define WCHAR_PATH_SEPARATOR L'/'
-#define STRING_PATH_SEPARATOR "/"
-#define WSTRING_PATH_SEPARATOR L"/"
-
-#endif
-
-#define k_PropVar_TimePrec_0 0
-#define k_PropVar_TimePrec_Unix 1
-#define k_PropVar_TimePrec_DOS 2
-#define k_PropVar_TimePrec_HighPrec 3
-#define k_PropVar_TimePrec_Base 16
-#define k_PropVar_TimePrec_100ns (k_PropVar_TimePrec_Base + 7)
-#define k_PropVar_TimePrec_1ns (k_PropVar_TimePrec_Base + 9)
-
-EXTERN_C_END
-
-#endif
-
-/*
-#ifndef Z7_ST
-#ifdef _7ZIP_ST
-#define Z7_ST
-#endif
-#endif
-*/
diff --git a/3rdparty/7z/src/7zVersion.h b/3rdparty/7z/src/7zVersion.h
deleted file mode 100644
index bf21a25f5b..0000000000
--- a/3rdparty/7z/src/7zVersion.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#define MY_VER_MAJOR 23
-#define MY_VER_MINOR 01
-#define MY_VER_BUILD 0
-#define MY_VERSION_NUMBERS "23.01"
-#define MY_VERSION MY_VERSION_NUMBERS
-
-#ifdef MY_CPU_NAME
- #define MY_VERSION_CPU MY_VERSION " (" MY_CPU_NAME ")"
-#else
- #define MY_VERSION_CPU MY_VERSION
-#endif
-
-#define MY_DATE "2023-06-20"
-#undef MY_COPYRIGHT
-#undef MY_VERSION_COPYRIGHT_DATE
-#define MY_AUTHOR_NAME "Igor Pavlov"
-#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"
-#define MY_COPYRIGHT_CR "Copyright (c) 1999-2023 Igor Pavlov"
-
-#ifdef USE_COPYRIGHT_CR
- #define MY_COPYRIGHT MY_COPYRIGHT_CR
-#else
- #define MY_COPYRIGHT MY_COPYRIGHT_PD
-#endif
-
-#define MY_COPYRIGHT_DATE MY_COPYRIGHT " : " MY_DATE
-#define MY_VERSION_COPYRIGHT_DATE MY_VERSION_CPU " : " MY_COPYRIGHT " : " MY_DATE
diff --git a/3rdparty/7z/src/7zVersion.rc b/3rdparty/7z/src/7zVersion.rc
deleted file mode 100644
index e520995ddc..0000000000
--- a/3rdparty/7z/src/7zVersion.rc
+++ /dev/null
@@ -1,55 +0,0 @@
-#define MY_VS_FFI_FILEFLAGSMASK 0x0000003FL
-#define MY_VOS_NT_WINDOWS32 0x00040004L
-#define MY_VOS_CE_WINDOWS32 0x00050004L
-
-#define MY_VFT_APP 0x00000001L
-#define MY_VFT_DLL 0x00000002L
-
-// #include <winver.h>
-
-#ifndef MY_VERSION
-#include "7zVersion.h"
-#endif
-
-#define MY_VER MY_VER_MAJOR,MY_VER_MINOR,MY_VER_BUILD,0
-
-#ifdef DEBUG
-#define DBG_FL VS_FF_DEBUG
-#else
-#define DBG_FL 0
-#endif
-
-#define MY_VERSION_INFO(fileType, descr, intName, origName) \
-LANGUAGE 9, 1 \
-1 VERSIONINFO \
- FILEVERSION MY_VER \
- PRODUCTVERSION MY_VER \
- FILEFLAGSMASK MY_VS_FFI_FILEFLAGSMASK \
- FILEFLAGS DBG_FL \
- FILEOS MY_VOS_NT_WINDOWS32 \
- FILETYPE fileType \
- FILESUBTYPE 0x0L \
-BEGIN \
- BLOCK "StringFileInfo" \
- BEGIN \
- BLOCK "040904b0" \
- BEGIN \
- VALUE "CompanyName", "Igor Pavlov" \
- VALUE "FileDescription", descr \
- VALUE "FileVersion", MY_VERSION \
- VALUE "InternalName", intName \
- VALUE "LegalCopyright", MY_COPYRIGHT \
- VALUE "OriginalFilename", origName \
- VALUE "ProductName", "7-Zip" \
- VALUE "ProductVersion", MY_VERSION \
- END \
- END \
- BLOCK "VarFileInfo" \
- BEGIN \
- VALUE "Translation", 0x409, 1200 \
- END \
-END
-
-#define MY_VERSION_INFO_APP(descr, intName) MY_VERSION_INFO(MY_VFT_APP, descr, intName, intName ".exe")
-
-#define MY_VERSION_INFO_DLL(descr, intName) MY_VERSION_INFO(MY_VFT_DLL, descr, intName, intName ".dll")
diff --git a/3rdparty/7z/src/7zWindows.h b/3rdparty/7z/src/7zWindows.h
deleted file mode 100644
index 42c6db8bfc..0000000000
--- a/3rdparty/7z/src/7zWindows.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* 7zWindows.h -- StdAfx
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_7Z_WINDOWS_H
-#define ZIP7_INC_7Z_WINDOWS_H
-
-#ifdef _WIN32
-
-#if defined(__clang__)
-# pragma clang diagnostic push
-#endif
-
-#if defined(_MSC_VER)
-
-#pragma warning(push)
-#pragma warning(disable : 4668) // '_WIN32_WINNT' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
-
-#if _MSC_VER == 1900
-// for old kit10 versions
-// #pragma warning(disable : 4255) // winuser.h(13979): warning C4255: 'GetThreadDpiAwarenessContext':
-#endif
-// win10 Windows Kit:
-#endif // _MSC_VER
-
-#if defined(_MSC_VER) && _MSC_VER <= 1200 && !defined(_WIN64)
-// for msvc6 without sdk2003
-#define RPC_NO_WINDOWS_H
-#endif
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-// #if defined(__GNUC__) && !defined(__clang__)
-#include <windows.h>
-#else
-#include <Windows.h>
-#endif
-// #include
-// #include
-
-// but if precompiled with clang-cl then we need
-// #include
-#if defined(_MSC_VER)
-#pragma warning(pop)
-#endif
-
-#if defined(__clang__)
-# pragma clang diagnostic pop
-#endif
-
-#if defined(_MSC_VER) && _MSC_VER <= 1200 && !defined(_WIN64)
-#ifndef _W64
-
-typedef long LONG_PTR, *PLONG_PTR;
-typedef unsigned long ULONG_PTR, *PULONG_PTR;
-typedef ULONG_PTR DWORD_PTR, *PDWORD_PTR;
-
-#define Z7_OLD_WIN_SDK
-#endif // _W64
-#endif // _MSC_VER == 1200
-
-#ifdef Z7_OLD_WIN_SDK
-
-#ifndef INVALID_FILE_ATTRIBUTES
-#define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
-#endif
-#ifndef INVALID_SET_FILE_POINTER
-#define INVALID_SET_FILE_POINTER ((DWORD)-1)
-#endif
-#ifndef FILE_SPECIAL_ACCESS
-#define FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS)
-#endif
-
-// ShlObj.h:
-// #define BIF_NEWDIALOGSTYLE 0x0040
-
-#pragma warning(disable : 4201)
-// #pragma warning(disable : 4115)
-
-#undef VARIANT_TRUE
-#define VARIANT_TRUE ((VARIANT_BOOL)-1)
-#endif
-
-#endif // Z7_OLD_WIN_SDK
-
-#ifdef UNDER_CE
-#undef VARIANT_TRUE
-#define VARIANT_TRUE ((VARIANT_BOOL)-1)
-#endif
-
-
-#if defined(_MSC_VER)
-#if _MSC_VER >= 1400 && _MSC_VER <= 1600
- // BaseTsd.h(148) : 'HandleToULong' : unreferenced inline function has been removed
- // string.h
- // #pragma warning(disable : 4514)
-#endif
-#endif
-
-
-/* #include "7zTypes.h" */
-
-#endif
diff --git a/3rdparty/7z/src/Aes.c b/3rdparty/7z/src/Aes.c
deleted file mode 100644
index d6732a6653..0000000000
--- a/3rdparty/7z/src/Aes.c
+++ /dev/null
@@ -1,393 +0,0 @@
-/* Aes.c -- AES encryption / decryption
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "CpuArch.h"
-#include "Aes.h"
-
-AES_CODE_FUNC g_AesCbc_Decode;
-#ifndef Z7_SFX
-AES_CODE_FUNC g_AesCbc_Encode;
-AES_CODE_FUNC g_AesCtr_Code;
-UInt32 g_Aes_SupportedFunctions_Flags;
-#endif
-
-static UInt32 T[256 * 4];
-static const Byte Sbox[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
-
-
-static UInt32 D[256 * 4];
-static Byte InvS[256];
-
-#define xtime(x) ((((x) << 1) ^ (((x) & 0x80) != 0 ? 0x1B : 0)) & 0xFF)
-
-#define Ui32(a0, a1, a2, a3) ((UInt32)(a0) | ((UInt32)(a1) << 8) | ((UInt32)(a2) << 16) | ((UInt32)(a3) << 24))
-
-#define gb0(x) ( (x) & 0xFF)
-#define gb1(x) (((x) >> ( 8)) & 0xFF)
-#define gb2(x) (((x) >> (16)) & 0xFF)
-#define gb3(x) (((x) >> (24)))
-
-#define gb(n, x) gb ## n(x)
-
-#define TT(x) (T + (x << 8))
-#define DD(x) (D + (x << 8))
-
-
-// #define Z7_SHOW_AES_STATUS
-
-#ifdef MY_CPU_X86_OR_AMD64
- #define USE_HW_AES
-#elif defined(MY_CPU_ARM_OR_ARM64) && defined(MY_CPU_LE)
- #if defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define USE_HW_AES
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 6) // fix that check
- #define USE_HW_AES
- #endif
- #elif defined(_MSC_VER)
- #if _MSC_VER >= 1910
- #define USE_HW_AES
- #endif
- #endif
-#endif
-
-#ifdef USE_HW_AES
-#ifdef Z7_SHOW_AES_STATUS
-#include <stdio.h>
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-#endif
-
-
-void AesGenTables(void)
-{
- unsigned i;
- for (i = 0; i < 256; i++)
- InvS[Sbox[i]] = (Byte)i;
-
- for (i = 0; i < 256; i++)
- {
- {
- const UInt32 a1 = Sbox[i];
- const UInt32 a2 = xtime(a1);
- const UInt32 a3 = a2 ^ a1;
- TT(0)[i] = Ui32(a2, a1, a1, a3);
- TT(1)[i] = Ui32(a3, a2, a1, a1);
- TT(2)[i] = Ui32(a1, a3, a2, a1);
- TT(3)[i] = Ui32(a1, a1, a3, a2);
- }
- {
- const UInt32 a1 = InvS[i];
- const UInt32 a2 = xtime(a1);
- const UInt32 a4 = xtime(a2);
- const UInt32 a8 = xtime(a4);
- const UInt32 a9 = a8 ^ a1;
- const UInt32 aB = a8 ^ a2 ^ a1;
- const UInt32 aD = a8 ^ a4 ^ a1;
- const UInt32 aE = a8 ^ a4 ^ a2;
- DD(0)[i] = Ui32(aE, a9, aD, aB);
- DD(1)[i] = Ui32(aB, aE, a9, aD);
- DD(2)[i] = Ui32(aD, aB, aE, a9);
- DD(3)[i] = Ui32(a9, aD, aB, aE);
- }
- }
-
- {
- AES_CODE_FUNC d = AesCbc_Decode;
- #ifndef Z7_SFX
- AES_CODE_FUNC e = AesCbc_Encode;
- AES_CODE_FUNC c = AesCtr_Code;
- UInt32 flags = 0;
- #endif
-
- #ifdef USE_HW_AES
- if (CPU_IsSupported_AES())
- {
- // #pragma message ("AES HW")
- PRF(printf("\n===AES HW\n"));
- d = AesCbc_Decode_HW;
-
- #ifndef Z7_SFX
- e = AesCbc_Encode_HW;
- c = AesCtr_Code_HW;
- flags = k_Aes_SupportedFunctions_HW;
- #endif
-
- #ifdef MY_CPU_X86_OR_AMD64
- if (CPU_IsSupported_VAES_AVX2())
- {
- PRF(printf("\n===vaes avx2\n"));
- d = AesCbc_Decode_HW_256;
- #ifndef Z7_SFX
- c = AesCtr_Code_HW_256;
- flags |= k_Aes_SupportedFunctions_HW_256;
- #endif
- }
- #endif
- }
- #endif
-
- g_AesCbc_Decode = d;
- #ifndef Z7_SFX
- g_AesCbc_Encode = e;
- g_AesCtr_Code = c;
- g_Aes_SupportedFunctions_Flags = flags;
- #endif
- }
-}
-
-
-#define HT(i, x, s) TT(x)[gb(x, s[(i + x) & 3])]
-
-#define HT4(m, i, s, p) m[i] = \
- HT(i, 0, s) ^ \
- HT(i, 1, s) ^ \
- HT(i, 2, s) ^ \
- HT(i, 3, s) ^ w[p + i]
-
-#define HT16(m, s, p) \
- HT4(m, 0, s, p); \
- HT4(m, 1, s, p); \
- HT4(m, 2, s, p); \
- HT4(m, 3, s, p); \
-
-#define FT(i, x) Sbox[gb(x, m[(i + x) & 3])]
-#define FT4(i) dest[i] = Ui32(FT(i, 0), FT(i, 1), FT(i, 2), FT(i, 3)) ^ w[i];
-
-
-#define HD(i, x, s) DD(x)[gb(x, s[(i - x) & 3])]
-
-#define HD4(m, i, s, p) m[i] = \
- HD(i, 0, s) ^ \
- HD(i, 1, s) ^ \
- HD(i, 2, s) ^ \
- HD(i, 3, s) ^ w[p + i];
-
-#define HD16(m, s, p) \
- HD4(m, 0, s, p); \
- HD4(m, 1, s, p); \
- HD4(m, 2, s, p); \
- HD4(m, 3, s, p); \
-
-#define FD(i, x) InvS[gb(x, m[(i - x) & 3])]
-#define FD4(i) dest[i] = Ui32(FD(i, 0), FD(i, 1), FD(i, 2), FD(i, 3)) ^ w[i];
-
-void Z7_FASTCALL Aes_SetKey_Enc(UInt32 *w, const Byte *key, unsigned keySize)
-{
- unsigned i, m;
- const UInt32 *wLim;
- UInt32 t;
- UInt32 rcon = 1;
-
- keySize /= 4;
- w[0] = ((UInt32)keySize / 2) + 3;
- w += 4;
-
- for (i = 0; i < keySize; i++, key += 4)
- w[i] = GetUi32(key);
-
- t = w[(size_t)keySize - 1];
- wLim = w + (size_t)keySize * 3 + 28;
- m = 0;
- do
- {
- if (m == 0)
- {
- t = Ui32(Sbox[gb1(t)] ^ rcon, Sbox[gb2(t)], Sbox[gb3(t)], Sbox[gb0(t)]);
- rcon <<= 1;
- if (rcon & 0x100)
- rcon = 0x1b;
- m = keySize;
- }
- else if (m == 4 && keySize > 6)
- t = Ui32(Sbox[gb0(t)], Sbox[gb1(t)], Sbox[gb2(t)], Sbox[gb3(t)]);
- m--;
- t ^= w[0];
- w[keySize] = t;
- }
- while (++w != wLim);
-}
-
-void Z7_FASTCALL Aes_SetKey_Dec(UInt32 *w, const Byte *key, unsigned keySize)
-{
- unsigned i, num;
- Aes_SetKey_Enc(w, key, keySize);
- num = keySize + 20;
- w += 8;
- for (i = 0; i < num; i++)
- {
- UInt32 r = w[i];
- w[i] =
- DD(0)[Sbox[gb0(r)]] ^
- DD(1)[Sbox[gb1(r)]] ^
- DD(2)[Sbox[gb2(r)]] ^
- DD(3)[Sbox[gb3(r)]];
- }
-}
-
-/* Aes_Encode and Aes_Decode functions work with little-endian words.
- src and dest are pointers to 4 UInt32 words.
- src and dest can point to same block */
-
-// Z7_FORCE_INLINE
-static void Aes_Encode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
-{
- UInt32 s[4];
- UInt32 m[4];
- UInt32 numRounds2 = w[0];
- w += 4;
- s[0] = src[0] ^ w[0];
- s[1] = src[1] ^ w[1];
- s[2] = src[2] ^ w[2];
- s[3] = src[3] ^ w[3];
- w += 4;
- for (;;)
- {
- HT16(m, s, 0)
- if (--numRounds2 == 0)
- break;
- HT16(s, m, 4)
- w += 8;
- }
- w += 4;
- FT4(0)
- FT4(1)
- FT4(2)
- FT4(3)
-}
-
-Z7_FORCE_INLINE
-static void Aes_Decode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
-{
- UInt32 s[4];
- UInt32 m[4];
- UInt32 numRounds2 = w[0];
- w += 4 + numRounds2 * 8;
- s[0] = src[0] ^ w[0];
- s[1] = src[1] ^ w[1];
- s[2] = src[2] ^ w[2];
- s[3] = src[3] ^ w[3];
- for (;;)
- {
- w -= 8;
- HD16(m, s, 4)
- if (--numRounds2 == 0)
- break;
- HD16(s, m, 0)
- }
- FD4(0)
- FD4(1)
- FD4(2)
- FD4(3)
-}
-
-void AesCbc_Init(UInt32 *p, const Byte *iv)
-{
- unsigned i;
- for (i = 0; i < 4; i++)
- p[i] = GetUi32(iv + i * 4);
-}
-
-void Z7_FASTCALL AesCbc_Encode(UInt32 *p, Byte *data, size_t numBlocks)
-{
- for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
- {
- p[0] ^= GetUi32(data);
- p[1] ^= GetUi32(data + 4);
- p[2] ^= GetUi32(data + 8);
- p[3] ^= GetUi32(data + 12);
-
- Aes_Encode(p + 4, p, p);
-
- SetUi32(data, p[0])
- SetUi32(data + 4, p[1])
- SetUi32(data + 8, p[2])
- SetUi32(data + 12, p[3])
- }
-}
-
-void Z7_FASTCALL AesCbc_Decode(UInt32 *p, Byte *data, size_t numBlocks)
-{
- UInt32 in[4], out[4];
- for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
- {
- in[0] = GetUi32(data);
- in[1] = GetUi32(data + 4);
- in[2] = GetUi32(data + 8);
- in[3] = GetUi32(data + 12);
-
- Aes_Decode(p + 4, out, in);
-
- SetUi32(data, p[0] ^ out[0])
- SetUi32(data + 4, p[1] ^ out[1])
- SetUi32(data + 8, p[2] ^ out[2])
- SetUi32(data + 12, p[3] ^ out[3])
-
- p[0] = in[0];
- p[1] = in[1];
- p[2] = in[2];
- p[3] = in[3];
- }
-}
-
-void Z7_FASTCALL AesCtr_Code(UInt32 *p, Byte *data, size_t numBlocks)
-{
- for (; numBlocks != 0; numBlocks--)
- {
- UInt32 temp[4];
- unsigned i;
-
- if (++p[0] == 0)
- p[1]++;
-
- Aes_Encode(p + 4, temp, p);
-
- for (i = 0; i < 4; i++, data += 4)
- {
- const UInt32 t = temp[i];
-
- #ifdef MY_CPU_LE_UNALIGN
- *((UInt32 *)(void *)data) ^= t;
- #else
- data[0] = (Byte)(data[0] ^ (t & 0xFF));
- data[1] = (Byte)(data[1] ^ ((t >> 8) & 0xFF));
- data[2] = (Byte)(data[2] ^ ((t >> 16) & 0xFF));
- data[3] = (Byte)(data[3] ^ ((t >> 24)));
- #endif
- }
- }
-}
-
-#undef xtime
-#undef Ui32
-#undef gb0
-#undef gb1
-#undef gb2
-#undef gb3
-#undef gb
-#undef TT
-#undef DD
-#undef USE_HW_AES
-#undef PRF
diff --git a/3rdparty/7z/src/Aes.h b/3rdparty/7z/src/Aes.h
deleted file mode 100644
index 2373afb970..0000000000
--- a/3rdparty/7z/src/Aes.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Aes.h -- AES encryption / decryption
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_AES_H
-#define ZIP7_INC_AES_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define AES_BLOCK_SIZE 16
-
-/* Call AesGenTables one time before other AES functions */
-void AesGenTables(void);
-
-/* UInt32 pointers must be 16-byte aligned */
-
-/* 16-byte (4 * 32-bit words) blocks: 1 (IV) + 1 (keyMode) + 15 (AES-256 roundKeys) */
-#define AES_NUM_IVMRK_WORDS ((1 + 1 + 15) * 4)
-
-/* aes - 16-byte aligned pointer to keyMode+roundKeys sequence */
-/* keySize = 16 or 24 or 32 (bytes) */
-typedef void (Z7_FASTCALL *AES_SET_KEY_FUNC)(UInt32 *aes, const Byte *key, unsigned keySize);
-void Z7_FASTCALL Aes_SetKey_Enc(UInt32 *aes, const Byte *key, unsigned keySize);
-void Z7_FASTCALL Aes_SetKey_Dec(UInt32 *aes, const Byte *key, unsigned keySize);
-
-/* ivAes - 16-byte aligned pointer to iv+keyMode+roundKeys sequence: UInt32[AES_NUM_IVMRK_WORDS] */
-void AesCbc_Init(UInt32 *ivAes, const Byte *iv); /* iv size is AES_BLOCK_SIZE */
-
-/* data - 16-byte aligned pointer to data */
-/* numBlocks - the number of 16-byte blocks in data array */
-typedef void (Z7_FASTCALL *AES_CODE_FUNC)(UInt32 *ivAes, Byte *data, size_t numBlocks);
-
-extern AES_CODE_FUNC g_AesCbc_Decode;
-#ifndef Z7_SFX
-extern AES_CODE_FUNC g_AesCbc_Encode;
-extern AES_CODE_FUNC g_AesCtr_Code;
-#define k_Aes_SupportedFunctions_HW (1 << 2)
-#define k_Aes_SupportedFunctions_HW_256 (1 << 3)
-extern UInt32 g_Aes_SupportedFunctions_Flags;
-#endif
-
-
-#define Z7_DECLARE_AES_CODE_FUNC(funcName) \
- void Z7_FASTCALL funcName(UInt32 *ivAes, Byte *data, size_t numBlocks);
-
-Z7_DECLARE_AES_CODE_FUNC (AesCbc_Encode)
-Z7_DECLARE_AES_CODE_FUNC (AesCbc_Decode)
-Z7_DECLARE_AES_CODE_FUNC (AesCtr_Code)
-
-Z7_DECLARE_AES_CODE_FUNC (AesCbc_Encode_HW)
-Z7_DECLARE_AES_CODE_FUNC (AesCbc_Decode_HW)
-Z7_DECLARE_AES_CODE_FUNC (AesCtr_Code_HW)
-
-Z7_DECLARE_AES_CODE_FUNC (AesCbc_Decode_HW_256)
-Z7_DECLARE_AES_CODE_FUNC (AesCtr_Code_HW_256)
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/AesOpt.c b/3rdparty/7z/src/AesOpt.c
deleted file mode 100644
index af911e22f9..0000000000
--- a/3rdparty/7z/src/AesOpt.c
+++ /dev/null
@@ -1,840 +0,0 @@
-/* AesOpt.c -- AES optimized code for x86 AES hardware instructions
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Aes.h"
-#include "CpuArch.h"
-
-#ifdef MY_CPU_X86_OR_AMD64
-
- #if defined(__INTEL_COMPILER)
- #if (__INTEL_COMPILER >= 1110)
- #define USE_INTEL_AES
- #if (__INTEL_COMPILER >= 1900)
- #define USE_INTEL_VAES
- #endif
- #endif
- #elif defined(__clang__) && (__clang_major__ > 3 || __clang_major__ == 3 && __clang_minor__ >= 8) \
- || defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4)
- #define USE_INTEL_AES
- #if !defined(__AES__)
- #define ATTRIB_AES __attribute__((__target__("aes")))
- #endif
- #if defined(__clang__) && (__clang_major__ >= 8) \
- || defined(__GNUC__) && (__GNUC__ >= 8)
- #define USE_INTEL_VAES
- #if !defined(__AES__) || !defined(__VAES__) || !defined(__AVX__) || !defined(__AVX2__)
- #define ATTRIB_VAES __attribute__((__target__("aes,vaes,avx,avx2")))
- #endif
- #endif
- #elif defined(_MSC_VER)
- #if (_MSC_VER > 1500) || (_MSC_FULL_VER >= 150030729)
- #define USE_INTEL_AES
- #if (_MSC_VER >= 1910)
- #define USE_INTEL_VAES
- #endif
- #endif
- #endif
-
-#ifndef ATTRIB_AES
- #define ATTRIB_AES
-#endif
-#ifndef ATTRIB_VAES
- #define ATTRIB_VAES
-#endif
-
-
-#ifdef USE_INTEL_AES
-
-#include <wmmintrin.h>
-
-#ifndef USE_INTEL_VAES
-#define AES_TYPE_keys UInt32
-#define AES_TYPE_data Byte
-// #define AES_TYPE_keys __m128i
-// #define AES_TYPE_data __m128i
-#endif
-
-#define AES_FUNC_START(name) \
- void Z7_FASTCALL name(UInt32 *ivAes, Byte *data8, size_t numBlocks)
- // void Z7_FASTCALL name(__m128i *p, __m128i *data, size_t numBlocks)
-
-#define AES_FUNC_START2(name) \
-AES_FUNC_START (name); \
-ATTRIB_AES \
-AES_FUNC_START (name)
-
-#define MM_OP(op, dest, src) dest = op(dest, src);
-#define MM_OP_m(op, src) MM_OP(op, m, src)
-
-#define MM_XOR( dest, src) MM_OP(_mm_xor_si128, dest, src)
-#define AVX_XOR(dest, src) MM_OP(_mm256_xor_si256, dest, src)
-
-
-AES_FUNC_START2 (AesCbc_Encode_HW)
-{
- __m128i *p = (__m128i *)(void *)ivAes;
- __m128i *data = (__m128i *)(void *)data8;
- __m128i m = *p;
- const __m128i k0 = p[2];
- const __m128i k1 = p[3];
- const UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
- for (; numBlocks != 0; numBlocks--, data++)
- {
- UInt32 r = numRounds2;
- const __m128i *w = p + 4;
- __m128i temp = *data;
- MM_XOR (temp, k0)
- MM_XOR (m, temp)
- MM_OP_m (_mm_aesenc_si128, k1)
- do
- {
- MM_OP_m (_mm_aesenc_si128, w[0])
- MM_OP_m (_mm_aesenc_si128, w[1])
- w += 2;
- }
- while (--r);
- MM_OP_m (_mm_aesenclast_si128, w[0])
- *data = m;
- }
- *p = m;
-}
-
-
-#define WOP_1(op)
-#define WOP_2(op) WOP_1 (op) op (m1, 1)
-#define WOP_3(op) WOP_2 (op) op (m2, 2)
-#define WOP_4(op) WOP_3 (op) op (m3, 3)
-#ifdef MY_CPU_AMD64
-#define WOP_5(op) WOP_4 (op) op (m4, 4)
-#define WOP_6(op) WOP_5 (op) op (m5, 5)
-#define WOP_7(op) WOP_6 (op) op (m6, 6)
-#define WOP_8(op) WOP_7 (op) op (m7, 7)
-#endif
-/*
-#define WOP_9(op) WOP_8 (op) op (m8, 8);
-#define WOP_10(op) WOP_9 (op) op (m9, 9);
-#define WOP_11(op) WOP_10(op) op (m10, 10);
-#define WOP_12(op) WOP_11(op) op (m11, 11);
-#define WOP_13(op) WOP_12(op) op (m12, 12);
-#define WOP_14(op) WOP_13(op) op (m13, 13);
-*/
-
-#ifdef MY_CPU_AMD64
- #define NUM_WAYS 8
- #define WOP_M1 WOP_8
-#else
- #define NUM_WAYS 4
- #define WOP_M1 WOP_4
-#endif
-
-#define WOP(op) op (m0, 0) WOP_M1(op)
-
-
-#define DECLARE_VAR(reg, ii) __m128i reg;
-#define LOAD_data( reg, ii) reg = data[ii];
-#define STORE_data( reg, ii) data[ii] = reg;
-#if (NUM_WAYS > 1)
-#define XOR_data_M1(reg, ii) MM_XOR (reg, data[ii- 1])
-#endif
-
-#define AVX_DECLARE_VAR(reg, ii) __m256i reg;
-#define AVX_LOAD_data( reg, ii) reg = ((const __m256i *)(const void *)data)[ii];
-#define AVX_STORE_data( reg, ii) ((__m256i *)(void *)data)[ii] = reg;
-#define AVX_XOR_data_M1(reg, ii) AVX_XOR (reg, (((const __m256i *)(const void *)(data - 1))[ii]))
-
-#define MM_OP_key(op, reg) MM_OP(op, reg, key);
-
-#define AES_DEC( reg, ii) MM_OP_key (_mm_aesdec_si128, reg)
-#define AES_DEC_LAST( reg, ii) MM_OP_key (_mm_aesdeclast_si128, reg)
-#define AES_ENC( reg, ii) MM_OP_key (_mm_aesenc_si128, reg)
-#define AES_ENC_LAST( reg, ii) MM_OP_key (_mm_aesenclast_si128, reg)
-#define AES_XOR( reg, ii) MM_OP_key (_mm_xor_si128, reg)
-
-
-#define AVX_AES_DEC( reg, ii) MM_OP_key (_mm256_aesdec_epi128, reg)
-#define AVX_AES_DEC_LAST( reg, ii) MM_OP_key (_mm256_aesdeclast_epi128, reg)
-#define AVX_AES_ENC( reg, ii) MM_OP_key (_mm256_aesenc_epi128, reg)
-#define AVX_AES_ENC_LAST( reg, ii) MM_OP_key (_mm256_aesenclast_epi128, reg)
-#define AVX_AES_XOR( reg, ii) MM_OP_key (_mm256_xor_si256, reg)
-
-#define CTR_START(reg, ii) MM_OP (_mm_add_epi64, ctr, one) reg = ctr;
-#define CTR_END( reg, ii) MM_XOR (data[ii], reg)
-
-#define AVX_CTR_START(reg, ii) MM_OP (_mm256_add_epi64, ctr2, two) reg = _mm256_xor_si256(ctr2, key);
-#define AVX_CTR_END( reg, ii) AVX_XOR (((__m256i *)(void *)data)[ii], reg)
-
-#define WOP_KEY(op, n) { \
- const __m128i key = w[n]; \
- WOP(op); }
-
-#define AVX_WOP_KEY(op, n) { \
- const __m256i key = w[n]; \
- WOP(op); }
-
-
-#define WIDE_LOOP_START \
- dataEnd = data + numBlocks; \
- if (numBlocks >= NUM_WAYS) \
- { dataEnd -= NUM_WAYS; do { \
-
-
-#define WIDE_LOOP_END \
- data += NUM_WAYS; \
- } while (data <= dataEnd); \
- dataEnd += NUM_WAYS; } \
-
-
-#define SINGLE_LOOP \
- for (; data < dataEnd; data++)
-
-
-#define NUM_AES_KEYS_MAX 15
-
-#define WIDE_LOOP_START_AVX(OP) \
- dataEnd = data + numBlocks; \
- if (numBlocks >= NUM_WAYS * 2) \
- { __m256i keys[NUM_AES_KEYS_MAX]; \
- UInt32 ii; \
- OP \
- for (ii = 0; ii < numRounds; ii++) \
- keys[ii] = _mm256_broadcastsi128_si256(p[ii]); \
- dataEnd -= NUM_WAYS * 2; do { \
-
-
-#define WIDE_LOOP_END_AVX(OP) \
- data += NUM_WAYS * 2; \
- } while (data <= dataEnd); \
- dataEnd += NUM_WAYS * 2; \
- OP \
- _mm256_zeroupper(); \
- } \
-
-/* MSVC for x86: If we don't call _mm256_zeroupper(), and -arch:IA32 is not specified,
- MSVC still can insert vzeroupper instruction. */
-
-
-AES_FUNC_START2 (AesCbc_Decode_HW)
-{
- __m128i *p = (__m128i *)(void *)ivAes;
- __m128i *data = (__m128i *)(void *)data8;
- __m128i iv = *p;
- const __m128i *wStart = p + *(const UInt32 *)(p + 1) * 2 + 2 - 1;
- const __m128i *dataEnd;
- p += 2;
-
- WIDE_LOOP_START
- {
- const __m128i *w = wStart;
-
- WOP (DECLARE_VAR)
- WOP (LOAD_data)
- WOP_KEY (AES_XOR, 1)
-
- do
- {
- WOP_KEY (AES_DEC, 0)
- w--;
- }
- while (w != p);
- WOP_KEY (AES_DEC_LAST, 0)
-
- MM_XOR (m0, iv)
- WOP_M1 (XOR_data_M1)
- iv = data[NUM_WAYS - 1];
- WOP (STORE_data)
- }
- WIDE_LOOP_END
-
- SINGLE_LOOP
- {
- const __m128i *w = wStart - 1;
- __m128i m = _mm_xor_si128 (w[2], *data);
- do
- {
- MM_OP_m (_mm_aesdec_si128, w[1])
- MM_OP_m (_mm_aesdec_si128, w[0])
- w -= 2;
- }
- while (w != p);
- MM_OP_m (_mm_aesdec_si128, w[1])
- MM_OP_m (_mm_aesdeclast_si128, w[0])
-
- MM_XOR (m, iv)
- iv = *data;
- *data = m;
- }
-
- p[-2] = iv;
-}
-
-
-AES_FUNC_START2 (AesCtr_Code_HW)
-{
- __m128i *p = (__m128i *)(void *)ivAes;
- __m128i *data = (__m128i *)(void *)data8;
- __m128i ctr = *p;
- UInt32 numRoundsMinus2 = *(const UInt32 *)(p + 1) * 2 - 1;
- const __m128i *dataEnd;
- __m128i one = _mm_cvtsi32_si128(1);
-
- p += 2;
-
- WIDE_LOOP_START
- {
- const __m128i *w = p;
- UInt32 r = numRoundsMinus2;
- WOP (DECLARE_VAR)
- WOP (CTR_START)
- WOP_KEY (AES_XOR, 0)
- w += 1;
- do
- {
- WOP_KEY (AES_ENC, 0)
- w += 1;
- }
- while (--r);
- WOP_KEY (AES_ENC_LAST, 0)
-
- WOP (CTR_END)
- }
- WIDE_LOOP_END
-
- SINGLE_LOOP
- {
- UInt32 numRounds2 = *(const UInt32 *)(p - 2 + 1) - 1;
- const __m128i *w = p;
- __m128i m;
- MM_OP (_mm_add_epi64, ctr, one)
- m = _mm_xor_si128 (ctr, p[0]);
- w += 1;
- do
- {
- MM_OP_m (_mm_aesenc_si128, w[0])
- MM_OP_m (_mm_aesenc_si128, w[1])
- w += 2;
- }
- while (--numRounds2);
- MM_OP_m (_mm_aesenc_si128, w[0])
- MM_OP_m (_mm_aesenclast_si128, w[1])
- MM_XOR (*data, m)
- }
-
- p[-2] = ctr;
-}
-
-
-
-#ifdef USE_INTEL_VAES
-
-/*
-GCC before 2013-Jun:
- <immintrin.h>:
- #ifdef __AVX__
- #include <avxintrin.h>
- #endif
-GCC after 2013-Jun:
- <immintrin.h>:
- #include <avxintrin.h>
-CLANG 3.8+:
-{
- <immintrin.h>:
- #if !defined(_MSC_VER) || defined(__AVX__)
- #include <avxintrin.h>
- #endif
-
- if (the compiler is clang for Windows and if global arch is not set for __AVX__)
- [ if (defined(_MSC_VER) && !defined(__AVX__)) ]
- {
- <immintrin.h> doesn't include <avxintrin.h>
- and we have 2 ways to fix it:
- 1) we can define required __AVX__ before <immintrin.h>
- or
- 2) we can include <avxintrin.h> after <immintrin.h>
- }
-}
-
-If we include <avxintrin.h> manually for GCC/CLANG, it's
-required that <immintrin.h> must be included before <avxintrin.h>.
-*/
-
-/*
-#if defined(__clang__) && defined(_MSC_VER)
-#define __AVX__
-#define __AVX2__
-#define __VAES__
-#endif
-*/
-
-#include <immintrin.h>
-#if defined(__clang__) && defined(_MSC_VER)
- #if !defined(__AVX__)
- #include <avxintrin.h>
- #endif
- #if !defined(__AVX2__)
- #include <avx2intrin.h>
- #endif
- #if !defined(__VAES__)
- #include <vaesintrin.h>
- #endif
-#endif // __clang__ && _MSC_VER
-
-
-#define VAES_FUNC_START2(name) \
-AES_FUNC_START (name); \
-ATTRIB_VAES \
-AES_FUNC_START (name)
-
-VAES_FUNC_START2 (AesCbc_Decode_HW_256)
-{
- __m128i *p = (__m128i *)(void *)ivAes;
- __m128i *data = (__m128i *)(void *)data8;
- __m128i iv = *p;
- const __m128i *dataEnd;
- UInt32 numRounds = *(const UInt32 *)(p + 1) * 2 + 1;
- p += 2;
-
- WIDE_LOOP_START_AVX(;)
- {
- const __m256i *w = keys + numRounds - 2;
-
- WOP (AVX_DECLARE_VAR)
- WOP (AVX_LOAD_data)
- AVX_WOP_KEY (AVX_AES_XOR, 1)
-
- do
- {
- AVX_WOP_KEY (AVX_AES_DEC, 0)
- w--;
- }
- while (w != keys);
- AVX_WOP_KEY (AVX_AES_DEC_LAST, 0)
-
- AVX_XOR (m0, _mm256_setr_m128i(iv, data[0]))
- WOP_M1 (AVX_XOR_data_M1)
- iv = data[NUM_WAYS * 2 - 1];
- WOP (AVX_STORE_data)
- }
- WIDE_LOOP_END_AVX(;)
-
- SINGLE_LOOP
- {
- const __m128i *w = p + *(const UInt32 *)(p + 1 - 2) * 2 + 1 - 3;
- __m128i m = _mm_xor_si128 (w[2], *data);
- do
- {
- MM_OP_m (_mm_aesdec_si128, w[1])
- MM_OP_m (_mm_aesdec_si128, w[0])
- w -= 2;
- }
- while (w != p);
- MM_OP_m (_mm_aesdec_si128, w[1])
- MM_OP_m (_mm_aesdeclast_si128, w[0])
-
- MM_XOR (m, iv)
- iv = *data;
- *data = m;
- }
-
- p[-2] = iv;
-}
-
-
-/*
-SSE2: _mm_cvtsi32_si128 : movd
-AVX: _mm256_setr_m128i : vinsertf128
-AVX2: _mm256_add_epi64 : vpaddq ymm, ymm, ymm
- _mm256_extracti128_si256 : vextracti128
- _mm256_broadcastsi128_si256 : vbroadcasti128
-*/
-
-#define AVX_CTR_LOOP_START \
- ctr2 = _mm256_setr_m128i(_mm_sub_epi64(ctr, one), ctr); \
- two = _mm256_setr_m128i(one, one); \
- two = _mm256_add_epi64(two, two); \
-
-// two = _mm256_setr_epi64x(2, 0, 2, 0);
-
-#define AVX_CTR_LOOP_ENC \
- ctr = _mm256_extracti128_si256 (ctr2, 1); \
-
-VAES_FUNC_START2 (AesCtr_Code_HW_256)
-{
- __m128i *p = (__m128i *)(void *)ivAes;
- __m128i *data = (__m128i *)(void *)data8;
- __m128i ctr = *p;
- UInt32 numRounds = *(const UInt32 *)(p + 1) * 2 + 1;
- const __m128i *dataEnd;
- __m128i one = _mm_cvtsi32_si128(1);
- __m256i ctr2, two;
- p += 2;
-
- WIDE_LOOP_START_AVX (AVX_CTR_LOOP_START)
- {
- const __m256i *w = keys;
- UInt32 r = numRounds - 2;
- WOP (AVX_DECLARE_VAR)
- AVX_WOP_KEY (AVX_CTR_START, 0)
-
- w += 1;
- do
- {
- AVX_WOP_KEY (AVX_AES_ENC, 0)
- w += 1;
- }
- while (--r);
- AVX_WOP_KEY (AVX_AES_ENC_LAST, 0)
-
- WOP (AVX_CTR_END)
- }
- WIDE_LOOP_END_AVX (AVX_CTR_LOOP_ENC)
-
- SINGLE_LOOP
- {
- UInt32 numRounds2 = *(const UInt32 *)(p - 2 + 1) - 1;
- const __m128i *w = p;
- __m128i m;
- MM_OP (_mm_add_epi64, ctr, one)
- m = _mm_xor_si128 (ctr, p[0]);
- w += 1;
- do
- {
- MM_OP_m (_mm_aesenc_si128, w[0])
- MM_OP_m (_mm_aesenc_si128, w[1])
- w += 2;
- }
- while (--numRounds2);
- MM_OP_m (_mm_aesenc_si128, w[0])
- MM_OP_m (_mm_aesenclast_si128, w[1])
- MM_XOR (*data, m)
- }
-
- p[-2] = ctr;
-}
-
-#endif // USE_INTEL_VAES
-
-#else // USE_INTEL_AES
-
-/* no USE_INTEL_AES */
-
-#pragma message("AES HW_SW stub was used")
-
-#define AES_TYPE_keys UInt32
-#define AES_TYPE_data Byte
-
-#define AES_FUNC_START(name) \
- void Z7_FASTCALL name(UInt32 *p, Byte *data, size_t numBlocks) \
-
-#define AES_COMPAT_STUB(name) \
- AES_FUNC_START(name); \
- AES_FUNC_START(name ## _HW) \
- { name(p, data, numBlocks); }
-
-AES_COMPAT_STUB (AesCbc_Encode)
-AES_COMPAT_STUB (AesCbc_Decode)
-AES_COMPAT_STUB (AesCtr_Code)
-
-#endif // USE_INTEL_AES
-
-
-#ifndef USE_INTEL_VAES
-
-#pragma message("VAES HW_SW stub was used")
-
-#define VAES_COMPAT_STUB(name) \
- void Z7_FASTCALL name ## _256(UInt32 *p, Byte *data, size_t numBlocks); \
- void Z7_FASTCALL name ## _256(UInt32 *p, Byte *data, size_t numBlocks) \
- { name((AES_TYPE_keys *)(void *)p, (AES_TYPE_data *)(void *)data, numBlocks); }
-
-VAES_COMPAT_STUB (AesCbc_Decode_HW)
-VAES_COMPAT_STUB (AesCtr_Code_HW)
-
-#endif // ! USE_INTEL_VAES
-
-
-#elif defined(MY_CPU_ARM_OR_ARM64) && defined(MY_CPU_LE)
-
- #if defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define USE_HW_AES
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 6) // fix that check
- #define USE_HW_AES
- #endif
- #elif defined(_MSC_VER)
- #if _MSC_VER >= 1910
- #define USE_HW_AES
- #endif
- #endif
-
-#ifdef USE_HW_AES
-
-// #pragma message("=== AES HW === ")
-
-#if defined(__clang__) || defined(__GNUC__)
- #ifdef MY_CPU_ARM64
- #define ATTRIB_AES __attribute__((__target__("+crypto")))
- #else
- #define ATTRIB_AES __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
- #endif
-#else
- // _MSC_VER
- // for arm32
- #define _ARM_USE_NEW_NEON_INTRINSICS
-#endif
-
-#ifndef ATTRIB_AES
- #define ATTRIB_AES
-#endif
-
-#if defined(_MSC_VER) && defined(MY_CPU_ARM64)
-#include <arm64_neon.h>
-#else
-#include <arm_neon.h>
-#endif
-
-typedef uint8x16_t v128;
-
-#define AES_FUNC_START(name) \
- void Z7_FASTCALL name(UInt32 *ivAes, Byte *data8, size_t numBlocks)
- // void Z7_FASTCALL name(v128 *p, v128 *data, size_t numBlocks)
-
-#define AES_FUNC_START2(name) \
-AES_FUNC_START (name); \
-ATTRIB_AES \
-AES_FUNC_START (name)
-
-#define MM_OP(op, dest, src) dest = op(dest, src);
-#define MM_OP_m(op, src) MM_OP(op, m, src)
-#define MM_OP1_m(op) m = op(m);
-
-#define MM_XOR( dest, src) MM_OP(veorq_u8, dest, src)
-#define MM_XOR_m( src) MM_XOR(m, src)
-
-#define AES_E_m(k) MM_OP_m (vaeseq_u8, k)
-#define AES_E_MC_m(k) AES_E_m (k) MM_OP1_m(vaesmcq_u8)
-
-
-AES_FUNC_START2 (AesCbc_Encode_HW)
-{
- v128 *p = (v128*)(void*)ivAes;
- v128 *data = (v128*)(void*)data8;
- v128 m = *p;
- const v128 k0 = p[2];
- const v128 k1 = p[3];
- const v128 k2 = p[4];
- const v128 k3 = p[5];
- const v128 k4 = p[6];
- const v128 k5 = p[7];
- const v128 k6 = p[8];
- const v128 k7 = p[9];
- const v128 k8 = p[10];
- const v128 k9 = p[11];
- const UInt32 numRounds2 = *(const UInt32 *)(p + 1);
- const v128 *w = p + ((size_t)numRounds2 * 2);
- const v128 k_z1 = w[1];
- const v128 k_z0 = w[2];
- for (; numBlocks != 0; numBlocks--, data++)
- {
- MM_XOR_m (*data);
- AES_E_MC_m (k0)
- AES_E_MC_m (k1)
- AES_E_MC_m (k2)
- AES_E_MC_m (k3)
- AES_E_MC_m (k4)
- AES_E_MC_m (k5)
- AES_E_MC_m (k6)
- AES_E_MC_m (k7)
- AES_E_MC_m (k8)
- if (numRounds2 >= 6)
- {
- AES_E_MC_m (k9)
- AES_E_MC_m (p[12])
- if (numRounds2 != 6)
- {
- AES_E_MC_m (p[13])
- AES_E_MC_m (p[14])
- }
- }
- AES_E_m (k_z1)
- MM_XOR_m (k_z0);
- *data = m;
- }
- *p = m;
-}
-
-
-#define WOP_1(op)
-#define WOP_2(op) WOP_1 (op) op (m1, 1)
-#define WOP_3(op) WOP_2 (op) op (m2, 2)
-#define WOP_4(op) WOP_3 (op) op (m3, 3)
-#define WOP_5(op) WOP_4 (op) op (m4, 4)
-#define WOP_6(op) WOP_5 (op) op (m5, 5)
-#define WOP_7(op) WOP_6 (op) op (m6, 6)
-#define WOP_8(op) WOP_7 (op) op (m7, 7)
-
- #define NUM_WAYS 8
- #define WOP_M1 WOP_8
-
-#define WOP(op) op (m0, 0) WOP_M1(op)
-
-#define DECLARE_VAR(reg, ii) v128 reg;
-#define LOAD_data( reg, ii) reg = data[ii];
-#define STORE_data( reg, ii) data[ii] = reg;
-#if (NUM_WAYS > 1)
-#define XOR_data_M1(reg, ii) MM_XOR (reg, data[ii- 1])
-#endif
-
-#define MM_OP_key(op, reg) MM_OP (op, reg, key)
-
-#define AES_D_m(k) MM_OP_m (vaesdq_u8, k)
-#define AES_D_IMC_m(k) AES_D_m (k) MM_OP1_m (vaesimcq_u8)
-
-#define AES_XOR( reg, ii) MM_OP_key (veorq_u8, reg)
-#define AES_D( reg, ii) MM_OP_key (vaesdq_u8, reg)
-#define AES_E( reg, ii) MM_OP_key (vaeseq_u8, reg)
-
-#define AES_D_IMC( reg, ii) AES_D (reg, ii) reg = vaesimcq_u8(reg);
-#define AES_E_MC( reg, ii) AES_E (reg, ii) reg = vaesmcq_u8(reg);
-
-#define CTR_START(reg, ii) MM_OP (vaddq_u64, ctr, one) reg = vreinterpretq_u8_u64(ctr);
-#define CTR_END( reg, ii) MM_XOR (data[ii], reg)
-
-#define WOP_KEY(op, n) { \
- const v128 key = w[n]; \
- WOP(op) }
-
-#define WIDE_LOOP_START \
- dataEnd = data + numBlocks; \
- if (numBlocks >= NUM_WAYS) \
- { dataEnd -= NUM_WAYS; do { \
-
-#define WIDE_LOOP_END \
- data += NUM_WAYS; \
- } while (data <= dataEnd); \
- dataEnd += NUM_WAYS; } \
-
-#define SINGLE_LOOP \
- for (; data < dataEnd; data++)
-
-
-AES_FUNC_START2 (AesCbc_Decode_HW)
-{
- v128 *p = (v128*)(void*)ivAes;
- v128 *data = (v128*)(void*)data8;
- v128 iv = *p;
- const v128 *wStart = p + ((size_t)*(const UInt32 *)(p + 1)) * 2;
- const v128 *dataEnd;
- p += 2;
-
- WIDE_LOOP_START
- {
- const v128 *w = wStart;
- WOP (DECLARE_VAR)
- WOP (LOAD_data)
- WOP_KEY (AES_D_IMC, 2)
- do
- {
- WOP_KEY (AES_D_IMC, 1)
- WOP_KEY (AES_D_IMC, 0)
- w -= 2;
- }
- while (w != p);
- WOP_KEY (AES_D, 1)
- WOP_KEY (AES_XOR, 0)
- MM_XOR (m0, iv);
- WOP_M1 (XOR_data_M1)
- iv = data[NUM_WAYS - 1];
- WOP (STORE_data)
- }
- WIDE_LOOP_END
-
- SINGLE_LOOP
- {
- const v128 *w = wStart;
- v128 m = *data;
- AES_D_IMC_m (w[2])
- do
- {
- AES_D_IMC_m (w[1]);
- AES_D_IMC_m (w[0]);
- w -= 2;
- }
- while (w != p);
- AES_D_m (w[1]);
- MM_XOR_m (w[0]);
- MM_XOR_m (iv);
- iv = *data;
- *data = m;
- }
-
- p[-2] = iv;
-}
-
-
-AES_FUNC_START2 (AesCtr_Code_HW)
-{
- v128 *p = (v128*)(void*)ivAes;
- v128 *data = (v128*)(void*)data8;
- uint64x2_t ctr = vreinterpretq_u64_u8(*p);
- const v128 *wEnd = p + ((size_t)*(const UInt32 *)(p + 1)) * 2;
- const v128 *dataEnd;
- uint64x2_t one = vdupq_n_u64(0);
- one = vsetq_lane_u64(1, one, 0);
- p += 2;
-
- WIDE_LOOP_START
- {
- const v128 *w = p;
- WOP (DECLARE_VAR)
- WOP (CTR_START)
- do
- {
- WOP_KEY (AES_E_MC, 0)
- WOP_KEY (AES_E_MC, 1)
- w += 2;
- }
- while (w != wEnd);
- WOP_KEY (AES_E_MC, 0)
- WOP_KEY (AES_E, 1)
- WOP_KEY (AES_XOR, 2)
- WOP (CTR_END)
- }
- WIDE_LOOP_END
-
- SINGLE_LOOP
- {
- const v128 *w = p;
- v128 m;
- CTR_START (m, 0);
- do
- {
- AES_E_MC_m (w[0]);
- AES_E_MC_m (w[1]);
- w += 2;
- }
- while (w != wEnd);
- AES_E_MC_m (w[0])
- AES_E_m (w[1])
- MM_XOR_m (w[2])
- CTR_END (m, 0)
- }
-
- p[-2] = vreinterpretq_u8_u64(ctr);
-}
-
-#endif // USE_HW_AES
-
-#endif // MY_CPU_ARM_OR_ARM64
-
-#undef NUM_WAYS
-#undef WOP_M1
-#undef WOP
-#undef DECLARE_VAR
-#undef LOAD_data
-#undef STORE_data
-#undef USE_INTEL_AES
-#undef USE_HW_AES
diff --git a/3rdparty/7z/src/Alloc.c b/3rdparty/7z/src/Alloc.c
deleted file mode 100644
index 0b12133dea..0000000000
--- a/3rdparty/7z/src/Alloc.c
+++ /dev/null
@@ -1,535 +0,0 @@
-/* Alloc.c -- Memory allocation functions
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#ifdef _WIN32
-#include "7zWindows.h"
-#endif
-#include <stdlib.h>
-
-#include "Alloc.h"
-
-#ifdef _WIN32
-#ifdef Z7_LARGE_PAGES
-#if defined(__clang__) || defined(__GNUC__)
-typedef void (*Z7_voidFunction)(void);
-#define MY_CAST_FUNC (Z7_voidFunction)
-#elif defined(_MSC_VER) && _MSC_VER > 1920
-#define MY_CAST_FUNC (void *)
-// #pragma warning(disable : 4191) // 'type cast': unsafe conversion from 'FARPROC' to 'void (__cdecl *)()'
-#else
-#define MY_CAST_FUNC
-#endif
-#endif // Z7_LARGE_PAGES
-#endif // _WIN32
-
-// #define SZ_ALLOC_DEBUG
-/* #define SZ_ALLOC_DEBUG */
-
-/* use SZ_ALLOC_DEBUG to debug alloc/free operations */
-#ifdef SZ_ALLOC_DEBUG
-
-#include <stdio.h>
-#include <string.h>
-static int g_allocCount = 0;
-#ifdef _WIN32
-static int g_allocCountMid = 0;
-static int g_allocCountBig = 0;
-#endif
-
-
-#define CONVERT_INT_TO_STR(charType, tempSize) \
- char temp[tempSize]; unsigned i = 0; \
- while (val >= 10) { temp[i++] = (char)('0' + (unsigned)(val % 10)); val /= 10; } \
- *s++ = (charType)('0' + (unsigned)val); \
- while (i != 0) { i--; *s++ = temp[i]; } \
- *s = 0;
-
-static void ConvertUInt64ToString(UInt64 val, char *s)
-{
- CONVERT_INT_TO_STR(char, 24)
-}
-
-#define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
-
-static void ConvertUInt64ToHex(UInt64 val, char *s)
-{
- UInt64 v = val;
- unsigned i;
- for (i = 1;; i++)
- {
- v >>= 4;
- if (v == 0)
- break;
- }
- s[i] = 0;
- do
- {
- unsigned t = (unsigned)(val & 0xF);
- val >>= 4;
- s[--i] = GET_HEX_CHAR(t);
- }
- while (i);
-}
-
-#define DEBUG_OUT_STREAM stderr
-
-static void Print(const char *s)
-{
- fputs(s, DEBUG_OUT_STREAM);
-}
-
-static void PrintAligned(const char *s, size_t align)
-{
- size_t len = strlen(s);
- for(;;)
- {
- fputc(' ', DEBUG_OUT_STREAM);
- if (len >= align)
- break;
- ++len;
- }
- Print(s);
-}
-
-static void PrintLn(void)
-{
- Print("\n");
-}
-
-static void PrintHex(UInt64 v, size_t align)
-{
- char s[32];
- ConvertUInt64ToHex(v, s);
- PrintAligned(s, align);
-}
-
-static void PrintDec(int v, size_t align)
-{
- char s[32];
- ConvertUInt64ToString((unsigned)v, s);
- PrintAligned(s, align);
-}
-
-static void PrintAddr(void *p)
-{
- PrintHex((UInt64)(size_t)(ptrdiff_t)p, 12);
-}
-
-
-#define PRINT_REALLOC(name, cnt, size, ptr) { \
- Print(name " "); \
- if (!ptr) PrintDec(cnt++, 10); \
- PrintHex(size, 10); \
- PrintAddr(ptr); \
- PrintLn(); }
-
-#define PRINT_ALLOC(name, cnt, size, ptr) { \
- Print(name " "); \
- PrintDec(cnt++, 10); \
- PrintHex(size, 10); \
- PrintAddr(ptr); \
- PrintLn(); }
-
-#define PRINT_FREE(name, cnt, ptr) if (ptr) { \
- Print(name " "); \
- PrintDec(--cnt, 10); \
- PrintAddr(ptr); \
- PrintLn(); }
-
-#else
-
-#ifdef _WIN32
-#define PRINT_ALLOC(name, cnt, size, ptr)
-#endif
-#define PRINT_FREE(name, cnt, ptr)
-#define Print(s)
-#define PrintLn()
-#define PrintHex(v, align)
-#define PrintAddr(p)
-
-#endif
-
-
-/*
-by specification:
- malloc(non_NULL, 0) : returns NULL or a unique pointer value that can later be successfully passed to free()
- realloc(NULL, size) : the call is equivalent to malloc(size)
- realloc(non_NULL, 0) : the call is equivalent to free(ptr)
-
-in main compilers:
- malloc(0) : returns non_NULL
- realloc(NULL, 0) : returns non_NULL
- realloc(non_NULL, 0) : returns NULL
-*/
-
-
-void *MyAlloc(size_t size)
-{
- if (size == 0)
- return NULL;
- // PRINT_ALLOC("Alloc ", g_allocCount, size, NULL)
- #ifdef SZ_ALLOC_DEBUG
- {
- void *p = malloc(size);
- if (p)
- {
- PRINT_ALLOC("Alloc ", g_allocCount, size, p)
- }
- return p;
- }
- #else
- return malloc(size);
- #endif
-}
-
-void MyFree(void *address)
-{
- PRINT_FREE("Free ", g_allocCount, address)
-
- free(address);
-}
-
-void *MyRealloc(void *address, size_t size)
-{
- if (size == 0)
- {
- MyFree(address);
- return NULL;
- }
- // PRINT_REALLOC("Realloc ", g_allocCount, size, address)
- #ifdef SZ_ALLOC_DEBUG
- {
- void *p = realloc(address, size);
- if (p)
- {
- PRINT_REALLOC("Realloc ", g_allocCount, size, address)
- }
- return p;
- }
- #else
- return realloc(address, size);
- #endif
-}
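/* Illustrative sketch (not part of the deleted file): the wrappers above
   normalize the size == 0 corner cases that malloc()/realloc() leave
   implementation-defined, so callers can rely on one convention: a zero-size
   request always yields NULL and never leaks. */
#include <stdio.h>

static void demo_alloc_semantics(void)
{
    void *p = MyAlloc(0);     /* always NULL, unlike malloc(0)            */
    void *q = MyAlloc(16);    /* plain malloc(16)                         */
    q = MyRealloc(q, 16 * 2); /* grows like realloc()                     */
    q = MyRealloc(q, 0);      /* frees the block and returns NULL         */
    MyFree(p);                /* MyFree(NULL) is a no-op, like free(NULL) */
    printf("p=%p q=%p\n", p, q);
}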
-
-
-#ifdef _WIN32
-
-void *MidAlloc(size_t size)
-{
- if (size == 0)
- return NULL;
- #ifdef SZ_ALLOC_DEBUG
- {
- void *p = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
- if (p)
- {
- PRINT_ALLOC("Alloc-Mid", g_allocCountMid, size, p)
- }
- return p;
- }
- #else
- return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
- #endif
-}
-
-void MidFree(void *address)
-{
- PRINT_FREE("Free-Mid", g_allocCountMid, address)
-
- if (!address)
- return;
- VirtualFree(address, 0, MEM_RELEASE);
-}
-
-#ifdef Z7_LARGE_PAGES
-
-#ifdef MEM_LARGE_PAGES
- #define MY__MEM_LARGE_PAGES MEM_LARGE_PAGES
-#else
- #define MY__MEM_LARGE_PAGES 0x20000000
-#endif
-
-extern
-SIZE_T g_LargePageSize;
-SIZE_T g_LargePageSize = 0;
-typedef SIZE_T (WINAPI *Func_GetLargePageMinimum)(VOID);
-
-void SetLargePageSize(void)
-{
- #ifdef Z7_LARGE_PAGES
- SIZE_T size;
- const
- Func_GetLargePageMinimum fn =
- (Func_GetLargePageMinimum) MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")),
- "GetLargePageMinimum");
- if (!fn)
- return;
- size = fn();
- if (size == 0 || (size & (size - 1)) != 0)
- return;
- g_LargePageSize = size;
- #endif
-}
-
-#endif // Z7_LARGE_PAGES
-
-void *BigAlloc(size_t size)
-{
- if (size == 0)
- return NULL;
-
- PRINT_ALLOC("Alloc-Big", g_allocCountBig, size, NULL)
-
- #ifdef Z7_LARGE_PAGES
- {
- SIZE_T ps = g_LargePageSize;
- if (ps != 0 && ps <= (1 << 30) && size > (ps / 2))
- {
- size_t size2;
- ps--;
- size2 = (size + ps) & ~ps;
- if (size2 >= size)
- {
- void *p = VirtualAlloc(NULL, size2, MEM_COMMIT | MY__MEM_LARGE_PAGES, PAGE_READWRITE);
- if (p)
- {
- PRINT_ALLOC("Alloc-BM ", g_allocCountMid, size2, p)
- return p;
- }
- }
- }
- }
- #endif
-
- return MidAlloc(size);
-}
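/* Illustrative sketch (not part of the deleted file): BigAlloc() rounds the
   request up to a multiple of the (power-of-two) large-page size with
   (size + ps) & ~ps where ps = pageSize - 1, and the `size2 >= size` test
   catches wrap-around on overflow. Worked example with a hypothetical 2 MiB
   large page: 3 MiB -> (3 MiB + 0x1FFFFF) & ~0x1FFFFF = 4 MiB. */
#include <assert.h>
#include <stddef.h>

static size_t round_up_pow2(size_t size, size_t pageSize /* power of two */)
{
    const size_t mask = pageSize - 1;
    return (size + mask) & ~mask;   /* may wrap to a small value on overflow */
}

static void demo_round_up(void)
{
    assert(round_up_pow2(3u << 20, 2u << 20) == (size_t)4u << 20);
    assert(round_up_pow2(2u << 20, 2u << 20) == (size_t)2u << 20);
}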
-
-void BigFree(void *address)
-{
- PRINT_FREE("Free-Big", g_allocCountBig, address)
- MidFree(address);
-}
-
-#endif // _WIN32
-
-
-static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return MyAlloc(size); }
-static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) MyFree(address); }
-const ISzAlloc g_Alloc = { SzAlloc, SzFree };
-
-#ifdef _WIN32
-static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return MidAlloc(size); }
-static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) MidFree(address); }
-static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return BigAlloc(size); }
-static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) BigFree(address); }
-const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
-const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
-#endif
-
-/*
- uintptr_t : C99 (optional)
- : unsupported in VS6
-*/
-
-#ifdef _WIN32
- typedef UINT_PTR UIntPtr;
-#else
- /*
- typedef uintptr_t UIntPtr;
- */
- typedef ptrdiff_t UIntPtr;
-#endif
-
-
-#define ADJUST_ALLOC_SIZE 0
-/*
-#define ADJUST_ALLOC_SIZE (sizeof(void *) - 1)
-*/
-/*
- Use (ADJUST_ALLOC_SIZE = (sizeof(void *) - 1)), if
- MyAlloc() can return an address that is NOT a multiple of sizeof(void *).
-*/
-
-
-/*
-#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((char *)(p) - ((size_t)(UIntPtr)(p) & ((align) - 1))))
-*/
-#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((((UIntPtr)(p)) & ~((UIntPtr)(align) - 1))))
-
-
-#if !defined(_WIN32) && defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE >= 200112L)
- #define USE_posix_memalign
-#endif
-
-#ifndef USE_posix_memalign
-#define MY_ALIGN_PTR_UP_PLUS(p, align) MY_ALIGN_PTR_DOWN(((char *)(p) + (align) + ADJUST_ALLOC_SIZE), align)
-#endif
-
-/*
- This posix_memalign() is for test purposes only.
- We also need special Free() function instead of free(),
- if this posix_memalign() is used.
-*/
-
-/*
-static int posix_memalign(void **ptr, size_t align, size_t size)
-{
- size_t newSize = size + align;
- void *p;
- void *pAligned;
- *ptr = NULL;
- if (newSize < size)
- return 12; // ENOMEM
- p = MyAlloc(newSize);
- if (!p)
- return 12; // ENOMEM
- pAligned = MY_ALIGN_PTR_UP_PLUS(p, align);
- ((void **)pAligned)[-1] = p;
- *ptr = pAligned;
- return 0;
-}
-*/
-
-/*
- ALLOC_ALIGN_SIZE >= sizeof(void *)
- ALLOC_ALIGN_SIZE >= cache_line_size
-*/
-
-#define ALLOC_ALIGN_SIZE ((size_t)1 << 7)
-
-static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
-{
- #ifndef USE_posix_memalign
-
- void *p;
- void *pAligned;
- size_t newSize;
- UNUSED_VAR(pp)
-
- /* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned
- block to prevent cache line sharing with another allocated blocks */
-
- newSize = size + ALLOC_ALIGN_SIZE * 1 + ADJUST_ALLOC_SIZE;
- if (newSize < size)
- return NULL;
-
- p = MyAlloc(newSize);
-
- if (!p)
- return NULL;
- pAligned = MY_ALIGN_PTR_UP_PLUS(p, ALLOC_ALIGN_SIZE);
-
- Print(" size="); PrintHex(size, 8);
- Print(" a_size="); PrintHex(newSize, 8);
- Print(" ptr="); PrintAddr(p);
- Print(" a_ptr="); PrintAddr(pAligned);
- PrintLn();
-
- ((void **)pAligned)[-1] = p;
-
- return pAligned;
-
- #else
-
- void *p;
- UNUSED_VAR(pp)
- if (posix_memalign(&p, ALLOC_ALIGN_SIZE, size))
- return NULL;
-
- Print(" posix_memalign="); PrintAddr(p);
- PrintLn();
-
- return p;
-
- #endif
-}
-
-
-static void SzAlignedFree(ISzAllocPtr pp, void *address)
-{
- UNUSED_VAR(pp)
- #ifndef USE_posix_memalign
- if (address)
- MyFree(((void **)address)[-1]);
- #else
- free(address);
- #endif
-}
-
-
-const ISzAlloc g_AlignedAlloc = { SzAlignedAlloc, SzAlignedFree };
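/* Illustrative sketch (not part of the deleted file): the non-posix_memalign
   path of SzAlignedAlloc() above uses the classic "over-allocate, align up,
   stash the raw pointer just below the aligned address" technique. A minimal
   standalone version of the same idea (slightly more conservative: it always
   reserves room for the stashed pointer instead of relying on malloc()'s own
   alignment), assuming align is a power of two: */
#include <stdint.h>
#include <stdlib.h>

static void *aligned_alloc_demo(size_t size, size_t align /* power of two */)
{
    void *raw = malloc(size + align + sizeof(void *));
    if (!raw)
        return NULL;
    const uintptr_t a = ((uintptr_t)raw + sizeof(void *) + (align - 1))
        & ~(uintptr_t)(align - 1);
    void *aligned = (void *)a;
    ((void **)aligned)[-1] = raw;   /* remember the real block for the free path */
    return aligned;
}

static void aligned_free_demo(void *aligned)
{
    if (aligned)
        free(((void **)aligned)[-1]);
}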
-
-
-
-#define MY_ALIGN_PTR_DOWN_1(p) MY_ALIGN_PTR_DOWN(p, sizeof(void *))
-
-/* we align ptr to support cases where CAlignOffsetAlloc::offset is not a multiple of sizeof(void *) */
-#define REAL_BLOCK_PTR_VAR(p) ((void **)MY_ALIGN_PTR_DOWN_1(p))[-1]
-/*
-#define REAL_BLOCK_PTR_VAR(p) ((void **)(p))[-1]
-*/
-
-static void *AlignOffsetAlloc_Alloc(ISzAllocPtr pp, size_t size)
-{
- const CAlignOffsetAlloc *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CAlignOffsetAlloc, vt);
- void *adr;
- void *pAligned;
- size_t newSize;
- size_t extra;
- size_t alignSize = (size_t)1 << p->numAlignBits;
-
- if (alignSize < sizeof(void *))
- alignSize = sizeof(void *);
-
- if (p->offset >= alignSize)
- return NULL;
-
- /* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned
- block to prevent cache line sharing with another allocated blocks */
- extra = p->offset & (sizeof(void *) - 1);
- newSize = size + alignSize + extra + ADJUST_ALLOC_SIZE;
- if (newSize < size)
- return NULL;
-
- adr = ISzAlloc_Alloc(p->baseAlloc, newSize);
-
- if (!adr)
- return NULL;
-
- pAligned = (char *)MY_ALIGN_PTR_DOWN((char *)adr +
- alignSize - p->offset + extra + ADJUST_ALLOC_SIZE, alignSize) + p->offset;
-
- PrintLn();
- Print("- Aligned: ");
- Print(" size="); PrintHex(size, 8);
- Print(" a_size="); PrintHex(newSize, 8);
- Print(" ptr="); PrintAddr(adr);
- Print(" a_ptr="); PrintAddr(pAligned);
- PrintLn();
-
- REAL_BLOCK_PTR_VAR(pAligned) = adr;
-
- return pAligned;
-}
-
-
-static void AlignOffsetAlloc_Free(ISzAllocPtr pp, void *address)
-{
- if (address)
- {
- const CAlignOffsetAlloc *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CAlignOffsetAlloc, vt);
- PrintLn();
- Print("- Aligned Free: ");
- PrintLn();
- ISzAlloc_Free(p->baseAlloc, REAL_BLOCK_PTR_VAR(address));
- }
-}
-
-
-void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p)
-{
- p->vt.Alloc = AlignOffsetAlloc_Alloc;
- p->vt.Free = AlignOffsetAlloc_Free;
-}
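/* Illustrative sketch (not part of the deleted file): CAlignOffsetAlloc hands
   out pointers that are congruent to `offset` modulo (1 << numAlignBits),
   i.e. they sit a fixed distance past an alignment boundary. The arithmetic
   below shows only the address selection for a hypothetical raw block,
   assuming align is a power of two, offset < align, and offset is a multiple
   of sizeof(void *): */
#include <assert.h>
#include <stdint.h>

static uintptr_t pick_offset_aligned(uintptr_t raw, uintptr_t align, uintptr_t offset)
{
    /* smallest address >= raw + sizeof(void *) with (addr % align) == offset */
    const uintptr_t a =
        ((raw + sizeof(void *) - offset + (align - 1)) & ~(align - 1)) + offset;
    assert(a % align == offset && a >= raw + sizeof(void *));
    return a;
}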
diff --git a/3rdparty/7z/src/Alloc.h b/3rdparty/7z/src/Alloc.h
deleted file mode 100644
index a88125d69a..0000000000
--- a/3rdparty/7z/src/Alloc.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Alloc.h -- Memory allocation functions
-2023-03-04 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_ALLOC_H
-#define ZIP7_INC_ALLOC_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-/*
- MyFree(NULL) : is allowed, as free(NULL)
- MyAlloc(0) : returns NULL : but malloc(0) is allowed to return NULL or non_NULL
- MyRealloc(NULL, 0) : returns NULL : but realloc(NULL, 0) is allowed to return NULL or non_NULL
-MyRealloc() is similar to realloc() for the following cases:
- MyRealloc(non_NULL, 0) : returns NULL and always calls MyFree(ptr)
- MyRealloc(NULL, non_ZERO) : returns NULL, if allocation failed
- MyRealloc(non_NULL, non_ZERO) : returns NULL, if reallocation failed
-*/
-
-void *MyAlloc(size_t size);
-void MyFree(void *address);
-void *MyRealloc(void *address, size_t size);
-
-#ifdef _WIN32
-
-#ifdef Z7_LARGE_PAGES
-void SetLargePageSize(void);
-#endif
-
-void *MidAlloc(size_t size);
-void MidFree(void *address);
-void *BigAlloc(size_t size);
-void BigFree(void *address);
-
-#else
-
-#define MidAlloc(size) MyAlloc(size)
-#define MidFree(address) MyFree(address)
-#define BigAlloc(size) MyAlloc(size)
-#define BigFree(address) MyFree(address)
-
-#endif
-
-extern const ISzAlloc g_Alloc;
-
-#ifdef _WIN32
-extern const ISzAlloc g_BigAlloc;
-extern const ISzAlloc g_MidAlloc;
-#else
-#define g_BigAlloc g_AlignedAlloc
-#define g_MidAlloc g_AlignedAlloc
-#endif
-
-extern const ISzAlloc g_AlignedAlloc;
-
-
-typedef struct
-{
- ISzAlloc vt;
- ISzAllocPtr baseAlloc;
- unsigned numAlignBits; /* ((1 << numAlignBits) >= sizeof(void *)) */
- size_t offset; /* (offset == (k * sizeof(void *)) && offset < (1 << numAlignBits) */
-} CAlignOffsetAlloc;
-
-void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p);
-
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/Bcj2.c b/3rdparty/7z/src/Bcj2.c
deleted file mode 100644
index c562e47ce8..0000000000
--- a/3rdparty/7z/src/Bcj2.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/* Bcj2.c -- BCJ2 Decoder (Converter for x86 code)
-2023-03-01 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Bcj2.h"
-#include "CpuArch.h"
-
-#define kTopValue ((UInt32)1 << 24)
-#define kNumBitModelTotalBits 11
-#define kBitModelTotal (1 << kNumBitModelTotalBits)
-#define kNumMoveBits 5
-
-// UInt32 bcj2_stats[256 + 2][2];
-
-void Bcj2Dec_Init(CBcj2Dec *p)
-{
- unsigned i;
- p->state = BCJ2_STREAM_RC; // BCJ2_DEC_STATE_OK;
- p->ip = 0;
- p->temp = 0;
- p->range = 0;
- p->code = 0;
- for (i = 0; i < sizeof(p->probs) / sizeof(p->probs[0]); i++)
- p->probs[i] = kBitModelTotal >> 1;
-}
-
-SRes Bcj2Dec_Decode(CBcj2Dec *p)
-{
- UInt32 v = p->temp;
- // const Byte *src;
- if (p->range <= 5)
- {
- UInt32 code = p->code;
- p->state = BCJ2_DEC_STATE_ERROR; /* for case if we return SZ_ERROR_DATA; */
- for (; p->range != 5; p->range++)
- {
- if (p->range == 1 && code != 0)
- return SZ_ERROR_DATA;
- if (p->bufs[BCJ2_STREAM_RC] == p->lims[BCJ2_STREAM_RC])
- {
- p->state = BCJ2_STREAM_RC;
- return SZ_OK;
- }
- code = (code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
- p->code = code;
- }
- if (code == 0xffffffff)
- return SZ_ERROR_DATA;
- p->range = 0xffffffff;
- }
- // else
- {
- unsigned state = p->state;
- // we check BCJ2_IS_32BIT_STREAM() here instead of check in the main loop
- if (BCJ2_IS_32BIT_STREAM(state))
- {
- const Byte *cur = p->bufs[state];
- if (cur == p->lims[state])
- return SZ_OK;
- p->bufs[state] = cur + 4;
- {
- const UInt32 ip = p->ip + 4;
- v = GetBe32a(cur) - ip;
- p->ip = ip;
- }
- state = BCJ2_DEC_STATE_ORIG_0;
- }
- if ((unsigned)(state - BCJ2_DEC_STATE_ORIG_0) < 4)
- {
- Byte *dest = p->dest;
- for (;;)
- {
- if (dest == p->destLim)
- {
- p->state = state;
- p->temp = v;
- return SZ_OK;
- }
- *dest++ = (Byte)v;
- p->dest = dest;
- if (++state == BCJ2_DEC_STATE_ORIG_3 + 1)
- break;
- v >>= 8;
- }
- }
- }
-
- // src = p->bufs[BCJ2_STREAM_MAIN];
- for (;;)
- {
- /*
- if (BCJ2_IS_32BIT_STREAM(p->state))
- p->state = BCJ2_DEC_STATE_OK;
- else
- */
- {
- if (p->range < kTopValue)
- {
- if (p->bufs[BCJ2_STREAM_RC] == p->lims[BCJ2_STREAM_RC])
- {
- p->state = BCJ2_STREAM_RC;
- p->temp = v;
- return SZ_OK;
- }
- p->range <<= 8;
- p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
- }
- {
- const Byte *src = p->bufs[BCJ2_STREAM_MAIN];
- const Byte *srcLim;
- Byte *dest = p->dest;
- {
- const SizeT rem = (SizeT)(p->lims[BCJ2_STREAM_MAIN] - src);
- SizeT num = (SizeT)(p->destLim - dest);
- if (num >= rem)
- num = rem;
- #define NUM_ITERS 4
- #if (NUM_ITERS & (NUM_ITERS - 1)) == 0
- num &= ~((SizeT)NUM_ITERS - 1); // if (NUM_ITERS == (1 << x))
- #else
- num -= num % NUM_ITERS; // if (NUM_ITERS != (1 << x))
- #endif
- srcLim = src + num;
- }
-
- #define NUM_SHIFT_BITS 24
- #define ONE_ITER(indx) { \
- const unsigned b = src[indx]; \
- *dest++ = (Byte)b; \
- v = (v << NUM_SHIFT_BITS) | b; \
- if (((b + (0x100 - 0xe8)) & 0xfe) == 0) break; \
- if (((v - (((UInt32)0x0f << (NUM_SHIFT_BITS)) + 0x80)) & \
- ((((UInt32)1 << (4 + NUM_SHIFT_BITS)) - 0x1) << 4)) == 0) break; \
- /* ++dest */; /* v = b; */ }
-
- if (src != srcLim)
- for (;;)
- {
- /* The 2-cycle dependency chain for the (v) calculation is not a big problem here.
- But we can remove the dependency chain with v = b at the end of the loop. */
- ONE_ITER(0)
- #if (NUM_ITERS > 1)
- ONE_ITER(1)
- #if (NUM_ITERS > 2)
- ONE_ITER(2)
- #if (NUM_ITERS > 3)
- ONE_ITER(3)
- #if (NUM_ITERS > 4)
- ONE_ITER(4)
- #if (NUM_ITERS > 5)
- ONE_ITER(5)
- #if (NUM_ITERS > 6)
- ONE_ITER(6)
- #if (NUM_ITERS > 7)
- ONE_ITER(7)
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
-
- src += NUM_ITERS;
- if (src == srcLim)
- break;
- }
-
- if (src == srcLim)
- #if (NUM_ITERS > 1)
- for (;;)
- #endif
- {
- #if (NUM_ITERS > 1)
- if (src == p->lims[BCJ2_STREAM_MAIN] || dest == p->destLim)
- #endif
- {
- const SizeT num = (SizeT)(src - p->bufs[BCJ2_STREAM_MAIN]);
- p->bufs[BCJ2_STREAM_MAIN] = src;
- p->dest = dest;
- p->ip += (UInt32)num;
- /* state BCJ2_STREAM_MAIN has more priority than BCJ2_STATE_ORIG */
- p->state =
- src == p->lims[BCJ2_STREAM_MAIN] ?
- (unsigned)BCJ2_STREAM_MAIN :
- (unsigned)BCJ2_DEC_STATE_ORIG;
- p->temp = v;
- return SZ_OK;
- }
- #if (NUM_ITERS > 1)
- ONE_ITER(0)
- src++;
- #endif
- }
-
- {
- const SizeT num = (SizeT)(dest - p->dest);
- p->dest = dest; // p->dest += num;
- p->bufs[BCJ2_STREAM_MAIN] += num; // = src;
- p->ip += (UInt32)num;
- }
- {
- UInt32 bound, ttt;
- CBcj2Prob *prob; // unsigned index;
- /*
- prob = p->probs + (unsigned)((Byte)v == 0xe8 ?
- 2 + (Byte)(v >> 8) :
- ((v >> 5) & 1)); // ((Byte)v < 0xe8 ? 0 : 1));
- */
- {
- const unsigned c = ((v + 0x17) >> 6) & 1;
- prob = p->probs + (unsigned)
- (((0 - c) & (Byte)(v >> NUM_SHIFT_BITS)) + c + ((v >> 5) & 1));
- // (Byte)
- // 8x->0 : e9->1 : xxe8->xx+2
- // 8x->0x100 : e9->0x101 : xxe8->xx
- // (((0x100 - (e & ~v)) & (0x100 | (v >> 8))) + (e & v));
- // (((0x101 + (~e | v)) & (0x100 | (v >> 8))) + (e & v));
- }
- ttt = *prob;
- bound = (p->range >> kNumBitModelTotalBits) * ttt;
- if (p->code < bound)
- {
- // bcj2_stats[prob - p->probs][0]++;
- p->range = bound;
- *prob = (CBcj2Prob)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
- continue;
- }
- {
- // bcj2_stats[prob - p->probs][1]++;
- p->range -= bound;
- p->code -= bound;
- *prob = (CBcj2Prob)(ttt - (ttt >> kNumMoveBits));
- }
- }
- }
- }
- {
- /* (v == 0xe8 ? 0 : 1) uses setcc instruction with additional zero register usage in x64 MSVC. */
- // const unsigned cj = ((Byte)v == 0xe8) ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP;
- const unsigned cj = (((v + 0x57) >> 6) & 1) + BCJ2_STREAM_CALL;
- const Byte *cur = p->bufs[cj];
- Byte *dest;
- SizeT rem;
- if (cur == p->lims[cj])
- {
- p->state = cj;
- break;
- }
- v = GetBe32a(cur);
- p->bufs[cj] = cur + 4;
- {
- const UInt32 ip = p->ip + 4;
- v -= ip;
- p->ip = ip;
- }
- dest = p->dest;
- rem = (SizeT)(p->destLim - dest);
- if (rem < 4)
- {
- if ((unsigned)rem > 0) { dest[0] = (Byte)v; v >>= 8;
- if ((unsigned)rem > 1) { dest[1] = (Byte)v; v >>= 8;
- if ((unsigned)rem > 2) { dest[2] = (Byte)v; v >>= 8; }}}
- p->temp = v;
- p->dest = dest + rem;
- p->state = BCJ2_DEC_STATE_ORIG_0 + (unsigned)rem;
- break;
- }
- SetUi32(dest, v)
- v >>= 24;
- p->dest = dest + 4;
- }
- }
-
- if (p->range < kTopValue && p->bufs[BCJ2_STREAM_RC] != p->lims[BCJ2_STREAM_RC])
- {
- p->range <<= 8;
- p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
- }
- return SZ_OK;
-}
-
-#undef NUM_ITERS
-#undef ONE_ITER
-#undef NUM_SHIFT_BITS
-#undef kTopValue
-#undef kNumBitModelTotalBits
-#undef kBitModelTotal
-#undef kNumMoveBits
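/* Illustrative sketch (not from the deleted file): the ONE_ITER macro above
   spots x86 branch markers with branch-free bit tests. The first test,
   ((b + (0x100 - 0xe8)) & 0xfe) == 0, holds exactly for the one-byte CALL/JMP
   opcodes 0xE8 and 0xE9; the second test matches the two-byte 0F 8x (jcc
   rel32) forms. The loop below just confirms the first equivalence over all
   byte values: */
#include <assert.h>

static void check_e8_e9_trick(void)
{
    unsigned b;
    for (b = 0; b < 256; b++)
    {
        const int trick = (((b + (0x100 - 0xe8)) & 0xfe) == 0);
        const int plain = (b == 0xe8 || b == 0xe9);
        assert(trick == plain);
    }
}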
diff --git a/3rdparty/7z/src/Bcj2.h b/3rdparty/7z/src/Bcj2.h
deleted file mode 100644
index 2eda0c820f..0000000000
--- a/3rdparty/7z/src/Bcj2.h
+++ /dev/null
@@ -1,332 +0,0 @@
-/* Bcj2.h -- BCJ2 converter for x86 code (Branch CALL/JUMP variant2)
-2023-03-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_BCJ2_H
-#define ZIP7_INC_BCJ2_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define BCJ2_NUM_STREAMS 4
-
-enum
-{
- BCJ2_STREAM_MAIN,
- BCJ2_STREAM_CALL,
- BCJ2_STREAM_JUMP,
- BCJ2_STREAM_RC
-};
-
-enum
-{
- BCJ2_DEC_STATE_ORIG_0 = BCJ2_NUM_STREAMS,
- BCJ2_DEC_STATE_ORIG_1,
- BCJ2_DEC_STATE_ORIG_2,
- BCJ2_DEC_STATE_ORIG_3,
-
- BCJ2_DEC_STATE_ORIG,
- BCJ2_DEC_STATE_ERROR /* after detected data error */
-};
-
-enum
-{
- BCJ2_ENC_STATE_ORIG = BCJ2_NUM_STREAMS,
- BCJ2_ENC_STATE_FINISHED /* it's state after fully encoded stream */
-};
-
-
-/* #define BCJ2_IS_32BIT_STREAM(s) ((s) == BCJ2_STREAM_CALL || (s) == BCJ2_STREAM_JUMP) */
-#define BCJ2_IS_32BIT_STREAM(s) ((unsigned)((unsigned)(s) - (unsigned)BCJ2_STREAM_CALL) < 2)
-
-/*
-CBcj2Dec / CBcj2Enc
-bufs sizes:
- BUF_SIZE(n) = lims[n] - bufs[n]
-bufs sizes for BCJ2_STREAM_CALL and BCJ2_STREAM_JUMP must be multiples of 4:
- (BUF_SIZE(BCJ2_STREAM_CALL) & 3) == 0
- (BUF_SIZE(BCJ2_STREAM_JUMP) & 3) == 0
-*/
-
-// typedef UInt32 CBcj2Prob;
-typedef UInt16 CBcj2Prob;
-
-/*
-BCJ2 encoder / decoder internal requirements:
- - If last bytes of stream contain marker (e8/e8/0f8x), then
- there is also encoded symbol (0 : no conversion) in RC stream.
- - One case of overlapped instructions is supported,
- if last byte of converted instruction is (0f) and next byte is (8x):
- marker [xx xx xx 0f] 8x
- then the pair (0f 8x) is treated as marker.
-*/
-
-/* ---------- BCJ2 Decoder ---------- */
-
-/*
-CBcj2Dec:
-(dest) is allowed to overlap with bufs[BCJ2_STREAM_MAIN], with the following conditions:
- bufs[BCJ2_STREAM_MAIN] >= dest &&
- bufs[BCJ2_STREAM_MAIN] - dest >=
- BUF_SIZE(BCJ2_STREAM_CALL) +
- BUF_SIZE(BCJ2_STREAM_JUMP)
- reserve = bufs[BCJ2_STREAM_MAIN] - dest -
- ( BUF_SIZE(BCJ2_STREAM_CALL) +
- BUF_SIZE(BCJ2_STREAM_JUMP) )
- and additional conditions:
- if (it's first call of Bcj2Dec_Decode() after Bcj2Dec_Init())
- {
- (reserve != 1) : if (ver < v23.00)
- }
- else // if there is more than one call of Bcj2Dec_Decode() after Bcj2Dec_Init()
- {
- (reserve >= 6) : if (ver < v23.00)
- (reserve >= 4) : if (ver >= v23.00)
- We need that (reserve) because after first call of Bcj2Dec_Decode(),
- CBcj2Dec::temp can contain up to 4 bytes for writing to (dest).
- }
- (reserve == 0) is allowed, if we decode full stream via single call of Bcj2Dec_Decode().
- (reserve == 0) also is allowed in case of multi-call, if we use fixed buffers,
- and (reserve) is calculated from full (final) sizes of all streams before first call.
-*/
-
-typedef struct
-{
- const Byte *bufs[BCJ2_NUM_STREAMS];
- const Byte *lims[BCJ2_NUM_STREAMS];
- Byte *dest;
- const Byte *destLim;
-
- unsigned state; /* BCJ2_STREAM_MAIN has more priority than BCJ2_STATE_ORIG */
-
- UInt32 ip; /* property of starting base for decoding */
- UInt32 temp; /* Byte temp[4]; */
- UInt32 range;
- UInt32 code;
- CBcj2Prob probs[2 + 256];
-} CBcj2Dec;
-
-
-/* Note:
- Bcj2Dec_Init() sets (CBcj2Dec::ip = 0)
- if (ip != 0) property is required, the caller must set CBcj2Dec::ip after Bcj2Dec_Init()
-*/
-void Bcj2Dec_Init(CBcj2Dec *p);
-
-
-/* Bcj2Dec_Decode():
- returns:
- SZ_OK
- SZ_ERROR_DATA : if data in 5 starting bytes of BCJ2_STREAM_RC stream are not correct
-*/
-SRes Bcj2Dec_Decode(CBcj2Dec *p);
-
-/* To check that decoding was finished you can compare
- sizes of processed streams with sizes known from another sources.
- You must do at least one mandatory check from the two following options:
- - the check for size of processed output (ORIG) stream.
- - the check for size of processed input (MAIN) stream.
- additional optional checks:
- - the checks for processed sizes of all input streams (MAIN, CALL, JUMP, RC)
- - the checks Bcj2Dec_IsMaybeFinished*()
- also before actual decoding you can check that the
- following condition is met for stream sizes:
- ( size(ORIG) == size(MAIN) + size(CALL) + size(JUMP) )
-*/
-
-/* (state == BCJ2_STREAM_MAIN) means that decoder is ready for
- additional input data in BCJ2_STREAM_MAIN stream.
- Note that (state == BCJ2_STREAM_MAIN) is allowed for non-finished decoding.
-*/
-#define Bcj2Dec_IsMaybeFinished_state_MAIN(_p_) ((_p_)->state == BCJ2_STREAM_MAIN)
-
-/* if the stream decoding was finished correctly, then range decoder
- part of CBcj2Dec also was finished, and then (CBcj2Dec::code == 0).
- Note that (CBcj2Dec::code == 0) is allowed for non-finished decoding.
-*/
-#define Bcj2Dec_IsMaybeFinished_code(_p_) ((_p_)->code == 0)
-
-/* use Bcj2Dec_IsMaybeFinished() only as additional check
- after at least one mandatory check from the two following options:
- - the check for size of processed output (ORIG) stream.
- - the check for size of processed input (MAIN) stream.
-*/
-#define Bcj2Dec_IsMaybeFinished(_p_) ( \
- Bcj2Dec_IsMaybeFinished_state_MAIN(_p_) && \
- Bcj2Dec_IsMaybeFinished_code(_p_))
-
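/* Illustrative sketch (not from the deleted header): a single-call decode of
   fully buffered BCJ2 streams, following the checks described above. The
   buffer pointers and sizes are hypothetical caller inputs; the CALL and JUMP
   sizes must be multiples of 4. A streaming caller would instead refill
   whichever stream p->state names and call Bcj2Dec_Decode() again. */
static int Bcj2_DecodeWhole(Byte *dest, SizeT destSize,
    const Byte *mainBuf, SizeT mainSize,
    const Byte *callBuf, SizeT callSize,
    const Byte *jumpBuf, SizeT jumpSize,
    const Byte *rcBuf,   SizeT rcSize)
{
    CBcj2Dec dec;
    Bcj2Dec_Init(&dec);
    dec.bufs[BCJ2_STREAM_MAIN] = mainBuf; dec.lims[BCJ2_STREAM_MAIN] = mainBuf + mainSize;
    dec.bufs[BCJ2_STREAM_CALL] = callBuf; dec.lims[BCJ2_STREAM_CALL] = callBuf + callSize;
    dec.bufs[BCJ2_STREAM_JUMP] = jumpBuf; dec.lims[BCJ2_STREAM_JUMP] = jumpBuf + jumpSize;
    dec.bufs[BCJ2_STREAM_RC]   = rcBuf;   dec.lims[BCJ2_STREAM_RC]   = rcBuf + rcSize;
    dec.dest = dest;
    dec.destLim = dest + destSize;
    if (Bcj2Dec_Decode(&dec) != SZ_OK)
        return 1;                        /* bad first bytes of the RC stream  */
    if (dec.dest != dest + destSize)     /* mandatory check: full ORIG output */
        return 1;
    return Bcj2Dec_IsMaybeFinished(&dec) ? 0 : 1;   /* optional extra check */
}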
-
-
-/* ---------- BCJ2 Encoder ---------- */
-
-typedef enum
-{
- BCJ2_ENC_FINISH_MODE_CONTINUE,
- BCJ2_ENC_FINISH_MODE_END_BLOCK,
- BCJ2_ENC_FINISH_MODE_END_STREAM
-} EBcj2Enc_FinishMode;
-
-/*
- BCJ2_ENC_FINISH_MODE_CONTINUE:
- process non finished encoding.
- It notifies the encoder that additional further calls
- can provide more input data (src) than provided by current call.
- In that case the CBcj2Enc encoder still can move (src) pointer
- up to (srcLim), but CBcj2Enc encoder can store some of the last
- processed bytes (up to 4 bytes) from src to internal CBcj2Enc::temp[] buffer.
- at return:
- (CBcj2Enc::src will point to position that includes
- processed data and data copied to (temp[]) buffer)
- That data from (temp[]) buffer will be used in further calls.
-
- BCJ2_ENC_FINISH_MODE_END_BLOCK:
- finish encoding of current block (ended at srcLim) without RC flushing.
- at return: if (CBcj2Enc::state == BCJ2_ENC_STATE_ORIG) &&
- CBcj2Enc::src == CBcj2Enc::srcLim)
- : it shows that block encoding was finished. And the encoder is
- ready for new (src) data or for stream finish operation.
- finished block means
- {
- CBcj2Enc has completed block encoding up to (srcLim).
- (1 + 4 bytes) or (2 + 4 bytes) CALL/JUMP cortages will
- not cross block boundary at (srcLim).
- temporary CBcj2Enc buffer for (ORIG) src data is empty.
- 3 output uncompressed streams (MAIN, CALL, JUMP) were flushed.
- RC stream was not flushed. And RC stream will cross block boundary.
- }
- Note: some possible implementation of BCJ2 encoder could
- write branch marker (e8/e8/0f8x) in one call of Bcj2Enc_Encode(),
- and it could calculate symbol for RC in another call of Bcj2Enc_Encode().
- BCJ2 encoder uses ip/fileIp/fileSize/relatLimit values to calculate RC symbol.
- And these CBcj2Enc variables can have different values in different Bcj2Enc_Encode() calls.
- So caller must finish each block with BCJ2_ENC_FINISH_MODE_END_BLOCK
- to ensure that RC symbol is calculated and written in proper block.
-
- BCJ2_ENC_FINISH_MODE_END_STREAM
- finish encoding of stream (ended at srcLim) fully including RC flushing.
- at return: if (CBcj2Enc::state == BCJ2_ENC_STATE_FINISHED)
- : it shows that stream encoding was finished fully,
- and all output streams were flushed fully.
- also Bcj2Enc_IsFinished() can be called.
-*/
-
-
-/*
- 32-bit relative offset in JUMP/CALL commands is
- - (mod 4 GiB) for 32-bit x86 code
- - signed Int32 for 64-bit x86-64 code
- BCJ2 encoder also does internal relative to absolute address conversions.
- And there are 2 possible ways to do it:
- before v23: we used 32-bit variables and (mod 4 GiB) conversion
- since v23: we use 64-bit variables and (signed Int32 offset) conversion.
- The absolute address condition for conversion in v23:
- ((UInt64)((Int64)ip64 - (Int64)fileIp64 + 5 + (Int32)offset) < (UInt64)fileSize64)
- note that if (fileSize64 > 2 GiB), there is a difference between
- old (mod 4 GiB) way (v22) and new (signed Int32 offset) way (v23).
- And new (v23) way is more suitable to encode 64-bit x86-64 code for (fileSize64 > 2 GiB) cases.
-*/
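/* Illustrative numeric check (not from the deleted header) of the v23
   condition quoted above, with hypothetical values: a call opcode at file
   position ip = 0x1000 (fileIp = 0) with relative offset -0x800 targets
   absolute address 0x1000 + 5 - 0x800 = 0x805, which lies inside a
   0x2000-byte file, so the offset would be converted. */
#include <assert.h>
#include <stdint.h>

static void check_conversion_condition(void)
{
    const int64_t ip = 0x1000, fileIp = 0;
    const int32_t offset = -0x800;
    const uint64_t fileSize = 0x2000;
    assert((uint64_t)(ip - fileIp + 5 + offset) < fileSize);   /* 0x805 < 0x2000 */
}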
-
-/*
-// for old (v22) way for conversion:
-typedef UInt32 CBcj2Enc_ip_unsigned;
-typedef Int32 CBcj2Enc_ip_signed;
-#define BCJ2_ENC_FileSize_MAX ((UInt32)1 << 31)
-*/
-typedef UInt64 CBcj2Enc_ip_unsigned;
-typedef Int64 CBcj2Enc_ip_signed;
-
-/* maximum size of file that can be used for conversion condition */
-#define BCJ2_ENC_FileSize_MAX ((CBcj2Enc_ip_unsigned)0 - 2)
-
-/* default value of fileSize64_minus1 variable that means
- that absolute address limitation will not be used */
-#define BCJ2_ENC_FileSizeField_UNLIMITED ((CBcj2Enc_ip_unsigned)0 - 1)
-
-/* calculate value that later can be set to CBcj2Enc::fileSize64_minus1 */
-#define BCJ2_ENC_GET_FileSizeField_VAL_FROM_FileSize(fileSize) \
- ((CBcj2Enc_ip_unsigned)(fileSize) - 1)
-
-/* set CBcj2Enc::fileSize64_minus1 variable from size of file */
-#define Bcj2Enc_SET_FileSize(p, fileSize) \
- (p)->fileSize64_minus1 = BCJ2_ENC_GET_FileSizeField_VAL_FROM_FileSize(fileSize);
-
-
-typedef struct
-{
- Byte *bufs[BCJ2_NUM_STREAMS];
- const Byte *lims[BCJ2_NUM_STREAMS];
- const Byte *src;
- const Byte *srcLim;
-
- unsigned state;
- EBcj2Enc_FinishMode finishMode;
-
- Byte context;
- Byte flushRem;
- Byte isFlushState;
-
- Byte cache;
- UInt32 range;
- UInt64 low;
- UInt64 cacheSize;
-
- // UInt32 context; // for marker version, it can include marker flag.
-
- /* (ip64) and (fileIp64) correspond to virtual source stream position
- that doesn't include data in temp[] */
- CBcj2Enc_ip_unsigned ip64; /* current (ip) position */
- CBcj2Enc_ip_unsigned fileIp64; /* start (ip) position of current file */
- CBcj2Enc_ip_unsigned fileSize64_minus1; /* size of current file (for conversion limitation) */
- UInt32 relatLimit; /* (relatLimit <= ((UInt32)1 << 31)) : 0 means disable_conversion */
- // UInt32 relatExcludeBits;
-
- UInt32 tempTarget;
- unsigned tempPos; /* the number of bytes that were copied to temp[] buffer
- (tempPos <= 4) outside of Bcj2Enc_Encode() */
- // Byte temp[4]; // for marker version
- Byte temp[8];
- CBcj2Prob probs[2 + 256];
-} CBcj2Enc;
-
-void Bcj2Enc_Init(CBcj2Enc *p);
-
-
-/*
-Bcj2Enc_Encode(): at exit:
- p->State < BCJ2_NUM_STREAMS : we need more buffer space for output stream
- (bufs[p->State] == lims[p->State])
- p->State == BCJ2_ENC_STATE_ORIG : we need more data in input src stream
- (src == srcLim)
- p->State == BCJ2_ENC_STATE_FINISHED : after fully encoded stream
-*/
-void Bcj2Enc_Encode(CBcj2Enc *p);
-
-/* Bcj2Enc encoder can look ahead for up to 4 bytes of the source stream.
- CBcj2Enc::tempPos : is the number of bytes that were copied from input stream to temp[] buffer.
- (CBcj2Enc::src) after Bcj2Enc_Encode() is starting position after
- fully processed data and after data copied to temp buffer.
- So if the caller needs to get real number of fully processed input
- bytes (without look ahead data in temp buffer),
- the caller must subtract the (CBcj2Enc::tempPos) value from the processed size
- value that is calculated based on current (CBcj2Enc::src):
- cur_processed_pos = Calc_Big_Processed_Pos(enc.src)) -
- Bcj2Enc_Get_AvailInputSize_in_Temp(&enc);
-*/
-/* get the size of input data that was stored in temp[] buffer: */
-#define Bcj2Enc_Get_AvailInputSize_in_Temp(p) ((p)->tempPos)
-
-#define Bcj2Enc_IsFinished(p) ((p)->flushRem == 0)
-
-/* Note : the decoder supports overlapping of marker (0f 80).
- But we can eliminate such overlapping cases by setting
- the limit for relative offset conversion as
- CBcj2Enc::relatLimit <= (0x0f << 24) == (240 MiB)
-*/
-/* default value for CBcj2Enc::relatLimit */
-#define BCJ2_ENC_RELAT_LIMIT_DEFAULT ((UInt32)0x0f << 24)
-#define BCJ2_ENC_RELAT_LIMIT_MAX ((UInt32)1 << 31)
-// #define BCJ2_RELAT_EXCLUDE_NUM_BITS 5
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/Bcj2Enc.c b/3rdparty/7z/src/Bcj2Enc.c
deleted file mode 100644
index 53da590351..0000000000
--- a/3rdparty/7z/src/Bcj2Enc.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/* Bcj2Enc.c -- BCJ2 Encoder converter for x86 code (Branch CALL/JUMP variant2)
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-/* #define SHOW_STAT */
-#ifdef SHOW_STAT
-#include <stdio.h>
-#define PRF2(s) printf("%s ip=%8x tempPos=%d src= %8x\n", s, (unsigned)p->ip64, p->tempPos, (unsigned)(p->srcLim - p->src));
-#else
-#define PRF2(s)
-#endif
-
-#include "Bcj2.h"
-#include "CpuArch.h"
-
-#define kTopValue ((UInt32)1 << 24)
-#define kNumBitModelTotalBits 11
-#define kBitModelTotal (1 << kNumBitModelTotalBits)
-#define kNumMoveBits 5
-
-void Bcj2Enc_Init(CBcj2Enc *p)
-{
- unsigned i;
- p->state = BCJ2_ENC_STATE_ORIG;
- p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE;
- p->context = 0;
- p->flushRem = 5;
- p->isFlushState = 0;
- p->cache = 0;
- p->range = 0xffffffff;
- p->low = 0;
- p->cacheSize = 1;
- p->ip64 = 0;
- p->fileIp64 = 0;
- p->fileSize64_minus1 = BCJ2_ENC_FileSizeField_UNLIMITED;
- p->relatLimit = BCJ2_ENC_RELAT_LIMIT_DEFAULT;
- // p->relatExcludeBits = 0;
- p->tempPos = 0;
- for (i = 0; i < sizeof(p->probs) / sizeof(p->probs[0]); i++)
- p->probs[i] = kBitModelTotal >> 1;
-}
-
-// Z7_NO_INLINE
-Z7_FORCE_INLINE
-static BoolInt Bcj2_RangeEnc_ShiftLow(CBcj2Enc *p)
-{
- const UInt32 low = (UInt32)p->low;
- const unsigned high = (unsigned)
- #if defined(Z7_MSC_VER_ORIGINAL) \
- && defined(MY_CPU_X86) \
- && defined(MY_CPU_LE) \
- && !defined(MY_CPU_64BIT)
- // we try to get rid of the __aullshr() call in MSVS-x86
- (((const UInt32 *)&p->low)[1]); // [1] : for little-endian only
- #else
- (p->low >> 32);
- #endif
- if (low < (UInt32)0xff000000 || high != 0)
- {
- Byte *buf = p->bufs[BCJ2_STREAM_RC];
- do
- {
- if (buf == p->lims[BCJ2_STREAM_RC])
- {
- p->state = BCJ2_STREAM_RC;
- p->bufs[BCJ2_STREAM_RC] = buf;
- return True;
- }
- *buf++ = (Byte)(p->cache + high);
- p->cache = 0xff;
- }
- while (--p->cacheSize);
- p->bufs[BCJ2_STREAM_RC] = buf;
- p->cache = (Byte)(low >> 24);
- }
- p->cacheSize++;
- p->low = low << 8;
- return False;
-}
-
-
-/*
-We can use 2 alternative versions of code:
-1) non-marker version:
- Byte CBcj2Enc::context
- Byte temp[8];
- Last byte of marker (e8/e9/[0f]8x) can be written to temp[] buffer.
- Encoder writes last byte of marker (e8/e9/[0f]8x) to dest, only in conjunction
- with writing branch symbol to range coder in same Bcj2Enc_Encode_2() call.
-
-2) marker version:
- UInt32 CBcj2Enc::context
- Byte CBcj2Enc::temp[4];
- MARKER_FLAG in CBcj2Enc::context shows that CBcj2Enc::context contains a found marker.
- it's allowed that
- one call of Bcj2Enc_Encode_2() writes last byte of marker (e8/e9/[0f]8x) to dest,
- and another call of Bcj2Enc_Encode_2() does offset conversion.
- So different values of (fileIp) and (fileSize) are possible
- in these different Bcj2Enc_Encode_2() calls.
-
-Also marker version requires additional if((v & MARKER_FLAG) == 0) check in main loop.
-So we use non-marker version.
-*/
-
-/*
- Corner cases with overlap in multi-block.
- before v23: there was one corner case, where converted instruction
- could start in one sub-stream and finish in next sub-stream.
- If multi-block (solid) encoding is used,
- and BCJ2_ENC_FINISH_MODE_END_BLOCK is used for each sub-stream.
- and (0f) is last byte of previous sub-stream
- and (8x) is first byte of current sub-stream
- then (0f 8x) pair is treated as marker by BCJ2 encoder and decoder.
- BCJ2 encoder can convert the 32-bit offset for that (0f 8x) cortage,
- if that offset meets limit requirements.
- If encoder allows 32-bit offset conversion for such overlap case,
- then the data in 3 uncompressed BCJ2 streams for some sub-stream
- can depend from data of previous sub-stream.
- That corner case is not big problem, and it's rare case.
- Since v23.00 we do additional check to prevent conversions in such overlap cases.
-*/
-
-/*
- Bcj2Enc_Encode_2() output variables at exit:
- {
- if (Bcj2Enc_Encode_2() exits with (p->state == BCJ2_ENC_STATE_ORIG))
- {
- it means that encoder needs more input data.
- if (p->srcLim == p->src) at exit, then
- {
- (p->finishMode != BCJ2_ENC_FINISH_MODE_END_STREAM)
- all input data were read and processed, and we are ready for
- new input data.
- }
- else
- {
- (p->srcLim != p->src)
- (p->finishMode == BCJ2_ENC_FINISH_MODE_CONTINUE)
- The encoder has found an e8/e9/0f_8x marker,
- and p->src points to last byte of that marker,
- Bcj2Enc_Encode_2() needs more input data to get totally
- 5 bytes (last byte of marker and 32-bit branch offset)
- as continuous array starting from p->src.
- (p->srcLim - p->src < 5) requirement is met after exit.
- So the non-processed residue from p->src to p->srcLim is always less than 5 bytes.
- }
- }
- }
-*/
-
-Z7_NO_INLINE
-static void Bcj2Enc_Encode_2(CBcj2Enc *p)
-{
- if (!p->isFlushState)
- {
- const Byte *src;
- UInt32 v;
- {
- const unsigned state = p->state;
- if (BCJ2_IS_32BIT_STREAM(state))
- {
- Byte *cur = p->bufs[state];
- if (cur == p->lims[state])
- return;
- SetBe32a(cur, p->tempTarget)
- p->bufs[state] = cur + 4;
- }
- }
- p->state = BCJ2_ENC_STATE_ORIG; // for main reason of exit
- src = p->src;
- v = p->context;
-
- // #define WRITE_CONTEXT p->context = v; // for marker version
- #define WRITE_CONTEXT p->context = (Byte)v;
- #define WRITE_CONTEXT_AND_SRC p->src = src; WRITE_CONTEXT
-
- for (;;)
- {
- // const Byte *src;
- // UInt32 v;
- CBcj2Enc_ip_unsigned ip;
- if (p->range < kTopValue)
- {
- // to reduce register pressure and code size: we save and restore local variables.
- WRITE_CONTEXT_AND_SRC
- if (Bcj2_RangeEnc_ShiftLow(p))
- return;
- p->range <<= 8;
- src = p->src;
- v = p->context;
- }
- // src = p->src;
- // #define MARKER_FLAG ((UInt32)1 << 17)
- // if ((v & MARKER_FLAG) == 0) // for marker version
- {
- const Byte *srcLim;
- Byte *dest = p->bufs[BCJ2_STREAM_MAIN];
- {
- const SizeT remSrc = (SizeT)(p->srcLim - src);
- SizeT rem = (SizeT)(p->lims[BCJ2_STREAM_MAIN] - dest);
- if (rem >= remSrc)
- rem = remSrc;
- srcLim = src + rem;
- }
- /* p->context contains context of previous byte:
- bits [0 : 7] : src[-1], if (src) was changed in this call
- bits [8 : 31] : are undefined for non-marker version
- */
- // v = p->context;
- #define NUM_SHIFT_BITS 24
- #define CONV_FLAG ((UInt32)1 << 16)
- #define ONE_ITER { \
- b = src[0]; \
- *dest++ = (Byte)b; \
- v = (v << NUM_SHIFT_BITS) | b; \
- if (((b + (0x100 - 0xe8)) & 0xfe) == 0) break; \
- if (((v - (((UInt32)0x0f << (NUM_SHIFT_BITS)) + 0x80)) & \
- ((((UInt32)1 << (4 + NUM_SHIFT_BITS)) - 0x1) << 4)) == 0) break; \
- src++; if (src == srcLim) { break; } }
-
- if (src != srcLim)
- for (;;)
- {
- /* clang can generate inefficient code with setne instead of two jcc instructions.
- we can use 2 iterations and an external (unsigned b) to avoid that inefficient code generation. */
- unsigned b;
- ONE_ITER
- ONE_ITER
- }
-
- ip = p->ip64 + (CBcj2Enc_ip_unsigned)(SizeT)(dest - p->bufs[BCJ2_STREAM_MAIN]);
- p->bufs[BCJ2_STREAM_MAIN] = dest;
- p->ip64 = ip;
-
- if (src == srcLim)
- {
- WRITE_CONTEXT_AND_SRC
- if (src != p->srcLim)
- {
- p->state = BCJ2_STREAM_MAIN;
- return;
- }
- /* (p->src == p->srcLim)
- (p->state == BCJ2_ENC_STATE_ORIG) */
- if (p->finishMode != BCJ2_ENC_FINISH_MODE_END_STREAM)
- return;
- /* (p->finishMode == BCJ2_ENC_FINISH_MODE_END_STREAM */
- // (p->flushRem == 5);
- p->isFlushState = 1;
- break;
- }
- src++;
- // p->src = src;
- }
- // ip = p->ip; // for marker version
- /* marker was found */
- /* (v) contains marker that was found:
- bits [NUM_SHIFT_BITS : NUM_SHIFT_BITS + 7]
- : value of src[-2] : xx/xx/0f
- bits [0 : 7] : value of src[-1] : e8/e9/8x
- */
- {
- {
- #if NUM_SHIFT_BITS != 24
- v &= ~(UInt32)CONV_FLAG;
- #endif
- // UInt32 relat = 0;
- if ((SizeT)(p->srcLim - src) >= 4)
- {
- /*
- if (relat != 0 || (Byte)v != 0xe8)
- BoolInt isBigOffset = True;
- */
- const UInt32 relat = GetUi32(src);
- /*
- #define EXCLUDE_FLAG ((UInt32)1 << 4)
- #define NEED_CONVERT(rel) ((((rel) + EXCLUDE_FLAG) & (0 - EXCLUDE_FLAG * 2)) != 0)
- if (p->relatExcludeBits != 0)
- {
- const UInt32 flag = (UInt32)1 << (p->relatExcludeBits - 1);
- isBigOffset = (((relat + flag) & (0 - flag * 2)) != 0);
- }
- // isBigOffset = False; // for debug
- */
- ip -= p->fileIp64;
- // Use the following if check, if (ip) is 64-bit:
- if (ip > (((v + 0x20) >> 5) & 1)) // 23.00 : we eliminate multi-block overlap for (0f 80) and (e8/e9)
- if ((CBcj2Enc_ip_unsigned)((CBcj2Enc_ip_signed)ip + 4 + (Int32)relat) <= p->fileSize64_minus1)
- if (((UInt32)(relat + p->relatLimit) >> 1) < p->relatLimit)
- v |= CONV_FLAG;
- }
- else if (p->finishMode == BCJ2_ENC_FINISH_MODE_CONTINUE)
- {
- // (p->srcLim - src < 4)
- // /*
- // for non-marker version
- p->ip64--; // p->ip = ip - 1;
- p->bufs[BCJ2_STREAM_MAIN]--;
- src--;
- v >>= NUM_SHIFT_BITS;
- // (0 < p->srcLim - p->src <= 4)
- // */
- // v |= MARKER_FLAG; // for marker version
- /* (p->state == BCJ2_ENC_STATE_ORIG) */
- WRITE_CONTEXT_AND_SRC
- return;
- }
- {
- const unsigned c = ((v + 0x17) >> 6) & 1;
- CBcj2Prob *prob = p->probs + (unsigned)
- (((0 - c) & (Byte)(v >> NUM_SHIFT_BITS)) + c + ((v >> 5) & 1));
- /*
- ((Byte)v == 0xe8 ? 2 + ((Byte)(v >> 8)) :
- ((Byte)v < 0xe8 ? 0 : 1)); // ((v >> 5) & 1));
- */
- const unsigned ttt = *prob;
- const UInt32 bound = (p->range >> kNumBitModelTotalBits) * ttt;
- if ((v & CONV_FLAG) == 0)
- {
- // static int yyy = 0; yyy++; printf("\n!needConvert = %d\n", yyy);
- // v = (Byte)v; // for marker version
- p->range = bound;
- *prob = (CBcj2Prob)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
- // WRITE_CONTEXT_AND_SRC
- continue;
- }
- p->low += bound;
- p->range -= bound;
- *prob = (CBcj2Prob)(ttt - (ttt >> kNumMoveBits));
- }
- // p->context = src[3];
- {
- // const unsigned cj = ((Byte)v == 0xe8 ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP);
- const unsigned cj = (((v + 0x57) >> 6) & 1) + BCJ2_STREAM_CALL;
- ip = p->ip64;
- v = GetUi32(src); // relat
- ip += 4;
- p->ip64 = ip;
- src += 4;
- // p->src = src;
- {
- const UInt32 absol = (UInt32)ip + v;
- Byte *cur = p->bufs[cj];
- v >>= 24;
- // WRITE_CONTEXT
- if (cur == p->lims[cj])
- {
- p->state = cj;
- p->tempTarget = absol;
- WRITE_CONTEXT_AND_SRC
- return;
- }
- SetBe32a(cur, absol)
- p->bufs[cj] = cur + 4;
- }
- }
- }
- }
- } // end of loop
- }
-
- for (; p->flushRem != 0; p->flushRem--)
- if (Bcj2_RangeEnc_ShiftLow(p))
- return;
- p->state = BCJ2_ENC_STATE_FINISHED;
-}
-
-
-/*
-BCJ2 encoder needs look ahead for up to 4 bytes in (src) buffer.
-So base function Bcj2Enc_Encode_2()
- in BCJ2_ENC_FINISH_MODE_CONTINUE mode can return with
- (p->state == BCJ2_ENC_STATE_ORIG && p->src < p->srcLim)
-Bcj2Enc_Encode() solves that look ahead problem by using p->temp[] buffer.
- so if (p->state == BCJ2_ENC_STATE_ORIG) after Bcj2Enc_Encode(),
- then (p->src == p->srcLim).
- And the caller's code is simpler with Bcj2Enc_Encode().
-*/
-
-Z7_NO_INLINE
-void Bcj2Enc_Encode(CBcj2Enc *p)
-{
- PRF2("\n----")
- if (p->tempPos != 0)
- {
- /* extra: number of bytes that were copied from (src) to (temp) buffer in this call */
- unsigned extra = 0;
- /* We will touch only minimal required number of bytes in input (src) stream.
- So we will add input bytes from (src) stream to temp[] with step of 1 byte.
- We don't add new bytes to temp[] before Bcj2Enc_Encode_2() call
- in first loop iteration because
- - previous call of Bcj2Enc_Encode() could use another (finishMode),
- - previous call could finish with (p->state != BCJ2_ENC_STATE_ORIG).
- the case with full temp[] buffer (p->tempPos == 4) is possible here.
- */
- for (;;)
- {
- // (0 < p->tempPos <= 5) // in non-marker version
- /* p->src : the current src data position including extra bytes
- that were copied to temp[] buffer in this call */
- const Byte *src = p->src;
- const Byte *srcLim = p->srcLim;
- const EBcj2Enc_FinishMode finishMode = p->finishMode;
- if (src != srcLim)
- {
- /* if there are some src data after the data copied to temp[],
- then we use MODE_CONTINUE for temp data */
- p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE;
- }
- p->src = p->temp;
- p->srcLim = p->temp + p->tempPos;
- PRF2(" ")
- Bcj2Enc_Encode_2(p);
- {
- const unsigned num = (unsigned)(p->src - p->temp);
- const unsigned tempPos = p->tempPos - num;
- unsigned i;
- p->tempPos = tempPos;
- for (i = 0; i < tempPos; i++)
- p->temp[i] = p->temp[(SizeT)i + num];
- // tempPos : number of bytes in temp buffer
- p->src = src;
- p->srcLim = srcLim;
- p->finishMode = finishMode;
- if (p->state != BCJ2_ENC_STATE_ORIG)
- {
- // (p->tempPos <= 4) // in non-marker version
- /* if (the reason of exit from Bcj2Enc_Encode_2()
- is not BCJ2_ENC_STATE_ORIG),
- then we exit from Bcj2Enc_Encode() with same reason */
- // optional code begin : we rollback (src) and tempPos, if it's possible:
- if (extra >= tempPos)
- extra = tempPos;
- p->src = src - extra;
- p->tempPos = tempPos - extra;
- // optional code end : rollback of (src) and tempPos
- return;
- }
- /* (p->tempPos <= 4)
- (p->state == BCJ2_ENC_STATE_ORIG)
- so encoder needs more data than in temp[] */
- if (src == srcLim)
- return; // src buffer has no more input data.
- /* (src != srcLim)
- so we can provide more input data from src for Bcj2Enc_Encode_2() */
- if (extra >= tempPos)
- {
- /* (extra >= tempPos) means that temp buffer contains
- only data from src buffer of this call.
- So now we can encode without temp buffer */
- p->src = src - tempPos; // rollback (src)
- p->tempPos = 0;
- break;
- }
- // we append one additional extra byte from (src) to temp[] buffer:
- p->temp[tempPos] = *src;
- p->tempPos = tempPos + 1;
- // (0 < p->tempPos <= 5) // in non-marker version
- p->src = src + 1;
- extra++;
- }
- }
- }
-
- PRF2("++++")
- // (p->tempPos == 0)
- Bcj2Enc_Encode_2(p);
- PRF2("====")
-
- if (p->state == BCJ2_ENC_STATE_ORIG)
- {
- const Byte *src = p->src;
- const Byte *srcLim = p->srcLim;
- const unsigned rem = (unsigned)(srcLim - src);
- /* (rem <= 4) here.
- if (p->src != p->srcLim), then
- - we copy non-processed bytes from (p->src) to temp[] buffer,
- - we set p->src equal to p->srcLim.
- */
- if (rem)
- {
- unsigned i = 0;
- p->src = srcLim;
- p->tempPos = rem;
- // (0 < p->tempPos <= 4)
- do
- p->temp[i] = src[i];
- while (++i != rem);
- }
- // (p->tempPos <= 4)
- // (p->src == p->srcLim)
- }
-}
-
-#undef PRF2
-#undef CONV_FLAG
-#undef MARKER_FLAG
-#undef WRITE_CONTEXT
-#undef WRITE_CONTEXT_AND_SRC
-#undef ONE_ITER
-#undef NUM_SHIFT_BITS
-#undef kTopValue
-#undef kNumBitModelTotalBits
-#undef kBitModelTotal
-#undef kNumMoveBits
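/* Illustrative sketch (not from the deleted file): one-shot encoding of a
   fully buffered input, following the Bcj2.h comments. The output buffers are
   hypothetical and assumed large enough, so the encoder never pauses for
   output space; CALL/JUMP buffers are sized in multiples of 4 as required.
   Setting the file size is optional and assumes the input is one whole file. */
static int Bcj2_EncodeWhole(const Byte *src, SizeT srcSize,
    Byte *outs[BCJ2_NUM_STREAMS], const SizeT outSizes[BCJ2_NUM_STREAMS],
    SizeT written[BCJ2_NUM_STREAMS])
{
    CBcj2Enc enc;
    unsigned i;
    Bcj2Enc_Init(&enc);
    Bcj2Enc_SET_FileSize(&enc, srcSize)
    for (i = 0; i < BCJ2_NUM_STREAMS; i++)
    {
        enc.bufs[i] = outs[i];
        enc.lims[i] = outs[i] + outSizes[i];
    }
    enc.src = src;
    enc.srcLim = src + srcSize;
    enc.finishMode = BCJ2_ENC_FINISH_MODE_END_STREAM;
    Bcj2Enc_Encode(&enc);
    if (enc.state != BCJ2_ENC_STATE_FINISHED || !Bcj2Enc_IsFinished(&enc))
        return 1;   /* a streaming caller would refill the stream named by enc.state */
    for (i = 0; i < BCJ2_NUM_STREAMS; i++)
        written[i] = (SizeT)(enc.bufs[i] - outs[i]);
    return 0;
}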
diff --git a/3rdparty/7z/src/Blake2.h b/3rdparty/7z/src/Blake2.h
deleted file mode 100644
index 723523568a..0000000000
--- a/3rdparty/7z/src/Blake2.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Blake2.h -- BLAKE2 Hash
-2023-03-04 : Igor Pavlov : Public domain
-2015 : Samuel Neves : Public domain */
-
-#ifndef ZIP7_INC_BLAKE2_H
-#define ZIP7_INC_BLAKE2_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define BLAKE2S_BLOCK_SIZE 64
-#define BLAKE2S_DIGEST_SIZE 32
-#define BLAKE2SP_PARALLEL_DEGREE 8
-
-typedef struct
-{
- UInt32 h[8];
- UInt32 t[2];
- UInt32 f[2];
- Byte buf[BLAKE2S_BLOCK_SIZE];
- UInt32 bufPos;
- UInt32 lastNode_f1;
- UInt32 dummy[2]; /* for sizeof(CBlake2s) alignment */
-} CBlake2s;
-
-/* You need to xor CBlake2s::h[i] with input parameter block after Blake2s_Init0() */
-/*
-void Blake2s_Init0(CBlake2s *p);
-void Blake2s_Update(CBlake2s *p, const Byte *data, size_t size);
-void Blake2s_Final(CBlake2s *p, Byte *digest);
-*/
-
-
-typedef struct
-{
- CBlake2s S[BLAKE2SP_PARALLEL_DEGREE];
- unsigned bufPos;
-} CBlake2sp;
-
-
-void Blake2sp_Init(CBlake2sp *p);
-void Blake2sp_Update(CBlake2sp *p, const Byte *data, size_t size);
-void Blake2sp_Final(CBlake2sp *p, Byte *digest);
-
-EXTERN_C_END
-
-#endif
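/* Illustrative sketch (not from the deleted header): hashing a buffer with the
   BLAKE2sp (8-way parallel BLAKE2s) API declared above; Update() may be called
   repeatedly for streaming input. */
static void Blake2sp_OfBuffer(const Byte *data, size_t size,
    Byte digest[BLAKE2S_DIGEST_SIZE])
{
    CBlake2sp ctx;
    Blake2sp_Init(&ctx);
    Blake2sp_Update(&ctx, data, size);
    Blake2sp_Final(&ctx, digest);
}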
diff --git a/3rdparty/7z/src/Blake2s.c b/3rdparty/7z/src/Blake2s.c
deleted file mode 100644
index 2a84b57a42..0000000000
--- a/3rdparty/7z/src/Blake2s.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/* Blake2s.c -- BLAKE2s and BLAKE2sp Hash
-2023-03-04 : Igor Pavlov : Public domain
-2015 : Samuel Neves : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "Blake2.h"
-#include "CpuArch.h"
-#include "RotateDefs.h"
-
-#define rotr32 rotrFixed
-
-#define BLAKE2S_NUM_ROUNDS 10
-#define BLAKE2S_FINAL_FLAG (~(UInt32)0)
-
-static const UInt32 k_Blake2s_IV[8] =
-{
- 0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
- 0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
-};
-
-static const Byte k_Blake2s_Sigma[BLAKE2S_NUM_ROUNDS][16] =
-{
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
- { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,
- { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,
- { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,
- { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,
- { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,
- { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,
- { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,
- { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,
- { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } ,
-};
-
-
-static void Blake2s_Init0(CBlake2s *p)
-{
- unsigned i;
- for (i = 0; i < 8; i++)
- p->h[i] = k_Blake2s_IV[i];
- p->t[0] = 0;
- p->t[1] = 0;
- p->f[0] = 0;
- p->f[1] = 0;
- p->bufPos = 0;
- p->lastNode_f1 = 0;
-}
-
-
-static void Blake2s_Compress(CBlake2s *p)
-{
- UInt32 m[16];
- UInt32 v[16];
-
- {
- unsigned i;
-
- for (i = 0; i < 16; i++)
- m[i] = GetUi32(p->buf + i * sizeof(m[i]));
-
- for (i = 0; i < 8; i++)
- v[i] = p->h[i];
- }
-
- v[ 8] = k_Blake2s_IV[0];
- v[ 9] = k_Blake2s_IV[1];
- v[10] = k_Blake2s_IV[2];
- v[11] = k_Blake2s_IV[3];
-
- v[12] = p->t[0] ^ k_Blake2s_IV[4];
- v[13] = p->t[1] ^ k_Blake2s_IV[5];
- v[14] = p->f[0] ^ k_Blake2s_IV[6];
- v[15] = p->f[1] ^ k_Blake2s_IV[7];
-
- #define G(r,i,a,b,c,d) \
- a += b + m[sigma[2*i+0]]; d ^= a; d = rotr32(d, 16); c += d; b ^= c; b = rotr32(b, 12); \
- a += b + m[sigma[2*i+1]]; d ^= a; d = rotr32(d, 8); c += d; b ^= c; b = rotr32(b, 7); \
-
- #define R(r) \
- G(r,0,v[ 0],v[ 4],v[ 8],v[12]) \
- G(r,1,v[ 1],v[ 5],v[ 9],v[13]) \
- G(r,2,v[ 2],v[ 6],v[10],v[14]) \
- G(r,3,v[ 3],v[ 7],v[11],v[15]) \
- G(r,4,v[ 0],v[ 5],v[10],v[15]) \
- G(r,5,v[ 1],v[ 6],v[11],v[12]) \
- G(r,6,v[ 2],v[ 7],v[ 8],v[13]) \
- G(r,7,v[ 3],v[ 4],v[ 9],v[14]) \
-
- {
- unsigned r;
- for (r = 0; r < BLAKE2S_NUM_ROUNDS; r++)
- {
- const Byte *sigma = k_Blake2s_Sigma[r];
- R(r)
- }
- /* R(0); R(1); R(2); R(3); R(4); R(5); R(6); R(7); R(8); R(9); */
- }
-
- #undef G
- #undef R
-
- {
- unsigned i;
- for (i = 0; i < 8; i++)
- p->h[i] ^= v[i] ^ v[i + 8];
- }
-}
-
-
-#define Blake2s_Increment_Counter(S, inc) \
- { p->t[0] += (inc); p->t[1] += (p->t[0] < (inc)); }
-
-#define Blake2s_Set_LastBlock(p) \
- { p->f[0] = BLAKE2S_FINAL_FLAG; p->f[1] = p->lastNode_f1; }
-
-
-static void Blake2s_Update(CBlake2s *p, const Byte *data, size_t size)
-{
- while (size != 0)
- {
- unsigned pos = (unsigned)p->bufPos;
- unsigned rem = BLAKE2S_BLOCK_SIZE - pos;
-
- if (size <= rem)
- {
- memcpy(p->buf + pos, data, size);
- p->bufPos += (UInt32)size;
- return;
- }
-
- memcpy(p->buf + pos, data, rem);
- Blake2s_Increment_Counter(S, BLAKE2S_BLOCK_SIZE)
- Blake2s_Compress(p);
- p->bufPos = 0;
- data += rem;
- size -= rem;
- }
-}
-
-
-static void Blake2s_Final(CBlake2s *p, Byte *digest)
-{
- unsigned i;
-
- Blake2s_Increment_Counter(S, (UInt32)p->bufPos)
- Blake2s_Set_LastBlock(p)
- memset(p->buf + p->bufPos, 0, BLAKE2S_BLOCK_SIZE - p->bufPos);
- Blake2s_Compress(p);
-
- for (i = 0; i < 8; i++)
- {
- SetUi32(digest + sizeof(p->h[i]) * i, p->h[i])
- }
-}
-
-
-/* ---------- BLAKE2s ---------- */
-
-/* we need to xor CBlake2s::h[i] with input parameter block after Blake2s_Init0() */
-/*
-typedef struct
-{
- Byte digest_length;
- Byte key_length;
- Byte fanout;
- Byte depth;
- UInt32 leaf_length;
- Byte node_offset[6];
- Byte node_depth;
- Byte inner_length;
- Byte salt[BLAKE2S_SALTBYTES];
- Byte personal[BLAKE2S_PERSONALBYTES];
-} CBlake2sParam;
-*/
-
-
-static void Blake2sp_Init_Spec(CBlake2s *p, unsigned node_offset, unsigned node_depth)
-{
- Blake2s_Init0(p);
-
- p->h[0] ^= (BLAKE2S_DIGEST_SIZE | ((UInt32)BLAKE2SP_PARALLEL_DEGREE << 16) | ((UInt32)2 << 24));
- p->h[2] ^= ((UInt32)node_offset);
- p->h[3] ^= ((UInt32)node_depth << 16) | ((UInt32)BLAKE2S_DIGEST_SIZE << 24);
- /*
- P->digest_length = BLAKE2S_DIGEST_SIZE;
- P->key_length = 0;
- P->fanout = BLAKE2SP_PARALLEL_DEGREE;
- P->depth = 2;
- P->leaf_length = 0;
- store48(P->node_offset, node_offset);
- P->node_depth = node_depth;
- P->inner_length = BLAKE2S_DIGEST_SIZE;
- */
-}
-
-
-void Blake2sp_Init(CBlake2sp *p)
-{
- unsigned i;
-
- p->bufPos = 0;
-
- for (i = 0; i < BLAKE2SP_PARALLEL_DEGREE; i++)
- Blake2sp_Init_Spec(&p->S[i], i, 0);
-
- p->S[BLAKE2SP_PARALLEL_DEGREE - 1].lastNode_f1 = BLAKE2S_FINAL_FLAG;
-}
-
-
-void Blake2sp_Update(CBlake2sp *p, const Byte *data, size_t size)
-{
- unsigned pos = p->bufPos;
- while (size != 0)
- {
- unsigned index = pos / BLAKE2S_BLOCK_SIZE;
- unsigned rem = BLAKE2S_BLOCK_SIZE - (pos & (BLAKE2S_BLOCK_SIZE - 1));
- if (rem > size)
- rem = (unsigned)size;
- Blake2s_Update(&p->S[index], data, rem);
- size -= rem;
- data += rem;
- pos += rem;
- pos &= (BLAKE2S_BLOCK_SIZE * BLAKE2SP_PARALLEL_DEGREE - 1);
- }
- p->bufPos = pos;
-}
-
-
-void Blake2sp_Final(CBlake2sp *p, Byte *digest)
-{
- CBlake2s R;
- unsigned i;
-
- Blake2sp_Init_Spec(&R, 0, 1);
- R.lastNode_f1 = BLAKE2S_FINAL_FLAG;
-
- for (i = 0; i < BLAKE2SP_PARALLEL_DEGREE; i++)
- {
- Byte hash[BLAKE2S_DIGEST_SIZE];
- Blake2s_Final(&p->S[i], hash);
- Blake2s_Update(&R, hash, BLAKE2S_DIGEST_SIZE);
- }
-
- Blake2s_Final(&R, digest);
-}
-
-#undef rotr32
diff --git a/3rdparty/7z/src/Bra.c b/3rdparty/7z/src/Bra.c
deleted file mode 100644
index 411f25232b..0000000000
--- a/3rdparty/7z/src/Bra.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/* Bra.c -- Branch converters for RISC code
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Bra.h"
-#include "CpuArch.h"
-#include "RotateDefs.h"
-
-#if defined(MY_CPU_SIZEOF_POINTER) \
- && ( MY_CPU_SIZEOF_POINTER == 4 \
- || MY_CPU_SIZEOF_POINTER == 8)
- #define BR_CONV_USE_OPT_PC_PTR
-#endif
-
-#ifdef BR_CONV_USE_OPT_PC_PTR
-#define BR_PC_INIT pc -= (UInt32)(SizeT)p;
-#define BR_PC_GET (pc + (UInt32)(SizeT)p)
-#else
-#define BR_PC_INIT pc += (UInt32)size;
-#define BR_PC_GET (pc - (UInt32)(SizeT)(lim - p))
-// #define BR_PC_INIT
-// #define BR_PC_GET (pc + (UInt32)(SizeT)(p - data))
-#endif
-
-#define BR_CONVERT_VAL(v, c) if (encoding) v += c; else v -= c;
-// #define BR_CONVERT_VAL(v, c) if (!encoding) c = (UInt32)0 - c; v += c;
-
-#define Z7_BRANCH_CONV(name) z7_BranchConv_ ## name
-
-#define Z7_BRANCH_FUNC_MAIN(name) \
-static \
-Z7_FORCE_INLINE \
-Z7_ATTRIB_NO_VECTOR \
-Byte *Z7_BRANCH_CONV(name)(Byte *p, SizeT size, UInt32 pc, int encoding)
-
-#define Z7_BRANCH_FUNC_IMP(name, m, encoding) \
-Z7_NO_INLINE \
-Z7_ATTRIB_NO_VECTOR \
-Byte *m(name)(Byte *data, SizeT size, UInt32 pc) \
- { return Z7_BRANCH_CONV(name)(data, size, pc, encoding); } \
-
-#ifdef Z7_EXTRACT_ONLY
-#define Z7_BRANCH_FUNCS_IMP(name) \
- Z7_BRANCH_FUNC_IMP(name, Z7_BRANCH_CONV_DEC, 0)
-#else
-#define Z7_BRANCH_FUNCS_IMP(name) \
- Z7_BRANCH_FUNC_IMP(name, Z7_BRANCH_CONV_DEC, 0) \
- Z7_BRANCH_FUNC_IMP(name, Z7_BRANCH_CONV_ENC, 1)
-#endif
-
-#if defined(__clang__)
-#define BR_EXTERNAL_FOR
-#define BR_NEXT_ITERATION continue;
-#else
-#define BR_EXTERNAL_FOR for (;;)
-#define BR_NEXT_ITERATION break;
-#endif
-
-#if defined(__clang__) && (__clang_major__ >= 8) \
- || defined(__GNUC__) && (__GNUC__ >= 1000) \
- // GCC is not good for __builtin_expect() here
- /* || defined(_MSC_VER) && (_MSC_VER >= 1920) */
- // #define Z7_unlikely [[unlikely]]
- // #define Z7_LIKELY(x) (__builtin_expect((x), 1))
- #define Z7_UNLIKELY(x) (__builtin_expect((x), 0))
- // #define Z7_likely [[likely]]
-#else
- // #define Z7_LIKELY(x) (x)
- #define Z7_UNLIKELY(x) (x)
- // #define Z7_likely
-#endif
-
-
-Z7_BRANCH_FUNC_MAIN(ARM64)
-{
- // Byte *p = data;
- const Byte *lim;
- const UInt32 flag = (UInt32)1 << (24 - 4);
- const UInt32 mask = ((UInt32)1 << 24) - (flag << 1);
- size &= ~(SizeT)3;
- // if (size == 0) return p;
- lim = p + size;
- BR_PC_INIT
- pc -= 4; // because (p) will point to next instruction
-
- BR_EXTERNAL_FOR
- {
- // Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (;;)
- {
- UInt32 v;
- if Z7_UNLIKELY(p == lim)
- return p;
- v = GetUi32a(p);
- p += 4;
- if Z7_UNLIKELY(((v - 0x94000000) & 0xfc000000) == 0)
- {
- UInt32 c = BR_PC_GET >> 2;
- BR_CONVERT_VAL(v, c)
- v &= 0x03ffffff;
- v |= 0x94000000;
- SetUi32a(p - 4, v)
- BR_NEXT_ITERATION
- }
- // v = rotlFixed(v, 8); v += (flag << 8) - 0x90; if Z7_UNLIKELY((v & ((mask << 8) + 0x9f)) == 0)
- v -= 0x90000000; if Z7_UNLIKELY((v & 0x9f000000) == 0)
- {
- UInt32 z, c;
- // v = rotrFixed(v, 8);
- v += flag; if Z7_UNLIKELY(v & mask) continue;
- z = (v & 0xffffffe0) | (v >> 26);
- c = (BR_PC_GET >> (12 - 3)) & ~(UInt32)7;
- BR_CONVERT_VAL(z, c)
- v &= 0x1f;
- v |= 0x90000000;
- v |= z << 26;
- v |= 0x00ffffe0 & ((z & (((flag << 1) - 1))) - flag);
- SetUi32a(p - 4, v)
- }
- }
- }
-}
-Z7_BRANCH_FUNCS_IMP(ARM64)
-
-
-Z7_BRANCH_FUNC_MAIN(ARM)
-{
- // Byte *p = data;
- const Byte *lim;
- size &= ~(SizeT)3;
- lim = p + size;
- BR_PC_INIT
- /* in ARM: branch offset is relative to the +2 instructions from current instruction.
- (p) will point to next instruction */
- pc += 8 - 4;
-
- for (;;)
- {
- for (;;)
- {
- if Z7_UNLIKELY(p >= lim) { return p; } p += 4; if Z7_UNLIKELY(p[-1] == 0xeb) break;
- if Z7_UNLIKELY(p >= lim) { return p; } p += 4; if Z7_UNLIKELY(p[-1] == 0xeb) break;
- }
- {
- UInt32 v = GetUi32a(p - 4);
- UInt32 c = BR_PC_GET >> 2;
- BR_CONVERT_VAL(v, c)
- v &= 0x00ffffff;
- v |= 0xeb000000;
- SetUi32a(p - 4, v)
- }
- }
-}
-Z7_BRANCH_FUNCS_IMP(ARM)
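/* Illustrative sketch (not from the deleted file): what the ARM filter above
   does for one BL instruction (top byte 0xEB). The 24-bit field is a signed
   word offset relative to the instruction address + 8; the encoder replaces it
   with the absolute target in words (mod 2^24), so repeated calls to the same
   function become identical byte patterns and compress better, and the decoder
   applies the inverse. Simplified scalar version: */
#include <stdint.h>

static uint32_t arm_bl_filter_word(uint32_t insn, uint32_t insnAddr, int encoding)
{
    if ((insn & 0xff000000) != 0xeb000000)
        return insn;                                    /* not a BL (AL condition) */
    {
        const uint32_t pcWords = (insnAddr + 8) >> 2;   /* ARM PC bias of +8 bytes */
        uint32_t field = insn & 0x00ffffff;
        field = (encoding ? field + pcWords : field - pcWords) & 0x00ffffff;
        return 0xeb000000 | field;
    }
}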
-
-
-Z7_BRANCH_FUNC_MAIN(PPC)
-{
- // Byte *p = data;
- const Byte *lim;
- size &= ~(SizeT)3;
- lim = p + size;
- BR_PC_INIT
- pc -= 4; // because (p) will point to next instruction
-
- for (;;)
- {
- UInt32 v;
- for (;;)
- {
- if Z7_UNLIKELY(p == lim)
- return p;
- // v = GetBe32a(p);
- v = *(UInt32 *)(void *)p;
- p += 4;
- // if ((v & 0xfc000003) == 0x48000001) break;
- // if ((p[-4] & 0xFC) == 0x48 && (p[-1] & 3) == 1) break;
- if Z7_UNLIKELY(
- ((v - Z7_CONV_BE_TO_NATIVE_CONST32(0x48000001))
- & Z7_CONV_BE_TO_NATIVE_CONST32(0xfc000003)) == 0) break;
- }
- {
- v = Z7_CONV_NATIVE_TO_BE_32(v);
- {
- UInt32 c = BR_PC_GET;
- BR_CONVERT_VAL(v, c)
- }
- v &= 0x03ffffff;
- v |= 0x48000000;
- SetBe32a(p - 4, v)
- }
- }
-}
-Z7_BRANCH_FUNCS_IMP(PPC)
-
-
-#ifdef Z7_CPU_FAST_ROTATE_SUPPORTED
-#define BR_SPARC_USE_ROTATE
-#endif
-
-Z7_BRANCH_FUNC_MAIN(SPARC)
-{
- // Byte *p = data;
- const Byte *lim;
- const UInt32 flag = (UInt32)1 << 22;
- size &= ~(SizeT)3;
- lim = p + size;
- BR_PC_INIT
- pc -= 4; // because (p) will point to next instruction
- for (;;)
- {
- UInt32 v;
- for (;;)
- {
- if Z7_UNLIKELY(p == lim)
- return p;
- /* // the code without GetBe32a():
- { const UInt32 v = GetUi16a(p) & 0xc0ff; p += 4; if (v == 0x40 || v == 0xc07f) break; }
- */
- v = GetBe32a(p);
- p += 4;
- #ifdef BR_SPARC_USE_ROTATE
- v = rotlFixed(v, 2);
- v += (flag << 2) - 1;
- if Z7_UNLIKELY((v & (3 - (flag << 3))) == 0)
- #else
- v += (UInt32)5 << 29;
- v ^= (UInt32)7 << 29;
- v += flag;
- if Z7_UNLIKELY((v & (0 - (flag << 1))) == 0)
- #endif
- break;
- }
- {
- // UInt32 v = GetBe32a(p - 4);
- #ifndef BR_SPARC_USE_ROTATE
- v <<= 2;
- #endif
- {
- UInt32 c = BR_PC_GET;
- BR_CONVERT_VAL(v, c)
- }
- v &= (flag << 3) - 1;
- #ifdef BR_SPARC_USE_ROTATE
- v -= (flag << 2) - 1;
- v = rotrFixed(v, 2);
- #else
- v -= (flag << 2);
- v >>= 2;
- v |= (UInt32)1 << 30;
- #endif
- SetBe32a(p - 4, v)
- }
- }
-}
-Z7_BRANCH_FUNCS_IMP(SPARC)
-
-
-Z7_BRANCH_FUNC_MAIN(ARMT)
-{
- // Byte *p = data;
- Byte *lim;
- size &= ~(SizeT)1;
- // if (size == 0) return p;
- if (size <= 2) return p;
- size -= 2;
- lim = p + size;
- BR_PC_INIT
- /* in ARM: branch offset is relative to the +2 instructions from current instruction.
- (p) will point to the +2 instructions from current instruction */
- // pc += 4 - 4;
- // if (encoding) pc -= 0xf800 << 1; else pc += 0xf800 << 1;
- // #define ARMT_TAIL_PROC { goto armt_tail; }
- #define ARMT_TAIL_PROC { return p; }
-
- do
- {
- /* in MSVC 32-bit x86 compilers:
- UInt32 version : it loads value from memory with movzx
- Byte version : it loads value to 8-bit register (AL/CL)
- movzx version is slightly faster in some cpus
- */
- unsigned b1;
- // Byte / unsigned
- b1 = p[1];
- // optimized version to reduce one (p >= lim) check:
- // unsigned a1 = p[1]; b1 = p[3]; p += 2; if Z7_LIKELY((b1 & (a1 ^ 8)) < 0xf8)
- for (;;)
- {
- unsigned b3; // Byte / UInt32
- /* (Byte)(b3) normalization can use low byte computations in MSVC.
- It gives smaller code, and no loss of speed in some compilers/cpus.
-     But new MSVC 32-bit x86 compilers use a slower load
-     from memory into a low byte register in that case.
- So we try to use full 32-bit computations for faster code.
- */
- // if (p >= lim) { ARMT_TAIL_PROC } b3 = b1 + 8; b1 = p[3]; p += 2; if ((b3 & b1) >= 0xf8) break;
- if Z7_UNLIKELY(p >= lim) { ARMT_TAIL_PROC } b3 = p[3]; p += 2; if Z7_UNLIKELY((b3 & (b1 ^ 8)) >= 0xf8) break;
- if Z7_UNLIKELY(p >= lim) { ARMT_TAIL_PROC } b1 = p[3]; p += 2; if Z7_UNLIKELY((b1 & (b3 ^ 8)) >= 0xf8) break;
- }
- {
-    /* we can adjust pc for (0xf800) to get rid of the (& 0x7FF) operation.
- But gcc/clang for arm64 can use bfi instruction for full code here */
- UInt32 v =
- ((UInt32)GetUi16a(p - 2) << 11) |
- ((UInt32)GetUi16a(p) & 0x7FF);
- /*
- UInt32 v =
- ((UInt32)p[1 - 2] << 19)
- + (((UInt32)p[1] & 0x7) << 8)
- + (((UInt32)p[-2] << 11))
- + (p[0]);
- */
- p += 2;
- {
- UInt32 c = BR_PC_GET >> 1;
- BR_CONVERT_VAL(v, c)
- }
- SetUi16a(p - 4, (UInt16)(((v >> 11) & 0x7ff) | 0xf000))
- SetUi16a(p - 2, (UInt16)(v | 0xf800))
- /*
- p[-4] = (Byte)(v >> 11);
- p[-3] = (Byte)(0xf0 | ((v >> 19) & 0x7));
- p[-2] = (Byte)v;
- p[-1] = (Byte)(0xf8 | (v >> 8));
- */
- }
- }
- while (p < lim);
- return p;
- // armt_tail:
- // if ((Byte)((lim[1] & 0xf8)) != 0xf0) { lim += 2; } return lim;
- // return (Byte *)(lim + ((Byte)((lim[1] ^ 0xf0) & 0xf8) == 0 ? 0 : 2));
- // return (Byte *)(lim + (((lim[1] ^ ~0xfu) & ~7u) == 0 ? 0 : 2));
- // return (Byte *)(lim + 2 - (((((unsigned)lim[1] ^ 8) + 8) >> 7) & 2));
-}
-Z7_BRANCH_FUNCS_IMP(ARMT)
-
-
-// #define BR_IA64_NO_INLINE
-
-Z7_BRANCH_FUNC_MAIN(IA64)
-{
- // Byte *p = data;
- const Byte *lim;
- size &= ~(SizeT)15;
- lim = p + size;
- pc -= 1 << 4;
- pc >>= 4 - 1;
- // pc -= 1 << 1;
-
- for (;;)
- {
- unsigned m;
- for (;;)
- {
- if Z7_UNLIKELY(p == lim)
- return p;
- m = (unsigned)((UInt32)0x334b0000 >> (*p & 0x1e));
- p += 16;
- pc += 1 << 1;
- if (m &= 3)
- break;
- }
- {
- p += (ptrdiff_t)m * 5 - 20; // negative value is expected here.
- do
- {
- const UInt32 t =
- #if defined(MY_CPU_X86_OR_AMD64)
- // we use 32-bit load here to reduce code size on x86:
- GetUi32(p);
- #else
- GetUi16(p);
- #endif
- UInt32 z = GetUi32(p + 1) >> m;
- p += 5;
- if (((t >> m) & (0x70 << 1)) == 0
- && ((z - (0x5000000 << 1)) & (0xf000000 << 1)) == 0)
- {
- UInt32 v = (UInt32)((0x8fffff << 1) | 1) & z;
- z ^= v;
- #ifdef BR_IA64_NO_INLINE
- v |= (v & ((UInt32)1 << (23 + 1))) >> 3;
- {
- UInt32 c = pc;
- BR_CONVERT_VAL(v, c)
- }
- v &= (0x1fffff << 1) | 1;
- #else
- {
- if (encoding)
- {
- // pc &= ~(0xc00000 << 1); // we just need to clear at least 2 bits
- pc &= (0x1fffff << 1) | 1;
- v += pc;
- }
- else
- {
- // pc |= 0xc00000 << 1; // we need to set at least 2 bits
- pc |= ~(UInt32)((0x1fffff << 1) | 1);
- v -= pc;
- }
- }
- v &= ~(UInt32)(0x600000 << 1);
- #endif
- v += (0x700000 << 1);
- v &= (0x8fffff << 1) | 1;
- z |= v;
- z <<= m;
- SetUi32(p + 1 - 5, z)
- }
- m++;
- }
- while (m &= 3); // while (m < 4);
- }
- }
-}
-Z7_BRANCH_FUNCS_IMP(IA64)
diff --git a/3rdparty/7z/src/Bra.h b/3rdparty/7z/src/Bra.h
deleted file mode 100644
index aeedaba305..0000000000
--- a/3rdparty/7z/src/Bra.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Bra.h -- Branch converters for executables
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_BRA_H
-#define ZIP7_INC_BRA_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define Z7_BRANCH_CONV_DEC(name) z7_BranchConv_ ## name ## _Dec
-#define Z7_BRANCH_CONV_ENC(name) z7_BranchConv_ ## name ## _Enc
-#define Z7_BRANCH_CONV_ST_DEC(name) z7_BranchConvSt_ ## name ## _Dec
-#define Z7_BRANCH_CONV_ST_ENC(name) z7_BranchConvSt_ ## name ## _Enc
-
-#define Z7_BRANCH_CONV_DECL(name) Byte * name(Byte *data, SizeT size, UInt32 pc)
-#define Z7_BRANCH_CONV_ST_DECL(name) Byte * name(Byte *data, SizeT size, UInt32 pc, UInt32 *state)
-
-typedef Z7_BRANCH_CONV_DECL( (*z7_Func_BranchConv));
-typedef Z7_BRANCH_CONV_ST_DECL((*z7_Func_BranchConvSt));
-
-#define Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL 0
-Z7_BRANCH_CONV_ST_DECL(Z7_BRANCH_CONV_ST_DEC(X86));
-Z7_BRANCH_CONV_ST_DECL(Z7_BRANCH_CONV_ST_ENC(X86));
-
-#define Z7_BRANCH_FUNCS_DECL(name) \
-Z7_BRANCH_CONV_DECL(Z7_BRANCH_CONV_DEC(name)); \
-Z7_BRANCH_CONV_DECL(Z7_BRANCH_CONV_ENC(name));
-
-Z7_BRANCH_FUNCS_DECL(ARM64)
-Z7_BRANCH_FUNCS_DECL(ARM)
-Z7_BRANCH_FUNCS_DECL(ARMT)
-Z7_BRANCH_FUNCS_DECL(PPC)
-Z7_BRANCH_FUNCS_DECL(SPARC)
-Z7_BRANCH_FUNCS_DECL(IA64)
-
-/*
-These functions convert data that contains CPU instructions.
-Each such function converts relative addresses to absolute addresses in some
-branch instructions: CALL (in all converters) and JUMP (X86 converter only).
-Such conversion improves the compression ratio when that data is compressed.
-
-There are 2 types of converters:
- Byte * Conv_RISC (Byte *data, SizeT size, UInt32 pc);
- Byte * ConvSt_X86(Byte *data, SizeT size, UInt32 pc, UInt32 *state);
-Each Converter supports 2 versions: one for encoding
-and one for decoding (_Enc/_Dec postfixes in function name).
-
-In params:
- data : data buffer
- size : size of data
- pc : current virtual Program Counter (Instruction Pointer) value
-In/Out param:
- state : pointer to state variable (for X86 converter only)
-
-Return:
- The pointer to the position in the (data) buffer after the last byte that was processed.
- If the caller calls the converter again, it must call it starting at that position.
- But the caller is allowed to move the data in the buffer, so the pointer to
- the current processed position will also change for the next call.
- Also the caller must increase the internal (pc) value for the next call.
-
-Each converter has some characteristics: Endian, Alignment, LookAhead.
- Type Endian Alignment LookAhead
-
- X86 little 1 4
- ARMT little 2 2
- ARM little 4 0
- ARM64 little 4 0
- PPC big 4 0
- SPARC big 4 0
- IA64 little 16 0
-
- (data) must be aligned for (Alignment).
- processed size can be calculated as:
- SizeT processed = Conv(data, size, pc) - data;
- if (processed == 0)
- it means that converter needs more data for processing.
- If (size < Alignment + LookAhead)
- then (processed == 0) is allowed.
-
-Example code for conversion in loop:
- UInt32 pc = 0;
- size = 0;
- for (;;)
- {
- size += Load_more_input_data(data + size);
- SizeT processed = Conv(data, size, pc) - data;
- if (processed == 0 && no_more_input_data_after_size)
- break; // we stop convert loop
- data += processed;
- size -= processed;
- pc += processed;
- }
-*/
-
-EXTERN_C_END
-
-#endif
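The comment above already sketches the conversion loop; the following is a hedged, compilable rendering of that same example for the ARM64 decoder. Only z7_BranchConv_ARM64_Dec and the Bra.h types are taken from the removed header; the input callback and wrapper name are assumptions made for illustration.

    #include <string.h>
    #include "Bra.h"

    /* Sketch: decode an ARM64-filtered stream chunk by chunk, following the
       documented contract: advance by the processed size and keep pc in sync. */
    static void Arm64Filter_DecodeStream(Byte *buf, SizeT bufCapacity,
        SizeT (*loadMore)(Byte *dst, SizeT max)) /* hypothetical input callback */
    {
      UInt32 pc = 0;
      SizeT size = 0;
      for (;;)
      {
        const SizeT got = loadMore(buf + size, bufCapacity - size);
        size += got;
        {
          const SizeT processed = (SizeT)(z7_BranchConv_ARM64_Dec(buf, size, pc) - buf);
          if (processed == 0 && got == 0)
            break; /* the converter needs more data, but the input is exhausted */
          /* the caller is allowed to move the unprocessed tail to the buffer start */
          memmove(buf, buf + processed, size - processed);
          size -= processed;
          pc += (UInt32)processed;
        }
      }
    }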
diff --git a/3rdparty/7z/src/Bra86.c b/3rdparty/7z/src/Bra86.c
deleted file mode 100644
index 9c468ba742..0000000000
--- a/3rdparty/7z/src/Bra86.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/* Bra86.c -- Branch converter for X86 code (BCJ)
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Bra.h"
-#include "CpuArch.h"
-
-
-#if defined(MY_CPU_SIZEOF_POINTER) \
- && ( MY_CPU_SIZEOF_POINTER == 4 \
- || MY_CPU_SIZEOF_POINTER == 8)
- #define BR_CONV_USE_OPT_PC_PTR
-#endif
-
-#ifdef BR_CONV_USE_OPT_PC_PTR
-#define BR_PC_INIT pc -= (UInt32)(SizeT)p; // (MY_uintptr_t)
-#define BR_PC_GET (pc + (UInt32)(SizeT)p)
-#else
-#define BR_PC_INIT pc += (UInt32)size;
-#define BR_PC_GET (pc - (UInt32)(SizeT)(lim - p))
-// #define BR_PC_INIT
-// #define BR_PC_GET (pc + (UInt32)(SizeT)(p - data))
-#endif
-
-#define BR_CONVERT_VAL(v, c) if (encoding) v += c; else v -= c;
-// #define BR_CONVERT_VAL(v, c) if (!encoding) c = (UInt32)0 - c; v += c;
-
-#define Z7_BRANCH_CONV_ST(name) z7_BranchConvSt_ ## name
-
-#define BR86_NEED_CONV_FOR_MS_BYTE(b) ((((b) + 1) & 0xfe) == 0)
-
-#ifdef MY_CPU_LE_UNALIGN
- #define BR86_PREPARE_BCJ_SCAN const UInt32 v = GetUi32(p) ^ 0xe8e8e8e8;
- #define BR86_IS_BCJ_BYTE(n) ((v & ((UInt32)0xfe << (n) * 8)) == 0)
-#else
- #define BR86_PREPARE_BCJ_SCAN
- // bad for MSVC X86 (partial write to byte reg):
- #define BR86_IS_BCJ_BYTE(n) ((p[n - 4] & 0xfe) == 0xe8)
- // bad for old MSVC (partial write to byte reg):
- // #define BR86_IS_BCJ_BYTE(n) (((*p ^ 0xe8) & 0xfe) == 0)
-#endif
-
-static
-Z7_FORCE_INLINE
-Z7_ATTRIB_NO_VECTOR
-Byte *Z7_BRANCH_CONV_ST(X86)(Byte *p, SizeT size, UInt32 pc, UInt32 *state, int encoding)
-{
- if (size < 5)
- return p;
- {
- // Byte *p = data;
- const Byte *lim = p + size - 4;
- unsigned mask = (unsigned)*state; // & 7;
-#ifdef BR_CONV_USE_OPT_PC_PTR
- /* if BR_CONV_USE_OPT_PC_PTR is defined: we need to adjust (pc) for (+4),
- because call/jump offset is relative to the next instruction.
- if BR_CONV_USE_OPT_PC_PTR is not defined : we don't need to adjust (pc) for (+4),
- because BR_PC_GET uses (pc - (lim - p)), and lim was adjusted for (-4) before.
- */
- pc += 4;
-#endif
- BR_PC_INIT
- goto start;
-
- for (;; mask |= 4)
- {
- // cont: mask |= 4;
- start:
- if (p >= lim)
- goto fin;
- {
- BR86_PREPARE_BCJ_SCAN
- p += 4;
- if (BR86_IS_BCJ_BYTE(0)) { goto m0; } mask >>= 1;
- if (BR86_IS_BCJ_BYTE(1)) { goto m1; } mask >>= 1;
- if (BR86_IS_BCJ_BYTE(2)) { goto m2; } mask = 0;
- if (BR86_IS_BCJ_BYTE(3)) { goto a3; }
- }
- goto main_loop;
-
- m0: p--;
- m1: p--;
- m2: p--;
- if (mask == 0)
- goto a3;
- if (p > lim)
- goto fin_p;
-
- // if (((0x17u >> mask) & 1) == 0)
- if (mask > 4 || mask == 3)
- {
- mask >>= 1;
- continue; // goto cont;
- }
- mask >>= 1;
- if (BR86_NEED_CONV_FOR_MS_BYTE(p[mask]))
- continue; // goto cont;
- // if (!BR86_NEED_CONV_FOR_MS_BYTE(p[3])) continue; // goto cont;
- {
- UInt32 v = GetUi32(p);
- UInt32 c;
- v += (1 << 24); if (v & 0xfe000000) continue; // goto cont;
- c = BR_PC_GET;
- BR_CONVERT_VAL(v, c)
- {
- mask <<= 3;
- if (BR86_NEED_CONV_FOR_MS_BYTE(v >> mask))
- {
- v ^= (((UInt32)0x100 << mask) - 1);
- #ifdef MY_CPU_X86
- // for X86 : we can recalculate (c) to reduce register pressure
- c = BR_PC_GET;
- #endif
- BR_CONVERT_VAL(v, c)
- }
- mask = 0;
- }
- // v = (v & ((1 << 24) - 1)) - (v & (1 << 24));
- v &= (1 << 25) - 1; v -= (1 << 24);
- SetUi32(p, v)
- p += 4;
- goto main_loop;
- }
-
- main_loop:
- if (p >= lim)
- goto fin;
- for (;;)
- {
- BR86_PREPARE_BCJ_SCAN
- p += 4;
- if (BR86_IS_BCJ_BYTE(0)) { goto a0; }
- if (BR86_IS_BCJ_BYTE(1)) { goto a1; }
- if (BR86_IS_BCJ_BYTE(2)) { goto a2; }
- if (BR86_IS_BCJ_BYTE(3)) { goto a3; }
- if (p >= lim)
- goto fin;
- }
-
- a0: p--;
- a1: p--;
- a2: p--;
- a3:
- if (p > lim)
- goto fin_p;
- // if (!BR86_NEED_CONV_FOR_MS_BYTE(p[3])) continue; // goto cont;
- {
- UInt32 v = GetUi32(p);
- UInt32 c;
- v += (1 << 24); if (v & 0xfe000000) continue; // goto cont;
- c = BR_PC_GET;
- BR_CONVERT_VAL(v, c)
- // v = (v & ((1 << 24) - 1)) - (v & (1 << 24));
- v &= (1 << 25) - 1; v -= (1 << 24);
- SetUi32(p, v)
- p += 4;
- goto main_loop;
- }
- }
-
-fin_p:
- p--;
-fin:
- // the following processing for tail is optional and can be commented
- /*
- lim += 4;
- for (; p < lim; p++, mask >>= 1)
- if ((*p & 0xfe) == 0xe8)
- break;
- */
- *state = (UInt32)mask;
- return p;
- }
-}
-
-
-#define Z7_BRANCH_CONV_ST_FUNC_IMP(name, m, encoding) \
-Z7_NO_INLINE \
-Z7_ATTRIB_NO_VECTOR \
-Byte *m(name)(Byte *data, SizeT size, UInt32 pc, UInt32 *state) \
- { return Z7_BRANCH_CONV_ST(name)(data, size, pc, state, encoding); }
-
-Z7_BRANCH_CONV_ST_FUNC_IMP(X86, Z7_BRANCH_CONV_ST_DEC, 0)
-#ifndef Z7_EXTRACT_ONLY
-Z7_BRANCH_CONV_ST_FUNC_IMP(X86, Z7_BRANCH_CONV_ST_ENC, 1)
-#endif
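For the stateful X86 (BCJ) converter removed above, the extra state word has to be initialized with Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL and carried between calls. A minimal sketch for a buffer that is already complete in memory (the wrapper name is an assumption; the function, macro and types come from Bra.h):

    #include "Bra.h"

    /* Sketch: one-shot BCJ (x86) decode of a whole in-memory buffer.
       pc = 0 treats the buffer as if it starts at virtual address 0. */
    static void BcjX86_DecodeWholeBuffer(Byte *data, SizeT size)
    {
      UInt32 state = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
      z7_BranchConvSt_X86_Dec(data, size, 0, &state);
    }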
diff --git a/3rdparty/7z/src/BraIA64.c b/3rdparty/7z/src/BraIA64.c
deleted file mode 100644
index 1b61927ccd..0000000000
--- a/3rdparty/7z/src/BraIA64.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* BraIA64.c -- Converter for IA-64 code
-2023-02-20 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-// the code was moved to Bra.c
-
-#ifdef _MSC_VER
-#pragma warning(disable : 4206) // nonstandard extension used : translation unit is empty
-#endif
-
-#if defined(__clang__)
-#pragma GCC diagnostic ignored "-Wempty-translation-unit"
-#endif
diff --git a/3rdparty/7z/src/BwtSort.c b/3rdparty/7z/src/BwtSort.c
deleted file mode 100644
index 05ad6de8b9..0000000000
--- a/3rdparty/7z/src/BwtSort.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/* BwtSort.c -- BWT block sorting
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "BwtSort.h"
-#include "Sort.h"
-
-/* #define BLOCK_SORT_USE_HEAP_SORT */
-
-/* Don't change it !!! */
-#define kNumHashBytes 2
-#define kNumHashValues (1 << (kNumHashBytes * 8))
-
-/* kNumRefBitsMax must be < (kNumHashBytes * 8) = 16 */
-#define kNumRefBitsMax 12
-
-#define BS_TEMP_SIZE kNumHashValues
-
-#ifdef BLOCK_SORT_EXTERNAL_FLAGS
-
-/* 32 Flags in UInt32 word */
-#define kNumFlagsBits 5
-#define kNumFlagsInWord (1 << kNumFlagsBits)
-#define kFlagsMask (kNumFlagsInWord - 1)
-#define kAllFlags 0xFFFFFFFF
-
-#else
-
-#define kNumBitsMax 20
-#define kIndexMask ((1 << kNumBitsMax) - 1)
-#define kNumExtraBits (32 - kNumBitsMax)
-#define kNumExtra0Bits (kNumExtraBits - 2)
-#define kNumExtra0Mask ((1 << kNumExtra0Bits) - 1)
-
-#define SetFinishedGroupSize(p, size) \
- { *(p) |= ((((size) - 1) & kNumExtra0Mask) << kNumBitsMax); \
- if ((size) > (1 << kNumExtra0Bits)) { \
- *(p) |= 0x40000000; *((p) + 1) |= ((((size) - 1)>> kNumExtra0Bits) << kNumBitsMax); } } \
-
-static void SetGroupSize(UInt32 *p, UInt32 size)
-{
- if (--size == 0)
- return;
- *p |= 0x80000000 | ((size & kNumExtra0Mask) << kNumBitsMax);
- if (size >= (1 << kNumExtra0Bits))
- {
- *p |= 0x40000000;
- p[1] |= ((size >> kNumExtra0Bits) << kNumBitsMax);
- }
-}
-
-#endif
-
-/*
-SortGroup is a recursive Range-Sort function with a HeapSort optimization for small blocks.
-  "range" is not a real range; it is only used for optimization.
-returns: 1 - if there are groups, 0 - no more groups
-*/
-
-static
-UInt32
-Z7_FASTCALL
-SortGroup(UInt32 BlockSize, UInt32 NumSortedBytes, UInt32 groupOffset, UInt32 groupSize, int NumRefBits, UInt32 *Indices
- #ifndef BLOCK_SORT_USE_HEAP_SORT
- , UInt32 left, UInt32 range
- #endif
- )
-{
- UInt32 *ind2 = Indices + groupOffset;
- UInt32 *Groups;
- if (groupSize <= 1)
- {
- /*
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- SetFinishedGroupSize(ind2, 1)
- #endif
- */
- return 0;
- }
- Groups = Indices + BlockSize + BS_TEMP_SIZE;
- if (groupSize <= ((UInt32)1 << NumRefBits)
- #ifndef BLOCK_SORT_USE_HEAP_SORT
- && groupSize <= range
- #endif
- )
- {
- UInt32 *temp = Indices + BlockSize;
- UInt32 j;
- UInt32 mask, thereAreGroups, group, cg;
- {
- UInt32 gPrev;
- UInt32 gRes = 0;
- {
- UInt32 sp = ind2[0] + NumSortedBytes;
- if (sp >= BlockSize) sp -= BlockSize;
- gPrev = Groups[sp];
- temp[0] = (gPrev << NumRefBits);
- }
-
- for (j = 1; j < groupSize; j++)
- {
- UInt32 sp = ind2[j] + NumSortedBytes;
- UInt32 g;
- if (sp >= BlockSize) sp -= BlockSize;
- g = Groups[sp];
- temp[j] = (g << NumRefBits) | j;
- gRes |= (gPrev ^ g);
- }
- if (gRes == 0)
- {
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- SetGroupSize(ind2, groupSize);
- #endif
- return 1;
- }
- }
-
- HeapSort(temp, groupSize);
- mask = (((UInt32)1 << NumRefBits) - 1);
- thereAreGroups = 0;
-
- group = groupOffset;
- cg = (temp[0] >> NumRefBits);
- temp[0] = ind2[temp[0] & mask];
-
- {
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- UInt32 *Flags = Groups + BlockSize;
- #else
- UInt32 prevGroupStart = 0;
- #endif
-
- for (j = 1; j < groupSize; j++)
- {
- UInt32 val = temp[j];
- UInt32 cgCur = (val >> NumRefBits);
-
- if (cgCur != cg)
- {
- cg = cgCur;
- group = groupOffset + j;
-
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- {
- UInt32 t = group - 1;
- Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
- }
- #else
- SetGroupSize(temp + prevGroupStart, j - prevGroupStart);
- prevGroupStart = j;
- #endif
- }
- else
- thereAreGroups = 1;
- {
- UInt32 ind = ind2[val & mask];
- temp[j] = ind;
- Groups[ind] = group;
- }
- }
-
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- SetGroupSize(temp + prevGroupStart, j - prevGroupStart);
- #endif
- }
-
- for (j = 0; j < groupSize; j++)
- ind2[j] = temp[j];
- return thereAreGroups;
- }
-
- /* Check that all strings are in one group (cannot sort) */
- {
- UInt32 group, j;
- UInt32 sp = ind2[0] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
- group = Groups[sp];
- for (j = 1; j < groupSize; j++)
- {
- sp = ind2[j] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
- if (Groups[sp] != group)
- break;
- }
- if (j == groupSize)
- {
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- SetGroupSize(ind2, groupSize);
- #endif
- return 1;
- }
- }
-
- #ifndef BLOCK_SORT_USE_HEAP_SORT
- {
- /* ---------- Range Sort ---------- */
- UInt32 i;
- UInt32 mid;
- for (;;)
- {
- UInt32 j;
- if (range <= 1)
- {
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- SetGroupSize(ind2, groupSize);
- #endif
- return 1;
- }
- mid = left + ((range + 1) >> 1);
- j = groupSize;
- i = 0;
- do
- {
- UInt32 sp = ind2[i] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
- if (Groups[sp] >= mid)
- {
- for (j--; j > i; j--)
- {
- sp = ind2[j] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
- if (Groups[sp] < mid)
- {
- UInt32 temp = ind2[i]; ind2[i] = ind2[j]; ind2[j] = temp;
- break;
- }
- }
- if (i >= j)
- break;
- }
- }
- while (++i < j);
- if (i == 0)
- {
- range = range - (mid - left);
- left = mid;
- }
- else if (i == groupSize)
- range = (mid - left);
- else
- break;
- }
-
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- {
- UInt32 t = (groupOffset + i - 1);
- UInt32 *Flags = Groups + BlockSize;
- Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
- }
- #endif
-
- {
- UInt32 j;
- for (j = i; j < groupSize; j++)
- Groups[ind2[j]] = groupOffset + i;
- }
-
- {
- UInt32 res = SortGroup(BlockSize, NumSortedBytes, groupOffset, i, NumRefBits, Indices, left, mid - left);
- return res | SortGroup(BlockSize, NumSortedBytes, groupOffset + i, groupSize - i, NumRefBits, Indices, mid, range - (mid - left));
- }
-
- }
-
- #else
-
- /* ---------- Heap Sort ---------- */
-
- {
- UInt32 j;
- for (j = 0; j < groupSize; j++)
- {
- UInt32 sp = ind2[j] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
- ind2[j] = sp;
- }
-
- HeapSortRef(ind2, Groups, groupSize);
-
- /* Write Flags */
- {
- UInt32 sp = ind2[0];
- UInt32 group = Groups[sp];
-
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- UInt32 *Flags = Groups + BlockSize;
- #else
- UInt32 prevGroupStart = 0;
- #endif
-
- for (j = 1; j < groupSize; j++)
- {
- sp = ind2[j];
- if (Groups[sp] != group)
- {
- group = Groups[sp];
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- {
- UInt32 t = groupOffset + j - 1;
- Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
- }
- #else
- SetGroupSize(ind2 + prevGroupStart, j - prevGroupStart);
- prevGroupStart = j;
- #endif
- }
- }
-
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- SetGroupSize(ind2 + prevGroupStart, j - prevGroupStart);
- #endif
- }
- {
- /* Write new Groups values and Check that there are groups */
- UInt32 thereAreGroups = 0;
- for (j = 0; j < groupSize; j++)
- {
- UInt32 group = groupOffset + j;
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- UInt32 subGroupSize = ((ind2[j] & ~0xC0000000) >> kNumBitsMax);
- if ((ind2[j] & 0x40000000) != 0)
- subGroupSize += ((ind2[(size_t)j + 1] >> kNumBitsMax) << kNumExtra0Bits);
- subGroupSize++;
- for (;;)
- {
- UInt32 original = ind2[j];
- UInt32 sp = original & kIndexMask;
- if (sp < NumSortedBytes) sp += BlockSize; sp -= NumSortedBytes;
- ind2[j] = sp | (original & ~kIndexMask);
- Groups[sp] = group;
- if (--subGroupSize == 0)
- break;
- j++;
- thereAreGroups = 1;
- }
- #else
- UInt32 *Flags = Groups + BlockSize;
- for (;;)
- {
- UInt32 sp = ind2[j]; if (sp < NumSortedBytes) sp += BlockSize; sp -= NumSortedBytes;
- ind2[j] = sp;
- Groups[sp] = group;
- if ((Flags[(groupOffset + j) >> kNumFlagsBits] & (1 << ((groupOffset + j) & kFlagsMask))) == 0)
- break;
- j++;
- thereAreGroups = 1;
- }
- #endif
- }
- return thereAreGroups;
- }
- }
- #endif
-}
-
-/* conditions: blockSize > 0 */
-UInt32 BlockSort(UInt32 *Indices, const Byte *data, UInt32 blockSize)
-{
- UInt32 *counters = Indices + blockSize;
- UInt32 i;
- UInt32 *Groups;
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- UInt32 *Flags;
- #endif
-
- /* Radix-Sort for 2 bytes */
- for (i = 0; i < kNumHashValues; i++)
- counters[i] = 0;
- for (i = 0; i < blockSize - 1; i++)
- counters[((UInt32)data[i] << 8) | data[(size_t)i + 1]]++;
- counters[((UInt32)data[i] << 8) | data[0]]++;
-
- Groups = counters + BS_TEMP_SIZE;
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- Flags = Groups + blockSize;
- {
- UInt32 numWords = (blockSize + kFlagsMask) >> kNumFlagsBits;
- for (i = 0; i < numWords; i++)
- Flags[i] = kAllFlags;
- }
- #endif
-
- {
- UInt32 sum = 0;
- for (i = 0; i < kNumHashValues; i++)
- {
- UInt32 groupSize = counters[i];
- if (groupSize > 0)
- {
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
- UInt32 t = sum + groupSize - 1;
- Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
- #endif
- sum += groupSize;
- }
- counters[i] = sum - groupSize;
- }
-
- for (i = 0; i < blockSize - 1; i++)
- Groups[i] = counters[((UInt32)data[i] << 8) | data[(size_t)i + 1]];
- Groups[i] = counters[((UInt32)data[i] << 8) | data[0]];
-
- for (i = 0; i < blockSize - 1; i++)
- Indices[counters[((UInt32)data[i] << 8) | data[(size_t)i + 1]]++] = i;
- Indices[counters[((UInt32)data[i] << 8) | data[0]]++] = i;
-
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- {
- UInt32 prev = 0;
- for (i = 0; i < kNumHashValues; i++)
- {
- UInt32 prevGroupSize = counters[i] - prev;
- if (prevGroupSize == 0)
- continue;
- SetGroupSize(Indices + prev, prevGroupSize);
- prev = counters[i];
- }
- }
- #endif
- }
-
- {
- int NumRefBits;
- UInt32 NumSortedBytes;
- for (NumRefBits = 0; ((blockSize - 1) >> NumRefBits) != 0; NumRefBits++);
- NumRefBits = 32 - NumRefBits;
- if (NumRefBits > kNumRefBitsMax)
- NumRefBits = kNumRefBitsMax;
-
- for (NumSortedBytes = kNumHashBytes; ; NumSortedBytes <<= 1)
- {
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- UInt32 finishedGroupSize = 0;
- #endif
- UInt32 newLimit = 0;
- for (i = 0; i < blockSize;)
- {
- UInt32 groupSize;
- #ifdef BLOCK_SORT_EXTERNAL_FLAGS
-
- if ((Flags[i >> kNumFlagsBits] & (1 << (i & kFlagsMask))) == 0)
- {
- i++;
- continue;
- }
- for (groupSize = 1;
- (Flags[(i + groupSize) >> kNumFlagsBits] & (1 << ((i + groupSize) & kFlagsMask))) != 0;
- groupSize++);
-
- groupSize++;
-
- #else
-
- groupSize = ((Indices[i] & ~0xC0000000) >> kNumBitsMax);
- {
- BoolInt finishedGroup = ((Indices[i] & 0x80000000) == 0);
- if ((Indices[i] & 0x40000000) != 0)
- {
- groupSize += ((Indices[(size_t)i + 1] >> kNumBitsMax) << kNumExtra0Bits);
- Indices[(size_t)i + 1] &= kIndexMask;
- }
- Indices[i] &= kIndexMask;
- groupSize++;
- if (finishedGroup || groupSize == 1)
- {
- Indices[i - finishedGroupSize] &= kIndexMask;
- if (finishedGroupSize > 1)
- Indices[(size_t)(i - finishedGroupSize) + 1] &= kIndexMask;
- {
- UInt32 newGroupSize = groupSize + finishedGroupSize;
- SetFinishedGroupSize(Indices + i - finishedGroupSize, newGroupSize)
- finishedGroupSize = newGroupSize;
- }
- i += groupSize;
- continue;
- }
- finishedGroupSize = 0;
- }
-
- #endif
-
- if (NumSortedBytes >= blockSize)
- {
- UInt32 j;
- for (j = 0; j < groupSize; j++)
- {
- UInt32 t = (i + j);
- /* Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask)); */
- Groups[Indices[t]] = t;
- }
- }
- else
- if (SortGroup(blockSize, NumSortedBytes, i, groupSize, NumRefBits, Indices
- #ifndef BLOCK_SORT_USE_HEAP_SORT
- , 0, blockSize
- #endif
- ) != 0)
- newLimit = i + groupSize;
- i += groupSize;
- }
- if (newLimit == 0)
- break;
- }
- }
- #ifndef BLOCK_SORT_EXTERNAL_FLAGS
- for (i = 0; i < blockSize;)
- {
- UInt32 groupSize = ((Indices[i] & ~0xC0000000) >> kNumBitsMax);
- if ((Indices[i] & 0x40000000) != 0)
- {
- groupSize += ((Indices[(size_t)i + 1] >> kNumBitsMax) << kNumExtra0Bits);
- Indices[(size_t)i + 1] &= kIndexMask;
- }
- Indices[i] &= kIndexMask;
- groupSize++;
- i += groupSize;
- }
- #endif
- return Groups[0];
-}
diff --git a/3rdparty/7z/src/BwtSort.h b/3rdparty/7z/src/BwtSort.h
deleted file mode 100644
index a34b243a33..0000000000
--- a/3rdparty/7z/src/BwtSort.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* BwtSort.h -- BWT block sorting
-2023-03-03 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_BWT_SORT_H
-#define ZIP7_INC_BWT_SORT_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-/* use BLOCK_SORT_EXTERNAL_FLAGS if blockSize can be > 1M */
-/* #define BLOCK_SORT_EXTERNAL_FLAGS */
-
-#ifdef BLOCK_SORT_EXTERNAL_FLAGS
-#define BLOCK_SORT_EXTERNAL_SIZE(blockSize) ((((blockSize) + 31) >> 5))
-#else
-#define BLOCK_SORT_EXTERNAL_SIZE(blockSize) 0
-#endif
-
-#define BLOCK_SORT_BUF_SIZE(blockSize) ((blockSize) * 2 + BLOCK_SORT_EXTERNAL_SIZE(blockSize) + (1 << 16))
-
-UInt32 BlockSort(UInt32 *indices, const Byte *data, UInt32 blockSize);
-
-EXTERN_C_END
-
-#endif
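BwtSort.h only declares the entry point, so here is a hedged sketch of how a caller is expected to size and use the work buffer; note that BLOCK_SORT_BUF_SIZE counts UInt32 elements, not bytes. The allocation strategy, wrapper name and error convention are assumptions; only BLOCK_SORT_BUF_SIZE and BlockSort come from the removed header.

    #include <stdlib.h>
    #include "BwtSort.h"

    /* Sketch: sort one BWT block and return the value reported by BlockSort,
       or an all-ones value if the work buffer cannot be allocated. */
    static UInt32 Bwt_SortBlock(const Byte *block, UInt32 blockSize)
    {
      UInt32 res = (UInt32)0 - 1;
      /* BLOCK_SORT_BUF_SIZE() is a count of UInt32 elements */
      UInt32 *indices = (UInt32 *)malloc(sizeof(UInt32) * BLOCK_SORT_BUF_SIZE(blockSize));
      if (indices && blockSize != 0)
        res = BlockSort(indices, block, blockSize);
      free(indices);
      return res;
    }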
diff --git a/3rdparty/7z/src/Compiler.h b/3rdparty/7z/src/Compiler.h
deleted file mode 100644
index ca4618b394..0000000000
--- a/3rdparty/7z/src/Compiler.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* Compiler.h : Compiler specific defines and pragmas
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_COMPILER_H
-#define ZIP7_INC_COMPILER_H
-
-#if defined(__clang__)
-# define Z7_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
-#endif
-#if defined(__clang__) && defined(__apple_build_version__)
-# define Z7_APPLE_CLANG_VERSION Z7_CLANG_VERSION
-#elif defined(__clang__)
-# define Z7_LLVM_CLANG_VERSION Z7_CLANG_VERSION
-#elif defined(__GNUC__)
-# define Z7_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#endif
-
-#ifdef _MSC_VER
-#if !defined(__clang__) && !defined(__GNUC__)
-#define Z7_MSC_VER_ORIGINAL _MSC_VER
-#endif
-#endif
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-#define Z7_MINGW
-#endif
-
-// #pragma GCC diagnostic ignored "-Wunknown-pragmas"
-
-#ifdef __clang__
-// padding size of '' with 4 bytes to alignment boundary
-#pragma GCC diagnostic ignored "-Wpadded"
-#endif
-
-
-#ifdef _MSC_VER
-
- #ifdef UNDER_CE
- #define RPC_NO_WINDOWS_H
- /* #pragma warning(disable : 4115) // '_RPC_ASYNC_STATE' : named type definition in parentheses */
- #pragma warning(disable : 4201) // nonstandard extension used : nameless struct/union
- #pragma warning(disable : 4214) // nonstandard extension used : bit field types other than int
- #endif
-
-#if defined(_MSC_VER) && _MSC_VER >= 1800
-#pragma warning(disable : 4464) // relative include path contains '..'
-#endif
-
-// == 1200 : -O1 : for __forceinline
-// >= 1900 : -O1 : for printf
-#pragma warning(disable : 4710) // function not inlined
-
-#if _MSC_VER < 1900
-// winnt.h: 'Int64ShllMod32'
-#pragma warning(disable : 4514) // unreferenced inline function has been removed
-#endif
-
-#if _MSC_VER < 1300
-// #pragma warning(disable : 4702) // unreachable code
-// Bra.c : -O1:
-#pragma warning(disable : 4714) // function marked as __forceinline not inlined
-#endif
-
-/*
-#if _MSC_VER > 1400 && _MSC_VER <= 1900
-// strcat: This function or variable may be unsafe
-// sysinfoapi.h: kit10: GetVersion was declared deprecated
-#pragma warning(disable : 4996)
-#endif
-*/
-
-#if _MSC_VER > 1200
-// -Wall warnings
-
-#pragma warning(disable : 4711) // function selected for automatic inline expansion
-#pragma warning(disable : 4820) // '2' bytes padding added after data member
-
-#if _MSC_VER >= 1400 && _MSC_VER < 1920
-// 1400: string.h: _DBG_MEMCPY_INLINE_
-// 1600 - 191x : smmintrin.h __cplusplus'
-// is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
-#pragma warning(disable : 4668)
-
-// 1400 - 1600 : WinDef.h : 'FARPROC' :
-// 1900 - 191x : immintrin.h: _readfsbase_u32
-// no function prototype given : converting '()' to '(void)'
-#pragma warning(disable : 4255)
-#endif
-
-#if _MSC_VER >= 1914
-// Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified
-#pragma warning(disable : 5045)
-#endif
-
-#endif // _MSC_VER > 1200
-#endif // _MSC_VER
-
-
-#if defined(__clang__) && (__clang_major__ >= 4)
- #define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE \
- _Pragma("clang loop unroll(disable)") \
- _Pragma("clang loop vectorize(disable)")
- #define Z7_ATTRIB_NO_VECTORIZE
-#elif defined(__GNUC__) && (__GNUC__ >= 5)
- #define Z7_ATTRIB_NO_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
- // __attribute__((optimize("no-unroll-loops")));
- #define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
-#elif defined(_MSC_VER) && (_MSC_VER >= 1920)
- #define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE \
- _Pragma("loop( no_vector )")
- #define Z7_ATTRIB_NO_VECTORIZE
-#else
- #define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- #define Z7_ATTRIB_NO_VECTORIZE
-#endif
-
-#if defined(MY_CPU_X86_OR_AMD64) && ( \
- defined(__clang__) && (__clang_major__ >= 4) \
- || defined(__GNUC__) && (__GNUC__ >= 5))
- #define Z7_ATTRIB_NO_SSE __attribute__((__target__("no-sse")))
-#else
- #define Z7_ATTRIB_NO_SSE
-#endif
-
-#define Z7_ATTRIB_NO_VECTOR \
- Z7_ATTRIB_NO_VECTORIZE \
- Z7_ATTRIB_NO_SSE
-
-
-#if defined(__clang__) && (__clang_major__ >= 8) \
- || defined(__GNUC__) && (__GNUC__ >= 1000) \
- /* || defined(_MSC_VER) && (_MSC_VER >= 1920) */
- // GCC is not good for __builtin_expect()
- #define Z7_LIKELY(x) (__builtin_expect((x), 1))
- #define Z7_UNLIKELY(x) (__builtin_expect((x), 0))
- // #define Z7_unlikely [[unlikely]]
- // #define Z7_likely [[likely]]
-#else
- #define Z7_LIKELY(x) (x)
- #define Z7_UNLIKELY(x) (x)
- // #define Z7_likely
-#endif
-
-
-#if (defined(Z7_CLANG_VERSION) && (Z7_CLANG_VERSION >= 36000))
-#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_RESERVED_MACRO_IDENTIFIER \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wreserved-macro-identifier\"")
-#define Z7_DIAGNOSCTIC_IGNORE_END_RESERVED_MACRO_IDENTIFIER \
- _Pragma("GCC diagnostic pop")
-#else
-#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_RESERVED_MACRO_IDENTIFIER
-#define Z7_DIAGNOSCTIC_IGNORE_END_RESERVED_MACRO_IDENTIFIER
-#endif
-
-#define UNUSED_VAR(x) (void)x;
-/* #define UNUSED_VAR(x) x=x; */
-
-#endif
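Most of Compiler.h is warning pragmas, but the Z7_LIKELY/Z7_UNLIKELY pair is what the hot loops in Bra.c rely on; on compilers without __builtin_expect the macros degrade to plain parentheses, so semantics never change. A tiny hedged sketch of the intended pattern (the helper function is hypothetical):

    #include "Compiler.h"

    /* Sketch: hint that the error branch is cold. */
    static int ReadDigit(int c)
    {
      if (Z7_UNLIKELY(c < '0' || c > '9'))
        return -1;       /* rare path */
      return c - '0';    /* common path */
    }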
diff --git a/3rdparty/7z/src/CpuArch.c b/3rdparty/7z/src/CpuArch.c
deleted file mode 100644
index 5092d380d9..0000000000
--- a/3rdparty/7z/src/CpuArch.c
+++ /dev/null
@@ -1,823 +0,0 @@
-/* CpuArch.c -- CPU specific code
-2023-05-18 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-// #include <stdio.h>
-
-#include "CpuArch.h"
-
-#ifdef MY_CPU_X86_OR_AMD64
-
-#undef NEED_CHECK_FOR_CPUID
-#if !defined(MY_CPU_AMD64)
-#define NEED_CHECK_FOR_CPUID
-#endif
-
-/*
- cpuid instruction supports (subFunction) parameter in ECX,
- that is used only with some specific (function) parameter values.
- But we always use only (subFunction==0).
-*/
-/*
- __cpuid(): MSVC and GCC/CLANG use same function/macro name
- but parameters are different.
- We use MSVC __cpuid() parameters style for our z7_x86_cpuid() function.
-*/
-
-#if defined(__GNUC__) /* && (__GNUC__ >= 10) */ \
- || defined(__clang__) /* && (__clang_major__ >= 10) */
-
-/* there were some CLANG/GCC compilers that had issues with
-   rbx(ebx) handling in asm blocks in -fPIC mode (__PIC__ is defined).
-   The compiler's <cpuid.h> contains a __cpuid() macro that is similar to our code.
- The history of __cpuid() changes in CLANG/GCC:
- GCC:
- 2007: it preserved ebx for (__PIC__ && __i386__)
- 2013: it preserved rbx and ebx for __PIC__
- 2014: it doesn't preserves rbx and ebx anymore
- we suppose that (__GNUC__ >= 5) fixed that __PIC__ ebx/rbx problem.
- CLANG:
- 2014+: it preserves rbx, but only for 64-bit code. No __PIC__ check.
- Why CLANG cares about 64-bit mode only, and doesn't care about ebx (in 32-bit)?
- Do we need __PIC__ test for CLANG or we must care about rbx even if
- __PIC__ is not defined?
-*/
-
-#define ASM_LN "\n"
-
-#if defined(MY_CPU_AMD64) && defined(__PIC__) \
- && ((defined (__GNUC__) && (__GNUC__ < 5)) || defined(__clang__))
-
-#define x86_cpuid_MACRO(p, func) { \
- __asm__ __volatile__ ( \
- ASM_LN "mov %%rbx, %q1" \
- ASM_LN "cpuid" \
- ASM_LN "xchg %%rbx, %q1" \
- : "=a" ((p)[0]), "=&r" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
-
- /* "=&r" selects free register. It can select even rbx, if that register is free.
- "=&D" for (RDI) also works, but the code can be larger with "=&D"
- "2"(0) means (subFunction = 0),
- 2 is (zero-based) index in the output constraint list "=c" (ECX). */
-
-#elif defined(MY_CPU_X86) && defined(__PIC__) \
- && ((defined (__GNUC__) && (__GNUC__ < 5)) || defined(__clang__))
-
-#define x86_cpuid_MACRO(p, func) { \
- __asm__ __volatile__ ( \
- ASM_LN "mov %%ebx, %k1" \
- ASM_LN "cpuid" \
- ASM_LN "xchg %%ebx, %k1" \
- : "=a" ((p)[0]), "=&r" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
-
-#else
-
-#define x86_cpuid_MACRO(p, func) { \
- __asm__ __volatile__ ( \
- ASM_LN "cpuid" \
- : "=a" ((p)[0]), "=b" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
-
-#endif
-
-
-void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
-{
- x86_cpuid_MACRO(p, func)
-}
-
-
-Z7_NO_INLINE
-UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
-{
- #if defined(NEED_CHECK_FOR_CPUID)
- #define EFALGS_CPUID_BIT 21
- UInt32 a;
- __asm__ __volatile__ (
- ASM_LN "pushf"
- ASM_LN "pushf"
- ASM_LN "pop %0"
- // ASM_LN "movl %0, %1"
- // ASM_LN "xorl $0x200000, %0"
- ASM_LN "btc %1, %0"
- ASM_LN "push %0"
- ASM_LN "popf"
- ASM_LN "pushf"
- ASM_LN "pop %0"
- ASM_LN "xorl (%%esp), %0"
-
- ASM_LN "popf"
- ASM_LN
- : "=&r" (a) // "=a"
- : "i" (EFALGS_CPUID_BIT)
- );
- if ((a & (1 << EFALGS_CPUID_BIT)) == 0)
- return 0;
- #endif
- {
- UInt32 p[4];
- x86_cpuid_MACRO(p, 0)
- return p[0];
- }
-}
-
-#undef ASM_LN
-
-#elif !defined(_MSC_VER)
-
-/*
-// for gcc/clang and other: we can try to use __cpuid macro:
-#include <cpuid.h>
-void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
-{
- __cpuid(func, p[0], p[1], p[2], p[3]);
-}
-UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
-{
- return (UInt32)__get_cpuid_max(0, NULL);
-}
-*/
-// for unsupported cpuid:
-void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
-{
- UNUSED_VAR(func)
- p[0] = p[1] = p[2] = p[3] = 0;
-}
-UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
-{
- return 0;
-}
-
-#else // _MSC_VER
-
-#if !defined(MY_CPU_AMD64)
-
-UInt32 __declspec(naked) Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
-{
- #if defined(NEED_CHECK_FOR_CPUID)
- #define EFALGS_CPUID_BIT 21
- __asm pushfd
- __asm pushfd
- /*
- __asm pop eax
- // __asm mov edx, eax
- __asm btc eax, EFALGS_CPUID_BIT
- __asm push eax
- */
- __asm btc dword ptr [esp], EFALGS_CPUID_BIT
- __asm popfd
- __asm pushfd
- __asm pop eax
- // __asm xor eax, edx
- __asm xor eax, [esp]
- // __asm push edx
- __asm popfd
- __asm and eax, (1 shl EFALGS_CPUID_BIT)
- __asm jz end_func
- #endif
- __asm push ebx
- __asm xor eax, eax // func
- __asm xor ecx, ecx // subFunction (optional) for (func == 0)
- __asm cpuid
- __asm pop ebx
- #if defined(NEED_CHECK_FOR_CPUID)
- end_func:
- #endif
- __asm ret 0
-}
-
-void __declspec(naked) Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
-{
- UNUSED_VAR(p)
- UNUSED_VAR(func)
- __asm push ebx
- __asm push edi
- __asm mov edi, ecx // p
- __asm mov eax, edx // func
- __asm xor ecx, ecx // subfunction (optional) for (func == 0)
- __asm cpuid
- __asm mov [edi ], eax
- __asm mov [edi + 4], ebx
- __asm mov [edi + 8], ecx
- __asm mov [edi + 12], edx
- __asm pop edi
- __asm pop ebx
- __asm ret 0
-}
-
-#else // MY_CPU_AMD64
-
- #if _MSC_VER >= 1600
- #include <intrin.h>
- #define MY_cpuidex __cpuidex
- #else
-/*
- __cpuid (func == (0 or 7)) requires subfunction number in ECX.
- MSDN: The __cpuid intrinsic clears the ECX register before calling the cpuid instruction.
- __cpuid() in new MSVC clears ECX.
- __cpuid() in old MSVC (14.00) x64 doesn't clear ECX
- We still can use __cpuid for low (func) values that don't require ECX,
- but __cpuid() in old MSVC will be incorrect for some func values: (func == 7).
- So here we use a hack for old MSVC to send (subFunction) in the ECX register to the cpuid instruction:
- the ECX value is the first parameter of a FASTCALL / NO_INLINE function,
- so the caller of MY_cpuidex_HACK() sets ECX to subFunction, and
- old MSVC's __cpuid() doesn't change ECX, so the cpuid instruction gets the (subFunction) value.
-
-DON'T remove Z7_NO_INLINE and Z7_FASTCALL for MY_cpuidex_HACK(): !!!
-*/
-static
-Z7_NO_INLINE void Z7_FASTCALL MY_cpuidex_HACK(UInt32 subFunction, UInt32 func, int *CPUInfo)
-{
- UNUSED_VAR(subFunction)
- __cpuid(CPUInfo, func);
-}
- #define MY_cpuidex(info, func, func2) MY_cpuidex_HACK(func2, func, info)
- #pragma message("======== MY_cpuidex_HACK WAS USED ========")
- #endif // _MSC_VER >= 1600
-
-#if !defined(MY_CPU_AMD64)
-/* inlining for __cpuid() in MSVC x86 (32-bit) produces big ineffective code,
- so we disable inlining here */
-Z7_NO_INLINE
-#endif
-void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
-{
- MY_cpuidex((int *)p, (int)func, 0);
-}
-
-Z7_NO_INLINE
-UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
-{
- int a[4];
- MY_cpuidex(a, 0, 0);
- return a[0];
-}
-
-#endif // MY_CPU_AMD64
-#endif // _MSC_VER
-
-#if defined(NEED_CHECK_FOR_CPUID)
-#define CHECK_CPUID_IS_SUPPORTED { if (z7_x86_cpuid_GetMaxFunc() == 0) return 0; }
-#else
-#define CHECK_CPUID_IS_SUPPORTED
-#endif
-#undef NEED_CHECK_FOR_CPUID
-
-
-static
-BoolInt x86cpuid_Func_1(UInt32 *p)
-{
- CHECK_CPUID_IS_SUPPORTED
- z7_x86_cpuid(p, 1);
- return True;
-}
-
-/*
-static const UInt32 kVendors[][1] =
-{
- { 0x756E6547 }, // , 0x49656E69, 0x6C65746E },
- { 0x68747541 }, // , 0x69746E65, 0x444D4163 },
- { 0x746E6543 } // , 0x48727561, 0x736C7561 }
-};
-*/
-
-/*
-typedef struct
-{
- UInt32 maxFunc;
- UInt32 vendor[3];
- UInt32 ver;
- UInt32 b;
- UInt32 c;
- UInt32 d;
-} Cx86cpuid;
-
-enum
-{
- CPU_FIRM_INTEL,
- CPU_FIRM_AMD,
- CPU_FIRM_VIA
-};
-int x86cpuid_GetFirm(const Cx86cpuid *p);
-#define x86cpuid_ver_GetFamily(ver) (((ver >> 16) & 0xff0) | ((ver >> 8) & 0xf))
-#define x86cpuid_ver_GetModel(ver) (((ver >> 12) & 0xf0) | ((ver >> 4) & 0xf))
-#define x86cpuid_ver_GetStepping(ver) (ver & 0xf)
-
-int x86cpuid_GetFirm(const Cx86cpuid *p)
-{
- unsigned i;
- for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[0]); i++)
- {
- const UInt32 *v = kVendors[i];
- if (v[0] == p->vendor[0]
- // && v[1] == p->vendor[1]
- // && v[2] == p->vendor[2]
- )
- return (int)i;
- }
- return -1;
-}
-
-BoolInt CPU_Is_InOrder()
-{
- Cx86cpuid p;
- UInt32 family, model;
- if (!x86cpuid_CheckAndRead(&p))
- return True;
-
- family = x86cpuid_ver_GetFamily(p.ver);
- model = x86cpuid_ver_GetModel(p.ver);
-
- switch (x86cpuid_GetFirm(&p))
- {
- case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && (
- // In-Order Atom CPU
- model == 0x1C // 45 nm, N4xx, D4xx, N5xx, D5xx, 230, 330
- || model == 0x26 // 45 nm, Z6xx
- || model == 0x27 // 32 nm, Z2460
- || model == 0x35 // 32 nm, Z2760
- || model == 0x36 // 32 nm, N2xxx, D2xxx
- )));
- case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
- case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
- }
- return False; // v23 : unknown processors are not In-Order
-}
-*/
-
-#ifdef _WIN32
-#include "7zWindows.h"
-#endif
-
-#if !defined(MY_CPU_AMD64) && defined(_WIN32)
-
-/* for legacy SSE ia32: there is no user-space cpu instruction to check
- that OS supports SSE register storing/restoring on context switches.
- So we need some OS-specific function to check that it's safe to use SSE registers.
-*/
-
-Z7_FORCE_INLINE
-static BoolInt CPU_Sys_Is_SSE_Supported(void)
-{
-#ifdef _MSC_VER
- #pragma warning(push)
- #pragma warning(disable : 4996) // `GetVersion': was declared deprecated
-#endif
- /* low byte is major version of Windows
- We suppose that any Windows version since
- Windows2000 (major == 5) supports SSE registers */
- return (Byte)GetVersion() >= 5;
-#if defined(_MSC_VER)
- #pragma warning(pop)
-#endif
-}
-#define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
-#else
-#define CHECK_SYS_SSE_SUPPORT
-#endif
-
-
-#if !defined(MY_CPU_AMD64)
-
-BoolInt CPU_IsSupported_CMOV(void)
-{
- UInt32 a[4];
- if (!x86cpuid_Func_1(&a[0]))
- return 0;
- return (a[3] >> 15) & 1;
-}
-
-BoolInt CPU_IsSupported_SSE(void)
-{
- UInt32 a[4];
- CHECK_SYS_SSE_SUPPORT
- if (!x86cpuid_Func_1(&a[0]))
- return 0;
- return (a[3] >> 25) & 1;
-}
-
-BoolInt CPU_IsSupported_SSE2(void)
-{
- UInt32 a[4];
- CHECK_SYS_SSE_SUPPORT
- if (!x86cpuid_Func_1(&a[0]))
- return 0;
- return (a[3] >> 26) & 1;
-}
-
-#endif
-
-
-static UInt32 x86cpuid_Func_1_ECX(void)
-{
- UInt32 a[4];
- CHECK_SYS_SSE_SUPPORT
- if (!x86cpuid_Func_1(&a[0]))
- return 0;
- return a[2];
-}
-
-BoolInt CPU_IsSupported_AES(void)
-{
- return (x86cpuid_Func_1_ECX() >> 25) & 1;
-}
-
-BoolInt CPU_IsSupported_SSSE3(void)
-{
- return (x86cpuid_Func_1_ECX() >> 9) & 1;
-}
-
-BoolInt CPU_IsSupported_SSE41(void)
-{
- return (x86cpuid_Func_1_ECX() >> 19) & 1;
-}
-
-BoolInt CPU_IsSupported_SHA(void)
-{
- CHECK_SYS_SSE_SUPPORT
-
- if (z7_x86_cpuid_GetMaxFunc() < 7)
- return False;
- {
- UInt32 d[4];
- z7_x86_cpuid(d, 7);
- return (d[1] >> 29) & 1;
- }
-}
-
-/*
-MSVC: _xgetbv() intrinsic is available since VS2010SP1.
- MSVC also defines the (_XCR_XFEATURE_ENABLED_MASK) macro in <immintrin.h>
- that we can use or check.
- For any 32-bit x86 we can use asm code in MSVC,
- but MSVC asm code is huge after compilation.
- So _xgetbv() is better
-
-ICC: _xgetbv() intrinsic is available (in what version of ICC?)
-  ICC defines (__GNUC__) and it supports the gnu assembler;
-  ICC also supports MASM-style code with the -use-msasm switch,
-  but ICC doesn't support __attribute__((__target__))
-
-GCC/CLANG 9:
-  _xgetbv() is a macro that works via __builtin_ia32_xgetbv()
-  and we need __attribute__((__target__("xsave")).
-  But with __target__("xsave") the function will not be
-  inlined into a function that has no __target__("xsave") attribute.
-  If we want _xgetbv() call inlining, then we should use the asm version
-  instead of calling _xgetbv().
-  Note: the intrinsic was broken before GCC 8.2:
- https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85684
-*/
-
-#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) \
- || defined(_MSC_VER) && (_MSC_VER >= 1600) && (_MSC_FULL_VER >= 160040219) \
- || defined(__GNUC__) && (__GNUC__ >= 9) \
- || defined(__clang__) && (__clang_major__ >= 9)
-// we define ATTRIB_XGETBV, if we want to use predefined _xgetbv() from compiler
-#if defined(__INTEL_COMPILER)
-#define ATTRIB_XGETBV
-#elif defined(__GNUC__) || defined(__clang__)
-// we don't define ATTRIB_XGETBV here, because asm version is better for inlining.
-// #define ATTRIB_XGETBV __attribute__((__target__("xsave")))
-#else
-#define ATTRIB_XGETBV
-#endif
-#endif
-
-#if defined(ATTRIB_XGETBV)
-#include <immintrin.h>
-#endif
-
-
-// XFEATURE_ENABLED_MASK/XCR0
-#define MY_XCR_XFEATURE_ENABLED_MASK 0
-
-#if defined(ATTRIB_XGETBV)
-ATTRIB_XGETBV
-#endif
-static UInt64 x86_xgetbv_0(UInt32 num)
-{
-#if defined(ATTRIB_XGETBV)
- {
- return
- #if (defined(_MSC_VER))
- _xgetbv(num);
- #else
- __builtin_ia32_xgetbv(
- #if !defined(__clang__)
- (int)
- #endif
- num);
- #endif
- }
-
-#elif defined(__GNUC__) || defined(__clang__) || defined(__SUNPRO_CC)
-
- UInt32 a, d;
- #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
- __asm__
- (
- "xgetbv"
- : "=a"(a), "=d"(d) : "c"(num) : "cc"
- );
- #else // is old gcc
- __asm__
- (
- ".byte 0x0f, 0x01, 0xd0" "\n\t"
- : "=a"(a), "=d"(d) : "c"(num) : "cc"
- );
- #endif
- return ((UInt64)d << 32) | a;
- // return a;
-
-#elif defined(_MSC_VER) && !defined(MY_CPU_AMD64)
-
- UInt32 a, d;
- __asm {
- push eax
- push edx
- push ecx
- mov ecx, num;
- // xor ecx, ecx // = MY_XCR_XFEATURE_ENABLED_MASK
- _emit 0x0f
- _emit 0x01
- _emit 0xd0
- mov a, eax
- mov d, edx
- pop ecx
- pop edx
- pop eax
- }
- return ((UInt64)d << 32) | a;
- // return a;
-
-#else // it's unknown compiler
- // #error "Need xgetbv function"
- UNUSED_VAR(num)
- // for MSVC-X64 we could call external function from external file.
- /* Actually we had checked OSXSAVE/AVX in cpuid before.
- So it's expected that OS supports at least AVX and below. */
- // if (num != MY_XCR_XFEATURE_ENABLED_MASK) return 0; // if not XCR0
- return
- // (1 << 0) | // x87
- (1 << 1) // SSE
- | (1 << 2); // AVX
-
-#endif
-}
-
-#ifdef _WIN32
-/*
-  Windows versions do not know about new ISA extensions that
-  can be introduced later. We can still use those new extensions,
-  even if Windows doesn't report support for them, but only if Windows
-  knows about the ISA extension that changes the number or size of
-  registers: SSE, AVX/XSAVE, AVX512.
- So it's enough to check
- MY_PF_AVX_INSTRUCTIONS_AVAILABLE
- instead of
- MY_PF_AVX2_INSTRUCTIONS_AVAILABLE
-*/
-#define MY_PF_XSAVE_ENABLED 17
-// #define MY_PF_SSSE3_INSTRUCTIONS_AVAILABLE 36
-// #define MY_PF_SSE4_1_INSTRUCTIONS_AVAILABLE 37
-// #define MY_PF_SSE4_2_INSTRUCTIONS_AVAILABLE 38
-// #define MY_PF_AVX_INSTRUCTIONS_AVAILABLE 39
-// #define MY_PF_AVX2_INSTRUCTIONS_AVAILABLE 40
-// #define MY_PF_AVX512F_INSTRUCTIONS_AVAILABLE 41
-#endif
-
-BoolInt CPU_IsSupported_AVX(void)
-{
- #ifdef _WIN32
- if (!IsProcessorFeaturePresent(MY_PF_XSAVE_ENABLED))
- return False;
-  /* PF_AVX_INSTRUCTIONS_AVAILABLE is probably supported starting from
-     some recent Win10 revisions. But we need AVX in older Windows also.
- So we don't use the following check: */
- /*
- if (!IsProcessorFeaturePresent(MY_PF_AVX_INSTRUCTIONS_AVAILABLE))
- return False;
- */
- #endif
-
- /*
-  The OS must use the special XSAVE/XRSTOR instructions to save
-  AVX registers when required for context switching.
-  At OS startup:
-  the OS sets the CR4.OSXSAVE flag to signal the processor that the OS supports the XSAVE extensions.
- Also OS sets bitmask in XCR0 register that defines what
- registers will be processed by XSAVE instruction:
- XCR0.SSE[bit 0] - x87 registers and state
- XCR0.SSE[bit 1] - SSE registers and state
- XCR0.AVX[bit 2] - AVX registers and state
- CR4.OSXSAVE is reflected to CPUID.1:ECX.OSXSAVE[bit 27].
- So we can read that bit in user-space.
- XCR0 is available for reading in user-space by new XGETBV instruction.
- */
- {
- const UInt32 c = x86cpuid_Func_1_ECX();
- if (0 == (1
- & (c >> 28) // AVX instructions are supported by hardware
- & (c >> 27))) // OSXSAVE bit: XSAVE and related instructions are enabled by OS.
- return False;
- }
-
- /* also we can check
- CPUID.1:ECX.XSAVE [bit 26] : that shows that
- XSAVE, XRESTOR, XSETBV, XGETBV instructions are supported by hardware.
- But that check is redundant, because if OSXSAVE bit is set, then XSAVE is also set */
-
-  /* If the OS has enabled the XSAVE extension instructions (OSXSAVE == 1),
-     in most cases we expect that the OS also supports storing/restoring
-     at least the AVX and SSE states.
-     But to be sure of that, we call the user-space instruction
-     XGETBV(0) to get the XCR0 value, a bitmask that defines
-     which exact states (registers) the OS has enabled for storing/restoring.
- */
-
- {
- const UInt32 bm = (UInt32)x86_xgetbv_0(MY_XCR_XFEATURE_ENABLED_MASK);
- // printf("\n=== XGetBV=%d\n", bm);
- return 1
- & (bm >> 1) // SSE state is supported (set by OS) for storing/restoring
- & (bm >> 2); // AVX state is supported (set by OS) for storing/restoring
- }
- // since Win7SP1: we can use GetEnabledXStateFeatures();
-}
-
-
-BoolInt CPU_IsSupported_AVX2(void)
-{
- if (!CPU_IsSupported_AVX())
- return False;
- if (z7_x86_cpuid_GetMaxFunc() < 7)
- return False;
- {
- UInt32 d[4];
- z7_x86_cpuid(d, 7);
- // printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
- return 1
- & (d[1] >> 5); // avx2
- }
-}
-
-BoolInt CPU_IsSupported_VAES_AVX2(void)
-{
- if (!CPU_IsSupported_AVX())
- return False;
- if (z7_x86_cpuid_GetMaxFunc() < 7)
- return False;
- {
- UInt32 d[4];
- z7_x86_cpuid(d, 7);
- // printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
- return 1
- & (d[1] >> 5) // avx2
- // & (d[1] >> 31) // avx512vl
- & (d[2] >> 9); // vaes // VEX-256/EVEX
- }
-}
-
-BoolInt CPU_IsSupported_PageGB(void)
-{
- CHECK_CPUID_IS_SUPPORTED
- {
- UInt32 d[4];
- z7_x86_cpuid(d, 0x80000000);
- if (d[0] < 0x80000001)
- return False;
- z7_x86_cpuid(d, 0x80000001);
- return (d[3] >> 26) & 1;
- }
-}
-
-
-#elif defined(MY_CPU_ARM_OR_ARM64)
-
-#ifdef _WIN32
-
-#include "7zWindows.h"
-
-BoolInt CPU_IsSupported_CRC32(void) { return IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
-BoolInt CPU_IsSupported_CRYPTO(void) { return IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
-BoolInt CPU_IsSupported_NEON(void) { return IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
-
-#else
-
-#if defined(__APPLE__)
-
-/*
-#include <stdio.h>
-#include <string.h>
-static void Print_sysctlbyname(const char *name)
-{
- size_t bufSize = 256;
- char buf[256];
- int res = sysctlbyname(name, &buf, &bufSize, NULL, 0);
- {
- int i;
- printf("\nres = %d : %s : '%s' : bufSize = %d, numeric", res, name, buf, (unsigned)bufSize);
- for (i = 0; i < 20; i++)
- printf(" %2x", (unsigned)(Byte)buf[i]);
-
- }
-}
-*/
-/*
- Print_sysctlbyname("hw.pagesize");
- Print_sysctlbyname("machdep.cpu.brand_string");
-*/
-
-static BoolInt z7_sysctlbyname_Get_BoolInt(const char *name)
-{
- UInt32 val = 0;
- if (z7_sysctlbyname_Get_UInt32(name, &val) == 0 && val == 1)
- return 1;
- return 0;
-}
-
-BoolInt CPU_IsSupported_CRC32(void)
-{
- return z7_sysctlbyname_Get_BoolInt("hw.optional.armv8_crc32");
-}
-
-BoolInt CPU_IsSupported_NEON(void)
-{
- return z7_sysctlbyname_Get_BoolInt("hw.optional.neon");
-}
-
-#ifdef MY_CPU_ARM64
-#define APPLE_CRYPTO_SUPPORT_VAL 1
-#else
-#define APPLE_CRYPTO_SUPPORT_VAL 0
-#endif
-
-BoolInt CPU_IsSupported_SHA1(void) { return APPLE_CRYPTO_SUPPORT_VAL; }
-BoolInt CPU_IsSupported_SHA2(void) { return APPLE_CRYPTO_SUPPORT_VAL; }
-BoolInt CPU_IsSupported_AES (void) { return APPLE_CRYPTO_SUPPORT_VAL; }
-
-
-#else // __APPLE__
-
-#include <sys/auxv.h>
-
-#define USE_HWCAP
-
-#ifdef USE_HWCAP
-
-#include <asm/hwcap.h>
-
- #define MY_HWCAP_CHECK_FUNC_2(name1, name2) \
- BoolInt CPU_IsSupported_ ## name1() { return (getauxval(AT_HWCAP) & (HWCAP_ ## name2)) ? 1 : 0; }
-
-#ifdef MY_CPU_ARM64
- #define MY_HWCAP_CHECK_FUNC(name) \
- MY_HWCAP_CHECK_FUNC_2(name, name)
- MY_HWCAP_CHECK_FUNC_2(NEON, ASIMD)
-// MY_HWCAP_CHECK_FUNC (ASIMD)
-#elif defined(MY_CPU_ARM)
- #define MY_HWCAP_CHECK_FUNC(name) \
- BoolInt CPU_IsSupported_ ## name() { return (getauxval(AT_HWCAP2) & (HWCAP2_ ## name)) ? 1 : 0; }
- MY_HWCAP_CHECK_FUNC_2(NEON, NEON)
-#endif
-
-#else // USE_HWCAP
-
- #define MY_HWCAP_CHECK_FUNC(name) \
- BoolInt CPU_IsSupported_ ## name() { return 0; }
- MY_HWCAP_CHECK_FUNC(NEON)
-
-#endif // USE_HWCAP
-
-MY_HWCAP_CHECK_FUNC (CRC32)
-MY_HWCAP_CHECK_FUNC (SHA1)
-MY_HWCAP_CHECK_FUNC (SHA2)
-MY_HWCAP_CHECK_FUNC (AES)
-
-#endif // __APPLE__
-#endif // _WIN32
-
-#endif // MY_CPU_ARM_OR_ARM64
-
-
-
-#ifdef __APPLE__
-
-#include <sys/sysctl.h>
-
-int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize)
-{
- return sysctlbyname(name, buf, bufSize, NULL, 0);
-}
-
-int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val)
-{
- size_t bufSize = sizeof(*val);
- const int res = z7_sysctlbyname_Get(name, val, &bufSize);
- if (res == 0 && bufSize != sizeof(*val))
- return EFAULT;
- return res;
-}
-
-#endif
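The CPU_IsSupported_* probes above are meant for runtime dispatch before selecting an optimized code path. A hedged sketch for an x86/x64 build (the print helper is only illustrative; the probes themselves are declared in CpuArch.h):

    #include <stdio.h>
    #include "CpuArch.h"

    /* Sketch: report a few of the runtime feature probes implemented above. */
    static void PrintCpuFeatures(void)
    {
      printf("SSSE3 : %d\n", (int)CPU_IsSupported_SSSE3());
      printf("SSE4.1: %d\n", (int)CPU_IsSupported_SSE41());
      printf("AES   : %d\n", (int)CPU_IsSupported_AES());
      printf("AVX2  : %d\n", (int)CPU_IsSupported_AVX2());
      printf("SHA   : %d\n", (int)CPU_IsSupported_SHA());
    }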
diff --git a/3rdparty/7z/src/CpuArch.h b/3rdparty/7z/src/CpuArch.h
deleted file mode 100644
index 6c1cd2ab22..0000000000
--- a/3rdparty/7z/src/CpuArch.h
+++ /dev/null
@@ -1,523 +0,0 @@
-/* CpuArch.h -- CPU specific code
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_CPU_ARCH_H
-#define ZIP7_INC_CPU_ARCH_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-/*
-MY_CPU_LE means that CPU is LITTLE ENDIAN.
-MY_CPU_BE means that CPU is BIG ENDIAN.
-If MY_CPU_LE and MY_CPU_BE are not defined, we don't know about ENDIANNESS of platform.
-
-MY_CPU_LE_UNALIGN means that CPU is LITTLE ENDIAN and CPU supports unaligned memory accesses.
-
-MY_CPU_64BIT means that processor can work with 64-bit registers.
- MY_CPU_64BIT can be used to select fast code branch
- MY_CPU_64BIT doesn't mean that (sizeof(void *) == 8)
-*/
-
-#if defined(_M_X64) \
- || defined(_M_AMD64) \
- || defined(__x86_64__) \
- || defined(__AMD64__) \
- || defined(__amd64__)
- #define MY_CPU_AMD64
- #ifdef __ILP32__
- #define MY_CPU_NAME "x32"
- #define MY_CPU_SIZEOF_POINTER 4
- #else
- #define MY_CPU_NAME "x64"
- #define MY_CPU_SIZEOF_POINTER 8
- #endif
- #define MY_CPU_64BIT
-#endif
-
-
-#if defined(_M_IX86) \
- || defined(__i386__)
- #define MY_CPU_X86
- #define MY_CPU_NAME "x86"
- /* #define MY_CPU_32BIT */
- #define MY_CPU_SIZEOF_POINTER 4
-#endif
-
-
-#if defined(_M_ARM64) \
- || defined(__AARCH64EL__) \
- || defined(__AARCH64EB__) \
- || defined(__aarch64__)
- #define MY_CPU_ARM64
- #ifdef __ILP32__
- #define MY_CPU_NAME "arm64-32"
- #define MY_CPU_SIZEOF_POINTER 4
- #else
- #define MY_CPU_NAME "arm64"
- #define MY_CPU_SIZEOF_POINTER 8
- #endif
- #define MY_CPU_64BIT
-#endif
-
-
-#if defined(_M_ARM) \
- || defined(_M_ARM_NT) \
- || defined(_M_ARMT) \
- || defined(__arm__) \
- || defined(__thumb__) \
- || defined(__ARMEL__) \
- || defined(__ARMEB__) \
- || defined(__THUMBEL__) \
- || defined(__THUMBEB__)
- #define MY_CPU_ARM
-
- #if defined(__thumb__) || defined(__THUMBEL__) || defined(_M_ARMT)
- #define MY_CPU_ARMT
- #define MY_CPU_NAME "armt"
- #else
- #define MY_CPU_ARM32
- #define MY_CPU_NAME "arm"
- #endif
- /* #define MY_CPU_32BIT */
- #define MY_CPU_SIZEOF_POINTER 4
-#endif
-
-
-#if defined(_M_IA64) \
- || defined(__ia64__)
- #define MY_CPU_IA64
- #define MY_CPU_NAME "ia64"
- #define MY_CPU_64BIT
-#endif
-
-
-#if defined(__mips64) \
- || defined(__mips64__) \
- || (defined(__mips) && (__mips == 64 || __mips == 4 || __mips == 3))
- #define MY_CPU_NAME "mips64"
- #define MY_CPU_64BIT
-#elif defined(__mips__)
- #define MY_CPU_NAME "mips"
- /* #define MY_CPU_32BIT */
-#endif
-
-
-#if defined(__ppc64__) \
- || defined(__powerpc64__) \
- || defined(__ppc__) \
- || defined(__powerpc__) \
- || defined(__PPC__) \
- || defined(_POWER)
-
-#define MY_CPU_PPC_OR_PPC64
-
-#if defined(__ppc64__) \
- || defined(__powerpc64__) \
- || defined(_LP64) \
- || defined(__64BIT__)
- #ifdef __ILP32__
- #define MY_CPU_NAME "ppc64-32"
- #define MY_CPU_SIZEOF_POINTER 4
- #else
- #define MY_CPU_NAME "ppc64"
- #define MY_CPU_SIZEOF_POINTER 8
- #endif
- #define MY_CPU_64BIT
-#else
- #define MY_CPU_NAME "ppc"
- #define MY_CPU_SIZEOF_POINTER 4
- /* #define MY_CPU_32BIT */
-#endif
-#endif
-
-
-#if defined(__riscv) \
- || defined(__riscv__)
- #if __riscv_xlen == 32
- #define MY_CPU_NAME "riscv32"
- #elif __riscv_xlen == 64
- #define MY_CPU_NAME "riscv64"
- #else
- #define MY_CPU_NAME "riscv"
- #endif
-#endif
-
-
-#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
-#define MY_CPU_X86_OR_AMD64
-#endif
-
-#if defined(MY_CPU_ARM) || defined(MY_CPU_ARM64)
-#define MY_CPU_ARM_OR_ARM64
-#endif
-
-
-#ifdef _WIN32
-
- #ifdef MY_CPU_ARM
- #define MY_CPU_ARM_LE
- #endif
-
- #ifdef MY_CPU_ARM64
- #define MY_CPU_ARM64_LE
- #endif
-
- #ifdef _M_IA64
- #define MY_CPU_IA64_LE
- #endif
-
-#endif
-
-
-#if defined(MY_CPU_X86_OR_AMD64) \
- || defined(MY_CPU_ARM_LE) \
- || defined(MY_CPU_ARM64_LE) \
- || defined(MY_CPU_IA64_LE) \
- || defined(__LITTLE_ENDIAN__) \
- || defined(__ARMEL__) \
- || defined(__THUMBEL__) \
- || defined(__AARCH64EL__) \
- || defined(__MIPSEL__) \
- || defined(__MIPSEL) \
- || defined(_MIPSEL) \
- || defined(__BFIN__) \
- || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
- #define MY_CPU_LE
-#endif
-
-#if defined(__BIG_ENDIAN__) \
- || defined(__ARMEB__) \
- || defined(__THUMBEB__) \
- || defined(__AARCH64EB__) \
- || defined(__MIPSEB__) \
- || defined(__MIPSEB) \
- || defined(_MIPSEB) \
- || defined(__m68k__) \
- || defined(__s390__) \
- || defined(__s390x__) \
- || defined(__zarch__) \
- || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
- #define MY_CPU_BE
-#endif
-
-
-#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
- #error Stop_Compiling_Bad_Endian
-#endif
-
-#if !defined(MY_CPU_LE) && !defined(MY_CPU_BE)
- #error Stop_Compiling_CPU_ENDIAN_must_be_detected_at_compile_time
-#endif
-
-#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)
- #error Stop_Compiling_Bad_32_64_BIT
-#endif
-
-#ifdef __SIZEOF_POINTER__
- #ifdef MY_CPU_SIZEOF_POINTER
- #if MY_CPU_SIZEOF_POINTER != __SIZEOF_POINTER__
- #error Stop_Compiling_Bad_MY_CPU_PTR_SIZE
- #endif
- #else
- #define MY_CPU_SIZEOF_POINTER __SIZEOF_POINTER__
- #endif
-#endif
-
-#if defined(MY_CPU_SIZEOF_POINTER) && (MY_CPU_SIZEOF_POINTER == 4)
-#if defined (_LP64)
- #error Stop_Compiling_Bad_MY_CPU_PTR_SIZE
-#endif
-#endif
-
-#ifdef _MSC_VER
- #if _MSC_VER >= 1300
- #define MY_CPU_pragma_pack_push_1 __pragma(pack(push, 1))
- #define MY_CPU_pragma_pop __pragma(pack(pop))
- #else
- #define MY_CPU_pragma_pack_push_1
- #define MY_CPU_pragma_pop
- #endif
-#else
- #ifdef __xlC__
- #define MY_CPU_pragma_pack_push_1 _Pragma("pack(1)")
- #define MY_CPU_pragma_pop _Pragma("pack()")
- #else
- #define MY_CPU_pragma_pack_push_1 _Pragma("pack(push, 1)")
- #define MY_CPU_pragma_pop _Pragma("pack(pop)")
- #endif
-#endif
-
-
-#ifndef MY_CPU_NAME
- #ifdef MY_CPU_LE
- #define MY_CPU_NAME "LE"
- #elif defined(MY_CPU_BE)
- #define MY_CPU_NAME "BE"
- #else
- /*
- #define MY_CPU_NAME ""
- */
- #endif
-#endif
-
-
-
-
-
-#ifdef __has_builtin
- #define Z7_has_builtin(x) __has_builtin(x)
-#else
- #define Z7_has_builtin(x) 0
-#endif
-
-
-#define Z7_BSWAP32_CONST(v) \
- ( (((UInt32)(v) << 24) ) \
- | (((UInt32)(v) << 8) & (UInt32)0xff0000) \
- | (((UInt32)(v) >> 8) & (UInt32)0xff00 ) \
- | (((UInt32)(v) >> 24) ))
-
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1300)
-
-#include <stdlib.h>
-
-/* Note: these macros will use bswap instruction (486), that is unsupported in 386 cpu */
-
-#pragma intrinsic(_byteswap_ushort)
-#pragma intrinsic(_byteswap_ulong)
-#pragma intrinsic(_byteswap_uint64)
-
-#define Z7_BSWAP16(v) _byteswap_ushort(v)
-#define Z7_BSWAP32(v) _byteswap_ulong (v)
-#define Z7_BSWAP64(v) _byteswap_uint64(v)
-#define Z7_CPU_FAST_BSWAP_SUPPORTED
-
-#elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
- || (defined(__clang__) && Z7_has_builtin(__builtin_bswap16))
-
-#define Z7_BSWAP16(v) __builtin_bswap16(v)
-#define Z7_BSWAP32(v) __builtin_bswap32(v)
-#define Z7_BSWAP64(v) __builtin_bswap64(v)
-#define Z7_CPU_FAST_BSWAP_SUPPORTED
-
-#else
-
-#define Z7_BSWAP16(v) ((UInt16) \
- ( ((UInt32)(v) << 8) \
- | ((UInt32)(v) >> 8) \
- ))
-
-#define Z7_BSWAP32(v) Z7_BSWAP32_CONST(v)
-
-#define Z7_BSWAP64(v) \
- ( ( ( (UInt64)(v) ) << 8 * 7 ) \
- | ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 1) ) << 8 * 5 ) \
- | ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 2) ) << 8 * 3 ) \
- | ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 3) ) << 8 * 1 ) \
- | ( ( (UInt64)(v) >> 8 * 1 ) & ((UInt32)0xff << 8 * 3) ) \
- | ( ( (UInt64)(v) >> 8 * 3 ) & ((UInt32)0xff << 8 * 2) ) \
- | ( ( (UInt64)(v) >> 8 * 5 ) & ((UInt32)0xff << 8 * 1) ) \
- | ( ( (UInt64)(v) >> 8 * 7 ) ) \
- )
-
-#endif
-
-
-
-#ifdef MY_CPU_LE
- #if defined(MY_CPU_X86_OR_AMD64) \
- || defined(MY_CPU_ARM64)
- #define MY_CPU_LE_UNALIGN
- #define MY_CPU_LE_UNALIGN_64
- #elif defined(__ARM_FEATURE_UNALIGNED)
- /* gcc9 for 32-bit arm can use LDRD instruction that requires 32-bit alignment.
- So we can't use unaligned 64-bit operations. */
- #define MY_CPU_LE_UNALIGN
- #endif
-#endif
-
-
-#ifdef MY_CPU_LE_UNALIGN
-
-#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
-#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
-#ifdef MY_CPU_LE_UNALIGN_64
-#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
-#define SetUi64(p, v) { *(UInt64 *)(void *)(p) = (v); }
-#endif
-
-#define SetUi16(p, v) { *(UInt16 *)(void *)(p) = (v); }
-#define SetUi32(p, v) { *(UInt32 *)(void *)(p) = (v); }
-
-#else
-
-#define GetUi16(p) ( (UInt16) ( \
- ((const Byte *)(p))[0] | \
- ((UInt16)((const Byte *)(p))[1] << 8) ))
-
-#define GetUi32(p) ( \
- ((const Byte *)(p))[0] | \
- ((UInt32)((const Byte *)(p))[1] << 8) | \
- ((UInt32)((const Byte *)(p))[2] << 16) | \
- ((UInt32)((const Byte *)(p))[3] << 24))
-
-#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
- _ppp_[0] = (Byte)_vvv_; \
- _ppp_[1] = (Byte)(_vvv_ >> 8); }
-
-#define SetUi32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
- _ppp_[0] = (Byte)_vvv_; \
- _ppp_[1] = (Byte)(_vvv_ >> 8); \
- _ppp_[2] = (Byte)(_vvv_ >> 16); \
- _ppp_[3] = (Byte)(_vvv_ >> 24); }
-
-#endif
-
-
-#ifndef GetUi64
-#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
-#endif
-
-#ifndef SetUi64
-#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
- SetUi32(_ppp2_ , (UInt32)_vvv2_) \
- SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)) }
-#endif
-
-
-#if defined(MY_CPU_LE_UNALIGN) && defined(Z7_CPU_FAST_BSWAP_SUPPORTED)
-
-#define GetBe32(p) Z7_BSWAP32 (*(const UInt32 *)(const void *)(p))
-#define SetBe32(p, v) { (*(UInt32 *)(void *)(p)) = Z7_BSWAP32(v); }
-
-#if defined(MY_CPU_LE_UNALIGN_64)
-#define GetBe64(p) Z7_BSWAP64 (*(const UInt64 *)(const void *)(p))
-#endif
-
-#else
-
-#define GetBe32(p) ( \
- ((UInt32)((const Byte *)(p))[0] << 24) | \
- ((UInt32)((const Byte *)(p))[1] << 16) | \
- ((UInt32)((const Byte *)(p))[2] << 8) | \
- ((const Byte *)(p))[3] )
-
-#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
- _ppp_[0] = (Byte)(_vvv_ >> 24); \
- _ppp_[1] = (Byte)(_vvv_ >> 16); \
- _ppp_[2] = (Byte)(_vvv_ >> 8); \
- _ppp_[3] = (Byte)_vvv_; }
-
-#endif
-
-#ifndef GetBe64
-#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
-#endif
-
-#ifndef GetBe16
-#define GetBe16(p) ( (UInt16) ( \
- ((UInt16)((const Byte *)(p))[0] << 8) | \
- ((const Byte *)(p))[1] ))
-#endif
-
-
-#if defined(MY_CPU_BE)
-#define Z7_CONV_BE_TO_NATIVE_CONST32(v) (v)
-#define Z7_CONV_LE_TO_NATIVE_CONST32(v) Z7_BSWAP32_CONST(v)
-#define Z7_CONV_NATIVE_TO_BE_32(v) (v)
-#elif defined(MY_CPU_LE)
-#define Z7_CONV_BE_TO_NATIVE_CONST32(v) Z7_BSWAP32_CONST(v)
-#define Z7_CONV_LE_TO_NATIVE_CONST32(v) (v)
-#define Z7_CONV_NATIVE_TO_BE_32(v) Z7_BSWAP32(v)
-#else
-#error Stop_Compiling_Unknown_Endian_CONV
-#endif
-
-
-#if defined(MY_CPU_BE)
-
-#define GetBe32a(p) (*(const UInt32 *)(const void *)(p))
-#define GetBe16a(p) (*(const UInt16 *)(const void *)(p))
-#define SetBe32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
-#define SetBe16a(p, v) { *(UInt16 *)(void *)(p) = (v); }
-
-#define GetUi32a(p) GetUi32(p)
-#define GetUi16a(p) GetUi16(p)
-#define SetUi32a(p, v) SetUi32(p, v)
-#define SetUi16a(p, v) SetUi16(p, v)
-
-#elif defined(MY_CPU_LE)
-
-#define GetUi32a(p) (*(const UInt32 *)(const void *)(p))
-#define GetUi16a(p) (*(const UInt16 *)(const void *)(p))
-#define SetUi32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
-#define SetUi16a(p, v) { *(UInt16 *)(void *)(p) = (v); }
-
-#define GetBe32a(p) GetBe32(p)
-#define GetBe16a(p) GetBe16(p)
-#define SetBe32a(p, v) SetBe32(p, v)
-#define SetBe16a(p, v) SetBe16(p, v)
-
-#else
-#error Stop_Compiling_Unknown_Endian_CPU_a
-#endif
-
-
-#if defined(MY_CPU_X86_OR_AMD64) \
- || defined(MY_CPU_ARM_OR_ARM64) \
- || defined(MY_CPU_PPC_OR_PPC64)
- #define Z7_CPU_FAST_ROTATE_SUPPORTED
-#endif
-
-
-#ifdef MY_CPU_X86_OR_AMD64
-
-void Z7_FASTCALL z7_x86_cpuid(UInt32 a[4], UInt32 function);
-UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void);
-#if defined(MY_CPU_AMD64)
-#define Z7_IF_X86_CPUID_SUPPORTED
-#else
-#define Z7_IF_X86_CPUID_SUPPORTED if (z7_x86_cpuid_GetMaxFunc())
-#endif
-
-BoolInt CPU_IsSupported_AES(void);
-BoolInt CPU_IsSupported_AVX(void);
-BoolInt CPU_IsSupported_AVX2(void);
-BoolInt CPU_IsSupported_VAES_AVX2(void);
-BoolInt CPU_IsSupported_CMOV(void);
-BoolInt CPU_IsSupported_SSE(void);
-BoolInt CPU_IsSupported_SSE2(void);
-BoolInt CPU_IsSupported_SSSE3(void);
-BoolInt CPU_IsSupported_SSE41(void);
-BoolInt CPU_IsSupported_SHA(void);
-BoolInt CPU_IsSupported_PageGB(void);
-
-#elif defined(MY_CPU_ARM_OR_ARM64)
-
-BoolInt CPU_IsSupported_CRC32(void);
-BoolInt CPU_IsSupported_NEON(void);
-
-#if defined(_WIN32)
-BoolInt CPU_IsSupported_CRYPTO(void);
-#define CPU_IsSupported_SHA1 CPU_IsSupported_CRYPTO
-#define CPU_IsSupported_SHA2 CPU_IsSupported_CRYPTO
-#define CPU_IsSupported_AES CPU_IsSupported_CRYPTO
-#else
-BoolInt CPU_IsSupported_SHA1(void);
-BoolInt CPU_IsSupported_SHA2(void);
-BoolInt CPU_IsSupported_AES(void);
-#endif
-
-#endif
-
-#if defined(__APPLE__)
-int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize);
-int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val);
-#endif
-
-EXTERN_C_END
-
-#endif
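
For reference, the endian helpers deleted above fall back to byte-wise access when unaligned loads are not known to be safe. A minimal standalone sketch of that portable little-endian path, with illustrative names that are not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise little-endian read/write: valid on any alignment and any host endianness. */
static uint32_t get_ui32_le(const unsigned char *p)
{
    return (uint32_t)p[0]
         | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16)
         | ((uint32_t)p[3] << 24);
}

static void set_ui32_le(unsigned char *p, uint32_t v)
{
    p[0] = (unsigned char)v;
    p[1] = (unsigned char)(v >> 8);
    p[2] = (unsigned char)(v >> 16);
    p[3] = (unsigned char)(v >> 24);
}

int main(void)
{
    unsigned char buf[4];
    set_ui32_le(buf, 0x11223344u);
    printf("%08x\n", (unsigned)get_ui32_le(buf)); /* prints 11223344 on any platform */
    return 0;
}
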
diff --git a/3rdparty/7z/src/Delta.c b/3rdparty/7z/src/Delta.c
deleted file mode 100644
index fc7e9fe96c..0000000000
--- a/3rdparty/7z/src/Delta.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/* Delta.c -- Delta converter
-2021-02-09 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Delta.h"
-
-void Delta_Init(Byte *state)
-{
- unsigned i;
- for (i = 0; i < DELTA_STATE_SIZE; i++)
- state[i] = 0;
-}
-
-
-void Delta_Encode(Byte *state, unsigned delta, Byte *data, SizeT size)
-{
- Byte temp[DELTA_STATE_SIZE];
-
- if (size == 0)
- return;
-
- {
- unsigned i = 0;
- do
- temp[i] = state[i];
- while (++i != delta);
- }
-
- if (size <= delta)
- {
- unsigned i = 0, k;
- do
- {
- Byte b = *data;
- *data++ = (Byte)(b - temp[i]);
- temp[i] = b;
- }
- while (++i != size);
-
- k = 0;
-
- do
- {
- if (i == delta)
- i = 0;
- state[k] = temp[i++];
- }
- while (++k != delta);
-
- return;
- }
-
- {
- Byte *p = data + size - delta;
- {
- unsigned i = 0;
- do
- state[i] = *p++;
- while (++i != delta);
- }
- {
- const Byte *lim = data + delta;
- ptrdiff_t dif = -(ptrdiff_t)delta;
-
- if (((ptrdiff_t)size + dif) & 1)
- {
- --p; *p = (Byte)(*p - p[dif]);
- }
-
- while (p != lim)
- {
- --p; *p = (Byte)(*p - p[dif]);
- --p; *p = (Byte)(*p - p[dif]);
- }
-
- dif = -dif;
-
- do
- {
- --p; *p = (Byte)(*p - temp[--dif]);
- }
- while (dif != 0);
- }
- }
-}
-
-
-void Delta_Decode(Byte *state, unsigned delta, Byte *data, SizeT size)
-{
- unsigned i;
- const Byte *lim;
-
- if (size == 0)
- return;
-
- i = 0;
- lim = data + size;
-
- if (size <= delta)
- {
- do
- *data = (Byte)(*data + state[i++]);
- while (++data != lim);
-
- for (; delta != i; state++, delta--)
- *state = state[i];
- data -= i;
- }
- else
- {
- /*
- #define B(n) b ## n
- #define I(n) Byte B(n) = state[n];
- #define U(n) { B(n) = (Byte)((B(n)) + *data++); data[-1] = (B(n)); }
- #define F(n) if (data != lim) { U(n) }
-
- if (delta == 1)
- {
- I(0)
- if ((lim - data) & 1) { U(0) }
- while (data != lim) { U(0) U(0) }
- data -= 1;
- }
- else if (delta == 2)
- {
- I(0) I(1)
- lim -= 1; while (data < lim) { U(0) U(1) }
- lim += 1; F(0)
- data -= 2;
- }
- else if (delta == 3)
- {
- I(0) I(1) I(2)
- lim -= 2; while (data < lim) { U(0) U(1) U(2) }
- lim += 2; F(0) F(1)
- data -= 3;
- }
- else if (delta == 4)
- {
- I(0) I(1) I(2) I(3)
- lim -= 3; while (data < lim) { U(0) U(1) U(2) U(3) }
- lim += 3; F(0) F(1) F(2)
- data -= 4;
- }
- else
- */
- {
- do
- {
- *data = (Byte)(*data + state[i++]);
- data++;
- }
- while (i != delta);
-
- {
- ptrdiff_t dif = -(ptrdiff_t)delta;
- do
- *data = (Byte)(*data + data[dif]);
- while (++data != lim);
- data += dif;
- }
- }
- }
-
- do
- *state++ = *data;
- while (++data != lim);
-}
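
For context, the delta filter removed here subtracts from each byte the byte `delta` positions earlier, carrying state across calls so streams can be processed in chunks. A self-contained round-trip sketch of the same idea over a single buffer, ignoring the carried state:

#include <stdio.h>
#include <string.h>

/* Forward delta over one buffer: out[i] = in[i] - in[i - delta]; bytes before the
   start are treated as zero. Decoding accumulates the differences back in order. */
static void delta_encode(unsigned char *data, size_t size, unsigned delta)
{
    for (size_t i = size; i-- > 0; )
        data[i] = (unsigned char)(data[i] - (i >= delta ? data[i - delta] : 0));
}

static void delta_decode(unsigned char *data, size_t size, unsigned delta)
{
    for (size_t i = 0; i < size; i++)
        data[i] = (unsigned char)(data[i] + (i >= delta ? data[i - delta] : 0));
}

int main(void)
{
    unsigned char buf[] = { 10, 20, 12, 22, 14, 24 };
    unsigned char orig[sizeof buf];
    memcpy(orig, buf, sizeof buf);
    delta_encode(buf, sizeof buf, 2);
    delta_decode(buf, sizeof buf, 2);
    printf("%s\n", memcmp(buf, orig, sizeof buf) == 0 ? "round-trip ok" : "mismatch");
    return 0;
}
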
diff --git a/3rdparty/7z/src/Delta.h b/3rdparty/7z/src/Delta.h
deleted file mode 100644
index be77c6c6c1..0000000000
--- a/3rdparty/7z/src/Delta.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Delta.h -- Delta converter
-2023-03-03 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_DELTA_H
-#define ZIP7_INC_DELTA_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define DELTA_STATE_SIZE 256
-
-void Delta_Init(Byte *state);
-void Delta_Encode(Byte *state, unsigned delta, Byte *data, SizeT size);
-void Delta_Decode(Byte *state, unsigned delta, Byte *data, SizeT size);
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/DllSecur.c b/3rdparty/7z/src/DllSecur.c
deleted file mode 100644
index f6c23de6f0..0000000000
--- a/3rdparty/7z/src/DllSecur.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* DllSecur.c -- DLL loading security
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#ifdef _WIN32
-
-#include "7zWindows.h"
-
-#include "DllSecur.h"
-
-#ifndef UNDER_CE
-
-#if (defined(__GNUC__) && (__GNUC__ >= 8)) || defined(__clang__)
- // #pragma GCC diagnostic ignored "-Wcast-function-type"
-#endif
-
-#if defined(__clang__) || defined(__GNUC__)
-typedef void (*Z7_voidFunction)(void);
-#define MY_CAST_FUNC (Z7_voidFunction)
-#elif defined(_MSC_VER) && _MSC_VER > 1920
-#define MY_CAST_FUNC (void *)
-// #pragma warning(disable : 4191) // 'type cast': unsafe conversion from 'FARPROC' to 'void (__cdecl *)()'
-#else
-#define MY_CAST_FUNC
-#endif
-
-typedef BOOL (WINAPI *Func_SetDefaultDllDirectories)(DWORD DirectoryFlags);
-
-#define MY_LOAD_LIBRARY_SEARCH_USER_DIRS 0x400
-#define MY_LOAD_LIBRARY_SEARCH_SYSTEM32 0x800
-
-#define DELIM "\0"
-
-static const char * const g_Dlls =
- "userenv"
- DELIM "setupapi"
- DELIM "apphelp"
- DELIM "propsys"
- DELIM "dwmapi"
- DELIM "cryptbase"
- DELIM "oleacc"
- DELIM "clbcatq"
- DELIM "version"
- #ifndef _CONSOLE
- DELIM "uxtheme"
- #endif
- DELIM;
-
-#endif
-
-#ifdef __clang__
- #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
-#if defined (_MSC_VER) && _MSC_VER >= 1900
-// sysinfoapi.h: kit10: GetVersion was declared deprecated
-#pragma warning(disable : 4996)
-#endif
-
-#define IF_NON_VISTA_SET_DLL_DIRS_AND_RETURN \
- if ((UInt16)GetVersion() != 6) { \
- const \
- Func_SetDefaultDllDirectories setDllDirs = \
- (Func_SetDefaultDllDirectories) MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), \
- "SetDefaultDllDirectories"); \
- if (setDllDirs) if (setDllDirs(MY_LOAD_LIBRARY_SEARCH_SYSTEM32 | MY_LOAD_LIBRARY_SEARCH_USER_DIRS)) return; }
-
-void My_SetDefaultDllDirectories(void)
-{
- #ifndef UNDER_CE
- IF_NON_VISTA_SET_DLL_DIRS_AND_RETURN
- #endif
-}
-
-
-void LoadSecurityDlls(void)
-{
- #ifndef UNDER_CE
- // at Vista (ver 6.0) : CoCreateInstance(CLSID_ShellLink, ...) doesn't work after SetDefaultDllDirectories() : Check it ???
- IF_NON_VISTA_SET_DLL_DIRS_AND_RETURN
- {
- wchar_t buf[MAX_PATH + 100];
- const char *dll;
- unsigned pos = GetSystemDirectoryW(buf, MAX_PATH + 2);
- if (pos == 0 || pos > MAX_PATH)
- return;
- if (buf[pos - 1] != '\\')
- buf[pos++] = '\\';
- for (dll = g_Dlls; *dll != 0;)
- {
- wchar_t *dest = &buf[pos];
- for (;;)
- {
- const char c = *dll++;
- if (c == 0)
- break;
- *dest++ = (Byte)c;
- }
- dest[0] = '.';
- dest[1] = 'd';
- dest[2] = 'l';
- dest[3] = 'l';
- dest[4] = 0;
- // lstrcatW(buf, L".dll");
- LoadLibraryExW(buf, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
- }
- }
- #endif
-}
-
-#endif // _WIN32
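
The DLL-preloading mitigation above restricts the loader search path via SetDefaultDllDirectories where available, and otherwise preloads a known list of DLLs from System32. A minimal Windows-only sketch of the first half, resolved at run time because older systems lack the export (illustrative helper name, not part of the patch):

#ifdef _WIN32
#include <windows.h>
#include <stdio.h>

#ifndef LOAD_LIBRARY_SEARCH_SYSTEM32
#define LOAD_LIBRARY_SEARCH_SYSTEM32  0x800
#define LOAD_LIBRARY_SEARCH_USER_DIRS 0x400
#endif

/* Restrict implicit DLL loads to System32 plus directories added via AddDllDirectory. */
static void restrict_dll_search_path(void)
{
    typedef BOOL (WINAPI *SetDefDirs)(DWORD);
    HMODULE k32 = GetModuleHandleW(L"kernel32.dll");
    SetDefDirs f = k32 ? (SetDefDirs)(void *)GetProcAddress(k32, "SetDefaultDllDirectories") : NULL;
    if (f)
        f(LOAD_LIBRARY_SEARCH_SYSTEM32 | LOAD_LIBRARY_SEARCH_USER_DIRS);
}

int main(void)
{
    restrict_dll_search_path();
    puts("search path restricted (if supported)");
    return 0;
}
#endif
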
diff --git a/3rdparty/7z/src/DllSecur.h b/3rdparty/7z/src/DllSecur.h
deleted file mode 100644
index 87bacc637c..0000000000
--- a/3rdparty/7z/src/DllSecur.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* DllSecur.h -- DLL loading for security
-2023-03-03 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_DLL_SECUR_H
-#define ZIP7_INC_DLL_SECUR_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#ifdef _WIN32
-
-void My_SetDefaultDllDirectories(void);
-void LoadSecurityDlls(void);
-
-#endif
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/HuffEnc.c b/3rdparty/7z/src/HuffEnc.c
deleted file mode 100644
index 3dc1e392a6..0000000000
--- a/3rdparty/7z/src/HuffEnc.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/* HuffEnc.c -- functions for Huffman encoding
-2023-03-04 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "HuffEnc.h"
-#include "Sort.h"
-
-#define kMaxLen 16
-#define NUM_BITS 10
-#define MASK (((unsigned)1 << NUM_BITS) - 1)
-
-#define NUM_COUNTERS 64
-
-#define HUFFMAN_SPEED_OPT
-
-void Huffman_Generate(const UInt32 *freqs, UInt32 *p, Byte *lens, UInt32 numSymbols, UInt32 maxLen)
-{
- UInt32 num = 0;
- /* if (maxLen > 10) maxLen = 10; */
- {
- UInt32 i;
-
- #ifdef HUFFMAN_SPEED_OPT
-
- UInt32 counters[NUM_COUNTERS];
- for (i = 0; i < NUM_COUNTERS; i++)
- counters[i] = 0;
- for (i = 0; i < numSymbols; i++)
- {
- UInt32 freq = freqs[i];
- counters[(freq < NUM_COUNTERS - 1) ? freq : NUM_COUNTERS - 1]++;
- }
-
- for (i = 1; i < NUM_COUNTERS; i++)
- {
- UInt32 temp = counters[i];
- counters[i] = num;
- num += temp;
- }
-
- for (i = 0; i < numSymbols; i++)
- {
- UInt32 freq = freqs[i];
- if (freq == 0)
- lens[i] = 0;
- else
- p[counters[((freq < NUM_COUNTERS - 1) ? freq : NUM_COUNTERS - 1)]++] = i | (freq << NUM_BITS);
- }
- counters[0] = 0;
- HeapSort(p + counters[NUM_COUNTERS - 2], counters[NUM_COUNTERS - 1] - counters[NUM_COUNTERS - 2]);
-
- #else
-
- for (i = 0; i < numSymbols; i++)
- {
- UInt32 freq = freqs[i];
- if (freq == 0)
- lens[i] = 0;
- else
- p[num++] = i | (freq << NUM_BITS);
- }
- HeapSort(p, num);
-
- #endif
- }
-
- if (num < 2)
- {
- unsigned minCode = 0;
- unsigned maxCode = 1;
- if (num == 1)
- {
- maxCode = (unsigned)p[0] & MASK;
- if (maxCode == 0)
- maxCode++;
- }
- p[minCode] = 0;
- p[maxCode] = 1;
- lens[minCode] = lens[maxCode] = 1;
- return;
- }
-
- {
- UInt32 b, e, i;
-
- i = b = e = 0;
- do
- {
- UInt32 n, m, freq;
- n = (i != num && (b == e || (p[i] >> NUM_BITS) <= (p[b] >> NUM_BITS))) ? i++ : b++;
- freq = (p[n] & ~MASK);
- p[n] = (p[n] & MASK) | (e << NUM_BITS);
- m = (i != num && (b == e || (p[i] >> NUM_BITS) <= (p[b] >> NUM_BITS))) ? i++ : b++;
- freq += (p[m] & ~MASK);
- p[m] = (p[m] & MASK) | (e << NUM_BITS);
- p[e] = (p[e] & MASK) | freq;
- e++;
- }
- while (num - e > 1);
-
- {
- UInt32 lenCounters[kMaxLen + 1];
- for (i = 0; i <= kMaxLen; i++)
- lenCounters[i] = 0;
-
- p[--e] &= MASK;
- lenCounters[1] = 2;
- while (e != 0)
- {
- UInt32 len = (p[p[--e] >> NUM_BITS] >> NUM_BITS) + 1;
- p[e] = (p[e] & MASK) | (len << NUM_BITS);
- if (len >= maxLen)
- for (len = maxLen - 1; lenCounters[len] == 0; len--);
- lenCounters[len]--;
- lenCounters[(size_t)len + 1] += 2;
- }
-
- {
- UInt32 len;
- i = 0;
- for (len = maxLen; len != 0; len--)
- {
- UInt32 k;
- for (k = lenCounters[len]; k != 0; k--)
- lens[p[i++] & MASK] = (Byte)len;
- }
- }
-
- {
- UInt32 nextCodes[kMaxLen + 1];
- {
- UInt32 code = 0;
- UInt32 len;
- for (len = 1; len <= kMaxLen; len++)
- nextCodes[len] = code = (code + lenCounters[(size_t)len - 1]) << 1;
- }
- /* if (code + lenCounters[kMaxLen] - 1 != (1 << kMaxLen) - 1) throw 1; */
-
- {
- UInt32 k;
- for (k = 0; k < numSymbols; k++)
- p[k] = nextCodes[lens[k]]++;
- }
- }
- }
- }
-}
-
-#undef kMaxLen
-#undef NUM_BITS
-#undef MASK
-#undef NUM_COUNTERS
-#undef HUFFMAN_SPEED_OPT
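
The last stage of Huffman_Generate above turns the computed bit lengths into canonical codes, where codes of the same length are consecutive and shorter lengths come first. A standalone sketch of that canonical assignment, with the lengths taken as given and an assumed helper name:

#include <stdint.h>
#include <stdio.h>

#define MAX_LEN 16

/* Input: lens[i] = code length of symbol i (0 = unused). Output: codes[i]. */
static void assign_canonical_codes(const unsigned char *lens, uint32_t *codes, unsigned numSymbols)
{
    uint32_t lenCount[MAX_LEN + 1] = { 0 };
    uint32_t nextCode[MAX_LEN + 1] = { 0 };
    uint32_t code = 0;
    for (unsigned i = 0; i < numSymbols; i++)
        lenCount[lens[i]]++;
    lenCount[0] = 0;
    for (unsigned len = 1; len <= MAX_LEN; len++)
        nextCode[len] = code = (code + lenCount[len - 1]) << 1;
    for (unsigned i = 0; i < numSymbols; i++)
        codes[i] = lens[i] ? nextCode[lens[i]]++ : 0;
}

int main(void)
{
    const unsigned char lens[] = { 2, 1, 3, 3 };
    uint32_t codes[4];
    assign_canonical_codes(lens, codes, 4);
    for (unsigned i = 0; i < 4; i++)
        printf("symbol %u: len %u code %u\n", i, (unsigned)lens[i], (unsigned)codes[i]);
    return 0;
}
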
diff --git a/3rdparty/7z/src/HuffEnc.h b/3rdparty/7z/src/HuffEnc.h
deleted file mode 100644
index cbc5d11f94..0000000000
--- a/3rdparty/7z/src/HuffEnc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* HuffEnc.h -- Huffman encoding
-2023-03-05 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_HUFF_ENC_H
-#define ZIP7_INC_HUFF_ENC_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-/*
-Conditions:
- num <= 1024 = 2 ^ NUM_BITS
- Sum(freqs) < 4M = 2 ^ (32 - NUM_BITS)
- maxLen <= 16 = kMaxLen
- Num_Items(p) >= HUFFMAN_TEMP_SIZE(num)
-*/
-
-void Huffman_Generate(const UInt32 *freqs, UInt32 *p, Byte *lens, UInt32 num, UInt32 maxLen);
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/LzFind.c b/3rdparty/7z/src/LzFind.c
deleted file mode 100644
index 7589a4c322..0000000000
--- a/3rdparty/7z/src/LzFind.c
+++ /dev/null
@@ -1,1717 +0,0 @@
-/* LzFind.c -- Match finder for LZ algorithms
-2023-03-14 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-// #include <stdio.h>
-
-#include "CpuArch.h"
-#include "LzFind.h"
-#include "LzHash.h"
-
-#define kBlockMoveAlign (1 << 7) // alignment for memmove()
-#define kBlockSizeAlign (1 << 16) // alignment for block allocation
-#define kBlockSizeReserveMin (1 << 24) // it's 1/256 from 4 GB dictionary
-
-#define kEmptyHashValue 0
-
-#define kMaxValForNormalize ((UInt32)0)
-// #define kMaxValForNormalize ((UInt32)(1 << 20) + 0xfff) // for debug
-
-// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses
-
-#define GET_AVAIL_BYTES(p) \
- Inline_MatchFinder_GetNumAvailableBytes(p)
-
-
-// #define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
-#define kFix5HashSize kFix4HashSize
-
-/*
- HASH2_CALC:
- if (hv) match, then cur[0] and cur[1] also match
-*/
-#define HASH2_CALC hv = GetUi16(cur);
-
-// (crc[0 ... 255] & 0xFF) provides one-to-one correspondence to [0 ... 255]
-
-/*
- HASH3_CALC:
- if (cur[0]) and (h2) match, then cur[1] also match
- if (cur[0]) and (hv) match, then cur[1] and cur[2] also match
-*/
-#define HASH3_CALC { \
- UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
- h2 = temp & (kHash2Size - 1); \
- hv = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
-
-#define HASH4_CALC { \
- UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
- h2 = temp & (kHash2Size - 1); \
- temp ^= ((UInt32)cur[2] << 8); \
- h3 = temp & (kHash3Size - 1); \
- hv = (temp ^ (p->crc[cur[3]] << kLzHash_CrcShift_1)) & p->hashMask; }
-
-#define HASH5_CALC { \
- UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
- h2 = temp & (kHash2Size - 1); \
- temp ^= ((UInt32)cur[2] << 8); \
- h3 = temp & (kHash3Size - 1); \
- temp ^= (p->crc[cur[3]] << kLzHash_CrcShift_1); \
- /* h4 = temp & p->hash4Mask; */ /* (kHash4Size - 1); */ \
- hv = (temp ^ (p->crc[cur[4]] << kLzHash_CrcShift_2)) & p->hashMask; }
-
-#define HASH_ZIP_CALC hv = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
-
-
-static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc)
-{
- // if (!p->directInput)
- {
- ISzAlloc_Free(alloc, p->bufBase);
- p->bufBase = NULL;
- }
-}
-
-
-static int LzInWindow_Create2(CMatchFinder *p, UInt32 blockSize, ISzAllocPtr alloc)
-{
- if (blockSize == 0)
- return 0;
- if (!p->bufBase || p->blockSize != blockSize)
- {
- // size_t blockSizeT;
- LzInWindow_Free(p, alloc);
- p->blockSize = blockSize;
- // blockSizeT = blockSize;
-
- // printf("\nblockSize = 0x%x\n", blockSize);
- /*
- #if defined _WIN64
- // we can allocate 4GiB, but still use UInt32 for (p->blockSize)
- // we use UInt32 type for (p->blockSize), because
- // we don't want to wrap over 4 GiB,
- // when we use (p->streamPos - p->pos) that is UInt32.
- if (blockSize >= (UInt32)0 - (UInt32)kBlockSizeAlign)
- {
- blockSizeT = ((size_t)1 << 32);
- printf("\nchanged to blockSizeT = 4GiB\n");
- }
- #endif
- */
-
- p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, blockSize);
- // printf("\nbufferBase = %p\n", p->bufBase);
- // return 0; // for debug
- }
- return (p->bufBase != NULL);
-}
-
-static const Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
-
-static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return GET_AVAIL_BYTES(p); }
-
-
-Z7_NO_INLINE
-static void MatchFinder_ReadBlock(CMatchFinder *p)
-{
- if (p->streamEndWasReached || p->result != SZ_OK)
- return;
-
- /* We use (p->streamPos - p->pos) value.
- (p->streamPos < p->pos) is allowed. */
-
- if (p->directInput)
- {
- UInt32 curSize = 0xFFFFFFFF - GET_AVAIL_BYTES(p);
- if (curSize > p->directInputRem)
- curSize = (UInt32)p->directInputRem;
- p->streamPos += curSize;
- p->directInputRem -= curSize;
- if (p->directInputRem == 0)
- p->streamEndWasReached = 1;
- return;
- }
-
- for (;;)
- {
- const Byte *dest = p->buffer + GET_AVAIL_BYTES(p);
- size_t size = (size_t)(p->bufBase + p->blockSize - dest);
- if (size == 0)
- {
- /* we call ReadBlock() after NeedMove() and MoveBlock().
-         NeedMove() and MoveBlock() provide more than (keepSizeAfter)
- to the end of (blockSize).
- So we don't execute this branch in normal code flow.
- We can go here, if we will call ReadBlock() before NeedMove(), MoveBlock().
- */
- // p->result = SZ_ERROR_FAIL; // we can show error here
- return;
- }
-
- // #define kRead 3
- // if (size > kRead) size = kRead; // for debug
-
- /*
- // we need cast (Byte *)dest.
- #ifdef __clang__
- #pragma GCC diagnostic ignored "-Wcast-qual"
- #endif
- */
- p->result = ISeqInStream_Read(p->stream,
- p->bufBase + (dest - p->bufBase), &size);
- if (p->result != SZ_OK)
- return;
- if (size == 0)
- {
- p->streamEndWasReached = 1;
- return;
- }
- p->streamPos += (UInt32)size;
- if (GET_AVAIL_BYTES(p) > p->keepSizeAfter)
- return;
-    /* here and in other (p->keepSizeAfter) checks we keep 1 byte more than was requested by the Create() function
- (GET_AVAIL_BYTES(p) >= p->keepSizeAfter) - minimal required size */
- }
-
- // on exit: (p->result != SZ_OK || p->streamEndWasReached || GET_AVAIL_BYTES(p) > p->keepSizeAfter)
-}
-
-
-
-Z7_NO_INLINE
-void MatchFinder_MoveBlock(CMatchFinder *p)
-{
- const size_t offset = (size_t)(p->buffer - p->bufBase) - p->keepSizeBefore;
- const size_t keepBefore = (offset & (kBlockMoveAlign - 1)) + p->keepSizeBefore;
- p->buffer = p->bufBase + keepBefore;
- memmove(p->bufBase,
- p->bufBase + (offset & ~((size_t)kBlockMoveAlign - 1)),
- keepBefore + (size_t)GET_AVAIL_BYTES(p));
-}
-
-/* We call MoveBlock() before ReadBlock().
-   So MoveBlock() can be a wasteful operation if the whole input data
-   can fit in the current block even without calling MoveBlock().
-   In the important case where (dataSize <= historySize),
-   the condition (p->blockSize > dataSize + p->keepSizeAfter) is met,
-   so there is no MoveBlock() in that case.
-*/
-
-int MatchFinder_NeedMove(CMatchFinder *p)
-{
- if (p->directInput)
- return 0;
- if (p->streamEndWasReached || p->result != SZ_OK)
- return 0;
- return ((size_t)(p->bufBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
-}
-
-void MatchFinder_ReadIfRequired(CMatchFinder *p)
-{
- if (p->keepSizeAfter >= GET_AVAIL_BYTES(p))
- MatchFinder_ReadBlock(p);
-}
-
-
-
-static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
-{
- p->cutValue = 32;
- p->btMode = 1;
- p->numHashBytes = 4;
- p->numHashBytes_Min = 2;
- p->numHashOutBits = 0;
- p->bigHash = 0;
-}
-
-#define kCrcPoly 0xEDB88320
-
-void MatchFinder_Construct(CMatchFinder *p)
-{
- unsigned i;
- p->buffer = NULL;
- p->bufBase = NULL;
- p->directInput = 0;
- p->stream = NULL;
- p->hash = NULL;
- p->expectedDataSize = (UInt64)(Int64)-1;
- MatchFinder_SetDefaultSettings(p);
-
- for (i = 0; i < 256; i++)
- {
- UInt32 r = (UInt32)i;
- unsigned j;
- for (j = 0; j < 8; j++)
- r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1)));
- p->crc[i] = r;
- }
-}
-
-#undef kCrcPoly
-
-static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->hash);
- p->hash = NULL;
-}
-
-void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc)
-{
- MatchFinder_FreeThisClassMemory(p, alloc);
- LzInWindow_Free(p, alloc);
-}
-
-static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc)
-{
- const size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
- if (sizeInBytes / sizeof(CLzRef) != num)
- return NULL;
- return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes);
-}
-
-#if (kBlockSizeReserveMin < kBlockSizeAlign * 2)
- #error Stop_Compiling_Bad_Reserve
-#endif
-
-
-
-static UInt32 GetBlockSize(CMatchFinder *p, UInt32 historySize)
-{
- UInt32 blockSize = (p->keepSizeBefore + p->keepSizeAfter);
- /*
- if (historySize > kMaxHistorySize)
- return 0;
- */
- // printf("\nhistorySize == 0x%x\n", historySize);
-
- if (p->keepSizeBefore < historySize || blockSize < p->keepSizeBefore) // if 32-bit overflow
- return 0;
-
- {
- const UInt32 kBlockSizeMax = (UInt32)0 - (UInt32)kBlockSizeAlign;
- const UInt32 rem = kBlockSizeMax - blockSize;
- const UInt32 reserve = (blockSize >> (blockSize < ((UInt32)1 << 30) ? 1 : 2))
- + (1 << 12) + kBlockMoveAlign + kBlockSizeAlign; // do not overflow 32-bit here
- if (blockSize >= kBlockSizeMax
- || rem < kBlockSizeReserveMin) // we reject settings that will be slow
- return 0;
- if (reserve >= rem)
- blockSize = kBlockSizeMax;
- else
- {
- blockSize += reserve;
- blockSize &= ~(UInt32)(kBlockSizeAlign - 1);
- }
- }
- // printf("\n LzFind_blockSize = %x\n", blockSize);
- // printf("\n LzFind_blockSize = %d\n", blockSize >> 20);
- return blockSize;
-}
-
-
-// input is historySize
-static UInt32 MatchFinder_GetHashMask2(CMatchFinder *p, UInt32 hs)
-{
- if (p->numHashBytes == 2)
- return (1 << 16) - 1;
- if (hs != 0)
- hs--;
- hs |= (hs >> 1);
- hs |= (hs >> 2);
- hs |= (hs >> 4);
- hs |= (hs >> 8);
- // we propagated 16 bits in (hs). Low 16 bits must be set later
- if (hs >= (1 << 24))
- {
- if (p->numHashBytes == 3)
- hs = (1 << 24) - 1;
- /* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
- }
- // (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
- hs |= (1 << 16) - 1; /* don't change it! */
- // bt5: we adjust the size with recommended minimum size
- if (p->numHashBytes >= 5)
- hs |= (256 << kLzHash_CrcShift_2) - 1;
- return hs;
-}
-
-// input is historySize
-static UInt32 MatchFinder_GetHashMask(CMatchFinder *p, UInt32 hs)
-{
- if (p->numHashBytes == 2)
- return (1 << 16) - 1;
- if (hs != 0)
- hs--;
- hs |= (hs >> 1);
- hs |= (hs >> 2);
- hs |= (hs >> 4);
- hs |= (hs >> 8);
- // we propagated 16 bits in (hs). Low 16 bits must be set later
- hs >>= 1;
- if (hs >= (1 << 24))
- {
- if (p->numHashBytes == 3)
- hs = (1 << 24) - 1;
- else
- hs >>= 1;
- /* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
- }
- // (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
- hs |= (1 << 16) - 1; /* don't change it! */
- // bt5: we adjust the size with recommended minimum size
- if (p->numHashBytes >= 5)
- hs |= (256 << kLzHash_CrcShift_2) - 1;
- return hs;
-}
-
-
-int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
- UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
- ISzAllocPtr alloc)
-{
- /* we need one additional byte in (p->keepSizeBefore),
- since we use MoveBlock() after (p->pos++) and before dictionary using */
- // keepAddBufferBefore = (UInt32)0xFFFFFFFF - (1 << 22); // for debug
- p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
-
- keepAddBufferAfter += matchMaxLen;
- /* we need (p->keepSizeAfter >= p->numHashBytes) */
- if (keepAddBufferAfter < p->numHashBytes)
- keepAddBufferAfter = p->numHashBytes;
- // keepAddBufferAfter -= 2; // for debug
- p->keepSizeAfter = keepAddBufferAfter;
-
- if (p->directInput)
- p->blockSize = 0;
- if (p->directInput || LzInWindow_Create2(p, GetBlockSize(p, historySize), alloc))
- {
- size_t hashSizeSum;
- {
- UInt32 hs;
- UInt32 hsCur;
-
- if (p->numHashOutBits != 0)
- {
- unsigned numBits = p->numHashOutBits;
- const unsigned nbMax =
- (p->numHashBytes == 2 ? 16 :
- (p->numHashBytes == 3 ? 24 : 32));
- if (numBits > nbMax)
- numBits = nbMax;
- if (numBits >= 32)
- hs = (UInt32)0 - 1;
- else
- hs = ((UInt32)1 << numBits) - 1;
- // (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
- hs |= (1 << 16) - 1; /* don't change it! */
- if (p->numHashBytes >= 5)
- hs |= (256 << kLzHash_CrcShift_2) - 1;
- {
- const UInt32 hs2 = MatchFinder_GetHashMask2(p, historySize);
- if (hs > hs2)
- hs = hs2;
- }
- hsCur = hs;
- if (p->expectedDataSize < historySize)
- {
- const UInt32 hs2 = MatchFinder_GetHashMask2(p, (UInt32)p->expectedDataSize);
- if (hsCur > hs2)
- hsCur = hs2;
- }
- }
- else
- {
- hs = MatchFinder_GetHashMask(p, historySize);
- hsCur = hs;
- if (p->expectedDataSize < historySize)
- {
- hsCur = MatchFinder_GetHashMask(p, (UInt32)p->expectedDataSize);
- if (hsCur > hs) // is it possible?
- hsCur = hs;
- }
- }
-
- p->hashMask = hsCur;
-
- hashSizeSum = hs;
- hashSizeSum++;
- if (hashSizeSum < hs)
- return 0;
- {
- UInt32 fixedHashSize = 0;
- if (p->numHashBytes > 2 && p->numHashBytes_Min <= 2) fixedHashSize += kHash2Size;
- if (p->numHashBytes > 3 && p->numHashBytes_Min <= 3) fixedHashSize += kHash3Size;
- // if (p->numHashBytes > 4) p->fixedHashSize += hs4; // kHash4Size;
- hashSizeSum += fixedHashSize;
- p->fixedHashSize = fixedHashSize;
- }
- }
-
- p->matchMaxLen = matchMaxLen;
-
- {
- size_t newSize;
- size_t numSons;
- const UInt32 newCyclicBufferSize = historySize + 1; // do not change it
- p->historySize = historySize;
- p->cyclicBufferSize = newCyclicBufferSize; // it must be = (historySize + 1)
-
- numSons = newCyclicBufferSize;
- if (p->btMode)
- numSons <<= 1;
- newSize = hashSizeSum + numSons;
-
- if (numSons < newCyclicBufferSize || newSize < numSons)
- return 0;
-
- // aligned size is not required here, but it can be better for some loops
- #define NUM_REFS_ALIGN_MASK 0xF
- newSize = (newSize + NUM_REFS_ALIGN_MASK) & ~(size_t)NUM_REFS_ALIGN_MASK;
-
- // 22.02: we don't reallocate buffer, if old size is enough
- if (p->hash && p->numRefs >= newSize)
- return 1;
-
- MatchFinder_FreeThisClassMemory(p, alloc);
- p->numRefs = newSize;
- p->hash = AllocRefs(newSize, alloc);
-
- if (p->hash)
- {
- p->son = p->hash + hashSizeSum;
- return 1;
- }
- }
- }
-
- MatchFinder_Free(p, alloc);
- return 0;
-}
-
-
-static void MatchFinder_SetLimits(CMatchFinder *p)
-{
- UInt32 k;
- UInt32 n = kMaxValForNormalize - p->pos;
- if (n == 0)
- n = (UInt32)(Int32)-1; // we allow (pos == 0) at start even with (kMaxValForNormalize == 0)
-
- k = p->cyclicBufferSize - p->cyclicBufferPos;
- if (k < n)
- n = k;
-
- k = GET_AVAIL_BYTES(p);
- {
- const UInt32 ksa = p->keepSizeAfter;
- UInt32 mm = p->matchMaxLen;
- if (k > ksa)
- k -= ksa; // we must limit exactly to keepSizeAfter for ReadBlock
- else if (k >= mm)
- {
- // the limitation for (p->lenLimit) update
- k -= mm; // optimization : to reduce the number of checks
- k++;
- // k = 1; // non-optimized version : for debug
- }
- else
- {
- mm = k;
- if (k != 0)
- k = 1;
- }
- p->lenLimit = mm;
- }
- if (k < n)
- n = k;
-
- p->posLimit = p->pos + n;
-}
-
-
-void MatchFinder_Init_LowHash(CMatchFinder *p)
-{
- size_t i;
- CLzRef *items = p->hash;
- const size_t numItems = p->fixedHashSize;
- for (i = 0; i < numItems; i++)
- items[i] = kEmptyHashValue;
-}
-
-
-void MatchFinder_Init_HighHash(CMatchFinder *p)
-{
- size_t i;
- CLzRef *items = p->hash + p->fixedHashSize;
- const size_t numItems = (size_t)p->hashMask + 1;
- for (i = 0; i < numItems; i++)
- items[i] = kEmptyHashValue;
-}
-
-
-void MatchFinder_Init_4(CMatchFinder *p)
-{
- if (!p->directInput)
- p->buffer = p->bufBase;
- {
- /* kEmptyHashValue = 0 (Zero) is used in hash tables as NO-VALUE marker.
- the code in CMatchFinderMt expects (pos = 1) */
- p->pos =
- p->streamPos =
- 1; // it's smallest optimal value. do not change it
- // 0; // for debug
- }
- p->result = SZ_OK;
- p->streamEndWasReached = 0;
-}
-
-
-// (CYC_TO_POS_OFFSET == 0) is expected by some optimized code
-#define CYC_TO_POS_OFFSET 0
-// #define CYC_TO_POS_OFFSET 1 // for debug
-
-void MatchFinder_Init(CMatchFinder *p)
-{
- MatchFinder_Init_HighHash(p);
- MatchFinder_Init_LowHash(p);
- MatchFinder_Init_4(p);
- // if (readData)
- MatchFinder_ReadBlock(p);
-
- /* if we init (cyclicBufferPos = pos), then we can use one variable
- instead of both (cyclicBufferPos) and (pos) : only before (cyclicBufferPos) wrapping */
- p->cyclicBufferPos = (p->pos - CYC_TO_POS_OFFSET); // init with relation to (pos)
- // p->cyclicBufferPos = 0; // smallest value
- // p->son[0] = p->son[1] = 0; // unused: we can init skipped record for speculated accesses.
- MatchFinder_SetLimits(p);
-}
-
-
-
-#ifdef MY_CPU_X86_OR_AMD64
- #if defined(__clang__) && (__clang_major__ >= 4) \
- || defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40701)
- // || defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
-
- #define USE_LZFIND_SATUR_SUB_128
- #define USE_LZFIND_SATUR_SUB_256
- #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("sse4.1")))
- #define LZFIND_ATTRIB_AVX2 __attribute__((__target__("avx2")))
- #elif defined(_MSC_VER)
- #if (_MSC_VER >= 1600)
- #define USE_LZFIND_SATUR_SUB_128
- #endif
- #if (_MSC_VER >= 1900)
- #define USE_LZFIND_SATUR_SUB_256
- #endif
- #endif
-
-// #elif defined(MY_CPU_ARM_OR_ARM64)
-#elif defined(MY_CPU_ARM64)
-
- #if defined(__clang__) && (__clang_major__ >= 8) \
- || defined(__GNUC__) && (__GNUC__ >= 8)
- #define USE_LZFIND_SATUR_SUB_128
- #ifdef MY_CPU_ARM64
- // #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("")))
- #else
- // #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
- #endif
-
- #elif defined(_MSC_VER)
- #if (_MSC_VER >= 1910)
- #define USE_LZFIND_SATUR_SUB_128
- #endif
- #endif
-
- #if defined(_MSC_VER) && defined(MY_CPU_ARM64)
-  #include <arm64_neon.h>
- #else
-  #include <arm_neon.h>
- #endif
-
-#endif
-
-
-#ifdef USE_LZFIND_SATUR_SUB_128
-
-// #define Z7_SHOW_HW_STATUS
-
-#ifdef Z7_SHOW_HW_STATUS
-#include <stdio.h>
-#define PRF(x) x
-PRF(;)
-#else
-#define PRF(x)
-#endif
-
-
-#ifdef MY_CPU_ARM_OR_ARM64
-
-#ifdef MY_CPU_ARM64
-// #define FORCE_LZFIND_SATUR_SUB_128
-#endif
-typedef uint32x4_t LzFind_v128;
-#define SASUB_128_V(v, s) \
- vsubq_u32(vmaxq_u32(v, s), s)
-
-#else // MY_CPU_ARM_OR_ARM64
-
-#include <smmintrin.h> // sse4.1
-
-typedef __m128i LzFind_v128;
-// SSE 4.1
-#define SASUB_128_V(v, s) \
- _mm_sub_epi32(_mm_max_epu32(v, s), s)
-
-#endif // MY_CPU_ARM_OR_ARM64
-
-
-#define SASUB_128(i) \
- *( LzFind_v128 *)( void *)(items + (i) * 4) = SASUB_128_V( \
- *(const LzFind_v128 *)(const void *)(items + (i) * 4), sub2);
-
-
-Z7_NO_INLINE
-static
-#ifdef LZFIND_ATTRIB_SSE41
-LZFIND_ATTRIB_SSE41
-#endif
-void
-Z7_FASTCALL
-LzFind_SaturSub_128(UInt32 subValue, CLzRef *items, const CLzRef *lim)
-{
- const LzFind_v128 sub2 =
- #ifdef MY_CPU_ARM_OR_ARM64
- vdupq_n_u32(subValue);
- #else
- _mm_set_epi32((Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
- #endif
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SASUB_128(0) SASUB_128(1) items += 2 * 4;
- SASUB_128(0) SASUB_128(1) items += 2 * 4;
- }
- while (items != lim);
-}
-
-
-
-#ifdef USE_LZFIND_SATUR_SUB_256
-
-#include <immintrin.h> // avx
-/*
-clang :immintrin.h uses
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX2__)
-#include <avxintrin.h>
-#endif
-so we need <avxintrin.h> for clang-cl */
-
-#if defined(__clang__)
-#include <avxintrin.h>
-#include <avx2intrin.h>
-#endif
-
-// AVX2:
-#define SASUB_256(i) \
- *( __m256i *)( void *)(items + (i) * 8) = \
- _mm256_sub_epi32(_mm256_max_epu32( \
- *(const __m256i *)(const void *)(items + (i) * 8), sub2), sub2);
-
-Z7_NO_INLINE
-static
-#ifdef LZFIND_ATTRIB_AVX2
-LZFIND_ATTRIB_AVX2
-#endif
-void
-Z7_FASTCALL
-LzFind_SaturSub_256(UInt32 subValue, CLzRef *items, const CLzRef *lim)
-{
- const __m256i sub2 = _mm256_set_epi32(
- (Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue,
- (Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SASUB_256(0) SASUB_256(1) items += 2 * 8;
- SASUB_256(0) SASUB_256(1) items += 2 * 8;
- }
- while (items != lim);
-}
-#endif // USE_LZFIND_SATUR_SUB_256
-
-#ifndef FORCE_LZFIND_SATUR_SUB_128
-typedef void (Z7_FASTCALL *LZFIND_SATUR_SUB_CODE_FUNC)(
- UInt32 subValue, CLzRef *items, const CLzRef *lim);
-static LZFIND_SATUR_SUB_CODE_FUNC g_LzFind_SaturSub;
-#endif // FORCE_LZFIND_SATUR_SUB_128
-
-#endif // USE_LZFIND_SATUR_SUB_128
-
-
-// kEmptyHashValue must be zero
-// #define SASUB_32(i) { UInt32 v = items[i]; UInt32 m = v - subValue; if (v < subValue) m = kEmptyHashValue; items[i] = m; }
-#define SASUB_32(i) { UInt32 v = items[i]; if (v < subValue) v = subValue; items[i] = v - subValue; }
-
-#ifdef FORCE_LZFIND_SATUR_SUB_128
-
-#define DEFAULT_SaturSub LzFind_SaturSub_128
-
-#else
-
-#define DEFAULT_SaturSub LzFind_SaturSub_32
-
-Z7_NO_INLINE
-static
-void
-Z7_FASTCALL
-LzFind_SaturSub_32(UInt32 subValue, CLzRef *items, const CLzRef *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SASUB_32(0) SASUB_32(1) items += 2;
- SASUB_32(0) SASUB_32(1) items += 2;
- SASUB_32(0) SASUB_32(1) items += 2;
- SASUB_32(0) SASUB_32(1) items += 2;
- }
- while (items != lim);
-}
-
-#endif
-
-
-Z7_NO_INLINE
-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
-{
- #define LZFIND_NORM_ALIGN_BLOCK_SIZE (1 << 7)
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (LZFIND_NORM_ALIGN_BLOCK_SIZE - 1)) != 0; numItems--)
- {
- SASUB_32(0)
- items++;
- }
- {
- const size_t k_Align_Mask = (LZFIND_NORM_ALIGN_BLOCK_SIZE / 4 - 1);
- CLzRef *lim = items + (numItems & ~(size_t)k_Align_Mask);
- numItems &= k_Align_Mask;
- if (items != lim)
- {
- #if defined(USE_LZFIND_SATUR_SUB_128) && !defined(FORCE_LZFIND_SATUR_SUB_128)
- if (g_LzFind_SaturSub)
- g_LzFind_SaturSub(subValue, items, lim);
- else
- #endif
- DEFAULT_SaturSub(subValue, items, lim);
- }
- items = lim;
- }
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (; numItems != 0; numItems--)
- {
- SASUB_32(0)
- items++;
- }
-}
-
-
-
-// call MatchFinder_CheckLimits() only after (p->pos++) update
-
-Z7_NO_INLINE
-static void MatchFinder_CheckLimits(CMatchFinder *p)
-{
- if (// !p->streamEndWasReached && p->result == SZ_OK &&
- p->keepSizeAfter == GET_AVAIL_BYTES(p))
- {
- // we try to read only in exact state (p->keepSizeAfter == GET_AVAIL_BYTES(p))
- if (MatchFinder_NeedMove(p))
- MatchFinder_MoveBlock(p);
- MatchFinder_ReadBlock(p);
- }
-
- if (p->pos == kMaxValForNormalize)
- if (GET_AVAIL_BYTES(p) >= p->numHashBytes) // optional optimization for last bytes of data.
- /*
- if we disable normalization for last bytes of data, and
-      if (data_size == 4 GiB), we don't call wasteful normalization,
- but (pos) will be wrapped over Zero (0) in that case.
- And we cannot resume later to normal operation
- */
- {
- // MatchFinder_Normalize(p);
- /* after normalization we need (p->pos >= p->historySize + 1); */
- /* we can reduce subValue to aligned value, if want to keep alignment
- of (p->pos) and (p->buffer) for speculated accesses. */
- const UInt32 subValue = (p->pos - p->historySize - 1) /* & ~(UInt32)(kNormalizeAlign - 1) */;
- // const UInt32 subValue = (1 << 15); // for debug
- // printf("\nMatchFinder_Normalize() subValue == 0x%x\n", subValue);
- MatchFinder_REDUCE_OFFSETS(p, subValue)
- MatchFinder_Normalize3(subValue, p->hash, (size_t)p->hashMask + 1 + p->fixedHashSize);
- {
- size_t numSonRefs = p->cyclicBufferSize;
- if (p->btMode)
- numSonRefs <<= 1;
- MatchFinder_Normalize3(subValue, p->son, numSonRefs);
- }
- }
-
- if (p->cyclicBufferPos == p->cyclicBufferSize)
- p->cyclicBufferPos = 0;
-
- MatchFinder_SetLimits(p);
-}
-
-
-/*
- (lenLimit > maxLen)
-*/
-Z7_FORCE_INLINE
-static UInt32 * Hc_GetMatchesSpec(size_t lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
- UInt32 *d, unsigned maxLen)
-{
- /*
- son[_cyclicBufferPos] = curMatch;
- for (;;)
- {
- UInt32 delta = pos - curMatch;
- if (cutValue-- == 0 || delta >= _cyclicBufferSize)
- return d;
- {
- const Byte *pb = cur - delta;
- curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
- if (pb[maxLen] == cur[maxLen] && *pb == *cur)
- {
- UInt32 len = 0;
- while (++len != lenLimit)
- if (pb[len] != cur[len])
- break;
- if (maxLen < len)
- {
- maxLen = len;
- *d++ = len;
- *d++ = delta - 1;
- if (len == lenLimit)
- return d;
- }
- }
- }
- }
- */
-
- const Byte *lim = cur + lenLimit;
- son[_cyclicBufferPos] = curMatch;
-
- do
- {
- UInt32 delta;
-
- if (curMatch == 0)
- break;
- // if (curMatch2 >= curMatch) return NULL;
- delta = pos - curMatch;
- if (delta >= _cyclicBufferSize)
- break;
- {
- ptrdiff_t diff;
- curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
- diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
- if (cur[maxLen] == cur[(ptrdiff_t)maxLen + diff])
- {
- const Byte *c = cur;
- while (*c == c[diff])
- {
- if (++c == lim)
- {
- d[0] = (UInt32)(lim - cur);
- d[1] = delta - 1;
- return d + 2;
- }
- }
- {
- const unsigned len = (unsigned)(c - cur);
- if (maxLen < len)
- {
- maxLen = len;
- d[0] = (UInt32)len;
- d[1] = delta - 1;
- d += 2;
- }
- }
- }
- }
- }
- while (--cutValue);
-
- return d;
-}
-
-
-Z7_FORCE_INLINE
-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
- UInt32 *d, UInt32 maxLen)
-{
- CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
- CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
- unsigned len0 = 0, len1 = 0;
-
- UInt32 cmCheck;
-
- // if (curMatch >= pos) { *ptr0 = *ptr1 = kEmptyHashValue; return NULL; }
-
- cmCheck = (UInt32)(pos - _cyclicBufferSize);
- if ((UInt32)pos <= _cyclicBufferSize)
- cmCheck = 0;
-
- if (cmCheck < curMatch)
- do
- {
- const UInt32 delta = pos - curMatch;
- {
- CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
- const Byte *pb = cur - delta;
- unsigned len = (len0 < len1 ? len0 : len1);
- const UInt32 pair0 = pair[0];
- if (pb[len] == cur[len])
- {
- if (++len != lenLimit && pb[len] == cur[len])
- while (++len != lenLimit)
- if (pb[len] != cur[len])
- break;
- if (maxLen < len)
- {
- maxLen = (UInt32)len;
- *d++ = (UInt32)len;
- *d++ = delta - 1;
- if (len == lenLimit)
- {
- *ptr1 = pair0;
- *ptr0 = pair[1];
- return d;
- }
- }
- }
- if (pb[len] < cur[len])
- {
- *ptr1 = curMatch;
- // const UInt32 curMatch2 = pair[1];
- // if (curMatch2 >= curMatch) { *ptr0 = *ptr1 = kEmptyHashValue; return NULL; }
- // curMatch = curMatch2;
- curMatch = pair[1];
- ptr1 = pair + 1;
- len1 = len;
- }
- else
- {
- *ptr0 = curMatch;
- curMatch = pair[0];
- ptr0 = pair;
- len0 = len;
- }
- }
- }
- while(--cutValue && cmCheck < curMatch);
-
- *ptr0 = *ptr1 = kEmptyHashValue;
- return d;
-}
-
-
-static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
-{
- CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
- CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
- unsigned len0 = 0, len1 = 0;
-
- UInt32 cmCheck;
-
- cmCheck = (UInt32)(pos - _cyclicBufferSize);
- if ((UInt32)pos <= _cyclicBufferSize)
- cmCheck = 0;
-
- if (// curMatch >= pos || // failure
- cmCheck < curMatch)
- do
- {
- const UInt32 delta = pos - curMatch;
- {
- CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
- const Byte *pb = cur - delta;
- unsigned len = (len0 < len1 ? len0 : len1);
- if (pb[len] == cur[len])
- {
- while (++len != lenLimit)
- if (pb[len] != cur[len])
- break;
- {
- if (len == lenLimit)
- {
- *ptr1 = pair[0];
- *ptr0 = pair[1];
- return;
- }
- }
- }
- if (pb[len] < cur[len])
- {
- *ptr1 = curMatch;
- curMatch = pair[1];
- ptr1 = pair + 1;
- len1 = len;
- }
- else
- {
- *ptr0 = curMatch;
- curMatch = pair[0];
- ptr0 = pair;
- len0 = len;
- }
- }
- }
- while(--cutValue && cmCheck < curMatch);
-
- *ptr0 = *ptr1 = kEmptyHashValue;
- return;
-}
-
-
-#define MOVE_POS \
- ++p->cyclicBufferPos; \
- p->buffer++; \
- { const UInt32 pos1 = p->pos + 1; p->pos = pos1; if (pos1 == p->posLimit) MatchFinder_CheckLimits(p); }
-
-#define MOVE_POS_RET MOVE_POS return distances;
-
-Z7_NO_INLINE
-static void MatchFinder_MovePos(CMatchFinder *p)
-{
- /* we go here at the end of stream data, when (avail < num_hash_bytes)
- We don't update sons[cyclicBufferPos << btMode].
- So (sons) record will contain junk. And we cannot resume match searching
- to normal operation, even if we will provide more input data in buffer.
- p->sons[p->cyclicBufferPos << p->btMode] = 0; // kEmptyHashValue
- if (p->btMode)
- p->sons[(p->cyclicBufferPos << p->btMode) + 1] = 0; // kEmptyHashValue
- */
- MOVE_POS
-}
-
-#define GET_MATCHES_HEADER2(minLen, ret_op) \
- unsigned lenLimit; UInt32 hv; const Byte *cur; UInt32 curMatch; \
- lenLimit = (unsigned)p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
- cur = p->buffer;
-
-#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return distances)
-#define SKIP_HEADER(minLen) do { GET_MATCHES_HEADER2(minLen, continue)
-
-#define MF_PARAMS(p) lenLimit, curMatch, p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
-
-#define SKIP_FOOTER SkipMatchesSpec(MF_PARAMS(p)); MOVE_POS } while (--num);
-
-#define GET_MATCHES_FOOTER_BASE(_maxLen_, func) \
- distances = func(MF_PARAMS(p), \
- distances, (UInt32)_maxLen_); MOVE_POS_RET
-
-#define GET_MATCHES_FOOTER_BT(_maxLen_) \
- GET_MATCHES_FOOTER_BASE(_maxLen_, GetMatchesSpec1)
-
-#define GET_MATCHES_FOOTER_HC(_maxLen_) \
- GET_MATCHES_FOOTER_BASE(_maxLen_, Hc_GetMatchesSpec)
-
-
-
-#define UPDATE_maxLen { \
- const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)d2; \
- const Byte *c = cur + maxLen; \
- const Byte *lim = cur + lenLimit; \
- for (; c != lim; c++) if (*(c + diff) != *c) break; \
- maxLen = (unsigned)(c - cur); }
-
-static UInt32* Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- GET_MATCHES_HEADER(2)
- HASH2_CALC
- curMatch = p->hash[hv];
- p->hash[hv] = p->pos;
- GET_MATCHES_FOOTER_BT(1)
-}
-
-UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- GET_MATCHES_HEADER(3)
- HASH_ZIP_CALC
- curMatch = p->hash[hv];
- p->hash[hv] = p->pos;
- GET_MATCHES_FOOTER_BT(2)
-}
-
-
-#define SET_mmm \
- mmm = p->cyclicBufferSize; \
- if (pos < mmm) \
- mmm = pos;
-
-
-static UInt32* Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- UInt32 mmm;
- UInt32 h2, d2, pos;
- unsigned maxLen;
- UInt32 *hash;
- GET_MATCHES_HEADER(3)
-
- HASH3_CALC
-
- hash = p->hash;
- pos = p->pos;
-
- d2 = pos - hash[h2];
-
- curMatch = (hash + kFix3HashSize)[hv];
-
- hash[h2] = pos;
- (hash + kFix3HashSize)[hv] = pos;
-
- SET_mmm
-
- maxLen = 2;
-
- if (d2 < mmm && *(cur - d2) == *cur)
- {
- UPDATE_maxLen
- distances[0] = (UInt32)maxLen;
- distances[1] = d2 - 1;
- distances += 2;
- if (maxLen == lenLimit)
- {
- SkipMatchesSpec(MF_PARAMS(p));
- MOVE_POS_RET
- }
- }
-
- GET_MATCHES_FOOTER_BT(maxLen)
-}
-
-
-static UInt32* Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- UInt32 mmm;
- UInt32 h2, h3, d2, d3, pos;
- unsigned maxLen;
- UInt32 *hash;
- GET_MATCHES_HEADER(4)
-
- HASH4_CALC
-
- hash = p->hash;
- pos = p->pos;
-
- d2 = pos - hash [h2];
- d3 = pos - (hash + kFix3HashSize)[h3];
- curMatch = (hash + kFix4HashSize)[hv];
-
- hash [h2] = pos;
- (hash + kFix3HashSize)[h3] = pos;
- (hash + kFix4HashSize)[hv] = pos;
-
- SET_mmm
-
- maxLen = 3;
-
- for (;;)
- {
- if (d2 < mmm && *(cur - d2) == *cur)
- {
- distances[0] = 2;
- distances[1] = d2 - 1;
- distances += 2;
- if (*(cur - d2 + 2) == cur[2])
- {
- // distances[-2] = 3;
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- d2 = d3;
- distances[1] = d3 - 1;
- distances += 2;
- }
- else
- break;
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- d2 = d3;
- distances[1] = d3 - 1;
- distances += 2;
- }
- else
- break;
-
- UPDATE_maxLen
- distances[-2] = (UInt32)maxLen;
- if (maxLen == lenLimit)
- {
- SkipMatchesSpec(MF_PARAMS(p));
- MOVE_POS_RET
- }
- break;
- }
-
- GET_MATCHES_FOOTER_BT(maxLen)
-}
-
-
-static UInt32* Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- UInt32 mmm;
- UInt32 h2, h3, d2, d3, maxLen, pos;
- UInt32 *hash;
- GET_MATCHES_HEADER(5)
-
- HASH5_CALC
-
- hash = p->hash;
- pos = p->pos;
-
- d2 = pos - hash [h2];
- d3 = pos - (hash + kFix3HashSize)[h3];
- // d4 = pos - (hash + kFix4HashSize)[h4];
-
- curMatch = (hash + kFix5HashSize)[hv];
-
- hash [h2] = pos;
- (hash + kFix3HashSize)[h3] = pos;
- // (hash + kFix4HashSize)[h4] = pos;
- (hash + kFix5HashSize)[hv] = pos;
-
- SET_mmm
-
- maxLen = 4;
-
- for (;;)
- {
- if (d2 < mmm && *(cur - d2) == *cur)
- {
- distances[0] = 2;
- distances[1] = d2 - 1;
- distances += 2;
- if (*(cur - d2 + 2) == cur[2])
- {
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- distances[1] = d3 - 1;
- distances += 2;
- d2 = d3;
- }
- else
- break;
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- distances[1] = d3 - 1;
- distances += 2;
- d2 = d3;
- }
- else
- break;
-
- distances[-2] = 3;
- if (*(cur - d2 + 3) != cur[3])
- break;
- UPDATE_maxLen
- distances[-2] = (UInt32)maxLen;
- if (maxLen == lenLimit)
- {
- SkipMatchesSpec(MF_PARAMS(p));
- MOVE_POS_RET
- }
- break;
- }
-
- GET_MATCHES_FOOTER_BT(maxLen)
-}
-
-
-static UInt32* Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- UInt32 mmm;
- UInt32 h2, h3, d2, d3, pos;
- unsigned maxLen;
- UInt32 *hash;
- GET_MATCHES_HEADER(4)
-
- HASH4_CALC
-
- hash = p->hash;
- pos = p->pos;
-
- d2 = pos - hash [h2];
- d3 = pos - (hash + kFix3HashSize)[h3];
- curMatch = (hash + kFix4HashSize)[hv];
-
- hash [h2] = pos;
- (hash + kFix3HashSize)[h3] = pos;
- (hash + kFix4HashSize)[hv] = pos;
-
- SET_mmm
-
- maxLen = 3;
-
- for (;;)
- {
- if (d2 < mmm && *(cur - d2) == *cur)
- {
- distances[0] = 2;
- distances[1] = d2 - 1;
- distances += 2;
- if (*(cur - d2 + 2) == cur[2])
- {
- // distances[-2] = 3;
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- d2 = d3;
- distances[1] = d3 - 1;
- distances += 2;
- }
- else
- break;
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- d2 = d3;
- distances[1] = d3 - 1;
- distances += 2;
- }
- else
- break;
-
- UPDATE_maxLen
- distances[-2] = (UInt32)maxLen;
- if (maxLen == lenLimit)
- {
- p->son[p->cyclicBufferPos] = curMatch;
- MOVE_POS_RET
- }
- break;
- }
-
- GET_MATCHES_FOOTER_HC(maxLen)
-}
-
-
-static UInt32 * Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- UInt32 mmm;
- UInt32 h2, h3, d2, d3, maxLen, pos;
- UInt32 *hash;
- GET_MATCHES_HEADER(5)
-
- HASH5_CALC
-
- hash = p->hash;
- pos = p->pos;
-
- d2 = pos - hash [h2];
- d3 = pos - (hash + kFix3HashSize)[h3];
- // d4 = pos - (hash + kFix4HashSize)[h4];
-
- curMatch = (hash + kFix5HashSize)[hv];
-
- hash [h2] = pos;
- (hash + kFix3HashSize)[h3] = pos;
- // (hash + kFix4HashSize)[h4] = pos;
- (hash + kFix5HashSize)[hv] = pos;
-
- SET_mmm
-
- maxLen = 4;
-
- for (;;)
- {
- if (d2 < mmm && *(cur - d2) == *cur)
- {
- distances[0] = 2;
- distances[1] = d2 - 1;
- distances += 2;
- if (*(cur - d2 + 2) == cur[2])
- {
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- distances[1] = d3 - 1;
- distances += 2;
- d2 = d3;
- }
- else
- break;
- }
- else if (d3 < mmm && *(cur - d3) == *cur)
- {
- distances[1] = d3 - 1;
- distances += 2;
- d2 = d3;
- }
- else
- break;
-
- distances[-2] = 3;
- if (*(cur - d2 + 3) != cur[3])
- break;
- UPDATE_maxLen
- distances[-2] = maxLen;
- if (maxLen == lenLimit)
- {
- p->son[p->cyclicBufferPos] = curMatch;
- MOVE_POS_RET
- }
- break;
- }
-
- GET_MATCHES_FOOTER_HC(maxLen)
-}
-
-
-UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
-{
- GET_MATCHES_HEADER(3)
- HASH_ZIP_CALC
- curMatch = p->hash[hv];
- p->hash[hv] = p->pos;
- GET_MATCHES_FOOTER_HC(2)
-}
-
-
-static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- SKIP_HEADER(2)
- {
- HASH2_CALC
- curMatch = p->hash[hv];
- p->hash[hv] = p->pos;
- }
- SKIP_FOOTER
-}
-
-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- SKIP_HEADER(3)
- {
- HASH_ZIP_CALC
- curMatch = p->hash[hv];
- p->hash[hv] = p->pos;
- }
- SKIP_FOOTER
-}
-
-static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- SKIP_HEADER(3)
- {
- UInt32 h2;
- UInt32 *hash;
- HASH3_CALC
- hash = p->hash;
- curMatch = (hash + kFix3HashSize)[hv];
- hash[h2] =
- (hash + kFix3HashSize)[hv] = p->pos;
- }
- SKIP_FOOTER
-}
-
-static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- SKIP_HEADER(4)
- {
- UInt32 h2, h3;
- UInt32 *hash;
- HASH4_CALC
- hash = p->hash;
- curMatch = (hash + kFix4HashSize)[hv];
- hash [h2] =
- (hash + kFix3HashSize)[h3] =
- (hash + kFix4HashSize)[hv] = p->pos;
- }
- SKIP_FOOTER
-}
-
-static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- SKIP_HEADER(5)
- {
- UInt32 h2, h3;
- UInt32 *hash;
- HASH5_CALC
- hash = p->hash;
- curMatch = (hash + kFix5HashSize)[hv];
- hash [h2] =
- (hash + kFix3HashSize)[h3] =
- // (hash + kFix4HashSize)[h4] =
- (hash + kFix5HashSize)[hv] = p->pos;
- }
- SKIP_FOOTER
-}
-
-
-#define HC_SKIP_HEADER(minLen) \
- do { if (p->lenLimit < minLen) { MatchFinder_MovePos(p); num--; continue; } { \
- const Byte *cur; \
- UInt32 *hash; \
- UInt32 *son; \
- UInt32 pos = p->pos; \
- UInt32 num2 = num; \
- /* (p->pos == p->posLimit) is not allowed here !!! */ \
- { const UInt32 rem = p->posLimit - pos; if (num2 > rem) num2 = rem; } \
- num -= num2; \
- { const UInt32 cycPos = p->cyclicBufferPos; \
- son = p->son + cycPos; \
- p->cyclicBufferPos = cycPos + num2; } \
- cur = p->buffer; \
- hash = p->hash; \
- do { \
- UInt32 curMatch; \
- UInt32 hv;
-
-
-#define HC_SKIP_FOOTER \
- cur++; pos++; *son++ = curMatch; \
- } while (--num2); \
- p->buffer = cur; \
- p->pos = pos; \
- if (pos == p->posLimit) MatchFinder_CheckLimits(p); \
- }} while(num); \
-
-
-static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- HC_SKIP_HEADER(4)
-
- UInt32 h2, h3;
- HASH4_CALC
- curMatch = (hash + kFix4HashSize)[hv];
- hash [h2] =
- (hash + kFix3HashSize)[h3] =
- (hash + kFix4HashSize)[hv] = pos;
-
- HC_SKIP_FOOTER
-}
-
-
-static void Hc5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- HC_SKIP_HEADER(5)
-
- UInt32 h2, h3;
- HASH5_CALC
- curMatch = (hash + kFix5HashSize)[hv];
- hash [h2] =
- (hash + kFix3HashSize)[h3] =
- // (hash + kFix4HashSize)[h4] =
- (hash + kFix5HashSize)[hv] = pos;
-
- HC_SKIP_FOOTER
-}
-
-
-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
-{
- HC_SKIP_HEADER(3)
-
- HASH_ZIP_CALC
- curMatch = hash[hv];
- hash[hv] = pos;
-
- HC_SKIP_FOOTER
-}
-
-
-void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable)
-{
- vTable->Init = (Mf_Init_Func)MatchFinder_Init;
- vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
- vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
- if (!p->btMode)
- {
- if (p->numHashBytes <= 4)
- {
- vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
- vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
- }
- else
- {
- vTable->GetMatches = (Mf_GetMatches_Func)Hc5_MatchFinder_GetMatches;
- vTable->Skip = (Mf_Skip_Func)Hc5_MatchFinder_Skip;
- }
- }
- else if (p->numHashBytes == 2)
- {
- vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
- vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
- }
- else if (p->numHashBytes == 3)
- {
- vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
- vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
- }
- else if (p->numHashBytes == 4)
- {
- vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
- vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
- }
- else
- {
- vTable->GetMatches = (Mf_GetMatches_Func)Bt5_MatchFinder_GetMatches;
- vTable->Skip = (Mf_Skip_Func)Bt5_MatchFinder_Skip;
- }
-}
-
-
-
-void LzFindPrepare(void)
-{
- #ifndef FORCE_LZFIND_SATUR_SUB_128
- #ifdef USE_LZFIND_SATUR_SUB_128
- LZFIND_SATUR_SUB_CODE_FUNC f = NULL;
- #ifdef MY_CPU_ARM_OR_ARM64
- {
- if (CPU_IsSupported_NEON())
- {
- // #pragma message ("=== LzFind NEON")
- PRF(printf("\n=== LzFind NEON\n"));
- f = LzFind_SaturSub_128;
- }
- // f = 0; // for debug
- }
- #else // MY_CPU_ARM_OR_ARM64
- if (CPU_IsSupported_SSE41())
- {
- // #pragma message ("=== LzFind SSE41")
- PRF(printf("\n=== LzFind SSE41\n"));
- f = LzFind_SaturSub_128;
-
- #ifdef USE_LZFIND_SATUR_SUB_256
- if (CPU_IsSupported_AVX2())
- {
- // #pragma message ("=== LzFind AVX2")
- PRF(printf("\n=== LzFind AVX2\n"));
- f = LzFind_SaturSub_256;
- }
- #endif
- }
- #endif // MY_CPU_ARM_OR_ARM64
- g_LzFind_SaturSub = f;
- #endif // USE_LZFIND_SATUR_SUB_128
- #endif // FORCE_LZFIND_SATUR_SUB_128
-}
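
LzFindPrepare() above only chooses which vectorized routine, if any, g_LzFind_SaturSub will point to; the work those routines do is the saturating subtraction that MatchFinder_Normalize3() applies when positions are rebased. As rough orientation only — the SSE4.1/AVX2/NEON versions process several entries per step, and this scalar form is an assumption about their per-element effect, not code from this file:

static void LzFind_SaturSub_Scalar_Sketch(UInt32 subValue, CLzRef *items, size_t numItems)
{
  size_t i;
  for (i = 0; i < numItems; i++)
  {
    const UInt32 v = items[i];
    /* entries at or below subValue saturate to 0 (the empty-hash value) */
    items[i] = (v <= subValue) ? 0 : (v - subValue);
  }
}
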
-
-
-#undef MOVE_POS
-#undef MOVE_POS_RET
-#undef PRF
diff --git a/3rdparty/7z/src/LzFind.h b/3rdparty/7z/src/LzFind.h
deleted file mode 100644
index 5acdb98647..0000000000
--- a/3rdparty/7z/src/LzFind.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* LzFind.h -- Match finder for LZ algorithms
-2023-03-04 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZ_FIND_H
-#define ZIP7_INC_LZ_FIND_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-typedef UInt32 CLzRef;
-
-typedef struct
-{
- const Byte *buffer;
- UInt32 pos;
- UInt32 posLimit;
- UInt32 streamPos; /* wrap over Zero is allowed (streamPos < pos). Use (UInt32)(streamPos - pos) */
- UInt32 lenLimit;
-
- UInt32 cyclicBufferPos;
- UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
-
- Byte streamEndWasReached;
- Byte btMode;
- Byte bigHash;
- Byte directInput;
-
- UInt32 matchMaxLen;
- CLzRef *hash;
- CLzRef *son;
- UInt32 hashMask;
- UInt32 cutValue;
-
- Byte *bufBase;
- ISeqInStreamPtr stream;
-
- UInt32 blockSize;
- UInt32 keepSizeBefore;
- UInt32 keepSizeAfter;
-
- UInt32 numHashBytes;
- size_t directInputRem;
- UInt32 historySize;
- UInt32 fixedHashSize;
- Byte numHashBytes_Min;
- Byte numHashOutBits;
- Byte _pad2_[2];
- SRes result;
- UInt32 crc[256];
- size_t numRefs;
-
- UInt64 expectedDataSize;
-} CMatchFinder;
-
-#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((const Byte *)(p)->buffer)
-
-#define Inline_MatchFinder_GetNumAvailableBytes(p) ((UInt32)((p)->streamPos - (p)->pos))
-
-/*
-#define Inline_MatchFinder_IsFinishedOK(p) \
- ((p)->streamEndWasReached \
- && (p)->streamPos == (p)->pos \
- && (!(p)->directInput || (p)->directInputRem == 0))
-*/
-
-int MatchFinder_NeedMove(CMatchFinder *p);
-/* Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); */
-void MatchFinder_MoveBlock(CMatchFinder *p);
-void MatchFinder_ReadIfRequired(CMatchFinder *p);
-
-void MatchFinder_Construct(CMatchFinder *p);
-
-/* (directInput = 0) is default value.
- It's required to provide correct (directInput) value
- before calling MatchFinder_Create().
- You can set (directInput) by any of the following calls:
- - MatchFinder_SET_DIRECT_INPUT_BUF()
- - MatchFinder_SET_STREAM()
- - MatchFinder_SET_STREAM_MODE()
-*/
-
-#define MatchFinder_SET_DIRECT_INPUT_BUF(p, _src_, _srcLen_) { \
- (p)->stream = NULL; \
- (p)->directInput = 1; \
- (p)->buffer = (_src_); \
- (p)->directInputRem = (_srcLen_); }
-
-/*
-#define MatchFinder_SET_STREAM_MODE(p) { \
- (p)->directInput = 0; }
-*/
-
-#define MatchFinder_SET_STREAM(p, _stream_) { \
- (p)->stream = _stream_; \
- (p)->directInput = 0; }
-
-
-int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
- UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
- ISzAllocPtr alloc);
-void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc);
-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems);
-
-/*
-#define MatchFinder_INIT_POS(p, val) \
- (p)->pos = (val); \
- (p)->streamPos = (val);
-*/
-
-// void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
-#define MatchFinder_REDUCE_OFFSETS(p, subValue) \
- (p)->pos -= (subValue); \
- (p)->streamPos -= (subValue);
-
-
-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
- UInt32 *distances, UInt32 maxLen);
-
-/*
-Conditions:
- Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
- Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
-*/
-
-typedef void (*Mf_Init_Func)(void *object);
-typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
-typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
-typedef UInt32 * (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
-typedef void (*Mf_Skip_Func)(void *object, UInt32);
-
-typedef struct
-{
- Mf_Init_Func Init;
- Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
- Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
- Mf_GetMatches_Func GetMatches;
- Mf_Skip_Func Skip;
-} IMatchFinder2;
-
-void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable);
-
-void MatchFinder_Init_LowHash(CMatchFinder *p);
-void MatchFinder_Init_HighHash(CMatchFinder *p);
-void MatchFinder_Init_4(CMatchFinder *p);
-void MatchFinder_Init(CMatchFinder *p);
-
-UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
-UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
-
-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
-
-void LzFindPrepare(void);
-
-EXTERN_C_END
-
-#endif
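
The header above boils down to a small vtable-driven API: fill in a CMatchFinder, create it, build an IMatchFinder2 vtable, then pump GetMatches(). The sketch below is a hypothetical driver — the buffer, the sizes and the hash-chain/binary-tree choice are placeholder values, error handling is minimal, and it is not code taken from the emulator or from 7-Zip:

static void MatchFinder_Usage_Sketch(const Byte *src, size_t srcLen, ISzAllocPtr alloc)
{
  CMatchFinder mf;
  IMatchFinder2 vt;
  UInt32 distances[273 * 2 + 2];       /* room for (len, dist - 1) pairs */

  MatchFinder_Construct(&mf);
  mf.numHashBytes = 4;                 /* Hc4/Bt4 family */
  mf.btMode = 0;                       /* 0 = hash chain, 1 = binary tree */
  MatchFinder_SET_DIRECT_INPUT_BUF(&mf, src, srcLen)
  if (MatchFinder_Create(&mf, 1 << 22 /* historySize */, 0, 273 /* matchMaxLen */, 0, alloc))
  {
    MatchFinder_CreateVTable(&mf, &vt);
    vt.Init(&mf);
    while (vt.GetNumAvailableBytes(&mf) != 0)
    {
      const UInt32 *end = vt.GetMatches(&mf, distances);
      /* distances..end now holds (len, dist - 1) pairs for the current byte;
         GetMatches also advances the match finder by one position */
      (void)end;
    }
  }
  MatchFinder_Free(&mf, alloc);
}
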
diff --git a/3rdparty/7z/src/LzFindMt.c b/3rdparty/7z/src/LzFindMt.c
deleted file mode 100644
index 32b9ecb417..0000000000
--- a/3rdparty/7z/src/LzFindMt.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-/* LzFindMt.c -- multithreaded Match finder for LZ algorithms
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-// #include
-
-#include "CpuArch.h"
-
-#include "LzHash.h"
-#include "LzFindMt.h"
-
-// #define LOG_ITERS
-
-// #define LOG_THREAD
-
-#ifdef LOG_THREAD
-#include <stdio.h>
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-
-#ifdef LOG_ITERS
-#include <stdio.h>
-extern UInt64 g_NumIters_Tree;
-extern UInt64 g_NumIters_Loop;
-extern UInt64 g_NumIters_Bytes;
-#define LOG_ITER(x) x
-#else
-#define LOG_ITER(x)
-#endif
-
-#define kMtHashBlockSize ((UInt32)1 << 17)
-#define kMtHashNumBlocks (1 << 1)
-
-#define GET_HASH_BLOCK_OFFSET(i) (((i) & (kMtHashNumBlocks - 1)) * kMtHashBlockSize)
-
-#define kMtBtBlockSize ((UInt32)1 << 16)
-#define kMtBtNumBlocks (1 << 4)
-
-#define GET_BT_BLOCK_OFFSET(i) (((i) & (kMtBtNumBlocks - 1)) * (size_t)kMtBtBlockSize)
-
-/*
- HASH functions:
- We use raw 8/16 bits from a[1] and a[2],
- xored with crc(a[0]) and crc(a[3]).
- We check a[0], a[3] only. We don't need to compare a[1] and a[2] in matches.
- our crc() function provides one-to-one correspondence for low 8-bit values:
- (crc[0...0xFF] & 0xFF) <-> [0...0xFF]
-*/
-
-#define MF(mt) ((mt)->MatchFinder)
-#define MF_CRC (p->crc)
-
-// #define MF(mt) (&(mt)->MatchFinder)
-// #define MF_CRC (p->MatchFinder.crc)
-
-#define MT_HASH2_CALC \
- h2 = (MF_CRC[cur[0]] ^ cur[1]) & (kHash2Size - 1);
-
-#define MT_HASH3_CALC { \
- UInt32 temp = MF_CRC[cur[0]] ^ cur[1]; \
- h2 = temp & (kHash2Size - 1); \
- h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
-
-/*
-#define MT_HASH3_CALC__NO_2 { \
- UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
- h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
-
-#define MT_HASH4_CALC { \
- UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
- h2 = temp & (kHash2Size - 1); \
- temp ^= ((UInt32)cur[2] << 8); \
- h3 = temp & (kHash3Size - 1); \
- h4 = (temp ^ (p->crc[cur[3]] << kLzHash_CrcShift_1)) & p->hash4Mask; }
- // (kHash4Size - 1);
-*/
-
-
-Z7_NO_INLINE
-static void MtSync_Construct(CMtSync *p)
-{
- p->affinity = 0;
- p->wasCreated = False;
- p->csWasInitialized = False;
- p->csWasEntered = False;
- Thread_CONSTRUCT(&p->thread)
- Event_Construct(&p->canStart);
- Event_Construct(&p->wasStopped);
- Semaphore_Construct(&p->freeSemaphore);
- Semaphore_Construct(&p->filledSemaphore);
-}
-
-
-#define DEBUG_BUFFER_LOCK // define it to debug lock state
-
-#ifdef DEBUG_BUFFER_LOCK
-#include <stdlib.h>
-#define BUFFER_MUST_BE_LOCKED(p) if (!(p)->csWasEntered) exit(1);
-#define BUFFER_MUST_BE_UNLOCKED(p) if ( (p)->csWasEntered) exit(1);
-#else
-#define BUFFER_MUST_BE_LOCKED(p)
-#define BUFFER_MUST_BE_UNLOCKED(p)
-#endif
-
-#define LOCK_BUFFER(p) { \
- BUFFER_MUST_BE_UNLOCKED(p); \
- CriticalSection_Enter(&(p)->cs); \
- (p)->csWasEntered = True; }
-
-#define UNLOCK_BUFFER(p) { \
- BUFFER_MUST_BE_LOCKED(p); \
- CriticalSection_Leave(&(p)->cs); \
- (p)->csWasEntered = False; }
-
-
-Z7_NO_INLINE
-static UInt32 MtSync_GetNextBlock(CMtSync *p)
-{
- UInt32 numBlocks = 0;
- if (p->needStart)
- {
- BUFFER_MUST_BE_UNLOCKED(p)
- p->numProcessedBlocks = 1;
- p->needStart = False;
- p->stopWriting = False;
- p->exit = False;
- Event_Reset(&p->wasStopped);
- Event_Set(&p->canStart);
- }
- else
- {
- UNLOCK_BUFFER(p)
- // we free current block
- numBlocks = p->numProcessedBlocks++;
- Semaphore_Release1(&p->freeSemaphore);
- }
-
- // buffer is UNLOCKED here
- Semaphore_Wait(&p->filledSemaphore);
- LOCK_BUFFER(p)
- return numBlocks;
-}
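
Besides the canStart/wasStopped events used for start-up and shutdown, block hand-off is a plain two-semaphore bounded buffer, and MtSync_GetNextBlock() is its consumer half. A schematic of the steady-state exchange (block indices wrap modulo the ring size passed to MtSync_Init()):

/* worker thread (producer)                LZ-side consumer (MtSync_GetNextBlock)
   ------------------------                --------------------------------------
   Semaphore_Wait(&freeSemaphore)          Semaphore_Release1(&freeSemaphore)   // previous block is free again
   fill block (blockIndex % numBlocks)     Semaphore_Wait(&filledSemaphore)
   Semaphore_Release1(&filledSemaphore)    read block (numProcessedBlocks % numBlocks) under the lock */
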
-
-
-/* if Writing (Processing) thread was started, we must call MtSync_StopWriting() */
-
-Z7_NO_INLINE
-static void MtSync_StopWriting(CMtSync *p)
-{
- if (!Thread_WasCreated(&p->thread) || p->needStart)
- return;
-
- PRF(printf("\nMtSync_StopWriting %p\n", p));
-
- if (p->csWasEntered)
- {
- /* we don't use buffer in this thread after StopWriting().
- So we UNLOCK buffer.
- And we restore default UNLOCKED state for stopped thread */
- UNLOCK_BUFFER(p)
- }
-
- /* We send (p->stopWriting) message and release freeSemaphore
- to free current block.
- So the thread will see (p->stopWriting) at some
- iteration after Wait(freeSemaphore).
- The thread doesn't need to fill all avail free blocks,
- so we can get fast thread stop.
- */
-
- p->stopWriting = True;
- Semaphore_Release1(&p->freeSemaphore); // check semaphore count !!!
-
- PRF(printf("\nMtSync_StopWriting %p : Event_Wait(&p->wasStopped)\n", p));
- Event_Wait(&p->wasStopped);
- PRF(printf("\nMtSync_StopWriting %p : Event_Wait() finsihed\n", p));
-
- /* 21.03 : we don't restore semaphore counters here.
- We will recreate and reinit semaphores at the next start */
-
- p->needStart = True;
-}
-
-
-Z7_NO_INLINE
-static void MtSync_Destruct(CMtSync *p)
-{
- PRF(printf("\nMtSync_Destruct %p\n", p));
-
- if (Thread_WasCreated(&p->thread))
- {
- /* we want thread to be in Stopped state before sending EXIT command.
- note: stop(btSync) will stop (htSync) also */
- MtSync_StopWriting(p);
- /* thread in Stopped state here : (p->needStart == true) */
- p->exit = True;
- // if (p->needStart) // it's (true)
- Event_Set(&p->canStart); // we send EXIT command to thread
- Thread_Wait_Close(&p->thread); // we wait thread finishing
- }
-
- if (p->csWasInitialized)
- {
- CriticalSection_Delete(&p->cs);
- p->csWasInitialized = False;
- }
- p->csWasEntered = False;
-
- Event_Close(&p->canStart);
- Event_Close(&p->wasStopped);
- Semaphore_Close(&p->freeSemaphore);
- Semaphore_Close(&p->filledSemaphore);
-
- p->wasCreated = False;
-}
-
-
-// #define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; }
-// we want to get real system error codes here instead of SZ_ERROR_THREAD
-#define RINOK_THREAD(x) RINOK_WRes(x)
-
-
-// call it before each new file (when new starting is required):
-Z7_NO_INLINE
-static SRes MtSync_Init(CMtSync *p, UInt32 numBlocks)
-{
- WRes wres;
- // BUFFER_MUST_BE_UNLOCKED(p)
- if (!p->needStart || p->csWasEntered)
- return SZ_ERROR_FAIL;
- wres = Semaphore_OptCreateInit(&p->freeSemaphore, numBlocks, numBlocks);
- if (wres == 0)
- wres = Semaphore_OptCreateInit(&p->filledSemaphore, 0, numBlocks);
- return MY_SRes_HRESULT_FROM_WRes(wres);
-}
-
-
-static WRes MtSync_Create_WRes(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj)
-{
- WRes wres;
-
- if (p->wasCreated)
- return SZ_OK;
-
- RINOK_THREAD(CriticalSection_Init(&p->cs))
- p->csWasInitialized = True;
- p->csWasEntered = False;
-
- RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canStart))
- RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStopped))
-
- p->needStart = True;
- p->exit = True; /* p->exit is unused before (canStart) Event.
- But in case of some unexpected code failure we will get fast exit from thread */
-
- // return ERROR_TOO_MANY_POSTS; // for debug
- // return EINVAL; // for debug
-
- if (p->affinity != 0)
- wres = Thread_Create_With_Affinity(&p->thread, startAddress, obj, (CAffinityMask)p->affinity);
- else
- wres = Thread_Create(&p->thread, startAddress, obj);
-
- RINOK_THREAD(wres)
- p->wasCreated = True;
- return SZ_OK;
-}
-
-
-Z7_NO_INLINE
-static SRes MtSync_Create(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj)
-{
- const WRes wres = MtSync_Create_WRes(p, startAddress, obj);
- if (wres == 0)
- return 0;
- MtSync_Destruct(p);
- return MY_SRes_HRESULT_FROM_WRes(wres);
-}
-
-
-// ---------- HASH THREAD ----------
-
-#define kMtMaxValForNormalize 0xFFFFFFFF
-// #define kMtMaxValForNormalize ((1 << 21)) // for debug
-// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses
-
-#ifdef MY_CPU_LE_UNALIGN
- #define GetUi24hi_from32(p) ((UInt32)GetUi32(p) >> 8)
-#else
- #define GetUi24hi_from32(p) ((p)[1] ^ ((UInt32)(p)[2] << 8) ^ ((UInt32)(p)[3] << 16))
-#endif
-
-#define GetHeads_DECL(name) \
- static void GetHeads ## name(const Byte *p, UInt32 pos, \
- UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc)
-
-#define GetHeads_LOOP(v) \
- for (; numHeads != 0; numHeads--) { \
- const UInt32 value = (v); \
- p++; \
- *heads++ = pos - hash[value]; \
- hash[value] = pos++; }
-
-#define DEF_GetHeads2(name, v, action) \
- GetHeads_DECL(name) { action \
- GetHeads_LOOP(v) }
-
-#define DEF_GetHeads(name, v) DEF_GetHeads2(name, v, ;)
-
-DEF_GetHeads2(2, GetUi16(p), UNUSED_VAR(hashMask); UNUSED_VAR(crc); )
-DEF_GetHeads(3, (crc[p[0]] ^ GetUi16(p + 1)) & hashMask)
-DEF_GetHeads2(3b, GetUi16(p) ^ ((UInt32)(p)[2] << 16), UNUSED_VAR(hashMask); UNUSED_VAR(crc); )
-// BT3 is not good for crc collisions for big hashMask values.
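
Note that GetHeads_LOOP stores backward deltas, not absolute positions: heads[i] is the distance from the current position back to the previous position that hashed to the same bucket. The helper below is only an illustrative restatement of how such a delta is turned back into a candidate position (the real consumer is GetMatchesSpecN_2() in LzFindOpt.c):

static UInt32 HeadDeltaToCandidate_Sketch(UInt32 pos, UInt32 delta, UInt32 windowSize)
{
  /* a delta of 0, or one that reaches outside the cyclic window, means "no usable candidate" */
  return (delta != 0 && delta < windowSize) ? (pos - delta) : 0;
}
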
-
-/*
-GetHeads_DECL(3b)
-{
- UNUSED_VAR(hashMask);
- UNUSED_VAR(crc);
- {
- const Byte *pLim = p + numHeads;
- if (numHeads == 0)
- return;
- pLim--;
- while (p < pLim)
- {
- UInt32 v1 = GetUi32(p);
- UInt32 v0 = v1 & 0xFFFFFF;
- UInt32 h0, h1;
- p += 2;
- v1 >>= 8;
- h0 = hash[v0]; hash[v0] = pos; heads[0] = pos - h0; pos++;
- h1 = hash[v1]; hash[v1] = pos; heads[1] = pos - h1; pos++;
- heads += 2;
- }
- if (p == pLim)
- {
- UInt32 v0 = GetUi16(p) ^ ((UInt32)(p)[2] << 16);
- *heads = pos - hash[v0];
- hash[v0] = pos;
- }
- }
-}
-*/
-
-/*
-GetHeads_DECL(4)
-{
- unsigned sh = 0;
- UNUSED_VAR(crc)
- while ((hashMask & 0x80000000) == 0)
- {
- hashMask <<= 1;
- sh++;
- }
- GetHeads_LOOP((GetUi32(p) * 0xa54a1) >> sh)
-}
-#define GetHeads4b GetHeads4
-*/
-
-#define USE_GetHeads_LOCAL_CRC
-
-#ifdef USE_GetHeads_LOCAL_CRC
-
-GetHeads_DECL(4)
-{
- UInt32 crc0[256];
- UInt32 crc1[256];
- {
- unsigned i;
- for (i = 0; i < 256; i++)
- {
- UInt32 v = crc[i];
- crc0[i] = v & hashMask;
- crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
- // crc1[i] = rotlFixed(v, 8) & hashMask;
- }
- }
- GetHeads_LOOP(crc0[p[0]] ^ crc1[p[3]] ^ (UInt32)GetUi16(p+1))
-}
-
-GetHeads_DECL(4b)
-{
- UInt32 crc0[256];
- {
- unsigned i;
- for (i = 0; i < 256; i++)
- crc0[i] = crc[i] & hashMask;
- }
- GetHeads_LOOP(crc0[p[0]] ^ GetUi24hi_from32(p))
-}
-
-GetHeads_DECL(5)
-{
- UInt32 crc0[256];
- UInt32 crc1[256];
- UInt32 crc2[256];
- {
- unsigned i;
- for (i = 0; i < 256; i++)
- {
- UInt32 v = crc[i];
- crc0[i] = v & hashMask;
- crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
- crc2[i] = (v << kLzHash_CrcShift_2) & hashMask;
- }
- }
- GetHeads_LOOP(crc0[p[0]] ^ crc1[p[3]] ^ crc2[p[4]] ^ (UInt32)GetUi16(p+1))
-}
-
-GetHeads_DECL(5b)
-{
- UInt32 crc0[256];
- UInt32 crc1[256];
- {
- unsigned i;
- for (i = 0; i < 256; i++)
- {
- UInt32 v = crc[i];
- crc0[i] = v & hashMask;
- crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
- }
- }
- GetHeads_LOOP(crc0[p[0]] ^ crc1[p[4]] ^ GetUi24hi_from32(p))
-}
-
-#else
-
-DEF_GetHeads(4, (crc[p[0]] ^ (crc[p[3]] << kLzHash_CrcShift_1) ^ (UInt32)GetUi16(p+1)) & hashMask)
-DEF_GetHeads(4b, (crc[p[0]] ^ GetUi24hi_from32(p)) & hashMask)
-DEF_GetHeads(5, (crc[p[0]] ^ (crc[p[3]] << kLzHash_CrcShift_1) ^ (crc[p[4]] << kLzHash_CrcShift_2) ^ (UInt32)GetUi16(p + 1)) & hashMask)
-DEF_GetHeads(5b, (crc[p[0]] ^ (crc[p[4]] << kLzHash_CrcShift_1) ^ GetUi24hi_from32(p)) & hashMask)
-
-#endif
-
-
-static void HashThreadFunc(CMatchFinderMt *mt)
-{
- CMtSync *p = &mt->hashSync;
- PRF(printf("\nHashThreadFunc\n"));
-
- for (;;)
- {
- UInt32 blockIndex = 0;
- PRF(printf("\nHashThreadFunc : Event_Wait(&p->canStart)\n"));
- Event_Wait(&p->canStart);
- PRF(printf("\nHashThreadFunc : Event_Wait(&p->canStart) : after \n"));
- if (p->exit)
- {
- PRF(printf("\nHashThreadFunc : exit \n"));
- return;
- }
-
- MatchFinder_Init_HighHash(MF(mt));
-
- for (;;)
- {
- PRF(printf("Hash thread block = %d pos = %d\n", (unsigned)blockIndex, mt->MatchFinder->pos));
-
- {
- CMatchFinder *mf = MF(mt);
- if (MatchFinder_NeedMove(mf))
- {
- CriticalSection_Enter(&mt->btSync.cs);
- CriticalSection_Enter(&mt->hashSync.cs);
- {
- const Byte *beforePtr = Inline_MatchFinder_GetPointerToCurrentPos(mf);
- ptrdiff_t offset;
- MatchFinder_MoveBlock(mf);
- offset = beforePtr - Inline_MatchFinder_GetPointerToCurrentPos(mf);
- mt->pointerToCurPos -= offset;
- mt->buffer -= offset;
- }
- CriticalSection_Leave(&mt->hashSync.cs);
- CriticalSection_Leave(&mt->btSync.cs);
- continue;
- }
-
- Semaphore_Wait(&p->freeSemaphore);
-
- if (p->exit) // exit is unexpected here. But we check it here for some failure case
- return;
-
- // for faster stop : we check (p->stopWriting) after Wait(freeSemaphore)
- if (p->stopWriting)
- break;
-
- MatchFinder_ReadIfRequired(mf);
- {
- UInt32 *heads = mt->hashBuf + GET_HASH_BLOCK_OFFSET(blockIndex++);
- UInt32 num = Inline_MatchFinder_GetNumAvailableBytes(mf);
- heads[0] = 2;
- heads[1] = num;
-
- /* heads[1] contains the number of avail bytes:
- if (avail < mf->numHashBytes) :
- {
- it means that stream was finished
- HASH_THREAD and BT_THREAD must advance the position by heads[1] (avail) bytes.
- HASH_THREAD doesn't stop,
- HASH_THREAD fills only the header (2 numbers) for all next blocks:
- {2, NumHashBytes - 1}, {2,0}, {2,0}, ... , {2,0}
- }
- else
- {
- HASH_THREAD and BT_THREAD must advance the position by (heads[0] - 2) bytes;
- }
- */
-
- if (num >= mf->numHashBytes)
- {
- num = num - mf->numHashBytes + 1;
- if (num > kMtHashBlockSize - 2)
- num = kMtHashBlockSize - 2;
-
- if (mf->pos > (UInt32)kMtMaxValForNormalize - num)
- {
- const UInt32 subValue = (mf->pos - mf->historySize - 1); // & ~(UInt32)(kNormalizeAlign - 1);
- MatchFinder_REDUCE_OFFSETS(mf, subValue)
- MatchFinder_Normalize3(subValue, mf->hash + mf->fixedHashSize, (size_t)mf->hashMask + 1);
- }
-
- heads[0] = 2 + num;
- mt->GetHeadsFunc(mf->buffer, mf->pos, mf->hash + mf->fixedHashSize, mf->hashMask, heads + 2, num, mf->crc);
- }
-
- mf->pos += num; // wrap over zero is allowed at the end of stream
- mf->buffer += num;
- }
- }
-
- Semaphore_Release1(&p->filledSemaphore);
- } // for() processing end
-
- // p->numBlocks_Sent = blockIndex;
- Event_Set(&p->wasStopped);
- } // for() thread end
-}
-
-
-
-
-// ---------- BT THREAD ----------
-
-/* we use one variable instead of two (cyclicBufferPos == pos) before CyclicBuf wrap.
- here we define fixed offset of (p->pos) from (p->cyclicBufferPos) */
-#define CYC_TO_POS_OFFSET 0
-// #define CYC_TO_POS_OFFSET 1 // for debug
-
-#define MFMT_GM_INLINE
-
-#ifdef MFMT_GM_INLINE
-
-/*
- we use size_t for (pos) instead of UInt32
- to eliminate "movsx" BUG in old MSVC x64 compiler.
-*/
-
-
-UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
- UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
- UInt32 *posRes);
-
-#endif
-
-
-static void BtGetMatches(CMatchFinderMt *p, UInt32 *d)
-{
- UInt32 numProcessed = 0;
- UInt32 curPos = 2;
-
- /* GetMatchesSpec() functions don't create (len = 1)
- in [len, dist] match pairs, if (p->numHashBytes >= 2)
- Also we suppose here that (matchMaxLen >= 2).
- So the following code for (reserve) is not required
- UInt32 reserve = (p->matchMaxLen * 2);
- const UInt32 kNumHashBytes_Max = 5; // BT_HASH_BYTES_MAX
- if (reserve < kNumHashBytes_Max - 1)
- reserve = kNumHashBytes_Max - 1;
- const UInt32 limit = kMtBtBlockSize - (reserve);
- */
-
- const UInt32 limit = kMtBtBlockSize - (p->matchMaxLen * 2);
-
- d[1] = p->hashNumAvail;
-
- if (p->failure_BT)
- {
- // printf("\n == 1 BtGetMatches() p->failure_BT\n");
- d[0] = 0;
- // d[1] = 0;
- return;
- }
-
- while (curPos < limit)
- {
- if (p->hashBufPos == p->hashBufPosLimit)
- {
- // MatchFinderMt_GetNextBlock_Hash(p);
- UInt32 avail;
- {
- const UInt32 bi = MtSync_GetNextBlock(&p->hashSync);
- const UInt32 k = GET_HASH_BLOCK_OFFSET(bi);
- const UInt32 *h = p->hashBuf + k;
- avail = h[1];
- p->hashBufPosLimit = k + h[0];
- p->hashNumAvail = avail;
- p->hashBufPos = k + 2;
- }
-
- {
- /* we must prevent UInt32 overflow for avail total value,
- if avail was increased with new hash block */
- UInt32 availSum = numProcessed + avail;
- if (availSum < numProcessed)
- availSum = (UInt32)(Int32)-1;
- d[1] = availSum;
- }
-
- if (avail >= p->numHashBytes)
- continue;
-
- // if (p->hashBufPos != p->hashBufPosLimit) exit(1);
-
- /* (avail < p->numHashBytes)
- It means that stream was finished.
- And (avail) - is a number of remaining bytes,
- we fill (d) for (avail) bytes for LZ_THREAD (receiver).
- but we don't update (p->pos) and (p->cyclicBufferPos) here in BT_THREAD */
-
- /* here we suppose that we have space enough:
- (kMtBtBlockSize - curPos >= p->hashNumAvail) */
- p->hashNumAvail = 0;
- d[0] = curPos + avail;
- d += curPos;
- for (; avail != 0; avail--)
- *d++ = 0;
- return;
- }
- {
- UInt32 size = p->hashBufPosLimit - p->hashBufPos;
- UInt32 pos = p->pos;
- UInt32 cyclicBufferPos = p->cyclicBufferPos;
- UInt32 lenLimit = p->matchMaxLen;
- if (lenLimit >= p->hashNumAvail)
- lenLimit = p->hashNumAvail;
- {
- UInt32 size2 = p->hashNumAvail - lenLimit + 1;
- if (size2 < size)
- size = size2;
- size2 = p->cyclicBufferSize - cyclicBufferPos;
- if (size2 < size)
- size = size2;
- }
-
- if (pos > (UInt32)kMtMaxValForNormalize - size)
- {
- const UInt32 subValue = (pos - p->cyclicBufferSize); // & ~(UInt32)(kNormalizeAlign - 1);
- pos -= subValue;
- p->pos = pos;
- MatchFinder_Normalize3(subValue, p->son, (size_t)p->cyclicBufferSize * 2);
- }
-
- #ifndef MFMT_GM_INLINE
- while (curPos < limit && size-- != 0)
- {
- UInt32 *startDistances = d + curPos;
- UInt32 num = (UInt32)(GetMatchesSpec1(lenLimit, pos - p->hashBuf[p->hashBufPos++],
- pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue,
- startDistances + 1, p->numHashBytes - 1) - startDistances);
- *startDistances = num - 1;
- curPos += num;
- cyclicBufferPos++;
- pos++;
- p->buffer++;
- }
- #else
- {
- UInt32 posRes = pos;
- const UInt32 *d_end;
- {
- d_end = GetMatchesSpecN_2(
- p->buffer + lenLimit - 1,
- pos, p->buffer, p->son, p->cutValue, d + curPos,
- p->numHashBytes - 1, p->hashBuf + p->hashBufPos,
- d + limit, p->hashBuf + p->hashBufPos + size,
- cyclicBufferPos, p->cyclicBufferSize,
- &posRes);
- }
- {
- if (!d_end)
- {
- // printf("\n == 2 BtGetMatches() p->failure_BT\n");
- // internal data failure
- p->failure_BT = True;
- d[0] = 0;
- // d[1] = 0;
- return;
- }
- }
- curPos = (UInt32)(d_end - d);
- {
- const UInt32 processed = posRes - pos;
- pos = posRes;
- p->hashBufPos += processed;
- cyclicBufferPos += processed;
- p->buffer += processed;
- }
- }
- #endif
-
- {
- const UInt32 processed = pos - p->pos;
- numProcessed += processed;
- p->hashNumAvail -= processed;
- p->pos = pos;
- }
- if (cyclicBufferPos == p->cyclicBufferSize)
- cyclicBufferPos = 0;
- p->cyclicBufferPos = cyclicBufferPos;
- }
- }
-
- d[0] = curPos;
-}
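
For reference, the block that BtGetMatches() fills (and that MatchFinderMt_GetMatches()/_Skip() later walk) has a flat layout; this is a schematic of what the code above produces, not additional data:

/* one kMtBtBlockSize block of UInt32 values:
     d[0]  total number of UInt32 values used in the block, header included
     d[1]  number of bytes still available to the LZ thread at the start of the block
     then one record per processed position:
       [ n, len1, dist1 - 1, len2, dist2 - 1, ... ]   with n = 2 * number_of_pairs
     a position with no matches is stored as the single value 0 */
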
-
-
-static void BtFillBlock(CMatchFinderMt *p, UInt32 globalBlockIndex)
-{
- CMtSync *sync = &p->hashSync;
-
- BUFFER_MUST_BE_UNLOCKED(sync)
-
- if (!sync->needStart)
- {
- LOCK_BUFFER(sync)
- }
-
- BtGetMatches(p, p->btBuf + GET_BT_BLOCK_OFFSET(globalBlockIndex));
-
- /* We suppose that we have called GetNextBlock() from start.
- So buffer is LOCKED */
-
- UNLOCK_BUFFER(sync)
-}
-
-
-Z7_NO_INLINE
-static void BtThreadFunc(CMatchFinderMt *mt)
-{
- CMtSync *p = &mt->btSync;
- for (;;)
- {
- UInt32 blockIndex = 0;
- Event_Wait(&p->canStart);
-
- for (;;)
- {
- PRF(printf(" BT thread block = %d pos = %d\n", (unsigned)blockIndex, mt->pos));
- /* (p->exit == true) is possible after (p->canStart) at first loop iteration
- and is unexpected after more Wait(freeSemaphore) iterations */
- if (p->exit)
- return;
-
- Semaphore_Wait(&p->freeSemaphore);
-
- // for faster stop : we check (p->stopWriting) after Wait(freeSemaphore)
- if (p->stopWriting)
- break;
-
- BtFillBlock(mt, blockIndex++);
-
- Semaphore_Release1(&p->filledSemaphore);
- }
-
- // we stop HASH_THREAD here
- MtSync_StopWriting(&mt->hashSync);
-
- // p->numBlocks_Sent = blockIndex;
- Event_Set(&p->wasStopped);
- }
-}
-
-
-void MatchFinderMt_Construct(CMatchFinderMt *p)
-{
- p->hashBuf = NULL;
- MtSync_Construct(&p->hashSync);
- MtSync_Construct(&p->btSync);
-}
-
-static void MatchFinderMt_FreeMem(CMatchFinderMt *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->hashBuf);
- p->hashBuf = NULL;
-}
-
-void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc)
-{
- /*
- HASH_THREAD can use CriticalSection(s) btSync.cs and hashSync.cs.
- So we must be sure that HASH_THREAD will not use CriticalSection(s)
- after deleting CriticalSection here.
-
- we call ReleaseStream(p)
- that calls StopWriting(btSync)
- that calls StopWriting(hashSync), if it's required to stop HASH_THREAD.
- after StopWriting() it's safe to destruct MtSync(s) in any order */
-
- MatchFinderMt_ReleaseStream(p);
-
- MtSync_Destruct(&p->btSync);
- MtSync_Destruct(&p->hashSync);
-
- LOG_ITER(
- printf("\nTree %9d * %7d iter = %9d = sum : bytes = %9d\n",
- (UInt32)(g_NumIters_Tree / 1000),
- (UInt32)(((UInt64)g_NumIters_Loop * 1000) / (g_NumIters_Tree + 1)),
- (UInt32)(g_NumIters_Loop / 1000),
- (UInt32)(g_NumIters_Bytes / 1000)
- ));
-
- MatchFinderMt_FreeMem(p, alloc);
-}
-
-
-#define kHashBufferSize (kMtHashBlockSize * kMtHashNumBlocks)
-#define kBtBufferSize (kMtBtBlockSize * kMtBtNumBlocks)
-
-
-static THREAD_FUNC_DECL HashThreadFunc2(void *p) { HashThreadFunc((CMatchFinderMt *)p); return 0; }
-static THREAD_FUNC_DECL BtThreadFunc2(void *p)
-{
- Byte allocaDummy[0x180];
- unsigned i = 0;
- for (i = 0; i < 16; i++)
- allocaDummy[i] = (Byte)0;
- if (allocaDummy[0] == 0)
- BtThreadFunc((CMatchFinderMt *)p);
- return 0;
-}
-
-
-SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
- UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc)
-{
- CMatchFinder *mf = MF(p);
- p->historySize = historySize;
- if (kMtBtBlockSize <= matchMaxLen * 4)
- return SZ_ERROR_PARAM;
- if (!p->hashBuf)
- {
- p->hashBuf = (UInt32 *)ISzAlloc_Alloc(alloc, ((size_t)kHashBufferSize + (size_t)kBtBufferSize) * sizeof(UInt32));
- if (!p->hashBuf)
- return SZ_ERROR_MEM;
- p->btBuf = p->hashBuf + kHashBufferSize;
- }
- keepAddBufferBefore += (kHashBufferSize + kBtBufferSize);
- keepAddBufferAfter += kMtHashBlockSize;
- if (!MatchFinder_Create(mf, historySize, keepAddBufferBefore, matchMaxLen, keepAddBufferAfter, alloc))
- return SZ_ERROR_MEM;
-
- RINOK(MtSync_Create(&p->hashSync, HashThreadFunc2, p))
- RINOK(MtSync_Create(&p->btSync, BtThreadFunc2, p))
- return SZ_OK;
-}
-
-
-SRes MatchFinderMt_InitMt(CMatchFinderMt *p)
-{
- RINOK(MtSync_Init(&p->hashSync, kMtHashNumBlocks))
- return MtSync_Init(&p->btSync, kMtBtNumBlocks);
-}
-
-
-static void MatchFinderMt_Init(CMatchFinderMt *p)
-{
- CMatchFinder *mf = MF(p);
-
- p->btBufPos =
- p->btBufPosLimit = NULL;
- p->hashBufPos =
- p->hashBufPosLimit = 0;
- p->hashNumAvail = 0; // 21.03
-
- p->failure_BT = False;
-
- /* Init without data reading. We don't want to read data in this thread */
- MatchFinder_Init_4(mf);
-
- MatchFinder_Init_LowHash(mf);
-
- p->pointerToCurPos = Inline_MatchFinder_GetPointerToCurrentPos(mf);
- p->btNumAvailBytes = 0;
- p->failure_LZ_BT = False;
- // p->failure_LZ_LZ = False;
-
- p->lzPos =
- 1; // optimal smallest value
- // 0; // for debug: ignores match to start
- // kNormalizeAlign; // for debug
-
- p->hash = mf->hash;
- p->fixedHashSize = mf->fixedHashSize;
- // p->hash4Mask = mf->hash4Mask;
- p->crc = mf->crc;
- // memcpy(p->crc, mf->crc, sizeof(mf->crc));
-
- p->son = mf->son;
- p->matchMaxLen = mf->matchMaxLen;
- p->numHashBytes = mf->numHashBytes;
-
- /* (mf->pos) and (mf->streamPos) were already initialized to 1 in MatchFinder_Init_4() */
- // mf->streamPos = mf->pos = 1; // optimal smallest value
- // 0; // for debug: ignores match to start
- // kNormalizeAlign; // for debug
-
- /* we must init (p->pos = mf->pos) for BT, because
- BT code needs (p->pos == delta_value_for_empty_hash_record == mf->pos) */
- p->pos = mf->pos; // do not change it
-
- p->cyclicBufferPos = (p->pos - CYC_TO_POS_OFFSET);
- p->cyclicBufferSize = mf->cyclicBufferSize;
- p->buffer = mf->buffer;
- p->cutValue = mf->cutValue;
- // p->son[0] = p->son[1] = 0; // unused: to init skipped record for speculated accesses.
-}
-
-
-/* ReleaseStream is required to finish multithreading */
-void MatchFinderMt_ReleaseStream(CMatchFinderMt *p)
-{
- // Sleep(1); // for debug
- MtSync_StopWriting(&p->btSync);
- // Sleep(200); // for debug
- /* p->MatchFinder->ReleaseStream(); */
-}
-
-
-Z7_NO_INLINE
-static UInt32 MatchFinderMt_GetNextBlock_Bt(CMatchFinderMt *p)
-{
- if (p->failure_LZ_BT)
- p->btBufPos = p->failureBuf;
- else
- {
- const UInt32 bi = MtSync_GetNextBlock(&p->btSync);
- const UInt32 *bt = p->btBuf + GET_BT_BLOCK_OFFSET(bi);
- {
- const UInt32 numItems = bt[0];
- p->btBufPosLimit = bt + numItems;
- p->btNumAvailBytes = bt[1];
- p->btBufPos = bt + 2;
- if (numItems < 2 || numItems > kMtBtBlockSize)
- {
- p->failureBuf[0] = 0;
- p->btBufPos = p->failureBuf;
- p->btBufPosLimit = p->failureBuf + 1;
- p->failure_LZ_BT = True;
- // p->btNumAvailBytes = 0;
- /* we don't want to decrease AvailBytes that was loaded before.
- That could be unexpected for code that has loaded another value before */
- }
- }
-
- if (p->lzPos >= (UInt32)kMtMaxValForNormalize - (UInt32)kMtBtBlockSize)
- {
- /* we don't check (lzPos) over exact avail bytes in (btBuf).
- (fixedHashSize) is small, so normalization is fast */
- const UInt32 subValue = (p->lzPos - p->historySize - 1); // & ~(UInt32)(kNormalizeAlign - 1);
- p->lzPos -= subValue;
- MatchFinder_Normalize3(subValue, p->hash, p->fixedHashSize);
- }
- }
- return p->btNumAvailBytes;
-}
-
-
-
-static const Byte * MatchFinderMt_GetPointerToCurrentPos(CMatchFinderMt *p)
-{
- return p->pointerToCurPos;
-}
-
-
-#define GET_NEXT_BLOCK_IF_REQUIRED if (p->btBufPos == p->btBufPosLimit) MatchFinderMt_GetNextBlock_Bt(p);
-
-
-static UInt32 MatchFinderMt_GetNumAvailableBytes(CMatchFinderMt *p)
-{
- if (p->btBufPos != p->btBufPosLimit)
- return p->btNumAvailBytes;
- return MatchFinderMt_GetNextBlock_Bt(p);
-}
-
-
-// #define CHECK_FAILURE_LZ(_match_, _pos_) if (_match_ >= _pos_) { p->failure_LZ_LZ = True; return d; }
-#define CHECK_FAILURE_LZ(_match_, _pos_)
-
-static UInt32 * MixMatches2(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
-{
- UInt32 h2, c2;
- UInt32 *hash = p->hash;
- const Byte *cur = p->pointerToCurPos;
- const UInt32 m = p->lzPos;
- MT_HASH2_CALC
-
- c2 = hash[h2];
- hash[h2] = m;
-
- if (c2 >= matchMinPos)
- {
- CHECK_FAILURE_LZ(c2, m)
- if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
- {
- *d++ = 2;
- *d++ = m - c2 - 1;
- }
- }
-
- return d;
-}
-
-static UInt32 * MixMatches3(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
-{
- UInt32 h2, h3, c2, c3;
- UInt32 *hash = p->hash;
- const Byte *cur = p->pointerToCurPos;
- const UInt32 m = p->lzPos;
- MT_HASH3_CALC
-
- c2 = hash[h2];
- c3 = (hash + kFix3HashSize)[h3];
-
- hash[h2] = m;
- (hash + kFix3HashSize)[h3] = m;
-
- if (c2 >= matchMinPos)
- {
- CHECK_FAILURE_LZ(c2, m)
- if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
- {
- d[1] = m - c2 - 1;
- if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
- {
- d[0] = 3;
- return d + 2;
- }
- d[0] = 2;
- d += 2;
- }
- }
-
- if (c3 >= matchMinPos)
- {
- CHECK_FAILURE_LZ(c3, m)
- if (cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
- {
- *d++ = 3;
- *d++ = m - c3 - 1;
- }
- }
-
- return d;
-}
-
-
-#define INCREASE_LZ_POS p->lzPos++; p->pointerToCurPos++;
-
-/*
-static
-UInt32* MatchFinderMt_GetMatches_Bt4(CMatchFinderMt *p, UInt32 *d)
-{
- const UInt32 *bt = p->btBufPos;
- const UInt32 len = *bt++;
- const UInt32 *btLim = bt + len;
- UInt32 matchMinPos;
- UInt32 avail = p->btNumAvailBytes - 1;
- p->btBufPos = btLim;
-
- {
- p->btNumAvailBytes = avail;
-
- #define BT_HASH_BYTES_MAX 5
-
- matchMinPos = p->lzPos;
-
- if (len != 0)
- matchMinPos -= bt[1];
- else if (avail < (BT_HASH_BYTES_MAX - 1) - 1)
- {
- INCREASE_LZ_POS
- return d;
- }
- else
- {
- const UInt32 hs = p->historySize;
- if (matchMinPos > hs)
- matchMinPos -= hs;
- else
- matchMinPos = 1;
- }
- }
-
- for (;;)
- {
-
- UInt32 h2, h3, c2, c3;
- UInt32 *hash = p->hash;
- const Byte *cur = p->pointerToCurPos;
- UInt32 m = p->lzPos;
- MT_HASH3_CALC
-
- c2 = hash[h2];
- c3 = (hash + kFix3HashSize)[h3];
-
- hash[h2] = m;
- (hash + kFix3HashSize)[h3] = m;
-
- if (c2 >= matchMinPos && cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
- {
- d[1] = m - c2 - 1;
- if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
- {
- d[0] = 3;
- d += 2;
- break;
- }
- // else
- {
- d[0] = 2;
- d += 2;
- }
- }
- if (c3 >= matchMinPos && cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
- {
- *d++ = 3;
- *d++ = m - c3 - 1;
- }
- break;
- }
-
- if (len != 0)
- {
- do
- {
- const UInt32 v0 = bt[0];
- const UInt32 v1 = bt[1];
- bt += 2;
- d[0] = v0;
- d[1] = v1;
- d += 2;
- }
- while (bt != btLim);
- }
- INCREASE_LZ_POS
- return d;
-}
-*/
-
-
-static UInt32 * MixMatches4(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
-{
- UInt32 h2, h3, /* h4, */ c2, c3 /* , c4 */;
- UInt32 *hash = p->hash;
- const Byte *cur = p->pointerToCurPos;
- const UInt32 m = p->lzPos;
- MT_HASH3_CALC
- // MT_HASH4_CALC
- c2 = hash[h2];
- c3 = (hash + kFix3HashSize)[h3];
- // c4 = (hash + kFix4HashSize)[h4];
-
- hash[h2] = m;
- (hash + kFix3HashSize)[h3] = m;
- // (hash + kFix4HashSize)[h4] = m;
-
- // #define BT5_USE_H2
- // #ifdef BT5_USE_H2
- if (c2 >= matchMinPos && cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
- {
- d[1] = m - c2 - 1;
- if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
- {
- // d[0] = (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 3] == cur[3]) ? 4 : 3;
- // return d + 2;
-
- if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 3] == cur[3])
- {
- d[0] = 4;
- return d + 2;
- }
- d[0] = 3;
- d += 2;
-
- #ifdef BT5_USE_H4
- if (c4 >= matchMinPos)
- if (
- cur[(ptrdiff_t)c4 - (ptrdiff_t)m] == cur[0] &&
- cur[(ptrdiff_t)c4 - (ptrdiff_t)m + 3] == cur[3]
- )
- {
- *d++ = 4;
- *d++ = m - c4 - 1;
- }
- #endif
- return d;
- }
- d[0] = 2;
- d += 2;
- }
- // #endif
-
- if (c3 >= matchMinPos && cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
- {
- d[1] = m - c3 - 1;
- if (cur[(ptrdiff_t)c3 - (ptrdiff_t)m + 3] == cur[3])
- {
- d[0] = 4;
- return d + 2;
- }
- d[0] = 3;
- d += 2;
- }
-
- #ifdef BT5_USE_H4
- if (c4 >= matchMinPos)
- if (
- cur[(ptrdiff_t)c4 - (ptrdiff_t)m] == cur[0] &&
- cur[(ptrdiff_t)c4 - (ptrdiff_t)m + 3] == cur[3]
- )
- {
- *d++ = 4;
- *d++ = m - c4 - 1;
- }
- #endif
-
- return d;
-}
-
-
-static UInt32 * MatchFinderMt2_GetMatches(CMatchFinderMt *p, UInt32 *d)
-{
- const UInt32 *bt = p->btBufPos;
- const UInt32 len = *bt++;
- const UInt32 *btLim = bt + len;
- p->btBufPos = btLim;
- p->btNumAvailBytes--;
- INCREASE_LZ_POS
- {
- while (bt != btLim)
- {
- const UInt32 v0 = bt[0];
- const UInt32 v1 = bt[1];
- bt += 2;
- d[0] = v0;
- d[1] = v1;
- d += 2;
- }
- }
- return d;
-}
-
-
-
-static UInt32 * MatchFinderMt_GetMatches(CMatchFinderMt *p, UInt32 *d)
-{
- const UInt32 *bt = p->btBufPos;
- UInt32 len = *bt++;
- const UInt32 avail = p->btNumAvailBytes - 1;
- p->btNumAvailBytes = avail;
- p->btBufPos = bt + len;
- if (len == 0)
- {
- #define BT_HASH_BYTES_MAX 5
- if (avail >= (BT_HASH_BYTES_MAX - 1) - 1)
- {
- UInt32 m = p->lzPos;
- if (m > p->historySize)
- m -= p->historySize;
- else
- m = 1;
- d = p->MixMatchesFunc(p, m, d);
- }
- }
- else
- {
- /*
- first match pair from BinTree: (match_len, match_dist),
- (match_len >= numHashBytes).
- MixMatchesFunc() inserts only hash matches that are nearer than (match_dist)
- */
- d = p->MixMatchesFunc(p, p->lzPos - bt[1], d);
- // if (d) // check for failure
- do
- {
- const UInt32 v0 = bt[0];
- const UInt32 v1 = bt[1];
- bt += 2;
- d[0] = v0;
- d[1] = v1;
- d += 2;
- }
- while (len -= 2);
- }
- INCREASE_LZ_POS
- return d;
-}
-
-#define SKIP_HEADER2_MT do { GET_NEXT_BLOCK_IF_REQUIRED
-#define SKIP_HEADER_MT(n) SKIP_HEADER2_MT if (p->btNumAvailBytes-- >= (n)) { const Byte *cur = p->pointerToCurPos; UInt32 *hash = p->hash;
-#define SKIP_FOOTER_MT } INCREASE_LZ_POS p->btBufPos += (size_t)*p->btBufPos + 1; } while (--num != 0);
-
-static void MatchFinderMt0_Skip(CMatchFinderMt *p, UInt32 num)
-{
- SKIP_HEADER2_MT { p->btNumAvailBytes--;
- SKIP_FOOTER_MT
-}
-
-static void MatchFinderMt2_Skip(CMatchFinderMt *p, UInt32 num)
-{
- SKIP_HEADER_MT(2)
- UInt32 h2;
- MT_HASH2_CALC
- hash[h2] = p->lzPos;
- SKIP_FOOTER_MT
-}
-
-static void MatchFinderMt3_Skip(CMatchFinderMt *p, UInt32 num)
-{
- SKIP_HEADER_MT(3)
- UInt32 h2, h3;
- MT_HASH3_CALC
- (hash + kFix3HashSize)[h3] =
- hash[ h2] =
- p->lzPos;
- SKIP_FOOTER_MT
-}
-
-/*
-// MatchFinderMt4_Skip() is similar to MatchFinderMt3_Skip().
-// The difference is that MatchFinderMt3_Skip() updates hash for last 3 bytes of stream.
-
-static void MatchFinderMt4_Skip(CMatchFinderMt *p, UInt32 num)
-{
- SKIP_HEADER_MT(4)
- UInt32 h2, h3; // h4
- MT_HASH3_CALC
- // MT_HASH4_CALC
- // (hash + kFix4HashSize)[h4] =
- (hash + kFix3HashSize)[h3] =
- hash[ h2] =
- p->lzPos;
- SKIP_FOOTER_MT
-}
-*/
-
-void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable)
-{
- vTable->Init = (Mf_Init_Func)MatchFinderMt_Init;
- vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinderMt_GetNumAvailableBytes;
- vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinderMt_GetPointerToCurrentPos;
- vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt_GetMatches;
-
- switch (MF(p)->numHashBytes)
- {
- case 2:
- p->GetHeadsFunc = GetHeads2;
- p->MixMatchesFunc = (Mf_Mix_Matches)NULL;
- vTable->Skip = (Mf_Skip_Func)MatchFinderMt0_Skip;
- vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt2_GetMatches;
- break;
- case 3:
- p->GetHeadsFunc = MF(p)->bigHash ? GetHeads3b : GetHeads3;
- p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches2;
- vTable->Skip = (Mf_Skip_Func)MatchFinderMt2_Skip;
- break;
- case 4:
- p->GetHeadsFunc = MF(p)->bigHash ? GetHeads4b : GetHeads4;
-
- // it's fast inline version of GetMatches()
- // vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt_GetMatches_Bt4;
-
- p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches3;
- vTable->Skip = (Mf_Skip_Func)MatchFinderMt3_Skip;
- break;
- default:
- p->GetHeadsFunc = MF(p)->bigHash ? GetHeads5b : GetHeads5;
- p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches4;
- vTable->Skip =
- (Mf_Skip_Func)MatchFinderMt3_Skip;
- // (Mf_Skip_Func)MatchFinderMt4_Skip;
- break;
- }
-}
-
-#undef RINOK_THREAD
-#undef PRF
-#undef MF
-#undef GetUi24hi_from32
-#undef LOCK_BUFFER
-#undef UNLOCK_BUFFER
diff --git a/3rdparty/7z/src/LzFindMt.h b/3rdparty/7z/src/LzFindMt.h
deleted file mode 100644
index 5002d0c642..0000000000
--- a/3rdparty/7z/src/LzFindMt.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* LzFindMt.h -- multithreaded Match finder for LZ algorithms
-2023-03-05 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZ_FIND_MT_H
-#define ZIP7_INC_LZ_FIND_MT_H
-
-#include "LzFind.h"
-#include "Threads.h"
-
-EXTERN_C_BEGIN
-
-typedef struct
-{
- UInt32 numProcessedBlocks;
- CThread thread;
- UInt64 affinity;
-
- BoolInt wasCreated;
- BoolInt needStart;
- BoolInt csWasInitialized;
- BoolInt csWasEntered;
-
- BoolInt exit;
- BoolInt stopWriting;
-
- CAutoResetEvent canStart;
- CAutoResetEvent wasStopped;
- CSemaphore freeSemaphore;
- CSemaphore filledSemaphore;
- CCriticalSection cs;
- // UInt32 numBlocks_Sent;
-} CMtSync;
-
-typedef UInt32 * (*Mf_Mix_Matches)(void *p, UInt32 matchMinPos, UInt32 *distances);
-
-/* kMtCacheLineDummy must be >= size_of_CPU_cache_line */
-#define kMtCacheLineDummy 128
-
-typedef void (*Mf_GetHeads)(const Byte *buffer, UInt32 pos,
- UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc);
-
-typedef struct
-{
- /* LZ */
- const Byte *pointerToCurPos;
- UInt32 *btBuf;
- const UInt32 *btBufPos;
- const UInt32 *btBufPosLimit;
- UInt32 lzPos;
- UInt32 btNumAvailBytes;
-
- UInt32 *hash;
- UInt32 fixedHashSize;
- // UInt32 hash4Mask;
- UInt32 historySize;
- const UInt32 *crc;
-
- Mf_Mix_Matches MixMatchesFunc;
- UInt32 failure_LZ_BT; // failure in BT transferred to LZ
- // UInt32 failure_LZ_LZ; // failure in LZ tables
- UInt32 failureBuf[1];
- // UInt32 crc[256];
-
- /* LZ + BT */
- CMtSync btSync;
- Byte btDummy[kMtCacheLineDummy];
-
- /* BT */
- UInt32 *hashBuf;
- UInt32 hashBufPos;
- UInt32 hashBufPosLimit;
- UInt32 hashNumAvail;
- UInt32 failure_BT;
-
-
- CLzRef *son;
- UInt32 matchMaxLen;
- UInt32 numHashBytes;
- UInt32 pos;
- const Byte *buffer;
- UInt32 cyclicBufferPos;
- UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
- UInt32 cutValue;
-
- /* BT + Hash */
- CMtSync hashSync;
- /* Byte hashDummy[kMtCacheLineDummy]; */
-
- /* Hash */
- Mf_GetHeads GetHeadsFunc;
- CMatchFinder *MatchFinder;
- // CMatchFinder MatchFinder;
-} CMatchFinderMt;
-
-// only for Mt part
-void MatchFinderMt_Construct(CMatchFinderMt *p);
-void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc);
-
-SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
- UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc);
-void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable);
-
-/* call MatchFinderMt_InitMt() before IMatchFinder::Init() */
-SRes MatchFinderMt_InitMt(CMatchFinderMt *p);
-void MatchFinderMt_ReleaseStream(CMatchFinderMt *p);
-
-EXTERN_C_END
-
-#endif
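
The multithreaded finder exposes the same IMatchFinder2 vtable as the single-threaded one; the extra requirement, per the comment above, is MatchFinderMt_InitMt() before the vtable's Init(). The sequence below is a hypothetical setup sketch (history size, match length and the zero keep-buffer values are placeholders), mirroring how an encoder would typically wire the two structures together:

static SRes MatchFinderMt_Setup_Sketch(CMatchFinderMt *mt, CMatchFinder *base,
    IMatchFinder2 *vt, ISzAllocPtr alloc)
{
  MatchFinder_Construct(base);
  MatchFinderMt_Construct(mt);
  mt->MatchFinder = base;           /* MF(p) in LzFindMt.c dereferences this pointer */
  RINOK(MatchFinderMt_Create(mt, 1 << 22 /* historySize */, 0, 273 /* matchMaxLen */, 0, alloc))
  MatchFinderMt_CreateVTable(mt, vt);
  RINOK(MatchFinderMt_InitMt(mt))   /* prepare both CMtSync ring buffers */
  vt->Init(mt);                     /* then the usual IMatchFinder2 init */
  /* teardown when finished: MatchFinderMt_Destruct(mt, alloc);
     it stops the worker threads via MatchFinderMt_ReleaseStream() internally */
  return SZ_OK;
}
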
diff --git a/3rdparty/7z/src/LzFindOpt.c b/3rdparty/7z/src/LzFindOpt.c
deleted file mode 100644
index e590aac1ee..0000000000
--- a/3rdparty/7z/src/LzFindOpt.c
+++ /dev/null
@@ -1,578 +0,0 @@
-/* LzFindOpt.c -- multithreaded Match finder for LZ algorithms
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "CpuArch.h"
-#include "LzFind.h"
-
-// #include "LzFindMt.h"
-
-// #define LOG_ITERS
-
-// #define LOG_THREAD
-
-#ifdef LOG_THREAD
-#include <stdio.h>
-#define PRF(x) x
-#else
-// #define PRF(x)
-#endif
-
-#ifdef LOG_ITERS
-#include <stdio.h>
-UInt64 g_NumIters_Tree;
-UInt64 g_NumIters_Loop;
-UInt64 g_NumIters_Bytes;
-#define LOG_ITER(x) x
-#else
-#define LOG_ITER(x)
-#endif
-
-// ---------- BT THREAD ----------
-
-#define USE_SON_PREFETCH
-#define USE_LONG_MATCH_OPT
-
-#define kEmptyHashValue 0
-
-// #define CYC_TO_POS_OFFSET 0
-
-// #define CYC_TO_POS_OFFSET 1 // for debug
-
-/*
-Z7_NO_INLINE
-UInt32 * Z7_FASTCALL GetMatchesSpecN_1(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
- UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size, UInt32 *posRes)
-{
- do
- {
- UInt32 delta;
- if (hash == size)
- break;
- delta = *hash++;
-
- if (delta == 0 || delta > (UInt32)pos)
- return NULL;
-
- lenLimit++;
-
- if (delta == (UInt32)pos)
- {
- CLzRef *ptr1 = son + ((size_t)pos << 1) - CYC_TO_POS_OFFSET * 2;
- *d++ = 0;
- ptr1[0] = kEmptyHashValue;
- ptr1[1] = kEmptyHashValue;
- }
-else
-{
- UInt32 *_distances = ++d;
-
- CLzRef *ptr0 = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2 + 1;
- CLzRef *ptr1 = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
-
- const Byte *len0 = cur, *len1 = cur;
- UInt32 cutValue = _cutValue;
- const Byte *maxLen = cur + _maxLen;
-
- for (LOG_ITER(g_NumIters_Tree++);;)
- {
- LOG_ITER(g_NumIters_Loop++);
- {
- const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
- CLzRef *pair = son + ((size_t)(((ptrdiff_t)pos - CYC_TO_POS_OFFSET) + diff) << 1);
- const Byte *len = (len0 < len1 ? len0 : len1);
-
- #ifdef USE_SON_PREFETCH
- const UInt32 pair0 = *pair;
- #endif
-
- if (len[diff] == len[0])
- {
- if (++len != lenLimit && len[diff] == len[0])
- while (++len != lenLimit)
- {
- LOG_ITER(g_NumIters_Bytes++);
- if (len[diff] != len[0])
- break;
- }
- if (maxLen < len)
- {
- maxLen = len;
- *d++ = (UInt32)(len - cur);
- *d++ = delta - 1;
-
- if (len == lenLimit)
- {
- const UInt32 pair1 = pair[1];
- *ptr1 =
- #ifdef USE_SON_PREFETCH
- pair0;
- #else
- pair[0];
- #endif
- *ptr0 = pair1;
-
- _distances[-1] = (UInt32)(d - _distances);
-
- #ifdef USE_LONG_MATCH_OPT
-
- if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
- break;
-
- {
- for (;;)
- {
- hash++;
- pos++;
- cur++;
- lenLimit++;
- {
- CLzRef *ptr = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
- #if 0
- *(UInt64 *)(void *)ptr = ((const UInt64 *)(const void *)ptr)[diff];
- #else
- const UInt32 p0 = ptr[0 + (diff * 2)];
- const UInt32 p1 = ptr[1 + (diff * 2)];
- ptr[0] = p0;
- ptr[1] = p1;
- // ptr[0] = ptr[0 + (diff * 2)];
- // ptr[1] = ptr[1 + (diff * 2)];
- #endif
- }
- // PrintSon(son + 2, pos - 1);
- // printf("\npos = %x delta = %x\n", pos, delta);
- len++;
- *d++ = 2;
- *d++ = (UInt32)(len - cur);
- *d++ = delta - 1;
- if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
- break;
- }
- }
- #endif
-
- break;
- }
- }
- }
-
- {
- const UInt32 curMatch = (UInt32)pos - delta; // (UInt32)(pos + diff);
- if (len[diff] < len[0])
- {
- delta = pair[1];
- if (delta >= curMatch)
- return NULL;
- *ptr1 = curMatch;
- ptr1 = pair + 1;
- len1 = len;
- }
- else
- {
- delta = *pair;
- if (delta >= curMatch)
- return NULL;
- *ptr0 = curMatch;
- ptr0 = pair;
- len0 = len;
- }
-
- delta = (UInt32)pos - delta;
-
- if (--cutValue == 0 || delta >= pos)
- {
- *ptr0 = *ptr1 = kEmptyHashValue;
- _distances[-1] = (UInt32)(d - _distances);
- break;
- }
- }
- }
- } // for (tree iterations)
-}
- pos++;
- cur++;
- }
- while (d < limit);
- *posRes = (UInt32)pos;
- return d;
-}
-*/
-
-/* define cbs if you use 2 functions.
- GetMatchesSpecN_1() : (pos < _cyclicBufferSize)
- GetMatchesSpecN_2() : (pos >= _cyclicBufferSize)
-
- do not define cbs if you use 1 function:
- GetMatchesSpecN_2()
-*/
-
-// #define cbs _cyclicBufferSize
-
-/*
- we use size_t for (pos) and (_cyclicBufferPos) instead of UInt32
- to eliminate "movsx" BUG in old MSVC x64 compiler.
-*/
-
-UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
- UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
- UInt32 *posRes);
-
-Z7_NO_INLINE
-UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
- UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
- UInt32 *posRes)
-{
- do // while (hash != size)
- {
- UInt32 delta;
-
- #ifndef cbs
- UInt32 cbs;
- #endif
-
- if (hash == size)
- break;
-
- delta = *hash++;
-
- if (delta == 0)
- return NULL;
-
- lenLimit++;
-
- #ifndef cbs
- cbs = _cyclicBufferSize;
- if ((UInt32)pos < cbs)
- {
- if (delta > (UInt32)pos)
- return NULL;
- cbs = (UInt32)pos;
- }
- #endif
-
- if (delta >= cbs)
- {
- CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
- *d++ = 0;
- ptr1[0] = kEmptyHashValue;
- ptr1[1] = kEmptyHashValue;
- }
-else
-{
- UInt32 *_distances = ++d;
-
- CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
- CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
-
- UInt32 cutValue = _cutValue;
- const Byte *len0 = cur, *len1 = cur;
- const Byte *maxLen = cur + _maxLen;
-
- // if (cutValue == 0) { *ptr0 = *ptr1 = kEmptyHashValue; } else
- for (LOG_ITER(g_NumIters_Tree++);;)
- {
- LOG_ITER(g_NumIters_Loop++);
- {
- // SPEC code
- CLzRef *pair = son + ((size_t)((ptrdiff_t)_cyclicBufferPos - (ptrdiff_t)delta
- + (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)
- ) << 1);
-
- const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
- const Byte *len = (len0 < len1 ? len0 : len1);
-
- #ifdef USE_SON_PREFETCH
- const UInt32 pair0 = *pair;
- #endif
-
- if (len[diff] == len[0])
- {
- if (++len != lenLimit && len[diff] == len[0])
- while (++len != lenLimit)
- {
- LOG_ITER(g_NumIters_Bytes++);
- if (len[diff] != len[0])
- break;
- }
- if (maxLen < len)
- {
- maxLen = len;
- *d++ = (UInt32)(len - cur);
- *d++ = delta - 1;
-
- if (len == lenLimit)
- {
- const UInt32 pair1 = pair[1];
- *ptr1 =
- #ifdef USE_SON_PREFETCH
- pair0;
- #else
- pair[0];
- #endif
- *ptr0 = pair1;
-
- _distances[-1] = (UInt32)(d - _distances);
-
- #ifdef USE_LONG_MATCH_OPT
-
- if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
- break;
-
- {
- for (;;)
- {
- *d++ = 2;
- *d++ = (UInt32)(lenLimit - cur);
- *d++ = delta - 1;
- cur++;
- lenLimit++;
- // SPEC
- _cyclicBufferPos++;
- {
- // SPEC code
- CLzRef *dest = son + ((size_t)(_cyclicBufferPos) << 1);
- const CLzRef *src = dest + ((diff
- + (ptrdiff_t)(UInt32)((_cyclicBufferPos < delta) ? cbs : 0)) << 1);
- // CLzRef *ptr = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
- #if 0
- *(UInt64 *)(void *)dest = *((const UInt64 *)(const void *)src);
- #else
- const UInt32 p0 = src[0];
- const UInt32 p1 = src[1];
- dest[0] = p0;
- dest[1] = p1;
- #endif
- }
- pos++;
- hash++;
- if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
- break;
- } // for() end for long matches
- }
- #endif
-
- break; // break from TREE iterations
- }
- }
- }
- {
- const UInt32 curMatch = (UInt32)pos - delta; // (UInt32)(pos + diff);
- if (len[diff] < len[0])
- {
- delta = pair[1];
- *ptr1 = curMatch;
- ptr1 = pair + 1;
- len1 = len;
- if (delta >= curMatch)
- return NULL;
- }
- else
- {
- delta = *pair;
- *ptr0 = curMatch;
- ptr0 = pair;
- len0 = len;
- if (delta >= curMatch)
- return NULL;
- }
- delta = (UInt32)pos - delta;
-
- if (--cutValue == 0 || delta >= cbs)
- {
- *ptr0 = *ptr1 = kEmptyHashValue;
- _distances[-1] = (UInt32)(d - _distances);
- break;
- }
- }
- }
- } // for (tree iterations)
-}
- pos++;
- _cyclicBufferPos++;
- cur++;
- }
- while (d < limit);
- *posRes = (UInt32)pos;
- return d;
-}
-
-
-
-/*
-typedef UInt32 uint32plus; // size_t
-
-UInt32 * Z7_FASTCALL GetMatchesSpecN_3(uint32plus lenLimit, size_t pos, const Byte *cur, CLzRef *son,
- UInt32 _cutValue, UInt32 *d, uint32plus _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
- size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
- UInt32 *posRes)
-{
- do // while (hash != size)
- {
- UInt32 delta;
-
- #ifndef cbs
- UInt32 cbs;
- #endif
-
- if (hash == size)
- break;
-
- delta = *hash++;
-
- if (delta == 0)
- return NULL;
-
- #ifndef cbs
- cbs = _cyclicBufferSize;
- if ((UInt32)pos < cbs)
- {
- if (delta > (UInt32)pos)
- return NULL;
- cbs = (UInt32)pos;
- }
- #endif
-
- if (delta >= cbs)
- {
- CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
- *d++ = 0;
- ptr1[0] = kEmptyHashValue;
- ptr1[1] = kEmptyHashValue;
- }
-else
-{
- CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
- CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
- UInt32 *_distances = ++d;
- uint32plus len0 = 0, len1 = 0;
- UInt32 cutValue = _cutValue;
- uint32plus maxLen = _maxLen;
- // lenLimit++; // const Byte *lenLimit = cur + _lenLimit;
-
- for (LOG_ITER(g_NumIters_Tree++);;)
- {
- LOG_ITER(g_NumIters_Loop++);
- {
- // const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
- CLzRef *pair = son + ((size_t)((ptrdiff_t)_cyclicBufferPos - delta
- + (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)
- ) << 1);
- const Byte *pb = cur - delta;
- uint32plus len = (len0 < len1 ? len0 : len1);
-
- #ifdef USE_SON_PREFETCH
- const UInt32 pair0 = *pair;
- #endif
-
- if (pb[len] == cur[len])
- {
- if (++len != lenLimit && pb[len] == cur[len])
- while (++len != lenLimit)
- if (pb[len] != cur[len])
- break;
- if (maxLen < len)
- {
- maxLen = len;
- *d++ = (UInt32)len;
- *d++ = delta - 1;
- if (len == lenLimit)
- {
- {
- const UInt32 pair1 = pair[1];
- *ptr0 = pair1;
- *ptr1 =
- #ifdef USE_SON_PREFETCH
- pair0;
- #else
- pair[0];
- #endif
- }
-
- _distances[-1] = (UInt32)(d - _distances);
-
- #ifdef USE_LONG_MATCH_OPT
-
- if (hash == size || *hash != delta || pb[lenLimit] != cur[lenLimit] || d >= limit)
- break;
-
- {
- const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
- for (;;)
- {
- *d++ = 2;
- *d++ = (UInt32)lenLimit;
- *d++ = delta - 1;
- _cyclicBufferPos++;
- {
- CLzRef *dest = son + ((size_t)_cyclicBufferPos << 1);
- const CLzRef *src = dest + ((diff +
- (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)) << 1);
- #if 0
- *(UInt64 *)(void *)dest = *((const UInt64 *)(const void *)src);
- #else
- const UInt32 p0 = src[0];
- const UInt32 p1 = src[1];
- dest[0] = p0;
- dest[1] = p1;
- #endif
- }
- hash++;
- pos++;
- cur++;
- pb++;
- if (hash == size || *hash != delta || pb[lenLimit] != cur[lenLimit] || d >= limit)
- break;
- }
- }
- #endif
-
- break;
- }
- }
- }
- {
- const UInt32 curMatch = (UInt32)pos - delta;
- if (pb[len] < cur[len])
- {
- delta = pair[1];
- *ptr1 = curMatch;
- ptr1 = pair + 1;
- len1 = len;
- }
- else
- {
- delta = *pair;
- *ptr0 = curMatch;
- ptr0 = pair;
- len0 = len;
- }
-
- {
- if (delta >= curMatch)
- return NULL;
- delta = (UInt32)pos - delta;
- if (delta >= cbs
- // delta >= _cyclicBufferSize || delta >= pos
- || --cutValue == 0)
- {
- *ptr0 = *ptr1 = kEmptyHashValue;
- _distances[-1] = (UInt32)(d - _distances);
- break;
- }
- }
- }
- }
- } // for (tree iterations)
-}
- pos++;
- _cyclicBufferPos++;
- cur++;
- }
- while (d < limit);
- *posRes = (UInt32)pos;
- return d;
-}
-*/
diff --git a/3rdparty/7z/src/LzHash.h b/3rdparty/7z/src/LzHash.h
deleted file mode 100644
index 3b172ebc67..0000000000
--- a/3rdparty/7z/src/LzHash.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* LzHash.h -- HASH constants for LZ algorithms
-2023-03-05 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZ_HASH_H
-#define ZIP7_INC_LZ_HASH_H
-
-/*
- (kHash2Size >= (1 << 8)) : Required
- (kHash3Size >= (1 << 16)) : Required
-*/
-
-#define kHash2Size (1 << 10)
-#define kHash3Size (1 << 16)
-// #define kHash4Size (1 << 20)
-
-#define kFix3HashSize (kHash2Size)
-#define kFix4HashSize (kHash2Size + kHash3Size)
-// #define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
-
-/*
- We use up to 3 crc values for hash:
- crc0
- crc1 << Shift_1
- crc2 << Shift_2
- (Shift_1 = 5) and (Shift_2 = 10) is good tradeoff.
- Small values for Shift are not good for collision rate.
- Big value for Shift_2 increases the minimum size
- of hash table, that will be slow for small files.
-*/
-
-#define kLzHash_CrcShift_1 5
-#define kLzHash_CrcShift_2 10
-
-#endif
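
To make the shift constants concrete: a 5-byte hash combines three CRC-table lookups, shifted by 0, kLzHash_CrcShift_1 and kLzHash_CrcShift_2, with the two raw middle bytes — the same expression GetHeads5() in LzFindMt.c uses. A compacted sketch of that expression (hashMask is whatever table size the match finder allocated):

static UInt32 Hash5_Sketch(const Byte *p, const UInt32 *crc, UInt32 hashMask)
{
  return ( crc[p[0]]
         ^ (crc[p[3]] << kLzHash_CrcShift_1)      /* << 5  */
         ^ (crc[p[4]] << kLzHash_CrcShift_2)      /* << 10 */
         ^ ((UInt32)p[1] | ((UInt32)p[2] << 8)) ) & hashMask;
}
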
diff --git a/3rdparty/7z/src/Lzma2Dec.c b/3rdparty/7z/src/Lzma2Dec.c
deleted file mode 100644
index 54c87a6a26..0000000000
--- a/3rdparty/7z/src/Lzma2Dec.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/* Lzma2Dec.c -- LZMA2 Decoder
-2023-03-03 : Igor Pavlov : Public domain */
-
-/* #define SHOW_DEBUG_INFO */
-
-#include "Precomp.h"
-
-#ifdef SHOW_DEBUG_INFO
-#include <stdio.h>
-#endif
-
-#include <string.h>
-
-#include "Lzma2Dec.h"
-
-/*
-00000000 - End of data
-00000001 U U - Uncompressed, reset dic, need reset state and set new prop
-00000010 U U - Uncompressed, no reset
-100uuuuu U U P P - LZMA, no reset
-101uuuuu U U P P - LZMA, reset state
-110uuuuu U U P P S - LZMA, reset state + set new prop
-111uuuuu U U P P S - LZMA, reset state + set new prop, reset dic
-
- u, U - Unpack Size
- P - Pack Size
- S - Props
-*/
-
-#define LZMA2_CONTROL_COPY_RESET_DIC 1
-
-#define LZMA2_IS_UNCOMPRESSED_STATE(p) (((p)->control & (1 << 7)) == 0)
-
-#define LZMA2_LCLP_MAX 4
-#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11))
-
-#ifdef SHOW_DEBUG_INFO
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-
-typedef enum
-{
- LZMA2_STATE_CONTROL,
- LZMA2_STATE_UNPACK0,
- LZMA2_STATE_UNPACK1,
- LZMA2_STATE_PACK0,
- LZMA2_STATE_PACK1,
- LZMA2_STATE_PROP,
- LZMA2_STATE_DATA,
- LZMA2_STATE_DATA_CONT,
- LZMA2_STATE_FINISHED,
- LZMA2_STATE_ERROR
-} ELzma2State;
-
-static SRes Lzma2Dec_GetOldProps(Byte prop, Byte *props)
-{
- UInt32 dicSize;
- if (prop > 40)
- return SZ_ERROR_UNSUPPORTED;
- dicSize = (prop == 40) ? 0xFFFFFFFF : LZMA2_DIC_SIZE_FROM_PROP(prop);
- props[0] = (Byte)LZMA2_LCLP_MAX;
- props[1] = (Byte)(dicSize);
- props[2] = (Byte)(dicSize >> 8);
- props[3] = (Byte)(dicSize >> 16);
- props[4] = (Byte)(dicSize >> 24);
- return SZ_OK;
-}
-
-SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc)
-{
- Byte props[LZMA_PROPS_SIZE];
- RINOK(Lzma2Dec_GetOldProps(prop, props))
- return LzmaDec_AllocateProbs(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
-}
-
-SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc)
-{
- Byte props[LZMA_PROPS_SIZE];
- RINOK(Lzma2Dec_GetOldProps(prop, props))
- return LzmaDec_Allocate(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
-}
-
-void Lzma2Dec_Init(CLzma2Dec *p)
-{
- p->state = LZMA2_STATE_CONTROL;
- p->needInitLevel = 0xE0;
- p->isExtraMode = False;
- p->unpackSize = 0;
-
- // p->decoder.dicPos = 0; // we can use it instead of full init
- LzmaDec_Init(&p->decoder);
-}
-
-// ELzma2State
-static unsigned Lzma2Dec_UpdateState(CLzma2Dec *p, Byte b)
-{
- switch (p->state)
- {
- case LZMA2_STATE_CONTROL:
- p->isExtraMode = False;
- p->control = b;
- PRF(printf("\n %8X", (unsigned)p->decoder.dicPos));
- PRF(printf(" %02X", (unsigned)b));
- if (b == 0)
- return LZMA2_STATE_FINISHED;
- if (LZMA2_IS_UNCOMPRESSED_STATE(p))
- {
- if (b == LZMA2_CONTROL_COPY_RESET_DIC)
- p->needInitLevel = 0xC0;
- else if (b > 2 || p->needInitLevel == 0xE0)
- return LZMA2_STATE_ERROR;
- }
- else
- {
- if (b < p->needInitLevel)
- return LZMA2_STATE_ERROR;
- p->needInitLevel = 0;
- p->unpackSize = (UInt32)(b & 0x1F) << 16;
- }
- return LZMA2_STATE_UNPACK0;
-
- case LZMA2_STATE_UNPACK0:
- p->unpackSize |= (UInt32)b << 8;
- return LZMA2_STATE_UNPACK1;
-
- case LZMA2_STATE_UNPACK1:
- p->unpackSize |= (UInt32)b;
- p->unpackSize++;
- PRF(printf(" %7u", (unsigned)p->unpackSize));
- return LZMA2_IS_UNCOMPRESSED_STATE(p) ? LZMA2_STATE_DATA : LZMA2_STATE_PACK0;
-
- case LZMA2_STATE_PACK0:
- p->packSize = (UInt32)b << 8;
- return LZMA2_STATE_PACK1;
-
- case LZMA2_STATE_PACK1:
- p->packSize |= (UInt32)b;
- p->packSize++;
- // if (p->packSize < 5) return LZMA2_STATE_ERROR;
- PRF(printf(" %5u", (unsigned)p->packSize));
- return (p->control & 0x40) ? LZMA2_STATE_PROP : LZMA2_STATE_DATA;
-
- case LZMA2_STATE_PROP:
- {
- unsigned lc, lp;
- if (b >= (9 * 5 * 5))
- return LZMA2_STATE_ERROR;
- lc = b % 9;
- b /= 9;
- p->decoder.prop.pb = (Byte)(b / 5);
- lp = b % 5;
- if (lc + lp > LZMA2_LCLP_MAX)
- return LZMA2_STATE_ERROR;
- p->decoder.prop.lc = (Byte)lc;
- p->decoder.prop.lp = (Byte)lp;
- return LZMA2_STATE_DATA;
- }
- }
- return LZMA2_STATE_ERROR;
-}
-
-static void LzmaDec_UpdateWithUncompressed(CLzmaDec *p, const Byte *src, SizeT size)
-{
- memcpy(p->dic + p->dicPos, src, size);
- p->dicPos += size;
- if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= size)
- p->checkDicSize = p->prop.dicSize;
- p->processedPos += (UInt32)size;
-}
-
-void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState);
-
-
-SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit,
- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
-{
- SizeT inSize = *srcLen;
- *srcLen = 0;
- *status = LZMA_STATUS_NOT_SPECIFIED;
-
- while (p->state != LZMA2_STATE_ERROR)
- {
- SizeT dicPos;
-
- if (p->state == LZMA2_STATE_FINISHED)
- {
- *status = LZMA_STATUS_FINISHED_WITH_MARK;
- return SZ_OK;
- }
-
- dicPos = p->decoder.dicPos;
-
- if (dicPos == dicLimit && finishMode == LZMA_FINISH_ANY)
- {
- *status = LZMA_STATUS_NOT_FINISHED;
- return SZ_OK;
- }
-
- if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT)
- {
- if (*srcLen == inSize)
- {
- *status = LZMA_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
- (*srcLen)++;
- p->state = Lzma2Dec_UpdateState(p, *src++);
- if (dicPos == dicLimit && p->state != LZMA2_STATE_FINISHED)
- break;
- continue;
- }
-
- {
- SizeT inCur = inSize - *srcLen;
- SizeT outCur = dicLimit - dicPos;
- ELzmaFinishMode curFinishMode = LZMA_FINISH_ANY;
-
- if (outCur >= p->unpackSize)
- {
- outCur = (SizeT)p->unpackSize;
- curFinishMode = LZMA_FINISH_END;
- }
-
- if (LZMA2_IS_UNCOMPRESSED_STATE(p))
- {
- if (inCur == 0)
- {
- *status = LZMA_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
-
- if (p->state == LZMA2_STATE_DATA)
- {
- BoolInt initDic = (p->control == LZMA2_CONTROL_COPY_RESET_DIC);
- LzmaDec_InitDicAndState(&p->decoder, initDic, False);
- }
-
- if (inCur > outCur)
- inCur = outCur;
- if (inCur == 0)
- break;
-
- LzmaDec_UpdateWithUncompressed(&p->decoder, src, inCur);
-
- src += inCur;
- *srcLen += inCur;
- p->unpackSize -= (UInt32)inCur;
- p->state = (p->unpackSize == 0) ? LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT;
- }
- else
- {
- SRes res;
-
- if (p->state == LZMA2_STATE_DATA)
- {
- BoolInt initDic = (p->control >= 0xE0);
- BoolInt initState = (p->control >= 0xA0);
- LzmaDec_InitDicAndState(&p->decoder, initDic, initState);
- p->state = LZMA2_STATE_DATA_CONT;
- }
-
- if (inCur > p->packSize)
- inCur = (SizeT)p->packSize;
-
- res = LzmaDec_DecodeToDic(&p->decoder, dicPos + outCur, src, &inCur, curFinishMode, status);
-
- src += inCur;
- *srcLen += inCur;
- p->packSize -= (UInt32)inCur;
- outCur = p->decoder.dicPos - dicPos;
- p->unpackSize -= (UInt32)outCur;
-
- if (res != 0)
- break;
-
- if (*status == LZMA_STATUS_NEEDS_MORE_INPUT)
- {
- if (p->packSize == 0)
- break;
- return SZ_OK;
- }
-
- if (inCur == 0 && outCur == 0)
- {
- if (*status != LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
- || p->unpackSize != 0
- || p->packSize != 0)
- break;
- p->state = LZMA2_STATE_CONTROL;
- }
-
- *status = LZMA_STATUS_NOT_SPECIFIED;
- }
- }
- }
-
- *status = LZMA_STATUS_NOT_SPECIFIED;
- p->state = LZMA2_STATE_ERROR;
- return SZ_ERROR_DATA;
-}
-
-
-
-
-ELzma2ParseStatus Lzma2Dec_Parse(CLzma2Dec *p,
- SizeT outSize,
- const Byte *src, SizeT *srcLen,
- int checkFinishBlock)
-{
- SizeT inSize = *srcLen;
- *srcLen = 0;
-
- while (p->state != LZMA2_STATE_ERROR)
- {
- if (p->state == LZMA2_STATE_FINISHED)
- return (ELzma2ParseStatus)LZMA_STATUS_FINISHED_WITH_MARK;
-
- if (outSize == 0 && !checkFinishBlock)
- return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED;
-
- if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT)
- {
- if (*srcLen == inSize)
- return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT;
- (*srcLen)++;
-
- p->state = Lzma2Dec_UpdateState(p, *src++);
-
- if (p->state == LZMA2_STATE_UNPACK0)
- {
- // if (p->decoder.dicPos != 0)
- if (p->control == LZMA2_CONTROL_COPY_RESET_DIC || p->control >= 0xE0)
- return LZMA2_PARSE_STATUS_NEW_BLOCK;
- // if (outSize == 0) return LZMA_STATUS_NOT_FINISHED;
- }
-
- // The following code can be commented.
- // It's not a big problem if we read additional input bytes.
- // It will be stopped later in LZMA2_STATE_DATA / LZMA2_STATE_DATA_CONT state.
-
- if (outSize == 0 && p->state != LZMA2_STATE_FINISHED)
- {
- // checkFinishBlock is true. So we expect that block must be finished,
- // We can return LZMA_STATUS_NOT_SPECIFIED or LZMA_STATUS_NOT_FINISHED here
- // break;
- return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED;
- }
-
- if (p->state == LZMA2_STATE_DATA)
- return LZMA2_PARSE_STATUS_NEW_CHUNK;
-
- continue;
- }
-
- if (outSize == 0)
- return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED;
-
- {
- SizeT inCur = inSize - *srcLen;
-
- if (LZMA2_IS_UNCOMPRESSED_STATE(p))
- {
- if (inCur == 0)
- return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT;
- if (inCur > p->unpackSize)
- inCur = p->unpackSize;
- if (inCur > outSize)
- inCur = outSize;
- p->decoder.dicPos += inCur;
- src += inCur;
- *srcLen += inCur;
- outSize -= inCur;
- p->unpackSize -= (UInt32)inCur;
- p->state = (p->unpackSize == 0) ? LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT;
- }
- else
- {
- p->isExtraMode = True;
-
- if (inCur == 0)
- {
- if (p->packSize != 0)
- return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT;
- }
- else if (p->state == LZMA2_STATE_DATA)
- {
- p->state = LZMA2_STATE_DATA_CONT;
- if (*src != 0)
- {
- // first byte of lzma chunk must be Zero
- *srcLen += 1;
- p->packSize--;
- break;
- }
- }
-
- if (inCur > p->packSize)
- inCur = (SizeT)p->packSize;
-
- src += inCur;
- *srcLen += inCur;
- p->packSize -= (UInt32)inCur;
-
- if (p->packSize == 0)
- {
- SizeT rem = outSize;
- if (rem > p->unpackSize)
- rem = p->unpackSize;
- p->decoder.dicPos += rem;
- p->unpackSize -= (UInt32)rem;
- outSize -= rem;
- if (p->unpackSize == 0)
- p->state = LZMA2_STATE_CONTROL;
- }
- }
- }
- }
-
- p->state = LZMA2_STATE_ERROR;
- return (ELzma2ParseStatus)LZMA_STATUS_NOT_SPECIFIED;
-}
-
-
-
-
-SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
-{
- SizeT outSize = *destLen, inSize = *srcLen;
- *srcLen = *destLen = 0;
-
- for (;;)
- {
- SizeT inCur = inSize, outCur, dicPos;
- ELzmaFinishMode curFinishMode;
- SRes res;
-
- if (p->decoder.dicPos == p->decoder.dicBufSize)
- p->decoder.dicPos = 0;
- dicPos = p->decoder.dicPos;
- curFinishMode = LZMA_FINISH_ANY;
- outCur = p->decoder.dicBufSize - dicPos;
-
- if (outCur >= outSize)
- {
- outCur = outSize;
- curFinishMode = finishMode;
- }
-
- res = Lzma2Dec_DecodeToDic(p, dicPos + outCur, src, &inCur, curFinishMode, status);
-
- src += inCur;
- inSize -= inCur;
- *srcLen += inCur;
- outCur = p->decoder.dicPos - dicPos;
- memcpy(dest, p->decoder.dic + dicPos, outCur);
- dest += outCur;
- outSize -= outCur;
- *destLen += outCur;
- if (res != 0)
- return res;
- if (outCur == 0 || outSize == 0)
- return SZ_OK;
- }
-}
-
-
-SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAllocPtr alloc)
-{
- CLzma2Dec p;
- SRes res;
- SizeT outSize = *destLen, inSize = *srcLen;
- *destLen = *srcLen = 0;
- *status = LZMA_STATUS_NOT_SPECIFIED;
- Lzma2Dec_CONSTRUCT(&p)
- RINOK(Lzma2Dec_AllocateProbs(&p, prop, alloc))
- p.decoder.dic = dest;
- p.decoder.dicBufSize = outSize;
- Lzma2Dec_Init(&p);
- *srcLen = inSize;
- res = Lzma2Dec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
- *destLen = p.decoder.dicPos;
- if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
- res = SZ_ERROR_INPUT_EOF;
- Lzma2Dec_FreeProbs(&p, alloc);
- return res;
-}
-
-#undef PRF
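
The chunk-format table near the top of the deleted Lzma2Dec.c is easiest to read with a worked example. The helper below is hypothetical (not part of the 7-Zip sources); it only classifies a control byte the way Lzma2Dec_UpdateState does.

/* Hypothetical helper: classifies an LZMA2 control byte according to the
   table in the header comment of Lzma2Dec.c. */
#include <stdio.h>

static void DescribeLzma2Control(unsigned b)
{
  if (b == 0)
    printf("end of data\n");
  else if ((b & 0x80) == 0)   /* uncompressed (copy) chunk; only 1 and 2 are valid */
    printf("copy chunk, %s dictionary reset\n", b == 1 ? "with" : "without");
  else
  {
    /* 1RRuuuuu: RR selects the reset level, uuuuu holds bits 16..20
       of (unpackSize - 1) */
    static const char *const reset[4] = {
      "no reset", "reset state", "reset state + new props",
      "reset state + new props + reset dic" };
    printf("LZMA chunk, %s, unpackSize high bits = %u\n",
        reset[(b >> 5) & 3], b & 0x1Fu);
  }
}

int main(void)
{
  DescribeLzma2Control(0x00);   /* end marker */
  DescribeLzma2Control(0x01);   /* copy chunk with dictionary reset */
  DescribeLzma2Control(0xE0);   /* LZMA chunk, reset state + new props + reset dic */
  return 0;
}
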
diff --git a/3rdparty/7z/src/Lzma2Dec.h b/3rdparty/7z/src/Lzma2Dec.h
deleted file mode 100644
index 9b15ab04ad..0000000000
--- a/3rdparty/7z/src/Lzma2Dec.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* Lzma2Dec.h -- LZMA2 Decoder
-2023-03-03 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA2_DEC_H
-#define ZIP7_INC_LZMA2_DEC_H
-
-#include "LzmaDec.h"
-
-EXTERN_C_BEGIN
-
-/* ---------- State Interface ---------- */
-
-typedef struct
-{
- unsigned state;
- Byte control;
- Byte needInitLevel;
- Byte isExtraMode;
- Byte _pad_;
- UInt32 packSize;
- UInt32 unpackSize;
- CLzmaDec decoder;
-} CLzma2Dec;
-
-#define Lzma2Dec_CONSTRUCT(p) LzmaDec_CONSTRUCT(&(p)->decoder)
-#define Lzma2Dec_Construct(p) Lzma2Dec_CONSTRUCT(p)
-#define Lzma2Dec_FreeProbs(p, alloc) LzmaDec_FreeProbs(&(p)->decoder, alloc)
-#define Lzma2Dec_Free(p, alloc) LzmaDec_Free(&(p)->decoder, alloc)
-
-SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc);
-SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc);
-void Lzma2Dec_Init(CLzma2Dec *p);
-
-/*
-finishMode:
- It has meaning only if the decoding reaches output limit (*destLen or dicLimit).
- LZMA_FINISH_ANY - use smallest number of input bytes
- LZMA_FINISH_END - read EndOfStream marker after decoding
-
-Returns:
- SZ_OK
- status:
- LZMA_STATUS_FINISHED_WITH_MARK
- LZMA_STATUS_NOT_FINISHED
- LZMA_STATUS_NEEDS_MORE_INPUT
- SZ_ERROR_DATA - Data error
-*/
-
-SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit,
- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
-
-SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
-
-
-/* ---------- LZMA2 block and chunk parsing ---------- */
-
-/*
-Lzma2Dec_Parse() parses compressed data stream up to next independent block or next chunk data.
-It can return LZMA_STATUS_* code or LZMA2_PARSE_STATUS_* code:
- - LZMA2_PARSE_STATUS_NEW_BLOCK - there is new block, and 1 additional byte (control byte of next block header) was read from input.
- - LZMA2_PARSE_STATUS_NEW_CHUNK - there is new chunk, and only lzma2 header of new chunk was read.
- CLzma2Dec::unpackSize contains unpack size of that chunk
-*/
-
-typedef enum
-{
-/*
- LZMA_STATUS_NOT_SPECIFIED // data error
- LZMA_STATUS_FINISHED_WITH_MARK
- LZMA_STATUS_NOT_FINISHED //
- LZMA_STATUS_NEEDS_MORE_INPUT
- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK // unused
-*/
- LZMA2_PARSE_STATUS_NEW_BLOCK = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK + 1,
- LZMA2_PARSE_STATUS_NEW_CHUNK
-} ELzma2ParseStatus;
-
-ELzma2ParseStatus Lzma2Dec_Parse(CLzma2Dec *p,
- SizeT outSize, // output size
- const Byte *src, SizeT *srcLen,
- int checkFinishBlock // set (checkFinishBlock = 1), if it must read full input data, if decoder.dicPos reaches blockMax position.
- );
-
-/*
-LZMA2 parser doesn't decode LZMA chunks, so we must read
- full input LZMA chunk to decode some part of LZMA chunk.
-
-Lzma2Dec_GetUnpackExtra() returns the value that shows
- max possible number of output bytes that can be output by decoder
- at current input positon.
-*/
-
-#define Lzma2Dec_GetUnpackExtra(p) ((p)->isExtraMode ? (p)->unpackSize : 0)
-
-
-/* ---------- One Call Interface ---------- */
-
-/*
-finishMode:
- It has meaning only if the decoding reaches output limit (*destLen).
- LZMA_FINISH_ANY - use smallest number of input bytes
- LZMA_FINISH_END - read EndOfStream marker after decoding
-
-Returns:
- SZ_OK
- status:
- LZMA_STATUS_FINISHED_WITH_MARK
- LZMA_STATUS_NOT_FINISHED
- SZ_ERROR_DATA - Data error
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_UNSUPPORTED - Unsupported properties
- SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
-*/
-
-SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAllocPtr alloc);
-
-EXTERN_C_END
-
-#endif
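
For reference, a minimal sketch of the streaming interface declared in this deleted header, assuming the usual g_Alloc allocator from Alloc.h; the prop byte, input and output buffers are assumed to come from the caller, and error handling is abbreviated.

/* A minimal sketch, assuming g_Alloc from Alloc.h; `prop` is the
   dictionary-size byte from the container, and the whole input/output is
   already in memory. */
#include "Lzma2Dec.h"
#include "Alloc.h"

static SRes DecodeAll(Byte prop, const Byte *in, SizeT inSize,
                      Byte *out, SizeT outSize)
{
  CLzma2Dec dec;
  ELzmaStatus status;
  SRes res;
  Lzma2Dec_CONSTRUCT(&dec)
  RINOK(Lzma2Dec_Allocate(&dec, prop, &g_Alloc))
  Lzma2Dec_Init(&dec);
  res = Lzma2Dec_DecodeToBuf(&dec, out, &outSize, in, &inSize,
      LZMA_FINISH_END, &status);
  if (res == SZ_OK && status != LZMA_STATUS_FINISHED_WITH_MARK)
    res = SZ_ERROR_DATA;   /* stream did not end with the end-of-stream marker */
  Lzma2Dec_Free(&dec, &g_Alloc);
  return res;
}
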
diff --git a/3rdparty/7z/src/Lzma2DecMt.c b/3rdparty/7z/src/Lzma2DecMt.c
deleted file mode 100644
index e1038a1257..0000000000
--- a/3rdparty/7z/src/Lzma2DecMt.c
+++ /dev/null
@@ -1,1095 +0,0 @@
-/* Lzma2DecMt.c -- LZMA2 Decoder Multi-thread
-2023-04-13 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-// #define SHOW_DEBUG_INFO
-// #define Z7_ST
-
-#ifdef SHOW_DEBUG_INFO
-#include <stdio.h>
-#endif
-
-#include "Alloc.h"
-
-#include "Lzma2Dec.h"
-#include "Lzma2DecMt.h"
-
-#ifndef Z7_ST
-#include "MtDec.h"
-
-#define LZMA2DECMT_OUT_BLOCK_MAX_DEFAULT (1 << 28)
-#endif
-
-
-#ifndef Z7_ST
-#ifdef SHOW_DEBUG_INFO
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-#define PRF_STR(s) PRF(printf("\n" s "\n");)
-#define PRF_STR_INT_2(s, d1, d2) PRF(printf("\n" s " %d %d\n", (unsigned)d1, (unsigned)d2);)
-#endif
-
-
-void Lzma2DecMtProps_Init(CLzma2DecMtProps *p)
-{
- p->inBufSize_ST = 1 << 20;
- p->outStep_ST = 1 << 20;
-
- #ifndef Z7_ST
- p->numThreads = 1;
- p->inBufSize_MT = 1 << 18;
- p->outBlockMax = LZMA2DECMT_OUT_BLOCK_MAX_DEFAULT;
- p->inBlockMax = p->outBlockMax + p->outBlockMax / 16;
- #endif
-}
-
-
-
-#ifndef Z7_ST
-
-/* ---------- CLzma2DecMtThread ---------- */
-
-typedef struct
-{
- CLzma2Dec dec;
- Byte dec_created;
- Byte needInit;
-
- Byte *outBuf;
- size_t outBufSize;
-
- EMtDecParseState state;
- ELzma2ParseStatus parseStatus;
-
- size_t inPreSize;
- size_t outPreSize;
-
- size_t inCodeSize;
- size_t outCodeSize;
- SRes codeRes;
-
- CAlignOffsetAlloc alloc;
-
- Byte mtPad[1 << 7];
-} CLzma2DecMtThread;
-
-#endif
-
-
-/* ---------- CLzma2DecMt ---------- */
-
-struct CLzma2DecMt
-{
- // ISzAllocPtr alloc;
- ISzAllocPtr allocMid;
-
- CAlignOffsetAlloc alignOffsetAlloc;
- CLzma2DecMtProps props;
- Byte prop;
-
- ISeqInStreamPtr inStream;
- ISeqOutStreamPtr outStream;
- ICompressProgressPtr progress;
-
- BoolInt finishMode;
- BoolInt outSize_Defined;
- UInt64 outSize;
-
- UInt64 outProcessed;
- UInt64 inProcessed;
- BoolInt readWasFinished;
- SRes readRes;
-
- Byte *inBuf;
- size_t inBufSize;
- Byte dec_created;
- CLzma2Dec dec;
-
- size_t inPos;
- size_t inLim;
-
- #ifndef Z7_ST
- UInt64 outProcessed_Parse;
- BoolInt mtc_WasConstructed;
- CMtDec mtc;
- CLzma2DecMtThread coders[MTDEC_THREADS_MAX];
- #endif
-};
-
-
-
-CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid)
-{
- CLzma2DecMt *p = (CLzma2DecMt *)ISzAlloc_Alloc(alloc, sizeof(CLzma2DecMt));
- if (!p)
- return NULL;
-
- // p->alloc = alloc;
- p->allocMid = allocMid;
-
- AlignOffsetAlloc_CreateVTable(&p->alignOffsetAlloc);
- p->alignOffsetAlloc.numAlignBits = 7;
- p->alignOffsetAlloc.offset = 0;
- p->alignOffsetAlloc.baseAlloc = alloc;
-
- p->inBuf = NULL;
- p->inBufSize = 0;
- p->dec_created = False;
-
- // Lzma2DecMtProps_Init(&p->props);
-
- #ifndef Z7_ST
- p->mtc_WasConstructed = False;
- {
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CLzma2DecMtThread *t = &p->coders[i];
- t->dec_created = False;
- t->outBuf = NULL;
- t->outBufSize = 0;
- }
- }
- #endif
-
- return (CLzma2DecMtHandle)(void *)p;
-}
-
-
-#ifndef Z7_ST
-
-static void Lzma2DecMt_FreeOutBufs(CLzma2DecMt *p)
-{
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CLzma2DecMtThread *t = &p->coders[i];
- if (t->outBuf)
- {
- ISzAlloc_Free(p->allocMid, t->outBuf);
- t->outBuf = NULL;
- t->outBufSize = 0;
- }
- }
-}
-
-#endif
-
-
-static void Lzma2DecMt_FreeSt(CLzma2DecMt *p)
-{
- if (p->dec_created)
- {
- Lzma2Dec_Free(&p->dec, &p->alignOffsetAlloc.vt);
- p->dec_created = False;
- }
- if (p->inBuf)
- {
- ISzAlloc_Free(p->allocMid, p->inBuf);
- p->inBuf = NULL;
- }
- p->inBufSize = 0;
-}
-
-
-// #define GET_CLzma2DecMt_p CLzma2DecMt *p = (CLzma2DecMt *)(void *)pp;
-
-void Lzma2DecMt_Destroy(CLzma2DecMtHandle p)
-{
- // GET_CLzma2DecMt_p
-
- Lzma2DecMt_FreeSt(p);
-
- #ifndef Z7_ST
-
- if (p->mtc_WasConstructed)
- {
- MtDec_Destruct(&p->mtc);
- p->mtc_WasConstructed = False;
- }
- {
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CLzma2DecMtThread *t = &p->coders[i];
- if (t->dec_created)
- {
- // we don't need to free dict here
- Lzma2Dec_FreeProbs(&t->dec, &t->alloc.vt); // p->alloc !!!
- t->dec_created = False;
- }
- }
- }
- Lzma2DecMt_FreeOutBufs(p);
-
- #endif
-
- ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, p);
-}
-
-
-
-#ifndef Z7_ST
-
-static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCallbackInfo *cc)
-{
- CLzma2DecMt *me = (CLzma2DecMt *)obj;
- CLzma2DecMtThread *t = &me->coders[coderIndex];
-
- PRF_STR_INT_2("Parse", coderIndex, cc->srcSize)
-
- cc->state = MTDEC_PARSE_CONTINUE;
-
- if (cc->startCall)
- {
- if (!t->dec_created)
- {
- Lzma2Dec_CONSTRUCT(&t->dec)
- t->dec_created = True;
- AlignOffsetAlloc_CreateVTable(&t->alloc);
- {
- /* (1 << 12) is expected size of one way in data cache.
- We optimize alignment for cache line size of 128 bytes and smaller */
- const unsigned kNumAlignBits = 12;
- const unsigned kNumCacheLineBits = 7; /* <= kNumAlignBits */
- t->alloc.numAlignBits = kNumAlignBits;
- t->alloc.offset = ((UInt32)coderIndex * (((unsigned)1 << 11) + (1 << 8) + (1 << 6))) & (((unsigned)1 << kNumAlignBits) - ((unsigned)1 << kNumCacheLineBits));
- t->alloc.baseAlloc = me->alignOffsetAlloc.baseAlloc;
- }
- }
- Lzma2Dec_Init(&t->dec);
-
- t->inPreSize = 0;
- t->outPreSize = 0;
- // t->blockWasFinished = False;
- // t->finishedWithMark = False;
- t->parseStatus = (ELzma2ParseStatus)LZMA_STATUS_NOT_SPECIFIED;
- t->state = MTDEC_PARSE_CONTINUE;
-
- t->inCodeSize = 0;
- t->outCodeSize = 0;
- t->codeRes = SZ_OK;
-
- // (cc->srcSize == 0) is allowed
- }
-
- {
- ELzma2ParseStatus status;
- BoolInt overflow;
- UInt32 unpackRem = 0;
-
- int checkFinishBlock = True;
- size_t limit = me->props.outBlockMax;
- if (me->outSize_Defined)
- {
- UInt64 rem = me->outSize - me->outProcessed_Parse;
- if (limit >= rem)
- {
- limit = (size_t)rem;
- if (!me->finishMode)
- checkFinishBlock = False;
- }
- }
-
- // checkFinishBlock = False, if we want to decode partial data
- // that must be finished at position <= outBlockMax.
-
- {
- const size_t srcOrig = cc->srcSize;
- SizeT srcSize_Point = 0;
- SizeT dicPos_Point = 0;
-
- cc->srcSize = 0;
- overflow = False;
-
- for (;;)
- {
- SizeT srcCur = (SizeT)(srcOrig - cc->srcSize);
-
- status = Lzma2Dec_Parse(&t->dec,
- (SizeT)limit - t->dec.decoder.dicPos,
- cc->src + cc->srcSize, &srcCur,
- checkFinishBlock);
-
- cc->srcSize += srcCur;
-
- if (status == LZMA2_PARSE_STATUS_NEW_CHUNK)
- {
- if (t->dec.unpackSize > me->props.outBlockMax - t->dec.decoder.dicPos)
- {
- overflow = True;
- break;
- }
- continue;
- }
-
- if (status == LZMA2_PARSE_STATUS_NEW_BLOCK)
- {
- if (t->dec.decoder.dicPos == 0)
- continue;
- // we decode small blocks in one thread
- if (t->dec.decoder.dicPos >= (1 << 14))
- break;
- dicPos_Point = t->dec.decoder.dicPos;
- srcSize_Point = (SizeT)cc->srcSize;
- continue;
- }
-
- if ((int)status == LZMA_STATUS_NOT_FINISHED && checkFinishBlock
- // && limit == t->dec.decoder.dicPos
- // && limit == me->props.outBlockMax
- )
- {
- overflow = True;
- break;
- }
-
- unpackRem = Lzma2Dec_GetUnpackExtra(&t->dec);
- break;
- }
-
- if (dicPos_Point != 0
- && (int)status != LZMA2_PARSE_STATUS_NEW_BLOCK
- && (int)status != LZMA_STATUS_FINISHED_WITH_MARK
- && (int)status != LZMA_STATUS_NOT_SPECIFIED)
- {
- // we revert to latest newBlock state
- status = LZMA2_PARSE_STATUS_NEW_BLOCK;
- unpackRem = 0;
- t->dec.decoder.dicPos = dicPos_Point;
- cc->srcSize = srcSize_Point;
- overflow = False;
- }
- }
-
- t->inPreSize += cc->srcSize;
- t->parseStatus = status;
-
- if (overflow)
- cc->state = MTDEC_PARSE_OVERFLOW;
- else
- {
- size_t dicPos = t->dec.decoder.dicPos;
-
- if ((int)status != LZMA_STATUS_NEEDS_MORE_INPUT)
- {
- if (status == LZMA2_PARSE_STATUS_NEW_BLOCK)
- {
- cc->state = MTDEC_PARSE_NEW;
- cc->srcSize--; // we don't need control byte of next block
- t->inPreSize--;
- }
- else
- {
- cc->state = MTDEC_PARSE_END;
- if ((int)status != LZMA_STATUS_FINISHED_WITH_MARK)
- {
- // (status == LZMA_STATUS_NOT_SPECIFIED)
- // (status == LZMA_STATUS_NOT_FINISHED)
- if (unpackRem != 0)
- {
- /* we also reserve space for max possible number of output bytes of current LZMA chunk */
- size_t rem = limit - dicPos;
- if (rem > unpackRem)
- rem = unpackRem;
- dicPos += rem;
- }
- }
- }
-
- me->outProcessed_Parse += dicPos;
- }
-
- cc->outPos = dicPos;
- t->outPreSize = (size_t)dicPos;
- }
-
- t->state = cc->state;
- return;
- }
-}
-
-
-static SRes Lzma2DecMt_MtCallback_PreCode(void *pp, unsigned coderIndex)
-{
- CLzma2DecMt *me = (CLzma2DecMt *)pp;
- CLzma2DecMtThread *t = &me->coders[coderIndex];
- Byte *dest = t->outBuf;
-
- if (t->inPreSize == 0)
- {
- t->codeRes = SZ_ERROR_DATA;
- return t->codeRes;
- }
-
- if (!dest || t->outBufSize < t->outPreSize)
- {
- if (dest)
- {
- ISzAlloc_Free(me->allocMid, dest);
- t->outBuf = NULL;
- t->outBufSize = 0;
- }
-
- dest = (Byte *)ISzAlloc_Alloc(me->allocMid, t->outPreSize
- // + (1 << 28)
- );
- // Sleep(200);
- if (!dest)
- return SZ_ERROR_MEM;
- t->outBuf = dest;
- t->outBufSize = t->outPreSize;
- }
-
- t->dec.decoder.dic = dest;
- t->dec.decoder.dicBufSize = (SizeT)t->outPreSize;
-
- t->needInit = True;
-
- return Lzma2Dec_AllocateProbs(&t->dec, me->prop, &t->alloc.vt); // alloc.vt
-}
-
-
-static SRes Lzma2DecMt_MtCallback_Code(void *pp, unsigned coderIndex,
- const Byte *src, size_t srcSize, int srcFinished,
- // int finished, int blockFinished,
- UInt64 *inCodePos, UInt64 *outCodePos, int *stop)
-{
- CLzma2DecMt *me = (CLzma2DecMt *)pp;
- CLzma2DecMtThread *t = &me->coders[coderIndex];
-
- UNUSED_VAR(srcFinished)
-
- PRF_STR_INT_2("Code", coderIndex, srcSize)
-
- *inCodePos = t->inCodeSize;
- *outCodePos = 0;
- *stop = True;
-
- if (t->needInit)
- {
- Lzma2Dec_Init(&t->dec);
- t->needInit = False;
- }
-
- {
- ELzmaStatus status;
- SizeT srcProcessed = (SizeT)srcSize;
- BoolInt blockWasFinished =
- ((int)t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK
- || t->parseStatus == LZMA2_PARSE_STATUS_NEW_BLOCK);
-
- SRes res = Lzma2Dec_DecodeToDic(&t->dec,
- (SizeT)t->outPreSize,
- src, &srcProcessed,
- blockWasFinished ? LZMA_FINISH_END : LZMA_FINISH_ANY,
- &status);
-
- t->codeRes = res;
-
- t->inCodeSize += srcProcessed;
- *inCodePos = t->inCodeSize;
- t->outCodeSize = t->dec.decoder.dicPos;
- *outCodePos = t->dec.decoder.dicPos;
-
- if (res != SZ_OK)
- return res;
-
- if (srcProcessed == srcSize)
- *stop = False;
-
- if (blockWasFinished)
- {
- if (srcSize != srcProcessed)
- return SZ_ERROR_FAIL;
-
- if (t->inPreSize == t->inCodeSize)
- {
- if (t->outPreSize != t->outCodeSize)
- return SZ_ERROR_FAIL;
- *stop = True;
- }
- }
- else
- {
- if (t->outPreSize == t->outCodeSize)
- *stop = True;
- }
-
- return SZ_OK;
- }
-}
-
-
-#define LZMA2DECMT_STREAM_WRITE_STEP (1 << 24)
-
-static SRes Lzma2DecMt_MtCallback_Write(void *pp, unsigned coderIndex,
- BoolInt needWriteToStream,
- const Byte *src, size_t srcSize, BoolInt isCross,
- BoolInt *needContinue, BoolInt *canRecode)
-{
- CLzma2DecMt *me = (CLzma2DecMt *)pp;
- const CLzma2DecMtThread *t = &me->coders[coderIndex];
- size_t size = t->outCodeSize;
- const Byte *data = t->outBuf;
- BoolInt needContinue2 = True;
-
- UNUSED_VAR(src)
- UNUSED_VAR(srcSize)
- UNUSED_VAR(isCross)
-
- PRF_STR_INT_2("Write", coderIndex, srcSize)
-
- *needContinue = False;
- *canRecode = True;
-
- if (
- // t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK
- t->state == MTDEC_PARSE_OVERFLOW
- || t->state == MTDEC_PARSE_END)
- needContinue2 = False;
-
-
- if (!needWriteToStream)
- return SZ_OK;
-
- me->mtc.inProcessed += t->inCodeSize;
-
- if (t->codeRes == SZ_OK)
- if ((int)t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK
- || t->parseStatus == LZMA2_PARSE_STATUS_NEW_BLOCK)
- if (t->outPreSize != t->outCodeSize
- || t->inPreSize != t->inCodeSize)
- return SZ_ERROR_FAIL;
-
- *canRecode = False;
-
- if (me->outStream)
- {
- for (;;)
- {
- size_t cur = size;
- size_t written;
- if (cur > LZMA2DECMT_STREAM_WRITE_STEP)
- cur = LZMA2DECMT_STREAM_WRITE_STEP;
-
- written = ISeqOutStream_Write(me->outStream, data, cur);
-
- me->outProcessed += written;
- // me->mtc.writtenTotal += written;
- if (written != cur)
- return SZ_ERROR_WRITE;
- data += cur;
- size -= cur;
- if (size == 0)
- {
- *needContinue = needContinue2;
- return SZ_OK;
- }
- RINOK(MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0))
- }
- }
-
- return SZ_ERROR_FAIL;
- /*
- if (size > me->outBufSize)
- return SZ_ERROR_OUTPUT_EOF;
- memcpy(me->outBuf, data, size);
- me->outBufSize -= size;
- me->outBuf += size;
- *needContinue = needContinue2;
- return SZ_OK;
- */
-}
-
-#endif
-
-
-static SRes Lzma2Dec_Prepare_ST(CLzma2DecMt *p)
-{
- if (!p->dec_created)
- {
- Lzma2Dec_CONSTRUCT(&p->dec)
- p->dec_created = True;
- }
-
- RINOK(Lzma2Dec_Allocate(&p->dec, p->prop, &p->alignOffsetAlloc.vt))
-
- if (!p->inBuf || p->inBufSize != p->props.inBufSize_ST)
- {
- ISzAlloc_Free(p->allocMid, p->inBuf);
- p->inBufSize = 0;
- p->inBuf = (Byte *)ISzAlloc_Alloc(p->allocMid, p->props.inBufSize_ST);
- if (!p->inBuf)
- return SZ_ERROR_MEM;
- p->inBufSize = p->props.inBufSize_ST;
- }
-
- Lzma2Dec_Init(&p->dec);
-
- return SZ_OK;
-}
-
-
-static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
- #ifndef Z7_ST
- , BoolInt tMode
- #endif
- )
-{
- SizeT wrPos;
- size_t inPos, inLim;
- const Byte *inData;
- UInt64 inPrev, outPrev;
-
- CLzma2Dec *dec;
-
- #ifndef Z7_ST
- if (tMode)
- {
- Lzma2DecMt_FreeOutBufs(p);
- tMode = MtDec_PrepareRead(&p->mtc);
- }
- #endif
-
- RINOK(Lzma2Dec_Prepare_ST(p))
-
- dec = &p->dec;
-
- inPrev = p->inProcessed;
- outPrev = p->outProcessed;
-
- inPos = 0;
- inLim = 0;
- inData = NULL;
- wrPos = dec->decoder.dicPos;
-
- for (;;)
- {
- SizeT dicPos;
- SizeT size;
- ELzmaFinishMode finishMode;
- SizeT inProcessed;
- ELzmaStatus status;
- SRes res;
-
- SizeT outProcessed;
- BoolInt outFinished;
- BoolInt needStop;
-
- if (inPos == inLim)
- {
- #ifndef Z7_ST
- if (tMode)
- {
- inData = MtDec_Read(&p->mtc, &inLim);
- inPos = 0;
- if (inData)
- continue;
- tMode = False;
- inLim = 0;
- }
- #endif
-
- if (!p->readWasFinished)
- {
- inPos = 0;
- inLim = p->inBufSize;
- inData = p->inBuf;
- p->readRes = ISeqInStream_Read(p->inStream, (void *)(p->inBuf), &inLim);
- // p->readProcessed += inLim;
- // inLim -= 5; p->readWasFinished = True; // for test
- if (inLim == 0 || p->readRes != SZ_OK)
- p->readWasFinished = True;
- }
- }
-
- dicPos = dec->decoder.dicPos;
- {
- SizeT next = dec->decoder.dicBufSize;
- if (next - wrPos > p->props.outStep_ST)
- next = wrPos + (SizeT)p->props.outStep_ST;
- size = next - dicPos;
- }
-
- finishMode = LZMA_FINISH_ANY;
- if (p->outSize_Defined)
- {
- const UInt64 rem = p->outSize - p->outProcessed;
- if (size >= rem)
- {
- size = (SizeT)rem;
- if (p->finishMode)
- finishMode = LZMA_FINISH_END;
- }
- }
-
- inProcessed = (SizeT)(inLim - inPos);
-
- res = Lzma2Dec_DecodeToDic(dec, dicPos + size, inData + inPos, &inProcessed, finishMode, &status);
-
- inPos += inProcessed;
- p->inProcessed += inProcessed;
- outProcessed = dec->decoder.dicPos - dicPos;
- p->outProcessed += outProcessed;
-
- outFinished = (p->outSize_Defined && p->outSize <= p->outProcessed);
-
- needStop = (res != SZ_OK
- || (inProcessed == 0 && outProcessed == 0)
- || status == LZMA_STATUS_FINISHED_WITH_MARK
- || (!p->finishMode && outFinished));
-
- if (needStop || outProcessed >= size)
- {
- SRes res2;
- {
- size_t writeSize = dec->decoder.dicPos - wrPos;
- size_t written = ISeqOutStream_Write(p->outStream, dec->decoder.dic + wrPos, writeSize);
- res2 = (written == writeSize) ? SZ_OK : SZ_ERROR_WRITE;
- }
-
- if (dec->decoder.dicPos == dec->decoder.dicBufSize)
- dec->decoder.dicPos = 0;
- wrPos = dec->decoder.dicPos;
-
- RINOK(res2)
-
- if (needStop)
- {
- if (res != SZ_OK)
- return res;
-
- if (status == LZMA_STATUS_FINISHED_WITH_MARK)
- {
- if (p->finishMode)
- {
- if (p->outSize_Defined && p->outSize != p->outProcessed)
- return SZ_ERROR_DATA;
- }
- return SZ_OK;
- }
-
- if (!p->finishMode && outFinished)
- return SZ_OK;
-
- if (status == LZMA_STATUS_NEEDS_MORE_INPUT)
- return SZ_ERROR_INPUT_EOF;
-
- return SZ_ERROR_DATA;
- }
- }
-
- if (p->progress)
- {
- UInt64 inDelta = p->inProcessed - inPrev;
- UInt64 outDelta = p->outProcessed - outPrev;
- if (inDelta >= (1 << 22) || outDelta >= (1 << 22))
- {
- RINOK(ICompressProgress_Progress(p->progress, p->inProcessed, p->outProcessed))
- inPrev = p->inProcessed;
- outPrev = p->outProcessed;
- }
- }
- }
-}
-
-
-
-SRes Lzma2DecMt_Decode(CLzma2DecMtHandle p,
- Byte prop,
- const CLzma2DecMtProps *props,
- ISeqOutStreamPtr outStream, const UInt64 *outDataSize, int finishMode,
- // Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- // const Byte *inData, size_t inDataSize,
- UInt64 *inProcessed,
- // UInt64 *outProcessed,
- int *isMT,
- ICompressProgressPtr progress)
-{
- // GET_CLzma2DecMt_p
- #ifndef Z7_ST
- BoolInt tMode;
- #endif
-
- *inProcessed = 0;
-
- if (prop > 40)
- return SZ_ERROR_UNSUPPORTED;
-
- p->prop = prop;
- p->props = *props;
-
- p->inStream = inStream;
- p->outStream = outStream;
- p->progress = progress;
-
- p->outSize = 0;
- p->outSize_Defined = False;
- if (outDataSize)
- {
- p->outSize_Defined = True;
- p->outSize = *outDataSize;
- }
- p->finishMode = finishMode;
-
- p->outProcessed = 0;
- p->inProcessed = 0;
-
- p->readWasFinished = False;
- p->readRes = SZ_OK;
-
- *isMT = False;
-
-
- #ifndef Z7_ST
-
- tMode = False;
-
- // p->mtc.parseRes = SZ_OK;
-
- // p->mtc.numFilledThreads = 0;
- // p->mtc.crossStart = 0;
- // p->mtc.crossEnd = 0;
- // p->mtc.allocError_for_Read_BlockIndex = 0;
- // p->mtc.isAllocError = False;
-
- if (p->props.numThreads > 1)
- {
- IMtDecCallback2 vt;
-
- Lzma2DecMt_FreeSt(p);
-
- p->outProcessed_Parse = 0;
-
- if (!p->mtc_WasConstructed)
- {
- p->mtc_WasConstructed = True;
- MtDec_Construct(&p->mtc);
- }
-
- p->mtc.progress = progress;
- p->mtc.inStream = inStream;
-
- // p->outBuf = NULL;
- // p->outBufSize = 0;
- /*
- if (!outStream)
- {
- // p->outBuf = outBuf;
- // p->outBufSize = *outBufSize;
- // *outBufSize = 0;
- return SZ_ERROR_PARAM;
- }
- */
-
- // p->mtc.inBlockMax = p->props.inBlockMax;
- p->mtc.alloc = &p->alignOffsetAlloc.vt;
- // p->alignOffsetAlloc.baseAlloc;
- // p->mtc.inData = inData;
- // p->mtc.inDataSize = inDataSize;
- p->mtc.mtCallback = &vt;
- p->mtc.mtCallbackObject = p;
-
- p->mtc.inBufSize = p->props.inBufSize_MT;
-
- p->mtc.numThreadsMax = p->props.numThreads;
-
- *isMT = True;
-
- vt.Parse = Lzma2DecMt_MtCallback_Parse;
- vt.PreCode = Lzma2DecMt_MtCallback_PreCode;
- vt.Code = Lzma2DecMt_MtCallback_Code;
- vt.Write = Lzma2DecMt_MtCallback_Write;
-
- {
- BoolInt needContinue = False;
-
- SRes res = MtDec_Code(&p->mtc);
-
- /*
- if (!outStream)
- *outBufSize = p->outBuf - outBuf;
- */
-
- *inProcessed = p->mtc.inProcessed;
-
- needContinue = False;
-
- if (res == SZ_OK)
- {
- if (p->mtc.mtProgress.res != SZ_OK)
- res = p->mtc.mtProgress.res;
- else
- needContinue = p->mtc.needContinue;
- }
-
- if (!needContinue)
- {
- if (res == SZ_OK)
- return p->mtc.readRes;
- return res;
- }
-
- tMode = True;
- p->readRes = p->mtc.readRes;
- p->readWasFinished = p->mtc.readWasFinished;
- p->inProcessed = p->mtc.inProcessed;
-
- PRF_STR("----- decoding ST -----")
- }
- }
-
- #endif
-
-
- *isMT = False;
-
- {
- SRes res = Lzma2Dec_Decode_ST(p
- #ifndef Z7_ST
- , tMode
- #endif
- );
-
- *inProcessed = p->inProcessed;
-
- // res = SZ_OK; // for test
- if (res == SZ_ERROR_INPUT_EOF)
- {
- if (p->readRes != SZ_OK)
- res = p->readRes;
- }
- else if (res == SZ_OK && p->readRes != SZ_OK)
- res = p->readRes;
-
- /*
- #ifndef Z7_ST
- if (res == SZ_OK && tMode && p->mtc.parseRes != SZ_OK)
- res = p->mtc.parseRes;
- #endif
- */
-
- return res;
- }
-}
-
-
-/* ---------- Read from CLzma2DecMtHandle Interface ---------- */
-
-SRes Lzma2DecMt_Init(CLzma2DecMtHandle p,
- Byte prop,
- const CLzma2DecMtProps *props,
- const UInt64 *outDataSize, int finishMode,
- ISeqInStreamPtr inStream)
-{
- // GET_CLzma2DecMt_p
-
- if (prop > 40)
- return SZ_ERROR_UNSUPPORTED;
-
- p->prop = prop;
- p->props = *props;
-
- p->inStream = inStream;
-
- p->outSize = 0;
- p->outSize_Defined = False;
- if (outDataSize)
- {
- p->outSize_Defined = True;
- p->outSize = *outDataSize;
- }
- p->finishMode = finishMode;
-
- p->outProcessed = 0;
- p->inProcessed = 0;
-
- p->inPos = 0;
- p->inLim = 0;
-
- return Lzma2Dec_Prepare_ST(p);
-}
-
-
-SRes Lzma2DecMt_Read(CLzma2DecMtHandle p,
- Byte *data, size_t *outSize,
- UInt64 *inStreamProcessed)
-{
- // GET_CLzma2DecMt_p
- ELzmaFinishMode finishMode;
- SRes readRes;
- size_t size = *outSize;
-
- *outSize = 0;
- *inStreamProcessed = 0;
-
- finishMode = LZMA_FINISH_ANY;
- if (p->outSize_Defined)
- {
- const UInt64 rem = p->outSize - p->outProcessed;
- if (size >= rem)
- {
- size = (size_t)rem;
- if (p->finishMode)
- finishMode = LZMA_FINISH_END;
- }
- }
-
- readRes = SZ_OK;
-
- for (;;)
- {
- SizeT inCur;
- SizeT outCur;
- ELzmaStatus status;
- SRes res;
-
- if (p->inPos == p->inLim && readRes == SZ_OK)
- {
- p->inPos = 0;
- p->inLim = p->props.inBufSize_ST;
- readRes = ISeqInStream_Read(p->inStream, p->inBuf, &p->inLim);
- }
-
- inCur = (SizeT)(p->inLim - p->inPos);
- outCur = (SizeT)size;
-
- res = Lzma2Dec_DecodeToBuf(&p->dec, data, &outCur,
- p->inBuf + p->inPos, &inCur, finishMode, &status);
-
- p->inPos += inCur;
- p->inProcessed += inCur;
- *inStreamProcessed += inCur;
- p->outProcessed += outCur;
- *outSize += outCur;
- size -= outCur;
- data += outCur;
-
- if (res != 0)
- return res;
-
- /*
- if (status == LZMA_STATUS_FINISHED_WITH_MARK)
- return readRes;
-
- if (size == 0 && status != LZMA_STATUS_NEEDS_MORE_INPUT)
- {
- if (p->finishMode && p->outSize_Defined && p->outProcessed >= p->outSize)
- return SZ_ERROR_DATA;
- return readRes;
- }
- */
-
- if (inCur == 0 && outCur == 0)
- return readRes;
- }
-}
-
-#undef PRF
-#undef PRF_STR
-#undef PRF_STR_INT_2
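
The Parse callback in the deleted Lzma2DecMt.c staggers each worker's allocation offset so the per-thread decoders do not all land on the same data-cache way. The stand-alone program below is illustrative only; it just prints the offsets the same formula produces, with constants mirroring that code (4 KiB assumed per cache way, 128-byte cache lines).

/* Illustrative only: per-thread dictionary offsets, as computed in
   Lzma2DecMt_MtCallback_Parse. */
#include <stdio.h>

int main(void)
{
  const unsigned kNumAlignBits = 12;
  const unsigned kNumCacheLineBits = 7;
  unsigned i;
  for (i = 0; i < 4; i++)
  {
    const unsigned offset = (i * ((1u << 11) + (1u << 8) + (1u << 6)))
        & ((1u << kNumAlignBits) - (1u << kNumCacheLineBits));
    printf("coder %u: dictionary offset 0x%03X\n", i, offset);
  }
  return 0;   /* prints 0x000, 0x900, 0x280, 0xB80 for the first four coders */
}
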
diff --git a/3rdparty/7z/src/Lzma2DecMt.h b/3rdparty/7z/src/Lzma2DecMt.h
deleted file mode 100644
index cb7c8f1e53..0000000000
--- a/3rdparty/7z/src/Lzma2DecMt.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* Lzma2DecMt.h -- LZMA2 Decoder Multi-thread
-2023-04-13 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA2_DEC_MT_H
-#define ZIP7_INC_LZMA2_DEC_MT_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-typedef struct
-{
- size_t inBufSize_ST;
- size_t outStep_ST;
-
- #ifndef Z7_ST
- unsigned numThreads;
- size_t inBufSize_MT;
- size_t outBlockMax;
- size_t inBlockMax;
- #endif
-} CLzma2DecMtProps;
-
-/* init to single-thread mode */
-void Lzma2DecMtProps_Init(CLzma2DecMtProps *p);
-
-
-/* ---------- CLzma2DecMtHandle Interface ---------- */
-
-/* Lzma2DecMt_ * functions can return the following exit codes:
-SRes:
- SZ_OK - OK
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_PARAM - Incorrect parameter in props
- SZ_ERROR_WRITE - ISeqOutStream write callback error
- // SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output
- SZ_ERROR_PROGRESS - some break from progress callback
- SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
-*/
-
-typedef struct CLzma2DecMt CLzma2DecMt;
-typedef CLzma2DecMt * CLzma2DecMtHandle;
-// Z7_DECLARE_HANDLE(CLzma2DecMtHandle)
-
-CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid);
-void Lzma2DecMt_Destroy(CLzma2DecMtHandle p);
-
-SRes Lzma2DecMt_Decode(CLzma2DecMtHandle p,
- Byte prop,
- const CLzma2DecMtProps *props,
- ISeqOutStreamPtr outStream,
- const UInt64 *outDataSize, // NULL means undefined
- int finishMode, // 0 - partial unpacking is allowed, 1 - if lzma2 stream must be finished
- // Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- // const Byte *inData, size_t inDataSize,
-
- // out variables:
- UInt64 *inProcessed,
- int *isMT, /* out: (*isMT == 0), if single thread decoding was used */
-
- // UInt64 *outProcessed,
- ICompressProgressPtr progress);
-
-
-/* ---------- Read from CLzma2DecMtHandle Interface ---------- */
-
-SRes Lzma2DecMt_Init(CLzma2DecMtHandle pp,
- Byte prop,
- const CLzma2DecMtProps *props,
- const UInt64 *outDataSize, int finishMode,
- ISeqInStreamPtr inStream);
-
-SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp,
- Byte *data, size_t *outSize,
- UInt64 *inStreamProcessed);
-
-
-EXTERN_C_END
-
-#endif
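
A rough usage sketch of the pull-style interface declared above, assuming the g_Alloc/g_BigAlloc allocators from Alloc.h and a caller-supplied ISeqInStream; what happens to the decoded bytes is left as a placeholder.

/* Sketch only; error handling is abbreviated. */
#include "Lzma2DecMt.h"
#include "Alloc.h"

static SRes PullDecode(Byte prop, ISeqInStreamPtr inStream)
{
  Byte buf[1 << 16];
  CLzma2DecMtProps props;
  SRes res;
  CLzma2DecMtHandle h = Lzma2DecMt_Create(&g_Alloc, &g_BigAlloc);
  if (!h)
    return SZ_ERROR_MEM;
  Lzma2DecMtProps_Init(&props);   /* single-thread defaults */
  res = Lzma2DecMt_Init(h, prop, &props,
      NULL /* output size unknown */, 0 /* partial unpacking allowed */,
      inStream);
  while (res == SZ_OK)
  {
    size_t outSize = sizeof(buf);
    UInt64 inProcessed;
    res = Lzma2DecMt_Read(h, buf, &outSize, &inProcessed);
    if (outSize == 0)
      break;            /* nothing more was produced */
    /* consume outSize bytes from buf here */
  }
  Lzma2DecMt_Destroy(h);
  return res;
}
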
diff --git a/3rdparty/7z/src/Lzma2Enc.c b/3rdparty/7z/src/Lzma2Enc.c
deleted file mode 100644
index 85aa80d77e..0000000000
--- a/3rdparty/7z/src/Lzma2Enc.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/* Lzma2Enc.c -- LZMA2 Encoder
-2023-04-13 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-/* #define Z7_ST */
-
-#include "Lzma2Enc.h"
-
-#ifndef Z7_ST
-#include "MtCoder.h"
-#else
-#define MTCODER_THREADS_MAX 1
-#endif
-
-#define LZMA2_CONTROL_LZMA (1 << 7)
-#define LZMA2_CONTROL_COPY_NO_RESET 2
-#define LZMA2_CONTROL_COPY_RESET_DIC 1
-#define LZMA2_CONTROL_EOF 0
-
-#define LZMA2_LCLP_MAX 4
-
-#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11))
-
-#define LZMA2_PACK_SIZE_MAX (1 << 16)
-#define LZMA2_COPY_CHUNK_SIZE LZMA2_PACK_SIZE_MAX
-#define LZMA2_UNPACK_SIZE_MAX (1 << 21)
-#define LZMA2_KEEP_WINDOW_SIZE LZMA2_UNPACK_SIZE_MAX
-
-#define LZMA2_CHUNK_SIZE_COMPRESSED_MAX ((1 << 16) + 16)
-
-
-#define PRF(x) /* x */
-
-
-/* ---------- CLimitedSeqInStream ---------- */
-
-typedef struct
-{
- ISeqInStream vt;
- ISeqInStreamPtr realStream;
- UInt64 limit;
- UInt64 processed;
- int finished;
-} CLimitedSeqInStream;
-
-static void LimitedSeqInStream_Init(CLimitedSeqInStream *p)
-{
- p->limit = (UInt64)(Int64)-1;
- p->processed = 0;
- p->finished = 0;
-}
-
-static SRes LimitedSeqInStream_Read(ISeqInStreamPtr pp, void *data, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLimitedSeqInStream)
- size_t size2 = *size;
- SRes res = SZ_OK;
-
- if (p->limit != (UInt64)(Int64)-1)
- {
- const UInt64 rem = p->limit - p->processed;
- if (size2 > rem)
- size2 = (size_t)rem;
- }
- if (size2 != 0)
- {
- res = ISeqInStream_Read(p->realStream, data, &size2);
- p->finished = (size2 == 0 ? 1 : 0);
- p->processed += size2;
- }
- *size = size2;
- return res;
-}
-
-
-/* ---------- CLzma2EncInt ---------- */
-
-typedef struct
-{
- CLzmaEncHandle enc;
- Byte propsAreSet;
- Byte propsByte;
- Byte needInitState;
- Byte needInitProp;
- UInt64 srcPos;
-} CLzma2EncInt;
-
-
-static SRes Lzma2EncInt_InitStream(CLzma2EncInt *p, const CLzma2EncProps *props)
-{
- if (!p->propsAreSet)
- {
- SizeT propsSize = LZMA_PROPS_SIZE;
- Byte propsEncoded[LZMA_PROPS_SIZE];
- RINOK(LzmaEnc_SetProps(p->enc, &props->lzmaProps))
- RINOK(LzmaEnc_WriteProperties(p->enc, propsEncoded, &propsSize))
- p->propsByte = propsEncoded[0];
- p->propsAreSet = True;
- }
- return SZ_OK;
-}
-
-static void Lzma2EncInt_InitBlock(CLzma2EncInt *p)
-{
- p->srcPos = 0;
- p->needInitState = True;
- p->needInitProp = True;
-}
-
-
-SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p, ISeqInStreamPtr inStream, UInt32 keepWindowSize,
- ISzAllocPtr alloc, ISzAllocPtr allocBig);
-SRes LzmaEnc_MemPrepare(CLzmaEncHandle p, const Byte *src, SizeT srcLen,
- UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
-SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
- Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
-const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p);
-void LzmaEnc_Finish(CLzmaEncHandle p);
-void LzmaEnc_SaveState(CLzmaEncHandle p);
-void LzmaEnc_RestoreState(CLzmaEncHandle p);
-
-/*
-UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle p);
-*/
-
-static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
- size_t *packSizeRes, ISeqOutStreamPtr outStream)
-{
- size_t packSizeLimit = *packSizeRes;
- size_t packSize = packSizeLimit;
- UInt32 unpackSize = LZMA2_UNPACK_SIZE_MAX;
- unsigned lzHeaderSize = 5 + (p->needInitProp ? 1 : 0);
- BoolInt useCopyBlock;
- SRes res;
-
- *packSizeRes = 0;
- if (packSize < lzHeaderSize)
- return SZ_ERROR_OUTPUT_EOF;
- packSize -= lzHeaderSize;
-
- LzmaEnc_SaveState(p->enc);
- res = LzmaEnc_CodeOneMemBlock(p->enc, p->needInitState,
- outBuf + lzHeaderSize, &packSize, LZMA2_PACK_SIZE_MAX, &unpackSize);
-
- PRF(printf("\npackSize = %7d unpackSize = %7d ", packSize, unpackSize));
-
- if (unpackSize == 0)
- return res;
-
- if (res == SZ_OK)
- useCopyBlock = (packSize + 2 >= unpackSize || packSize > (1 << 16));
- else
- {
- if (res != SZ_ERROR_OUTPUT_EOF)
- return res;
- res = SZ_OK;
- useCopyBlock = True;
- }
-
- if (useCopyBlock)
- {
- size_t destPos = 0;
- PRF(printf("################# COPY "));
-
- while (unpackSize > 0)
- {
- const UInt32 u = (unpackSize < LZMA2_COPY_CHUNK_SIZE) ? unpackSize : LZMA2_COPY_CHUNK_SIZE;
- if (packSizeLimit - destPos < u + 3)
- return SZ_ERROR_OUTPUT_EOF;
- outBuf[destPos++] = (Byte)(p->srcPos == 0 ? LZMA2_CONTROL_COPY_RESET_DIC : LZMA2_CONTROL_COPY_NO_RESET);
- outBuf[destPos++] = (Byte)((u - 1) >> 8);
- outBuf[destPos++] = (Byte)(u - 1);
- memcpy(outBuf + destPos, LzmaEnc_GetCurBuf(p->enc) - unpackSize, u);
- unpackSize -= u;
- destPos += u;
- p->srcPos += u;
-
- if (outStream)
- {
- *packSizeRes += destPos;
- if (ISeqOutStream_Write(outStream, outBuf, destPos) != destPos)
- return SZ_ERROR_WRITE;
- destPos = 0;
- }
- else
- *packSizeRes = destPos;
- /* needInitState = True; */
- }
-
- LzmaEnc_RestoreState(p->enc);
- return SZ_OK;
- }
-
- {
- size_t destPos = 0;
- const UInt32 u = unpackSize - 1;
- const UInt32 pm = (UInt32)(packSize - 1);
- const unsigned mode = (p->srcPos == 0) ? 3 : (p->needInitState ? (p->needInitProp ? 2 : 1) : 0);
-
- PRF(printf(" "));
-
- outBuf[destPos++] = (Byte)(LZMA2_CONTROL_LZMA | (mode << 5) | ((u >> 16) & 0x1F));
- outBuf[destPos++] = (Byte)(u >> 8);
- outBuf[destPos++] = (Byte)u;
- outBuf[destPos++] = (Byte)(pm >> 8);
- outBuf[destPos++] = (Byte)pm;
-
- if (p->needInitProp)
- outBuf[destPos++] = p->propsByte;
-
- p->needInitProp = False;
- p->needInitState = False;
- destPos += packSize;
- p->srcPos += unpackSize;
-
- if (outStream)
- if (ISeqOutStream_Write(outStream, outBuf, destPos) != destPos)
- return SZ_ERROR_WRITE;
-
- *packSizeRes = destPos;
- return SZ_OK;
- }
-}
-
-
-/* ---------- Lzma2 Props ---------- */
-
-void Lzma2EncProps_Init(CLzma2EncProps *p)
-{
- LzmaEncProps_Init(&p->lzmaProps);
- p->blockSize = LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO;
- p->numBlockThreads_Reduced = -1;
- p->numBlockThreads_Max = -1;
- p->numTotalThreads = -1;
-}
-
-void Lzma2EncProps_Normalize(CLzma2EncProps *p)
-{
- UInt64 fileSize;
- int t1, t1n, t2, t2r, t3;
- {
- CLzmaEncProps lzmaProps = p->lzmaProps;
- LzmaEncProps_Normalize(&lzmaProps);
- t1n = lzmaProps.numThreads;
- }
-
- t1 = p->lzmaProps.numThreads;
- t2 = p->numBlockThreads_Max;
- t3 = p->numTotalThreads;
-
- if (t2 > MTCODER_THREADS_MAX)
- t2 = MTCODER_THREADS_MAX;
-
- if (t3 <= 0)
- {
- if (t2 <= 0)
- t2 = 1;
- t3 = t1n * t2;
- }
- else if (t2 <= 0)
- {
- t2 = t3 / t1n;
- if (t2 == 0)
- {
- t1 = 1;
- t2 = t3;
- }
- if (t2 > MTCODER_THREADS_MAX)
- t2 = MTCODER_THREADS_MAX;
- }
- else if (t1 <= 0)
- {
- t1 = t3 / t2;
- if (t1 == 0)
- t1 = 1;
- }
- else
- t3 = t1n * t2;
-
- p->lzmaProps.numThreads = t1;
-
- t2r = t2;
-
- fileSize = p->lzmaProps.reduceSize;
-
- if ( p->blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
- && p->blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO
- && (p->blockSize < fileSize || fileSize == (UInt64)(Int64)-1))
- p->lzmaProps.reduceSize = p->blockSize;
-
- LzmaEncProps_Normalize(&p->lzmaProps);
-
- p->lzmaProps.reduceSize = fileSize;
-
- t1 = p->lzmaProps.numThreads;
-
- if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
- {
- t2r = t2 = 1;
- t3 = t1;
- }
- else if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO && t2 <= 1)
- {
- /* if there is no block multi-threading, we use SOLID block */
- p->blockSize = LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID;
- }
- else
- {
- if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
- {
- const UInt32 kMinSize = (UInt32)1 << 20;
- const UInt32 kMaxSize = (UInt32)1 << 28;
- const UInt32 dictSize = p->lzmaProps.dictSize;
- UInt64 blockSize = (UInt64)dictSize << 2;
- if (blockSize < kMinSize) blockSize = kMinSize;
- if (blockSize > kMaxSize) blockSize = kMaxSize;
- if (blockSize < dictSize) blockSize = dictSize;
- blockSize += (kMinSize - 1);
- blockSize &= ~(UInt64)(kMinSize - 1);
- p->blockSize = blockSize;
- }
-
- if (t2 > 1 && fileSize != (UInt64)(Int64)-1)
- {
- UInt64 numBlocks = fileSize / p->blockSize;
- if (numBlocks * p->blockSize != fileSize)
- numBlocks++;
- if (numBlocks < (unsigned)t2)
- {
- t2r = (int)numBlocks;
- if (t2r == 0)
- t2r = 1;
- t3 = t1 * t2r;
- }
- }
- }
-
- p->numBlockThreads_Max = t2;
- p->numBlockThreads_Reduced = t2r;
- p->numTotalThreads = t3;
-}
-
-
-static SRes Progress(ICompressProgressPtr p, UInt64 inSize, UInt64 outSize)
-{
- return (p && ICompressProgress_Progress(p, inSize, outSize) != SZ_OK) ? SZ_ERROR_PROGRESS : SZ_OK;
-}
-
-
-/* ---------- Lzma2 ---------- */
-
-struct CLzma2Enc
-{
- Byte propEncoded;
- CLzma2EncProps props;
- UInt64 expectedDataSize;
-
- Byte *tempBufLzma;
-
- ISzAllocPtr alloc;
- ISzAllocPtr allocBig;
-
- CLzma2EncInt coders[MTCODER_THREADS_MAX];
-
- #ifndef Z7_ST
-
- ISeqOutStreamPtr outStream;
- Byte *outBuf;
- size_t outBuf_Rem; /* remainder in outBuf */
-
- size_t outBufSize; /* size of allocated outBufs[i] */
- size_t outBufsDataSizes[MTCODER_BLOCKS_MAX];
- BoolInt mtCoder_WasConstructed;
- CMtCoder mtCoder;
- Byte *outBufs[MTCODER_BLOCKS_MAX];
-
- #endif
-};
-
-
-
-CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- CLzma2Enc *p = (CLzma2Enc *)ISzAlloc_Alloc(alloc, sizeof(CLzma2Enc));
- if (!p)
- return NULL;
- Lzma2EncProps_Init(&p->props);
- Lzma2EncProps_Normalize(&p->props);
- p->expectedDataSize = (UInt64)(Int64)-1;
- p->tempBufLzma = NULL;
- p->alloc = alloc;
- p->allocBig = allocBig;
- {
- unsigned i;
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- p->coders[i].enc = NULL;
- }
-
- #ifndef Z7_ST
- p->mtCoder_WasConstructed = False;
- {
- unsigned i;
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- p->outBufs[i] = NULL;
- p->outBufSize = 0;
- }
- #endif
-
- return (CLzma2EncHandle)p;
-}
-
-
-#ifndef Z7_ST
-
-static void Lzma2Enc_FreeOutBufs(CLzma2Enc *p)
-{
- unsigned i;
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- if (p->outBufs[i])
- {
- ISzAlloc_Free(p->alloc, p->outBufs[i]);
- p->outBufs[i] = NULL;
- }
- p->outBufSize = 0;
-}
-
-#endif
-
-// #define GET_CLzma2Enc_p CLzma2Enc *p = (CLzma2Enc *)(void *)p;
-
-void Lzma2Enc_Destroy(CLzma2EncHandle p)
-{
- // GET_CLzma2Enc_p
- unsigned i;
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- {
- CLzma2EncInt *t = &p->coders[i];
- if (t->enc)
- {
- LzmaEnc_Destroy(t->enc, p->alloc, p->allocBig);
- t->enc = NULL;
- }
- }
-
-
- #ifndef Z7_ST
- if (p->mtCoder_WasConstructed)
- {
- MtCoder_Destruct(&p->mtCoder);
- p->mtCoder_WasConstructed = False;
- }
- Lzma2Enc_FreeOutBufs(p);
- #endif
-
- ISzAlloc_Free(p->alloc, p->tempBufLzma);
- p->tempBufLzma = NULL;
-
- ISzAlloc_Free(p->alloc, p);
-}
-
-
-SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props)
-{
- // GET_CLzma2Enc_p
- CLzmaEncProps lzmaProps = props->lzmaProps;
- LzmaEncProps_Normalize(&lzmaProps);
- if (lzmaProps.lc + lzmaProps.lp > LZMA2_LCLP_MAX)
- return SZ_ERROR_PARAM;
- p->props = *props;
- Lzma2EncProps_Normalize(&p->props);
- return SZ_OK;
-}
-
-
-void Lzma2Enc_SetDataSize(CLzma2EncHandle p, UInt64 expectedDataSiize)
-{
- // GET_CLzma2Enc_p
- p->expectedDataSize = expectedDataSiize;
-}
-
-
-Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p)
-{
- // GET_CLzma2Enc_p
- unsigned i;
- UInt32 dicSize = LzmaEncProps_GetDictSize(&p->props.lzmaProps);
- for (i = 0; i < 40; i++)
- if (dicSize <= LZMA2_DIC_SIZE_FROM_PROP(i))
- break;
- return (Byte)i;
-}
-
-
-static SRes Lzma2Enc_EncodeMt1(
- CLzma2Enc *me,
- CLzma2EncInt *p,
- ISeqOutStreamPtr outStream,
- Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- const Byte *inData, size_t inDataSize,
- int finished,
- ICompressProgressPtr progress)
-{
- UInt64 unpackTotal = 0;
- UInt64 packTotal = 0;
- size_t outLim = 0;
- CLimitedSeqInStream limitedInStream;
-
- if (outBuf)
- {
- outLim = *outBufSize;
- *outBufSize = 0;
- }
-
- if (!p->enc)
- {
- p->propsAreSet = False;
- p->enc = LzmaEnc_Create(me->alloc);
- if (!p->enc)
- return SZ_ERROR_MEM;
- }
-
- limitedInStream.realStream = inStream;
- if (inStream)
- {
- limitedInStream.vt.Read = LimitedSeqInStream_Read;
- }
-
- if (!outBuf)
- {
- // outStream version works only in one thread. So we use CLzma2Enc::tempBufLzma
- if (!me->tempBufLzma)
- {
- me->tempBufLzma = (Byte *)ISzAlloc_Alloc(me->alloc, LZMA2_CHUNK_SIZE_COMPRESSED_MAX);
- if (!me->tempBufLzma)
- return SZ_ERROR_MEM;
- }
- }
-
- RINOK(Lzma2EncInt_InitStream(p, &me->props))
-
- for (;;)
- {
- SRes res = SZ_OK;
- SizeT inSizeCur = 0;
-
- Lzma2EncInt_InitBlock(p);
-
- LimitedSeqInStream_Init(&limitedInStream);
- limitedInStream.limit = me->props.blockSize;
-
- if (inStream)
- {
- UInt64 expected = (UInt64)(Int64)-1;
- // inStream version works only in one thread. So we use CLzma2Enc::expectedDataSize
- if (me->expectedDataSize != (UInt64)(Int64)-1
- && me->expectedDataSize >= unpackTotal)
- expected = me->expectedDataSize - unpackTotal;
- if (me->props.blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
- && expected > me->props.blockSize)
- expected = (size_t)me->props.blockSize;
-
- LzmaEnc_SetDataSize(p->enc, expected);
-
- RINOK(LzmaEnc_PrepareForLzma2(p->enc,
- &limitedInStream.vt,
- LZMA2_KEEP_WINDOW_SIZE,
- me->alloc,
- me->allocBig))
- }
- else
- {
- inSizeCur = (SizeT)(inDataSize - (size_t)unpackTotal);
- if (me->props.blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
- && inSizeCur > me->props.blockSize)
- inSizeCur = (SizeT)(size_t)me->props.blockSize;
-
- // LzmaEnc_SetDataSize(p->enc, inSizeCur);
-
- RINOK(LzmaEnc_MemPrepare(p->enc,
- inData + (size_t)unpackTotal, inSizeCur,
- LZMA2_KEEP_WINDOW_SIZE,
- me->alloc,
- me->allocBig))
- }
-
- for (;;)
- {
- size_t packSize = LZMA2_CHUNK_SIZE_COMPRESSED_MAX;
- if (outBuf)
- packSize = outLim - (size_t)packTotal;
-
- res = Lzma2EncInt_EncodeSubblock(p,
- outBuf ? outBuf + (size_t)packTotal : me->tempBufLzma, &packSize,
- outBuf ? NULL : outStream);
-
- if (res != SZ_OK)
- break;
-
- packTotal += packSize;
- if (outBuf)
- *outBufSize = (size_t)packTotal;
-
- res = Progress(progress, unpackTotal + p->srcPos, packTotal);
- if (res != SZ_OK)
- break;
-
- /*
- if (LzmaEnc_GetNumAvailableBytes(p->enc) == 0)
- break;
- */
-
- if (packSize == 0)
- break;
- }
-
- LzmaEnc_Finish(p->enc);
-
- unpackTotal += p->srcPos;
-
- RINOK(res)
-
- if (p->srcPos != (inStream ? limitedInStream.processed : inSizeCur))
- return SZ_ERROR_FAIL;
-
- if (inStream ? limitedInStream.finished : (unpackTotal == inDataSize))
- {
- if (finished)
- {
- if (outBuf)
- {
- const size_t destPos = *outBufSize;
- if (destPos >= outLim)
- return SZ_ERROR_OUTPUT_EOF;
- outBuf[destPos] = LZMA2_CONTROL_EOF; // 0
- *outBufSize = destPos + 1;
- }
- else
- {
- const Byte b = LZMA2_CONTROL_EOF; // 0;
- if (ISeqOutStream_Write(outStream, &b, 1) != 1)
- return SZ_ERROR_WRITE;
- }
- }
- return SZ_OK;
- }
- }
-}
-
-
-
-#ifndef Z7_ST
-
-static SRes Lzma2Enc_MtCallback_Code(void *p, unsigned coderIndex, unsigned outBufIndex,
- const Byte *src, size_t srcSize, int finished)
-{
- CLzma2Enc *me = (CLzma2Enc *)p;
- size_t destSize = me->outBufSize;
- SRes res;
- CMtProgressThunk progressThunk;
-
- Byte *dest = me->outBufs[outBufIndex];
-
- me->outBufsDataSizes[outBufIndex] = 0;
-
- if (!dest)
- {
- dest = (Byte *)ISzAlloc_Alloc(me->alloc, me->outBufSize);
- if (!dest)
- return SZ_ERROR_MEM;
- me->outBufs[outBufIndex] = dest;
- }
-
- MtProgressThunk_CreateVTable(&progressThunk);
- progressThunk.mtProgress = &me->mtCoder.mtProgress;
- progressThunk.inSize = 0;
- progressThunk.outSize = 0;
-
- res = Lzma2Enc_EncodeMt1(me,
- &me->coders[coderIndex],
- NULL, dest, &destSize,
- NULL, src, srcSize,
- finished,
- &progressThunk.vt);
-
- me->outBufsDataSizes[outBufIndex] = destSize;
-
- return res;
-}
-
-
-static SRes Lzma2Enc_MtCallback_Write(void *p, unsigned outBufIndex)
-{
- CLzma2Enc *me = (CLzma2Enc *)p;
- size_t size = me->outBufsDataSizes[outBufIndex];
- const Byte *data = me->outBufs[outBufIndex];
-
- if (me->outStream)
- return ISeqOutStream_Write(me->outStream, data, size) == size ? SZ_OK : SZ_ERROR_WRITE;
-
- if (size > me->outBuf_Rem)
- return SZ_ERROR_OUTPUT_EOF;
- memcpy(me->outBuf, data, size);
- me->outBuf_Rem -= size;
- me->outBuf += size;
- return SZ_OK;
-}
-
-#endif
-
-
-
-SRes Lzma2Enc_Encode2(CLzma2EncHandle p,
- ISeqOutStreamPtr outStream,
- Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- const Byte *inData, size_t inDataSize,
- ICompressProgressPtr progress)
-{
- // GET_CLzma2Enc_p
-
- if (inStream && inData)
- return SZ_ERROR_PARAM;
-
- if (outStream && outBuf)
- return SZ_ERROR_PARAM;
-
- {
- unsigned i;
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- p->coders[i].propsAreSet = False;
- }
-
- #ifndef Z7_ST
-
- if (p->props.numBlockThreads_Reduced > 1)
- {
- IMtCoderCallback2 vt;
-
- if (!p->mtCoder_WasConstructed)
- {
- p->mtCoder_WasConstructed = True;
- MtCoder_Construct(&p->mtCoder);
- }
-
- vt.Code = Lzma2Enc_MtCallback_Code;
- vt.Write = Lzma2Enc_MtCallback_Write;
-
- p->outStream = outStream;
- p->outBuf = NULL;
- p->outBuf_Rem = 0;
- if (!outStream)
- {
- p->outBuf = outBuf;
- p->outBuf_Rem = *outBufSize;
- *outBufSize = 0;
- }
-
- p->mtCoder.allocBig = p->allocBig;
- p->mtCoder.progress = progress;
- p->mtCoder.inStream = inStream;
- p->mtCoder.inData = inData;
- p->mtCoder.inDataSize = inDataSize;
- p->mtCoder.mtCallback = &vt;
- p->mtCoder.mtCallbackObject = p;
-
- p->mtCoder.blockSize = (size_t)p->props.blockSize;
- if (p->mtCoder.blockSize != p->props.blockSize)
- return SZ_ERROR_PARAM; /* SZ_ERROR_MEM */
-
- {
- const size_t destBlockSize = p->mtCoder.blockSize + (p->mtCoder.blockSize >> 10) + 16;
- if (destBlockSize < p->mtCoder.blockSize)
- return SZ_ERROR_PARAM;
- if (p->outBufSize != destBlockSize)
- Lzma2Enc_FreeOutBufs(p);
- p->outBufSize = destBlockSize;
- }
-
- p->mtCoder.numThreadsMax = (unsigned)p->props.numBlockThreads_Max;
- p->mtCoder.expectedDataSize = p->expectedDataSize;
-
- {
- const SRes res = MtCoder_Code(&p->mtCoder);
- if (!outStream)
- *outBufSize = (size_t)(p->outBuf - outBuf);
- return res;
- }
- }
-
- #endif
-
-
- return Lzma2Enc_EncodeMt1(p,
- &p->coders[0],
- outStream, outBuf, outBufSize,
- inStream, inData, inDataSize,
- True, /* finished */
- progress);
-}
-
-#undef PRF
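
For reference, the multithreaded path above sizes each per-block output buffer with an incompressible-data margin. A minimal sketch of that bound, assuming only what the code above shows (the helper name is mine, not part of the SDK):

#include <stddef.h>

/* Worst-case output space for one LZMA2 block of `blockSize` uncompressed
   bytes, mirroring destBlockSize = blockSize + (blockSize >> 10) + 16 above. */
static size_t Lzma2_BlockOutBound(size_t blockSize)
{
  return blockSize + (blockSize >> 10) + 16;
}
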
diff --git a/3rdparty/7z/src/Lzma2Enc.h b/3rdparty/7z/src/Lzma2Enc.h
deleted file mode 100644
index bead0fc52f..0000000000
--- a/3rdparty/7z/src/Lzma2Enc.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Lzma2Enc.h -- LZMA2 Encoder
-2023-04-13 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA2_ENC_H
-#define ZIP7_INC_LZMA2_ENC_H
-
-#include "LzmaEnc.h"
-
-EXTERN_C_BEGIN
-
-#define LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO 0
-#define LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID ((UInt64)(Int64)-1)
-
-typedef struct
-{
- CLzmaEncProps lzmaProps;
- UInt64 blockSize;
- int numBlockThreads_Reduced;
- int numBlockThreads_Max;
- int numTotalThreads;
-} CLzma2EncProps;
-
-void Lzma2EncProps_Init(CLzma2EncProps *p);
-void Lzma2EncProps_Normalize(CLzma2EncProps *p);
-
-/* ---------- CLzmaEnc2Handle Interface ---------- */
-
-/* Lzma2Enc_* functions can return the following exit codes:
-SRes:
- SZ_OK - OK
- SZ_ERROR_MEM - Memory allocation error
-  SZ_ERROR_PARAM - Incorrect parameter in props
- SZ_ERROR_WRITE - ISeqOutStream write callback error
- SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output
- SZ_ERROR_PROGRESS - some break from progress callback
- SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
-*/
-
-typedef struct CLzma2Enc CLzma2Enc;
-typedef CLzma2Enc * CLzma2EncHandle;
-// Z7_DECLARE_HANDLE(CLzma2EncHandle)
-
-CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig);
-void Lzma2Enc_Destroy(CLzma2EncHandle p);
-SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props);
-void Lzma2Enc_SetDataSize(CLzma2EncHandle p, UInt64 expectedDataSize);
-Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p);
-SRes Lzma2Enc_Encode2(CLzma2EncHandle p,
- ISeqOutStreamPtr outStream,
- Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- const Byte *inData, size_t inDataSize,
- ICompressProgressPtr progress);
-
-EXTERN_C_END
-
-#endif
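
Taken together, the declarations above suggest the usual call sequence: create the encoder, set props, emit the one-byte LZMA2 property, then encode. A minimal memory-to-memory sketch, assuming the g_Alloc allocator from Alloc.h; CompressLzma2 is a hypothetical wrapper, not an SDK function:

#include "Lzma2Enc.h"
#include "Alloc.h"   /* g_Alloc */

static SRes CompressLzma2(Byte *dst, size_t *dstSize,
                          const Byte *src, size_t srcSize,
                          Byte *propByte)
{
  SRes res;
  CLzma2EncProps props;
  CLzma2EncHandle enc = Lzma2Enc_Create(&g_Alloc, &g_Alloc);
  if (!enc)
    return SZ_ERROR_MEM;
  Lzma2EncProps_Init(&props);
  props.lzmaProps.level = 5;                   /* keep other fields at defaults */
  res = Lzma2Enc_SetProps(enc, &props);
  if (res == SZ_OK)
  {
    *propByte = Lzma2Enc_WriteProperties(enc); /* 1-byte dictionary-size code */
    /* memory path: streams are NULL, buffers are used
       (see the parameter checks in Lzma2Enc_Encode2 above) */
    res = Lzma2Enc_Encode2(enc, NULL, dst, dstSize,
                           NULL, src, srcSize, NULL);
  }
  Lzma2Enc_Destroy(enc);
  return res;
}
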
diff --git a/3rdparty/7z/src/Lzma86.h b/3rdparty/7z/src/Lzma86.h
deleted file mode 100644
index 9127d73b7d..0000000000
--- a/3rdparty/7z/src/Lzma86.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* Lzma86.h -- LZMA + x86 (BCJ) Filter
-2023-03-03 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA86_H
-#define ZIP7_INC_LZMA86_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define LZMA86_SIZE_OFFSET (1 + 5)
-#define LZMA86_HEADER_SIZE (LZMA86_SIZE_OFFSET + 8)
-
-/*
-It's an example for LZMA + x86 Filter use.
-You can use .lzma86 extension, if you write that stream to file.
-.lzma86 header adds one additional byte to standard .lzma header.
-.lzma86 header (14 bytes):
- Offset Size Description
- 0 1 = 0 - no filter, pure LZMA
- = 1 - x86 filter + LZMA
- 1 1 lc, lp and pb in encoded form
- 2 4 dictSize (little endian)
- 6 8 uncompressed size (little endian)
-
-
-Lzma86_Encode
--------------
-level - compression level: 0 <= level <= 9, the default value for "level" is 5.
-
-dictSize - The dictionary size in bytes. The maximum value is
- 128 MB = (1 << 27) bytes for 32-bit version
- 1 GB = (1 << 30) bytes for 64-bit version
- The default value is 16 MB = (1 << 24) bytes, for level = 5.
- It's recommended to use the dictionary that is larger than 4 KB and
- that can be calculated as (1 << N) or (3 << N) sizes.
- For better compression ratio dictSize must be >= inSize.
-
-filterMode:
- SZ_FILTER_NO - no Filter
- SZ_FILTER_YES - x86 Filter
-  SZ_FILTER_AUTO - it tries both alternatives and selects the best.
- Encoder will use 2 or 3 passes:
- 2 passes when FILTER_NO provides better compression.
- 3 passes when FILTER_YES provides better compression.
-
-Lzma86_Encode allocates data with the MyAlloc functions.
-RAM Requirements for compressing:
- RamSize = dictionarySize * 11.5 + 6MB + FilterBlockSize
- filterMode FilterBlockSize
- SZ_FILTER_NO 0
- SZ_FILTER_YES inSize
- SZ_FILTER_AUTO inSize
-
-
-Return code:
- SZ_OK - OK
- SZ_ERROR_MEM - Memory allocation error
-  SZ_ERROR_PARAM - Incorrect parameter
- SZ_ERROR_OUTPUT_EOF - output buffer overflow
- SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
-*/
-
-enum ESzFilterMode
-{
- SZ_FILTER_NO,
- SZ_FILTER_YES,
- SZ_FILTER_AUTO
-};
-
-SRes Lzma86_Encode(Byte *dest, size_t *destLen, const Byte *src, size_t srcLen,
- int level, UInt32 dictSize, int filterMode);
-
-
-/*
-Lzma86_GetUnpackSize:
- In:
- src - input data
- srcLen - input data size
- Out:
- unpackSize - size of uncompressed stream
- Return code:
- SZ_OK - OK
- SZ_ERROR_INPUT_EOF - Error in headers
-*/
-
-SRes Lzma86_GetUnpackSize(const Byte *src, SizeT srcLen, UInt64 *unpackSize);
-
-/*
-Lzma86_Decode:
- In:
- dest - output data
- destLen - output data size
- src - input data
- srcLen - input data size
- Out:
- destLen - processed output size
- srcLen - processed input size
- Return code:
- SZ_OK - OK
- SZ_ERROR_DATA - Data error
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_UNSUPPORTED - unsupported file
- SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer
-*/
-
-SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen);
-
-EXTERN_C_END
-
-#endif
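
The header layout documented above is simple enough to read directly. A hedged sketch that pulls out the four documented fields; the helper is hypothetical, not part of the SDK:

#include "Lzma86.h"

static SRes Lzma86_ParseHeader(const Byte *hdr, size_t size,
                               int *useFilter, Byte *propsByte,
                               UInt32 *dictSize, UInt64 *unpackSize)
{
  unsigned i;
  if (size < LZMA86_HEADER_SIZE)
    return SZ_ERROR_INPUT_EOF;
  *useFilter = hdr[0];                    /* 0 = pure LZMA, 1 = x86 filter + LZMA */
  *propsByte = hdr[1];                    /* lc, lp and pb in encoded form */
  *dictSize = 0;
  for (i = 0; i < 4; i++)                 /* little-endian dictSize at offset 2 */
    *dictSize |= (UInt32)hdr[2 + i] << (8 * i);
  *unpackSize = 0;
  for (i = 0; i < 8; i++)                 /* little-endian size at offset 6 */
    *unpackSize |= (UInt64)hdr[LZMA86_SIZE_OFFSET + i] << (8 * i);
  return SZ_OK;
}
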
diff --git a/3rdparty/7z/src/Lzma86Dec.c b/3rdparty/7z/src/Lzma86Dec.c
deleted file mode 100644
index 9b15f3630b..0000000000
--- a/3rdparty/7z/src/Lzma86Dec.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Lzma86Dec.c -- LZMA + x86 (BCJ) Filter Decoder
-2023-03-03 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Lzma86.h"
-
-#include "Alloc.h"
-#include "Bra.h"
-#include "LzmaDec.h"
-
-SRes Lzma86_GetUnpackSize(const Byte *src, SizeT srcLen, UInt64 *unpackSize)
-{
- unsigned i;
- if (srcLen < LZMA86_HEADER_SIZE)
- return SZ_ERROR_INPUT_EOF;
- *unpackSize = 0;
- for (i = 0; i < sizeof(UInt64); i++)
- *unpackSize += ((UInt64)src[LZMA86_SIZE_OFFSET + i]) << (8 * i);
- return SZ_OK;
-}
-
-SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen)
-{
- SRes res;
- int useFilter;
- SizeT inSizePure;
- ELzmaStatus status;
-
- if (*srcLen < LZMA86_HEADER_SIZE)
- return SZ_ERROR_INPUT_EOF;
-
- useFilter = src[0];
-
- if (useFilter > 1)
- {
- *destLen = 0;
- return SZ_ERROR_UNSUPPORTED;
- }
-
- inSizePure = *srcLen - LZMA86_HEADER_SIZE;
- res = LzmaDecode(dest, destLen, src + LZMA86_HEADER_SIZE, &inSizePure,
- src + 1, LZMA_PROPS_SIZE, LZMA_FINISH_ANY, &status, &g_Alloc);
- *srcLen = inSizePure + LZMA86_HEADER_SIZE;
- if (res != SZ_OK)
- return res;
- if (useFilter == 1)
- {
- UInt32 x86State = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
- z7_BranchConvSt_X86_Dec(dest, *destLen, 0, &x86State);
- }
- return SZ_OK;
-}
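
A typical caller combines the two functions above: read the stored size, allocate, then decode (the x86 filter is undone internally when the header's filter flag is 1). A minimal sketch, assuming MyAlloc/MyFree from Alloc.h; the wrapper name is hypothetical:

#include "Lzma86.h"
#include "Alloc.h"

static SRes DecodeLzma86Buffer(const Byte *src, SizeT srcLen,
                               Byte **dest, SizeT *destLen)
{
  SRes res;
  UInt64 unpackSize;
  res = Lzma86_GetUnpackSize(src, srcLen, &unpackSize);
  if (res != SZ_OK)
    return res;
  *destLen = (SizeT)unpackSize;
  *dest = (Byte *)MyAlloc(*destLen);
  if (!*dest && *destLen != 0)
    return SZ_ERROR_MEM;
  res = Lzma86_Decode(*dest, destLen, src, &srcLen);
  if (res != SZ_OK)
  {
    MyFree(*dest);
    *dest = NULL;
  }
  return res;
}
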
diff --git a/3rdparty/7z/src/Lzma86Enc.c b/3rdparty/7z/src/Lzma86Enc.c
deleted file mode 100644
index 1c034a74cc..0000000000
--- a/3rdparty/7z/src/Lzma86Enc.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Lzma86Enc.c -- LZMA + x86 (BCJ) Filter Encoder
-2023-03-03 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "Lzma86.h"
-
-#include "Alloc.h"
-#include "Bra.h"
-#include "LzmaEnc.h"
-
-int Lzma86_Encode(Byte *dest, size_t *destLen, const Byte *src, size_t srcLen,
- int level, UInt32 dictSize, int filterMode)
-{
- size_t outSize2 = *destLen;
- Byte *filteredStream;
- BoolInt useFilter;
- int mainResult = SZ_ERROR_OUTPUT_EOF;
- CLzmaEncProps props;
- LzmaEncProps_Init(&props);
- props.level = level;
- props.dictSize = dictSize;
-
- *destLen = 0;
- if (outSize2 < LZMA86_HEADER_SIZE)
- return SZ_ERROR_OUTPUT_EOF;
-
- {
- int i;
- UInt64 t = srcLen;
- for (i = 0; i < 8; i++, t >>= 8)
- dest[LZMA86_SIZE_OFFSET + i] = (Byte)t;
- }
-
- filteredStream = 0;
- useFilter = (filterMode != SZ_FILTER_NO);
- if (useFilter)
- {
- if (srcLen != 0)
- {
- filteredStream = (Byte *)MyAlloc(srcLen);
- if (filteredStream == 0)
- return SZ_ERROR_MEM;
- memcpy(filteredStream, src, srcLen);
- }
- {
- UInt32 x86State = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
- z7_BranchConvSt_X86_Enc(filteredStream, srcLen, 0, &x86State);
- }
- }
-
- {
- size_t minSize = 0;
- BoolInt bestIsFiltered = False;
-
- /* passes for SZ_FILTER_AUTO:
- 0 - BCJ + LZMA
- 1 - LZMA
-       2 - BCJ + LZMA again, if pass 0 (BCJ + LZMA) is better.
- */
- int numPasses = (filterMode == SZ_FILTER_AUTO) ? 3 : 1;
-
- int i;
- for (i = 0; i < numPasses; i++)
- {
- size_t outSizeProcessed = outSize2 - LZMA86_HEADER_SIZE;
- size_t outPropsSize = 5;
- SRes curRes;
- BoolInt curModeIsFiltered = (numPasses > 1 && i == numPasses - 1);
- if (curModeIsFiltered && !bestIsFiltered)
- break;
- if (useFilter && i == 0)
- curModeIsFiltered = True;
-
- curRes = LzmaEncode(dest + LZMA86_HEADER_SIZE, &outSizeProcessed,
- curModeIsFiltered ? filteredStream : src, srcLen,
- &props, dest + 1, &outPropsSize, 0,
- NULL, &g_Alloc, &g_Alloc);
-
- if (curRes != SZ_ERROR_OUTPUT_EOF)
- {
- if (curRes != SZ_OK)
- {
- mainResult = curRes;
- break;
- }
- if (outSizeProcessed <= minSize || mainResult != SZ_OK)
- {
- minSize = outSizeProcessed;
- bestIsFiltered = curModeIsFiltered;
- mainResult = SZ_OK;
- }
- }
- }
- dest[0] = (Byte)(bestIsFiltered ? 1 : 0);
- *destLen = LZMA86_HEADER_SIZE + minSize;
- }
- if (useFilter)
- MyFree(filteredStream);
- return mainResult;
-}
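
On the encode side, the caller only has to provide a destination buffer large enough for the header plus a worst-case expansion. A hedged usage sketch; the output-size margin here is my own guess, not a bound taken from the SDK:

#include "Lzma86.h"
#include "Alloc.h"

static SRes EncodeLzma86Buffer(const Byte *src, size_t srcLen,
                               Byte **dest, size_t *destLen)
{
  SRes res;
  size_t outSize = srcLen + srcLen / 3 + 128 + LZMA86_HEADER_SIZE; /* assumed margin */
  *dest = (Byte *)MyAlloc(outSize);
  if (!*dest)
    return SZ_ERROR_MEM;
  *destLen = outSize;
  res = Lzma86_Encode(*dest, destLen, src, srcLen,
                      5,               /* level */
                      (UInt32)1 << 24, /* 16 MB dictionary, the level-5 default */
                      SZ_FILTER_AUTO); /* 2 or 3 passes, as described in Lzma86.h */
  if (res != SZ_OK)
  {
    MyFree(*dest);
    *dest = NULL;
  }
  return res;
}
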
diff --git a/3rdparty/7z/src/LzmaDec.c b/3rdparty/7z/src/LzmaDec.c
deleted file mode 100644
index b6bcd5099b..0000000000
--- a/3rdparty/7z/src/LzmaDec.c
+++ /dev/null
@@ -1,1363 +0,0 @@
-/* LzmaDec.c -- LZMA Decoder
-2023-04-07 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-/* #include "CpuArch.h" */
-#include "LzmaDec.h"
-
-// #define kNumTopBits 24
-#define kTopValue ((UInt32)1 << 24)
-
-#define kNumBitModelTotalBits 11
-#define kBitModelTotal (1 << kNumBitModelTotalBits)
-
-#define RC_INIT_SIZE 5
-
-#ifndef Z7_LZMA_DEC_OPT
-
-#define kNumMoveBits 5
-#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
-
-#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
-#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
-#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
-#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
- { UPDATE_0(p) i = (i + i); A0; } else \
- { UPDATE_1(p) i = (i + i) + 1; A1; }
-
-#define TREE_GET_BIT(probs, i) { GET_BIT2(probs + i, i, ;, ;); }
-
-#define REV_BIT(p, i, A0, A1) IF_BIT_0(p + i) \
- { UPDATE_0(p + i) A0; } else \
- { UPDATE_1(p + i) A1; }
-#define REV_BIT_VAR( p, i, m) REV_BIT(p, i, i += m; m += m, m += m; i += m; )
-#define REV_BIT_CONST(p, i, m) REV_BIT(p, i, i += m; , i += m * 2; )
-#define REV_BIT_LAST( p, i, m) REV_BIT(p, i, i -= m , ; )
-
-#define TREE_DECODE(probs, limit, i) \
- { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
-
-/* #define Z7_LZMA_SIZE_OPT */
-
-#ifdef Z7_LZMA_SIZE_OPT
-#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
-#else
-#define TREE_6_DECODE(probs, i) \
- { i = 1; \
- TREE_GET_BIT(probs, i) \
- TREE_GET_BIT(probs, i) \
- TREE_GET_BIT(probs, i) \
- TREE_GET_BIT(probs, i) \
- TREE_GET_BIT(probs, i) \
- TREE_GET_BIT(probs, i) \
- i -= 0x40; }
-#endif
-
-#define NORMAL_LITER_DEC TREE_GET_BIT(prob, symbol)
-#define MATCHED_LITER_DEC \
- matchByte += matchByte; \
- bit = offs; \
- offs &= matchByte; \
- probLit = prob + (offs + bit + symbol); \
- GET_BIT2(probLit, symbol, offs ^= bit; , ;)
-
-#endif // Z7_LZMA_DEC_OPT
-
-
-#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_INPUT_EOF; range <<= 8; code = (code << 8) | (*buf++); }
-
-#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
-#define UPDATE_0_CHECK range = bound;
-#define UPDATE_1_CHECK range -= bound; code -= bound;
-#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
- { UPDATE_0_CHECK i = (i + i); A0; } else \
- { UPDATE_1_CHECK i = (i + i) + 1; A1; }
-#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
-#define TREE_DECODE_CHECK(probs, limit, i) \
- { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
-
-
-#define REV_BIT_CHECK(p, i, m) IF_BIT_0_CHECK(p + i) \
- { UPDATE_0_CHECK i += m; m += m; } else \
- { UPDATE_1_CHECK m += m; i += m; }
-
-
-#define kNumPosBitsMax 4
-#define kNumPosStatesMax (1 << kNumPosBitsMax)
-
-#define kLenNumLowBits 3
-#define kLenNumLowSymbols (1 << kLenNumLowBits)
-#define kLenNumHighBits 8
-#define kLenNumHighSymbols (1 << kLenNumHighBits)
-
-#define LenLow 0
-#define LenHigh (LenLow + 2 * (kNumPosStatesMax << kLenNumLowBits))
-#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
-
-#define LenChoice LenLow
-#define LenChoice2 (LenLow + (1 << kLenNumLowBits))
-
-#define kNumStates 12
-#define kNumStates2 16
-#define kNumLitStates 7
-
-#define kStartPosModelIndex 4
-#define kEndPosModelIndex 14
-#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
-
-#define kNumPosSlotBits 6
-#define kNumLenToPosStates 4
-
-#define kNumAlignBits 4
-#define kAlignTableSize (1 << kNumAlignBits)
-
-#define kMatchMinLen 2
-#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols * 2 + kLenNumHighSymbols)
-
-#define kMatchSpecLen_Error_Data (1 << 9)
-#define kMatchSpecLen_Error_Fail (kMatchSpecLen_Error_Data - 1)
-
-/* External ASM code needs same CLzmaProb array layout. So don't change it. */
-
-/* (probs_1664) is faster and better for code size at some platforms */
-/*
-#ifdef MY_CPU_X86_OR_AMD64
-*/
-#define kStartOffset 1664
-#define GET_PROBS p->probs_1664
-/*
-#define GET_PROBS p->probs + kStartOffset
-#else
-#define kStartOffset 0
-#define GET_PROBS p->probs
-#endif
-*/
-
-#define SpecPos (-kStartOffset)
-#define IsRep0Long (SpecPos + kNumFullDistances)
-#define RepLenCoder (IsRep0Long + (kNumStates2 << kNumPosBitsMax))
-#define LenCoder (RepLenCoder + kNumLenProbs)
-#define IsMatch (LenCoder + kNumLenProbs)
-#define Align (IsMatch + (kNumStates2 << kNumPosBitsMax))
-#define IsRep (Align + kAlignTableSize)
-#define IsRepG0 (IsRep + kNumStates)
-#define IsRepG1 (IsRepG0 + kNumStates)
-#define IsRepG2 (IsRepG1 + kNumStates)
-#define PosSlot (IsRepG2 + kNumStates)
-#define Literal (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
-#define NUM_BASE_PROBS (Literal + kStartOffset)
-
-#if Align != 0 && kStartOffset != 0
- #error Stop_Compiling_Bad_LZMA_kAlign
-#endif
-
-#if NUM_BASE_PROBS != 1984
- #error Stop_Compiling_Bad_LZMA_PROBS
-#endif
-
-
-#define LZMA_LIT_SIZE 0x300
-
-#define LzmaProps_GetNumProbs(p) (NUM_BASE_PROBS + ((UInt32)LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
-
-
-#define CALC_POS_STATE(processedPos, pbMask) (((processedPos) & (pbMask)) << 4)
-#define COMBINED_PS_STATE (posState + state)
-#define GET_LEN_STATE (posState)
-
-#define LZMA_DIC_MIN (1 << 12)
-
-/*
-p->remainLen : shows status of LZMA decoder:
- < kMatchSpecLenStart : the number of bytes to be copied with (p->rep0) offset
- = kMatchSpecLenStart : the LZMA stream was finished with end mark
- = kMatchSpecLenStart + 1 : need init range coder
- = kMatchSpecLenStart + 2 : need init range coder and state
- = kMatchSpecLen_Error_Fail : Internal Code Failure
- = kMatchSpecLen_Error_Data + [0 ... 273] : LZMA Data Error
-*/
-
-/* ---------- LZMA_DECODE_REAL ---------- */
-/*
-LzmaDec_DecodeReal_3() can be implemented in external ASM file.
-3 - is the code compatibility version of that function for check at link time.
-*/
-
-#define LZMA_DECODE_REAL LzmaDec_DecodeReal_3
-
-/*
-LZMA_DECODE_REAL()
-In:
- RangeCoder is normalized
- if (p->dicPos == limit)
- {
- LzmaDec_TryDummy() was called before to exclude LITERAL and MATCH-REP cases.
- So first symbol can be only MATCH-NON-REP. And if that MATCH-NON-REP symbol
-    is not END_OF_PAYLOAD_MARKER, then the function doesn't write any byte to dictionary,
- the function returns SZ_OK, and the caller can use (p->remainLen) and (p->reps[0]) later.
- }
-
-Processing:
- The first LZMA symbol will be decoded in any case.
- All main checks for limits are at the end of main loop,
- It decodes additional LZMA-symbols while (p->buf < bufLimit && dicPos < limit),
- RangeCoder is still without last normalization when (p->buf < bufLimit) is being checked.
- But if (p->buf < bufLimit), the caller provided at least (LZMA_REQUIRED_INPUT_MAX + 1) bytes for
- next iteration before limit (bufLimit + LZMA_REQUIRED_INPUT_MAX),
- that is enough for worst case LZMA symbol with one additional RangeCoder normalization for one bit.
- So that function never reads bufLimit [LZMA_REQUIRED_INPUT_MAX] byte.
-
-Out:
- RangeCoder is normalized
- Result:
- SZ_OK - OK
- p->remainLen:
- < kMatchSpecLenStart : the number of bytes to be copied with (p->reps[0]) offset
- = kMatchSpecLenStart : the LZMA stream was finished with end mark
-
- SZ_ERROR_DATA - error, when the MATCH-Symbol refers out of dictionary
- p->remainLen : undefined
- p->reps[*] : undefined
-*/
-
-
-#ifdef Z7_LZMA_DEC_OPT
-
-int Z7_FASTCALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit);
-
-#else
-
-static
-int Z7_FASTCALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
-{
- CLzmaProb *probs = GET_PROBS;
- unsigned state = (unsigned)p->state;
- UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
- unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
- unsigned lc = p->prop.lc;
- unsigned lpMask = ((unsigned)0x100 << p->prop.lp) - ((unsigned)0x100 >> lc);
-
- Byte *dic = p->dic;
- SizeT dicBufSize = p->dicBufSize;
- SizeT dicPos = p->dicPos;
-
- UInt32 processedPos = p->processedPos;
- UInt32 checkDicSize = p->checkDicSize;
- unsigned len = 0;
-
- const Byte *buf = p->buf;
- UInt32 range = p->range;
- UInt32 code = p->code;
-
- do
- {
- CLzmaProb *prob;
- UInt32 bound;
- unsigned ttt;
- unsigned posState = CALC_POS_STATE(processedPos, pbMask);
-
- prob = probs + IsMatch + COMBINED_PS_STATE;
- IF_BIT_0(prob)
- {
- unsigned symbol;
- UPDATE_0(prob)
- prob = probs + Literal;
- if (processedPos != 0 || checkDicSize != 0)
- prob += (UInt32)3 * ((((processedPos << 8) + dic[(dicPos == 0 ? dicBufSize : dicPos) - 1]) & lpMask) << lc);
- processedPos++;
-
- if (state < kNumLitStates)
- {
- state -= (state < 4) ? state : 3;
- symbol = 1;
- #ifdef Z7_LZMA_SIZE_OPT
- do { NORMAL_LITER_DEC } while (symbol < 0x100);
- #else
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- NORMAL_LITER_DEC
- #endif
- }
- else
- {
- unsigned matchByte = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
- unsigned offs = 0x100;
- state -= (state < 10) ? 3 : 6;
- symbol = 1;
- #ifdef Z7_LZMA_SIZE_OPT
- do
- {
- unsigned bit;
- CLzmaProb *probLit;
- MATCHED_LITER_DEC
- }
- while (symbol < 0x100);
- #else
- {
- unsigned bit;
- CLzmaProb *probLit;
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- MATCHED_LITER_DEC
- }
- #endif
- }
-
- dic[dicPos++] = (Byte)symbol;
- continue;
- }
-
- {
- UPDATE_1(prob)
- prob = probs + IsRep + state;
- IF_BIT_0(prob)
- {
- UPDATE_0(prob)
- state += kNumStates;
- prob = probs + LenCoder;
- }
- else
- {
- UPDATE_1(prob)
- prob = probs + IsRepG0 + state;
- IF_BIT_0(prob)
- {
- UPDATE_0(prob)
- prob = probs + IsRep0Long + COMBINED_PS_STATE;
- IF_BIT_0(prob)
- {
- UPDATE_0(prob)
-
- // that case was checked before with kBadRepCode
- // if (checkDicSize == 0 && processedPos == 0) { len = kMatchSpecLen_Error_Data + 1; break; }
- // The caller doesn't allow (dicPos == limit) case here
- // so we don't need the following check:
- // if (dicPos == limit) { state = state < kNumLitStates ? 9 : 11; len = 1; break; }
-
- dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
- dicPos++;
- processedPos++;
- state = state < kNumLitStates ? 9 : 11;
- continue;
- }
- UPDATE_1(prob)
- }
- else
- {
- UInt32 distance;
- UPDATE_1(prob)
- prob = probs + IsRepG1 + state;
- IF_BIT_0(prob)
- {
- UPDATE_0(prob)
- distance = rep1;
- }
- else
- {
- UPDATE_1(prob)
- prob = probs + IsRepG2 + state;
- IF_BIT_0(prob)
- {
- UPDATE_0(prob)
- distance = rep2;
- }
- else
- {
- UPDATE_1(prob)
- distance = rep3;
- rep3 = rep2;
- }
- rep2 = rep1;
- }
- rep1 = rep0;
- rep0 = distance;
- }
- state = state < kNumLitStates ? 8 : 11;
- prob = probs + RepLenCoder;
- }
-
- #ifdef Z7_LZMA_SIZE_OPT
- {
- unsigned lim, offset;
- CLzmaProb *probLen = prob + LenChoice;
- IF_BIT_0(probLen)
- {
- UPDATE_0(probLen)
- probLen = prob + LenLow + GET_LEN_STATE;
- offset = 0;
- lim = (1 << kLenNumLowBits);
- }
- else
- {
- UPDATE_1(probLen)
- probLen = prob + LenChoice2;
- IF_BIT_0(probLen)
- {
- UPDATE_0(probLen)
- probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
- offset = kLenNumLowSymbols;
- lim = (1 << kLenNumLowBits);
- }
- else
- {
- UPDATE_1(probLen)
- probLen = prob + LenHigh;
- offset = kLenNumLowSymbols * 2;
- lim = (1 << kLenNumHighBits);
- }
- }
- TREE_DECODE(probLen, lim, len)
- len += offset;
- }
- #else
- {
- CLzmaProb *probLen = prob + LenChoice;
- IF_BIT_0(probLen)
- {
- UPDATE_0(probLen)
- probLen = prob + LenLow + GET_LEN_STATE;
- len = 1;
- TREE_GET_BIT(probLen, len)
- TREE_GET_BIT(probLen, len)
- TREE_GET_BIT(probLen, len)
- len -= 8;
- }
- else
- {
- UPDATE_1(probLen)
- probLen = prob + LenChoice2;
- IF_BIT_0(probLen)
- {
- UPDATE_0(probLen)
- probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
- len = 1;
- TREE_GET_BIT(probLen, len)
- TREE_GET_BIT(probLen, len)
- TREE_GET_BIT(probLen, len)
- }
- else
- {
- UPDATE_1(probLen)
- probLen = prob + LenHigh;
- TREE_DECODE(probLen, (1 << kLenNumHighBits), len)
- len += kLenNumLowSymbols * 2;
- }
- }
- }
- #endif
-
- if (state >= kNumStates)
- {
- UInt32 distance;
- prob = probs + PosSlot +
- ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
- TREE_6_DECODE(prob, distance)
- if (distance >= kStartPosModelIndex)
- {
- unsigned posSlot = (unsigned)distance;
- unsigned numDirectBits = (unsigned)(((distance >> 1) - 1));
- distance = (2 | (distance & 1));
- if (posSlot < kEndPosModelIndex)
- {
- distance <<= numDirectBits;
- prob = probs + SpecPos;
- {
- UInt32 m = 1;
- distance++;
- do
- {
- REV_BIT_VAR(prob, distance, m)
- }
- while (--numDirectBits);
- distance -= m;
- }
- }
- else
- {
- numDirectBits -= kNumAlignBits;
- do
- {
- NORMALIZE
- range >>= 1;
-
- {
- UInt32 t;
- code -= range;
- t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
- distance = (distance << 1) + (t + 1);
- code += range & t;
- }
- /*
- distance <<= 1;
- if (code >= range)
- {
- code -= range;
- distance |= 1;
- }
- */
- }
- while (--numDirectBits);
- prob = probs + Align;
- distance <<= kNumAlignBits;
- {
- unsigned i = 1;
- REV_BIT_CONST(prob, i, 1)
- REV_BIT_CONST(prob, i, 2)
- REV_BIT_CONST(prob, i, 4)
- REV_BIT_LAST (prob, i, 8)
- distance |= i;
- }
- if (distance == (UInt32)0xFFFFFFFF)
- {
- len = kMatchSpecLenStart;
- state -= kNumStates;
- break;
- }
- }
- }
-
- rep3 = rep2;
- rep2 = rep1;
- rep1 = rep0;
- rep0 = distance + 1;
- state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
- if (distance >= (checkDicSize == 0 ? processedPos: checkDicSize))
- {
- len += kMatchSpecLen_Error_Data + kMatchMinLen;
- // len = kMatchSpecLen_Error_Data;
- // len += kMatchMinLen;
- break;
- }
- }
-
- len += kMatchMinLen;
-
- {
- SizeT rem;
- unsigned curLen;
- SizeT pos;
-
- if ((rem = limit - dicPos) == 0)
- {
- /*
- We stop decoding and return SZ_OK, and we can resume decoding later.
- Any error conditions can be tested later in caller code.
- For more strict mode we can stop decoding with error
- // len += kMatchSpecLen_Error_Data;
- */
- break;
- }
-
- curLen = ((rem < len) ? (unsigned)rem : len);
- pos = dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0);
-
- processedPos += (UInt32)curLen;
-
- len -= curLen;
- if (curLen <= dicBufSize - pos)
- {
- Byte *dest = dic + dicPos;
- ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
- const Byte *lim = dest + curLen;
- dicPos += (SizeT)curLen;
- do
- *(dest) = (Byte)*(dest + src);
- while (++dest != lim);
- }
- else
- {
- do
- {
- dic[dicPos++] = dic[pos];
- if (++pos == dicBufSize)
- pos = 0;
- }
- while (--curLen != 0);
- }
- }
- }
- }
- while (dicPos < limit && buf < bufLimit);
-
- NORMALIZE
-
- p->buf = buf;
- p->range = range;
- p->code = code;
- p->remainLen = (UInt32)len; // & (kMatchSpecLen_Error_Data - 1); // we can write real length for error matches too.
- p->dicPos = dicPos;
- p->processedPos = processedPos;
- p->reps[0] = rep0;
- p->reps[1] = rep1;
- p->reps[2] = rep2;
- p->reps[3] = rep3;
- p->state = (UInt32)state;
- if (len >= kMatchSpecLen_Error_Data)
- return SZ_ERROR_DATA;
- return SZ_OK;
-}
-#endif
-
-
-
-static void Z7_FASTCALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
-{
- unsigned len = (unsigned)p->remainLen;
- if (len == 0 /* || len >= kMatchSpecLenStart */)
- return;
- {
- SizeT dicPos = p->dicPos;
- Byte *dic;
- SizeT dicBufSize;
- SizeT rep0; /* we use SizeT to avoid the BUG of VC14 for AMD64 */
- {
- SizeT rem = limit - dicPos;
- if (rem < len)
- {
- len = (unsigned)(rem);
- if (len == 0)
- return;
- }
- }
-
- if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
- p->checkDicSize = p->prop.dicSize;
-
- p->processedPos += (UInt32)len;
- p->remainLen -= (UInt32)len;
- dic = p->dic;
- rep0 = p->reps[0];
- dicBufSize = p->dicBufSize;
- do
- {
- dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
- dicPos++;
- }
- while (--len);
- p->dicPos = dicPos;
- }
-}
-
-
-/*
-At the start of a new stream we have one of the following symbols:
- - Literal - is allowed
- - Non-Rep-Match - is allowed only if it's end marker symbol
- - Rep-Match - is not allowed
-We use early check of (RangeCoder:Code) over kBadRepCode to simplify main decoding code
-*/
-
-#define kRange0 0xFFFFFFFF
-#define kBound0 ((kRange0 >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1))
-#define kBadRepCode (kBound0 + (((kRange0 - kBound0) >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1)))
-#if kBadRepCode != (0xC0000000 - 0x400)
- #error Stop_Compiling_Bad_LZMA_Check
-#endif
-
-
-/*
-LzmaDec_DecodeReal2():
- It calls LZMA_DECODE_REAL() and it adjusts limit according (p->checkDicSize).
-
-We correct (p->checkDicSize) after LZMA_DECODE_REAL() and in LzmaDec_WriteRem(),
-and we support the following state of (p->checkDicSize):
- if (total_processed < p->prop.dicSize) then
- {
- (total_processed == p->processedPos)
- (p->checkDicSize == 0)
- }
- else
- (p->checkDicSize == p->prop.dicSize)
-*/
-
-static int Z7_FASTCALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
-{
- if (p->checkDicSize == 0)
- {
- UInt32 rem = p->prop.dicSize - p->processedPos;
- if (limit - p->dicPos > rem)
- limit = p->dicPos + rem;
- }
- {
- int res = LZMA_DECODE_REAL(p, limit, bufLimit);
- if (p->checkDicSize == 0 && p->processedPos >= p->prop.dicSize)
- p->checkDicSize = p->prop.dicSize;
- return res;
- }
-}
-
-
-
-typedef enum
-{
- DUMMY_INPUT_EOF, /* need more input data */
- DUMMY_LIT,
- DUMMY_MATCH,
- DUMMY_REP
-} ELzmaDummy;
-
-
-#define IS_DUMMY_END_MARKER_POSSIBLE(dummyRes) ((dummyRes) == DUMMY_MATCH)
-
-static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byte **bufOut)
-{
- UInt32 range = p->range;
- UInt32 code = p->code;
- const Byte *bufLimit = *bufOut;
- const CLzmaProb *probs = GET_PROBS;
- unsigned state = (unsigned)p->state;
- ELzmaDummy res;
-
- for (;;)
- {
- const CLzmaProb *prob;
- UInt32 bound;
- unsigned ttt;
- unsigned posState = CALC_POS_STATE(p->processedPos, ((unsigned)1 << p->prop.pb) - 1);
-
- prob = probs + IsMatch + COMBINED_PS_STATE;
- IF_BIT_0_CHECK(prob)
- {
- UPDATE_0_CHECK
-
- prob = probs + Literal;
- if (p->checkDicSize != 0 || p->processedPos != 0)
- prob += ((UInt32)LZMA_LIT_SIZE *
- ((((p->processedPos) & (((unsigned)1 << (p->prop.lp)) - 1)) << p->prop.lc) +
- ((unsigned)p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
-
- if (state < kNumLitStates)
- {
- unsigned symbol = 1;
- do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
- }
- else
- {
- unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
- (p->dicPos < p->reps[0] ? p->dicBufSize : 0)];
- unsigned offs = 0x100;
- unsigned symbol = 1;
- do
- {
- unsigned bit;
- const CLzmaProb *probLit;
- matchByte += matchByte;
- bit = offs;
- offs &= matchByte;
- probLit = prob + (offs + bit + symbol);
- GET_BIT2_CHECK(probLit, symbol, offs ^= bit; , ; )
- }
- while (symbol < 0x100);
- }
- res = DUMMY_LIT;
- }
- else
- {
- unsigned len;
- UPDATE_1_CHECK
-
- prob = probs + IsRep + state;
- IF_BIT_0_CHECK(prob)
- {
- UPDATE_0_CHECK
- state = 0;
- prob = probs + LenCoder;
- res = DUMMY_MATCH;
- }
- else
- {
- UPDATE_1_CHECK
- res = DUMMY_REP;
- prob = probs + IsRepG0 + state;
- IF_BIT_0_CHECK(prob)
- {
- UPDATE_0_CHECK
- prob = probs + IsRep0Long + COMBINED_PS_STATE;
- IF_BIT_0_CHECK(prob)
- {
- UPDATE_0_CHECK
- break;
- }
- else
- {
- UPDATE_1_CHECK
- }
- }
- else
- {
- UPDATE_1_CHECK
- prob = probs + IsRepG1 + state;
- IF_BIT_0_CHECK(prob)
- {
- UPDATE_0_CHECK
- }
- else
- {
- UPDATE_1_CHECK
- prob = probs + IsRepG2 + state;
- IF_BIT_0_CHECK(prob)
- {
- UPDATE_0_CHECK
- }
- else
- {
- UPDATE_1_CHECK
- }
- }
- }
- state = kNumStates;
- prob = probs + RepLenCoder;
- }
- {
- unsigned limit, offset;
- const CLzmaProb *probLen = prob + LenChoice;
- IF_BIT_0_CHECK(probLen)
- {
- UPDATE_0_CHECK
- probLen = prob + LenLow + GET_LEN_STATE;
- offset = 0;
- limit = 1 << kLenNumLowBits;
- }
- else
- {
- UPDATE_1_CHECK
- probLen = prob + LenChoice2;
- IF_BIT_0_CHECK(probLen)
- {
- UPDATE_0_CHECK
- probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
- offset = kLenNumLowSymbols;
- limit = 1 << kLenNumLowBits;
- }
- else
- {
- UPDATE_1_CHECK
- probLen = prob + LenHigh;
- offset = kLenNumLowSymbols * 2;
- limit = 1 << kLenNumHighBits;
- }
- }
- TREE_DECODE_CHECK(probLen, limit, len)
- len += offset;
- }
-
- if (state < 4)
- {
- unsigned posSlot;
- prob = probs + PosSlot +
- ((len < kNumLenToPosStates - 1 ? len : kNumLenToPosStates - 1) <<
- kNumPosSlotBits);
- TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot)
- if (posSlot >= kStartPosModelIndex)
- {
- unsigned numDirectBits = ((posSlot >> 1) - 1);
-
- if (posSlot < kEndPosModelIndex)
- {
- prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits);
- }
- else
- {
- numDirectBits -= kNumAlignBits;
- do
- {
- NORMALIZE_CHECK
- range >>= 1;
- code -= range & (((code - range) >> 31) - 1);
- /* if (code >= range) code -= range; */
- }
- while (--numDirectBits);
- prob = probs + Align;
- numDirectBits = kNumAlignBits;
- }
- {
- unsigned i = 1;
- unsigned m = 1;
- do
- {
- REV_BIT_CHECK(prob, i, m)
- }
- while (--numDirectBits);
- }
- }
- }
- }
- break;
- }
- NORMALIZE_CHECK
-
- *bufOut = buf;
- return res;
-}
-
-void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState);
-void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState)
-{
- p->remainLen = kMatchSpecLenStart + 1;
- p->tempBufSize = 0;
-
- if (initDic)
- {
- p->processedPos = 0;
- p->checkDicSize = 0;
- p->remainLen = kMatchSpecLenStart + 2;
- }
- if (initState)
- p->remainLen = kMatchSpecLenStart + 2;
-}
-
-void LzmaDec_Init(CLzmaDec *p)
-{
- p->dicPos = 0;
- LzmaDec_InitDicAndState(p, True, True);
-}
-
-
-/*
-LZMA supports optional end_marker.
-So the decoder can look ahead for one additional LZMA-Symbol to check end_marker.
-That additional LZMA-Symbol can require up to LZMA_REQUIRED_INPUT_MAX bytes in input stream.
-When the decoder reaches dicLimit, it checks the (finishMode) parameter:
-  if (finishMode == LZMA_FINISH_ANY), the decoder doesn't look ahead
-  if (finishMode != LZMA_FINISH_ANY), the decoder looks ahead, if end_marker is possible for the current position
-
-When the decoder looks ahead and the lookahead symbol is not end_marker, we have two ways:
-  1) Strict mode (default) : the decoder returns SZ_ERROR_DATA.
-  2) Relaxed mode (alternative mode) : we could return SZ_OK, and the caller
-     must check the (status) value. The caller can show the error,
-     if the end of stream is expected and the (status) is not
-     LZMA_STATUS_FINISHED_WITH_MARK or LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK.
-*/
-
-
-#define RETURN_NOT_FINISHED_FOR_FINISH \
- *status = LZMA_STATUS_NOT_FINISHED; \
- return SZ_ERROR_DATA; // for strict mode
- // return SZ_OK; // for relaxed mode
-
-
-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
- ELzmaFinishMode finishMode, ELzmaStatus *status)
-{
- SizeT inSize = *srcLen;
- (*srcLen) = 0;
- *status = LZMA_STATUS_NOT_SPECIFIED;
-
- if (p->remainLen > kMatchSpecLenStart)
- {
- if (p->remainLen > kMatchSpecLenStart + 2)
- return p->remainLen == kMatchSpecLen_Error_Fail ? SZ_ERROR_FAIL : SZ_ERROR_DATA;
-
- for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
- p->tempBuf[p->tempBufSize++] = *src++;
- if (p->tempBufSize != 0 && p->tempBuf[0] != 0)
- return SZ_ERROR_DATA;
- if (p->tempBufSize < RC_INIT_SIZE)
- {
- *status = LZMA_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
- p->code =
- ((UInt32)p->tempBuf[1] << 24)
- | ((UInt32)p->tempBuf[2] << 16)
- | ((UInt32)p->tempBuf[3] << 8)
- | ((UInt32)p->tempBuf[4]);
-
- if (p->checkDicSize == 0
- && p->processedPos == 0
- && p->code >= kBadRepCode)
- return SZ_ERROR_DATA;
-
- p->range = 0xFFFFFFFF;
- p->tempBufSize = 0;
-
- if (p->remainLen > kMatchSpecLenStart + 1)
- {
- SizeT numProbs = LzmaProps_GetNumProbs(&p->prop);
- SizeT i;
- CLzmaProb *probs = p->probs;
- for (i = 0; i < numProbs; i++)
- probs[i] = kBitModelTotal >> 1;
- p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
- p->state = 0;
- }
-
- p->remainLen = 0;
- }
-
- for (;;)
- {
- if (p->remainLen == kMatchSpecLenStart)
- {
- if (p->code != 0)
- return SZ_ERROR_DATA;
- *status = LZMA_STATUS_FINISHED_WITH_MARK;
- return SZ_OK;
- }
-
- LzmaDec_WriteRem(p, dicLimit);
-
- {
- // (p->remainLen == 0 || p->dicPos == dicLimit)
-
- int checkEndMarkNow = 0;
-
- if (p->dicPos >= dicLimit)
- {
- if (p->remainLen == 0 && p->code == 0)
- {
- *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
- return SZ_OK;
- }
- if (finishMode == LZMA_FINISH_ANY)
- {
- *status = LZMA_STATUS_NOT_FINISHED;
- return SZ_OK;
- }
- if (p->remainLen != 0)
- {
- RETURN_NOT_FINISHED_FOR_FINISH
- }
- checkEndMarkNow = 1;
- }
-
- // (p->remainLen == 0)
-
- if (p->tempBufSize == 0)
- {
- const Byte *bufLimit;
- int dummyProcessed = -1;
-
- if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
- {
- const Byte *bufOut = src + inSize;
-
- ELzmaDummy dummyRes = LzmaDec_TryDummy(p, src, &bufOut);
-
- if (dummyRes == DUMMY_INPUT_EOF)
- {
- size_t i;
- if (inSize >= LZMA_REQUIRED_INPUT_MAX)
- break;
- (*srcLen) += inSize;
- p->tempBufSize = (unsigned)inSize;
- for (i = 0; i < inSize; i++)
- p->tempBuf[i] = src[i];
- *status = LZMA_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
-
- dummyProcessed = (int)(bufOut - src);
- if ((unsigned)dummyProcessed > LZMA_REQUIRED_INPUT_MAX)
- break;
-
- if (checkEndMarkNow && !IS_DUMMY_END_MARKER_POSSIBLE(dummyRes))
- {
- unsigned i;
- (*srcLen) += (unsigned)dummyProcessed;
- p->tempBufSize = (unsigned)dummyProcessed;
- for (i = 0; i < (unsigned)dummyProcessed; i++)
- p->tempBuf[i] = src[i];
- // p->remainLen = kMatchSpecLen_Error_Data;
- RETURN_NOT_FINISHED_FOR_FINISH
- }
-
- bufLimit = src;
- // we will decode only one iteration
- }
- else
- bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
-
- p->buf = src;
-
- {
- int res = LzmaDec_DecodeReal2(p, dicLimit, bufLimit);
-
- SizeT processed = (SizeT)(p->buf - src);
-
- if (dummyProcessed < 0)
- {
- if (processed > inSize)
- break;
- }
- else if ((unsigned)dummyProcessed != processed)
- break;
-
- src += processed;
- inSize -= processed;
- (*srcLen) += processed;
-
- if (res != SZ_OK)
- {
- p->remainLen = kMatchSpecLen_Error_Data;
- return SZ_ERROR_DATA;
- }
- }
- continue;
- }
-
- {
- // we have some data in (p->tempBuf)
- // in strict mode: tempBufSize is not enough for one Symbol decoding.
- // in relaxed mode: tempBufSize not larger than required for one Symbol decoding.
-
- unsigned rem = p->tempBufSize;
- unsigned ahead = 0;
- int dummyProcessed = -1;
-
- while (rem < LZMA_REQUIRED_INPUT_MAX && ahead < inSize)
- p->tempBuf[rem++] = src[ahead++];
-
- // ahead - the size of new data copied from (src) to (p->tempBuf)
- // rem - the size of temp buffer including new data from (src)
-
- if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
- {
- const Byte *bufOut = p->tempBuf + rem;
-
- ELzmaDummy dummyRes = LzmaDec_TryDummy(p, p->tempBuf, &bufOut);
-
- if (dummyRes == DUMMY_INPUT_EOF)
- {
- if (rem >= LZMA_REQUIRED_INPUT_MAX)
- break;
- p->tempBufSize = rem;
- (*srcLen) += (SizeT)ahead;
- *status = LZMA_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
-
- dummyProcessed = (int)(bufOut - p->tempBuf);
-
- if ((unsigned)dummyProcessed < p->tempBufSize)
- break;
-
- if (checkEndMarkNow && !IS_DUMMY_END_MARKER_POSSIBLE(dummyRes))
- {
- (*srcLen) += (unsigned)dummyProcessed - p->tempBufSize;
- p->tempBufSize = (unsigned)dummyProcessed;
- // p->remainLen = kMatchSpecLen_Error_Data;
- RETURN_NOT_FINISHED_FOR_FINISH
- }
- }
-
- p->buf = p->tempBuf;
-
- {
- // we decode one symbol from (p->tempBuf) here, so the (bufLimit) is equal to (p->buf)
- int res = LzmaDec_DecodeReal2(p, dicLimit, p->buf);
-
- SizeT processed = (SizeT)(p->buf - p->tempBuf);
- rem = p->tempBufSize;
-
- if (dummyProcessed < 0)
- {
- if (processed > LZMA_REQUIRED_INPUT_MAX)
- break;
- if (processed < rem)
- break;
- }
- else if ((unsigned)dummyProcessed != processed)
- break;
-
- processed -= rem;
-
- src += processed;
- inSize -= processed;
- (*srcLen) += processed;
- p->tempBufSize = 0;
-
- if (res != SZ_OK)
- {
- p->remainLen = kMatchSpecLen_Error_Data;
- return SZ_ERROR_DATA;
- }
- }
- }
- }
- }
-
- /* Some unexpected error: internal error of code, memory corruption or hardware failure */
- p->remainLen = kMatchSpecLen_Error_Fail;
- return SZ_ERROR_FAIL;
-}
-
-
-
-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
-{
- SizeT outSize = *destLen;
- SizeT inSize = *srcLen;
- *srcLen = *destLen = 0;
- for (;;)
- {
- SizeT inSizeCur = inSize, outSizeCur, dicPos;
- ELzmaFinishMode curFinishMode;
- SRes res;
- if (p->dicPos == p->dicBufSize)
- p->dicPos = 0;
- dicPos = p->dicPos;
- if (outSize > p->dicBufSize - dicPos)
- {
- outSizeCur = p->dicBufSize;
- curFinishMode = LZMA_FINISH_ANY;
- }
- else
- {
- outSizeCur = dicPos + outSize;
- curFinishMode = finishMode;
- }
-
- res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
- src += inSizeCur;
- inSize -= inSizeCur;
- *srcLen += inSizeCur;
- outSizeCur = p->dicPos - dicPos;
- memcpy(dest, p->dic + dicPos, outSizeCur);
- dest += outSizeCur;
- outSize -= outSizeCur;
- *destLen += outSizeCur;
- if (res != 0)
- return res;
- if (outSizeCur == 0 || outSize == 0)
- return SZ_OK;
- }
-}
-
-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->probs);
- p->probs = NULL;
-}
-
-static void LzmaDec_FreeDict(CLzmaDec *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->dic);
- p->dic = NULL;
-}
-
-void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc)
-{
- LzmaDec_FreeProbs(p, alloc);
- LzmaDec_FreeDict(p, alloc);
-}
-
-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
-{
- UInt32 dicSize;
- Byte d;
-
- if (size < LZMA_PROPS_SIZE)
- return SZ_ERROR_UNSUPPORTED;
- else
- dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
-
- if (dicSize < LZMA_DIC_MIN)
- dicSize = LZMA_DIC_MIN;
- p->dicSize = dicSize;
-
- d = data[0];
- if (d >= (9 * 5 * 5))
- return SZ_ERROR_UNSUPPORTED;
-
- p->lc = (Byte)(d % 9);
- d /= 9;
- p->pb = (Byte)(d / 5);
- p->lp = (Byte)(d % 5);
-
- return SZ_OK;
-}
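
/* Worked example for LzmaProps_Decode above: the first props byte packs
   (lc, lp, pb) as d = (pb * 5 + lp) * 9 + lc.  For the defaults
   lc = 3, lp = 0, pb = 2 this gives d = (2*5 + 0)*9 + 3 = 93 = 0x5D,
   and decoding runs in reverse: 93 % 9 = 3 (lc), 93 / 9 = 10,
   10 / 5 = 2 (pb), 10 % 5 = 0 (lp).
   A hypothetical inverse helper, shown only for illustration: */
static Byte LzmaProps_EncodeByte(unsigned lc, unsigned lp, unsigned pb)
{
  return (Byte)((pb * 5 + lp) * 9 + lc);
}
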
-
-static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAllocPtr alloc)
-{
- UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
- if (!p->probs || numProbs != p->numProbs)
- {
- LzmaDec_FreeProbs(p, alloc);
- p->probs = (CLzmaProb *)ISzAlloc_Alloc(alloc, numProbs * sizeof(CLzmaProb));
- if (!p->probs)
- return SZ_ERROR_MEM;
- p->probs_1664 = p->probs + 1664;
- p->numProbs = numProbs;
- }
- return SZ_OK;
-}
-
-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
-{
- CLzmaProps propNew;
- RINOK(LzmaProps_Decode(&propNew, props, propsSize))
- RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc))
- p->prop = propNew;
- return SZ_OK;
-}
-
-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
-{
- CLzmaProps propNew;
- SizeT dicBufSize;
- RINOK(LzmaProps_Decode(&propNew, props, propsSize))
- RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc))
-
- {
- UInt32 dictSize = propNew.dicSize;
- SizeT mask = ((UInt32)1 << 12) - 1;
- if (dictSize >= ((UInt32)1 << 30)) mask = ((UInt32)1 << 22) - 1;
- else if (dictSize >= ((UInt32)1 << 22)) mask = ((UInt32)1 << 20) - 1;
- dicBufSize = ((SizeT)dictSize + mask) & ~mask;
- if (dicBufSize < dictSize)
- dicBufSize = dictSize;
- }
-
- if (!p->dic || dicBufSize != p->dicBufSize)
- {
- LzmaDec_FreeDict(p, alloc);
- p->dic = (Byte *)ISzAlloc_Alloc(alloc, dicBufSize);
- if (!p->dic)
- {
- LzmaDec_FreeProbs(p, alloc);
- return SZ_ERROR_MEM;
- }
- }
- p->dicBufSize = dicBufSize;
- p->prop = propNew;
- return SZ_OK;
-}
-
-SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
- ELzmaStatus *status, ISzAllocPtr alloc)
-{
- CLzmaDec p;
- SRes res;
- SizeT outSize = *destLen, inSize = *srcLen;
- *destLen = *srcLen = 0;
- *status = LZMA_STATUS_NOT_SPECIFIED;
- if (inSize < RC_INIT_SIZE)
- return SZ_ERROR_INPUT_EOF;
- LzmaDec_CONSTRUCT(&p)
- RINOK(LzmaDec_AllocateProbs(&p, propData, propSize, alloc))
- p.dic = dest;
- p.dicBufSize = outSize;
- LzmaDec_Init(&p);
- *srcLen = inSize;
- res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
- *destLen = p.dicPos;
- if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
- res = SZ_ERROR_INPUT_EOF;
- LzmaDec_FreeProbs(&p, alloc);
- return res;
-}
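
The one-call LzmaDecode above is itself built on the buffer interface from LzmaDec.h. For callers that keep the decoder around, a minimal single-shot use of that interface might look as follows (a sketch assuming g_Alloc from Alloc.h; the wrapper name is hypothetical):

#include "LzmaDec.h"
#include "Alloc.h"

static SRes DecodeWithBufInterface(Byte *dest, SizeT *destLen,
                                   const Byte *src, SizeT *srcLen,
                                   const Byte *props /* 5-byte LZMA props */)
{
  SRes res;
  ELzmaStatus status;
  CLzmaDec dec;
  LzmaDec_CONSTRUCT(&dec)
  res = LzmaDec_Allocate(&dec, props, LZMA_PROPS_SIZE, &g_Alloc);
  if (res != SZ_OK)
    return res;
  LzmaDec_Init(&dec);
  res = LzmaDec_DecodeToBuf(&dec, dest, destLen, src, srcLen,
                            LZMA_FINISH_END, &status);
  if (res == SZ_OK && status == LZMA_STATUS_NEEDS_MORE_INPUT)
    res = SZ_ERROR_INPUT_EOF;   /* truncated input, mirroring LzmaDecode above */
  LzmaDec_Free(&dec, &g_Alloc);
  return res;
}
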
diff --git a/3rdparty/7z/src/LzmaDec.h b/3rdparty/7z/src/LzmaDec.h
deleted file mode 100644
index 6432f60852..0000000000
--- a/3rdparty/7z/src/LzmaDec.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* LzmaDec.h -- LZMA Decoder
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA_DEC_H
-#define ZIP7_INC_LZMA_DEC_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-/* #define Z7_LZMA_PROB32 */
-/* Z7_LZMA_PROB32 can increase the speed on some CPUs,
- but memory usage for CLzmaDec::probs will be doubled in that case */
-
-typedef
-#ifdef Z7_LZMA_PROB32
- UInt32
-#else
- UInt16
-#endif
- CLzmaProb;
-
-
-/* ---------- LZMA Properties ---------- */
-
-#define LZMA_PROPS_SIZE 5
-
-typedef struct
-{
- Byte lc;
- Byte lp;
- Byte pb;
- Byte _pad_;
- UInt32 dicSize;
-} CLzmaProps;
-
-/* LzmaProps_Decode - decodes properties
-Returns:
- SZ_OK
- SZ_ERROR_UNSUPPORTED - Unsupported properties
-*/
-
-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
-
-
-/* ---------- LZMA Decoder state ---------- */
-
-/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
- Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
-
-#define LZMA_REQUIRED_INPUT_MAX 20
-
-typedef struct
-{
- /* Don't change this structure. ASM code can use it. */
- CLzmaProps prop;
- CLzmaProb *probs;
- CLzmaProb *probs_1664;
- Byte *dic;
- SizeT dicBufSize;
- SizeT dicPos;
- const Byte *buf;
- UInt32 range;
- UInt32 code;
- UInt32 processedPos;
- UInt32 checkDicSize;
- UInt32 reps[4];
- UInt32 state;
- UInt32 remainLen;
-
- UInt32 numProbs;
- unsigned tempBufSize;
- Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
-} CLzmaDec;
-
-#define LzmaDec_CONSTRUCT(p) { (p)->dic = NULL; (p)->probs = NULL; }
-#define LzmaDec_Construct(p) LzmaDec_CONSTRUCT(p)
-
-void LzmaDec_Init(CLzmaDec *p);
-
-/* There are two types of LZMA streams:
- - Stream with end mark. That end mark adds about 6 bytes to compressed size.
- - Stream without end mark. You must know exact uncompressed size to decompress such stream. */
-
-typedef enum
-{
- LZMA_FINISH_ANY, /* finish at any point */
- LZMA_FINISH_END /* block must be finished at the end */
-} ELzmaFinishMode;
-
-/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
-
- You must use LZMA_FINISH_END, when you know that current output buffer
- covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
-
- If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
- and output value of destLen will be less than output buffer size limit.
- You can check status result also.
-
- You can use multiple checks to test data integrity after full decompression:
- 1) Check Result and "status" variable.
- 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
- 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
- You must use correct finish mode in that case. */
-
-typedef enum
-{
- LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
- LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
- LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
- LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
-} ELzmaStatus;
-
-/* ELzmaStatus is used only as output value for function call */
-
-
-/* ---------- Interfaces ---------- */
-
-/* There are 3 levels of interfaces:
- 1) Dictionary Interface
- 2) Buffer Interface
- 3) One Call Interface
- You can select any of these interfaces, but don't mix functions from different
- groups for same object. */
-
-
-/* There are two variants to allocate state for Dictionary Interface:
- 1) LzmaDec_Allocate / LzmaDec_Free
- 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
- You can use variant 2, if you set dictionary buffer manually.
- For Buffer Interface you must always use variant 1.
-
-LzmaDec_Allocate* can return:
- SZ_OK
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_UNSUPPORTED - Unsupported properties
-*/
-
-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);
-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc);
-
-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);
-void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc);
-
-/* ---------- Dictionary Interface ---------- */
-
-/* You can use it, if you want to eliminate the overhead for data copying from
- dictionary to some other external buffer.
- You must work with CLzmaDec variables directly in this interface.
-
- STEPS:
- LzmaDec_Construct()
- LzmaDec_Allocate()
- for (each new stream)
- {
- LzmaDec_Init()
- while (it needs more decompression)
- {
- LzmaDec_DecodeToDic()
- use data from CLzmaDec::dic and update CLzmaDec::dicPos
- }
- }
- LzmaDec_Free()
-*/
-
-/* LzmaDec_DecodeToDic
-
- The decoding to internal dictionary buffer (CLzmaDec::dic).
- You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
-
-finishMode:
- It has meaning only if the decoding reaches output limit (dicLimit).
- LZMA_FINISH_ANY - Decode just dicLimit bytes.
- LZMA_FINISH_END - Stream must be finished after dicLimit.
-
-Returns:
- SZ_OK
- status:
- LZMA_STATUS_FINISHED_WITH_MARK
- LZMA_STATUS_NOT_FINISHED
- LZMA_STATUS_NEEDS_MORE_INPUT
- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
- SZ_ERROR_DATA - Data error
- SZ_ERROR_FAIL - Some unexpected error: internal error of code, memory corruption or hardware failure
-*/
-
-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
-
-
-/* ---------- Buffer Interface ---------- */
-
-/* It's zlib-like interface.
- See LzmaDec_DecodeToDic description for information about STEPS and return results,
- but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
- to work with CLzmaDec variables manually.
-
-finishMode:
- It has meaning only if the decoding reaches output limit (*destLen).
- LZMA_FINISH_ANY - Decode just destLen bytes.
- LZMA_FINISH_END - Stream must be finished after (*destLen).
-*/
-
-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
-
-
-/* ---------- One Call Interface ---------- */
-
-/* LzmaDecode
-
-finishMode:
- It has meaning only if the decoding reaches output limit (*destLen).
- LZMA_FINISH_ANY - Decode just destLen bytes.
- LZMA_FINISH_END - Stream must be finished after (*destLen).
-
-Returns:
- SZ_OK
- status:
- LZMA_STATUS_FINISHED_WITH_MARK
- LZMA_STATUS_NOT_FINISHED
- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
- SZ_ERROR_DATA - Data error
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_UNSUPPORTED - Unsupported properties
- SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
- SZ_ERROR_FAIL - Some unexpected error: internal error of code, memory corruption or hardware failure
-*/
-
-SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
- ELzmaStatus *status, ISzAllocPtr alloc);
-
-EXTERN_C_END
-
-#endif
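
The Dictionary Interface STEPS above leave the chunk loop to the caller. A hedged sketch of that loop, consuming decoded bytes straight from CLzmaDec::dic and wrapping dicPos when it reaches dicBufSize (g_Alloc comes from Alloc.h; consume() stands in for whatever the caller does with output):

#include "LzmaDec.h"
#include "Alloc.h"

static SRes DecodeStreamToDic(const Byte *src, SizeT srcLen, const Byte *props,
                              void (*consume)(const Byte *data, SizeT size))
{
  SRes res;
  CLzmaDec dec;
  LzmaDec_CONSTRUCT(&dec)
  res = LzmaDec_Allocate(&dec, props, LZMA_PROPS_SIZE, &g_Alloc);
  if (res != SZ_OK)
    return res;
  LzmaDec_Init(&dec);
  for (;;)
  {
    ELzmaStatus status;
    SizeT dicPosPrev, inSize = srcLen;
    if (dec.dicPos == dec.dicBufSize)
      dec.dicPos = 0;                       /* manual wrap, as required above */
    dicPosPrev = dec.dicPos;
    res = LzmaDec_DecodeToDic(&dec, dec.dicBufSize, src, &inSize,
                              LZMA_FINISH_ANY, &status);
    src += inSize;
    srcLen -= inSize;
    consume(dec.dic + dicPosPrev, dec.dicPos - dicPosPrev);
    if (res != SZ_OK || status == LZMA_STATUS_FINISHED_WITH_MARK)
      break;
    if (srcLen == 0 && status != LZMA_STATUS_NOT_FINISHED)
    {
      if (status == LZMA_STATUS_NEEDS_MORE_INPUT)
        res = SZ_ERROR_INPUT_EOF;           /* input ended without an end mark */
      break;
    }
  }
  LzmaDec_Free(&dec, &g_Alloc);
  return res;
}
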
diff --git a/3rdparty/7z/src/LzmaEnc.c b/3rdparty/7z/src/LzmaEnc.c
deleted file mode 100644
index 64bb1aaba4..0000000000
--- a/3rdparty/7z/src/LzmaEnc.c
+++ /dev/null
@@ -1,3144 +0,0 @@
-/* LzmaEnc.c -- LZMA Encoder
-2023-04-13: Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-/* #define SHOW_STAT */
-/* #define SHOW_STAT2 */
-
-#if defined(SHOW_STAT) || defined(SHOW_STAT2)
-#include <stdio.h>
-#endif
-
-#include "CpuArch.h"
-#include "LzmaEnc.h"
-
-#include "LzFind.h"
-#ifndef Z7_ST
-#include "LzFindMt.h"
-#endif
-
-/* the following LzmaEnc_* declarations is internal LZMA interface for LZMA2 encoder */
-
-SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p, ISeqInStreamPtr inStream, UInt32 keepWindowSize,
- ISzAllocPtr alloc, ISzAllocPtr allocBig);
-SRes LzmaEnc_MemPrepare(CLzmaEncHandle p, const Byte *src, SizeT srcLen,
- UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
-SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
- Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
-const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p);
-void LzmaEnc_Finish(CLzmaEncHandle p);
-void LzmaEnc_SaveState(CLzmaEncHandle p);
-void LzmaEnc_RestoreState(CLzmaEncHandle p);
-
-#ifdef SHOW_STAT
-static unsigned g_STAT_OFFSET = 0;
-#endif
-
-/* for good normalization speed we still reserve 256 MB before 4 GB range */
-#define kLzmaMaxHistorySize ((UInt32)15 << 28)
-
-// #define kNumTopBits 24
-#define kTopValue ((UInt32)1 << 24)
-
-#define kNumBitModelTotalBits 11
-#define kBitModelTotal (1 << kNumBitModelTotalBits)
-#define kNumMoveBits 5
-#define kProbInitValue (kBitModelTotal >> 1)
-
-#define kNumMoveReducingBits 4
-#define kNumBitPriceShiftBits 4
-// #define kBitPrice (1 << kNumBitPriceShiftBits)
-
-#define REP_LEN_COUNT 64
-
-void LzmaEncProps_Init(CLzmaEncProps *p)
-{
- p->level = 5;
- p->dictSize = p->mc = 0;
- p->reduceSize = (UInt64)(Int64)-1;
- p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
- p->numHashOutBits = 0;
- p->writeEndMark = 0;
- p->affinity = 0;
-}
-
-void LzmaEncProps_Normalize(CLzmaEncProps *p)
-{
- int level = p->level;
- if (level < 0) level = 5;
- p->level = level;
-
- if (p->dictSize == 0)
- p->dictSize =
- ( level <= 3 ? ((UInt32)1 << (level * 2 + 16)) :
- ( level <= 6 ? ((UInt32)1 << (level + 19)) :
- ( level <= 7 ? ((UInt32)1 << 25) : ((UInt32)1 << 26)
- )));
-
- if (p->dictSize > p->reduceSize)
- {
- UInt32 v = (UInt32)p->reduceSize;
- const UInt32 kReduceMin = ((UInt32)1 << 12);
- if (v < kReduceMin)
- v = kReduceMin;
- if (p->dictSize > v)
- p->dictSize = v;
- }
-
- if (p->lc < 0) p->lc = 3;
- if (p->lp < 0) p->lp = 0;
- if (p->pb < 0) p->pb = 2;
-
- if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
- if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
- if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
- if (p->numHashBytes < 0) p->numHashBytes = (p->btMode ? 4 : 5);
- if (p->mc == 0) p->mc = (16 + ((unsigned)p->fb >> 1)) >> (p->btMode ? 0 : 1);
-
- if (p->numThreads < 0)
- p->numThreads =
- #ifndef Z7_ST
- ((p->btMode && p->algo) ? 2 : 1);
- #else
- 1;
- #endif
-}
-
-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
-{
- CLzmaEncProps props = *props2;
- LzmaEncProps_Normalize(&props);
- return props.dictSize;
-}
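
/* Worked example of the default-dictionary rule in LzmaEncProps_Normalize
   above: level 1 -> 1 << 18 (256 KB), level 5 -> 1 << 24 (16 MB),
   level 7 -> 1 << 25 (32 MB), level 9 -> 1 << 26 (64 MB).
   A hypothetical standalone version of just that rule, assuming the level
   has already been normalized to 0..9: */
static UInt32 LzmaEnc_DefaultDictForLevel(int level)
{
  if (level <= 3) return (UInt32)1 << (level * 2 + 16);
  if (level <= 6) return (UInt32)1 << (level + 19);
  if (level == 7) return (UInt32)1 << 25;
  return (UInt32)1 << 26;
}
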
-
-
-/*
-x86/x64:
-
-BSR:
- IF (SRC == 0) ZF = 1, DEST is undefined;
- AMD : DEST is unchanged;
- IF (SRC != 0) ZF = 0; DEST is index of top non-zero bit
- BSR is slow in some processors
-
-LZCNT:
- IF (SRC == 0) CF = 1, DEST is size_in_bits_of_register(src) (32 or 64)
- IF (SRC != 0) CF = 0, DEST = num_lead_zero_bits
- IF (DEST == 0) ZF = 1;
-
-LZCNT works only in new processors starting from Haswell.
-if LZCNT is not supported by processor, then it's executed as BSR.
-LZCNT can be faster than BSR, if supported.
-*/
-
-// #define LZMA_LOG_BSR
-
-#if defined(MY_CPU_ARM_OR_ARM64) /* || defined(MY_CPU_X86_OR_AMD64) */
-
- #if (defined(__clang__) && (__clang_major__ >= 6)) \
- || (defined(__GNUC__) && (__GNUC__ >= 6))
- #define LZMA_LOG_BSR
- #elif defined(_MSC_VER) && (_MSC_VER >= 1300)
- // #if defined(MY_CPU_ARM_OR_ARM64)
- #define LZMA_LOG_BSR
- // #endif
- #endif
-#endif
-
-// #include
-
-#ifdef LZMA_LOG_BSR
-
-#if defined(__clang__) \
- || defined(__GNUC__)
-
-/*
- C code: : (30 - __builtin_clz(x))
- gcc9/gcc10 for x64 /x86 : 30 - (bsr(x) xor 31)
- clang10 for x64 : 31 + (bsr(x) xor -32)
-*/
-
- #define MY_clz(x) ((unsigned)__builtin_clz(x))
- // __lzcnt32
- // __builtin_ia32_lzcnt_u32
-
-#else // #if defined(_MSC_VER)
-
- #ifdef MY_CPU_ARM_OR_ARM64
-
- #define MY_clz _CountLeadingZeros
-
- #else // if defined(MY_CPU_X86_OR_AMD64)
-
- // #define MY_clz __lzcnt // we can use lzcnt (unsupported by old CPU)
- // _BitScanReverse code is not optimal for some MSVC compilers
- #define BSR2_RET(pos, res) { unsigned long zz; _BitScanReverse(&zz, (pos)); zz--; \
- res = (zz + zz) + (pos >> zz); }
-
- #endif // MY_CPU_X86_OR_AMD64
-
-#endif // _MSC_VER
-
-
-#ifndef BSR2_RET
-
- #define BSR2_RET(pos, res) { unsigned zz = 30 - MY_clz(pos); \
- res = (zz + zz) + (pos >> zz); }
-
-#endif
-
-
-unsigned GetPosSlot1(UInt32 pos);
-unsigned GetPosSlot1(UInt32 pos)
-{
- unsigned res;
- BSR2_RET(pos, res);
- return res;
-}
-#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
-#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
-
-
-#else // ! LZMA_LOG_BSR
-
-#define kNumLogBits (11 + sizeof(size_t) / 8 * 3)
-
-#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
-
-static void LzmaEnc_FastPosInit(Byte *g_FastPos)
-{
- unsigned slot;
- g_FastPos[0] = 0;
- g_FastPos[1] = 1;
- g_FastPos += 2;
-
- for (slot = 2; slot < kNumLogBits * 2; slot++)
- {
- size_t k = ((size_t)1 << ((slot >> 1) - 1));
- size_t j;
- for (j = 0; j < k; j++)
- g_FastPos[j] = (Byte)slot;
- g_FastPos += k;
- }
-}
-
-/* we can use ((limit - pos) >> 31) only if (pos < ((UInt32)1 << 31)) */
-/*
-#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
- (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
- res = p->g_FastPos[pos >> zz] + (zz * 2); }
-*/
-
-/*
-#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
- (0 - (((((UInt32)1 << (kNumLogBits)) - 1) - (pos >> 6)) >> 31))); \
- res = p->g_FastPos[pos >> zz] + (zz * 2); }
-*/
-
-#define BSR2_RET(pos, res) { unsigned zz = (pos < (1 << (kNumLogBits + 6))) ? 6 : 6 + kNumLogBits - 1; \
- res = p->g_FastPos[pos >> zz] + (zz * 2); }
-
-/*
-#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
- p->g_FastPos[pos >> 6] + 12 : \
- p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
-*/
-
-#define GetPosSlot1(pos) p->g_FastPos[pos]
-#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
-#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos & (kNumFullDistances - 1)]; else BSR2_RET(pos, res); }
-
-#endif // LZMA_LOG_BSR
-
-
-#define LZMA_NUM_REPS 4
-
-typedef UInt16 CState;
-typedef UInt16 CExtra;
-
-typedef struct
-{
- UInt32 price;
- CState state;
- CExtra extra;
- // 0 : normal
- // 1 : LIT : MATCH
- // > 1 : MATCH (extra-1) : LIT : REP0 (len)
- UInt32 len;
- UInt32 dist;
- UInt32 reps[LZMA_NUM_REPS];
-} COptimal;
-
-
-// 18.06
-#define kNumOpts (1 << 11)
-#define kPackReserve (kNumOpts * 8)
-// #define kNumOpts (1 << 12)
-// #define kPackReserve (1 + kNumOpts * 2)
-
-#define kNumLenToPosStates 4
-#define kNumPosSlotBits 6
-// #define kDicLogSizeMin 0
-#define kDicLogSizeMax 32
-#define kDistTableSizeMax (kDicLogSizeMax * 2)
-
-#define kNumAlignBits 4
-#define kAlignTableSize (1 << kNumAlignBits)
-#define kAlignMask (kAlignTableSize - 1)
-
-#define kStartPosModelIndex 4
-#define kEndPosModelIndex 14
-#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
-
-typedef
-#ifdef Z7_LZMA_PROB32
- UInt32
-#else
- UInt16
-#endif
- CLzmaProb;
-
-#define LZMA_PB_MAX 4
-#define LZMA_LC_MAX 8
-#define LZMA_LP_MAX 4
-
-#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
-
-#define kLenNumLowBits 3
-#define kLenNumLowSymbols (1 << kLenNumLowBits)
-#define kLenNumHighBits 8
-#define kLenNumHighSymbols (1 << kLenNumHighBits)
-#define kLenNumSymbolsTotal (kLenNumLowSymbols * 2 + kLenNumHighSymbols)
-
-#define LZMA_MATCH_LEN_MIN 2
-#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
-
-#define kNumStates 12
-
-
-typedef struct
-{
- CLzmaProb low[LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)];
- CLzmaProb high[kLenNumHighSymbols];
-} CLenEnc;
-
-
-typedef struct
-{
- unsigned tableSize;
- UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
- // UInt32 prices1[LZMA_NUM_PB_STATES_MAX][kLenNumLowSymbols * 2];
- // UInt32 prices2[kLenNumSymbolsTotal];
-} CLenPriceEnc;
-
-#define GET_PRICE_LEN(p, posState, len) \
- ((p)->prices[posState][(size_t)(len) - LZMA_MATCH_LEN_MIN])
-
-/*
-#define GET_PRICE_LEN(p, posState, len) \
- ((p)->prices2[(size_t)(len) - 2] + ((p)->prices1[posState][((len) - 2) & (kLenNumLowSymbols * 2 - 1)] & (((len) - 2 - kLenNumLowSymbols * 2) >> 9)))
-*/
-
-typedef struct
-{
- UInt32 range;
- unsigned cache;
- UInt64 low;
- UInt64 cacheSize;
- Byte *buf;
- Byte *bufLim;
- Byte *bufBase;
- ISeqOutStreamPtr outStream;
- UInt64 processed;
- SRes res;
-} CRangeEnc;
-
-
-typedef struct
-{
- CLzmaProb *litProbs;
-
- unsigned state;
- UInt32 reps[LZMA_NUM_REPS];
-
- CLzmaProb posAlignEncoder[1 << kNumAlignBits];
- CLzmaProb isRep[kNumStates];
- CLzmaProb isRepG0[kNumStates];
- CLzmaProb isRepG1[kNumStates];
- CLzmaProb isRepG2[kNumStates];
- CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
- CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
-
- CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
- CLzmaProb posEncoders[kNumFullDistances];
-
- CLenEnc lenProbs;
- CLenEnc repLenProbs;
-
-} CSaveState;
-
-
-typedef UInt32 CProbPrice;
-
-
-struct CLzmaEnc
-{
- void *matchFinderObj;
- IMatchFinder2 matchFinder;
-
- unsigned optCur;
- unsigned optEnd;
-
- unsigned longestMatchLen;
- unsigned numPairs;
- UInt32 numAvail;
-
- unsigned state;
- unsigned numFastBytes;
- unsigned additionalOffset;
- UInt32 reps[LZMA_NUM_REPS];
- unsigned lpMask, pbMask;
- CLzmaProb *litProbs;
- CRangeEnc rc;
-
- UInt32 backRes;
-
- unsigned lc, lp, pb;
- unsigned lclp;
-
- BoolInt fastMode;
- BoolInt writeEndMark;
- BoolInt finished;
- BoolInt multiThread;
- BoolInt needInit;
- // BoolInt _maxMode;
-
- UInt64 nowPos64;
-
- unsigned matchPriceCount;
- // unsigned alignPriceCount;
- int repLenEncCounter;
-
- unsigned distTableSize;
-
- UInt32 dictSize;
- SRes result;
-
- #ifndef Z7_ST
- BoolInt mtMode;
-  // the beginning of CMatchFinderMt is used in the LZ thread
-  CMatchFinderMt matchFinderMt;
-  // the end of CMatchFinderMt is used in the BT and HASH threads
- // #else
- // CMatchFinder matchFinderBase;
- #endif
- CMatchFinder matchFinderBase;
-
-
-  // we assume that we have 8-byte alignment after CMatchFinder
-
- #ifndef Z7_ST
- Byte pad[128];
- #endif
-
- // LZ thread
- CProbPrice ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
-
-  // we want the {len, dist} pairs to be 8-byte aligned in the matches array
-  UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2];
-
-  // we want 8-byte alignment here
- UInt32 alignPrices[kAlignTableSize];
- UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
- UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
-
- CLzmaProb posAlignEncoder[1 << kNumAlignBits];
- CLzmaProb isRep[kNumStates];
- CLzmaProb isRepG0[kNumStates];
- CLzmaProb isRepG1[kNumStates];
- CLzmaProb isRepG2[kNumStates];
- CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
- CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
- CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
- CLzmaProb posEncoders[kNumFullDistances];
-
- CLenEnc lenProbs;
- CLenEnc repLenProbs;
-
- #ifndef LZMA_LOG_BSR
- Byte g_FastPos[1 << kNumLogBits];
- #endif
-
- CLenPriceEnc lenEnc;
- CLenPriceEnc repLenEnc;
-
- COptimal opt[kNumOpts];
-
- CSaveState saveState;
-
- // BoolInt mf_Failure;
- #ifndef Z7_ST
- Byte pad2[128];
- #endif
-};
-
-
-#define MFB (p->matchFinderBase)
-/*
-#ifndef Z7_ST
-#define MFB (p->matchFinderMt.MatchFinder)
-#endif
-*/
-
-// #define GET_CLzmaEnc_p CLzmaEnc *p = (CLzmaEnc*)(void *)p;
-// #define GET_const_CLzmaEnc_p const CLzmaEnc *p = (const CLzmaEnc*)(const void *)p;
-
-#define COPY_ARR(dest, src, arr) memcpy((dest)->arr, (src)->arr, sizeof((src)->arr));
-
-#define COPY_LZMA_ENC_STATE(d, s, p) \
- (d)->state = (s)->state; \
- COPY_ARR(d, s, reps) \
- COPY_ARR(d, s, posAlignEncoder) \
- COPY_ARR(d, s, isRep) \
- COPY_ARR(d, s, isRepG0) \
- COPY_ARR(d, s, isRepG1) \
- COPY_ARR(d, s, isRepG2) \
- COPY_ARR(d, s, isMatch) \
- COPY_ARR(d, s, isRep0Long) \
- COPY_ARR(d, s, posSlotEncoder) \
- COPY_ARR(d, s, posEncoders) \
- (d)->lenProbs = (s)->lenProbs; \
- (d)->repLenProbs = (s)->repLenProbs; \
- memcpy((d)->litProbs, (s)->litProbs, ((UInt32)0x300 << (p)->lclp) * sizeof(CLzmaProb));
-
-void LzmaEnc_SaveState(CLzmaEncHandle p)
-{
- // GET_CLzmaEnc_p
- CSaveState *v = &p->saveState;
- COPY_LZMA_ENC_STATE(v, p, p)
-}
-
-void LzmaEnc_RestoreState(CLzmaEncHandle p)
-{
- // GET_CLzmaEnc_p
- const CSaveState *v = &p->saveState;
- COPY_LZMA_ENC_STATE(p, v, p)
-}
-
-
-Z7_NO_INLINE
-SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props2)
-{
- // GET_CLzmaEnc_p
- CLzmaEncProps props = *props2;
- LzmaEncProps_Normalize(&props);
-
- if (props.lc > LZMA_LC_MAX
- || props.lp > LZMA_LP_MAX
- || props.pb > LZMA_PB_MAX)
- return SZ_ERROR_PARAM;
-
-
- if (props.dictSize > kLzmaMaxHistorySize)
- props.dictSize = kLzmaMaxHistorySize;
-
- #ifndef LZMA_LOG_BSR
- {
- const UInt64 dict64 = props.dictSize;
- if (dict64 > ((UInt64)1 << kDicLogSizeMaxCompress))
- return SZ_ERROR_PARAM;
- }
- #endif
-
- p->dictSize = props.dictSize;
- {
- unsigned fb = (unsigned)props.fb;
- if (fb < 5)
- fb = 5;
- if (fb > LZMA_MATCH_LEN_MAX)
- fb = LZMA_MATCH_LEN_MAX;
- p->numFastBytes = fb;
- }
- p->lc = (unsigned)props.lc;
- p->lp = (unsigned)props.lp;
- p->pb = (unsigned)props.pb;
- p->fastMode = (props.algo == 0);
- // p->_maxMode = True;
- MFB.btMode = (Byte)(props.btMode ? 1 : 0);
- // MFB.btMode = (Byte)(props.btMode);
- {
- unsigned numHashBytes = 4;
- if (props.btMode)
- {
- if (props.numHashBytes < 2) numHashBytes = 2;
- else if (props.numHashBytes < 4) numHashBytes = (unsigned)props.numHashBytes;
- }
- if (props.numHashBytes >= 5) numHashBytes = 5;
-
- MFB.numHashBytes = numHashBytes;
- // MFB.numHashBytes_Min = 2;
- MFB.numHashOutBits = (Byte)props.numHashOutBits;
- }
-
- MFB.cutValue = props.mc;
-
- p->writeEndMark = (BoolInt)props.writeEndMark;
-
- #ifndef Z7_ST
- /*
- if (newMultiThread != _multiThread)
- {
- ReleaseMatchFinder();
- _multiThread = newMultiThread;
- }
- */
- p->multiThread = (props.numThreads > 1);
- p->matchFinderMt.btSync.affinity =
- p->matchFinderMt.hashSync.affinity = props.affinity;
- #endif
-
- return SZ_OK;
-}
-
-
-void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSize)
-{
-  // GET_CLzmaEnc_p
-  MFB.expectedDataSize = expectedDataSize;
-}
-
-
-#define kState_Start 0
-#define kState_LitAfterMatch 4
-#define kState_LitAfterRep 5
-#define kState_MatchAfterLit 7
-#define kState_RepAfterLit 8
-
-static const Byte kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
-static const Byte kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
-static const Byte kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
-static const Byte kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
-
-#define IsLitState(s) ((s) < 7)
-#define GetLenToPosState2(len) (((len) < kNumLenToPosStates - 1) ? (len) : kNumLenToPosStates - 1)
-#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
-
-#define kInfinityPrice (1 << 30)
-
-static void RangeEnc_Construct(CRangeEnc *p)
-{
- p->outStream = NULL;
- p->bufBase = NULL;
-}
-
-#define RangeEnc_GetProcessed(p) ( (p)->processed + (size_t)((p)->buf - (p)->bufBase) + (p)->cacheSize)
-#define RangeEnc_GetProcessed_sizet(p) ((size_t)(p)->processed + (size_t)((p)->buf - (p)->bufBase) + (size_t)(p)->cacheSize)
-
-#define RC_BUF_SIZE (1 << 16)
-
-static int RangeEnc_Alloc(CRangeEnc *p, ISzAllocPtr alloc)
-{
- if (!p->bufBase)
- {
- p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, RC_BUF_SIZE);
- if (!p->bufBase)
- return 0;
- p->bufLim = p->bufBase + RC_BUF_SIZE;
- }
- return 1;
-}
-
-static void RangeEnc_Free(CRangeEnc *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->bufBase);
- p->bufBase = NULL;
-}
-
-static void RangeEnc_Init(CRangeEnc *p)
-{
- p->range = 0xFFFFFFFF;
- p->cache = 0;
- p->low = 0;
- p->cacheSize = 0;
-
- p->buf = p->bufBase;
-
- p->processed = 0;
- p->res = SZ_OK;
-}
-
-Z7_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
-{
- const size_t num = (size_t)(p->buf - p->bufBase);
- if (p->res == SZ_OK)
- {
- if (num != ISeqOutStream_Write(p->outStream, p->bufBase, num))
- p->res = SZ_ERROR_WRITE;
- }
- p->processed += num;
- p->buf = p->bufBase;
-}
-
-Z7_NO_INLINE static void Z7_FASTCALL RangeEnc_ShiftLow(CRangeEnc *p)
-{
- UInt32 low = (UInt32)p->low;
- unsigned high = (unsigned)(p->low >> 32);
- p->low = (UInt32)(low << 8);
- if (low < (UInt32)0xFF000000 || high != 0)
- {
- {
- Byte *buf = p->buf;
- *buf++ = (Byte)(p->cache + high);
- p->cache = (unsigned)(low >> 24);
- p->buf = buf;
- if (buf == p->bufLim)
- RangeEnc_FlushStream(p);
- if (p->cacheSize == 0)
- return;
- }
- high += 0xFF;
- for (;;)
- {
- Byte *buf = p->buf;
- *buf++ = (Byte)(high);
- p->buf = buf;
- if (buf == p->bufLim)
- RangeEnc_FlushStream(p);
- if (--p->cacheSize == 0)
- return;
- }
- }
- p->cacheSize++;
-}
-
-static void RangeEnc_FlushData(CRangeEnc *p)
-{
- int i;
- for (i = 0; i < 5; i++)
- RangeEnc_ShiftLow(p);
-}
-
-#define RC_NORM(p) if (range < kTopValue) { range <<= 8; RangeEnc_ShiftLow(p); }
-
-#define RC_BIT_PRE(p, prob) \
- ttt = *(prob); \
- newBound = (range >> kNumBitModelTotalBits) * ttt;
-
-// #define Z7_LZMA_ENC_USE_BRANCH
-
-#ifdef Z7_LZMA_ENC_USE_BRANCH
-
-#define RC_BIT(p, prob, bit) { \
- RC_BIT_PRE(p, prob) \
- if (bit == 0) { range = newBound; ttt += (kBitModelTotal - ttt) >> kNumMoveBits; } \
- else { (p)->low += newBound; range -= newBound; ttt -= ttt >> kNumMoveBits; } \
- *(prob) = (CLzmaProb)ttt; \
- RC_NORM(p) \
- }
-
-#else
-
-#define RC_BIT(p, prob, bit) { \
- UInt32 mask; \
- RC_BIT_PRE(p, prob) \
- mask = 0 - (UInt32)bit; \
- range &= mask; \
- mask &= newBound; \
- range -= mask; \
- (p)->low += mask; \
- mask = (UInt32)bit - 1; \
- range += newBound & mask; \
- mask &= (kBitModelTotal - ((1 << kNumMoveBits) - 1)); \
- mask += ((1 << kNumMoveBits) - 1); \
- ttt += (UInt32)((Int32)(mask - ttt) >> kNumMoveBits); \
- *(prob) = (CLzmaProb)ttt; \
- RC_NORM(p) \
- }
-
-#endif
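The branch-free RC_BIT above replaces the if/else of the Z7_LZMA_ENC_USE_BRANCH variant with mask arithmetic. The following sketch checks that both bodies produce identical range/low/probability updates; kBitModelTotal = 1 << 11, kNumBitModelTotalBits = 11 and kNumMoveBits = 5 are assumptions taken from the usual SDK headers (not defined in this file), and an arithmetic right shift of a negative value is assumed, exactly as in the original macro.

#include <assert.h>
#include <stdint.h>

#define kBitModelTotal (1u << 11)   /* assumed: kNumBitModelTotalBits = 11 */
#define kNumMoveBits 5              /* assumed SDK value */

typedef struct { uint32_t range; uint64_t low; uint16_t prob; } MiniRc;

/* the branching body of RC_BIT (Z7_LZMA_ENC_USE_BRANCH variant) */
static void BitBranch(MiniRc *p, unsigned bit, uint32_t newBound)
{
  unsigned ttt = p->prob;
  if (bit == 0) { p->range = newBound; ttt += (kBitModelTotal - ttt) >> kNumMoveBits; }
  else { p->low += newBound; p->range -= newBound; ttt -= ttt >> kNumMoveBits; }
  p->prob = (uint16_t)ttt;
}

/* the branch-free body of RC_BIT */
static void BitBranchless(MiniRc *p, unsigned bit, uint32_t newBound)
{
  uint32_t ttt = p->prob, range = p->range, mask;
  mask = 0 - (uint32_t)bit;                /* all ones when bit == 1 */
  range &= mask;
  mask &= newBound;
  range -= mask;
  p->low += mask;
  mask = (uint32_t)bit - 1;                /* all ones when bit == 0 */
  range += newBound & mask;
  mask &= (kBitModelTotal - ((1 << kNumMoveBits) - 1));
  mask += ((1 << kNumMoveBits) - 1);
  ttt += (uint32_t)((int32_t)(mask - ttt) >> kNumMoveBits);
  p->prob = (uint16_t)ttt;
  p->range = range;
}

int main(void)
{
  const uint16_t probs[] = { 7, 300, 1024, 2000 };
  unsigned i, bit;
  for (i = 0; i < 4; i++)
    for (bit = 0; bit < 2; bit++)
    {
      MiniRc a = { 0xFFFFFFFFu, 0, probs[i] };
      MiniRc b = a;
      const uint32_t newBound = (a.range >> 11) * a.prob;   /* RC_BIT_PRE */
      BitBranch(&a, bit, newBound);
      BitBranchless(&b, bit, newBound);
      assert(a.range == b.range && a.low == b.low && a.prob == b.prob);
    }
  return 0;
}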
-
-
-
-
-#define RC_BIT_0_BASE(p, prob) \
- range = newBound; *(prob) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
-
-#define RC_BIT_1_BASE(p, prob) \
- range -= newBound; (p)->low += newBound; *(prob) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); \
-
-#define RC_BIT_0(p, prob) \
- RC_BIT_0_BASE(p, prob) \
- RC_NORM(p)
-
-#define RC_BIT_1(p, prob) \
- RC_BIT_1_BASE(p, prob) \
- RC_NORM(p)
-
-static void RangeEnc_EncodeBit_0(CRangeEnc *p, CLzmaProb *prob)
-{
- UInt32 range, ttt, newBound;
- range = p->range;
- RC_BIT_PRE(p, prob)
- RC_BIT_0(p, prob)
- p->range = range;
-}
-
-static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 sym)
-{
- UInt32 range = p->range;
- sym |= 0x100;
- do
- {
- UInt32 ttt, newBound;
- // RangeEnc_EncodeBit(p, probs + (sym >> 8), (sym >> 7) & 1);
- CLzmaProb *prob = probs + (sym >> 8);
- UInt32 bit = (sym >> 7) & 1;
- sym <<= 1;
- RC_BIT(p, prob, bit)
- }
- while (sym < 0x10000);
- p->range = range;
-}
-
-static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 sym, UInt32 matchByte)
-{
- UInt32 range = p->range;
- UInt32 offs = 0x100;
- sym |= 0x100;
- do
- {
- UInt32 ttt, newBound;
- CLzmaProb *prob;
- UInt32 bit;
- matchByte <<= 1;
- // RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (sym >> 8)), (sym >> 7) & 1);
- prob = probs + (offs + (matchByte & offs) + (sym >> 8));
- bit = (sym >> 7) & 1;
- sym <<= 1;
- offs &= ~(matchByte ^ sym);
- RC_BIT(p, prob, bit)
- }
- while (sym < 0x10000);
- p->range = range;
-}
-
-
-
-static void LzmaEnc_InitPriceTables(CProbPrice *ProbPrices)
-{
- UInt32 i;
- for (i = 0; i < (kBitModelTotal >> kNumMoveReducingBits); i++)
- {
- const unsigned kCyclesBits = kNumBitPriceShiftBits;
- UInt32 w = (i << kNumMoveReducingBits) + (1 << (kNumMoveReducingBits - 1));
- unsigned bitCount = 0;
- unsigned j;
- for (j = 0; j < kCyclesBits; j++)
- {
- w = w * w;
- bitCount <<= 1;
- while (w >= ((UInt32)1 << 16))
- {
- w >>= 1;
- bitCount++;
- }
- }
- ProbPrices[i] = (CProbPrice)(((unsigned)kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
- // printf("\n%3d: %5d", i, ProbPrices[i]);
- }
-}
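Each ProbPrices entry is a fixed-point logarithm: it approximates -log2(prob / kBitModelTotal) scaled by 1 << kNumBitPriceShiftBits, i.e. the cost of coding a symbol of probability prob, in 1/16-bit units. A stand-alone sketch of that correspondence; kBitModelTotal = 1 << 11, kNumMoveReducingBits = 4 and kNumBitPriceShiftBits = 4 are assumed SDK values, not defined in this file.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define kNumBitModelTotalBits 11
#define kBitModelTotal (1u << kNumBitModelTotalBits)
#define kNumMoveReducingBits 4
#define kNumBitPriceShiftBits 4

/* same fixed-point loop as LzmaEnc_InitPriceTables, for one table index */
static uint32_t PriceEntry(unsigned i)
{
  const unsigned kCyclesBits = kNumBitPriceShiftBits;
  uint32_t w = ((uint32_t)i << kNumMoveReducingBits) + (1 << (kNumMoveReducingBits - 1));
  unsigned bitCount = 0, j;
  for (j = 0; j < kCyclesBits; j++)
  {
    w = w * w;
    bitCount <<= 1;
    while (w >= ((uint32_t)1 << 16)) { w >>= 1; bitCount++; }
  }
  return ((uint32_t)kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount;
}

int main(void)   /* link with -lm */
{
  unsigned i;
  for (i = 1; i < (kBitModelTotal >> kNumMoveReducingBits); i += 25)
  {
    double prob = (double)((i << kNumMoveReducingBits) + (1 << (kNumMoveReducingBits - 1)))
                  / kBitModelTotal;
    printf("i=%3u  table price=%3u  -log2(prob)*%u=%6.2f\n",
           i, (unsigned)PriceEntry(i), 1u << kNumBitPriceShiftBits,
           -log2(prob) * (1 << kNumBitPriceShiftBits));
  }
  return 0;
}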
-
-
-#define GET_PRICE(prob, bit) \
- p->ProbPrices[((prob) ^ (unsigned)(((-(int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]
-
-#define GET_PRICEa(prob, bit) \
- ProbPrices[((prob) ^ (unsigned)((-((int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]
-
-#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
-#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
-
-#define GET_PRICEa_0(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
-#define GET_PRICEa_1(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
-
-
-static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 sym, const CProbPrice *ProbPrices)
-{
- UInt32 price = 0;
- sym |= 0x100;
- do
- {
- unsigned bit = sym & 1;
- sym >>= 1;
- price += GET_PRICEa(probs[sym], bit);
- }
- while (sym >= 2);
- return price;
-}
-
-
-static UInt32 LitEnc_Matched_GetPrice(const CLzmaProb *probs, UInt32 sym, UInt32 matchByte, const CProbPrice *ProbPrices)
-{
- UInt32 price = 0;
- UInt32 offs = 0x100;
- sym |= 0x100;
- do
- {
- matchByte <<= 1;
- price += GET_PRICEa(probs[offs + (matchByte & offs) + (sym >> 8)], (sym >> 7) & 1);
- sym <<= 1;
- offs &= ~(matchByte ^ sym);
- }
- while (sym < 0x10000);
- return price;
-}
-
-
-static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, unsigned numBits, unsigned sym)
-{
- UInt32 range = rc->range;
- unsigned m = 1;
- do
- {
- UInt32 ttt, newBound;
- unsigned bit = sym & 1;
- // RangeEnc_EncodeBit(rc, probs + m, bit);
- sym >>= 1;
- RC_BIT(rc, probs + m, bit)
- m = (m << 1) | bit;
- }
- while (--numBits);
- rc->range = range;
-}
-
-
-
-static void LenEnc_Init(CLenEnc *p)
-{
- unsigned i;
- for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)); i++)
- p->low[i] = kProbInitValue;
- for (i = 0; i < kLenNumHighSymbols; i++)
- p->high[i] = kProbInitValue;
-}
-
-static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posState)
-{
- UInt32 range, ttt, newBound;
- CLzmaProb *probs = p->low;
- range = rc->range;
- RC_BIT_PRE(rc, probs)
- if (sym >= kLenNumLowSymbols)
- {
- RC_BIT_1(rc, probs)
- probs += kLenNumLowSymbols;
- RC_BIT_PRE(rc, probs)
- if (sym >= kLenNumLowSymbols * 2)
- {
- RC_BIT_1(rc, probs)
- rc->range = range;
- // RcTree_Encode(rc, p->high, kLenNumHighBits, sym - kLenNumLowSymbols * 2);
- LitEnc_Encode(rc, p->high, sym - kLenNumLowSymbols * 2);
- return;
- }
- sym -= kLenNumLowSymbols;
- }
-
- // RcTree_Encode(rc, probs + (posState << kLenNumLowBits), kLenNumLowBits, sym);
- {
- unsigned m;
- unsigned bit;
- RC_BIT_0(rc, probs)
- probs += (posState << (1 + kLenNumLowBits));
- bit = (sym >> 2) ; RC_BIT(rc, probs + 1, bit) m = (1 << 1) + bit;
- bit = (sym >> 1) & 1; RC_BIT(rc, probs + m, bit) m = (m << 1) + bit;
- bit = sym & 1; RC_BIT(rc, probs + m, bit)
- rc->range = range;
- }
-}
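LenEnc_Encode splits the length alphabet into three groups: the first 8 symbols go through the first low tree, the next 8 through the second low tree after one choice bit, and the remaining 256 through the high tree after two choice bits. A small sketch of that mapping, with sym = len - LZMA_MATCH_LEN_MIN, so match lengths run from 2 to 273 (illustration only).

#include <stdio.h>

int main(void)
{
  unsigned len;
  for (len = 2; len <= 273; len++)      /* LZMA_MATCH_LEN_MIN .. LZMA_MATCH_LEN_MAX */
  {
    unsigned sym = len - 2;
    const char *group = sym < 8 ? "low" : sym < 16 ? "low+8" : "high";
    unsigned coded = sym < 8 ? sym : sym < 16 ? sym - 8 : sym - 16;
    if (len <= 10 || (len >= 17 && len <= 19) || len >= 272)
      printf("len=%3u -> group=%-5s coded symbol=%3u\n", len, group, coded);
  }
  return 0;
}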
-
-static void SetPrices_3(const CLzmaProb *probs, UInt32 startPrice, UInt32 *prices, const CProbPrice *ProbPrices)
-{
- unsigned i;
- for (i = 0; i < 8; i += 2)
- {
- UInt32 price = startPrice;
- UInt32 prob;
- price += GET_PRICEa(probs[1 ], (i >> 2));
- price += GET_PRICEa(probs[2 + (i >> 2)], (i >> 1) & 1);
- prob = probs[4 + (i >> 1)];
- prices[i ] = price + GET_PRICEa_0(prob);
- prices[i + 1] = price + GET_PRICEa_1(prob);
- }
-}
-
-
-Z7_NO_INLINE static void Z7_FASTCALL LenPriceEnc_UpdateTables(
- CLenPriceEnc *p,
- unsigned numPosStates,
- const CLenEnc *enc,
- const CProbPrice *ProbPrices)
-{
- UInt32 b;
-
- {
- unsigned prob = enc->low[0];
- UInt32 a, c;
- unsigned posState;
- b = GET_PRICEa_1(prob);
- a = GET_PRICEa_0(prob);
- c = b + GET_PRICEa_0(enc->low[kLenNumLowSymbols]);
- for (posState = 0; posState < numPosStates; posState++)
- {
- UInt32 *prices = p->prices[posState];
- const CLzmaProb *probs = enc->low + (posState << (1 + kLenNumLowBits));
- SetPrices_3(probs, a, prices, ProbPrices);
- SetPrices_3(probs + kLenNumLowSymbols, c, prices + kLenNumLowSymbols, ProbPrices);
- }
- }
-
- /*
- {
- unsigned i;
- UInt32 b;
- a = GET_PRICEa_0(enc->low[0]);
- for (i = 0; i < kLenNumLowSymbols; i++)
- p->prices2[i] = a;
- a = GET_PRICEa_1(enc->low[0]);
- b = a + GET_PRICEa_0(enc->low[kLenNumLowSymbols]);
- for (i = kLenNumLowSymbols; i < kLenNumLowSymbols * 2; i++)
- p->prices2[i] = b;
- a += GET_PRICEa_1(enc->low[kLenNumLowSymbols]);
- }
- */
-
- // p->counter = numSymbols;
- // p->counter = 64;
-
- {
- unsigned i = p->tableSize;
-
- if (i > kLenNumLowSymbols * 2)
- {
- const CLzmaProb *probs = enc->high;
- UInt32 *prices = p->prices[0] + kLenNumLowSymbols * 2;
- i -= kLenNumLowSymbols * 2 - 1;
- i >>= 1;
- b += GET_PRICEa_1(enc->low[kLenNumLowSymbols]);
- do
- {
- /*
- p->prices2[i] = a +
- // RcTree_GetPrice(enc->high, kLenNumHighBits, i - kLenNumLowSymbols * 2, ProbPrices);
- LitEnc_GetPrice(probs, i - kLenNumLowSymbols * 2, ProbPrices);
- */
- // UInt32 price = a + RcTree_GetPrice(probs, kLenNumHighBits - 1, sym, ProbPrices);
- unsigned sym = --i + (1 << (kLenNumHighBits - 1));
- UInt32 price = b;
- do
- {
- unsigned bit = sym & 1;
- sym >>= 1;
- price += GET_PRICEa(probs[sym], bit);
- }
- while (sym >= 2);
-
- {
- unsigned prob = probs[(size_t)i + (1 << (kLenNumHighBits - 1))];
- prices[(size_t)i * 2 ] = price + GET_PRICEa_0(prob);
- prices[(size_t)i * 2 + 1] = price + GET_PRICEa_1(prob);
- }
- }
- while (i);
-
- {
- unsigned posState;
- size_t num = (p->tableSize - kLenNumLowSymbols * 2) * sizeof(p->prices[0][0]);
- for (posState = 1; posState < numPosStates; posState++)
- memcpy(p->prices[posState] + kLenNumLowSymbols * 2, p->prices[0] + kLenNumLowSymbols * 2, num);
- }
- }
- }
-}
-
-/*
- #ifdef SHOW_STAT
- g_STAT_OFFSET += num;
- printf("\n MovePos %u", num);
- #endif
-*/
-
-#define MOVE_POS(p, num) { \
- p->additionalOffset += (num); \
- p->matchFinder.Skip(p->matchFinderObj, (UInt32)(num)); }
-
-
-static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
-{
- unsigned numPairs;
-
- p->additionalOffset++;
- p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
- {
- const UInt32 *d = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
- // if (!d) { p->mf_Failure = True; *numPairsRes = 0; return 0; }
- numPairs = (unsigned)(d - p->matches);
- }
- *numPairsRes = numPairs;
-
- #ifdef SHOW_STAT
- printf("\n i = %u numPairs = %u ", g_STAT_OFFSET, numPairs / 2);
- g_STAT_OFFSET++;
- {
- unsigned i;
- for (i = 0; i < numPairs; i += 2)
- printf("%2u %6u | ", p->matches[i], p->matches[i + 1]);
- }
- #endif
-
- if (numPairs == 0)
- return 0;
- {
- const unsigned len = p->matches[(size_t)numPairs - 2];
- if (len != p->numFastBytes)
- return len;
- {
- UInt32 numAvail = p->numAvail;
- if (numAvail > LZMA_MATCH_LEN_MAX)
- numAvail = LZMA_MATCH_LEN_MAX;
- {
- const Byte *p1 = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
- const Byte *p2 = p1 + len;
- const ptrdiff_t dif = (ptrdiff_t)-1 - (ptrdiff_t)p->matches[(size_t)numPairs - 1];
- const Byte *lim = p1 + numAvail;
- for (; p2 != lim && *p2 == p2[dif]; p2++)
- {}
- return (unsigned)(p2 - p1);
- }
- }
- }
-}
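ReadMatchDistances leaves the match-finder output in p->matches as { len, dist } pairs with strictly increasing len, normally returning the length of the longest pair; *numPairsRes is twice the number of pairs. A small sketch of that layout and of the scan pattern the parsers below use to skip pairs shorter than a given startLen; the pair values here are made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* hypothetical match-finder output: { len, dist } pairs, len strictly increasing */
  const uint32_t matches[] = { 2, 7, 3, 100, 5, 4000, 8, 65536 };
  const unsigned numPairs = 8;
  const unsigned startLen = 4;
  unsigned offs = 0;

  while (startLen > matches[offs])      /* same scan as in GetOptimum below */
    offs += 2;
  for (; offs < numPairs; offs += 2)
    printf("candidate match: len=%u dist=%u\n",
           (unsigned)matches[offs], (unsigned)matches[offs + 1]);
  return 0;
}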
-
-#define MARK_LIT ((UInt32)(Int32)-1)
-
-#define MakeAs_Lit(p) { (p)->dist = MARK_LIT; (p)->extra = 0; }
-#define MakeAs_ShortRep(p) { (p)->dist = 0; (p)->extra = 0; }
-#define IsShortRep(p) ((p)->dist == 0)
-
-
-#define GetPrice_ShortRep(p, state, posState) \
- ( GET_PRICE_0(p->isRepG0[state]) + GET_PRICE_0(p->isRep0Long[state][posState]))
-
-#define GetPrice_Rep_0(p, state, posState) ( \
- GET_PRICE_1(p->isMatch[state][posState]) \
- + GET_PRICE_1(p->isRep0Long[state][posState])) \
- + GET_PRICE_1(p->isRep[state]) \
- + GET_PRICE_0(p->isRepG0[state])
-
-Z7_FORCE_INLINE
-static UInt32 GetPrice_PureRep(const CLzmaEnc *p, unsigned repIndex, size_t state, size_t posState)
-{
- UInt32 price;
- UInt32 prob = p->isRepG0[state];
- if (repIndex == 0)
- {
- price = GET_PRICE_0(prob);
- price += GET_PRICE_1(p->isRep0Long[state][posState]);
- }
- else
- {
- price = GET_PRICE_1(prob);
- prob = p->isRepG1[state];
- if (repIndex == 1)
- price += GET_PRICE_0(prob);
- else
- {
- price += GET_PRICE_1(prob);
- price += GET_PRICE(p->isRepG2[state], repIndex - 2);
- }
- }
- return price;
-}
-
-
-static unsigned Backward(CLzmaEnc *p, unsigned cur)
-{
- unsigned wr = cur + 1;
- p->optEnd = wr;
-
- for (;;)
- {
- UInt32 dist = p->opt[cur].dist;
- unsigned len = (unsigned)p->opt[cur].len;
- unsigned extra = (unsigned)p->opt[cur].extra;
- cur -= len;
-
- if (extra)
- {
- wr--;
- p->opt[wr].len = (UInt32)len;
- cur -= extra;
- len = extra;
- if (extra == 1)
- {
- p->opt[wr].dist = dist;
- dist = MARK_LIT;
- }
- else
- {
- p->opt[wr].dist = 0;
- len--;
- wr--;
- p->opt[wr].dist = MARK_LIT;
- p->opt[wr].len = 1;
- }
- }
-
- if (cur == 0)
- {
- p->backRes = dist;
- p->optCur = wr;
- return len;
- }
-
- wr--;
- p->opt[wr].dist = dist;
- p->opt[wr].len = (UInt32)len;
- }
-}
-
-
-
-#define LIT_PROBS(pos, prevByte) \
- (p->litProbs + (UInt32)3 * (((((pos) << 8) + (prevByte)) & p->lpMask) << p->lc))
-
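LIT_PROBS picks one 0x300-entry literal sub-table with a single mask-and-shift over ((pos << 8) + prevByte), using the lpMask value that LzmaEnc_Init builds as ((UInt32)0x100 << lp) - ((unsigned)0x100 >> lc). A sketch checking that this equals the textbook literal context, ctx = ((pos & ((1 << lp) - 1)) << lc) + (prevByte >> (8 - lc)), multiplied by 0x300 (illustration only).

#include <assert.h>
#include <stdint.h>

int main(void)
{
  unsigned lc, lp;
  for (lc = 0; lc <= 8; lc++)           /* LZMA_LC_MAX */
    for (lp = 0; lp <= 4; lp++)         /* LZMA_LP_MAX */
    {
      const uint32_t lpMask = ((uint32_t)0x100 << lp) - ((uint32_t)0x100 >> lc);
      uint32_t pos, prevByte;
      for (pos = 0; pos < 64; pos++)
        for (prevByte = 0; prevByte < 256; prevByte++)
        {
          /* offset produced by the LIT_PROBS masking trick */
          const uint32_t viaMask = (uint32_t)3 * ((((pos << 8) + prevByte) & lpMask) << lc);
          /* textbook literal context, one 0x300-entry table per context */
          const uint32_t ctx = ((pos & ((1u << lp) - 1)) << lc) + (prevByte >> (8 - lc));
          assert(viaMask == (uint32_t)0x300 * ctx);
        }
    }
  return 0;
}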
-
-static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
-{
- unsigned last, cur;
- UInt32 reps[LZMA_NUM_REPS];
- unsigned repLens[LZMA_NUM_REPS];
- UInt32 *matches;
-
- {
- UInt32 numAvail;
- unsigned numPairs, mainLen, repMaxIndex, i, posState;
- UInt32 matchPrice, repMatchPrice;
- const Byte *data;
- Byte curByte, matchByte;
-
- p->optCur = p->optEnd = 0;
-
- if (p->additionalOffset == 0)
- mainLen = ReadMatchDistances(p, &numPairs);
- else
- {
- mainLen = p->longestMatchLen;
- numPairs = p->numPairs;
- }
-
- numAvail = p->numAvail;
- if (numAvail < 2)
- {
- p->backRes = MARK_LIT;
- return 1;
- }
- if (numAvail > LZMA_MATCH_LEN_MAX)
- numAvail = LZMA_MATCH_LEN_MAX;
-
- data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
- repMaxIndex = 0;
-
- for (i = 0; i < LZMA_NUM_REPS; i++)
- {
- unsigned len;
- const Byte *data2;
- reps[i] = p->reps[i];
- data2 = data - reps[i];
- if (data[0] != data2[0] || data[1] != data2[1])
- {
- repLens[i] = 0;
- continue;
- }
- for (len = 2; len < numAvail && data[len] == data2[len]; len++)
- {}
- repLens[i] = len;
- if (len > repLens[repMaxIndex])
- repMaxIndex = i;
- if (len == LZMA_MATCH_LEN_MAX) // 21.03 : optimization
- break;
- }
-
- if (repLens[repMaxIndex] >= p->numFastBytes)
- {
- unsigned len;
- p->backRes = (UInt32)repMaxIndex;
- len = repLens[repMaxIndex];
- MOVE_POS(p, len - 1)
- return len;
- }
-
- matches = p->matches;
- #define MATCHES matches
- // #define MATCHES p->matches
-
- if (mainLen >= p->numFastBytes)
- {
- p->backRes = MATCHES[(size_t)numPairs - 1] + LZMA_NUM_REPS;
- MOVE_POS(p, mainLen - 1)
- return mainLen;
- }
-
- curByte = *data;
- matchByte = *(data - reps[0]);
-
- last = repLens[repMaxIndex];
- if (last <= mainLen)
- last = mainLen;
-
- if (last < 2 && curByte != matchByte)
- {
- p->backRes = MARK_LIT;
- return 1;
- }
-
- p->opt[0].state = (CState)p->state;
-
- posState = (position & p->pbMask);
-
- {
- const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
- p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
- (!IsLitState(p->state) ?
- LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) :
- LitEnc_GetPrice(probs, curByte, p->ProbPrices));
- }
-
- MakeAs_Lit(&p->opt[1])
-
- matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
- repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
-
- // 18.06
- if (matchByte == curByte && repLens[0] == 0)
- {
- UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, p->state, posState);
- if (shortRepPrice < p->opt[1].price)
- {
- p->opt[1].price = shortRepPrice;
- MakeAs_ShortRep(&p->opt[1])
- }
- if (last < 2)
- {
- p->backRes = p->opt[1].dist;
- return 1;
- }
- }
-
- p->opt[1].len = 1;
-
- p->opt[0].reps[0] = reps[0];
- p->opt[0].reps[1] = reps[1];
- p->opt[0].reps[2] = reps[2];
- p->opt[0].reps[3] = reps[3];
-
- // ---------- REP ----------
-
- for (i = 0; i < LZMA_NUM_REPS; i++)
- {
- unsigned repLen = repLens[i];
- UInt32 price;
- if (repLen < 2)
- continue;
- price = repMatchPrice + GetPrice_PureRep(p, i, p->state, posState);
- do
- {
- UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, repLen);
- COptimal *opt = &p->opt[repLen];
- if (price2 < opt->price)
- {
- opt->price = price2;
- opt->len = (UInt32)repLen;
- opt->dist = (UInt32)i;
- opt->extra = 0;
- }
- }
- while (--repLen >= 2);
- }
-
-
- // ---------- MATCH ----------
- {
- unsigned len = repLens[0] + 1;
- if (len <= mainLen)
- {
- unsigned offs = 0;
- UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
-
- if (len < 2)
- len = 2;
- else
- while (len > MATCHES[offs])
- offs += 2;
-
- for (; ; len++)
- {
- COptimal *opt;
- UInt32 dist = MATCHES[(size_t)offs + 1];
- UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
- unsigned lenToPosState = GetLenToPosState(len);
-
- if (dist < kNumFullDistances)
- price += p->distancesPrices[lenToPosState][dist & (kNumFullDistances - 1)];
- else
- {
- unsigned slot;
- GetPosSlot2(dist, slot)
- price += p->alignPrices[dist & kAlignMask];
- price += p->posSlotPrices[lenToPosState][slot];
- }
-
- opt = &p->opt[len];
-
- if (price < opt->price)
- {
- opt->price = price;
- opt->len = (UInt32)len;
- opt->dist = dist + LZMA_NUM_REPS;
- opt->extra = 0;
- }
-
- if (len == MATCHES[offs])
- {
- offs += 2;
- if (offs == numPairs)
- break;
- }
- }
- }
- }
-
-
- cur = 0;
-
- #ifdef SHOW_STAT2
- /* if (position >= 0) */
- {
- unsigned i;
- printf("\n pos = %4X", position);
- for (i = cur; i <= last; i++)
- printf("\nprice[%4X] = %u", position - cur + i, p->opt[i].price);
- }
- #endif
- }
-
-
-
- // ---------- Optimal Parsing ----------
-
- for (;;)
- {
- unsigned numAvail;
- UInt32 numAvailFull;
- unsigned newLen, numPairs, prev, state, posState, startLen;
- UInt32 litPrice, matchPrice, repMatchPrice;
- BoolInt nextIsLit;
- Byte curByte, matchByte;
- const Byte *data;
- COptimal *curOpt, *nextOpt;
-
- if (++cur == last)
- break;
-
- // 18.06
- if (cur >= kNumOpts - 64)
- {
- unsigned j, best;
- UInt32 price = p->opt[cur].price;
- best = cur;
- for (j = cur + 1; j <= last; j++)
- {
- UInt32 price2 = p->opt[j].price;
- if (price >= price2)
- {
- price = price2;
- best = j;
- }
- }
- {
- unsigned delta = best - cur;
- if (delta != 0)
- {
- MOVE_POS(p, delta)
- }
- }
- cur = best;
- break;
- }
-
- newLen = ReadMatchDistances(p, &numPairs);
-
- if (newLen >= p->numFastBytes)
- {
- p->numPairs = numPairs;
- p->longestMatchLen = newLen;
- break;
- }
-
- curOpt = &p->opt[cur];
-
- position++;
-
- // we need that check here, if skip_items in p->opt are possible
- /*
- if (curOpt->price >= kInfinityPrice)
- continue;
- */
-
- prev = cur - curOpt->len;
-
- if (curOpt->len == 1)
- {
- state = (unsigned)p->opt[prev].state;
- if (IsShortRep(curOpt))
- state = kShortRepNextStates[state];
- else
- state = kLiteralNextStates[state];
- }
- else
- {
- const COptimal *prevOpt;
- UInt32 b0;
- UInt32 dist = curOpt->dist;
-
- if (curOpt->extra)
- {
- prev -= (unsigned)curOpt->extra;
- state = kState_RepAfterLit;
- if (curOpt->extra == 1)
- state = (dist < LZMA_NUM_REPS ? kState_RepAfterLit : kState_MatchAfterLit);
- }
- else
- {
- state = (unsigned)p->opt[prev].state;
- if (dist < LZMA_NUM_REPS)
- state = kRepNextStates[state];
- else
- state = kMatchNextStates[state];
- }
-
- prevOpt = &p->opt[prev];
- b0 = prevOpt->reps[0];
-
- if (dist < LZMA_NUM_REPS)
- {
- if (dist == 0)
- {
- reps[0] = b0;
- reps[1] = prevOpt->reps[1];
- reps[2] = prevOpt->reps[2];
- reps[3] = prevOpt->reps[3];
- }
- else
- {
- reps[1] = b0;
- b0 = prevOpt->reps[1];
- if (dist == 1)
- {
- reps[0] = b0;
- reps[2] = prevOpt->reps[2];
- reps[3] = prevOpt->reps[3];
- }
- else
- {
- reps[2] = b0;
- reps[0] = prevOpt->reps[dist];
- reps[3] = prevOpt->reps[dist ^ 1];
- }
- }
- }
- else
- {
- reps[0] = (dist - LZMA_NUM_REPS + 1);
- reps[1] = b0;
- reps[2] = prevOpt->reps[1];
- reps[3] = prevOpt->reps[2];
- }
- }
-
- curOpt->state = (CState)state;
- curOpt->reps[0] = reps[0];
- curOpt->reps[1] = reps[1];
- curOpt->reps[2] = reps[2];
- curOpt->reps[3] = reps[3];
-
- data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
- curByte = *data;
- matchByte = *(data - reps[0]);
-
- posState = (position & p->pbMask);
-
- /*
- The order of Price checks:
- < LIT
- <= SHORT_REP
- < LIT : REP_0
- < REP [ : LIT : REP_0 ]
- < MATCH [ : LIT : REP_0 ]
- */
-
- {
- UInt32 curPrice = curOpt->price;
- unsigned prob = p->isMatch[state][posState];
- matchPrice = curPrice + GET_PRICE_1(prob);
- litPrice = curPrice + GET_PRICE_0(prob);
- }
-
- nextOpt = &p->opt[(size_t)cur + 1];
- nextIsLit = False;
-
- // here we can allow skip_items in p->opt, if we don't check (nextOpt->price < kInfinityPrice)
- // 18.new.06
- if ((nextOpt->price < kInfinityPrice
- // && !IsLitState(state)
- && matchByte == curByte)
- || litPrice > nextOpt->price
- )
- litPrice = 0;
- else
- {
- const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
- litPrice += (!IsLitState(state) ?
- LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) :
- LitEnc_GetPrice(probs, curByte, p->ProbPrices));
-
- if (litPrice < nextOpt->price)
- {
- nextOpt->price = litPrice;
- nextOpt->len = 1;
- MakeAs_Lit(nextOpt)
- nextIsLit = True;
- }
- }
-
- repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
-
- numAvailFull = p->numAvail;
- {
- unsigned temp = kNumOpts - 1 - cur;
- if (numAvailFull > temp)
- numAvailFull = (UInt32)temp;
- }
-
- // 18.06
- // ---------- SHORT_REP ----------
- if (IsLitState(state)) // 18.new
- if (matchByte == curByte)
- if (repMatchPrice < nextOpt->price) // 18.new
- // if (numAvailFull < 2 || data[1] != *(data - reps[0] + 1))
- if (
- // nextOpt->price >= kInfinityPrice ||
- nextOpt->len < 2 // we can check nextOpt->len, if skip items are not allowed in p->opt
- || (nextOpt->dist != 0
- // && nextOpt->extra <= 1 // 17.old
- )
- )
- {
- UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, state, posState);
- // if (shortRepPrice <= nextOpt->price) // 17.old
- if (shortRepPrice < nextOpt->price) // 18.new
- {
- nextOpt->price = shortRepPrice;
- nextOpt->len = 1;
- MakeAs_ShortRep(nextOpt)
- nextIsLit = False;
- }
- }
-
- if (numAvailFull < 2)
- continue;
- numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
-
- // numAvail <= p->numFastBytes
-
- // ---------- LIT : REP_0 ----------
-
- if (!nextIsLit
- && litPrice != 0 // 18.new
- && matchByte != curByte
- && numAvailFull > 2)
- {
- const Byte *data2 = data - reps[0];
- if (data[1] == data2[1] && data[2] == data2[2])
- {
- unsigned len;
- unsigned limit = p->numFastBytes + 1;
- if (limit > numAvailFull)
- limit = numAvailFull;
- for (len = 3; len < limit && data[len] == data2[len]; len++)
- {}
-
- {
- unsigned state2 = kLiteralNextStates[state];
- unsigned posState2 = (position + 1) & p->pbMask;
- UInt32 price = litPrice + GetPrice_Rep_0(p, state2, posState2);
- {
- unsigned offset = cur + len;
-
- if (last < offset)
- last = offset;
-
- // do
- {
- UInt32 price2;
- COptimal *opt;
- len--;
- // price2 = price + GetPrice_Len_Rep_0(p, len, state2, posState2);
- price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len);
-
- opt = &p->opt[offset];
- // offset--;
- if (price2 < opt->price)
- {
- opt->price = price2;
- opt->len = (UInt32)len;
- opt->dist = 0;
- opt->extra = 1;
- }
- }
- // while (len >= 3);
- }
- }
- }
- }
-
- startLen = 2; /* speed optimization */
-
- {
- // ---------- REP ----------
- unsigned repIndex = 0; // 17.old
- // unsigned repIndex = IsLitState(state) ? 0 : 1; // 18.notused
- for (; repIndex < LZMA_NUM_REPS; repIndex++)
- {
- unsigned len;
- UInt32 price;
- const Byte *data2 = data - reps[repIndex];
- if (data[0] != data2[0] || data[1] != data2[1])
- continue;
-
- for (len = 2; len < numAvail && data[len] == data2[len]; len++)
- {}
-
- // if (len < startLen) continue; // 18.new: speed optimization
-
- {
- unsigned offset = cur + len;
- if (last < offset)
- last = offset;
- }
- {
- unsigned len2 = len;
- price = repMatchPrice + GetPrice_PureRep(p, repIndex, state, posState);
- do
- {
- UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, len2);
- COptimal *opt = &p->opt[cur + len2];
- if (price2 < opt->price)
- {
- opt->price = price2;
- opt->len = (UInt32)len2;
- opt->dist = (UInt32)repIndex;
- opt->extra = 0;
- }
- }
- while (--len2 >= 2);
- }
-
- if (repIndex == 0) startLen = len + 1; // 17.old
- // startLen = len + 1; // 18.new
-
- /* if (_maxMode) */
- {
- // ---------- REP : LIT : REP_0 ----------
- // numFastBytes + 1 + numFastBytes
-
- unsigned len2 = len + 1;
- unsigned limit = len2 + p->numFastBytes;
- if (limit > numAvailFull)
- limit = numAvailFull;
-
- len2 += 2;
- if (len2 <= limit)
- if (data[len2 - 2] == data2[len2 - 2])
- if (data[len2 - 1] == data2[len2 - 1])
- {
- unsigned state2 = kRepNextStates[state];
- unsigned posState2 = (position + len) & p->pbMask;
- price += GET_PRICE_LEN(&p->repLenEnc, posState, len)
- + GET_PRICE_0(p->isMatch[state2][posState2])
- + LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]),
- data[len], data2[len], p->ProbPrices);
-
- // state2 = kLiteralNextStates[state2];
- state2 = kState_LitAfterRep;
- posState2 = (posState2 + 1) & p->pbMask;
-
-
- price += GetPrice_Rep_0(p, state2, posState2);
-
- for (; len2 < limit && data[len2] == data2[len2]; len2++)
- {}
-
- len2 -= len;
- // if (len2 >= 3)
- {
- {
- unsigned offset = cur + len + len2;
-
- if (last < offset)
- last = offset;
- // do
- {
- UInt32 price2;
- COptimal *opt;
- len2--;
- // price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2);
- price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2);
-
- opt = &p->opt[offset];
- // offset--;
- if (price2 < opt->price)
- {
- opt->price = price2;
- opt->len = (UInt32)len2;
- opt->extra = (CExtra)(len + 1);
- opt->dist = (UInt32)repIndex;
- }
- }
- // while (len2 >= 3);
- }
- }
- }
- }
- }
- }
-
-
- // ---------- MATCH ----------
- /* for (unsigned len = 2; len <= newLen; len++) */
- if (newLen > numAvail)
- {
- newLen = numAvail;
- for (numPairs = 0; newLen > MATCHES[numPairs]; numPairs += 2);
- MATCHES[numPairs] = (UInt32)newLen;
- numPairs += 2;
- }
-
- // startLen = 2; /* speed optimization */
-
- if (newLen >= startLen)
- {
- UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
- UInt32 dist;
- unsigned offs, posSlot, len;
-
- {
- unsigned offset = cur + newLen;
- if (last < offset)
- last = offset;
- }
-
- offs = 0;
- while (startLen > MATCHES[offs])
- offs += 2;
- dist = MATCHES[(size_t)offs + 1];
-
- // if (dist >= kNumFullDistances)
- GetPosSlot2(dist, posSlot)
-
- for (len = /*2*/ startLen; ; len++)
- {
- UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
- {
- COptimal *opt;
- unsigned lenNorm = len - 2;
- lenNorm = GetLenToPosState2(lenNorm);
- if (dist < kNumFullDistances)
- price += p->distancesPrices[lenNorm][dist & (kNumFullDistances - 1)];
- else
- price += p->posSlotPrices[lenNorm][posSlot] + p->alignPrices[dist & kAlignMask];
-
- opt = &p->opt[cur + len];
- if (price < opt->price)
- {
- opt->price = price;
- opt->len = (UInt32)len;
- opt->dist = dist + LZMA_NUM_REPS;
- opt->extra = 0;
- }
- }
-
- if (len == MATCHES[offs])
- {
- // if (p->_maxMode) {
- // MATCH : LIT : REP_0
-
- const Byte *data2 = data - dist - 1;
- unsigned len2 = len + 1;
- unsigned limit = len2 + p->numFastBytes;
- if (limit > numAvailFull)
- limit = numAvailFull;
-
- len2 += 2;
- if (len2 <= limit)
- if (data[len2 - 2] == data2[len2 - 2])
- if (data[len2 - 1] == data2[len2 - 1])
- {
- for (; len2 < limit && data[len2] == data2[len2]; len2++)
- {}
-
- len2 -= len;
-
- // if (len2 >= 3)
- {
- unsigned state2 = kMatchNextStates[state];
- unsigned posState2 = (position + len) & p->pbMask;
- unsigned offset;
- price += GET_PRICE_0(p->isMatch[state2][posState2]);
- price += LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]),
- data[len], data2[len], p->ProbPrices);
-
- // state2 = kLiteralNextStates[state2];
- state2 = kState_LitAfterMatch;
-
- posState2 = (posState2 + 1) & p->pbMask;
- price += GetPrice_Rep_0(p, state2, posState2);
-
- offset = cur + len + len2;
-
- if (last < offset)
- last = offset;
- // do
- {
- UInt32 price2;
- COptimal *opt;
- len2--;
- // price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2);
- price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2);
- opt = &p->opt[offset];
- // offset--;
- if (price2 < opt->price)
- {
- opt->price = price2;
- opt->len = (UInt32)len2;
- opt->extra = (CExtra)(len + 1);
- opt->dist = dist + LZMA_NUM_REPS;
- }
- }
- // while (len2 >= 3);
- }
-
- }
-
- offs += 2;
- if (offs == numPairs)
- break;
- dist = MATCHES[(size_t)offs + 1];
- // if (dist >= kNumFullDistances)
- GetPosSlot2(dist, posSlot)
- }
- }
- }
- }
-
- do
- p->opt[last].price = kInfinityPrice;
- while (--last);
-
- return Backward(p, cur);
-}
-
-
-
-#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
-
-
-
-static unsigned GetOptimumFast(CLzmaEnc *p)
-{
- UInt32 numAvail, mainDist;
- unsigned mainLen, numPairs, repIndex, repLen, i;
- const Byte *data;
-
- if (p->additionalOffset == 0)
- mainLen = ReadMatchDistances(p, &numPairs);
- else
- {
- mainLen = p->longestMatchLen;
- numPairs = p->numPairs;
- }
-
- numAvail = p->numAvail;
- p->backRes = MARK_LIT;
- if (numAvail < 2)
- return 1;
- // if (mainLen < 2 && p->state == 0) return 1; // 18.06.notused
- if (numAvail > LZMA_MATCH_LEN_MAX)
- numAvail = LZMA_MATCH_LEN_MAX;
- data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
- repLen = repIndex = 0;
-
- for (i = 0; i < LZMA_NUM_REPS; i++)
- {
- unsigned len;
- const Byte *data2 = data - p->reps[i];
- if (data[0] != data2[0] || data[1] != data2[1])
- continue;
- for (len = 2; len < numAvail && data[len] == data2[len]; len++)
- {}
- if (len >= p->numFastBytes)
- {
- p->backRes = (UInt32)i;
- MOVE_POS(p, len - 1)
- return len;
- }
- if (len > repLen)
- {
- repIndex = i;
- repLen = len;
- }
- }
-
- if (mainLen >= p->numFastBytes)
- {
- p->backRes = p->matches[(size_t)numPairs - 1] + LZMA_NUM_REPS;
- MOVE_POS(p, mainLen - 1)
- return mainLen;
- }
-
- mainDist = 0; /* for GCC */
-
- if (mainLen >= 2)
- {
- mainDist = p->matches[(size_t)numPairs - 1];
- while (numPairs > 2)
- {
- UInt32 dist2;
- if (mainLen != p->matches[(size_t)numPairs - 4] + 1)
- break;
- dist2 = p->matches[(size_t)numPairs - 3];
- if (!ChangePair(dist2, mainDist))
- break;
- numPairs -= 2;
- mainLen--;
- mainDist = dist2;
- }
- if (mainLen == 2 && mainDist >= 0x80)
- mainLen = 1;
- }
-
- if (repLen >= 2)
- if ( repLen + 1 >= mainLen
- || (repLen + 2 >= mainLen && mainDist >= (1 << 9))
- || (repLen + 3 >= mainLen && mainDist >= (1 << 15)))
- {
- p->backRes = (UInt32)repIndex;
- MOVE_POS(p, repLen - 1)
- return repLen;
- }
-
- if (mainLen < 2 || numAvail <= 2)
- return 1;
-
- {
- unsigned len1 = ReadMatchDistances(p, &p->numPairs);
- p->longestMatchLen = len1;
-
- if (len1 >= 2)
- {
- UInt32 newDist = p->matches[(size_t)p->numPairs - 1];
- if ( (len1 >= mainLen && newDist < mainDist)
- || (len1 == mainLen + 1 && !ChangePair(mainDist, newDist))
- || (len1 > mainLen + 1)
- || (len1 + 1 >= mainLen && mainLen >= 3 && ChangePair(newDist, mainDist)))
- return 1;
- }
- }
-
- data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
-
- for (i = 0; i < LZMA_NUM_REPS; i++)
- {
- unsigned len, limit;
- const Byte *data2 = data - p->reps[i];
- if (data[0] != data2[0] || data[1] != data2[1])
- continue;
- limit = mainLen - 1;
- for (len = 2;; len++)
- {
- if (len >= limit)
- return 1;
- if (data[len] != data2[len])
- break;
- }
- }
-
- p->backRes = mainDist + LZMA_NUM_REPS;
- if (mainLen != 2)
- {
- MOVE_POS(p, mainLen - 2)
- }
- return mainLen;
-}
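GetOptimumFast above trades match length against distance with ChangePair: a match that is one byte longer is dropped in favour of the shorter-distance one when its distance, shifted right by 7, still exceeds the other distance (i.e. when it is roughly 128 times larger). A tiny demonstration with made-up distances (illustration only).

#include <stdio.h>
#include <stdint.h>

#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))

int main(void)
{
  /* made-up pairs: distance of the shorter match vs distance of the 1-byte-longer match */
  const uint32_t cases[][2] = { { 100, 1000 }, { 100, 20000 }, { 16, 2175 }, { 16, 2176 } };
  unsigned i;
  for (i = 0; i < 4; i++)
    printf("smallDist=%5u bigDist=%5u -> drop the longer match: %s\n",
           (unsigned)cases[i][0], (unsigned)cases[i][1],
           ChangePair(cases[i][0], cases[i][1]) ? "yes" : "no");
  return 0;
}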
-
-
-
-
-static void WriteEndMarker(CLzmaEnc *p, unsigned posState)
-{
- UInt32 range;
- range = p->rc.range;
- {
- UInt32 ttt, newBound;
- CLzmaProb *prob = &p->isMatch[p->state][posState];
- RC_BIT_PRE(&p->rc, prob)
- RC_BIT_1(&p->rc, prob)
- prob = &p->isRep[p->state];
- RC_BIT_PRE(&p->rc, prob)
- RC_BIT_0(&p->rc, prob)
- }
- p->state = kMatchNextStates[p->state];
-
- p->rc.range = range;
- LenEnc_Encode(&p->lenProbs, &p->rc, 0, posState);
- range = p->rc.range;
-
- {
- // RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[0], (1 << kNumPosSlotBits) - 1);
- CLzmaProb *probs = p->posSlotEncoder[0];
- unsigned m = 1;
- do
- {
- UInt32 ttt, newBound;
- RC_BIT_PRE(p, probs + m)
- RC_BIT_1(&p->rc, probs + m)
- m = (m << 1) + 1;
- }
- while (m < (1 << kNumPosSlotBits));
- }
- {
- // RangeEnc_EncodeDirectBits(&p->rc, ((UInt32)1 << (30 - kNumAlignBits)) - 1, 30 - kNumAlignBits); UInt32 range = p->range;
- unsigned numBits = 30 - kNumAlignBits;
- do
- {
- range >>= 1;
- p->rc.low += range;
- RC_NORM(&p->rc)
- }
- while (--numBits);
- }
-
- {
- // RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
- CLzmaProb *probs = p->posAlignEncoder;
- unsigned m = 1;
- do
- {
- UInt32 ttt, newBound;
- RC_BIT_PRE(p, probs + m)
- RC_BIT_1(&p->rc, probs + m)
- m = (m << 1) + 1;
- }
- while (m < kAlignTableSize);
- }
- p->rc.range = range;
-}
-
-
-static SRes CheckErrors(CLzmaEnc *p)
-{
- if (p->result != SZ_OK)
- return p->result;
- if (p->rc.res != SZ_OK)
- p->result = SZ_ERROR_WRITE;
-
- #ifndef Z7_ST
- if (
- // p->mf_Failure ||
- (p->mtMode &&
- ( // p->matchFinderMt.failure_LZ_LZ ||
- p->matchFinderMt.failure_LZ_BT))
- )
- {
- p->result = MY_HRES_ERROR_INTERNAL_ERROR;
- // printf("\nCheckErrors p->matchFinderMt.failureLZ\n");
- }
- #endif
-
- if (MFB.result != SZ_OK)
- p->result = SZ_ERROR_READ;
-
- if (p->result != SZ_OK)
- p->finished = True;
- return p->result;
-}
-
-
-Z7_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
-{
- /* ReleaseMFStream(); */
- p->finished = True;
- if (p->writeEndMark)
- WriteEndMarker(p, nowPos & p->pbMask);
- RangeEnc_FlushData(&p->rc);
- RangeEnc_FlushStream(&p->rc);
- return CheckErrors(p);
-}
-
-
-Z7_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
-{
- unsigned i;
- const CProbPrice *ProbPrices = p->ProbPrices;
- const CLzmaProb *probs = p->posAlignEncoder;
- // p->alignPriceCount = 0;
- for (i = 0; i < kAlignTableSize / 2; i++)
- {
- UInt32 price = 0;
- unsigned sym = i;
- unsigned m = 1;
- unsigned bit;
- UInt32 prob;
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
- prob = probs[m];
- p->alignPrices[i ] = price + GET_PRICEa_0(prob);
- p->alignPrices[i + 8] = price + GET_PRICEa_1(prob);
- // p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
- }
-}
-
-
-Z7_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
-{
- // int y; for (y = 0; y < 100; y++) {
-
- UInt32 tempPrices[kNumFullDistances];
- unsigned i, lps;
-
- const CProbPrice *ProbPrices = p->ProbPrices;
- p->matchPriceCount = 0;
-
- for (i = kStartPosModelIndex / 2; i < kNumFullDistances / 2; i++)
- {
- unsigned posSlot = GetPosSlot1(i);
- unsigned footerBits = (posSlot >> 1) - 1;
- unsigned base = ((2 | (posSlot & 1)) << footerBits);
- const CLzmaProb *probs = p->posEncoders + (size_t)base * 2;
- // tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base, footerBits, i - base, p->ProbPrices);
- UInt32 price = 0;
- unsigned m = 1;
- unsigned sym = i;
- unsigned offset = (unsigned)1 << footerBits;
- base += i;
-
- if (footerBits)
- do
- {
- unsigned bit = sym & 1;
- sym >>= 1;
- price += GET_PRICEa(probs[m], bit);
- m = (m << 1) + bit;
- }
- while (--footerBits);
-
- {
- unsigned prob = probs[m];
- tempPrices[base ] = price + GET_PRICEa_0(prob);
- tempPrices[base + offset] = price + GET_PRICEa_1(prob);
- }
- }
-
- for (lps = 0; lps < kNumLenToPosStates; lps++)
- {
- unsigned slot;
- unsigned distTableSize2 = (p->distTableSize + 1) >> 1;
- UInt32 *posSlotPrices = p->posSlotPrices[lps];
- const CLzmaProb *probs = p->posSlotEncoder[lps];
-
- for (slot = 0; slot < distTableSize2; slot++)
- {
- // posSlotPrices[slot] = RcTree_GetPrice(encoder, kNumPosSlotBits, slot, p->ProbPrices);
- UInt32 price;
- unsigned bit;
- unsigned sym = slot + (1 << (kNumPosSlotBits - 1));
- unsigned prob;
- bit = sym & 1; sym >>= 1; price = GET_PRICEa(probs[sym], bit);
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
- bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
- prob = probs[(size_t)slot + (1 << (kNumPosSlotBits - 1))];
- posSlotPrices[(size_t)slot * 2 ] = price + GET_PRICEa_0(prob);
- posSlotPrices[(size_t)slot * 2 + 1] = price + GET_PRICEa_1(prob);
- }
-
- {
- UInt32 delta = ((UInt32)((kEndPosModelIndex / 2 - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
- for (slot = kEndPosModelIndex / 2; slot < distTableSize2; slot++)
- {
- posSlotPrices[(size_t)slot * 2 ] += delta;
- posSlotPrices[(size_t)slot * 2 + 1] += delta;
- delta += ((UInt32)1 << kNumBitPriceShiftBits);
- }
- }
-
- {
- UInt32 *dp = p->distancesPrices[lps];
-
- dp[0] = posSlotPrices[0];
- dp[1] = posSlotPrices[1];
- dp[2] = posSlotPrices[2];
- dp[3] = posSlotPrices[3];
-
- for (i = 4; i < kNumFullDistances; i += 2)
- {
- UInt32 slotPrice = posSlotPrices[GetPosSlot1(i)];
- dp[i ] = slotPrice + tempPrices[i];
- dp[i + 1] = slotPrice + tempPrices[i + 1];
- }
- }
- }
- // }
-}
-
-
-
-static void LzmaEnc_Construct(CLzmaEnc *p)
-{
- RangeEnc_Construct(&p->rc);
- MatchFinder_Construct(&MFB);
-
- #ifndef Z7_ST
- p->matchFinderMt.MatchFinder = &MFB;
- MatchFinderMt_Construct(&p->matchFinderMt);
- #endif
-
- {
- CLzmaEncProps props;
- LzmaEncProps_Init(&props);
- LzmaEnc_SetProps((CLzmaEncHandle)(void *)p, &props);
- }
-
- #ifndef LZMA_LOG_BSR
- LzmaEnc_FastPosInit(p->g_FastPos);
- #endif
-
- LzmaEnc_InitPriceTables(p->ProbPrices);
- p->litProbs = NULL;
- p->saveState.litProbs = NULL;
-}
-
-CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc)
-{
- void *p;
- p = ISzAlloc_Alloc(alloc, sizeof(CLzmaEnc));
- if (p)
- LzmaEnc_Construct((CLzmaEnc *)p);
- return p;
-}
-
-static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->litProbs);
- ISzAlloc_Free(alloc, p->saveState.litProbs);
- p->litProbs = NULL;
- p->saveState.litProbs = NULL;
-}
-
-static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- #ifndef Z7_ST
- MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
- #endif
-
- MatchFinder_Free(&MFB, allocBig);
- LzmaEnc_FreeLits(p, alloc);
- RangeEnc_Free(&p->rc, alloc);
-}
-
-void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- // GET_CLzmaEnc_p
- LzmaEnc_Destruct(p, alloc, allocBig);
- ISzAlloc_Free(alloc, p);
-}
-
-
-Z7_NO_INLINE
-static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpackSize)
-{
- UInt32 nowPos32, startPos32;
- if (p->needInit)
- {
- #ifndef Z7_ST
- if (p->mtMode)
- {
- RINOK(MatchFinderMt_InitMt(&p->matchFinderMt))
- }
- #endif
- p->matchFinder.Init(p->matchFinderObj);
- p->needInit = 0;
- }
-
- if (p->finished)
- return p->result;
- RINOK(CheckErrors(p))
-
- nowPos32 = (UInt32)p->nowPos64;
- startPos32 = nowPos32;
-
- if (p->nowPos64 == 0)
- {
- unsigned numPairs;
- Byte curByte;
- if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
- return Flush(p, nowPos32);
- ReadMatchDistances(p, &numPairs);
- RangeEnc_EncodeBit_0(&p->rc, &p->isMatch[kState_Start][0]);
- // p->state = kLiteralNextStates[p->state];
- curByte = *(p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset);
- LitEnc_Encode(&p->rc, p->litProbs, curByte);
- p->additionalOffset--;
- nowPos32++;
- }
-
- if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
-
- for (;;)
- {
- UInt32 dist;
- unsigned len, posState;
- UInt32 range, ttt, newBound;
- CLzmaProb *probs;
-
- if (p->fastMode)
- len = GetOptimumFast(p);
- else
- {
- unsigned oci = p->optCur;
- if (p->optEnd == oci)
- len = GetOptimum(p, nowPos32);
- else
- {
- const COptimal *opt = &p->opt[oci];
- len = opt->len;
- p->backRes = opt->dist;
- p->optCur = oci + 1;
- }
- }
-
- posState = (unsigned)nowPos32 & p->pbMask;
- range = p->rc.range;
- probs = &p->isMatch[p->state][posState];
-
- RC_BIT_PRE(&p->rc, probs)
-
- dist = p->backRes;
-
- #ifdef SHOW_STAT2
- printf("\n pos = %6X, len = %3u pos = %6u", nowPos32, len, dist);
- #endif
-
- if (dist == MARK_LIT)
- {
- Byte curByte;
- const Byte *data;
- unsigned state;
-
- RC_BIT_0(&p->rc, probs)
- p->rc.range = range;
- data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
- probs = LIT_PROBS(nowPos32, *(data - 1));
- curByte = *data;
- state = p->state;
- p->state = kLiteralNextStates[state];
- if (IsLitState(state))
- LitEnc_Encode(&p->rc, probs, curByte);
- else
- LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0]));
- }
- else
- {
- RC_BIT_1(&p->rc, probs)
- probs = &p->isRep[p->state];
- RC_BIT_PRE(&p->rc, probs)
-
- if (dist < LZMA_NUM_REPS)
- {
- RC_BIT_1(&p->rc, probs)
- probs = &p->isRepG0[p->state];
- RC_BIT_PRE(&p->rc, probs)
- if (dist == 0)
- {
- RC_BIT_0(&p->rc, probs)
- probs = &p->isRep0Long[p->state][posState];
- RC_BIT_PRE(&p->rc, probs)
- if (len != 1)
- {
- RC_BIT_1_BASE(&p->rc, probs)
- }
- else
- {
- RC_BIT_0_BASE(&p->rc, probs)
- p->state = kShortRepNextStates[p->state];
- }
- }
- else
- {
- RC_BIT_1(&p->rc, probs)
- probs = &p->isRepG1[p->state];
- RC_BIT_PRE(&p->rc, probs)
- if (dist == 1)
- {
- RC_BIT_0_BASE(&p->rc, probs)
- dist = p->reps[1];
- }
- else
- {
- RC_BIT_1(&p->rc, probs)
- probs = &p->isRepG2[p->state];
- RC_BIT_PRE(&p->rc, probs)
- if (dist == 2)
- {
- RC_BIT_0_BASE(&p->rc, probs)
- dist = p->reps[2];
- }
- else
- {
- RC_BIT_1_BASE(&p->rc, probs)
- dist = p->reps[3];
- p->reps[3] = p->reps[2];
- }
- p->reps[2] = p->reps[1];
- }
- p->reps[1] = p->reps[0];
- p->reps[0] = dist;
- }
-
- RC_NORM(&p->rc)
-
- p->rc.range = range;
-
- if (len != 1)
- {
- LenEnc_Encode(&p->repLenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState);
- --p->repLenEncCounter;
- p->state = kRepNextStates[p->state];
- }
- }
- else
- {
- unsigned posSlot;
- RC_BIT_0(&p->rc, probs)
- p->rc.range = range;
- p->state = kMatchNextStates[p->state];
-
- LenEnc_Encode(&p->lenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState);
- // --p->lenEnc.counter;
-
- dist -= LZMA_NUM_REPS;
- p->reps[3] = p->reps[2];
- p->reps[2] = p->reps[1];
- p->reps[1] = p->reps[0];
- p->reps[0] = dist + 1;
-
- p->matchPriceCount++;
- GetPosSlot(dist, posSlot)
- // RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], posSlot);
- {
- UInt32 sym = (UInt32)posSlot + (1 << kNumPosSlotBits);
- range = p->rc.range;
- probs = p->posSlotEncoder[GetLenToPosState(len)];
- do
- {
- CLzmaProb *prob = probs + (sym >> kNumPosSlotBits);
- UInt32 bit = (sym >> (kNumPosSlotBits - 1)) & 1;
- sym <<= 1;
- RC_BIT(&p->rc, prob, bit)
- }
- while (sym < (1 << kNumPosSlotBits * 2));
- p->rc.range = range;
- }
-
- if (dist >= kStartPosModelIndex)
- {
- unsigned footerBits = ((posSlot >> 1) - 1);
-
- if (dist < kNumFullDistances)
- {
- unsigned base = ((2 | (posSlot & 1)) << footerBits);
- RcTree_ReverseEncode(&p->rc, p->posEncoders + base, footerBits, (unsigned)(dist /* - base */));
- }
- else
- {
- UInt32 pos2 = (dist | 0xF) << (32 - footerBits);
- range = p->rc.range;
- // RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
- /*
- do
- {
- range >>= 1;
- p->rc.low += range & (0 - ((dist >> --footerBits) & 1));
- RC_NORM(&p->rc)
- }
- while (footerBits > kNumAlignBits);
- */
- do
- {
- range >>= 1;
- p->rc.low += range & (0 - (pos2 >> 31));
- pos2 += pos2;
- RC_NORM(&p->rc)
- }
- while (pos2 != 0xF0000000);
-
-
- // RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
-
- {
- unsigned m = 1;
- unsigned bit;
- bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
- bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
- bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
- bit = dist & 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit)
- p->rc.range = range;
- // p->alignPriceCount++;
- }
- }
- }
- }
- }
-
- nowPos32 += (UInt32)len;
- p->additionalOffset -= len;
-
- if (p->additionalOffset == 0)
- {
- UInt32 processed;
-
- if (!p->fastMode)
- {
- /*
- if (p->alignPriceCount >= 16) // kAlignTableSize
- FillAlignPrices(p);
- if (p->matchPriceCount >= 128)
- FillDistancesPrices(p);
- if (p->lenEnc.counter <= 0)
- LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices);
- */
- if (p->matchPriceCount >= 64)
- {
- FillAlignPrices(p);
- // { int y; for (y = 0; y < 100; y++) {
- FillDistancesPrices(p);
- // }}
- LenPriceEnc_UpdateTables(&p->lenEnc, (unsigned)1 << p->pb, &p->lenProbs, p->ProbPrices);
- }
- if (p->repLenEncCounter <= 0)
- {
- p->repLenEncCounter = REP_LEN_COUNT;
- LenPriceEnc_UpdateTables(&p->repLenEnc, (unsigned)1 << p->pb, &p->repLenProbs, p->ProbPrices);
- }
- }
-
- if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
- break;
- processed = nowPos32 - startPos32;
-
- if (maxPackSize)
- {
- if (processed + kNumOpts + 300 >= maxUnpackSize
- || RangeEnc_GetProcessed_sizet(&p->rc) + kPackReserve >= maxPackSize)
- break;
- }
- else if (processed >= (1 << 17))
- {
- p->nowPos64 += nowPos32 - startPos32;
- return CheckErrors(p);
- }
- }
- }
-
- p->nowPos64 += nowPos32 - startPos32;
- return Flush(p, nowPos32);
-}
-
-
-
-#define kBigHashDicLimit ((UInt32)1 << 24)
-
-static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- UInt32 beforeSize = kNumOpts;
- UInt32 dictSize;
-
- if (!RangeEnc_Alloc(&p->rc, alloc))
- return SZ_ERROR_MEM;
-
- #ifndef Z7_ST
- p->mtMode = (p->multiThread && !p->fastMode && (MFB.btMode != 0));
- #endif
-
- {
- unsigned lclp = p->lc + p->lp;
- if (!p->litProbs || !p->saveState.litProbs || p->lclp != lclp)
- {
- LzmaEnc_FreeLits(p, alloc);
- p->litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb));
- p->saveState.litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb));
- if (!p->litProbs || !p->saveState.litProbs)
- {
- LzmaEnc_FreeLits(p, alloc);
- return SZ_ERROR_MEM;
- }
- p->lclp = lclp;
- }
- }
-
- MFB.bigHash = (Byte)(p->dictSize > kBigHashDicLimit ? 1 : 0);
-
-
- dictSize = p->dictSize;
- if (dictSize == ((UInt32)2 << 30) ||
- dictSize == ((UInt32)3 << 30))
- {
- /* 21.03 : here we reduce the dictionary for 2 reasons:
- 1) we don't want 32-bit back_distance matches in decoder for 2 GB dictionary.
- 2) we want to eliminate useless last MatchFinder_Normalize3() for corner cases,
- where data size is aligned for 1 GB: 5/6/8 GB.
- That reduction must be >= 1 for such corner cases. */
- dictSize -= 1;
- }
-
- if (beforeSize + dictSize < keepWindowSize)
- beforeSize = keepWindowSize - dictSize;
-
- /* in worst case we can look ahead for
- max(LZMA_MATCH_LEN_MAX, numFastBytes + 1 + numFastBytes) bytes.
- we send larger value for (keepAfter) to MatchFinder_Create():
- (numFastBytes + LZMA_MATCH_LEN_MAX + 1)
- */
-
- #ifndef Z7_ST
- if (p->mtMode)
- {
- RINOK(MatchFinderMt_Create(&p->matchFinderMt, dictSize, beforeSize,
- p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 18.04 */
- , allocBig))
- p->matchFinderObj = &p->matchFinderMt;
- MFB.bigHash = (Byte)(MFB.hashMask >= 0xFFFFFF ? 1 : 0);
- MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
- }
- else
- #endif
- {
- if (!MatchFinder_Create(&MFB, dictSize, beforeSize,
- p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 21.03 */
- , allocBig))
- return SZ_ERROR_MEM;
- p->matchFinderObj = &MFB;
- MatchFinder_CreateVTable(&MFB, &p->matchFinder);
- }
-
- return SZ_OK;
-}
-
-static void LzmaEnc_Init(CLzmaEnc *p)
-{
- unsigned i;
- p->state = 0;
- p->reps[0] =
- p->reps[1] =
- p->reps[2] =
- p->reps[3] = 1;
-
- RangeEnc_Init(&p->rc);
-
- for (i = 0; i < (1 << kNumAlignBits); i++)
- p->posAlignEncoder[i] = kProbInitValue;
-
- for (i = 0; i < kNumStates; i++)
- {
- unsigned j;
- for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
- {
- p->isMatch[i][j] = kProbInitValue;
- p->isRep0Long[i][j] = kProbInitValue;
- }
- p->isRep[i] = kProbInitValue;
- p->isRepG0[i] = kProbInitValue;
- p->isRepG1[i] = kProbInitValue;
- p->isRepG2[i] = kProbInitValue;
- }
-
- {
- for (i = 0; i < kNumLenToPosStates; i++)
- {
- CLzmaProb *probs = p->posSlotEncoder[i];
- unsigned j;
- for (j = 0; j < (1 << kNumPosSlotBits); j++)
- probs[j] = kProbInitValue;
- }
- }
- {
- for (i = 0; i < kNumFullDistances; i++)
- p->posEncoders[i] = kProbInitValue;
- }
-
- {
- UInt32 num = (UInt32)0x300 << (p->lp + p->lc);
- UInt32 k;
- CLzmaProb *probs = p->litProbs;
- for (k = 0; k < num; k++)
- probs[k] = kProbInitValue;
- }
-
-
- LenEnc_Init(&p->lenProbs);
- LenEnc_Init(&p->repLenProbs);
-
- p->optEnd = 0;
- p->optCur = 0;
-
- {
- for (i = 0; i < kNumOpts; i++)
- p->opt[i].price = kInfinityPrice;
- }
-
- p->additionalOffset = 0;
-
- p->pbMask = ((unsigned)1 << p->pb) - 1;
- p->lpMask = ((UInt32)0x100 << p->lp) - ((unsigned)0x100 >> p->lc);
-
- // p->mf_Failure = False;
-}
-
-
-static void LzmaEnc_InitPrices(CLzmaEnc *p)
-{
- if (!p->fastMode)
- {
- FillDistancesPrices(p);
- FillAlignPrices(p);
- }
-
- p->lenEnc.tableSize =
- p->repLenEnc.tableSize =
- p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
-
- p->repLenEncCounter = REP_LEN_COUNT;
-
- LenPriceEnc_UpdateTables(&p->lenEnc, (unsigned)1 << p->pb, &p->lenProbs, p->ProbPrices);
- LenPriceEnc_UpdateTables(&p->repLenEnc, (unsigned)1 << p->pb, &p->repLenProbs, p->ProbPrices);
-}
-
-static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- unsigned i;
- for (i = kEndPosModelIndex / 2; i < kDicLogSizeMax; i++)
- if (p->dictSize <= ((UInt32)1 << i))
- break;
- p->distTableSize = i * 2;
-
- p->finished = False;
- p->result = SZ_OK;
- p->nowPos64 = 0;
- p->needInit = 1;
- RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig))
- LzmaEnc_Init(p);
- LzmaEnc_InitPrices(p);
- return SZ_OK;
-}
-
-static SRes LzmaEnc_Prepare(CLzmaEncHandle p,
- ISeqOutStreamPtr outStream,
- ISeqInStreamPtr inStream,
- ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- // GET_CLzmaEnc_p
- MatchFinder_SET_STREAM(&MFB, inStream)
- p->rc.outStream = outStream;
- return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
-}
-
-SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p,
- ISeqInStreamPtr inStream, UInt32 keepWindowSize,
- ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- // GET_CLzmaEnc_p
- MatchFinder_SET_STREAM(&MFB, inStream)
- return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
-}
-
-SRes LzmaEnc_MemPrepare(CLzmaEncHandle p,
- const Byte *src, SizeT srcLen,
- UInt32 keepWindowSize,
- ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- // GET_CLzmaEnc_p
- MatchFinder_SET_DIRECT_INPUT_BUF(&MFB, src, srcLen)
- LzmaEnc_SetDataSize(p, srcLen);
- return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
-}
-
-void LzmaEnc_Finish(CLzmaEncHandle p)
-{
- #ifndef Z7_ST
- // GET_CLzmaEnc_p
- if (p->mtMode)
- MatchFinderMt_ReleaseStream(&p->matchFinderMt);
- #else
- UNUSED_VAR(p)
- #endif
-}
-
-
-typedef struct
-{
- ISeqOutStream vt;
- Byte *data;
- size_t rem;
- BoolInt overflow;
-} CLzmaEnc_SeqOutStreamBuf;
-
-static size_t SeqOutStreamBuf_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLzmaEnc_SeqOutStreamBuf)
- if (p->rem < size)
- {
- size = p->rem;
- p->overflow = True;
- }
- if (size != 0)
- {
- memcpy(p->data, data, size);
- p->rem -= size;
- p->data += size;
- }
- return size;
-}
-
-
-/*
-UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle p)
-{
- GET_const_CLzmaEnc_p
- return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
-}
-*/
-
-const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p)
-{
- // GET_const_CLzmaEnc_p
- return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
-}
-
-
-// (desiredPackSize == 0) is not allowed
-SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
- Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
-{
- // GET_CLzmaEnc_p
- UInt64 nowPos64;
- SRes res;
- CLzmaEnc_SeqOutStreamBuf outStream;
-
- outStream.vt.Write = SeqOutStreamBuf_Write;
- outStream.data = dest;
- outStream.rem = *destLen;
- outStream.overflow = False;
-
- p->writeEndMark = False;
- p->finished = False;
- p->result = SZ_OK;
-
- if (reInit)
- LzmaEnc_Init(p);
- LzmaEnc_InitPrices(p);
- RangeEnc_Init(&p->rc);
- p->rc.outStream = &outStream.vt;
- nowPos64 = p->nowPos64;
-
- res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
-
- *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
- *destLen -= outStream.rem;
- if (outStream.overflow)
- return SZ_ERROR_OUTPUT_EOF;
-
- return res;
-}
-
-
-Z7_NO_INLINE
-static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgressPtr progress)
-{
- SRes res = SZ_OK;
-
- #ifndef Z7_ST
- Byte allocaDummy[0x300];
- allocaDummy[0] = 0;
- allocaDummy[1] = allocaDummy[0];
- #endif
-
- for (;;)
- {
- res = LzmaEnc_CodeOneBlock(p, 0, 0);
- if (res != SZ_OK || p->finished)
- break;
- if (progress)
- {
- res = ICompressProgress_Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
- if (res != SZ_OK)
- {
- res = SZ_ERROR_PROGRESS;
- break;
- }
- }
- }
-
- LzmaEnc_Finish((CLzmaEncHandle)(void *)p);
-
- /*
- if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&MFB))
- res = SZ_ERROR_FAIL;
- }
- */
-
- return res;
-}
-
-
-SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress,
- ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- // GET_CLzmaEnc_p
- RINOK(LzmaEnc_Prepare(p, outStream, inStream, alloc, allocBig))
- return LzmaEnc_Encode2(p, progress);
-}
-
-
-SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *props, SizeT *size)
-{
- if (*size < LZMA_PROPS_SIZE)
- return SZ_ERROR_PARAM;
- *size = LZMA_PROPS_SIZE;
- {
- // GET_CLzmaEnc_p
- const UInt32 dictSize = p->dictSize;
- UInt32 v;
- props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
-
- // we write aligned dictionary value to properties for lzma decoder
- if (dictSize >= ((UInt32)1 << 21))
- {
- const UInt32 kDictMask = ((UInt32)1 << 20) - 1;
- v = (dictSize + kDictMask) & ~kDictMask;
- if (v < dictSize)
- v = dictSize;
- }
- else
- {
- unsigned i = 11 * 2;
- do
- {
- v = (UInt32)(2 + (i & 1)) << (i >> 1);
- i++;
- }
- while (v < dictSize);
- }
-
- SetUi32(props + 1, v)
- return SZ_OK;
- }
-}
-
-
-unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p)
-{
- // GET_CLzmaEnc_p
- return (unsigned)p->writeEndMark;
-}
-
-
-SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
- int writeEndMark, ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- SRes res;
- // GET_CLzmaEnc_p
-
- CLzmaEnc_SeqOutStreamBuf outStream;
-
- outStream.vt.Write = SeqOutStreamBuf_Write;
- outStream.data = dest;
- outStream.rem = *destLen;
- outStream.overflow = False;
-
- p->writeEndMark = writeEndMark;
- p->rc.outStream = &outStream.vt;
-
- res = LzmaEnc_MemPrepare(p, src, srcLen, 0, alloc, allocBig);
-
- if (res == SZ_OK)
- {
- res = LzmaEnc_Encode2(p, progress);
- if (res == SZ_OK && p->nowPos64 != srcLen)
- res = SZ_ERROR_FAIL;
- }
-
- *destLen -= (SizeT)outStream.rem;
- if (outStream.overflow)
- return SZ_ERROR_OUTPUT_EOF;
- return res;
-}
-
-
-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
- const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
- ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- CLzmaEncHandle p = LzmaEnc_Create(alloc);
- SRes res;
- if (!p)
- return SZ_ERROR_MEM;
-
- res = LzmaEnc_SetProps(p, props);
- if (res == SZ_OK)
- {
- res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
- if (res == SZ_OK)
- res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
- writeEndMark, progress, alloc, allocBig);
- }
-
- LzmaEnc_Destroy(p, alloc, allocBig);
- return res;
-}
-
-
-/*
-#ifndef Z7_ST
-void LzmaEnc_GetLzThreads(CLzmaEncHandle p, HANDLE lz_threads[2])
-{
- GET_const_CLzmaEnc_p
- lz_threads[0] = p->matchFinderMt.hashSync.thread;
- lz_threads[1] = p->matchFinderMt.btSync.thread;
-}
-#endif
-*/
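The LzmaEnc_WriteProperties() routine removed above packs lc, lp and pb into a single byte as (pb * 5 + lp) * 9 + lc, followed by the dictionary size as a 32-bit little-endian value. For reference, a minimal standalone sketch of the inverse (plain C, hypothetical helper name, not part of the deleted sources):

```c
#include <stdint.h>
#include <stdio.h>

/* Unpack a 5-byte LZMA properties blob as written by LzmaEnc_WriteProperties(). */
static int parse_lzma_props(const unsigned char props[5])
{
    unsigned d = props[0];
    unsigned lc, lp, pb;
    uint32_t dictSize;
    if (d >= 9 * 5 * 5)          /* same range check the LZMA decoder applies to props[0] */
        return -1;
    lc = d % 9;
    d /= 9;
    lp = d % 5;
    pb = d / 5;
    dictSize = (uint32_t)props[1]
             | ((uint32_t)props[2] << 8)
             | ((uint32_t)props[3] << 16)
             | ((uint32_t)props[4] << 24);
    printf("lc=%u lp=%u pb=%u dictSize=%u\n", lc, lp, pb, (unsigned)dictSize);
    return 0;
}
```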
diff --git a/3rdparty/7z/src/LzmaEnc.h b/3rdparty/7z/src/LzmaEnc.h
deleted file mode 100644
index 08711cbac4..0000000000
--- a/3rdparty/7z/src/LzmaEnc.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* LzmaEnc.h -- LZMA Encoder
-2023-04-13 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA_ENC_H
-#define ZIP7_INC_LZMA_ENC_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define LZMA_PROPS_SIZE 5
-
-typedef struct
-{
- int level; /* 0 <= level <= 9 */
- UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
- (1 << 12) <= dictSize <= (3 << 29) for 64-bit version
- default = (1 << 24) */
- int lc; /* 0 <= lc <= 8, default = 3 */
- int lp; /* 0 <= lp <= 4, default = 0 */
- int pb; /* 0 <= pb <= 4, default = 2 */
- int algo; /* 0 - fast, 1 - normal, default = 1 */
- int fb; /* 5 <= fb <= 273, default = 32 */
- int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
- int numHashBytes; /* 2, 3 or 4, default = 4 */
- unsigned numHashOutBits; /* default = ? */
- UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
- unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
- int numThreads; /* 1 or 2, default = 2 */
-
- // int _pad;
-
- UInt64 reduceSize; /* estimated size of data that will be compressed. default = (UInt64)(Int64)-1.
- Encoder uses this value to reduce dictionary size */
-
- UInt64 affinity;
-} CLzmaEncProps;
-
-void LzmaEncProps_Init(CLzmaEncProps *p);
-void LzmaEncProps_Normalize(CLzmaEncProps *p);
-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
-
-
-/* ---------- CLzmaEncHandle Interface ---------- */
-
-/* LzmaEnc* functions can return the following exit codes:
-SRes:
- SZ_OK - OK
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_PARAM - Incorrect parameter in props
- SZ_ERROR_WRITE - ISeqOutStream write callback error
- SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output
- SZ_ERROR_PROGRESS - some break from progress callback
- SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
-*/
-
-typedef struct CLzmaEnc CLzmaEnc;
-typedef CLzmaEnc * CLzmaEncHandle;
-// Z7_DECLARE_HANDLE(CLzmaEncHandle)
-
-CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc);
-void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig);
-
-SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
-void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSize);
-SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
-unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p);
-
-SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
- ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
-SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
- int writeEndMark, ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
-
-
-/* ---------- One Call Interface ---------- */
-
-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
- const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
- ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
-
-EXTERN_C_END
-
-#endif
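For reference, a minimal usage sketch of the handle-based interface declared in this header. Assumptions not taken from the deleted sources: the caller supplies the ISeqInStream/ISeqOutStream objects, g_Alloc from Alloc.h is used for both allocators, and the 5-byte properties header is stored by the caller (for example at the front of its output container):

```c
#include "Alloc.h"
#include "LzmaEnc.h"

/* Hypothetical helper: encode inStream to outStream with level 9 settings. */
static SRes compress_stream(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
    Byte propsHeader[LZMA_PROPS_SIZE])
{
    CLzmaEncProps props;
    SizeT propsSize = LZMA_PROPS_SIZE;
    SRes res;
    CLzmaEncHandle enc = LzmaEnc_Create(&g_Alloc);
    if (!enc)
        return SZ_ERROR_MEM;

    LzmaEncProps_Init(&props);      /* defaults per LzmaLib.h: level 5, 16 MB dictionary */
    props.level = 9;
    res = LzmaEnc_SetProps(enc, &props);
    if (res == SZ_OK)               /* 5-byte header for the caller to store */
        res = LzmaEnc_WriteProperties(enc, propsHeader, &propsSize);
    if (res == SZ_OK)               /* NULL progress callback */
        res = LzmaEnc_Encode(enc, outStream, inStream, NULL, &g_Alloc, &g_Alloc);

    LzmaEnc_Destroy(enc, &g_Alloc, &g_Alloc);
    return res;
}
```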
diff --git a/3rdparty/7z/src/LzmaLib.c b/3rdparty/7z/src/LzmaLib.c
deleted file mode 100644
index 350c1953de..0000000000
--- a/3rdparty/7z/src/LzmaLib.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/* LzmaLib.c -- LZMA library wrapper
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Alloc.h"
-#include "LzmaDec.h"
-#include "LzmaEnc.h"
-#include "LzmaLib.h"
-
-Z7_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
- unsigned char *outProps, size_t *outPropsSize,
- int level, /* 0 <= level <= 9, default = 5 */
- unsigned dictSize, /* use (1 << N) or (3 << N). 4 KB < dictSize <= 128 MB */
- int lc, /* 0 <= lc <= 8, default = 3 */
- int lp, /* 0 <= lp <= 4, default = 0 */
- int pb, /* 0 <= pb <= 4, default = 2 */
- int fb, /* 5 <= fb <= 273, default = 32 */
- int numThreads /* 1 or 2, default = 2 */
-)
-{
- CLzmaEncProps props;
- LzmaEncProps_Init(&props);
- props.level = level;
- props.dictSize = dictSize;
- props.lc = lc;
- props.lp = lp;
- props.pb = pb;
- props.fb = fb;
- props.numThreads = numThreads;
-
- return LzmaEncode(dest, destLen, src, srcLen, &props, outProps, outPropsSize, 0,
- NULL, &g_Alloc, &g_Alloc);
-}
-
-
-Z7_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t *srcLen,
- const unsigned char *props, size_t propsSize)
-{
- ELzmaStatus status;
- return LzmaDecode(dest, destLen, src, srcLen, props, (unsigned)propsSize, LZMA_FINISH_ANY, &status, &g_Alloc);
-}
diff --git a/3rdparty/7z/src/LzmaLib.h b/3rdparty/7z/src/LzmaLib.h
deleted file mode 100644
index 79612a42e3..0000000000
--- a/3rdparty/7z/src/LzmaLib.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* LzmaLib.h -- LZMA library interface
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_LZMA_LIB_H
-#define ZIP7_INC_LZMA_LIB_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define Z7_STDAPI int Z7_STDCALL
-
-#define LZMA_PROPS_SIZE 5
-
-/*
-RAM requirements for LZMA:
- for compression: (dictSize * 11.5 + 6 MB) + state_size
- for decompression: dictSize + state_size
- state_size = (4 + (1.5 << (lc + lp))) KB
- by default (lc=3, lp=0), state_size = 16 KB.
-
-LZMA properties (5 bytes) format
- Offset Size Description
- 0 1 lc, lp and pb in encoded form.
- 1 4 dictSize (little endian).
-*/
-
-/*
-LzmaCompress
-------------
-
-outPropsSize -
- In: the pointer to the size of outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5.
- Out: the pointer to the size of written properties in outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5.
-
- LZMA Encoder will use default values for any parameter, if it is
- -1 for any from: level, lc, lp, pb, fb, numThreads
- 0 for dictSize
-
-level - compression level: 0 <= level <= 9;
-
- level dictSize algo fb
- 0: 64 KB 0 32
- 1: 256 KB 0 32
- 2: 1 MB 0 32
- 3: 4 MB 0 32
- 4: 16 MB 0 32
- 5: 16 MB 1 32
- 6: 32 MB 1 32
- 7: 32 MB 1 64
- 8: 64 MB 1 64
- 9: 64 MB 1 64
-
- The default value for "level" is 5.
-
- algo = 0 means fast method
- algo = 1 means normal method
-
-dictSize - The dictionary size in bytes. The maximum value is
- 128 MB = (1 << 27) bytes for 32-bit version
- 1 GB = (1 << 30) bytes for 64-bit version
- The default value is 16 MB = (1 << 24) bytes.
- It's recommended to use the dictionary that is larger than 4 KB and
- that can be calculated as (1 << N) or (3 << N) sizes.
-
-lc - The number of literal context bits (high bits of previous literal).
- It can be in the range from 0 to 8. The default value is 3.
- Sometimes lc=4 gives the gain for big files.
-
-lp - The number of literal pos bits (low bits of current position for literals).
- It can be in the range from 0 to 4. The default value is 0.
- The lp switch is intended for periodical data when the period is equal to 2^lp.
- For example, for 32-bit (4 bytes) periodical data you can use lp=2. Often it's
- better to set lc=0, if you change lp switch.
-
-pb - The number of pos bits (low bits of current position).
- It can be in the range from 0 to 4. The default value is 2.
- The pb switch is intended for periodical data when the period is equal 2^pb.
-
-fb - Word size (the number of fast bytes).
- It can be in the range from 5 to 273. The default value is 32.
- Usually, a big number gives a little bit better compression ratio and
- slower compression process.
-
-numThreads - The number of threads. 1 or 2. The default value is 2.
- Fast mode (algo = 0) can use only 1 thread.
-
-In:
- dest - output data buffer
- destLen - output data buffer size
- src - input data
- srcLen - input data size
-Out:
- destLen - processed output size
-Returns:
- SZ_OK - OK
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_PARAM - Incorrect parameter
- SZ_ERROR_OUTPUT_EOF - output buffer overflow
- SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
-*/
-
-Z7_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
- unsigned char *outProps, size_t *outPropsSize, /* *outPropsSize must be = 5 */
- int level, /* 0 <= level <= 9, default = 5 */
- unsigned dictSize, /* default = (1 << 24) */
- int lc, /* 0 <= lc <= 8, default = 3 */
- int lp, /* 0 <= lp <= 4, default = 0 */
- int pb, /* 0 <= pb <= 4, default = 2 */
- int fb, /* 5 <= fb <= 273, default = 32 */
- int numThreads /* 1 or 2, default = 2 */
- );
-
-/*
-LzmaUncompress
---------------
-In:
- dest - output data buffer
- destLen - output data buffer size
- src - input data
- srcLen - input data size
-Out:
- destLen - processed output size
- srcLen - processed input size
-Returns:
- SZ_OK - OK
- SZ_ERROR_DATA - Data error
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_UNSUPPORTED - Unsupported properties
- SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer (src)
-*/
-
-Z7_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, SizeT *srcLen,
- const unsigned char *props, size_t propsSize);
-
-EXTERN_C_END
-
-#endif
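A round-trip sketch for the two wrappers declared above, passing -1 / 0 to request the documented defaults. The output-buffer headroom is an ad-hoc margin chosen for this example, not a bound the header guarantees:

```c
#include <stdlib.h>
#include <string.h>
#include "LzmaLib.h"

/* Hypothetical round-trip check: compress src, decompress, compare. */
static SRes lzma_roundtrip(const unsigned char *src, size_t srcLen)
{
    unsigned char props[LZMA_PROPS_SIZE];
    size_t propsSize = LZMA_PROPS_SIZE;
    size_t packedLen = srcLen + srcLen / 3 + 128;   /* ad-hoc headroom */
    unsigned char *packed = (unsigned char *)malloc(packedLen);
    unsigned char *unpacked = (unsigned char *)malloc(srcLen ? srcLen : 1);
    SRes res = SZ_ERROR_MEM;

    if (packed && unpacked)
    {
        res = LzmaCompress(packed, &packedLen, src, srcLen,
            props, &propsSize,
            -1,                 /* level:      default (5)       */
            0,                  /* dictSize:   default (1 << 24) */
            -1, -1, -1, -1,     /* lc, lp, pb, fb: defaults      */
            -1);                /* numThreads: default (2)       */
        if (res == SZ_OK)
        {
            size_t unpackedLen = srcLen;
            SizeT consumed = packedLen;
            res = LzmaUncompress(unpacked, &unpackedLen, packed, &consumed,
                props, propsSize);
            if (res == SZ_OK && (unpackedLen != srcLen || memcmp(unpacked, src, srcLen) != 0))
                res = SZ_ERROR_DATA;
        }
    }
    free(packed);
    free(unpacked);
    return res;
}
```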
diff --git a/3rdparty/7z/src/MtCoder.c b/3rdparty/7z/src/MtCoder.c
deleted file mode 100644
index f264584ed8..0000000000
--- a/3rdparty/7z/src/MtCoder.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/* MtCoder.c -- Multi-thread Coder
-2023-04-13 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "MtCoder.h"
-
-#ifndef Z7_ST
-
-static SRes MtProgressThunk_Progress(ICompressProgressPtr pp, UInt64 inSize, UInt64 outSize)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CMtProgressThunk)
- UInt64 inSize2 = 0;
- UInt64 outSize2 = 0;
- if (inSize != (UInt64)(Int64)-1)
- {
- inSize2 = inSize - p->inSize;
- p->inSize = inSize;
- }
- if (outSize != (UInt64)(Int64)-1)
- {
- outSize2 = outSize - p->outSize;
- p->outSize = outSize;
- }
- return MtProgress_ProgressAdd(p->mtProgress, inSize2, outSize2);
-}
-
-
-void MtProgressThunk_CreateVTable(CMtProgressThunk *p)
-{
- p->vt.Progress = MtProgressThunk_Progress;
-}
-
-
-
-#define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; }
-
-
-static THREAD_FUNC_DECL ThreadFunc(void *pp);
-
-
-static SRes MtCoderThread_CreateAndStart(CMtCoderThread *t)
-{
- WRes wres = AutoResetEvent_OptCreate_And_Reset(&t->startEvent);
- if (wres == 0)
- {
- t->stop = False;
- if (!Thread_WasCreated(&t->thread))
- wres = Thread_Create(&t->thread, ThreadFunc, t);
- if (wres == 0)
- wres = Event_Set(&t->startEvent);
- }
- if (wres == 0)
- return SZ_OK;
- return MY_SRes_HRESULT_FROM_WRes(wres);
-}
-
-
-static void MtCoderThread_Destruct(CMtCoderThread *t)
-{
- if (Thread_WasCreated(&t->thread))
- {
- t->stop = 1;
- Event_Set(&t->startEvent);
- Thread_Wait_Close(&t->thread);
- }
-
- Event_Close(&t->startEvent);
-
- if (t->inBuf)
- {
- ISzAlloc_Free(t->mtCoder->allocBig, t->inBuf);
- t->inBuf = NULL;
- }
-}
-
-
-
-
-/*
- ThreadFunc2() returns:
- SZ_OK - in all normal cases (even for stream error or memory allocation error)
- SZ_ERROR_THREAD - in case of failure in a system synchronization function
-*/
-
-static SRes ThreadFunc2(CMtCoderThread *t)
-{
- CMtCoder *mtc = t->mtCoder;
-
- for (;;)
- {
- unsigned bi;
- SRes res;
- SRes res2;
- BoolInt finished;
- unsigned bufIndex;
- size_t size;
- const Byte *inData;
- UInt64 readProcessed = 0;
-
- RINOK_THREAD(Event_Wait(&mtc->readEvent))
-
- /* after Event_Wait(&mtc->readEvent) we must call Event_Set(&mtc->readEvent) in any case to unlock other threads */
-
- if (mtc->stopReading)
- {
- return Event_Set(&mtc->readEvent) == 0 ? SZ_OK : SZ_ERROR_THREAD;
- }
-
- res = MtProgress_GetError(&mtc->mtProgress);
-
- size = 0;
- inData = NULL;
- finished = True;
-
- if (res == SZ_OK)
- {
- size = mtc->blockSize;
- if (mtc->inStream)
- {
- if (!t->inBuf)
- {
- t->inBuf = (Byte *)ISzAlloc_Alloc(mtc->allocBig, mtc->blockSize);
- if (!t->inBuf)
- res = SZ_ERROR_MEM;
- }
- if (res == SZ_OK)
- {
- res = SeqInStream_ReadMax(mtc->inStream, t->inBuf, &size);
- readProcessed = mtc->readProcessed + size;
- mtc->readProcessed = readProcessed;
- }
- if (res != SZ_OK)
- {
- mtc->readRes = res;
- /* after reading error - we can stop encoding of previous blocks */
- MtProgress_SetError(&mtc->mtProgress, res);
- }
- else
- finished = (size != mtc->blockSize);
- }
- else
- {
- size_t rem;
- readProcessed = mtc->readProcessed;
- rem = mtc->inDataSize - (size_t)readProcessed;
- if (size > rem)
- size = rem;
- inData = mtc->inData + (size_t)readProcessed;
- readProcessed += size;
- mtc->readProcessed = readProcessed;
- finished = (mtc->inDataSize == (size_t)readProcessed);
- }
- }
-
- /* we must get some block from blocksSemaphore before Event_Set(&mtc->readEvent) */
-
- res2 = SZ_OK;
-
- if (Semaphore_Wait(&mtc->blocksSemaphore) != 0)
- {
- res2 = SZ_ERROR_THREAD;
- if (res == SZ_OK)
- {
- res = res2;
- // MtProgress_SetError(&mtc->mtProgress, res);
- }
- }
-
- bi = mtc->blockIndex;
-
- if (++mtc->blockIndex >= mtc->numBlocksMax)
- mtc->blockIndex = 0;
-
- bufIndex = (unsigned)(int)-1;
-
- if (res == SZ_OK)
- res = MtProgress_GetError(&mtc->mtProgress);
-
- if (res != SZ_OK)
- finished = True;
-
- if (!finished)
- {
- if (mtc->numStartedThreads < mtc->numStartedThreadsLimit
- && mtc->expectedDataSize != readProcessed)
- {
- res = MtCoderThread_CreateAndStart(&mtc->threads[mtc->numStartedThreads]);
- if (res == SZ_OK)
- mtc->numStartedThreads++;
- else
- {
- MtProgress_SetError(&mtc->mtProgress, res);
- finished = True;
- }
- }
- }
-
- if (finished)
- mtc->stopReading = True;
-
- RINOK_THREAD(Event_Set(&mtc->readEvent))
-
- if (res2 != SZ_OK)
- return res2;
-
- if (res == SZ_OK)
- {
- CriticalSection_Enter(&mtc->cs);
- bufIndex = mtc->freeBlockHead;
- mtc->freeBlockHead = mtc->freeBlockList[bufIndex];
- CriticalSection_Leave(&mtc->cs);
-
- res = mtc->mtCallback->Code(mtc->mtCallbackObject, t->index, bufIndex,
- mtc->inStream ? t->inBuf : inData, size, finished);
-
- // MtProgress_Reinit(&mtc->mtProgress, t->index);
-
- if (res != SZ_OK)
- MtProgress_SetError(&mtc->mtProgress, res);
- }
-
- {
- CMtCoderBlock *block = &mtc->blocks[bi];
- block->res = res;
- block->bufIndex = bufIndex;
- block->finished = finished;
- }
-
- #ifdef MTCODER_USE_WRITE_THREAD
- RINOK_THREAD(Event_Set(&mtc->writeEvents[bi]))
- #else
- {
- unsigned wi;
- {
- CriticalSection_Enter(&mtc->cs);
- wi = mtc->writeIndex;
- if (wi == bi)
- mtc->writeIndex = (unsigned)(int)-1;
- else
- mtc->ReadyBlocks[bi] = True;
- CriticalSection_Leave(&mtc->cs);
- }
-
- if (wi != bi)
- {
- if (res != SZ_OK || finished)
- return 0;
- continue;
- }
-
- if (mtc->writeRes != SZ_OK)
- res = mtc->writeRes;
-
- for (;;)
- {
- if (res == SZ_OK && bufIndex != (unsigned)(int)-1)
- {
- res = mtc->mtCallback->Write(mtc->mtCallbackObject, bufIndex);
- if (res != SZ_OK)
- {
- mtc->writeRes = res;
- MtProgress_SetError(&mtc->mtProgress, res);
- }
- }
-
- if (++wi >= mtc->numBlocksMax)
- wi = 0;
- {
- BoolInt isReady;
-
- CriticalSection_Enter(&mtc->cs);
-
- if (bufIndex != (unsigned)(int)-1)
- {
- mtc->freeBlockList[bufIndex] = mtc->freeBlockHead;
- mtc->freeBlockHead = bufIndex;
- }
-
- isReady = mtc->ReadyBlocks[wi];
-
- if (isReady)
- mtc->ReadyBlocks[wi] = False;
- else
- mtc->writeIndex = wi;
-
- CriticalSection_Leave(&mtc->cs);
-
- RINOK_THREAD(Semaphore_Release1(&mtc->blocksSemaphore))
-
- if (!isReady)
- break;
- }
-
- {
- CMtCoderBlock *block = &mtc->blocks[wi];
- if (res == SZ_OK && block->res != SZ_OK)
- res = block->res;
- bufIndex = block->bufIndex;
- finished = block->finished;
- }
- }
- }
- #endif
-
- if (finished || res != SZ_OK)
- return 0;
- }
-}
-
-
-static THREAD_FUNC_DECL ThreadFunc(void *pp)
-{
- CMtCoderThread *t = (CMtCoderThread *)pp;
- for (;;)
- {
- if (Event_Wait(&t->startEvent) != 0)
- return (THREAD_FUNC_RET_TYPE)SZ_ERROR_THREAD;
- if (t->stop)
- return 0;
- {
- SRes res = ThreadFunc2(t);
- CMtCoder *mtc = t->mtCoder;
- if (res != SZ_OK)
- {
- MtProgress_SetError(&mtc->mtProgress, res);
- }
-
- #ifndef MTCODER_USE_WRITE_THREAD
- {
- unsigned numFinished = (unsigned)InterlockedIncrement(&mtc->numFinishedThreads);
- if (numFinished == mtc->numStartedThreads)
- if (Event_Set(&mtc->finishedEvent) != 0)
- return (THREAD_FUNC_RET_TYPE)SZ_ERROR_THREAD;
- }
- #endif
- }
- }
-}
-
-
-
-void MtCoder_Construct(CMtCoder *p)
-{
- unsigned i;
-
- p->blockSize = 0;
- p->numThreadsMax = 0;
- p->expectedDataSize = (UInt64)(Int64)-1;
-
- p->inStream = NULL;
- p->inData = NULL;
- p->inDataSize = 0;
-
- p->progress = NULL;
- p->allocBig = NULL;
-
- p->mtCallback = NULL;
- p->mtCallbackObject = NULL;
-
- p->allocatedBufsSize = 0;
-
- Event_Construct(&p->readEvent);
- Semaphore_Construct(&p->blocksSemaphore);
-
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- {
- CMtCoderThread *t = &p->threads[i];
- t->mtCoder = p;
- t->index = i;
- t->inBuf = NULL;
- t->stop = False;
- Event_Construct(&t->startEvent);
- Thread_CONSTRUCT(&t->thread)
- }
-
- #ifdef MTCODER_USE_WRITE_THREAD
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- Event_Construct(&p->writeEvents[i]);
- #else
- Event_Construct(&p->finishedEvent);
- #endif
-
- CriticalSection_Init(&p->cs);
- CriticalSection_Init(&p->mtProgress.cs);
-}
-
-
-
-
-static void MtCoder_Free(CMtCoder *p)
-{
- unsigned i;
-
- /*
- p->stopReading = True;
- if (Event_IsCreated(&p->readEvent))
- Event_Set(&p->readEvent);
- */
-
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- MtCoderThread_Destruct(&p->threads[i]);
-
- Event_Close(&p->readEvent);
- Semaphore_Close(&p->blocksSemaphore);
-
- #ifdef MTCODER_USE_WRITE_THREAD
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- Event_Close(&p->writeEvents[i]);
- #else
- Event_Close(&p->finishedEvent);
- #endif
-}
-
-
-void MtCoder_Destruct(CMtCoder *p)
-{
- MtCoder_Free(p);
-
- CriticalSection_Delete(&p->cs);
- CriticalSection_Delete(&p->mtProgress.cs);
-}
-
-
-SRes MtCoder_Code(CMtCoder *p)
-{
- unsigned numThreads = p->numThreadsMax;
- unsigned numBlocksMax;
- unsigned i;
- SRes res = SZ_OK;
-
- if (numThreads > MTCODER_THREADS_MAX)
- numThreads = MTCODER_THREADS_MAX;
- numBlocksMax = MTCODER_GET_NUM_BLOCKS_FROM_THREADS(numThreads);
-
- if (p->blockSize < ((UInt32)1 << 26)) numBlocksMax++;
- if (p->blockSize < ((UInt32)1 << 24)) numBlocksMax++;
- if (p->blockSize < ((UInt32)1 << 22)) numBlocksMax++;
-
- if (numBlocksMax > MTCODER_BLOCKS_MAX)
- numBlocksMax = MTCODER_BLOCKS_MAX;
-
- if (p->blockSize != p->allocatedBufsSize)
- {
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- {
- CMtCoderThread *t = &p->threads[i];
- if (t->inBuf)
- {
- ISzAlloc_Free(p->allocBig, t->inBuf);
- t->inBuf = NULL;
- }
- }
- p->allocatedBufsSize = p->blockSize;
- }
-
- p->readRes = SZ_OK;
-
- MtProgress_Init(&p->mtProgress, p->progress);
-
- #ifdef MTCODER_USE_WRITE_THREAD
- for (i = 0; i < numBlocksMax; i++)
- {
- RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->writeEvents[i]))
- }
- #else
- RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->finishedEvent))
- #endif
-
- {
- RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->readEvent))
- RINOK_THREAD(Semaphore_OptCreateInit(&p->blocksSemaphore, numBlocksMax, numBlocksMax))
- }
-
- for (i = 0; i < MTCODER_BLOCKS_MAX - 1; i++)
- p->freeBlockList[i] = i + 1;
- p->freeBlockList[MTCODER_BLOCKS_MAX - 1] = (unsigned)(int)-1;
- p->freeBlockHead = 0;
-
- p->readProcessed = 0;
- p->blockIndex = 0;
- p->numBlocksMax = numBlocksMax;
- p->stopReading = False;
-
- #ifndef MTCODER_USE_WRITE_THREAD
- p->writeIndex = 0;
- p->writeRes = SZ_OK;
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- p->ReadyBlocks[i] = False;
- p->numFinishedThreads = 0;
- #endif
-
- p->numStartedThreadsLimit = numThreads;
- p->numStartedThreads = 0;
-
- // for (i = 0; i < numThreads; i++)
- {
- CMtCoderThread *nextThread = &p->threads[p->numStartedThreads++];
- RINOK(MtCoderThread_CreateAndStart(nextThread))
- }
-
- RINOK_THREAD(Event_Set(&p->readEvent))
-
- #ifdef MTCODER_USE_WRITE_THREAD
- {
- unsigned bi = 0;
-
- for (;; bi++)
- {
- if (bi >= numBlocksMax)
- bi = 0;
-
- RINOK_THREAD(Event_Wait(&p->writeEvents[bi]))
-
- {
- const CMtCoderBlock *block = &p->blocks[bi];
- unsigned bufIndex = block->bufIndex;
- BoolInt finished = block->finished;
- if (res == SZ_OK && block->res != SZ_OK)
- res = block->res;
-
- if (bufIndex != (unsigned)(int)-1)
- {
- if (res == SZ_OK)
- {
- res = p->mtCallback->Write(p->mtCallbackObject, bufIndex);
- if (res != SZ_OK)
- MtProgress_SetError(&p->mtProgress, res);
- }
-
- CriticalSection_Enter(&p->cs);
- {
- p->freeBlockList[bufIndex] = p->freeBlockHead;
- p->freeBlockHead = bufIndex;
- }
- CriticalSection_Leave(&p->cs);
- }
-
- RINOK_THREAD(Semaphore_Release1(&p->blocksSemaphore))
-
- if (finished)
- break;
- }
- }
- }
- #else
- {
- WRes wres = Event_Wait(&p->finishedEvent);
- res = MY_SRes_HRESULT_FROM_WRes(wres);
- }
- #endif
-
- if (res == SZ_OK)
- res = p->readRes;
-
- if (res == SZ_OK)
- res = p->mtProgress.res;
-
- #ifndef MTCODER_USE_WRITE_THREAD
- if (res == SZ_OK)
- res = p->writeRes;
- #endif
-
- if (res != SZ_OK)
- MtCoder_Free(p);
- return res;
-}
-
-#endif
-
-#undef RINOK_THREAD
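A usage sketch for the block pipeline removed above (none of this is from the deleted sources; the Store* names are hypothetical). The callback object owns one output buffer per block slot: Code() runs on worker threads with a free outBufIndex, and Write() is called strictly in block order, so a real coder (upstream's Lzma2Enc, for instance) would compress in Code() rather than copy:

```c
#include <stdio.h>
#include <string.h>
#include "Alloc.h"
#include "MtCoder.h"

#ifndef Z7_ST

typedef struct
{
    IMtCoderCallback2 vt;               /* { Code, Write } function table   */
    Byte *bufs[MTCODER_BLOCKS_MAX];     /* one output buffer per block slot */
    size_t sizes[MTCODER_BLOCKS_MAX];
    size_t blockSize;
    FILE *out;
} CStoreCoder;

static SRes Store_Code(void *pp, unsigned coderIndex, unsigned outBufIndex,
    const Byte *src, size_t srcSize, int finished)
{
    CStoreCoder *p = (CStoreCoder *)pp;
    (void)coderIndex; (void)finished;
    if (!p->bufs[outBufIndex])
    {
        p->bufs[outBufIndex] = (Byte *)ISzAlloc_Alloc(&g_Alloc, p->blockSize);
        if (!p->bufs[outBufIndex])
            return SZ_ERROR_MEM;
    }
    memcpy(p->bufs[outBufIndex], src, srcSize);   /* a real coder compresses here */
    p->sizes[outBufIndex] = srcSize;
    return SZ_OK;
}

static SRes Store_Write(void *pp, unsigned outBufIndex)
{
    CStoreCoder *p = (CStoreCoder *)pp;           /* called in block order */
    const size_t size = p->sizes[outBufIndex];
    return fwrite(p->bufs[outBufIndex], 1, size, p->out) == size ? SZ_OK : SZ_ERROR_WRITE;
}

static SRes store_mt(const Byte *data, size_t dataSize, FILE *out)
{
    SRes res;
    unsigned i;
    CMtCoder mt;
    CStoreCoder store;

    memset(&store, 0, sizeof(store));
    store.vt.Code = Store_Code;
    store.vt.Write = Store_Write;
    store.blockSize = (size_t)1 << 20;
    store.out = out;

    MtCoder_Construct(&mt);
    mt.blockSize = store.blockSize;
    mt.numThreadsMax = 4;
    mt.inData = data;                   /* memory input; inStream stays NULL */
    mt.inDataSize = dataSize;
    mt.allocBig = &g_Alloc;
    mt.mtCallback = &store.vt;
    mt.mtCallbackObject = &store;

    res = MtCoder_Code(&mt);
    MtCoder_Destruct(&mt);

    for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
        ISzAlloc_Free(&g_Alloc, store.bufs[i]);
    return res;
}

#endif
```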
diff --git a/3rdparty/7z/src/MtCoder.h b/3rdparty/7z/src/MtCoder.h
deleted file mode 100644
index c031fe0f73..0000000000
--- a/3rdparty/7z/src/MtCoder.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* MtCoder.h -- Multi-thread Coder
-2023-04-13 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_MT_CODER_H
-#define ZIP7_INC_MT_CODER_H
-
-#include "MtDec.h"
-
-EXTERN_C_BEGIN
-
-/*
- if ( defined MTCODER_USE_WRITE_THREAD) : main thread writes all data blocks to output stream
- if (not defined MTCODER_USE_WRITE_THREAD) : any coder thread can write data blocks to output stream
-*/
-/* #define MTCODER_USE_WRITE_THREAD */
-
-#ifndef Z7_ST
- #define MTCODER_GET_NUM_BLOCKS_FROM_THREADS(numThreads) ((numThreads) + (numThreads) / 8 + 1)
- #define MTCODER_THREADS_MAX 64
- #define MTCODER_BLOCKS_MAX (MTCODER_GET_NUM_BLOCKS_FROM_THREADS(MTCODER_THREADS_MAX) + 3)
-#else
- #define MTCODER_THREADS_MAX 1
- #define MTCODER_BLOCKS_MAX 1
-#endif
-
-
-#ifndef Z7_ST
-
-
-typedef struct
-{
- ICompressProgress vt;
- CMtProgress *mtProgress;
- UInt64 inSize;
- UInt64 outSize;
-} CMtProgressThunk;
-
-void MtProgressThunk_CreateVTable(CMtProgressThunk *p);
-
-#define MtProgressThunk_INIT(p) { (p)->inSize = 0; (p)->outSize = 0; }
-
-
-struct CMtCoder_;
-
-
-typedef struct
-{
- struct CMtCoder_ *mtCoder;
- unsigned index;
- int stop;
- Byte *inBuf;
-
- CAutoResetEvent startEvent;
- CThread thread;
-} CMtCoderThread;
-
-
-typedef struct
-{
- SRes (*Code)(void *p, unsigned coderIndex, unsigned outBufIndex,
- const Byte *src, size_t srcSize, int finished);
- SRes (*Write)(void *p, unsigned outBufIndex);
-} IMtCoderCallback2;
-
-
-typedef struct
-{
- SRes res;
- unsigned bufIndex;
- BoolInt finished;
-} CMtCoderBlock;
-
-
-typedef struct CMtCoder_
-{
- /* input variables */
-
- size_t blockSize; /* size of input block */
- unsigned numThreadsMax;
- UInt64 expectedDataSize;
-
- ISeqInStreamPtr inStream;
- const Byte *inData;
- size_t inDataSize;
-
- ICompressProgressPtr progress;
- ISzAllocPtr allocBig;
-
- IMtCoderCallback2 *mtCallback;
- void *mtCallbackObject;
-
-
- /* internal variables */
-
- size_t allocatedBufsSize;
-
- CAutoResetEvent readEvent;
- CSemaphore blocksSemaphore;
-
- BoolInt stopReading;
- SRes readRes;
-
- #ifdef MTCODER_USE_WRITE_THREAD
- CAutoResetEvent writeEvents[MTCODER_BLOCKS_MAX];
- #else
- CAutoResetEvent finishedEvent;
- SRes writeRes;
- unsigned writeIndex;
- Byte ReadyBlocks[MTCODER_BLOCKS_MAX];
- LONG numFinishedThreads;
- #endif
-
- unsigned numStartedThreadsLimit;
- unsigned numStartedThreads;
-
- unsigned numBlocksMax;
- unsigned blockIndex;
- UInt64 readProcessed;
-
- CCriticalSection cs;
-
- unsigned freeBlockHead;
- unsigned freeBlockList[MTCODER_BLOCKS_MAX];
-
- CMtProgress mtProgress;
- CMtCoderBlock blocks[MTCODER_BLOCKS_MAX];
- CMtCoderThread threads[MTCODER_THREADS_MAX];
-} CMtCoder;
-
-
-void MtCoder_Construct(CMtCoder *p);
-void MtCoder_Destruct(CMtCoder *p);
-SRes MtCoder_Code(CMtCoder *p);
-
-
-#endif
-
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/MtDec.c b/3rdparty/7z/src/MtDec.c
deleted file mode 100644
index 5aef9447d8..0000000000
--- a/3rdparty/7z/src/MtDec.c
+++ /dev/null
@@ -1,1114 +0,0 @@
-/* MtDec.c -- Multi-thread Decoder
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-// #define SHOW_DEBUG_INFO
-
-// #include <stdio.h>
-#include <string.h>
-
-#ifdef SHOW_DEBUG_INFO
-#include <stdio.h>
-#endif
-
-#include "MtDec.h"
-
-#ifndef Z7_ST
-
-#ifdef SHOW_DEBUG_INFO
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-
-#define PRF_STR_INT(s, d) PRF(printf("\n" s " %d\n", (unsigned)d))
-
-void MtProgress_Init(CMtProgress *p, ICompressProgressPtr progress)
-{
- p->progress = progress;
- p->res = SZ_OK;
- p->totalInSize = 0;
- p->totalOutSize = 0;
-}
-
-
-SRes MtProgress_Progress_ST(CMtProgress *p)
-{
- if (p->res == SZ_OK && p->progress)
- if (ICompressProgress_Progress(p->progress, p->totalInSize, p->totalOutSize) != SZ_OK)
- p->res = SZ_ERROR_PROGRESS;
- return p->res;
-}
-
-
-SRes MtProgress_ProgressAdd(CMtProgress *p, UInt64 inSize, UInt64 outSize)
-{
- SRes res;
- CriticalSection_Enter(&p->cs);
-
- p->totalInSize += inSize;
- p->totalOutSize += outSize;
- if (p->res == SZ_OK && p->progress)
- if (ICompressProgress_Progress(p->progress, p->totalInSize, p->totalOutSize) != SZ_OK)
- p->res = SZ_ERROR_PROGRESS;
- res = p->res;
-
- CriticalSection_Leave(&p->cs);
- return res;
-}
-
-
-SRes MtProgress_GetError(CMtProgress *p)
-{
- SRes res;
- CriticalSection_Enter(&p->cs);
- res = p->res;
- CriticalSection_Leave(&p->cs);
- return res;
-}
-
-
-void MtProgress_SetError(CMtProgress *p, SRes res)
-{
- CriticalSection_Enter(&p->cs);
- if (p->res == SZ_OK)
- p->res = res;
- CriticalSection_Leave(&p->cs);
-}
-
-
-#define RINOK_THREAD(x) RINOK_WRes(x)
-
-
-struct CMtDecBufLink_
-{
- struct CMtDecBufLink_ *next;
- void *pad[3];
-};
-
-typedef struct CMtDecBufLink_ CMtDecBufLink;
-
-#define MTDEC__LINK_DATA_OFFSET sizeof(CMtDecBufLink)
-#define MTDEC__DATA_PTR_FROM_LINK(link) ((Byte *)(link) + MTDEC__LINK_DATA_OFFSET)
-
-
-
-static THREAD_FUNC_DECL MtDec_ThreadFunc(void *pp);
-
-
-static WRes MtDecThread_CreateEvents(CMtDecThread *t)
-{
- WRes wres = AutoResetEvent_OptCreate_And_Reset(&t->canWrite);
- if (wres == 0)
- {
- wres = AutoResetEvent_OptCreate_And_Reset(&t->canRead);
- if (wres == 0)
- return SZ_OK;
- }
- return wres;
-}
-
-
-static SRes MtDecThread_CreateAndStart(CMtDecThread *t)
-{
- WRes wres = MtDecThread_CreateEvents(t);
- // wres = 17; // for test
- if (wres == 0)
- {
- if (Thread_WasCreated(&t->thread))
- return SZ_OK;
- wres = Thread_Create(&t->thread, MtDec_ThreadFunc, t);
- if (wres == 0)
- return SZ_OK;
- }
- return MY_SRes_HRESULT_FROM_WRes(wres);
-}
-
-
-void MtDecThread_FreeInBufs(CMtDecThread *t)
-{
- if (t->inBuf)
- {
- void *link = t->inBuf;
- t->inBuf = NULL;
- do
- {
- void *next = ((CMtDecBufLink *)link)->next;
- ISzAlloc_Free(t->mtDec->alloc, link);
- link = next;
- }
- while (link);
- }
-}
-
-
-static void MtDecThread_CloseThread(CMtDecThread *t)
-{
- if (Thread_WasCreated(&t->thread))
- {
- Event_Set(&t->canWrite); /* we can disable it. There are no threads waiting canWrite in normal cases */
- Event_Set(&t->canRead);
- Thread_Wait_Close(&t->thread);
- }
-
- Event_Close(&t->canRead);
- Event_Close(&t->canWrite);
-}
-
-static void MtDec_CloseThreads(CMtDec *p)
-{
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- MtDecThread_CloseThread(&p->threads[i]);
-}
-
-static void MtDecThread_Destruct(CMtDecThread *t)
-{
- MtDecThread_CloseThread(t);
- MtDecThread_FreeInBufs(t);
-}
-
-
-
-static SRes MtDec_GetError_Spec(CMtDec *p, UInt64 interruptIndex, BoolInt *wasInterrupted)
-{
- SRes res;
- CriticalSection_Enter(&p->mtProgress.cs);
- *wasInterrupted = (p->needInterrupt && interruptIndex > p->interruptIndex);
- res = p->mtProgress.res;
- CriticalSection_Leave(&p->mtProgress.cs);
- return res;
-}
-
-static SRes MtDec_Progress_GetError_Spec(CMtDec *p, UInt64 inSize, UInt64 outSize, UInt64 interruptIndex, BoolInt *wasInterrupted)
-{
- SRes res;
- CriticalSection_Enter(&p->mtProgress.cs);
-
- p->mtProgress.totalInSize += inSize;
- p->mtProgress.totalOutSize += outSize;
- if (p->mtProgress.res == SZ_OK && p->mtProgress.progress)
- if (ICompressProgress_Progress(p->mtProgress.progress, p->mtProgress.totalInSize, p->mtProgress.totalOutSize) != SZ_OK)
- p->mtProgress.res = SZ_ERROR_PROGRESS;
-
- *wasInterrupted = (p->needInterrupt && interruptIndex > p->interruptIndex);
- res = p->mtProgress.res;
-
- CriticalSection_Leave(&p->mtProgress.cs);
-
- return res;
-}
-
-static void MtDec_Interrupt(CMtDec *p, UInt64 interruptIndex)
-{
- CriticalSection_Enter(&p->mtProgress.cs);
- if (!p->needInterrupt || interruptIndex < p->interruptIndex)
- {
- p->interruptIndex = interruptIndex;
- p->needInterrupt = True;
- }
- CriticalSection_Leave(&p->mtProgress.cs);
-}
-
-Byte *MtDec_GetCrossBuff(CMtDec *p)
-{
- Byte *cr = p->crossBlock;
- if (!cr)
- {
- cr = (Byte *)ISzAlloc_Alloc(p->alloc, MTDEC__LINK_DATA_OFFSET + p->inBufSize);
- if (!cr)
- return NULL;
- p->crossBlock = cr;
- }
- return MTDEC__DATA_PTR_FROM_LINK(cr);
-}
-
-
-/*
- MtDec_ThreadFunc2() returns:
- 0 - in all normal cases (even for stream error or memory allocation error)
- (!= 0) - WRes error returned by a system threading function
-*/
-
-// #define MTDEC_ProgessStep (1 << 22)
-#define MTDEC_ProgessStep (1 << 0)
-
-static WRes MtDec_ThreadFunc2(CMtDecThread *t)
-{
- CMtDec *p = t->mtDec;
-
- PRF_STR_INT("MtDec_ThreadFunc2", t->index)
-
- // SetThreadAffinityMask(GetCurrentThread(), 1 << t->index);
-
- for (;;)
- {
- SRes res, codeRes;
- BoolInt wasInterrupted, isAllocError, overflow, finish;
- SRes threadingErrorSRes;
- BoolInt needCode, needWrite, needContinue;
-
- size_t inDataSize_Start;
- UInt64 inDataSize;
- // UInt64 inDataSize_Full;
-
- UInt64 blockIndex;
-
- UInt64 inPrev = 0;
- UInt64 outPrev = 0;
- UInt64 inCodePos;
- UInt64 outCodePos;
-
- Byte *afterEndData = NULL;
- size_t afterEndData_Size = 0;
- BoolInt afterEndData_IsCross = False;
-
- BoolInt canCreateNewThread = False;
- // CMtDecCallbackInfo parse;
- CMtDecThread *nextThread;
-
- PRF_STR_INT("=============== Event_Wait(&t->canRead)", t->index)
-
- RINOK_THREAD(Event_Wait(&t->canRead))
- if (p->exitThread)
- return 0;
-
- PRF_STR_INT("after Event_Wait(&t->canRead)", t->index)
-
- // if (t->index == 3) return 19; // for test
-
- blockIndex = p->blockIndex++;
-
- // PRF(printf("\ncanRead\n"))
-
- res = MtDec_Progress_GetError_Spec(p, 0, 0, blockIndex, &wasInterrupted);
-
- finish = p->readWasFinished;
- needCode = False;
- needWrite = False;
- isAllocError = False;
- overflow = False;
-
- inDataSize_Start = 0;
- inDataSize = 0;
- // inDataSize_Full = 0;
-
- if (res == SZ_OK && !wasInterrupted)
- {
- // if (p->inStream)
- {
- CMtDecBufLink *prev = NULL;
- CMtDecBufLink *link = (CMtDecBufLink *)t->inBuf;
- size_t crossSize = p->crossEnd - p->crossStart;
-
- PRF(printf("\ncrossSize = %d\n", crossSize));
-
- for (;;)
- {
- if (!link)
- {
- link = (CMtDecBufLink *)ISzAlloc_Alloc(p->alloc, MTDEC__LINK_DATA_OFFSET + p->inBufSize);
- if (!link)
- {
- finish = True;
- // p->allocError_for_Read_BlockIndex = blockIndex;
- isAllocError = True;
- break;
- }
- link->next = NULL;
- if (prev)
- {
- // static unsigned g_num = 0;
- // printf("\n%6d : %x", ++g_num, (unsigned)(size_t)((Byte *)link - (Byte *)prev));
- prev->next = link;
- }
- else
- t->inBuf = (void *)link;
- }
-
- {
- Byte *data = MTDEC__DATA_PTR_FROM_LINK(link);
- Byte *parseData = data;
- size_t size;
-
- if (crossSize != 0)
- {
- inDataSize = crossSize;
- // inDataSize_Full = inDataSize;
- inDataSize_Start = crossSize;
- size = crossSize;
- parseData = MTDEC__DATA_PTR_FROM_LINK(p->crossBlock) + p->crossStart;
- PRF(printf("\ncross : crossStart = %7d crossEnd = %7d finish = %1d",
- (int)p->crossStart, (int)p->crossEnd, (int)finish));
- }
- else
- {
- size = p->inBufSize;
-
- res = SeqInStream_ReadMax(p->inStream, data, &size);
-
- // size = 10; // test
-
- inDataSize += size;
- // inDataSize_Full = inDataSize;
- if (!prev)
- inDataSize_Start = size;
-
- p->readProcessed += size;
- finish = (size != p->inBufSize);
- if (finish)
- p->readWasFinished = True;
-
- // res = E_INVALIDARG; // test
-
- if (res != SZ_OK)
- {
- // PRF(printf("\nRead error = %d\n", res))
- // we want to decode all data before error
- p->readRes = res;
- // p->readError_BlockIndex = blockIndex;
- p->readWasFinished = True;
- finish = True;
- res = SZ_OK;
- // break;
- }
-
- if (inDataSize - inPrev >= MTDEC_ProgessStep)
- {
- res = MtDec_Progress_GetError_Spec(p, 0, 0, blockIndex, &wasInterrupted);
- if (res != SZ_OK || wasInterrupted)
- break;
- inPrev = inDataSize;
- }
- }
-
- {
- CMtDecCallbackInfo parse;
-
- parse.startCall = (prev == NULL);
- parse.src = parseData;
- parse.srcSize = size;
- parse.srcFinished = finish;
- parse.canCreateNewThread = True;
-
- PRF(printf("\nParse size = %d\n", (unsigned)size));
-
- p->mtCallback->Parse(p->mtCallbackObject, t->index, &parse);
-
- PRF(printf(" Parse processed = %d, state = %d \n", (unsigned)parse.srcSize, (unsigned)parse.state));
-
- needWrite = True;
- canCreateNewThread = parse.canCreateNewThread;
-
- // printf("\n\n%12I64u %12I64u", (UInt64)p->mtProgress.totalInSize, (UInt64)p->mtProgress.totalOutSize);
-
- if (
- // parseRes != SZ_OK ||
- // inDataSize - (size - parse.srcSize) > p->inBlockMax
- // ||
- parse.state == MTDEC_PARSE_OVERFLOW
- // || wasInterrupted
- )
- {
- // Overflow or Parse error - switch from MT decoding to ST decoding
- finish = True;
- overflow = True;
-
- {
- PRF(printf("\n Overflow"));
- // PRF(printf("\nisBlockFinished = %d", (unsigned)parse.blockWasFinished));
- PRF(printf("\n inDataSize = %d", (unsigned)inDataSize));
- }
-
- if (crossSize != 0)
- memcpy(data, parseData, size);
- p->crossStart = 0;
- p->crossEnd = 0;
- break;
- }
-
- if (crossSize != 0)
- {
- memcpy(data, parseData, parse.srcSize);
- p->crossStart += parse.srcSize;
- }
-
- if (parse.state != MTDEC_PARSE_CONTINUE || finish)
- {
- // we don't need to parse in current thread anymore
-
- if (parse.state == MTDEC_PARSE_END)
- finish = True;
-
- needCode = True;
- // p->crossFinished = finish;
-
- if (parse.srcSize == size)
- {
- // full parsed - no cross transfer
- p->crossStart = 0;
- p->crossEnd = 0;
- break;
- }
-
- if (parse.state == MTDEC_PARSE_END)
- {
- afterEndData = parseData + parse.srcSize;
- afterEndData_Size = size - parse.srcSize;
- if (crossSize != 0)
- afterEndData_IsCross = True;
- // we reduce data size to required bytes (parsed only)
- inDataSize -= afterEndData_Size;
- if (!prev)
- inDataSize_Start = parse.srcSize;
- break;
- }
-
- {
- // partial parsed - need cross transfer
- if (crossSize != 0)
- inDataSize = parse.srcSize; // it's only parsed now
- else
- {
- // partial parsed - is not in initial cross block - we need to copy new data to cross block
- Byte *cr = MtDec_GetCrossBuff(p);
- if (!cr)
- {
- {
- PRF(printf("\ncross alloc error error\n"));
- // res = SZ_ERROR_MEM;
- finish = True;
- // p->allocError_for_Read_BlockIndex = blockIndex;
- isAllocError = True;
- break;
- }
- }
-
- {
- size_t crSize = size - parse.srcSize;
- inDataSize -= crSize;
- p->crossEnd = crSize;
- p->crossStart = 0;
- memcpy(cr, parseData + parse.srcSize, crSize);
- }
- }
-
- // inDataSize_Full = inDataSize;
- if (!prev)
- inDataSize_Start = parse.srcSize; // it's partial size (parsed only)
-
- finish = False;
- break;
- }
- }
-
- if (parse.srcSize != size)
- {
- res = SZ_ERROR_FAIL;
- PRF(printf("\nfinished error SZ_ERROR_FAIL = %d\n", res));
- break;
- }
- }
- }
-
- prev = link;
- link = link->next;
-
- if (crossSize != 0)
- {
- crossSize = 0;
- p->crossStart = 0;
- p->crossEnd = 0;
- }
- }
- }
-
- if (res == SZ_OK)
- res = MtDec_GetError_Spec(p, blockIndex, &wasInterrupted);
- }
-
- codeRes = SZ_OK;
-
- if (res == SZ_OK && needCode && !wasInterrupted)
- {
- codeRes = p->mtCallback->PreCode(p->mtCallbackObject, t->index);
- if (codeRes != SZ_OK)
- {
- needCode = False;
- finish = True;
- // SZ_ERROR_MEM is expected error here.
- // if (codeRes == SZ_ERROR_MEM) - we will try single-thread decoding later.
- // if (codeRes != SZ_ERROR_MEM) - we can stop decoding or try single-thread decoding.
- }
- }
-
- if (res != SZ_OK || wasInterrupted)
- finish = True;
-
- nextThread = NULL;
- threadingErrorSRes = SZ_OK;
-
- if (!finish)
- {
- if (p->numStartedThreads < p->numStartedThreads_Limit && canCreateNewThread)
- {
- SRes res2 = MtDecThread_CreateAndStart(&p->threads[p->numStartedThreads]);
- if (res2 == SZ_OK)
- {
- // if (p->numStartedThreads % 1000 == 0) PRF(printf("\n numStartedThreads=%d\n", p->numStartedThreads));
- p->numStartedThreads++;
- }
- else
- {
- PRF(printf("\nERROR: numStartedThreads=%d\n", p->numStartedThreads));
- if (p->numStartedThreads == 1)
- {
- // if only one thread is possible, we leave multi-threading code
- finish = True;
- needCode = False;
- threadingErrorSRes = res2;
- }
- else
- p->numStartedThreads_Limit = p->numStartedThreads;
- }
- }
-
- if (!finish)
- {
- unsigned nextIndex = t->index + 1;
- nextThread = &p->threads[nextIndex >= p->numStartedThreads ? 0 : nextIndex];
- RINOK_THREAD(Event_Set(&nextThread->canRead))
- // We have started executing for new iteration (with next thread)
- // And that next thread now is responsible for possible exit from decoding (threading_code)
- }
- }
-
- // each call of Event_Set(&nextThread->canRead) must be followed by call of Event_Set(&nextThread->canWrite)
- // if ( !finish ) we must call Event_Set(&nextThread->canWrite) in any case
- // if ( finish ) we switch to single-thread mode and there are 2 ways at the end of current iteration (current block):
- // - if (needContinue) after Write(&needContinue), we restore decoding with new iteration
- // - otherwise we stop decoding and exit from MtDec_ThreadFunc2()
-
- // Don't change (finish) variable in the further code
-
-
- // ---------- CODE ----------
-
- inPrev = 0;
- outPrev = 0;
- inCodePos = 0;
- outCodePos = 0;
-
- if (res == SZ_OK && needCode && codeRes == SZ_OK)
- {
- BoolInt isStartBlock = True;
- CMtDecBufLink *link = (CMtDecBufLink *)t->inBuf;
-
- for (;;)
- {
- size_t inSize;
- int stop;
-
- if (isStartBlock)
- inSize = inDataSize_Start;
- else
- {
- UInt64 rem = inDataSize - inCodePos;
- inSize = p->inBufSize;
- if (inSize > rem)
- inSize = (size_t)rem;
- }
-
- inCodePos += inSize;
- stop = True;
-
- codeRes = p->mtCallback->Code(p->mtCallbackObject, t->index,
- (const Byte *)MTDEC__DATA_PTR_FROM_LINK(link), inSize,
- (inCodePos == inDataSize), // srcFinished
- &inCodePos, &outCodePos, &stop);
-
- if (codeRes != SZ_OK)
- {
- PRF(printf("\nCode Interrupt error = %x\n", codeRes));
- // we interrupt only later blocks
- MtDec_Interrupt(p, blockIndex);
- break;
- }
-
- if (stop || inCodePos == inDataSize)
- break;
-
- {
- const UInt64 inDelta = inCodePos - inPrev;
- const UInt64 outDelta = outCodePos - outPrev;
- if (inDelta >= MTDEC_ProgessStep || outDelta >= MTDEC_ProgessStep)
- {
- // Sleep(1);
- res = MtDec_Progress_GetError_Spec(p, inDelta, outDelta, blockIndex, &wasInterrupted);
- if (res != SZ_OK || wasInterrupted)
- break;
- inPrev = inCodePos;
- outPrev = outCodePos;
- }
- }
-
- link = link->next;
- isStartBlock = False;
- }
- }
-
-
- // ---------- WRITE ----------
-
- RINOK_THREAD(Event_Wait(&t->canWrite))
-
- {
- BoolInt isErrorMode = False;
- BoolInt canRecode = True;
- BoolInt needWriteToStream = needWrite;
-
- if (p->exitThread) return 0; // it's never executed in normal cases
-
- if (p->wasInterrupted)
- wasInterrupted = True;
- else
- {
- if (codeRes != SZ_OK) // || !needCode // check it !!!
- {
- p->wasInterrupted = True;
- p->codeRes = codeRes;
- if (codeRes == SZ_ERROR_MEM)
- isAllocError = True;
- }
-
- if (threadingErrorSRes)
- {
- p->wasInterrupted = True;
- p->threadingErrorSRes = threadingErrorSRes;
- needWriteToStream = False;
- }
- if (isAllocError)
- {
- p->wasInterrupted = True;
- p->isAllocError = True;
- needWriteToStream = False;
- }
- if (overflow)
- {
- p->wasInterrupted = True;
- p->overflow = True;
- needWriteToStream = False;
- }
- }
-
- if (needCode)
- {
- if (wasInterrupted)
- {
- inCodePos = 0;
- outCodePos = 0;
- }
- {
- const UInt64 inDelta = inCodePos - inPrev;
- const UInt64 outDelta = outCodePos - outPrev;
- // if (inDelta != 0 || outDelta != 0)
- res = MtProgress_ProgressAdd(&p->mtProgress, inDelta, outDelta);
- }
- }
-
- needContinue = (!finish);
-
- // if (res == SZ_OK && needWrite && !wasInterrupted)
- if (needWrite)
- {
- // p->inProcessed += inCodePos;
-
- PRF(printf("\n--Write afterSize = %d\n", (unsigned)afterEndData_Size));
-
- res = p->mtCallback->Write(p->mtCallbackObject, t->index,
- res == SZ_OK && needWriteToStream && !wasInterrupted, // needWrite
- afterEndData, afterEndData_Size, afterEndData_IsCross,
- &needContinue,
- &canRecode);
-
- // res = SZ_ERROR_FAIL; // for test
-
- PRF(printf("\nAfter Write needContinue = %d\n", (unsigned)needContinue));
- PRF(printf("\nprocessed = %d\n", (unsigned)p->inProcessed));
-
- if (res != SZ_OK)
- {
- PRF(printf("\nWrite error = %d\n", res));
- isErrorMode = True;
- p->wasInterrupted = True;
- }
- if (res != SZ_OK
- || (!needContinue && !finish))
- {
- PRF(printf("\nWrite Interrupt error = %x\n", res));
- MtDec_Interrupt(p, blockIndex);
- }
- }
-
- if (canRecode)
- if (!needCode
- || res != SZ_OK
- || p->wasInterrupted
- || codeRes != SZ_OK
- || wasInterrupted
- || p->numFilledThreads != 0
- || isErrorMode)
- {
- if (p->numFilledThreads == 0)
- p->filledThreadStart = t->index;
- if (inDataSize != 0 || !finish)
- {
- t->inDataSize_Start = inDataSize_Start;
- t->inDataSize = inDataSize;
- p->numFilledThreads++;
- }
- PRF(printf("\np->numFilledThreads = %d\n", p->numFilledThreads));
- PRF(printf("p->filledThreadStart = %d\n", p->filledThreadStart));
- }
-
- if (!finish)
- {
- RINOK_THREAD(Event_Set(&nextThread->canWrite))
- }
- else
- {
- if (needContinue)
- {
- // we restore decoding with new iteration
- RINOK_THREAD(Event_Set(&p->threads[0].canWrite))
- }
- else
- {
- // we exit from decoding
- if (t->index == 0)
- return SZ_OK;
- p->exitThread = True;
- }
- RINOK_THREAD(Event_Set(&p->threads[0].canRead))
- }
- }
- }
-}
-
-#ifdef _WIN32
-#define USE_ALLOCA
-#endif
-
-#ifdef USE_ALLOCA
-#ifdef _WIN32
-#include <malloc.h>
-#else
-#include <stdlib.h>
-#endif
-#endif
-
-
-static THREAD_FUNC_DECL MtDec_ThreadFunc1(void *pp)
-{
- WRes res;
-
- CMtDecThread *t = (CMtDecThread *)pp;
- CMtDec *p;
-
- // fprintf(stdout, "\n%d = %p\n", t->index, &t);
-
- res = MtDec_ThreadFunc2(t);
- p = t->mtDec;
- if (res == 0)
- return (THREAD_FUNC_RET_TYPE)(UINT_PTR)p->exitThreadWRes;
- {
- // it's unexpected situation for some threading function error
- if (p->exitThreadWRes == 0)
- p->exitThreadWRes = res;
- PRF(printf("\nthread exit error = %d\n", res));
- p->exitThread = True;
- Event_Set(&p->threads[0].canRead);
- Event_Set(&p->threads[0].canWrite);
- MtProgress_SetError(&p->mtProgress, MY_SRes_HRESULT_FROM_WRes(res));
- }
- return (THREAD_FUNC_RET_TYPE)(UINT_PTR)res;
-}
-
-static Z7_NO_INLINE THREAD_FUNC_DECL MtDec_ThreadFunc(void *pp)
-{
- #ifdef USE_ALLOCA
- CMtDecThread *t = (CMtDecThread *)pp;
- // fprintf(stderr, "\n%d = %p - before", t->index, &t);
- t->allocaPtr = alloca(t->index * 128);
- #endif
- return MtDec_ThreadFunc1(pp);
-}
-
-
-int MtDec_PrepareRead(CMtDec *p)
-{
- if (p->crossBlock && p->crossStart == p->crossEnd)
- {
- ISzAlloc_Free(p->alloc, p->crossBlock);
- p->crossBlock = NULL;
- }
-
- {
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- if (i > p->numStartedThreads
- || p->numFilledThreads <=
- (i >= p->filledThreadStart ?
- i - p->filledThreadStart :
- i + p->numStartedThreads - p->filledThreadStart))
- MtDecThread_FreeInBufs(&p->threads[i]);
- }
-
- return (p->numFilledThreads != 0) || (p->crossStart != p->crossEnd);
-}
-
-
-const Byte *MtDec_Read(CMtDec *p, size_t *inLim)
-{
- while (p->numFilledThreads != 0)
- {
- CMtDecThread *t = &p->threads[p->filledThreadStart];
-
- if (*inLim != 0)
- {
- {
- void *link = t->inBuf;
- void *next = ((CMtDecBufLink *)link)->next;
- ISzAlloc_Free(p->alloc, link);
- t->inBuf = next;
- }
-
- if (t->inDataSize == 0)
- {
- MtDecThread_FreeInBufs(t);
- if (--p->numFilledThreads == 0)
- break;
- if (++p->filledThreadStart == p->numStartedThreads)
- p->filledThreadStart = 0;
- t = &p->threads[p->filledThreadStart];
- }
- }
-
- {
- size_t lim = t->inDataSize_Start;
- if (lim != 0)
- t->inDataSize_Start = 0;
- else
- {
- UInt64 rem = t->inDataSize;
- lim = p->inBufSize;
- if (lim > rem)
- lim = (size_t)rem;
- }
- t->inDataSize -= lim;
- *inLim = lim;
- return (const Byte *)MTDEC__DATA_PTR_FROM_LINK(t->inBuf);
- }
- }
-
- {
- size_t crossSize = p->crossEnd - p->crossStart;
- if (crossSize != 0)
- {
- const Byte *data = MTDEC__DATA_PTR_FROM_LINK(p->crossBlock) + p->crossStart;
- *inLim = crossSize;
- p->crossStart = 0;
- p->crossEnd = 0;
- return data;
- }
- *inLim = 0;
- if (p->crossBlock)
- {
- ISzAlloc_Free(p->alloc, p->crossBlock);
- p->crossBlock = NULL;
- }
- return NULL;
- }
-}
-
-
-void MtDec_Construct(CMtDec *p)
-{
- unsigned i;
-
- p->inBufSize = (size_t)1 << 18;
-
- p->numThreadsMax = 0;
-
- p->inStream = NULL;
-
- // p->inData = NULL;
- // p->inDataSize = 0;
-
- p->crossBlock = NULL;
- p->crossStart = 0;
- p->crossEnd = 0;
-
- p->numFilledThreads = 0;
-
- p->progress = NULL;
- p->alloc = NULL;
-
- p->mtCallback = NULL;
- p->mtCallbackObject = NULL;
-
- p->allocatedBufsSize = 0;
-
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CMtDecThread *t = &p->threads[i];
- t->mtDec = p;
- t->index = i;
- t->inBuf = NULL;
- Event_Construct(&t->canRead);
- Event_Construct(&t->canWrite);
- Thread_CONSTRUCT(&t->thread)
- }
-
- // Event_Construct(&p->finishedEvent);
-
- CriticalSection_Init(&p->mtProgress.cs);
-}
-
-
-static void MtDec_Free(CMtDec *p)
-{
- unsigned i;
-
- p->exitThread = True;
-
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- MtDecThread_Destruct(&p->threads[i]);
-
- // Event_Close(&p->finishedEvent);
-
- if (p->crossBlock)
- {
- ISzAlloc_Free(p->alloc, p->crossBlock);
- p->crossBlock = NULL;
- }
-}
-
-
-void MtDec_Destruct(CMtDec *p)
-{
- MtDec_Free(p);
-
- CriticalSection_Delete(&p->mtProgress.cs);
-}
-
-
-SRes MtDec_Code(CMtDec *p)
-{
- unsigned i;
-
- p->inProcessed = 0;
-
- p->blockIndex = 1; // it must be larger than not_defined index (0)
- p->isAllocError = False;
- p->overflow = False;
- p->threadingErrorSRes = SZ_OK;
-
- p->needContinue = True;
-
- p->readWasFinished = False;
- p->needInterrupt = False;
- p->interruptIndex = (UInt64)(Int64)-1;
-
- p->readProcessed = 0;
- p->readRes = SZ_OK;
- p->codeRes = SZ_OK;
- p->wasInterrupted = False;
-
- p->crossStart = 0;
- p->crossEnd = 0;
-
- p->filledThreadStart = 0;
- p->numFilledThreads = 0;
-
- {
- unsigned numThreads = p->numThreadsMax;
- if (numThreads > MTDEC_THREADS_MAX)
- numThreads = MTDEC_THREADS_MAX;
- p->numStartedThreads_Limit = numThreads;
- p->numStartedThreads = 0;
- }
-
- if (p->inBufSize != p->allocatedBufsSize)
- {
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CMtDecThread *t = &p->threads[i];
- if (t->inBuf)
- MtDecThread_FreeInBufs(t);
- }
- if (p->crossBlock)
- {
- ISzAlloc_Free(p->alloc, p->crossBlock);
- p->crossBlock = NULL;
- }
-
- p->allocatedBufsSize = p->inBufSize;
- }
-
- MtProgress_Init(&p->mtProgress, p->progress);
-
- // RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->finishedEvent))
- p->exitThread = False;
- p->exitThreadWRes = 0;
-
- {
- WRes wres;
- SRes sres;
- CMtDecThread *nextThread = &p->threads[p->numStartedThreads++];
- // wres = MtDecThread_CreateAndStart(nextThread);
- wres = MtDecThread_CreateEvents(nextThread);
- if (wres == 0) { wres = Event_Set(&nextThread->canWrite);
- if (wres == 0) { wres = Event_Set(&nextThread->canRead);
- if (wres == 0) { THREAD_FUNC_RET_TYPE res = MtDec_ThreadFunc(nextThread);
- wres = (WRes)(UINT_PTR)res;
- if (wres != 0)
- {
- p->needContinue = False;
- MtDec_CloseThreads(p);
- }}}}
-
- // wres = 17; // for test
- // wres = Event_Wait(&p->finishedEvent);
-
- sres = MY_SRes_HRESULT_FROM_WRes(wres);
-
- if (sres != 0)
- p->threadingErrorSRes = sres;
-
- if (
- // wres == 0
- // wres != 0
- // || p->mtc.codeRes == SZ_ERROR_MEM
- p->isAllocError
- || p->threadingErrorSRes != SZ_OK
- || p->overflow)
- {
- // p->needContinue = True;
- }
- else
- p->needContinue = False;
-
- if (p->needContinue)
- return SZ_OK;
-
- // if (sres != SZ_OK)
- return sres;
- // return SZ_ERROR_FAIL;
- }
-}
-
-#endif
-
-#undef PRF
diff --git a/3rdparty/7z/src/MtDec.h b/3rdparty/7z/src/MtDec.h
deleted file mode 100644
index f214b3ab43..0000000000
--- a/3rdparty/7z/src/MtDec.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* MtDec.h -- Multi-thread Decoder
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_MT_DEC_H
-#define ZIP7_INC_MT_DEC_H
-
-#include "7zTypes.h"
-
-#ifndef Z7_ST
-#include "Threads.h"
-#endif
-
-EXTERN_C_BEGIN
-
-#ifndef Z7_ST
-
-#ifndef Z7_ST
- #define MTDEC_THREADS_MAX 32
-#else
- #define MTDEC_THREADS_MAX 1
-#endif
-
-
-typedef struct
-{
- ICompressProgressPtr progress;
- SRes res;
- UInt64 totalInSize;
- UInt64 totalOutSize;
- CCriticalSection cs;
-} CMtProgress;
-
-void MtProgress_Init(CMtProgress *p, ICompressProgressPtr progress);
-SRes MtProgress_Progress_ST(CMtProgress *p);
-SRes MtProgress_ProgressAdd(CMtProgress *p, UInt64 inSize, UInt64 outSize);
-SRes MtProgress_GetError(CMtProgress *p);
-void MtProgress_SetError(CMtProgress *p, SRes res);
-
-struct CMtDec;
-
-typedef struct
-{
- struct CMtDec_ *mtDec;
- unsigned index;
- void *inBuf;
-
- size_t inDataSize_Start; // size of input data in start block
- UInt64 inDataSize; // total size of input data in all blocks
-
- CThread thread;
- CAutoResetEvent canRead;
- CAutoResetEvent canWrite;
- void *allocaPtr;
-} CMtDecThread;
-
-void MtDecThread_FreeInBufs(CMtDecThread *t);
-
-
-typedef enum
-{
- MTDEC_PARSE_CONTINUE, // continue this block with more input data
- MTDEC_PARSE_OVERFLOW, // MT buffers overflow, need switch to single-thread
- MTDEC_PARSE_NEW, // new block
- MTDEC_PARSE_END // end of block threading. But we still can return to threading after Write(&needContinue)
-} EMtDecParseState;
-
-typedef struct
-{
- // in
- int startCall;
- const Byte *src;
- size_t srcSize;
- // in : (srcSize == 0) is allowed
- // out : it's allowed to return less that actually was used ?
- int srcFinished;
-
- // out
- EMtDecParseState state;
- BoolInt canCreateNewThread;
- UInt64 outPos; // check it (size_t)
-} CMtDecCallbackInfo;
-
-
-typedef struct
-{
- void (*Parse)(void *p, unsigned coderIndex, CMtDecCallbackInfo *ci);
-
- // PreCode() and Code():
- // (SRes_return_result != SZ_OK) means stop decoding, no need another blocks
- SRes (*PreCode)(void *p, unsigned coderIndex);
- SRes (*Code)(void *p, unsigned coderIndex,
- const Byte *src, size_t srcSize, int srcFinished,
- UInt64 *inCodePos, UInt64 *outCodePos, int *stop);
- // stop - means stop another Code calls
-
-
- /* Write() must be called, if Parse() was called
- set (needWrite) if
- {
- && (was not interrupted by progress)
- && (was not interrupted in previous block)
- }
-
- out:
- if (*needContinue), decoder still need to continue decoding with new iteration,
- even after MTDEC_PARSE_END
- if (*canRecode), we didn't flush current block data, so we still can decode current block later.
- */
- SRes (*Write)(void *p, unsigned coderIndex,
- BoolInt needWriteToStream,
- const Byte *src, size_t srcSize, BoolInt isCross,
- // int srcFinished,
- BoolInt *needContinue,
- BoolInt *canRecode);
-
-} IMtDecCallback2;
-
-
-
-typedef struct CMtDec_
-{
- /* input variables */
-
- size_t inBufSize; /* size of input block */
- unsigned numThreadsMax;
- // size_t inBlockMax;
- unsigned numThreadsMax_2;
-
- ISeqInStreamPtr inStream;
- // const Byte *inData;
- // size_t inDataSize;
-
- ICompressProgressPtr progress;
- ISzAllocPtr alloc;
-
- IMtDecCallback2 *mtCallback;
- void *mtCallbackObject;
-
-
- /* internal variables */
-
- size_t allocatedBufsSize;
-
- BoolInt exitThread;
- WRes exitThreadWRes;
-
- UInt64 blockIndex;
- BoolInt isAllocError;
- BoolInt overflow;
- SRes threadingErrorSRes;
-
- BoolInt needContinue;
-
- // CAutoResetEvent finishedEvent;
-
- SRes readRes;
- SRes codeRes;
-
- BoolInt wasInterrupted;
-
- unsigned numStartedThreads_Limit;
- unsigned numStartedThreads;
-
- Byte *crossBlock;
- size_t crossStart;
- size_t crossEnd;
- UInt64 readProcessed;
- BoolInt readWasFinished;
- UInt64 inProcessed;
-
- unsigned filledThreadStart;
- unsigned numFilledThreads;
-
- #ifndef Z7_ST
- BoolInt needInterrupt;
- UInt64 interruptIndex;
- CMtProgress mtProgress;
- CMtDecThread threads[MTDEC_THREADS_MAX];
- #endif
-} CMtDec;
-
-
-void MtDec_Construct(CMtDec *p);
-void MtDec_Destruct(CMtDec *p);
-
-/*
-MtDec_Code() returns:
- SZ_OK - in most cases
- MY_SRes_HRESULT_FROM_WRes(WRes_error) - in case of unexpected error in threading function
-*/
-
-SRes MtDec_Code(CMtDec *p);
-Byte *MtDec_GetCrossBuff(CMtDec *p);
-
-int MtDec_PrepareRead(CMtDec *p);
-const Byte *MtDec_Read(CMtDec *p, size_t *inLim);
-
-#endif
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/Ppmd.h b/3rdparty/7z/src/Ppmd.h
deleted file mode 100644
index da1ed375ed..0000000000
--- a/3rdparty/7z/src/Ppmd.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* Ppmd.h -- PPMD codec common code
-2023-03-05 : Igor Pavlov : Public domain
-This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
-
-#ifndef ZIP7_INC_PPMD_H
-#define ZIP7_INC_PPMD_H
-
-#include "CpuArch.h"
-
-EXTERN_C_BEGIN
-
-#if defined(MY_CPU_SIZEOF_POINTER) && (MY_CPU_SIZEOF_POINTER == 4)
-/*
- PPMD code always uses 32-bit internal fields in PPMD structures to store internal references in main block.
- if (PPMD_32BIT is defined), the PPMD code stores internal pointers to 32-bit reference fields.
- if (PPMD_32BIT is NOT defined), the PPMD code stores internal UInt32 offsets to reference fields.
- if (pointer size is 64-bit), then (PPMD_32BIT) mode is not allowed,
- if (pointer size is 32-bit), then (PPMD_32BIT) mode is optional,
- and it's allowed to disable PPMD_32BIT mode even if pointer is 32-bit.
- PPMD code works slightly faster in (PPMD_32BIT) mode.
-*/
- #define PPMD_32BIT
-#endif
-
-#define PPMD_INT_BITS 7
-#define PPMD_PERIOD_BITS 7
-#define PPMD_BIN_SCALE (1 << (PPMD_INT_BITS + PPMD_PERIOD_BITS))
-
-#define PPMD_GET_MEAN_SPEC(summ, shift, round) (((summ) + (1 << ((shift) - (round)))) >> (shift))
-#define PPMD_GET_MEAN(summ) PPMD_GET_MEAN_SPEC((summ), PPMD_PERIOD_BITS, 2)
-#define PPMD_UPDATE_PROB_0(prob) ((prob) + (1 << PPMD_INT_BITS) - PPMD_GET_MEAN(prob))
-#define PPMD_UPDATE_PROB_1(prob) ((prob) - PPMD_GET_MEAN(prob))
-
-#define PPMD_N1 4
-#define PPMD_N2 4
-#define PPMD_N3 4
-#define PPMD_N4 ((128 + 3 - 1 * PPMD_N1 - 2 * PPMD_N2 - 3 * PPMD_N3) / 4)
-#define PPMD_NUM_INDEXES (PPMD_N1 + PPMD_N2 + PPMD_N3 + PPMD_N4)
-
-MY_CPU_pragma_pack_push_1
-/* Most compilers works OK here even without #pragma pack(push, 1), but some GCC compilers need it. */
-
-/* SEE-contexts for PPM-contexts with masked symbols */
-typedef struct
-{
- UInt16 Summ; /* Freq */
- Byte Shift; /* Speed of Freq change; low Shift is for fast change */
- Byte Count; /* Count to next change of Shift */
-} CPpmd_See;
-
-#define Ppmd_See_UPDATE(p) \
- { if ((p)->Shift < PPMD_PERIOD_BITS && --(p)->Count == 0) \
- { (p)->Summ = (UInt16)((p)->Summ << 1); \
- (p)->Count = (Byte)(3 << (p)->Shift++); }}
-
-
-typedef struct
-{
- Byte Symbol;
- Byte Freq;
- UInt16 Successor_0;
- UInt16 Successor_1;
-} CPpmd_State;
-
-typedef struct CPpmd_State2_
-{
- Byte Symbol;
- Byte Freq;
-} CPpmd_State2;
-
-typedef struct CPpmd_State4_
-{
- UInt16 Successor_0;
- UInt16 Successor_1;
-} CPpmd_State4;
-
-MY_CPU_pragma_pop
-
-/*
- PPMD code can write full CPpmd_State structure data to CPpmd*_Context
- at (byte offset = 2) instead of some fields of original CPpmd*_Context structure.
-
- If we use pointers to different types, but that point to shared
- memory space, we can have aliasing problem (strict aliasing).
-
- XLC compiler in -O2 mode can change the order of memory write instructions
- in relation to read instructions, if we have use pointers to different types.
-
- To solve that aliasing problem we use combined CPpmd*_Context structure
- with unions that contain the fields from both structures:
- the original CPpmd*_Context and CPpmd_State.
- So we can access the fields from both structures via one pointer,
- and the compiler doesn't change the order of write instructions
- in relation to read instructions.
-
- If we don't use memory write instructions to shared memory in
- some local code, and we use only reading instructions (read only),
- then probably it's safe to use pointers to different types for reading.
-*/
-
-
-
-#ifdef PPMD_32BIT
-
- #define Ppmd_Ref_Type(type) type *
- #define Ppmd_GetRef(p, ptr) (ptr)
- #define Ppmd_GetPtr(p, ptr) (ptr)
- #define Ppmd_GetPtr_Type(p, ptr, note_type) (ptr)
-
-#else
-
- #define Ppmd_Ref_Type(type) UInt32
- #define Ppmd_GetRef(p, ptr) ((UInt32)((Byte *)(ptr) - (p)->Base))
- #define Ppmd_GetPtr(p, offs) ((void *)((p)->Base + (offs)))
- #define Ppmd_GetPtr_Type(p, offs, type) ((type *)Ppmd_GetPtr(p, offs))
-
-#endif // PPMD_32BIT
-
-
-typedef Ppmd_Ref_Type(CPpmd_State) CPpmd_State_Ref;
-typedef Ppmd_Ref_Type(void) CPpmd_Void_Ref;
-typedef Ppmd_Ref_Type(Byte) CPpmd_Byte_Ref;
-
-
-/*
-#ifdef MY_CPU_LE_UNALIGN
-// the unaligned 32-bit access latency can be too large, if the data is not in L1 cache.
-#define Ppmd_GET_SUCCESSOR(p) ((CPpmd_Void_Ref)*(const UInt32 *)(const void *)&(p)->Successor_0)
-#define Ppmd_SET_SUCCESSOR(p, v) *(UInt32 *)(void *)(void *)&(p)->Successor_0 = (UInt32)(v)
-
-#else
-*/
-
-/*
- We can write 16-bit halves to 32-bit (Successor) field in any selected order.
- But the native order is more consistent way.
- So we use the native order, if LE/BE order can be detected here at compile time.
-*/
-
-#ifdef MY_CPU_BE
-
- #define Ppmd_GET_SUCCESSOR(p) \
- ( (CPpmd_Void_Ref) (((UInt32)(p)->Successor_0 << 16) | (p)->Successor_1) )
-
- #define Ppmd_SET_SUCCESSOR(p, v) { \
- (p)->Successor_0 = (UInt16)(((UInt32)(v) >> 16) /* & 0xFFFF */); \
- (p)->Successor_1 = (UInt16)((UInt32)(v) /* & 0xFFFF */); }
-
-#else
-
- #define Ppmd_GET_SUCCESSOR(p) \
- ( (CPpmd_Void_Ref) ((p)->Successor_0 | ((UInt32)(p)->Successor_1 << 16)) )
-
- #define Ppmd_SET_SUCCESSOR(p, v) { \
- (p)->Successor_0 = (UInt16)((UInt32)(v) /* & 0xFFFF */); \
- (p)->Successor_1 = (UInt16)(((UInt32)(v) >> 16) /* & 0xFFFF */); }
-
-#endif
-
-// #endif
-
-
-#define PPMD_SetAllBitsIn256Bytes(p) \
- { size_t z; for (z = 0; z < 256 / sizeof(p[0]); z += 8) { \
- p[z+7] = p[z+6] = p[z+5] = p[z+4] = p[z+3] = p[z+2] = p[z+1] = p[z+0] = ~(size_t)0; }}
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/Ppmd7.c b/3rdparty/7z/src/Ppmd7.c
deleted file mode 100644
index d84e6fe9af..0000000000
--- a/3rdparty/7z/src/Ppmd7.c
+++ /dev/null
@@ -1,1122 +0,0 @@
-/* Ppmd7.c -- PPMdH codec
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "Ppmd7.h"
-
-/* define PPMD7_ORDER_0_SUPPPORT to suport order-0 mode, unsupported by orignal PPMd var.H. code */
-// #define PPMD7_ORDER_0_SUPPPORT
-
-MY_ALIGN(16)
-static const Byte PPMD7_kExpEscape[16] = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
-MY_ALIGN(16)
-static const UInt16 PPMD7_kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
-
-#define MAX_FREQ 124
-#define UNIT_SIZE 12
-
-#define U2B(nu) ((UInt32)(nu) * UNIT_SIZE)
-#define U2I(nu) (p->Units2Indx[(size_t)(nu) - 1])
-#define I2U(indx) ((unsigned)p->Indx2Units[indx])
-#define I2U_UInt16(indx) ((UInt16)p->Indx2Units[indx])
-
-#define REF(ptr) Ppmd_GetRef(p, ptr)
-
-#define STATS_REF(ptr) ((CPpmd_State_Ref)REF(ptr))
-
-#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
-#define STATS(ctx) Ppmd7_GetStats(p, ctx)
-#define ONE_STATE(ctx) Ppmd7Context_OneState(ctx)
-#define SUFFIX(ctx) CTX((ctx)->Suffix)
-
-typedef CPpmd7_Context * PPMD7_CTX_PTR;
-
-struct CPpmd7_Node_;
-
-typedef Ppmd_Ref_Type(struct CPpmd7_Node_) CPpmd7_Node_Ref;
-
-typedef struct CPpmd7_Node_
-{
- UInt16 Stamp; /* must be at offset 0 as CPpmd7_Context::NumStats. Stamp=0 means free */
- UInt16 NU;
- CPpmd7_Node_Ref Next; /* must be at offset >= 4 */
- CPpmd7_Node_Ref Prev;
-} CPpmd7_Node;
-
-#define NODE(r) Ppmd_GetPtr_Type(p, r, CPpmd7_Node)
-
-void Ppmd7_Construct(CPpmd7 *p)
-{
- unsigned i, k, m;
-
- p->Base = NULL;
-
- for (i = 0, k = 0; i < PPMD_NUM_INDEXES; i++)
- {
- unsigned step = (i >= 12 ? 4 : (i >> 2) + 1);
- do { p->Units2Indx[k++] = (Byte)i; } while (--step);
- p->Indx2Units[i] = (Byte)k;
- }
-
- p->NS2BSIndx[0] = (0 << 1);
- p->NS2BSIndx[1] = (1 << 1);
- memset(p->NS2BSIndx + 2, (2 << 1), 9);
- memset(p->NS2BSIndx + 11, (3 << 1), 256 - 11);
-
- for (i = 0; i < 3; i++)
- p->NS2Indx[i] = (Byte)i;
-
- for (m = i, k = 1; i < 256; i++)
- {
- p->NS2Indx[i] = (Byte)m;
- if (--k == 0)
- k = (++m) - 2;
- }
-
- memcpy(p->ExpEscape, PPMD7_kExpEscape, 16);
-}
-
-
-void Ppmd7_Free(CPpmd7 *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->Base);
- p->Size = 0;
- p->Base = NULL;
-}
-
-
-BoolInt Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAllocPtr alloc)
-{
- if (!p->Base || p->Size != size)
- {
- Ppmd7_Free(p, alloc);
- p->AlignOffset = (4 - size) & 3;
- if ((p->Base = (Byte *)ISzAlloc_Alloc(alloc, p->AlignOffset + size)) == NULL)
- return False;
- p->Size = size;
- }
- return True;
-}
-
-
-
-// ---------- Internal Memory Allocator ----------
-
-/* We can use CPpmd7_Node in list of free units (as in Ppmd8)
- But we still need one additional list walk pass in Ppmd7_GlueFreeBlocks().
- So we use simple CPpmd_Void_Ref instead of CPpmd7_Node in Ppmd7_InsertNode() / Ppmd7_RemoveNode()
-*/
-
-#define EMPTY_NODE 0
-
-
-static void Ppmd7_InsertNode(CPpmd7 *p, void *node, unsigned indx)
-{
- *((CPpmd_Void_Ref *)node) = p->FreeList[indx];
- // ((CPpmd7_Node *)node)->Next = (CPpmd7_Node_Ref)p->FreeList[indx];
-
- p->FreeList[indx] = REF(node);
-
-}
-
-
-static void *Ppmd7_RemoveNode(CPpmd7 *p, unsigned indx)
-{
- CPpmd_Void_Ref *node = (CPpmd_Void_Ref *)Ppmd7_GetPtr(p, p->FreeList[indx]);
- p->FreeList[indx] = *node;
- // CPpmd7_Node *node = NODE((CPpmd7_Node_Ref)p->FreeList[indx]);
- // p->FreeList[indx] = node->Next;
- return node;
-}
-
-
-static void Ppmd7_SplitBlock(CPpmd7 *p, void *ptr, unsigned oldIndx, unsigned newIndx)
-{
- unsigned i, nu = I2U(oldIndx) - I2U(newIndx);
- ptr = (Byte *)ptr + U2B(I2U(newIndx));
- if (I2U(i = U2I(nu)) != nu)
- {
- unsigned k = I2U(--i);
- Ppmd7_InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1);
- }
- Ppmd7_InsertNode(p, ptr, i);
-}
-
-
-/* we use CPpmd7_Node_Union union to solve XLC -O2 strict pointer aliasing problem */
-
-typedef union
-{
- CPpmd7_Node Node;
- CPpmd7_Node_Ref NextRef;
-} CPpmd7_Node_Union;
-
-/* Original PPmdH (Ppmd7) code uses doubly linked list in Ppmd7_GlueFreeBlocks()
- we use single linked list similar to Ppmd8 code */
-
-
-static void Ppmd7_GlueFreeBlocks(CPpmd7 *p)
-{
- /*
- we use first UInt16 field of 12-bytes UNITs as record type stamp
- CPpmd_State { Byte Symbol; Byte Freq; : Freq != 0
- CPpmd7_Context { UInt16 NumStats; : NumStats != 0
- CPpmd7_Node { UInt16 Stamp : Stamp == 0 for free record
- : Stamp == 1 for head record and guard
- Last 12-bytes UNIT in array is always contains 12-bytes order-0 CPpmd7_Context record.
- */
- CPpmd7_Node_Ref head, n = 0;
-
- p->GlueCount = 255;
-
-
- /* we set guard NODE at LoUnit */
- if (p->LoUnit != p->HiUnit)
- ((CPpmd7_Node *)(void *)p->LoUnit)->Stamp = 1;
-
- {
- /* Create list of free blocks.
- We still need one additional list walk pass before Glue. */
- unsigned i;
- for (i = 0; i < PPMD_NUM_INDEXES; i++)
- {
- const UInt16 nu = I2U_UInt16(i);
- CPpmd7_Node_Ref next = (CPpmd7_Node_Ref)p->FreeList[i];
- p->FreeList[i] = 0;
- while (next != 0)
- {
- /* Don't change the order of the following commands: */
- CPpmd7_Node_Union *un = (CPpmd7_Node_Union *)NODE(next);
- const CPpmd7_Node_Ref tmp = next;
- next = un->NextRef;
- un->Node.Stamp = EMPTY_NODE;
- un->Node.NU = nu;
- un->Node.Next = n;
- n = tmp;
- }
- }
- }
-
- head = n;
- /* Glue and Fill must walk the list in same direction */
- {
- /* Glue free blocks */
- CPpmd7_Node_Ref *prev = &head;
- while (n)
- {
- CPpmd7_Node *node = NODE(n);
- UInt32 nu = node->NU;
- n = node->Next;
- if (nu == 0)
- {
- *prev = n;
- continue;
- }
- prev = &node->Next;
- for (;;)
- {
- CPpmd7_Node *node2 = node + nu;
- nu += node2->NU;
- if (node2->Stamp != EMPTY_NODE || nu >= 0x10000)
- break;
- node->NU = (UInt16)nu;
- node2->NU = 0;
- }
- }
- }
-
- /* Fill lists of free blocks */
- for (n = head; n != 0;)
- {
- CPpmd7_Node *node = NODE(n);
- UInt32 nu = node->NU;
- unsigned i;
- n = node->Next;
- if (nu == 0)
- continue;
- for (; nu > 128; nu -= 128, node += 128)
- Ppmd7_InsertNode(p, node, PPMD_NUM_INDEXES - 1);
- if (I2U(i = U2I(nu)) != nu)
- {
- unsigned k = I2U(--i);
- Ppmd7_InsertNode(p, node + k, (unsigned)nu - k - 1);
- }
- Ppmd7_InsertNode(p, node, i);
- }
-}
-
-
-Z7_NO_INLINE
-static void *Ppmd7_AllocUnitsRare(CPpmd7 *p, unsigned indx)
-{
- unsigned i;
-
- if (p->GlueCount == 0)
- {
- Ppmd7_GlueFreeBlocks(p);
- if (p->FreeList[indx] != 0)
- return Ppmd7_RemoveNode(p, indx);
- }
-
- i = indx;
-
- do
- {
- if (++i == PPMD_NUM_INDEXES)
- {
- UInt32 numBytes = U2B(I2U(indx));
- Byte *us = p->UnitsStart;
- p->GlueCount--;
- return ((UInt32)(us - p->Text) > numBytes) ? (p->UnitsStart = us - numBytes) : NULL;
- }
- }
- while (p->FreeList[i] == 0);
-
- {
- void *block = Ppmd7_RemoveNode(p, i);
- Ppmd7_SplitBlock(p, block, i, indx);
- return block;
- }
-}
-
-
-static void *Ppmd7_AllocUnits(CPpmd7 *p, unsigned indx)
-{
- if (p->FreeList[indx] != 0)
- return Ppmd7_RemoveNode(p, indx);
- {
- UInt32 numBytes = U2B(I2U(indx));
- Byte *lo = p->LoUnit;
- if ((UInt32)(p->HiUnit - lo) >= numBytes)
- {
- p->LoUnit = lo + numBytes;
- return lo;
- }
- }
- return Ppmd7_AllocUnitsRare(p, indx);
-}
-
-
-#define MEM_12_CPY(dest, src, num) \
- { UInt32 *d = (UInt32 *)dest; const UInt32 *z = (const UInt32 *)src; UInt32 n = num; \
- do { d[0] = z[0]; d[1] = z[1]; d[2] = z[2]; z += 3; d += 3; } while (--n); }
-
-
-/*
-static void *ShrinkUnits(CPpmd7 *p, void *oldPtr, unsigned oldNU, unsigned newNU)
-{
- unsigned i0 = U2I(oldNU);
- unsigned i1 = U2I(newNU);
- if (i0 == i1)
- return oldPtr;
- if (p->FreeList[i1] != 0)
- {
- void *ptr = Ppmd7_RemoveNode(p, i1);
- MEM_12_CPY(ptr, oldPtr, newNU)
- Ppmd7_InsertNode(p, oldPtr, i0);
- return ptr;
- }
- Ppmd7_SplitBlock(p, oldPtr, i0, i1);
- return oldPtr;
-}
-*/
-
-
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-static void SetSuccessor(CPpmd_State *p, CPpmd_Void_Ref v)
-{
- Ppmd_SET_SUCCESSOR(p, v)
-}
-
-
-
-Z7_NO_INLINE
-static
-void Ppmd7_RestartModel(CPpmd7 *p)
-{
- unsigned i, k;
-
- memset(p->FreeList, 0, sizeof(p->FreeList));
-
- p->Text = p->Base + p->AlignOffset;
- p->HiUnit = p->Text + p->Size;
- p->LoUnit = p->UnitsStart = p->HiUnit - p->Size / 8 / UNIT_SIZE * 7 * UNIT_SIZE;
- p->GlueCount = 0;
-
- p->OrderFall = p->MaxOrder;
- p->RunLength = p->InitRL = -(Int32)((p->MaxOrder < 12) ? p->MaxOrder : 12) - 1;
- p->PrevSuccess = 0;
-
- {
- CPpmd7_Context *mc = (PPMD7_CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */
- CPpmd_State *s = (CPpmd_State *)p->LoUnit; /* Ppmd7_AllocUnits(p, PPMD_NUM_INDEXES - 1); */
-
- p->LoUnit += U2B(256 / 2);
- p->MaxContext = p->MinContext = mc;
- p->FoundState = s;
-
- mc->NumStats = 256;
- mc->Union2.SummFreq = 256 + 1;
- mc->Union4.Stats = REF(s);
- mc->Suffix = 0;
-
- for (i = 0; i < 256; i++, s++)
- {
- s->Symbol = (Byte)i;
- s->Freq = 1;
- SetSuccessor(s, 0);
- }
-
- #ifdef PPMD7_ORDER_0_SUPPPORT
- if (p->MaxOrder == 0)
- {
- CPpmd_Void_Ref r = REF(mc);
- s = p->FoundState;
- for (i = 0; i < 256; i++, s++)
- SetSuccessor(s, r);
- return;
- }
- #endif
- }
-
- for (i = 0; i < 128; i++)
-
-
-
- for (k = 0; k < 8; k++)
- {
- unsigned m;
- UInt16 *dest = p->BinSumm[i] + k;
- const UInt16 val = (UInt16)(PPMD_BIN_SCALE - PPMD7_kInitBinEsc[k] / (i + 2));
- for (m = 0; m < 64; m += 8)
- dest[m] = val;
- }
-
-
- for (i = 0; i < 25; i++)
- {
-
- CPpmd_See *s = p->See[i];
-
-
-
- unsigned summ = ((5 * i + 10) << (PPMD_PERIOD_BITS - 4));
- for (k = 0; k < 16; k++, s++)
- {
- s->Summ = (UInt16)summ;
- s->Shift = (PPMD_PERIOD_BITS - 4);
- s->Count = 4;
- }
- }
-
- p->DummySee.Summ = 0; /* unused */
- p->DummySee.Shift = PPMD_PERIOD_BITS;
- p->DummySee.Count = 64; /* unused */
-}
-
-
-void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder)
-{
- p->MaxOrder = maxOrder;
-
- Ppmd7_RestartModel(p);
-}
-
-
-
-/*
- Ppmd7_CreateSuccessors()
- It's called when (FoundState->Successor) is RAW-Successor,
- that is the link to position in Raw text.
- So we create Context records and write the links to
- FoundState->Successor and to identical RAW-Successors in suffix
- contexts of MinContex.
-
- The function returns:
- if (OrderFall == 0) then MinContext is already at MAX order,
- { return pointer to new or existing context of same MAX order }
- else
- { return pointer to new real context that will be (Order+1) in comparison with MinContext
-
- also it can return pointer to real context of same order,
-*/
-
-Z7_NO_INLINE
-static PPMD7_CTX_PTR Ppmd7_CreateSuccessors(CPpmd7 *p)
-{
- PPMD7_CTX_PTR c = p->MinContext;
- CPpmd_Byte_Ref upBranch = (CPpmd_Byte_Ref)SUCCESSOR(p->FoundState);
- Byte newSym, newFreq;
- unsigned numPs = 0;
- CPpmd_State *ps[PPMD7_MAX_ORDER];
-
- if (p->OrderFall != 0)
- ps[numPs++] = p->FoundState;
-
- while (c->Suffix)
- {
- CPpmd_Void_Ref successor;
- CPpmd_State *s;
- c = SUFFIX(c);
-
-
- if (c->NumStats != 1)
- {
- Byte sym = p->FoundState->Symbol;
- for (s = STATS(c); s->Symbol != sym; s++);
-
- }
- else
- {
- s = ONE_STATE(c);
-
- }
- successor = SUCCESSOR(s);
- if (successor != upBranch)
- {
- // (c) is real record Context here,
- c = CTX(successor);
- if (numPs == 0)
- {
- // (c) is real record MAX Order Context here,
- // So we don't need to create any new contexts.
- return c;
- }
- break;
- }
- ps[numPs++] = s;
- }
-
- // All created contexts will have single-symbol with new RAW-Successor
- // All new RAW-Successors will point to next position in RAW text
- // after FoundState->Successor
-
- newSym = *(const Byte *)Ppmd7_GetPtr(p, upBranch);
- upBranch++;
-
-
- if (c->NumStats == 1)
- newFreq = ONE_STATE(c)->Freq;
- else
- {
- UInt32 cf, s0;
- CPpmd_State *s;
- for (s = STATS(c); s->Symbol != newSym; s++);
- cf = (UInt32)s->Freq - 1;
- s0 = (UInt32)c->Union2.SummFreq - c->NumStats - cf;
- /*
- cf - is frequency of symbol that will be Successor in new context records.
- s0 - is commulative frequency sum of another symbols from parent context.
- max(newFreq)= (s->Freq + 1), when (s0 == 1)
- we have requirement (Ppmd7Context_OneState()->Freq <= 128) in BinSumm[]
- so (s->Freq < 128) - is requirement for multi-symbol contexts
- */
- newFreq = (Byte)(1 + ((2 * cf <= s0) ? (5 * cf > s0) : (2 * cf + s0 - 1) / (2 * s0) + 1));
- }
-
- // Create new single-symbol contexts from low order to high order in loop
-
- do
- {
- PPMD7_CTX_PTR c1;
- /* = AllocContext(p); */
- if (p->HiUnit != p->LoUnit)
- c1 = (PPMD7_CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE);
- else if (p->FreeList[0] != 0)
- c1 = (PPMD7_CTX_PTR)Ppmd7_RemoveNode(p, 0);
- else
- {
- c1 = (PPMD7_CTX_PTR)Ppmd7_AllocUnitsRare(p, 0);
- if (!c1)
- return NULL;
- }
-
- c1->NumStats = 1;
- ONE_STATE(c1)->Symbol = newSym;
- ONE_STATE(c1)->Freq = newFreq;
- SetSuccessor(ONE_STATE(c1), upBranch);
- c1->Suffix = REF(c);
- SetSuccessor(ps[--numPs], REF(c1));
- c = c1;
- }
- while (numPs != 0);
-
- return c;
-}
-
-
-
-#define SWAP_STATES(s) \
- { CPpmd_State tmp = s[0]; s[0] = s[-1]; s[-1] = tmp; }
-
-
-void Ppmd7_UpdateModel(CPpmd7 *p);
-Z7_NO_INLINE
-void Ppmd7_UpdateModel(CPpmd7 *p)
-{
- CPpmd_Void_Ref maxSuccessor, minSuccessor;
- PPMD7_CTX_PTR c, mc;
- unsigned s0, ns;
-
-
-
- if (p->FoundState->Freq < MAX_FREQ / 4 && p->MinContext->Suffix != 0)
- {
- /* Update Freqs in Suffix Context */
-
- c = SUFFIX(p->MinContext);
-
- if (c->NumStats == 1)
- {
- CPpmd_State *s = ONE_STATE(c);
- if (s->Freq < 32)
- s->Freq++;
- }
- else
- {
- CPpmd_State *s = STATS(c);
- Byte sym = p->FoundState->Symbol;
-
- if (s->Symbol != sym)
- {
- do
- {
- // s++; if (s->Symbol == sym) break;
- s++;
- }
- while (s->Symbol != sym);
-
- if (s[0].Freq >= s[-1].Freq)
- {
- SWAP_STATES(s)
- s--;
- }
- }
-
- if (s->Freq < MAX_FREQ - 9)
- {
- s->Freq = (Byte)(s->Freq + 2);
- c->Union2.SummFreq = (UInt16)(c->Union2.SummFreq + 2);
- }
- }
- }
-
-
- if (p->OrderFall == 0)
- {
- /* MAX ORDER context */
- /* (FoundState->Successor) is RAW-Successor. */
- p->MaxContext = p->MinContext = Ppmd7_CreateSuccessors(p);
- if (!p->MinContext)
- {
- Ppmd7_RestartModel(p);
- return;
- }
- SetSuccessor(p->FoundState, REF(p->MinContext));
- return;
- }
-
-
- /* NON-MAX ORDER context */
-
- {
- Byte *text = p->Text;
- *text++ = p->FoundState->Symbol;
- p->Text = text;
- if (text >= p->UnitsStart)
- {
- Ppmd7_RestartModel(p);
- return;
- }
- maxSuccessor = REF(text);
- }
-
- minSuccessor = SUCCESSOR(p->FoundState);
-
- if (minSuccessor)
- {
- // there is Successor for FoundState in MinContext.
- // So the next context will be one order higher than MinContext.
-
- if (minSuccessor <= maxSuccessor)
- {
- // minSuccessor is RAW-Successor. So we will create real contexts records:
- PPMD7_CTX_PTR cs = Ppmd7_CreateSuccessors(p);
- if (!cs)
- {
- Ppmd7_RestartModel(p);
- return;
- }
- minSuccessor = REF(cs);
- }
-
- // minSuccessor now is real Context pointer that points to existing (Order+1) context
-
- if (--p->OrderFall == 0)
- {
- /*
- if we move to MaxOrder context, then minSuccessor will be common Succesor for both:
- MinContext that is (MaxOrder - 1)
- MaxContext that is (MaxOrder)
- so we don't need new RAW-Successor, and we can use real minSuccessor
- as succssors for both MinContext and MaxContext.
- */
- maxSuccessor = minSuccessor;
-
- /*
- if (MaxContext != MinContext)
- {
- there was order fall from MaxOrder and we don't need current symbol
- to transfer some RAW-Succesors to real contexts.
- So we roll back pointer in raw data for one position.
- }
- */
- p->Text -= (p->MaxContext != p->MinContext);
- }
- }
- else
- {
- /*
- FoundState has NULL-Successor here.
- And only root 0-order context can contain NULL-Successors.
- We change Successor in FoundState to RAW-Successor,
- And next context will be same 0-order root Context.
- */
- SetSuccessor(p->FoundState, maxSuccessor);
- minSuccessor = REF(p->MinContext);
- }
-
- mc = p->MinContext;
- c = p->MaxContext;
-
- p->MaxContext = p->MinContext = CTX(minSuccessor);
-
- if (c == mc)
- return;
-
- // s0 : is pure Escape Freq
- s0 = mc->Union2.SummFreq - (ns = mc->NumStats) - ((unsigned)p->FoundState->Freq - 1);
-
- do
- {
- unsigned ns1;
- UInt32 sum;
-
- if ((ns1 = c->NumStats) != 1)
- {
- if ((ns1 & 1) == 0)
- {
- /* Expand for one UNIT */
- unsigned oldNU = ns1 >> 1;
- unsigned i = U2I(oldNU);
- if (i != U2I((size_t)oldNU + 1))
- {
- void *ptr = Ppmd7_AllocUnits(p, i + 1);
- void *oldPtr;
- if (!ptr)
- {
- Ppmd7_RestartModel(p);
- return;
- }
- oldPtr = STATS(c);
- MEM_12_CPY(ptr, oldPtr, oldNU)
- Ppmd7_InsertNode(p, oldPtr, i);
- c->Union4.Stats = STATS_REF(ptr);
- }
- }
- sum = c->Union2.SummFreq;
- /* max increase of Escape_Freq is 3 here.
- total increase of Union2.SummFreq for all symbols is less than 256 here */
- sum += (UInt32)(2 * ns1 < ns) + 2 * ((unsigned)(4 * ns1 <= ns) & (sum <= 8 * ns1));
- /* original PPMdH uses 16-bit variable for (sum) here.
- But (sum < 0x9000). So we don't truncate (sum) to 16-bit */
- // sum = (UInt16)sum;
- }
- else
- {
- // instead of One-symbol context we create 2-symbol context
- CPpmd_State *s = (CPpmd_State*)Ppmd7_AllocUnits(p, 0);
- if (!s)
- {
- Ppmd7_RestartModel(p);
- return;
- }
- {
- unsigned freq = c->Union2.State2.Freq;
- // s = *ONE_STATE(c);
- s->Symbol = c->Union2.State2.Symbol;
- s->Successor_0 = c->Union4.State4.Successor_0;
- s->Successor_1 = c->Union4.State4.Successor_1;
- // SetSuccessor(s, c->Union4.Stats); // call it only for debug purposes to check the order of
- // (Successor_0 and Successor_1) in LE/BE.
- c->Union4.Stats = REF(s);
- if (freq < MAX_FREQ / 4 - 1)
- freq <<= 1;
- else
- freq = MAX_FREQ - 4;
- // (max(s->freq) == 120), when we convert from 1-symbol into 2-symbol context
- s->Freq = (Byte)freq;
- // max(InitEsc = PPMD7_kExpEscape[*]) is 25. So the max(escapeFreq) is 26 here
- sum = freq + p->InitEsc + (ns > 3);
- }
- }
-
- {
- CPpmd_State *s = STATS(c) + ns1;
- UInt32 cf = 2 * (sum + 6) * (UInt32)p->FoundState->Freq;
- UInt32 sf = (UInt32)s0 + sum;
- s->Symbol = p->FoundState->Symbol;
- c->NumStats = (UInt16)(ns1 + 1);
- SetSuccessor(s, maxSuccessor);
-
- if (cf < 6 * sf)
- {
- cf = (UInt32)1 + (cf > sf) + (cf >= 4 * sf);
- sum += 3;
- /* It can add (0, 1, 2) to Escape_Freq */
- }
- else
- {
- cf = (UInt32)4 + (cf >= 9 * sf) + (cf >= 12 * sf) + (cf >= 15 * sf);
- sum += cf;
- }
-
- c->Union2.SummFreq = (UInt16)sum;
- s->Freq = (Byte)cf;
- }
- c = SUFFIX(c);
- }
- while (c != mc);
-}
-
-
-
-Z7_NO_INLINE
-static void Ppmd7_Rescale(CPpmd7 *p)
-{
- unsigned i, adder, sumFreq, escFreq;
- CPpmd_State *stats = STATS(p->MinContext);
- CPpmd_State *s = p->FoundState;
-
- /* Sort the list by Freq */
- if (s != stats)
- {
- CPpmd_State tmp = *s;
- do
- s[0] = s[-1];
- while (--s != stats);
- *s = tmp;
- }
-
- sumFreq = s->Freq;
- escFreq = p->MinContext->Union2.SummFreq - sumFreq;
-
- /*
- if (p->OrderFall == 0), adder = 0 : it's allowed to remove symbol from MAX Order context
- if (p->OrderFall != 0), adder = 1 : it's NOT allowed to remove symbol from NON-MAX Order context
- */
-
- adder = (p->OrderFall != 0);
-
- #ifdef PPMD7_ORDER_0_SUPPPORT
- adder |= (p->MaxOrder == 0); // we don't remove symbols from order-0 context
- #endif
-
- sumFreq = (sumFreq + 4 + adder) >> 1;
- i = (unsigned)p->MinContext->NumStats - 1;
- s->Freq = (Byte)sumFreq;
-
- do
- {
- unsigned freq = (++s)->Freq;
- escFreq -= freq;
- freq = (freq + adder) >> 1;
- sumFreq += freq;
- s->Freq = (Byte)freq;
- if (freq > s[-1].Freq)
- {
- CPpmd_State tmp = *s;
- CPpmd_State *s1 = s;
- do
- {
- s1[0] = s1[-1];
- }
- while (--s1 != stats && freq > s1[-1].Freq);
- *s1 = tmp;
- }
- }
- while (--i);
-
- if (s->Freq == 0)
- {
- /* Remove all items with Freq == 0 */
- CPpmd7_Context *mc;
- unsigned numStats, numStatsNew, n0, n1;
-
- i = 0; do { i++; } while ((--s)->Freq == 0);
-
- /* We increase (escFreq) for the number of removed symbols.
- So we will have (0.5) increase for Escape_Freq in avarage per
- removed symbol after Escape_Freq halving */
- escFreq += i;
- mc = p->MinContext;
- numStats = mc->NumStats;
- numStatsNew = numStats - i;
- mc->NumStats = (UInt16)(numStatsNew);
- n0 = (numStats + 1) >> 1;
-
- if (numStatsNew == 1)
- {
- /* Create Single-Symbol context */
- unsigned freq = stats->Freq;
-
- do
- {
- escFreq >>= 1;
- freq = (freq + 1) >> 1;
- }
- while (escFreq > 1);
-
- s = ONE_STATE(mc);
- *s = *stats;
- s->Freq = (Byte)freq; // (freq <= 260 / 4)
- p->FoundState = s;
- Ppmd7_InsertNode(p, stats, U2I(n0));
- return;
- }
-
- n1 = (numStatsNew + 1) >> 1;
- if (n0 != n1)
- {
- // p->MinContext->Union4.Stats = STATS_REF(ShrinkUnits(p, stats, n0, n1));
- unsigned i0 = U2I(n0);
- unsigned i1 = U2I(n1);
- if (i0 != i1)
- {
- if (p->FreeList[i1] != 0)
- {
- void *ptr = Ppmd7_RemoveNode(p, i1);
- p->MinContext->Union4.Stats = STATS_REF(ptr);
- MEM_12_CPY(ptr, (const void *)stats, n1)
- Ppmd7_InsertNode(p, stats, i0);
- }
- else
- Ppmd7_SplitBlock(p, stats, i0, i1);
- }
- }
- }
- {
- CPpmd7_Context *mc = p->MinContext;
- mc->Union2.SummFreq = (UInt16)(sumFreq + escFreq - (escFreq >> 1));
- // Escape_Freq halving here
- p->FoundState = STATS(mc);
- }
-}
-
-
-CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *escFreq)
-{
- CPpmd_See *see;
- const CPpmd7_Context *mc = p->MinContext;
- unsigned numStats = mc->NumStats;
- if (numStats != 256)
- {
- unsigned nonMasked = numStats - numMasked;
- see = p->See[(unsigned)p->NS2Indx[(size_t)nonMasked - 1]]
- + (nonMasked < (unsigned)SUFFIX(mc)->NumStats - numStats)
- + 2 * (unsigned)(mc->Union2.SummFreq < 11 * numStats)
- + 4 * (unsigned)(numMasked > nonMasked) +
- p->HiBitsFlag;
- {
- // if (see->Summ) field is larger than 16-bit, we need only low 16 bits of Summ
- unsigned summ = (UInt16)see->Summ; // & 0xFFFF
- unsigned r = (summ >> see->Shift);
- see->Summ = (UInt16)(summ - r);
- *escFreq = r + (r == 0);
- }
- }
- else
- {
- see = &p->DummySee;
- *escFreq = 1;
- }
- return see;
-}
-
-
-static void Ppmd7_NextContext(CPpmd7 *p)
-{
- PPMD7_CTX_PTR c = CTX(SUCCESSOR(p->FoundState));
- if (p->OrderFall == 0 && (const Byte *)c > p->Text)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd7_UpdateModel(p);
-}
-
-
-void Ppmd7_Update1(CPpmd7 *p)
-{
- CPpmd_State *s = p->FoundState;
- unsigned freq = s->Freq;
- freq += 4;
- p->MinContext->Union2.SummFreq = (UInt16)(p->MinContext->Union2.SummFreq + 4);
- s->Freq = (Byte)freq;
- if (freq > s[-1].Freq)
- {
- SWAP_STATES(s)
- p->FoundState = --s;
- if (freq > MAX_FREQ)
- Ppmd7_Rescale(p);
- }
- Ppmd7_NextContext(p);
-}
-
-
-void Ppmd7_Update1_0(CPpmd7 *p)
-{
- CPpmd_State *s = p->FoundState;
- CPpmd7_Context *mc = p->MinContext;
- unsigned freq = s->Freq;
- unsigned summFreq = mc->Union2.SummFreq;
- p->PrevSuccess = (2 * freq > summFreq);
- p->RunLength += (int)p->PrevSuccess;
- mc->Union2.SummFreq = (UInt16)(summFreq + 4);
- freq += 4;
- s->Freq = (Byte)freq;
- if (freq > MAX_FREQ)
- Ppmd7_Rescale(p);
- Ppmd7_NextContext(p);
-}
-
-
-/*
-void Ppmd7_UpdateBin(CPpmd7 *p)
-{
- unsigned freq = p->FoundState->Freq;
- p->FoundState->Freq = (Byte)(freq + (freq < 128));
- p->PrevSuccess = 1;
- p->RunLength++;
- Ppmd7_NextContext(p);
-}
-*/
-
-void Ppmd7_Update2(CPpmd7 *p)
-{
- CPpmd_State *s = p->FoundState;
- unsigned freq = s->Freq;
- freq += 4;
- p->RunLength = p->InitRL;
- p->MinContext->Union2.SummFreq = (UInt16)(p->MinContext->Union2.SummFreq + 4);
- s->Freq = (Byte)freq;
- if (freq > MAX_FREQ)
- Ppmd7_Rescale(p);
- Ppmd7_UpdateModel(p);
-}
-
-
-
-/*
-PPMd Memory Map:
-{
- [ 0 ] contains subset of original raw text, that is required to create context
- records, Some symbols are not written, when max order context was reached
- [ Text ] free area
- [ UnitsStart ] CPpmd_State vectors and CPpmd7_Context records
- [ LoUnit ] free area for CPpmd_State and CPpmd7_Context items
-[ HiUnit ] CPpmd7_Context records
- [ Size ] end of array
-}
-
-These addresses don't cross at any time.
-And the following condtions is true for addresses:
- (0 <= Text < UnitsStart <= LoUnit <= HiUnit <= Size)
-
-Raw text is BYTE--aligned.
-the data in block [ UnitsStart ... Size ] contains 12-bytes aligned UNITs.
-
-Last UNIT of array at offset (Size - 12) is root order-0 CPpmd7_Context record.
-The code can free UNITs memory blocks that were allocated to store CPpmd_State vectors.
-The code doesn't free UNITs allocated for CPpmd7_Context records.
-
-The code calls Ppmd7_RestartModel(), when there is no free memory for allocation.
-And Ppmd7_RestartModel() changes the state to orignal start state, with full free block.
-
-
-The code allocates UNITs with the following order:
-
-Allocation of 1 UNIT for Context record
- - from free space (HiUnit) down to (LoUnit)
- - from FreeList[0]
- - Ppmd7_AllocUnitsRare()
-
-Ppmd7_AllocUnits() for CPpmd_State vectors:
- - from FreeList[i]
- - from free space (LoUnit) up to (HiUnit)
- - Ppmd7_AllocUnitsRare()
-
-Ppmd7_AllocUnitsRare()
- - if (GlueCount == 0)
- { Glue lists, GlueCount = 255, allocate from FreeList[i]] }
- - loop for all higher sized FreeList[...] lists
- - from (UnitsStart - Text), GlueCount--
- - ERROR
-
-
-Each Record with Context contains the CPpmd_State vector, where each
-CPpmd_State contains the link to Successor.
-There are 3 types of Successor:
- 1) NULL-Successor - NULL pointer. NULL-Successor links can be stored
- only in 0-order Root Context Record.
- We use 0 value as NULL-Successor
- 2) RAW-Successor - the link to position in raw text,
- that "RAW-Successor" is being created after first
- occurrence of new symbol for some existing context record.
- (RAW-Successor > 0).
- 3) RECORD-Successor - the link to CPpmd7_Context record of (Order+1),
- that record is being created when we go via RAW-Successor again.
-
-For any successors at any time: the following condtions are true for Successor links:
-(NULL-Successor < RAW-Successor < UnitsStart <= RECORD-Successor)
-
-
----------- Symbol Frequency, SummFreq and Range in Range_Coder ----------
-
-CPpmd7_Context::SummFreq = Sum(Stats[].Freq) + Escape_Freq
-
-The PPMd code tries to fulfill the condition:
- (SummFreq <= (256 * 128 = RC::kBot))
-
-We have (Sum(Stats[].Freq) <= 256 * 124), because of (MAX_FREQ = 124)
-So (4 = 128 - 124) is average reserve for Escape_Freq for each symbol.
-If (CPpmd_State::Freq) is not aligned for 4, the reserve can be 5, 6 or 7.
-SummFreq and Escape_Freq can be changed in Ppmd7_Rescale() and *Update*() functions.
-Ppmd7_Rescale() can remove symbols only from max-order contexts. So Escape_Freq can increase after multiple calls of Ppmd7_Rescale() for
-max-order context.
-
-When the PPMd code still break (Total <= RC::Range) condition in range coder,
-we have two ways to resolve that problem:
- 1) we can report error, if we want to keep compatibility with original PPMd code that has no fix for such cases.
- 2) we can reduce (Total) value to (RC::Range) by reducing (Escape_Freq) part of (Total) value.
-*/
-
-#undef MAX_FREQ
-#undef UNIT_SIZE
-#undef U2B
-#undef U2I
-#undef I2U
-#undef I2U_UInt16
-#undef REF
-#undef STATS_REF
-#undef CTX
-#undef STATS
-#undef ONE_STATE
-#undef SUFFIX
-#undef NODE
-#undef EMPTY_NODE
-#undef MEM_12_CPY
-#undef SUCCESSOR
-#undef SWAP_STATES
diff --git a/3rdparty/7z/src/Ppmd7.h b/3rdparty/7z/src/Ppmd7.h
deleted file mode 100644
index 65c22ae9a1..0000000000
--- a/3rdparty/7z/src/Ppmd7.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* Ppmd7.h -- Ppmd7 (PPMdH) compression codec
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.H (2001): Dmitry Shkarin : Public domain */
-
-
-#ifndef ZIP7_INC_PPMD7_H
-#define ZIP7_INC_PPMD7_H
-
-#include "Ppmd.h"
-
-EXTERN_C_BEGIN
-
-#define PPMD7_MIN_ORDER 2
-#define PPMD7_MAX_ORDER 64
-
-#define PPMD7_MIN_MEM_SIZE (1 << 11)
-#define PPMD7_MAX_MEM_SIZE (0xFFFFFFFF - 12 * 3)
-
-struct CPpmd7_Context_;
-
-typedef Ppmd_Ref_Type(struct CPpmd7_Context_) CPpmd7_Context_Ref;
-
-// MY_CPU_pragma_pack_push_1
-
-typedef struct CPpmd7_Context_
-{
- UInt16 NumStats;
-
-
- union
- {
- UInt16 SummFreq;
- CPpmd_State2 State2;
- } Union2;
-
- union
- {
- CPpmd_State_Ref Stats;
- CPpmd_State4 State4;
- } Union4;
-
- CPpmd7_Context_Ref Suffix;
-} CPpmd7_Context;
-
-// MY_CPU_pragma_pop
-
-#define Ppmd7Context_OneState(p) ((CPpmd_State *)&(p)->Union2)
-
-
-
-
-typedef struct
-{
- UInt32 Range;
- UInt32 Code;
- UInt32 Low;
- IByteInPtr Stream;
-} CPpmd7_RangeDec;
-
-
-typedef struct
-{
- UInt32 Range;
- Byte Cache;
- // Byte _dummy_[3];
- UInt64 Low;
- UInt64 CacheSize;
- IByteOutPtr Stream;
-} CPpmd7z_RangeEnc;
-
-
-typedef struct
-{
- CPpmd7_Context *MinContext, *MaxContext;
- CPpmd_State *FoundState;
- unsigned OrderFall, InitEsc, PrevSuccess, MaxOrder, HiBitsFlag;
- Int32 RunLength, InitRL; /* must be 32-bit at least */
-
- UInt32 Size;
- UInt32 GlueCount;
- UInt32 AlignOffset;
- Byte *Base, *LoUnit, *HiUnit, *Text, *UnitsStart;
-
-
-
-
- union
- {
- CPpmd7_RangeDec dec;
- CPpmd7z_RangeEnc enc;
- } rc;
-
- Byte Indx2Units[PPMD_NUM_INDEXES + 2]; // +2 for alignment
- Byte Units2Indx[128];
- CPpmd_Void_Ref FreeList[PPMD_NUM_INDEXES];
-
- Byte NS2BSIndx[256], NS2Indx[256];
- Byte ExpEscape[16];
- CPpmd_See DummySee, See[25][16];
- UInt16 BinSumm[128][64];
- // int LastSymbol;
-} CPpmd7;
-
-
-void Ppmd7_Construct(CPpmd7 *p);
-BoolInt Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAllocPtr alloc);
-void Ppmd7_Free(CPpmd7 *p, ISzAllocPtr alloc);
-void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder);
-#define Ppmd7_WasAllocated(p) ((p)->Base != NULL)
-
-
-/* ---------- Internal Functions ---------- */
-
-#define Ppmd7_GetPtr(p, ptr) Ppmd_GetPtr(p, ptr)
-#define Ppmd7_GetContext(p, ptr) Ppmd_GetPtr_Type(p, ptr, CPpmd7_Context)
-#define Ppmd7_GetStats(p, ctx) Ppmd_GetPtr_Type(p, (ctx)->Union4.Stats, CPpmd_State)
-
-void Ppmd7_Update1(CPpmd7 *p);
-void Ppmd7_Update1_0(CPpmd7 *p);
-void Ppmd7_Update2(CPpmd7 *p);
-
-#define PPMD7_HiBitsFlag_3(sym) ((((unsigned)sym + 0xC0) >> (8 - 3)) & (1 << 3))
-#define PPMD7_HiBitsFlag_4(sym) ((((unsigned)sym + 0xC0) >> (8 - 4)) & (1 << 4))
-// #define PPMD7_HiBitsFlag_3(sym) ((sym) < 0x40 ? 0 : (1 << 3))
-// #define PPMD7_HiBitsFlag_4(sym) ((sym) < 0x40 ? 0 : (1 << 4))
-
-#define Ppmd7_GetBinSumm(p) \
- &p->BinSumm[(size_t)(unsigned)Ppmd7Context_OneState(p->MinContext)->Freq - 1] \
- [ p->PrevSuccess + ((p->RunLength >> 26) & 0x20) \
- + p->NS2BSIndx[(size_t)Ppmd7_GetContext(p, p->MinContext->Suffix)->NumStats - 1] \
- + PPMD7_HiBitsFlag_4(Ppmd7Context_OneState(p->MinContext)->Symbol) \
- + (p->HiBitsFlag = PPMD7_HiBitsFlag_3(p->FoundState->Symbol)) ]
-
-CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *scale);
-
-
-/*
-We support two versions of Ppmd7 (PPMdH) methods that use same CPpmd7 structure:
- 1) Ppmd7a_*: original PPMdH
- 2) Ppmd7z_*: modified PPMdH with 7z Range Coder
-Ppmd7_*: the structures and functions that are common for both versions of PPMd7 (PPMdH)
-*/
-
-/* ---------- Decode ---------- */
-
-#define PPMD7_SYM_END (-1)
-#define PPMD7_SYM_ERROR (-2)
-
-/*
-You must set (CPpmd7::rc.dec.Stream) before Ppmd7*_RangeDec_Init()
-
-Ppmd7*_DecodeSymbol()
-out:
- >= 0 : decoded byte
- -1 : PPMD7_SYM_END : End of payload marker
- -2 : PPMD7_SYM_ERROR : Data error
-*/
-
-/* Ppmd7a_* : original PPMdH */
-BoolInt Ppmd7a_RangeDec_Init(CPpmd7_RangeDec *p);
-#define Ppmd7a_RangeDec_IsFinishedOK(p) ((p)->Code == 0)
-int Ppmd7a_DecodeSymbol(CPpmd7 *p);
-
-/* Ppmd7z_* : modified PPMdH with 7z Range Coder */
-BoolInt Ppmd7z_RangeDec_Init(CPpmd7_RangeDec *p);
-#define Ppmd7z_RangeDec_IsFinishedOK(p) ((p)->Code == 0)
-int Ppmd7z_DecodeSymbol(CPpmd7 *p);
-// Byte *Ppmd7z_DecodeSymbols(CPpmd7 *p, Byte *buf, const Byte *lim);
-
-
-/* ---------- Encode ---------- */
-
-void Ppmd7z_Init_RangeEnc(CPpmd7 *p);
-void Ppmd7z_Flush_RangeEnc(CPpmd7 *p);
-// void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol);
-void Ppmd7z_EncodeSymbols(CPpmd7 *p, const Byte *buf, const Byte *lim);
-
-EXTERN_C_END
-
-#endif
diff --git a/3rdparty/7z/src/Ppmd7Dec.c b/3rdparty/7z/src/Ppmd7Dec.c
deleted file mode 100644
index d45e24a948..0000000000
--- a/3rdparty/7z/src/Ppmd7Dec.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/* Ppmd7Dec.c -- Ppmd7z (PPMdH with 7z Range Coder) Decoder
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.H (2001): Dmitry Shkarin : Public domain */
-
-
-#include "Precomp.h"
-
-#include "Ppmd7.h"
-
-#define kTopValue ((UInt32)1 << 24)
-
-
-#define READ_BYTE(p) IByteIn_Read((p)->Stream)
-
-BoolInt Ppmd7z_RangeDec_Init(CPpmd7_RangeDec *p)
-{
- unsigned i;
- p->Code = 0;
- p->Range = 0xFFFFFFFF;
- if (READ_BYTE(p) != 0)
- return False;
- for (i = 0; i < 4; i++)
- p->Code = (p->Code << 8) | READ_BYTE(p);
- return (p->Code < 0xFFFFFFFF);
-}
-
-#define RC_NORM_BASE(p) if ((p)->Range < kTopValue) \
- { (p)->Code = ((p)->Code << 8) | READ_BYTE(p); (p)->Range <<= 8;
-
-#define RC_NORM_1(p) RC_NORM_BASE(p) }
-#define RC_NORM(p) RC_NORM_BASE(p) RC_NORM_BASE(p) }}
-
-// we must use only one type of Normalization from two: LOCAL or REMOTE
-#define RC_NORM_LOCAL(p) // RC_NORM(p)
-#define RC_NORM_REMOTE(p) RC_NORM(p)
-
-#define R (&p->rc.dec)
-
-Z7_FORCE_INLINE
-// Z7_NO_INLINE
-static void Ppmd7z_RD_Decode(CPpmd7 *p, UInt32 start, UInt32 size)
-{
-
-
- R->Code -= start * R->Range;
- R->Range *= size;
- RC_NORM_LOCAL(R)
-}
-
-#define RC_Decode(start, size) Ppmd7z_RD_Decode(p, start, size);
-#define RC_DecodeFinal(start, size) RC_Decode(start, size) RC_NORM_REMOTE(R)
-#define RC_GetThreshold(total) (R->Code / (R->Range /= (total)))
-
-
-#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
-// typedef CPpmd7_Context * CTX_PTR;
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-void Ppmd7_UpdateModel(CPpmd7 *p);
-
-#define MASK(sym) ((unsigned char *)charMask)[sym]
-// Z7_FORCE_INLINE
-// static
-int Ppmd7z_DecodeSymbol(CPpmd7 *p)
-{
- size_t charMask[256 / sizeof(size_t)];
-
- if (p->MinContext->NumStats != 1)
- {
- CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
- unsigned i;
- UInt32 count, hiCnt;
- const UInt32 summFreq = p->MinContext->Union2.SummFreq;
-
-
-
-
- count = RC_GetThreshold(summFreq);
- hiCnt = count;
-
- if ((Int32)(count -= s->Freq) < 0)
- {
- Byte sym;
- RC_DecodeFinal(0, s->Freq)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd7_Update1_0(p);
- return sym;
- }
-
- p->PrevSuccess = 0;
- i = (unsigned)p->MinContext->NumStats - 1;
-
- do
- {
- if ((Int32)(count -= (++s)->Freq) < 0)
- {
- Byte sym;
- RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd7_Update1(p);
- return sym;
- }
- }
- while (--i);
-
- if (hiCnt >= summFreq)
- return PPMD7_SYM_ERROR;
-
- hiCnt -= count;
- RC_Decode(hiCnt, summFreq - hiCnt)
-
- p->HiBitsFlag = PPMD7_HiBitsFlag_3(p->FoundState->Symbol);
- PPMD_SetAllBitsIn256Bytes(charMask)
- // i = p->MinContext->NumStats - 1;
- // do { MASK((--s)->Symbol) = 0; } while (--i);
- {
- CPpmd_State *s2 = Ppmd7_GetStats(p, p->MinContext);
- MASK(s->Symbol) = 0;
- do
- {
- unsigned sym0 = s2[0].Symbol;
- unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
- else
- {
- CPpmd_State *s = Ppmd7Context_OneState(p->MinContext);
- UInt16 *prob = Ppmd7_GetBinSumm(p);
- UInt32 pr = *prob;
- UInt32 size0 = (R->Range >> 14) * pr;
- pr = PPMD_UPDATE_PROB_1(pr);
-
- if (R->Code < size0)
- {
- Byte sym;
- *prob = (UInt16)(pr + (1 << PPMD_INT_BITS));
-
- // RangeDec_DecodeBit0(size0);
- R->Range = size0;
- RC_NORM_1(R)
- /* we can use single byte normalization here because of
- (min(BinSumm[][]) = 95) > (1 << (14 - 8)) */
-
- // sym = (p->FoundState = Ppmd7Context_OneState(p->MinContext))->Symbol;
- // Ppmd7_UpdateBin(p);
- {
- unsigned freq = s->Freq;
- CPpmd7_Context *c = CTX(SUCCESSOR(s));
- sym = s->Symbol;
- p->FoundState = s;
- p->PrevSuccess = 1;
- p->RunLength++;
- s->Freq = (Byte)(freq + (freq < 128));
- // NextContext(p);
- if (p->OrderFall == 0 && (const Byte *)c > p->Text)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd7_UpdateModel(p);
- }
- return sym;
- }
-
- *prob = (UInt16)pr;
- p->InitEsc = p->ExpEscape[pr >> 10];
-
- // RangeDec_DecodeBit1(size0);
-
- R->Code -= size0;
- R->Range -= size0;
- RC_NORM_LOCAL(R)
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- MASK(Ppmd7Context_OneState(p->MinContext)->Symbol) = 0;
- p->PrevSuccess = 0;
- }
-
- for (;;)
- {
- CPpmd_State *s, *s2;
- UInt32 freqSum, count, hiCnt;
-
- CPpmd_See *see;
- CPpmd7_Context *mc;
- unsigned numMasked;
- RC_NORM_REMOTE(R)
- mc = p->MinContext;
- numMasked = mc->NumStats;
-
- do
- {
- p->OrderFall++;
- if (!mc->Suffix)
- return PPMD7_SYM_END;
- mc = Ppmd7_GetContext(p, mc->Suffix);
- }
- while (mc->NumStats == numMasked);
-
- s = Ppmd7_GetStats(p, mc);
-
- {
- unsigned num = mc->NumStats;
- unsigned num2 = num / 2;
-
- num &= 1;
- hiCnt = (s->Freq & (unsigned)(MASK(s->Symbol))) & (0 - (UInt32)num);
- s += num;
- p->MinContext = mc;
-
- do
- {
- unsigned sym0 = s[0].Symbol;
- unsigned sym1 = s[1].Symbol;
- s += 2;
- hiCnt += (s[-2].Freq & (unsigned)(MASK(sym0)));
- hiCnt += (s[-1].Freq & (unsigned)(MASK(sym1)));
- }
- while (--num2);
- }
-
- see = Ppmd7_MakeEscFreq(p, numMasked, &freqSum);
- freqSum += hiCnt;
-
-
-
-
- count = RC_GetThreshold(freqSum);
-
- if (count < hiCnt)
- {
- Byte sym;
-
- s = Ppmd7_GetStats(p, p->MinContext);
- hiCnt = count;
- // count -= s->Freq & (unsigned)(MASK(s->Symbol));
- // if ((Int32)count >= 0)
- {
- for (;;)
- {
- count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
- // count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
- }
- }
- s--;
- RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
-
- // new (see->Summ) value can overflow over 16-bits in some rare cases
- Ppmd_See_UPDATE(see)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd7_Update2(p);
- return sym;
- }
-
- if (count >= freqSum)
- return PPMD7_SYM_ERROR;
-
- RC_Decode(hiCnt, freqSum - hiCnt)
-
- // We increase (see->Summ) for sum of Freqs of all non_Masked symbols.
- // new (see->Summ) value can overflow over 16-bits in some rare cases
- see->Summ = (UInt16)(see->Summ + freqSum);
-
- s = Ppmd7_GetStats(p, p->MinContext);
- s2 = s + p->MinContext->NumStats;
- do
- {
- MASK(s->Symbol) = 0;
- s++;
- }
- while (s != s2);
- }
-}
-
-/*
-Byte *Ppmd7z_DecodeSymbols(CPpmd7 *p, Byte *buf, const Byte *lim)
-{
- int sym = 0;
- if (buf != lim)
- do
- {
- sym = Ppmd7z_DecodeSymbol(p);
- if (sym < 0)
- break;
- *buf = (Byte)sym;
- }
- while (++buf < lim);
- p->LastSymbol = sym;
- return buf;
-}
-*/
-
-#undef kTopValue
-#undef READ_BYTE
-#undef RC_NORM_BASE
-#undef RC_NORM_1
-#undef RC_NORM
-#undef RC_NORM_LOCAL
-#undef RC_NORM_REMOTE
-#undef R
-#undef RC_Decode
-#undef RC_DecodeFinal
-#undef RC_GetThreshold
-#undef CTX
-#undef SUCCESSOR
-#undef MASK
diff --git a/3rdparty/7z/src/Ppmd7Enc.c b/3rdparty/7z/src/Ppmd7Enc.c
deleted file mode 100644
index 1d2d370aca..0000000000
--- a/3rdparty/7z/src/Ppmd7Enc.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/* Ppmd7Enc.c -- Ppmd7z (PPMdH with 7z Range Coder) Encoder
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.H (2001): Dmitry Shkarin : Public domain */
-
-
-#include "Precomp.h"
-
-#include "Ppmd7.h"
-
-#define kTopValue ((UInt32)1 << 24)
-
-#define R (&p->rc.enc)
-
-void Ppmd7z_Init_RangeEnc(CPpmd7 *p)
-{
- R->Low = 0;
- R->Range = 0xFFFFFFFF;
- R->Cache = 0;
- R->CacheSize = 1;
-}
-
-Z7_NO_INLINE
-static void Ppmd7z_RangeEnc_ShiftLow(CPpmd7 *p)
-{
- if ((UInt32)R->Low < (UInt32)0xFF000000 || (unsigned)(R->Low >> 32) != 0)
- {
- Byte temp = R->Cache;
- do
- {
- IByteOut_Write(R->Stream, (Byte)(temp + (Byte)(R->Low >> 32)));
- temp = 0xFF;
- }
- while (--R->CacheSize != 0);
- R->Cache = (Byte)((UInt32)R->Low >> 24);
- }
- R->CacheSize++;
- R->Low = (UInt32)((UInt32)R->Low << 8);
-}
-
-#define RC_NORM_BASE(p) if (R->Range < kTopValue) { R->Range <<= 8; Ppmd7z_RangeEnc_ShiftLow(p);
-#define RC_NORM_1(p) RC_NORM_BASE(p) }
-#define RC_NORM(p) RC_NORM_BASE(p) RC_NORM_BASE(p) }}
-
-// we must use only one type of Normalization from two: LOCAL or REMOTE
-#define RC_NORM_LOCAL(p) // RC_NORM(p)
-#define RC_NORM_REMOTE(p) RC_NORM(p)
-
-/*
-#define Ppmd7z_RangeEnc_Encode(p, start, _size_) \
- { UInt32 size = _size_; \
- R->Low += start * R->Range; \
- R->Range *= size; \
- RC_NORM_LOCAL(p); }
-*/
-
-Z7_FORCE_INLINE
-// Z7_NO_INLINE
-static void Ppmd7z_RangeEnc_Encode(CPpmd7 *p, UInt32 start, UInt32 size)
-{
- R->Low += start * R->Range;
- R->Range *= size;
- RC_NORM_LOCAL(p)
-}
-
-void Ppmd7z_Flush_RangeEnc(CPpmd7 *p)
-{
- unsigned i;
- for (i = 0; i < 5; i++)
- Ppmd7z_RangeEnc_ShiftLow(p);
-}
-
-
-
-#define RC_Encode(start, size) Ppmd7z_RangeEnc_Encode(p, start, size);
-#define RC_EncodeFinal(start, size) RC_Encode(start, size) RC_NORM_REMOTE(p)
-
-#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
-#define SUFFIX(ctx) CTX((ctx)->Suffix)
-// typedef CPpmd7_Context * CTX_PTR;
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-
-void Ppmd7_UpdateModel(CPpmd7 *p);
-
-#define MASK(sym) ((unsigned char *)charMask)[sym]
-
-Z7_FORCE_INLINE
-static
-void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
-{
- size_t charMask[256 / sizeof(size_t)];
-
- if (p->MinContext->NumStats != 1)
- {
- CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
- UInt32 sum;
- unsigned i;
-
-
-
-
- R->Range /= p->MinContext->Union2.SummFreq;
-
- if (s->Symbol == symbol)
- {
- // R->Range /= p->MinContext->Union2.SummFreq;
- RC_EncodeFinal(0, s->Freq)
- p->FoundState = s;
- Ppmd7_Update1_0(p);
- return;
- }
- p->PrevSuccess = 0;
- sum = s->Freq;
- i = (unsigned)p->MinContext->NumStats - 1;
- do
- {
- if ((++s)->Symbol == symbol)
- {
- // R->Range /= p->MinContext->Union2.SummFreq;
- RC_EncodeFinal(sum, s->Freq)
- p->FoundState = s;
- Ppmd7_Update1(p);
- return;
- }
- sum += s->Freq;
- }
- while (--i);
-
- // R->Range /= p->MinContext->Union2.SummFreq;
- RC_Encode(sum, p->MinContext->Union2.SummFreq - sum)
-
- p->HiBitsFlag = PPMD7_HiBitsFlag_3(p->FoundState->Symbol);
- PPMD_SetAllBitsIn256Bytes(charMask)
- // MASK(s->Symbol) = 0;
- // i = p->MinContext->NumStats - 1;
- // do { MASK((--s)->Symbol) = 0; } while (--i);
- {
- CPpmd_State *s2 = Ppmd7_GetStats(p, p->MinContext);
- MASK(s->Symbol) = 0;
- do
- {
- unsigned sym0 = s2[0].Symbol;
- unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
- else
- {
- UInt16 *prob = Ppmd7_GetBinSumm(p);
- CPpmd_State *s = Ppmd7Context_OneState(p->MinContext);
- UInt32 pr = *prob;
- const UInt32 bound = (R->Range >> 14) * pr;
- pr = PPMD_UPDATE_PROB_1(pr);
- if (s->Symbol == symbol)
- {
- *prob = (UInt16)(pr + (1 << PPMD_INT_BITS));
- // RangeEnc_EncodeBit_0(p, bound);
- R->Range = bound;
- RC_NORM_1(p)
-
- // p->FoundState = s;
- // Ppmd7_UpdateBin(p);
- {
- const unsigned freq = s->Freq;
- CPpmd7_Context *c = CTX(SUCCESSOR(s));
- p->FoundState = s;
- p->PrevSuccess = 1;
- p->RunLength++;
- s->Freq = (Byte)(freq + (freq < 128));
- // NextContext(p);
- if (p->OrderFall == 0 && (const Byte *)c > p->Text)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd7_UpdateModel(p);
- }
- return;
- }
-
- *prob = (UInt16)pr;
- p->InitEsc = p->ExpEscape[pr >> 10];
- // RangeEnc_EncodeBit_1(p, bound);
- R->Low += bound;
- R->Range -= bound;
- RC_NORM_LOCAL(p)
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- MASK(s->Symbol) = 0;
- p->PrevSuccess = 0;
- }
-
- for (;;)
- {
- CPpmd_See *see;
- CPpmd_State *s;
- UInt32 sum, escFreq;
- CPpmd7_Context *mc;
- unsigned i, numMasked;
-
- RC_NORM_REMOTE(p)
-
- mc = p->MinContext;
- numMasked = mc->NumStats;
-
- do
- {
- p->OrderFall++;
- if (!mc->Suffix)
- return; /* EndMarker (symbol = -1) */
- mc = Ppmd7_GetContext(p, mc->Suffix);
- i = mc->NumStats;
- }
- while (i == numMasked);
-
- p->MinContext = mc;
-
- // see = Ppmd7_MakeEscFreq(p, numMasked, &escFreq);
- {
- if (i != 256)
- {
- unsigned nonMasked = i - numMasked;
- see = p->See[(unsigned)p->NS2Indx[(size_t)nonMasked - 1]]
- + p->HiBitsFlag
- + (nonMasked < (unsigned)SUFFIX(mc)->NumStats - i)
- + 2 * (unsigned)(mc->Union2.SummFreq < 11 * i)
- + 4 * (unsigned)(numMasked > nonMasked);
- {
- // if (see->Summ) field is larger than 16-bit, we need only low 16 bits of Summ
- unsigned summ = (UInt16)see->Summ; // & 0xFFFF
- unsigned r = (summ >> see->Shift);
- see->Summ = (UInt16)(summ - r);
- escFreq = r + (r == 0);
- }
- }
- else
- {
- see = &p->DummySee;
- escFreq = 1;
- }
- }
-
- s = Ppmd7_GetStats(p, mc);
- sum = 0;
- // i = mc->NumStats;
-
- do
- {
- const unsigned cur = s->Symbol;
- if ((int)cur == symbol)
- {
- const UInt32 low = sum;
- const UInt32 freq = s->Freq;
- unsigned num2;
-
- Ppmd_See_UPDATE(see)
- p->FoundState = s;
- sum += escFreq;
-
- num2 = i / 2;
- i &= 1;
- sum += freq & (0 - (UInt32)i);
- if (num2 != 0)
- {
- s += i;
- for (;;)
- {
- unsigned sym0 = s[0].Symbol;
- unsigned sym1 = s[1].Symbol;
- s += 2;
- sum += (s[-2].Freq & (unsigned)(MASK(sym0)));
- sum += (s[-1].Freq & (unsigned)(MASK(sym1)));
- if (--num2 == 0)
- break;
- }
- }
-
-
- R->Range /= sum;
- RC_EncodeFinal(low, freq)
- Ppmd7_Update2(p);
- return;
- }
- sum += (s->Freq & (unsigned)(MASK(cur)));
- s++;
- }
- while (--i);
-
- {
- const UInt32 total = sum + escFreq;
- see->Summ = (UInt16)(see->Summ + total);
-
- R->Range /= total;
- RC_Encode(sum, escFreq)
- }
-
- {
- const CPpmd_State *s2 = Ppmd7_GetStats(p, p->MinContext);
- s--;
- MASK(s->Symbol) = 0;
- do
- {
- const unsigned sym0 = s2[0].Symbol;
- const unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
-}
-
-
-void Ppmd7z_EncodeSymbols(CPpmd7 *p, const Byte *buf, const Byte *lim)
-{
- for (; buf < lim; buf++)
- {
- Ppmd7z_EncodeSymbol(p, *buf);
- }
-}
-
-#undef kTopValue
-#undef WRITE_BYTE
-#undef RC_NORM_BASE
-#undef RC_NORM_1
-#undef RC_NORM
-#undef RC_NORM_LOCAL
-#undef RC_NORM_REMOTE
-#undef R
-#undef RC_Encode
-#undef RC_EncodeFinal
-#undef SUFFIX
-#undef CTX
-#undef SUCCESSOR
-#undef MASK
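/*
   A standalone sketch (illustration only, not from the 7-Zip sources) of the
   charMask bookkeeping used by Ppmd7z_EncodeSymbol above. After an escape,
   every symbol already present in the higher-order context is masked out, and
   only the frequencies of still-unmasked symbols are summed in the lower-order
   context. The symbol/frequency table below is made up.
*/
#include <stdio.h>
#include <string.h>

int main(void)
{
  unsigned char mask[256];
  const unsigned char syms[5]  = { 'a', 'b', 'c', 'd', 'e' };   /* toy context */
  const unsigned char freqs[5] = {  10,   6,   4,   3,   1 };
  unsigned sum = 0;
  size_t i;

  memset(mask, 0xFF, sizeof(mask));   /* PPMD_SetAllBitsIn256Bytes(charMask) */
  mask['a'] = 0;                      /* 'a' and 'c' were already tried in the */
  mask['c'] = 0;                      /* higher-order context: MASK(sym) = 0   */

  for (i = 0; i < 5; i++)
    sum += freqs[i] & mask[syms[i]];  /* sum += s->Freq & MASK(cur) */

  printf("masked frequency sum = %u (expected 6 + 3 + 1 = 10)\n", sum);
  return 0;
}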
diff --git a/3rdparty/7z/src/Ppmd7aDec.c b/3rdparty/7z/src/Ppmd7aDec.c
deleted file mode 100644
index 55e164e19f..0000000000
--- a/3rdparty/7z/src/Ppmd7aDec.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/* Ppmd7aDec.c -- PPMd7a (PPMdH) Decoder
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.H (2001): Dmitry Shkarin : Public domain
- Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
-
-#include "Precomp.h"
-
-#include "Ppmd7.h"
-
-#define kTop ((UInt32)1 << 24)
-#define kBot ((UInt32)1 << 15)
-
-#define READ_BYTE(p) IByteIn_Read((p)->Stream)
-
-BoolInt Ppmd7a_RangeDec_Init(CPpmd7_RangeDec *p)
-{
- unsigned i;
- p->Code = 0;
- p->Range = 0xFFFFFFFF;
- p->Low = 0;
-
- for (i = 0; i < 4; i++)
- p->Code = (p->Code << 8) | READ_BYTE(p);
- return (p->Code < 0xFFFFFFFF);
-}
-
-#define RC_NORM(p) \
- while ((p->Low ^ (p->Low + p->Range)) < kTop \
- || (p->Range < kBot && ((p->Range = (0 - p->Low) & (kBot - 1)), 1))) { \
- p->Code = (p->Code << 8) | READ_BYTE(p); \
- p->Range <<= 8; p->Low <<= 8; }
-
-// we must use only one type of Normalization from two: LOCAL or REMOTE
-#define RC_NORM_LOCAL(p) // RC_NORM(p)
-#define RC_NORM_REMOTE(p) RC_NORM(p)
-
-#define R (&p->rc.dec)
-
-Z7_FORCE_INLINE
-// Z7_NO_INLINE
-static void Ppmd7a_RD_Decode(CPpmd7 *p, UInt32 start, UInt32 size)
-{
- start *= R->Range;
- R->Low += start;
- R->Code -= start;
- R->Range *= size;
- RC_NORM_LOCAL(R)
-}
-
-#define RC_Decode(start, size) Ppmd7a_RD_Decode(p, start, size);
-#define RC_DecodeFinal(start, size) RC_Decode(start, size) RC_NORM_REMOTE(R)
-#define RC_GetThreshold(total) (R->Code / (R->Range /= (total)))
-
-
-#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
-typedef CPpmd7_Context * CTX_PTR;
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-void Ppmd7_UpdateModel(CPpmd7 *p);
-
-#define MASK(sym) ((unsigned char *)charMask)[sym]
-
-
-int Ppmd7a_DecodeSymbol(CPpmd7 *p)
-{
- size_t charMask[256 / sizeof(size_t)];
-
- if (p->MinContext->NumStats != 1)
- {
- CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
- unsigned i;
- UInt32 count, hiCnt;
- const UInt32 summFreq = p->MinContext->Union2.SummFreq;
-
- if (summFreq > R->Range)
- return PPMD7_SYM_ERROR;
-
- count = RC_GetThreshold(summFreq);
- hiCnt = count;
-
- if ((Int32)(count -= s->Freq) < 0)
- {
- Byte sym;
- RC_DecodeFinal(0, s->Freq)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd7_Update1_0(p);
- return sym;
- }
-
- p->PrevSuccess = 0;
- i = (unsigned)p->MinContext->NumStats - 1;
-
- do
- {
- if ((Int32)(count -= (++s)->Freq) < 0)
- {
- Byte sym;
- RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd7_Update1(p);
- return sym;
- }
- }
- while (--i);
-
- if (hiCnt >= summFreq)
- return PPMD7_SYM_ERROR;
-
- hiCnt -= count;
- RC_Decode(hiCnt, summFreq - hiCnt)
-
- p->HiBitsFlag = PPMD7_HiBitsFlag_3(p->FoundState->Symbol);
- PPMD_SetAllBitsIn256Bytes(charMask)
- // i = p->MinContext->NumStats - 1;
- // do { MASK((--s)->Symbol) = 0; } while (--i);
- {
- CPpmd_State *s2 = Ppmd7_GetStats(p, p->MinContext);
- MASK(s->Symbol) = 0;
- do
- {
- unsigned sym0 = s2[0].Symbol;
- unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
- else
- {
- CPpmd_State *s = Ppmd7Context_OneState(p->MinContext);
- UInt16 *prob = Ppmd7_GetBinSumm(p);
- UInt32 pr = *prob;
- UInt32 size0 = (R->Range >> 14) * pr;
- pr = PPMD_UPDATE_PROB_1(pr);
-
- if (R->Code < size0)
- {
- Byte sym;
- *prob = (UInt16)(pr + (1 << PPMD_INT_BITS));
-
- // RangeDec_DecodeBit0(size0);
- R->Range = size0;
- RC_NORM(R)
-
-
-
- // sym = (p->FoundState = Ppmd7Context_OneState(p->MinContext))->Symbol;
- // Ppmd7_UpdateBin(p);
- {
- unsigned freq = s->Freq;
- CTX_PTR c = CTX(SUCCESSOR(s));
- sym = s->Symbol;
- p->FoundState = s;
- p->PrevSuccess = 1;
- p->RunLength++;
- s->Freq = (Byte)(freq + (freq < 128));
- // NextContext(p);
- if (p->OrderFall == 0 && (const Byte *)c > p->Text)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd7_UpdateModel(p);
- }
- return sym;
- }
-
- *prob = (UInt16)pr;
- p->InitEsc = p->ExpEscape[pr >> 10];
-
- // RangeDec_DecodeBit1(size0);
- R->Low += size0;
- R->Code -= size0;
- R->Range = (R->Range & ~((UInt32)PPMD_BIN_SCALE - 1)) - size0;
- RC_NORM_LOCAL(R)
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- MASK(Ppmd7Context_OneState(p->MinContext)->Symbol) = 0;
- p->PrevSuccess = 0;
- }
-
- for (;;)
- {
- CPpmd_State *s, *s2;
- UInt32 freqSum, count, hiCnt;
-
- CPpmd_See *see;
- CPpmd7_Context *mc;
- unsigned numMasked;
- RC_NORM_REMOTE(R)
- mc = p->MinContext;
- numMasked = mc->NumStats;
-
- do
- {
- p->OrderFall++;
- if (!mc->Suffix)
- return PPMD7_SYM_END;
- mc = Ppmd7_GetContext(p, mc->Suffix);
- }
- while (mc->NumStats == numMasked);
-
- s = Ppmd7_GetStats(p, mc);
-
- {
- unsigned num = mc->NumStats;
- unsigned num2 = num / 2;
-
- num &= 1;
- hiCnt = (s->Freq & (unsigned)(MASK(s->Symbol))) & (0 - (UInt32)num);
- s += num;
- p->MinContext = mc;
-
- do
- {
- unsigned sym0 = s[0].Symbol;
- unsigned sym1 = s[1].Symbol;
- s += 2;
- hiCnt += (s[-2].Freq & (unsigned)(MASK(sym0)));
- hiCnt += (s[-1].Freq & (unsigned)(MASK(sym1)));
- }
- while (--num2);
- }
-
- see = Ppmd7_MakeEscFreq(p, numMasked, &freqSum);
- freqSum += hiCnt;
-
- if (freqSum > R->Range)
- return PPMD7_SYM_ERROR;
-
- count = RC_GetThreshold(freqSum);
-
- if (count < hiCnt)
- {
- Byte sym;
-
- s = Ppmd7_GetStats(p, p->MinContext);
- hiCnt = count;
- // count -= s->Freq & (unsigned)(MASK(s->Symbol));
- // if ((Int32)count >= 0)
- {
- for (;;)
- {
- count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
- // count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
- }
- }
- s--;
- RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
-
- // new (see->Summ) value can overflow over 16-bits in some rare cases
- Ppmd_See_UPDATE(see)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd7_Update2(p);
- return sym;
- }
-
- if (count >= freqSum)
- return PPMD7_SYM_ERROR;
-
- RC_Decode(hiCnt, freqSum - hiCnt)
-
- // We increase (see->Summ) for sum of Freqs of all non_Masked symbols.
- // new (see->Summ) value can overflow over 16-bits in some rare cases
- see->Summ = (UInt16)(see->Summ + freqSum);
-
- s = Ppmd7_GetStats(p, p->MinContext);
- s2 = s + p->MinContext->NumStats;
- do
- {
- MASK(s->Symbol) = 0;
- s++;
- }
- while (s != s2);
- }
-}
-
-#undef kTop
-#undef kBot
-#undef READ_BYTE
-#undef RC_NORM_BASE
-#undef RC_NORM_1
-#undef RC_NORM
-#undef RC_NORM_LOCAL
-#undef RC_NORM_REMOTE
-#undef R
-#undef RC_Decode
-#undef RC_DecodeFinal
-#undef RC_GetThreshold
-#undef CTX
-#undef SUCCESSOR
-#undef MASK
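/*
   A self-contained sketch of the Subbotin carryless range coder that
   Ppmd7aDec.c above is built on (kTop/kBot normalization, threshold decode).
   It round-trips a few symbols over a fixed frequency table; the buffer I/O
   and the toy alphabet are illustrative and not part of the library.
*/
#include <stdint.h>
#include <stdio.h>

#define K_TOP ((uint32_t)1 << 24)
#define K_BOT ((uint32_t)1 << 15)

static uint8_t buf[64];
static unsigned wpos, rpos;
static void put_byte(uint8_t b) { buf[wpos++] = b; }
static uint8_t get_byte(void) { return (rpos < wpos) ? buf[rpos++] : 0; }

/* toy alphabet: 3 symbols, cumulative starts {0,1,2}, freqs {1,1,2}, total 4 */
static const uint32_t cum[3] = { 0, 1, 2 }, frq[3] = { 1, 1, 2 }, total = 4;

static uint32_t Low, Range, Code;

static void enc_norm(void)
{
  while ((Low ^ (Low + Range)) < K_TOP
      || (Range < K_BOT && ((Range = (0 - Low) & (K_BOT - 1)), 1)))
  { put_byte((uint8_t)(Low >> 24)); Range <<= 8; Low <<= 8; }
}

static void dec_norm(void)
{
  while ((Low ^ (Low + Range)) < K_TOP
      || (Range < K_BOT && ((Range = (0 - Low) & (K_BOT - 1)), 1)))
  { Code = (Code << 8) | get_byte(); Range <<= 8; Low <<= 8; }
}

int main(void)
{
  static const int msg[8] = { 2, 0, 2, 1, 2, 2, 0, 1 };
  int out[8], i;

  /* encode */
  Low = 0; Range = 0xFFFFFFFF;
  for (i = 0; i < 8; i++)
  {
    Range /= total;
    Low += cum[msg[i]] * Range;        /* RC_Encode(start, size) */
    Range *= frq[msg[i]];
    enc_norm();
  }
  for (i = 0; i < 4; i++, Low <<= 8)   /* flush, as in the Ppmd7a/Ppmd8 encoders */
    put_byte((uint8_t)(Low >> 24));

  /* decode */
  Low = 0; Range = 0xFFFFFFFF; Code = 0; rpos = 0;
  for (i = 0; i < 4; i++)
    Code = (Code << 8) | get_byte();
  for (i = 0; i < 8; i++)
  {
    uint32_t t = Code / (Range /= total);   /* RC_GetThreshold(total) */
    /* a real decoder checks (t >= total) and reports a data error */
    int s = (t < 1) ? 0 : (t < 2) ? 1 : 2;  /* find symbol by cumulative freq */
    uint32_t start = cum[s] * Range;
    Low += start; Code -= start; Range *= frq[s];
    dec_norm();
    out[i] = s;
  }

  for (i = 0; i < 8; i++)
    printf("%d%c", out[i], i == 7 ? '\n' : ' ');   /* prints 2 0 2 1 2 2 0 1 */
  return 0;
}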
diff --git a/3rdparty/7z/src/Ppmd8.c b/3rdparty/7z/src/Ppmd8.c
deleted file mode 100644
index 28abf27973..0000000000
--- a/3rdparty/7z/src/Ppmd8.c
+++ /dev/null
@@ -1,1565 +0,0 @@
-/* Ppmd8.c -- PPMdI codec
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on PPMd var.I (2002): Dmitry Shkarin : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "Ppmd8.h"
-
-
-
-
-MY_ALIGN(16)
-static const Byte PPMD8_kExpEscape[16] = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
-MY_ALIGN(16)
-static const UInt16 PPMD8_kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
-
-#define MAX_FREQ 124
-#define UNIT_SIZE 12
-
-#define U2B(nu) ((UInt32)(nu) * UNIT_SIZE)
-#define U2I(nu) (p->Units2Indx[(size_t)(nu) - 1])
-#define I2U(indx) ((unsigned)p->Indx2Units[indx])
-
-
-#define REF(ptr) Ppmd_GetRef(p, ptr)
-
-#define STATS_REF(ptr) ((CPpmd_State_Ref)REF(ptr))
-
-#define CTX(ref) ((CPpmd8_Context *)Ppmd8_GetContext(p, ref))
-#define STATS(ctx) Ppmd8_GetStats(p, ctx)
-#define ONE_STATE(ctx) Ppmd8Context_OneState(ctx)
-#define SUFFIX(ctx) CTX((ctx)->Suffix)
-
-typedef CPpmd8_Context * PPMD8_CTX_PTR;
-
-struct CPpmd8_Node_;
-
-typedef Ppmd_Ref_Type(struct CPpmd8_Node_) CPpmd8_Node_Ref;
-
-typedef struct CPpmd8_Node_
-{
- UInt32 Stamp;
-
- CPpmd8_Node_Ref Next;
- UInt32 NU;
-} CPpmd8_Node;
-
-#define NODE(r) Ppmd_GetPtr_Type(p, r, CPpmd8_Node)
-
-void Ppmd8_Construct(CPpmd8 *p)
-{
- unsigned i, k, m;
-
- p->Base = NULL;
-
- for (i = 0, k = 0; i < PPMD_NUM_INDEXES; i++)
- {
- unsigned step = (i >= 12 ? 4 : (i >> 2) + 1);
- do { p->Units2Indx[k++] = (Byte)i; } while (--step);
- p->Indx2Units[i] = (Byte)k;
- }
-
- p->NS2BSIndx[0] = (0 << 1);
- p->NS2BSIndx[1] = (1 << 1);
- memset(p->NS2BSIndx + 2, (2 << 1), 9);
- memset(p->NS2BSIndx + 11, (3 << 1), 256 - 11);
-
- for (i = 0; i < 5; i++)
- p->NS2Indx[i] = (Byte)i;
-
- for (m = i, k = 1; i < 260; i++)
- {
- p->NS2Indx[i] = (Byte)m;
- if (--k == 0)
- k = (++m) - 4;
- }
-
- memcpy(p->ExpEscape, PPMD8_kExpEscape, 16);
-}
-
-
-void Ppmd8_Free(CPpmd8 *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->Base);
- p->Size = 0;
- p->Base = NULL;
-}
-
-
-BoolInt Ppmd8_Alloc(CPpmd8 *p, UInt32 size, ISzAllocPtr alloc)
-{
- if (!p->Base || p->Size != size)
- {
- Ppmd8_Free(p, alloc);
- p->AlignOffset = (4 - size) & 3;
- if ((p->Base = (Byte *)ISzAlloc_Alloc(alloc, p->AlignOffset + size)) == NULL)
- return False;
- p->Size = size;
- }
- return True;
-}
-
-
-
-// ---------- Internal Memory Allocator ----------
-
-
-
-
-
-
-#define EMPTY_NODE 0xFFFFFFFF
-
-
-static void Ppmd8_InsertNode(CPpmd8 *p, void *node, unsigned indx)
-{
- ((CPpmd8_Node *)node)->Stamp = EMPTY_NODE;
- ((CPpmd8_Node *)node)->Next = (CPpmd8_Node_Ref)p->FreeList[indx];
- ((CPpmd8_Node *)node)->NU = I2U(indx);
- p->FreeList[indx] = REF(node);
- p->Stamps[indx]++;
-}
-
-
-static void *Ppmd8_RemoveNode(CPpmd8 *p, unsigned indx)
-{
- CPpmd8_Node *node = NODE((CPpmd8_Node_Ref)p->FreeList[indx]);
- p->FreeList[indx] = node->Next;
- p->Stamps[indx]--;
-
- return node;
-}
-
-
-static void Ppmd8_SplitBlock(CPpmd8 *p, void *ptr, unsigned oldIndx, unsigned newIndx)
-{
- unsigned i, nu = I2U(oldIndx) - I2U(newIndx);
- ptr = (Byte *)ptr + U2B(I2U(newIndx));
- if (I2U(i = U2I(nu)) != nu)
- {
- unsigned k = I2U(--i);
- Ppmd8_InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1);
- }
- Ppmd8_InsertNode(p, ptr, i);
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-static void Ppmd8_GlueFreeBlocks(CPpmd8 *p)
-{
- /*
-  we use the first UInt32 field of the 12-byte UNITs as a record-type stamp
- CPpmd_State { Byte Symbol; Byte Freq; : Freq != 0xFF
- CPpmd8_Context { Byte NumStats; Byte Flags; UInt16 SummFreq; : Flags != 0xFF ???
- CPpmd8_Node { UInt32 Stamp : Stamp == 0xFFFFFFFF for free record
- : Stamp == 0 for guard
-  The last 12-byte UNIT in the array always contains the 12-byte order-0 CPpmd8_Context record
- */
- CPpmd8_Node_Ref n;
-
- p->GlueCount = 1 << 13;
- memset(p->Stamps, 0, sizeof(p->Stamps));
-
- /* we set guard NODE at LoUnit */
- if (p->LoUnit != p->HiUnit)
- ((CPpmd8_Node *)(void *)p->LoUnit)->Stamp = 0;
-
- {
- /* Glue free blocks */
- CPpmd8_Node_Ref *prev = &n;
- unsigned i;
- for (i = 0; i < PPMD_NUM_INDEXES; i++)
- {
-
- CPpmd8_Node_Ref next = (CPpmd8_Node_Ref)p->FreeList[i];
- p->FreeList[i] = 0;
- while (next != 0)
- {
- CPpmd8_Node *node = NODE(next);
- UInt32 nu = node->NU;
- *prev = next;
- next = node->Next;
- if (nu != 0)
- {
- CPpmd8_Node *node2;
- prev = &(node->Next);
- while ((node2 = node + nu)->Stamp == EMPTY_NODE)
- {
- nu += node2->NU;
- node2->NU = 0;
- node->NU = nu;
- }
- }
- }
- }
-
- *prev = 0;
- }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- /* Fill lists of free blocks */
- while (n != 0)
- {
- CPpmd8_Node *node = NODE(n);
- UInt32 nu = node->NU;
- unsigned i;
- n = node->Next;
- if (nu == 0)
- continue;
- for (; nu > 128; nu -= 128, node += 128)
- Ppmd8_InsertNode(p, node, PPMD_NUM_INDEXES - 1);
- if (I2U(i = U2I(nu)) != nu)
- {
- unsigned k = I2U(--i);
- Ppmd8_InsertNode(p, node + k, (unsigned)nu - k - 1);
- }
- Ppmd8_InsertNode(p, node, i);
- }
-}
-
-
-Z7_NO_INLINE
-static void *Ppmd8_AllocUnitsRare(CPpmd8 *p, unsigned indx)
-{
- unsigned i;
-
- if (p->GlueCount == 0)
- {
- Ppmd8_GlueFreeBlocks(p);
- if (p->FreeList[indx] != 0)
- return Ppmd8_RemoveNode(p, indx);
- }
-
- i = indx;
-
- do
- {
- if (++i == PPMD_NUM_INDEXES)
- {
- UInt32 numBytes = U2B(I2U(indx));
- Byte *us = p->UnitsStart;
- p->GlueCount--;
- return ((UInt32)(us - p->Text) > numBytes) ? (p->UnitsStart = us - numBytes) : (NULL);
- }
- }
- while (p->FreeList[i] == 0);
-
- {
- void *block = Ppmd8_RemoveNode(p, i);
- Ppmd8_SplitBlock(p, block, i, indx);
- return block;
- }
-}
-
-
-static void *Ppmd8_AllocUnits(CPpmd8 *p, unsigned indx)
-{
- if (p->FreeList[indx] != 0)
- return Ppmd8_RemoveNode(p, indx);
- {
- UInt32 numBytes = U2B(I2U(indx));
- Byte *lo = p->LoUnit;
- if ((UInt32)(p->HiUnit - lo) >= numBytes)
- {
- p->LoUnit = lo + numBytes;
- return lo;
- }
- }
- return Ppmd8_AllocUnitsRare(p, indx);
-}
-
-
-#define MEM_12_CPY(dest, src, num) \
- { UInt32 *d = (UInt32 *)dest; const UInt32 *z = (const UInt32 *)src; UInt32 n = num; \
- do { d[0] = z[0]; d[1] = z[1]; d[2] = z[2]; z += 3; d += 3; } while (--n); }
-
-
-
-static void *ShrinkUnits(CPpmd8 *p, void *oldPtr, unsigned oldNU, unsigned newNU)
-{
- unsigned i0 = U2I(oldNU);
- unsigned i1 = U2I(newNU);
- if (i0 == i1)
- return oldPtr;
- if (p->FreeList[i1] != 0)
- {
- void *ptr = Ppmd8_RemoveNode(p, i1);
- MEM_12_CPY(ptr, oldPtr, newNU)
- Ppmd8_InsertNode(p, oldPtr, i0);
- return ptr;
- }
- Ppmd8_SplitBlock(p, oldPtr, i0, i1);
- return oldPtr;
-}
-
-
-static void FreeUnits(CPpmd8 *p, void *ptr, unsigned nu)
-{
- Ppmd8_InsertNode(p, ptr, U2I(nu));
-}
-
-
-static void SpecialFreeUnit(CPpmd8 *p, void *ptr)
-{
- if ((Byte *)ptr != p->UnitsStart)
- Ppmd8_InsertNode(p, ptr, 0);
- else
- {
- #ifdef PPMD8_FREEZE_SUPPORT
- *(UInt32 *)ptr = EMPTY_NODE; /* it's used for (Flags == 0xFF) check in RemoveBinContexts() */
- #endif
- p->UnitsStart += UNIT_SIZE;
- }
-}
-
-
-/*
-static void *MoveUnitsUp(CPpmd8 *p, void *oldPtr, unsigned nu)
-{
- unsigned indx = U2I(nu);
- void *ptr;
- if ((Byte *)oldPtr > p->UnitsStart + (1 << 14) || REF(oldPtr) > p->FreeList[indx])
- return oldPtr;
- ptr = Ppmd8_RemoveNode(p, indx);
- MEM_12_CPY(ptr, oldPtr, nu)
- if ((Byte *)oldPtr != p->UnitsStart)
- Ppmd8_InsertNode(p, oldPtr, indx);
- else
- p->UnitsStart += U2B(I2U(indx));
- return ptr;
-}
-*/
-
-static void ExpandTextArea(CPpmd8 *p)
-{
- UInt32 count[PPMD_NUM_INDEXES];
- unsigned i;
-
- memset(count, 0, sizeof(count));
- if (p->LoUnit != p->HiUnit)
- ((CPpmd8_Node *)(void *)p->LoUnit)->Stamp = 0;
-
- {
- CPpmd8_Node *node = (CPpmd8_Node *)(void *)p->UnitsStart;
- while (node->Stamp == EMPTY_NODE)
- {
- UInt32 nu = node->NU;
- node->Stamp = 0;
- count[U2I(nu)]++;
- node += nu;
- }
- p->UnitsStart = (Byte *)node;
- }
-
- for (i = 0; i < PPMD_NUM_INDEXES; i++)
- {
- UInt32 cnt = count[i];
- if (cnt == 0)
- continue;
- {
- CPpmd8_Node_Ref *prev = (CPpmd8_Node_Ref *)&p->FreeList[i];
- CPpmd8_Node_Ref n = *prev;
- p->Stamps[i] -= cnt;
- for (;;)
- {
- CPpmd8_Node *node = NODE(n);
- n = node->Next;
- if (node->Stamp != 0)
- {
- prev = &node->Next;
- continue;
- }
- *prev = n;
- if (--cnt == 0)
- break;
- }
- }
- }
-}
-
-
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-static void Ppmd8State_SetSuccessor(CPpmd_State *p, CPpmd_Void_Ref v)
-{
- Ppmd_SET_SUCCESSOR(p, v)
-}
-
-#define RESET_TEXT(offs) { p->Text = p->Base + p->AlignOffset + (offs); }
-
-Z7_NO_INLINE
-static
-void Ppmd8_RestartModel(CPpmd8 *p)
-{
- unsigned i, k, m;
-
- memset(p->FreeList, 0, sizeof(p->FreeList));
- memset(p->Stamps, 0, sizeof(p->Stamps));
- RESET_TEXT(0)
- p->HiUnit = p->Text + p->Size;
- p->LoUnit = p->UnitsStart = p->HiUnit - p->Size / 8 / UNIT_SIZE * 7 * UNIT_SIZE;
- p->GlueCount = 0;
-
- p->OrderFall = p->MaxOrder;
- p->RunLength = p->InitRL = -(Int32)((p->MaxOrder < 12) ? p->MaxOrder : 12) - 1;
- p->PrevSuccess = 0;
-
- {
- CPpmd8_Context *mc = (PPMD8_CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */
- CPpmd_State *s = (CPpmd_State *)p->LoUnit; /* Ppmd8_AllocUnits(p, PPMD_NUM_INDEXES - 1); */
-
- p->LoUnit += U2B(256 / 2);
- p->MaxContext = p->MinContext = mc;
- p->FoundState = s;
- mc->Flags = 0;
- mc->NumStats = 256 - 1;
- mc->Union2.SummFreq = 256 + 1;
- mc->Union4.Stats = REF(s);
- mc->Suffix = 0;
-
- for (i = 0; i < 256; i++, s++)
- {
- s->Symbol = (Byte)i;
- s->Freq = 1;
- Ppmd8State_SetSuccessor(s, 0);
- }
- }
-
-
-
-
-
-
-
-
-
-
-
-
- for (i = m = 0; m < 25; m++)
- {
- while (p->NS2Indx[i] == m)
- i++;
- for (k = 0; k < 8; k++)
- {
- unsigned r;
- UInt16 *dest = p->BinSumm[m] + k;
- const UInt16 val = (UInt16)(PPMD_BIN_SCALE - PPMD8_kInitBinEsc[k] / (i + 1));
- for (r = 0; r < 64; r += 8)
- dest[r] = val;
- }
- }
-
- for (i = m = 0; m < 24; m++)
- {
- unsigned summ;
- CPpmd_See *s;
- while (p->NS2Indx[(size_t)i + 3] == m + 3)
- i++;
- s = p->See[m];
- summ = ((2 * i + 5) << (PPMD_PERIOD_BITS - 4));
- for (k = 0; k < 32; k++, s++)
- {
- s->Summ = (UInt16)summ;
- s->Shift = (PPMD_PERIOD_BITS - 4);
- s->Count = 7;
- }
- }
-
- p->DummySee.Summ = 0; /* unused */
- p->DummySee.Shift = PPMD_PERIOD_BITS;
- p->DummySee.Count = 64; /* unused */
-}
-
-
-void Ppmd8_Init(CPpmd8 *p, unsigned maxOrder, unsigned restoreMethod)
-{
- p->MaxOrder = maxOrder;
- p->RestoreMethod = restoreMethod;
- Ppmd8_RestartModel(p);
-}
-
-
-#define FLAG_RESCALED (1 << 2)
-// #define FLAG_SYM_HIGH (1 << 3)
-#define FLAG_PREV_HIGH (1 << 4)
-
-#define HiBits_Prepare(sym) ((unsigned)(sym) + 0xC0)
-
-#define HiBits_Convert_3(flags) (((flags) >> (8 - 3)) & (1 << 3))
-#define HiBits_Convert_4(flags) (((flags) >> (8 - 4)) & (1 << 4))
-
-#define PPMD8_HiBitsFlag_3(sym) HiBits_Convert_3(HiBits_Prepare(sym))
-#define PPMD8_HiBitsFlag_4(sym) HiBits_Convert_4(HiBits_Prepare(sym))
-
-// #define PPMD8_HiBitsFlag_3(sym) (0x08 * ((sym) >= 0x40))
-// #define PPMD8_HiBitsFlag_4(sym) (0x10 * ((sym) >= 0x40))
-
-/*
-Refresh() is called when we remove some symbols (successors) from a context.
-It increases Escape_Freq by the sum of the frequencies of all removed symbols.
-*/
-
-static void Refresh(CPpmd8 *p, PPMD8_CTX_PTR ctx, unsigned oldNU, unsigned scale)
-{
- unsigned i = ctx->NumStats, escFreq, sumFreq, flags;
- CPpmd_State *s = (CPpmd_State *)ShrinkUnits(p, STATS(ctx), oldNU, (i + 2) >> 1);
- ctx->Union4.Stats = REF(s);
-
- // #ifdef PPMD8_FREEZE_SUPPORT
- /*
- (ctx->Union2.SummFreq >= ((UInt32)1 << 15)) can be in FREEZE mode for some files.
-  It is not good for the range coder, so newer versions support this fix:
-  - original PPMdI code rev.1
-  + original PPMdI code rev.2
-  - 7-Zip default (PPMD8_FREEZE_SUPPORT is not defined)
-  + 7-Zip (p->RestoreMethod >= PPMD8_RESTORE_METHOD_FREEZE)
-  If we use the fixed line, we can lose compatibility with some files created before the fix;
-  if we don't use it, the program can work incorrectly in FREEZE mode in rare cases.
- */
- // if (p->RestoreMethod >= PPMD8_RESTORE_METHOD_FREEZE)
- {
- scale |= (ctx->Union2.SummFreq >= ((UInt32)1 << 15));
- }
- // #endif
-
-
-
- flags = HiBits_Prepare(s->Symbol);
- {
- unsigned freq = s->Freq;
- escFreq = ctx->Union2.SummFreq - freq;
- freq = (freq + scale) >> scale;
- sumFreq = freq;
- s->Freq = (Byte)freq;
- }
-
- do
- {
- unsigned freq = (++s)->Freq;
- escFreq -= freq;
- freq = (freq + scale) >> scale;
- sumFreq += freq;
- s->Freq = (Byte)freq;
- flags |= HiBits_Prepare(s->Symbol);
- }
- while (--i);
-
- ctx->Union2.SummFreq = (UInt16)(sumFreq + ((escFreq + scale) >> scale));
- ctx->Flags = (Byte)((ctx->Flags & (FLAG_PREV_HIGH + FLAG_RESCALED * scale)) + HiBits_Convert_3(flags));
-}
-
-
-static void SWAP_STATES(CPpmd_State *t1, CPpmd_State *t2)
-{
- CPpmd_State tmp = *t1;
- *t1 = *t2;
- *t2 = tmp;
-}
-
-
-/*
-CutOff() reduces contexts:
-  It converts Successors at MaxOrder that point to other Contexts into NULL-Successors,
-  it removes RAW-Successors and NULL-Successors that are not Order-0,
-  and it removes a context when it has no Successors.
-  If (Union4.Stats) is close to (UnitsStart), it moves the stats up.
-*/
-
-static CPpmd_Void_Ref CutOff(CPpmd8 *p, PPMD8_CTX_PTR ctx, unsigned order)
-{
- int ns = ctx->NumStats;
- unsigned nu;
- CPpmd_State *stats;
-
- if (ns == 0)
- {
- CPpmd_State *s = ONE_STATE(ctx);
- CPpmd_Void_Ref successor = SUCCESSOR(s);
- if ((Byte *)Ppmd8_GetPtr(p, successor) >= p->UnitsStart)
- {
- if (order < p->MaxOrder)
- successor = CutOff(p, CTX(successor), order + 1);
- else
- successor = 0;
- Ppmd8State_SetSuccessor(s, successor);
- if (successor || order <= 9) /* O_BOUND */
- return REF(ctx);
- }
- SpecialFreeUnit(p, ctx);
- return 0;
- }
-
- nu = ((unsigned)ns + 2) >> 1;
- // ctx->Union4.Stats = STATS_REF(MoveUnitsUp(p, STATS(ctx), nu));
- {
- unsigned indx = U2I(nu);
- stats = STATS(ctx);
-
- if ((UInt32)((Byte *)stats - p->UnitsStart) <= (1 << 14)
- && (CPpmd_Void_Ref)ctx->Union4.Stats <= p->FreeList[indx])
- {
- void *ptr = Ppmd8_RemoveNode(p, indx);
- ctx->Union4.Stats = STATS_REF(ptr);
- MEM_12_CPY(ptr, (const void *)stats, nu)
- if ((Byte *)stats != p->UnitsStart)
- Ppmd8_InsertNode(p, stats, indx);
- else
- p->UnitsStart += U2B(I2U(indx));
- stats = ptr;
- }
- }
-
- {
- CPpmd_State *s = stats + (unsigned)ns;
- do
- {
- CPpmd_Void_Ref successor = SUCCESSOR(s);
- if ((Byte *)Ppmd8_GetPtr(p, successor) < p->UnitsStart)
- {
- CPpmd_State *s2 = stats + (unsigned)(ns--);
- if (order)
- {
- if (s != s2)
- *s = *s2;
- }
- else
- {
- SWAP_STATES(s, s2);
- Ppmd8State_SetSuccessor(s2, 0);
- }
- }
- else
- {
- if (order < p->MaxOrder)
- Ppmd8State_SetSuccessor(s, CutOff(p, CTX(successor), order + 1));
- else
- Ppmd8State_SetSuccessor(s, 0);
- }
- }
- while (--s >= stats);
- }
-
- if (ns != ctx->NumStats && order)
- {
- if (ns < 0)
- {
- FreeUnits(p, stats, nu);
- SpecialFreeUnit(p, ctx);
- return 0;
- }
- ctx->NumStats = (Byte)ns;
- if (ns == 0)
- {
- const Byte sym = stats->Symbol;
- ctx->Flags = (Byte)((ctx->Flags & FLAG_PREV_HIGH) + PPMD8_HiBitsFlag_3(sym));
- // *ONE_STATE(ctx) = *stats;
- ctx->Union2.State2.Symbol = sym;
- ctx->Union2.State2.Freq = (Byte)(((unsigned)stats->Freq + 11) >> 3);
- ctx->Union4.State4.Successor_0 = stats->Successor_0;
- ctx->Union4.State4.Successor_1 = stats->Successor_1;
- FreeUnits(p, stats, nu);
- }
- else
- {
- Refresh(p, ctx, nu, ctx->Union2.SummFreq > 16 * (unsigned)ns);
- }
- }
-
- return REF(ctx);
-}
-
-
-
-#ifdef PPMD8_FREEZE_SUPPORT
-
-/*
-RemoveBinContexts()
-  It converts Successors at MaxOrder that point to other Contexts into NULL-Successors,
-  it changes RAW-Successors to NULL-Successors,
-  and it removes a binary context without a Successor if the suffix of that context is also binary.
-*/
-
-static CPpmd_Void_Ref RemoveBinContexts(CPpmd8 *p, PPMD8_CTX_PTR ctx, unsigned order)
-{
- if (!ctx->NumStats)
- {
- CPpmd_State *s = ONE_STATE(ctx);
- CPpmd_Void_Ref successor = SUCCESSOR(s);
- if ((Byte *)Ppmd8_GetPtr(p, successor) >= p->UnitsStart && order < p->MaxOrder)
- successor = RemoveBinContexts(p, CTX(successor), order + 1);
- else
- successor = 0;
- Ppmd8State_SetSuccessor(s, successor);
- /* Suffix context can be removed already, since different (high-order)
- Successors may refer to same context. So we check Flags == 0xFF (Stamp == EMPTY_NODE) */
- if (!successor && (!SUFFIX(ctx)->NumStats || SUFFIX(ctx)->Flags == 0xFF))
- {
- FreeUnits(p, ctx, 1);
- return 0;
- }
- }
- else
- {
- CPpmd_State *s = STATS(ctx) + ctx->NumStats;
- do
- {
- CPpmd_Void_Ref successor = SUCCESSOR(s);
- if ((Byte *)Ppmd8_GetPtr(p, successor) >= p->UnitsStart && order < p->MaxOrder)
- Ppmd8State_SetSuccessor(s, RemoveBinContexts(p, CTX(successor), order + 1));
- else
- Ppmd8State_SetSuccessor(s, 0);
- }
- while (--s >= STATS(ctx));
- }
-
- return REF(ctx);
-}
-
-#endif
-
-
-
-static UInt32 GetUsedMemory(const CPpmd8 *p)
-{
- UInt32 v = 0;
- unsigned i;
- for (i = 0; i < PPMD_NUM_INDEXES; i++)
- v += p->Stamps[i] * I2U(i);
- return p->Size - (UInt32)(p->HiUnit - p->LoUnit) - (UInt32)(p->UnitsStart - p->Text) - U2B(v);
-}
-
-#ifdef PPMD8_FREEZE_SUPPORT
- #define RESTORE_MODEL(c1, fSuccessor) RestoreModel(p, c1, fSuccessor)
-#else
- #define RESTORE_MODEL(c1, fSuccessor) RestoreModel(p, c1)
-#endif
-
-
-static void RestoreModel(CPpmd8 *p, PPMD8_CTX_PTR ctxError
- #ifdef PPMD8_FREEZE_SUPPORT
- , PPMD8_CTX_PTR fSuccessor
- #endif
- )
-{
- PPMD8_CTX_PTR c;
- CPpmd_State *s;
- RESET_TEXT(0)
-
-  // we get here when allocation of a context (c1) failed
-  // Order(MinContext) < Order(ctxError) <= Order(MaxContext)
-
-  // We remove the last symbol from each context in [p->MaxContext ... ctxError),
-  // rolling back all symbols that were created before the error.
- for (c = p->MaxContext; c != ctxError; c = SUFFIX(c))
- if (--(c->NumStats) == 0)
- {
- s = STATS(c);
- c->Flags = (Byte)((c->Flags & FLAG_PREV_HIGH) + PPMD8_HiBitsFlag_3(s->Symbol));
- // *ONE_STATE(c) = *s;
- c->Union2.State2.Symbol = s->Symbol;
- c->Union2.State2.Freq = (Byte)(((unsigned)s->Freq + 11) >> 3);
- c->Union4.State4.Successor_0 = s->Successor_0;
- c->Union4.State4.Successor_1 = s->Successor_1;
-
- SpecialFreeUnit(p, s);
- }
- else
- {
-    /* Refresh() can increase Escape_Freq by the Freq of the last symbol that was added before the error,
-       so the largest possible increase for Escape_Freq is (8) from its value before the model-update call */
- Refresh(p, c, ((unsigned)c->NumStats + 3) >> 1, 0);
- }
-
- // increase Escape Freq for context [ctxError ... p->MinContext)
- for (; c != p->MinContext; c = SUFFIX(c))
- if (c->NumStats == 0)
- {
- // ONE_STATE(c)
- c->Union2.State2.Freq = (Byte)(((unsigned)c->Union2.State2.Freq + 1) >> 1);
- }
- else if ((c->Union2.SummFreq = (UInt16)(c->Union2.SummFreq + 4)) > 128 + 4 * c->NumStats)
- Refresh(p, c, ((unsigned)c->NumStats + 2) >> 1, 1);
-
- #ifdef PPMD8_FREEZE_SUPPORT
- if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
- {
- p->MaxContext = fSuccessor;
- p->GlueCount += !(p->Stamps[1] & 1); // why?
- }
- else if (p->RestoreMethod == PPMD8_RESTORE_METHOD_FREEZE)
- {
- while (p->MaxContext->Suffix)
- p->MaxContext = SUFFIX(p->MaxContext);
- RemoveBinContexts(p, p->MaxContext, 0);
- // we change the current mode to (PPMD8_RESTORE_METHOD_FREEZE + 1)
- p->RestoreMethod = PPMD8_RESTORE_METHOD_FREEZE + 1;
- p->GlueCount = 0;
- p->OrderFall = p->MaxOrder;
- }
- else
- #endif
- if (p->RestoreMethod == PPMD8_RESTORE_METHOD_RESTART || GetUsedMemory(p) < (p->Size >> 1))
- Ppmd8_RestartModel(p);
- else
- {
- while (p->MaxContext->Suffix)
- p->MaxContext = SUFFIX(p->MaxContext);
- do
- {
- CutOff(p, p->MaxContext, 0);
- ExpandTextArea(p);
- }
- while (GetUsedMemory(p) > 3 * (p->Size >> 2));
- p->GlueCount = 0;
- p->OrderFall = p->MaxOrder;
- }
- p->MinContext = p->MaxContext;
-}
-
-
-
-Z7_NO_INLINE
-static PPMD8_CTX_PTR Ppmd8_CreateSuccessors(CPpmd8 *p, BoolInt skip, CPpmd_State *s1, PPMD8_CTX_PTR c)
-{
-
- CPpmd_Byte_Ref upBranch = (CPpmd_Byte_Ref)SUCCESSOR(p->FoundState);
- Byte newSym, newFreq, flags;
- unsigned numPs = 0;
- CPpmd_State *ps[PPMD8_MAX_ORDER + 1]; /* fixed over Shkarin's code. Maybe it could work without + 1 too. */
-
- if (!skip)
- ps[numPs++] = p->FoundState;
-
- while (c->Suffix)
- {
- CPpmd_Void_Ref successor;
- CPpmd_State *s;
- c = SUFFIX(c);
-
- if (s1) { s = s1; s1 = NULL; }
- else if (c->NumStats != 0)
- {
- Byte sym = p->FoundState->Symbol;
- for (s = STATS(c); s->Symbol != sym; s++);
- if (s->Freq < MAX_FREQ - 9) { s->Freq++; c->Union2.SummFreq++; }
- }
- else
- {
- s = ONE_STATE(c);
- s->Freq = (Byte)(s->Freq + (!SUFFIX(c)->NumStats & (s->Freq < 24)));
- }
- successor = SUCCESSOR(s);
- if (successor != upBranch)
- {
-
- c = CTX(successor);
- if (numPs == 0)
- {
-
-
- return c;
- }
- break;
- }
- ps[numPs++] = s;
- }
-
-
-
-
-
- newSym = *(const Byte *)Ppmd8_GetPtr(p, upBranch);
- upBranch++;
- flags = (Byte)(PPMD8_HiBitsFlag_4(p->FoundState->Symbol) + PPMD8_HiBitsFlag_3(newSym));
-
- if (c->NumStats == 0)
- newFreq = c->Union2.State2.Freq;
- else
- {
- UInt32 cf, s0;
- CPpmd_State *s;
- for (s = STATS(c); s->Symbol != newSym; s++);
- cf = (UInt32)s->Freq - 1;
- s0 = (UInt32)c->Union2.SummFreq - c->NumStats - cf;
- /*
-
-
- max(newFreq)= (s->Freq - 1), when (s0 == 1)
-
-
- */
- newFreq = (Byte)(1 + ((2 * cf <= s0) ? (5 * cf > s0) : ((cf + 2 * s0 - 3) / s0)));
- }
-
-
-
- do
- {
- PPMD8_CTX_PTR c1;
- /* = AllocContext(p); */
- if (p->HiUnit != p->LoUnit)
- c1 = (PPMD8_CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE);
- else if (p->FreeList[0] != 0)
- c1 = (PPMD8_CTX_PTR)Ppmd8_RemoveNode(p, 0);
- else
- {
- c1 = (PPMD8_CTX_PTR)Ppmd8_AllocUnitsRare(p, 0);
- if (!c1)
- return NULL;
- }
- c1->Flags = flags;
- c1->NumStats = 0;
- c1->Union2.State2.Symbol = newSym;
- c1->Union2.State2.Freq = newFreq;
- Ppmd8State_SetSuccessor(ONE_STATE(c1), upBranch);
- c1->Suffix = REF(c);
- Ppmd8State_SetSuccessor(ps[--numPs], REF(c1));
- c = c1;
- }
- while (numPs != 0);
-
- return c;
-}
-
-
-static PPMD8_CTX_PTR ReduceOrder(CPpmd8 *p, CPpmd_State *s1, PPMD8_CTX_PTR c)
-{
- CPpmd_State *s = NULL;
- PPMD8_CTX_PTR c1 = c;
- CPpmd_Void_Ref upBranch = REF(p->Text);
-
- #ifdef PPMD8_FREEZE_SUPPORT
- /* The BUG in Shkarin's code was fixed: ps could overflow in CUT_OFF mode. */
- CPpmd_State *ps[PPMD8_MAX_ORDER + 1];
- unsigned numPs = 0;
- ps[numPs++] = p->FoundState;
- #endif
-
- Ppmd8State_SetSuccessor(p->FoundState, upBranch);
- p->OrderFall++;
-
- for (;;)
- {
- if (s1)
- {
- c = SUFFIX(c);
- s = s1;
- s1 = NULL;
- }
- else
- {
- if (!c->Suffix)
- {
- #ifdef PPMD8_FREEZE_SUPPORT
- if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
- {
- do { Ppmd8State_SetSuccessor(ps[--numPs], REF(c)); } while (numPs);
- RESET_TEXT(1)
- p->OrderFall = 1;
- }
- #endif
- return c;
- }
- c = SUFFIX(c);
- if (c->NumStats)
- {
- if ((s = STATS(c))->Symbol != p->FoundState->Symbol)
- do { s++; } while (s->Symbol != p->FoundState->Symbol);
- if (s->Freq < MAX_FREQ - 9)
- {
- s->Freq = (Byte)(s->Freq + 2);
- c->Union2.SummFreq = (UInt16)(c->Union2.SummFreq + 2);
- }
- }
- else
- {
- s = ONE_STATE(c);
- s->Freq = (Byte)(s->Freq + (s->Freq < 32));
- }
- }
- if (SUCCESSOR(s))
- break;
- #ifdef PPMD8_FREEZE_SUPPORT
- ps[numPs++] = s;
- #endif
- Ppmd8State_SetSuccessor(s, upBranch);
- p->OrderFall++;
- }
-
- #ifdef PPMD8_FREEZE_SUPPORT
- if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
- {
- c = CTX(SUCCESSOR(s));
- do { Ppmd8State_SetSuccessor(ps[--numPs], REF(c)); } while (numPs);
- RESET_TEXT(1)
- p->OrderFall = 1;
- return c;
- }
- else
- #endif
- if (SUCCESSOR(s) <= upBranch)
- {
- PPMD8_CTX_PTR successor;
- CPpmd_State *s2 = p->FoundState;
- p->FoundState = s;
-
- successor = Ppmd8_CreateSuccessors(p, False, NULL, c);
- if (!successor)
- Ppmd8State_SetSuccessor(s, 0);
- else
- Ppmd8State_SetSuccessor(s, REF(successor));
- p->FoundState = s2;
- }
-
- {
- CPpmd_Void_Ref successor = SUCCESSOR(s);
- if (p->OrderFall == 1 && c1 == p->MaxContext)
- {
- Ppmd8State_SetSuccessor(p->FoundState, successor);
- p->Text--;
- }
- if (successor == 0)
- return NULL;
- return CTX(successor);
- }
-}
-
-
-
-void Ppmd8_UpdateModel(CPpmd8 *p);
-Z7_NO_INLINE
-void Ppmd8_UpdateModel(CPpmd8 *p)
-{
- CPpmd_Void_Ref maxSuccessor, minSuccessor = SUCCESSOR(p->FoundState);
- PPMD8_CTX_PTR c;
- unsigned s0, ns, fFreq = p->FoundState->Freq;
- Byte flag, fSymbol = p->FoundState->Symbol;
- {
- CPpmd_State *s = NULL;
- if (p->FoundState->Freq < MAX_FREQ / 4 && p->MinContext->Suffix != 0)
- {
- /* Update Freqs in Suffix Context */
-
- c = SUFFIX(p->MinContext);
-
- if (c->NumStats == 0)
- {
- s = ONE_STATE(c);
- if (s->Freq < 32)
- s->Freq++;
- }
- else
- {
- Byte sym = p->FoundState->Symbol;
- s = STATS(c);
-
- if (s->Symbol != sym)
- {
- do
- {
-
- s++;
- }
- while (s->Symbol != sym);
-
- if (s[0].Freq >= s[-1].Freq)
- {
- SWAP_STATES(&s[0], &s[-1]);
- s--;
- }
- }
-
- if (s->Freq < MAX_FREQ - 9)
- {
- s->Freq = (Byte)(s->Freq + 2);
- c->Union2.SummFreq = (UInt16)(c->Union2.SummFreq + 2);
- }
- }
- }
-
- c = p->MaxContext;
- if (p->OrderFall == 0 && minSuccessor)
- {
- PPMD8_CTX_PTR cs = Ppmd8_CreateSuccessors(p, True, s, p->MinContext);
- if (!cs)
- {
- Ppmd8State_SetSuccessor(p->FoundState, 0);
- RESTORE_MODEL(c, CTX(minSuccessor));
- return;
- }
- Ppmd8State_SetSuccessor(p->FoundState, REF(cs));
- p->MinContext = p->MaxContext = cs;
- return;
- }
-
-
-
-
- {
- Byte *text = p->Text;
- *text++ = p->FoundState->Symbol;
- p->Text = text;
- if (text >= p->UnitsStart)
- {
- RESTORE_MODEL(c, CTX(minSuccessor)); /* check it */
- return;
- }
- maxSuccessor = REF(text);
- }
-
- if (!minSuccessor)
- {
- PPMD8_CTX_PTR cs = ReduceOrder(p, s, p->MinContext);
- if (!cs)
- {
- RESTORE_MODEL(c, NULL);
- return;
- }
- minSuccessor = REF(cs);
- }
- else if ((Byte *)Ppmd8_GetPtr(p, minSuccessor) < p->UnitsStart)
- {
- PPMD8_CTX_PTR cs = Ppmd8_CreateSuccessors(p, False, s, p->MinContext);
- if (!cs)
- {
- RESTORE_MODEL(c, NULL);
- return;
- }
- minSuccessor = REF(cs);
- }
-
- if (--p->OrderFall == 0)
- {
- maxSuccessor = minSuccessor;
- p->Text -= (p->MaxContext != p->MinContext);
- }
- #ifdef PPMD8_FREEZE_SUPPORT
- else if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
- {
- maxSuccessor = minSuccessor;
- RESET_TEXT(0)
- p->OrderFall = 0;
- }
- #endif
- }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- flag = (Byte)(PPMD8_HiBitsFlag_3(fSymbol));
- s0 = p->MinContext->Union2.SummFreq - (ns = p->MinContext->NumStats) - fFreq;
-
- for (; c != p->MinContext; c = SUFFIX(c))
- {
- unsigned ns1;
- UInt32 sum;
-
- if ((ns1 = c->NumStats) != 0)
- {
- if ((ns1 & 1) != 0)
- {
- /* Expand for one UNIT */
- unsigned oldNU = (ns1 + 1) >> 1;
- unsigned i = U2I(oldNU);
- if (i != U2I((size_t)oldNU + 1))
- {
- void *ptr = Ppmd8_AllocUnits(p, i + 1);
- void *oldPtr;
- if (!ptr)
- {
- RESTORE_MODEL(c, CTX(minSuccessor));
- return;
- }
- oldPtr = STATS(c);
- MEM_12_CPY(ptr, oldPtr, oldNU)
- Ppmd8_InsertNode(p, oldPtr, i);
- c->Union4.Stats = STATS_REF(ptr);
- }
- }
- sum = c->Union2.SummFreq;
- /* max increase of Escape_Freq is 1 here.
- an average increase is 1/3 per symbol */
- sum += (3 * ns1 + 1 < ns);
- /* original PPMdH uses 16-bit variable for (sum) here.
- But (sum < ???). Do we need to truncate (sum) to 16-bit */
- // sum = (UInt16)sum;
- }
- else
- {
-
- CPpmd_State *s = (CPpmd_State*)Ppmd8_AllocUnits(p, 0);
- if (!s)
- {
- RESTORE_MODEL(c, CTX(minSuccessor));
- return;
- }
- {
- unsigned freq = c->Union2.State2.Freq;
- // s = *ONE_STATE(c);
- s->Symbol = c->Union2.State2.Symbol;
- s->Successor_0 = c->Union4.State4.Successor_0;
- s->Successor_1 = c->Union4.State4.Successor_1;
- // Ppmd8State_SetSuccessor(s, c->Union4.Stats); // call it only for debug purposes to check the order of
- // (Successor_0 and Successor_1) in LE/BE.
- c->Union4.Stats = REF(s);
- if (freq < MAX_FREQ / 4 - 1)
- freq <<= 1;
- else
- freq = MAX_FREQ - 4;
-
- s->Freq = (Byte)freq;
-
- sum = freq + p->InitEsc + (ns > 2); // Ppmd8 (> 2)
- }
- }
-
- {
- CPpmd_State *s = STATS(c) + ns1 + 1;
- UInt32 cf = 2 * (sum + 6) * (UInt32)fFreq;
- UInt32 sf = (UInt32)s0 + sum;
- s->Symbol = fSymbol;
- c->NumStats = (Byte)(ns1 + 1);
- Ppmd8State_SetSuccessor(s, maxSuccessor);
- c->Flags |= flag;
- if (cf < 6 * sf)
- {
- cf = (unsigned)1 + (cf > sf) + (cf >= 4 * sf);
- sum += 4;
- /* It can add (1, 2, 3) to Escape_Freq */
- }
- else
- {
- cf = (unsigned)4 + (cf > 9 * sf) + (cf > 12 * sf) + (cf > 15 * sf);
- sum += cf;
- }
-
- c->Union2.SummFreq = (UInt16)sum;
- s->Freq = (Byte)cf;
- }
-
- }
- p->MaxContext = p->MinContext = CTX(minSuccessor);
-}
-
-
-
-Z7_NO_INLINE
-static void Ppmd8_Rescale(CPpmd8 *p)
-{
- unsigned i, adder, sumFreq, escFreq;
- CPpmd_State *stats = STATS(p->MinContext);
- CPpmd_State *s = p->FoundState;
-
- /* Sort the list by Freq */
- if (s != stats)
- {
- CPpmd_State tmp = *s;
- do
- s[0] = s[-1];
- while (--s != stats);
- *s = tmp;
- }
-
- sumFreq = s->Freq;
- escFreq = p->MinContext->Union2.SummFreq - sumFreq;
-
-
-
-
-
-
- adder = (p->OrderFall != 0);
-
- #ifdef PPMD8_FREEZE_SUPPORT
- adder |= (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE);
- #endif
-
- sumFreq = (sumFreq + 4 + adder) >> 1;
- i = p->MinContext->NumStats;
- s->Freq = (Byte)sumFreq;
-
- do
- {
- unsigned freq = (++s)->Freq;
- escFreq -= freq;
- freq = (freq + adder) >> 1;
- sumFreq += freq;
- s->Freq = (Byte)freq;
- if (freq > s[-1].Freq)
- {
- CPpmd_State tmp = *s;
- CPpmd_State *s1 = s;
- do
- {
- s1[0] = s1[-1];
- }
- while (--s1 != stats && freq > s1[-1].Freq);
- *s1 = tmp;
- }
- }
- while (--i);
-
- if (s->Freq == 0)
- {
- /* Remove all items with Freq == 0 */
- CPpmd8_Context *mc;
- unsigned numStats, numStatsNew, n0, n1;
-
- i = 0; do { i++; } while ((--s)->Freq == 0);
-
-
-
-
- escFreq += i;
- mc = p->MinContext;
- numStats = mc->NumStats;
- numStatsNew = numStats - i;
- mc->NumStats = (Byte)(numStatsNew);
- n0 = (numStats + 2) >> 1;
-
- if (numStatsNew == 0)
- {
-
- unsigned freq = (2 * (unsigned)stats->Freq + escFreq - 1) / escFreq;
- if (freq > MAX_FREQ / 3)
- freq = MAX_FREQ / 3;
- mc->Flags = (Byte)((mc->Flags & FLAG_PREV_HIGH) + PPMD8_HiBitsFlag_3(stats->Symbol));
-
-
-
-
-
- s = ONE_STATE(mc);
- *s = *stats;
- s->Freq = (Byte)freq;
- p->FoundState = s;
- Ppmd8_InsertNode(p, stats, U2I(n0));
- return;
- }
-
- n1 = (numStatsNew + 2) >> 1;
- if (n0 != n1)
- mc->Union4.Stats = STATS_REF(ShrinkUnits(p, stats, n0, n1));
- {
- // here we are for max order only. So Ppmd8_MakeEscFreq() doesn't use mc->Flags
- // but we still need current (Flags & FLAG_PREV_HIGH), if we will convert context to 1-symbol context later.
- /*
- unsigned flags = HiBits_Prepare((s = STATS(mc))->Symbol);
- i = mc->NumStats;
- do { flags |= HiBits_Prepare((++s)->Symbol); } while (--i);
- mc->Flags = (Byte)((mc->Flags & ~FLAG_SYM_HIGH) + HiBits_Convert_3(flags));
- */
- }
- }
-
-
-
-
-
-
- {
- CPpmd8_Context *mc = p->MinContext;
- mc->Union2.SummFreq = (UInt16)(sumFreq + escFreq - (escFreq >> 1));
- mc->Flags |= FLAG_RESCALED;
- p->FoundState = STATS(mc);
- }
-}
-
-
-CPpmd_See *Ppmd8_MakeEscFreq(CPpmd8 *p, unsigned numMasked1, UInt32 *escFreq)
-{
- CPpmd_See *see;
- const CPpmd8_Context *mc = p->MinContext;
- unsigned numStats = mc->NumStats;
- if (numStats != 0xFF)
- {
- // (3 <= numStats + 2 <= 256) (3 <= NS2Indx[3] and NS2Indx[256] === 26)
- see = p->See[(size_t)(unsigned)p->NS2Indx[(size_t)numStats + 2] - 3]
- + (mc->Union2.SummFreq > 11 * (numStats + 1))
- + 2 * (unsigned)(2 * numStats < ((unsigned)SUFFIX(mc)->NumStats + numMasked1))
- + mc->Flags;
-
- {
- // if (see->Summ) field is larger than 16-bit, we need only low 16 bits of Summ
- unsigned summ = (UInt16)see->Summ; // & 0xFFFF
- unsigned r = (summ >> see->Shift);
- see->Summ = (UInt16)(summ - r);
- *escFreq = r + (r == 0);
- }
- }
- else
- {
- see = &p->DummySee;
- *escFreq = 1;
- }
- return see;
-}
-
-
-static void Ppmd8_NextContext(CPpmd8 *p)
-{
- PPMD8_CTX_PTR c = CTX(SUCCESSOR(p->FoundState));
- if (p->OrderFall == 0 && (const Byte *)c >= p->UnitsStart)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd8_UpdateModel(p);
-}
-
-
-void Ppmd8_Update1(CPpmd8 *p)
-{
- CPpmd_State *s = p->FoundState;
- unsigned freq = s->Freq;
- freq += 4;
- p->MinContext->Union2.SummFreq = (UInt16)(p->MinContext->Union2.SummFreq + 4);
- s->Freq = (Byte)freq;
- if (freq > s[-1].Freq)
- {
- SWAP_STATES(s, &s[-1]);
- p->FoundState = --s;
- if (freq > MAX_FREQ)
- Ppmd8_Rescale(p);
- }
- Ppmd8_NextContext(p);
-}
-
-
-void Ppmd8_Update1_0(CPpmd8 *p)
-{
- CPpmd_State *s = p->FoundState;
- CPpmd8_Context *mc = p->MinContext;
- unsigned freq = s->Freq;
- unsigned summFreq = mc->Union2.SummFreq;
- p->PrevSuccess = (2 * freq >= summFreq); // Ppmd8 (>=)
- p->RunLength += (int)p->PrevSuccess;
- mc->Union2.SummFreq = (UInt16)(summFreq + 4);
- freq += 4;
- s->Freq = (Byte)freq;
- if (freq > MAX_FREQ)
- Ppmd8_Rescale(p);
- Ppmd8_NextContext(p);
-}
-
-
-/*
-void Ppmd8_UpdateBin(CPpmd8 *p)
-{
- unsigned freq = p->FoundState->Freq;
- p->FoundState->Freq = (Byte)(freq + (freq < 196)); // Ppmd8 (196)
- p->PrevSuccess = 1;
- p->RunLength++;
- Ppmd8_NextContext(p);
-}
-*/
-
-void Ppmd8_Update2(CPpmd8 *p)
-{
- CPpmd_State *s = p->FoundState;
- unsigned freq = s->Freq;
- freq += 4;
- p->RunLength = p->InitRL;
- p->MinContext->Union2.SummFreq = (UInt16)(p->MinContext->Union2.SummFreq + 4);
- s->Freq = (Byte)freq;
- if (freq > MAX_FREQ)
- Ppmd8_Rescale(p);
- Ppmd8_UpdateModel(p);
-}
-
-/* H->I changes:
- NS2Indx
- GlueCount, and Glue method
- BinSum
- See / EscFreq
- Ppmd8_CreateSuccessors updates more suffix contexts
- Ppmd8_UpdateModel consts.
- PrevSuccess Update
-
-Flags:
- (1 << 2) - the Context was Rescaled
- (1 << 3) - there is symbol in Stats with (sym >= 0x40) in
- (1 << 4) - main symbol of context is (sym >= 0x40)
-*/
-
-#undef RESET_TEXT
-#undef FLAG_RESCALED
-#undef FLAG_PREV_HIGH
-#undef HiBits_Prepare
-#undef HiBits_Convert_3
-#undef HiBits_Convert_4
-#undef PPMD8_HiBitsFlag_3
-#undef PPMD8_HiBitsFlag_4
-#undef RESTORE_MODEL
-
-#undef MAX_FREQ
-#undef UNIT_SIZE
-#undef U2B
-#undef U2I
-#undef I2U
-
-#undef REF
-#undef STATS_REF
-#undef CTX
-#undef STATS
-#undef ONE_STATE
-#undef SUFFIX
-#undef NODE
-#undef EMPTY_NODE
-#undef MEM_12_CPY
-#undef SUCCESSOR
-#undef SWAP_STATES
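/*
   A standalone sketch of the allocator size-class tables that Ppmd8_Construct
   above builds (Units2Indx / Indx2Units). It reproduces the same loop and
   checks the round trip: U2I maps a unit count to a free-list index, and
   I2U(U2I(nu)) is always >= nu. The value 38 for the number of indexes is
   assumed to match PPMD_NUM_INDEXES from Ppmd.h.
*/
#include <stdio.h>

#define NUM_INDEXES 38

int main(void)
{
  unsigned char Units2Indx[128], Indx2Units[NUM_INDEXES];
  unsigned i, k, nu;

  for (i = 0, k = 0; i < NUM_INDEXES; i++)
  {
    unsigned step = (i >= 12 ? 4 : (i >> 2) + 1);
    do { Units2Indx[k++] = (unsigned char)i; } while (--step);
    Indx2Units[i] = (unsigned char)k;
  }
  printf("table covers %u unit sizes (expected 128)\n", k);

  for (nu = 1; nu <= 128; nu++)
  {
    unsigned indx = Units2Indx[nu - 1];   /* U2I(nu) */
    if (Indx2Units[indx] < nu)            /* I2U(indx) must fit nu units */
    { printf("mapping broken at nu=%u\n", nu); return 1; }
  }
  printf("round trip ok: I2U(U2I(nu)) >= nu for nu = 1..128\n");
  return 0;
}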
diff --git a/3rdparty/7z/src/Ppmd8.h b/3rdparty/7z/src/Ppmd8.h
deleted file mode 100644
index d5bb57e159..0000000000
--- a/3rdparty/7z/src/Ppmd8.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* Ppmd8.h -- Ppmd8 (PPMdI) compression codec
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.I (2002): Dmitry Shkarin : Public domain
- Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
-
-#ifndef ZIP7_INC_PPMD8_H
-#define ZIP7_INC_PPMD8_H
-
-#include "Ppmd.h"
-
-EXTERN_C_BEGIN
-
-#define PPMD8_MIN_ORDER 2
-#define PPMD8_MAX_ORDER 16
-
-
-
-
-struct CPpmd8_Context_;
-
-typedef Ppmd_Ref_Type(struct CPpmd8_Context_) CPpmd8_Context_Ref;
-
-// MY_CPU_pragma_pack_push_1
-
-typedef struct CPpmd8_Context_
-{
- Byte NumStats;
- Byte Flags;
-
- union
- {
- UInt16 SummFreq;
- CPpmd_State2 State2;
- } Union2;
-
- union
- {
- CPpmd_State_Ref Stats;
- CPpmd_State4 State4;
- } Union4;
-
- CPpmd8_Context_Ref Suffix;
-} CPpmd8_Context;
-
-// MY_CPU_pragma_pop
-
-#define Ppmd8Context_OneState(p) ((CPpmd_State *)&(p)->Union2)
-
-/* PPMdI code rev.2 contains the fix over PPMdI code rev.1.
- But the code PPMdI.2 is not compatible with PPMdI.1 for some files compressed
- in FREEZE mode. So we disable FREEZE mode support. */
-
-// #define PPMD8_FREEZE_SUPPORT
-
-enum
-{
- PPMD8_RESTORE_METHOD_RESTART,
- PPMD8_RESTORE_METHOD_CUT_OFF
- #ifdef PPMD8_FREEZE_SUPPORT
- , PPMD8_RESTORE_METHOD_FREEZE
- #endif
- , PPMD8_RESTORE_METHOD_UNSUPPPORTED
-};
-
-
-
-
-
-
-
-
-typedef struct
-{
- CPpmd8_Context *MinContext, *MaxContext;
- CPpmd_State *FoundState;
- unsigned OrderFall, InitEsc, PrevSuccess, MaxOrder, RestoreMethod;
- Int32 RunLength, InitRL; /* must be 32-bit at least */
-
- UInt32 Size;
- UInt32 GlueCount;
- UInt32 AlignOffset;
- Byte *Base, *LoUnit, *HiUnit, *Text, *UnitsStart;
-
- UInt32 Range;
- UInt32 Code;
- UInt32 Low;
- union
- {
- IByteInPtr In;
- IByteOutPtr Out;
- } Stream;
-
- Byte Indx2Units[PPMD_NUM_INDEXES + 2]; // +2 for alignment
- Byte Units2Indx[128];
- CPpmd_Void_Ref FreeList[PPMD_NUM_INDEXES];
- UInt32 Stamps[PPMD_NUM_INDEXES];
- Byte NS2BSIndx[256], NS2Indx[260];
- Byte ExpEscape[16];
- CPpmd_See DummySee, See[24][32];
- UInt16 BinSumm[25][64];
-
-} CPpmd8;
-
-
-void Ppmd8_Construct(CPpmd8 *p);
-BoolInt Ppmd8_Alloc(CPpmd8 *p, UInt32 size, ISzAllocPtr alloc);
-void Ppmd8_Free(CPpmd8 *p, ISzAllocPtr alloc);
-void Ppmd8_Init(CPpmd8 *p, unsigned maxOrder, unsigned restoreMethod);
-#define Ppmd8_WasAllocated(p) ((p)->Base != NULL)
-
-
-/* ---------- Internal Functions ---------- */
-
-#define Ppmd8_GetPtr(p, ptr) Ppmd_GetPtr(p, ptr)
-#define Ppmd8_GetContext(p, ptr) Ppmd_GetPtr_Type(p, ptr, CPpmd8_Context)
-#define Ppmd8_GetStats(p, ctx) Ppmd_GetPtr_Type(p, (ctx)->Union4.Stats, CPpmd_State)
-
-void Ppmd8_Update1(CPpmd8 *p);
-void Ppmd8_Update1_0(CPpmd8 *p);
-void Ppmd8_Update2(CPpmd8 *p);
-
-
-
-
-
-
-#define Ppmd8_GetBinSumm(p) \
- &p->BinSumm[p->NS2Indx[(size_t)Ppmd8Context_OneState(p->MinContext)->Freq - 1]] \
- [ p->PrevSuccess + ((p->RunLength >> 26) & 0x20) \
- + p->NS2BSIndx[Ppmd8_GetContext(p, p->MinContext->Suffix)->NumStats] + \
- + p->MinContext->Flags ]
-
-
-CPpmd_See *Ppmd8_MakeEscFreq(CPpmd8 *p, unsigned numMasked, UInt32 *scale);
-
-
-/* 20.01: the original PPMdI encoder and decoder could probably work incorrectly in some rare cases
-   where the original PPMdI code can produce a "Divide by Zero" operation.
-   We use the following fix so that the encoder and decoder work correctly in all cases:
-   we correct (Escape_Freq) and (_sum_) if (_sum_) is larger than (p->Range). */
-#define PPMD8_CORRECT_SUM_RANGE(p, _sum_) if (_sum_ > p->Range /* /1 */) _sum_ = p->Range;
-
-
-/* ---------- Decode ---------- */
-
-#define PPMD8_SYM_END (-1)
-#define PPMD8_SYM_ERROR (-2)
-
-/*
-You must set (CPpmd8::Stream.In) before Ppmd8_RangeDec_Init()
-
-Ppmd8_DecodeSymbol()
-out:
- >= 0 : decoded byte
- -1 : PPMD8_SYM_END : End of payload marker
- -2 : PPMD8_SYM_ERROR : Data error
-*/
-
-
-BoolInt Ppmd8_Init_RangeDec(CPpmd8 *p);
-#define Ppmd8_RangeDec_IsFinishedOK(p) ((p)->Code == 0)
-int Ppmd8_DecodeSymbol(CPpmd8 *p);
-
-
-
-
-
-
-
-
-/* ---------- Encode ---------- */
-
-#define Ppmd8_Init_RangeEnc(p) { (p)->Low = 0; (p)->Range = 0xFFFFFFFF; }
-void Ppmd8_Flush_RangeEnc(CPpmd8 *p);
-void Ppmd8_EncodeSymbol(CPpmd8 *p, int symbol);
-
-
-EXTERN_C_END
-
-#endif
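/*
   A hedged usage sketch for the decoder API declared in the header above
   (Ppmd8_Construct/Alloc/Init, Ppmd8_Init_RangeDec, Ppmd8_DecodeSymbol).
   The ISzAlloc and IByteIn shapes (Alloc/Free and Read members) are assumed
   to match 7zTypes.h; the memory size and model order are arbitrary example
   values, not values required by the library.
*/
#include <stdio.h>
#include <stdlib.h>

#include "Ppmd8.h"

static void *SzAlloc(ISzAllocPtr p, size_t size) { (void)p; return malloc(size); }
static void SzFree(ISzAllocPtr p, void *addr)    { (void)p; free(addr); }
static const ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* trivial in-memory byte source; returns 0 past the end (treated as EOF) */
static const Byte *g_in;
static size_t g_inPos, g_inSize;
static Byte InRead(IByteInPtr pp) { (void)pp; return g_inPos < g_inSize ? g_in[g_inPos++] : 0; }
static IByteIn g_byteIn = { InRead };

int DecodeBuffer(const Byte *data, size_t size, unsigned order)
{
  CPpmd8 ppmd;
  int sym;

  g_in = data; g_inPos = 0; g_inSize = size;

  Ppmd8_Construct(&ppmd);
  if (!Ppmd8_Alloc(&ppmd, 1 << 24, &g_Alloc))    /* 16 MB model memory (example) */
    return -1;

  ppmd.Stream.In = &g_byteIn;                    /* must be set before range init */
  if (!Ppmd8_Init_RangeDec(&ppmd))
  { Ppmd8_Free(&ppmd, &g_Alloc); return -1; }
  Ppmd8_Init(&ppmd, order, PPMD8_RESTORE_METHOD_RESTART);

  for (;;)
  {
    sym = Ppmd8_DecodeSymbol(&ppmd);
    if (sym < 0)                                 /* PPMD8_SYM_END or PPMD8_SYM_ERROR */
      break;
    putchar(sym);
  }

  {
    int ok = (sym == PPMD8_SYM_END) && Ppmd8_RangeDec_IsFinishedOK(&ppmd);
    Ppmd8_Free(&ppmd, &g_Alloc);
    return ok ? 0 : -1;
  }
}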
diff --git a/3rdparty/7z/src/Ppmd8Dec.c b/3rdparty/7z/src/Ppmd8Dec.c
deleted file mode 100644
index 72d3626ea1..0000000000
--- a/3rdparty/7z/src/Ppmd8Dec.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/* Ppmd8Dec.c -- Ppmd8 (PPMdI) Decoder
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.I (2002): Dmitry Shkarin : Public domain
- Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
-
-#include "Precomp.h"
-
-#include "Ppmd8.h"
-
-#define kTop ((UInt32)1 << 24)
-#define kBot ((UInt32)1 << 15)
-
-#define READ_BYTE(p) IByteIn_Read((p)->Stream.In)
-
-BoolInt Ppmd8_Init_RangeDec(CPpmd8 *p)
-{
- unsigned i;
- p->Code = 0;
- p->Range = 0xFFFFFFFF;
- p->Low = 0;
-
- for (i = 0; i < 4; i++)
- p->Code = (p->Code << 8) | READ_BYTE(p);
- return (p->Code < 0xFFFFFFFF);
-}
-
-#define RC_NORM(p) \
- while ((p->Low ^ (p->Low + p->Range)) < kTop \
- || (p->Range < kBot && ((p->Range = (0 - p->Low) & (kBot - 1)), 1))) { \
- p->Code = (p->Code << 8) | READ_BYTE(p); \
- p->Range <<= 8; p->Low <<= 8; }
-
-// we must use only one type of Normalization from two: LOCAL or REMOTE
-#define RC_NORM_LOCAL(p) // RC_NORM(p)
-#define RC_NORM_REMOTE(p) RC_NORM(p)
-
-#define R p
-
-Z7_FORCE_INLINE
-// Z7_NO_INLINE
-static void Ppmd8_RD_Decode(CPpmd8 *p, UInt32 start, UInt32 size)
-{
- start *= R->Range;
- R->Low += start;
- R->Code -= start;
- R->Range *= size;
- RC_NORM_LOCAL(R)
-}
-
-#define RC_Decode(start, size) Ppmd8_RD_Decode(p, start, size);
-#define RC_DecodeFinal(start, size) RC_Decode(start, size) RC_NORM_REMOTE(R)
-#define RC_GetThreshold(total) (R->Code / (R->Range /= (total)))
-
-
-#define CTX(ref) ((CPpmd8_Context *)Ppmd8_GetContext(p, ref))
-// typedef CPpmd8_Context * CTX_PTR;
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-void Ppmd8_UpdateModel(CPpmd8 *p);
-
-#define MASK(sym) ((unsigned char *)charMask)[sym]
-
-
-int Ppmd8_DecodeSymbol(CPpmd8 *p)
-{
- size_t charMask[256 / sizeof(size_t)];
-
- if (p->MinContext->NumStats != 0)
- {
- CPpmd_State *s = Ppmd8_GetStats(p, p->MinContext);
- unsigned i;
- UInt32 count, hiCnt;
- UInt32 summFreq = p->MinContext->Union2.SummFreq;
-
- PPMD8_CORRECT_SUM_RANGE(p, summFreq)
-
-
- count = RC_GetThreshold(summFreq);
- hiCnt = count;
-
- if ((Int32)(count -= s->Freq) < 0)
- {
- Byte sym;
- RC_DecodeFinal(0, s->Freq)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd8_Update1_0(p);
- return sym;
- }
-
- p->PrevSuccess = 0;
- i = p->MinContext->NumStats;
-
- do
- {
- if ((Int32)(count -= (++s)->Freq) < 0)
- {
- Byte sym;
- RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd8_Update1(p);
- return sym;
- }
- }
- while (--i);
-
- if (hiCnt >= summFreq)
- return PPMD8_SYM_ERROR;
-
- hiCnt -= count;
- RC_Decode(hiCnt, summFreq - hiCnt)
-
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- // i = p->MinContext->NumStats - 1;
- // do { MASK((--s)->Symbol) = 0; } while (--i);
- {
- CPpmd_State *s2 = Ppmd8_GetStats(p, p->MinContext);
- MASK(s->Symbol) = 0;
- do
- {
- unsigned sym0 = s2[0].Symbol;
- unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
- else
- {
- CPpmd_State *s = Ppmd8Context_OneState(p->MinContext);
- UInt16 *prob = Ppmd8_GetBinSumm(p);
- UInt32 pr = *prob;
- UInt32 size0 = (R->Range >> 14) * pr;
- pr = PPMD_UPDATE_PROB_1(pr);
-
- if (R->Code < size0)
- {
- Byte sym;
- *prob = (UInt16)(pr + (1 << PPMD_INT_BITS));
-
- // RangeDec_DecodeBit0(size0);
- R->Range = size0;
- RC_NORM(R)
-
-
-
- // sym = (p->FoundState = Ppmd8Context_OneState(p->MinContext))->Symbol;
- // Ppmd8_UpdateBin(p);
- {
- unsigned freq = s->Freq;
- CPpmd8_Context *c = CTX(SUCCESSOR(s));
- sym = s->Symbol;
- p->FoundState = s;
- p->PrevSuccess = 1;
- p->RunLength++;
- s->Freq = (Byte)(freq + (freq < 196));
- // NextContext(p);
- if (p->OrderFall == 0 && (const Byte *)c >= p->UnitsStart)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd8_UpdateModel(p);
- }
- return sym;
- }
-
- *prob = (UInt16)pr;
- p->InitEsc = p->ExpEscape[pr >> 10];
-
- // RangeDec_DecodeBit1(rc2, size0);
- R->Low += size0;
- R->Code -= size0;
- R->Range = (R->Range & ~((UInt32)PPMD_BIN_SCALE - 1)) - size0;
- RC_NORM_LOCAL(R)
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- MASK(Ppmd8Context_OneState(p->MinContext)->Symbol) = 0;
- p->PrevSuccess = 0;
- }
-
- for (;;)
- {
- CPpmd_State *s, *s2;
- UInt32 freqSum, count, hiCnt;
- UInt32 freqSum2;
- CPpmd_See *see;
- CPpmd8_Context *mc;
- unsigned numMasked;
- RC_NORM_REMOTE(R)
- mc = p->MinContext;
- numMasked = mc->NumStats;
-
- do
- {
- p->OrderFall++;
- if (!mc->Suffix)
- return PPMD8_SYM_END;
- mc = Ppmd8_GetContext(p, mc->Suffix);
- }
- while (mc->NumStats == numMasked);
-
- s = Ppmd8_GetStats(p, mc);
-
- {
- unsigned num = (unsigned)mc->NumStats + 1;
- unsigned num2 = num / 2;
-
- num &= 1;
- hiCnt = (s->Freq & (unsigned)(MASK(s->Symbol))) & (0 - (UInt32)num);
- s += num;
- p->MinContext = mc;
-
- do
- {
- unsigned sym0 = s[0].Symbol;
- unsigned sym1 = s[1].Symbol;
- s += 2;
- hiCnt += (s[-2].Freq & (unsigned)(MASK(sym0)));
- hiCnt += (s[-1].Freq & (unsigned)(MASK(sym1)));
- }
- while (--num2);
- }
-
- see = Ppmd8_MakeEscFreq(p, numMasked, &freqSum);
- freqSum += hiCnt;
- freqSum2 = freqSum;
- PPMD8_CORRECT_SUM_RANGE(R, freqSum2)
-
-
- count = RC_GetThreshold(freqSum2);
-
- if (count < hiCnt)
- {
- Byte sym;
- // Ppmd_See_UPDATE(see) // new (see->Summ) value can overflow over 16-bits in some rare cases
- s = Ppmd8_GetStats(p, p->MinContext);
- hiCnt = count;
-
-
- {
- for (;;)
- {
- count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
- // count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
- }
- }
- s--;
- RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
-
- // new (see->Summ) value can overflow over 16-bits in some rare cases
- Ppmd_See_UPDATE(see)
- p->FoundState = s;
- sym = s->Symbol;
- Ppmd8_Update2(p);
- return sym;
- }
-
- if (count >= freqSum2)
- return PPMD8_SYM_ERROR;
-
- RC_Decode(hiCnt, freqSum2 - hiCnt)
-
- // We increase (see->Summ) for sum of Freqs of all non_Masked symbols.
- // new (see->Summ) value can overflow over 16-bits in some rare cases
- see->Summ = (UInt16)(see->Summ + freqSum);
-
- s = Ppmd8_GetStats(p, p->MinContext);
- s2 = s + p->MinContext->NumStats + 1;
- do
- {
- MASK(s->Symbol) = 0;
- s++;
- }
- while (s != s2);
- }
-}
-
-#undef kTop
-#undef kBot
-#undef READ_BYTE
-#undef RC_NORM_BASE
-#undef RC_NORM_1
-#undef RC_NORM
-#undef RC_NORM_LOCAL
-#undef RC_NORM_REMOTE
-#undef R
-#undef RC_Decode
-#undef RC_DecodeFinal
-#undef RC_GetThreshold
-#undef CTX
-#undef SUCCESSOR
-#undef MASK
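/*
   A standalone sketch of the threshold search at the heart of
   Ppmd8_DecodeSymbol above: RC_GetThreshold() yields a value in
   [0, SummFreq), and the decoder walks the state array subtracting
   frequencies until the count goes negative; the matched state supplies
   (low, freq) for the final range-coder update. The tiny table here is
   illustrative only.
*/
#include <stdio.h>

typedef struct { unsigned char symbol, freq; } State;

/* returns the index of the matched state and fills *low for RC_DecodeFinal */
static unsigned FindByThreshold(const State *s, unsigned n, unsigned count, unsigned *low)
{
  unsigned i, sum = 0;
  for (i = 0; i < n; i++)
  {
    if (count < sum + s[i].freq)    /* same test as (Int32)(count -= freq) < 0 */
    { *low = sum; return i; }
    sum += s[i].freq;
  }
  *low = sum;                       /* escape: threshold fell into Escape_Freq */
  return n;
}

int main(void)
{
  const State ctx[4] = { {'a',10}, {'b',6}, {'c',3}, {'d',1} };  /* SummFreq = 20 + escape */
  unsigned low, idx;

  idx = FindByThreshold(ctx, 4, 12, &low);   /* threshold 12 -> 'b' (low=10, freq=6) */
  printf("threshold 12 -> index %u, low %u\n", idx, low);

  idx = FindByThreshold(ctx, 4, 21, &low);   /* threshold past SummFreq -> escape */
  printf("threshold 21 -> index %u (escape), low %u\n", idx, low);
  return 0;
}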
diff --git a/3rdparty/7z/src/Ppmd8Enc.c b/3rdparty/7z/src/Ppmd8Enc.c
deleted file mode 100644
index 9e29ef710e..0000000000
--- a/3rdparty/7z/src/Ppmd8Enc.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/* Ppmd8Enc.c -- Ppmd8 (PPMdI) Encoder
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on:
- PPMd var.I (2002): Dmitry Shkarin : Public domain
- Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
-
-#include "Precomp.h"
-
-#include "Ppmd8.h"
-
-#define kTop ((UInt32)1 << 24)
-#define kBot ((UInt32)1 << 15)
-
-#define WRITE_BYTE(p) IByteOut_Write(p->Stream.Out, (Byte)(p->Low >> 24))
-
-void Ppmd8_Flush_RangeEnc(CPpmd8 *p)
-{
- unsigned i;
- for (i = 0; i < 4; i++, p->Low <<= 8 )
- WRITE_BYTE(p);
-}
-
-
-
-
-
-
-#define RC_NORM(p) \
- while ((p->Low ^ (p->Low + p->Range)) < kTop \
- || (p->Range < kBot && ((p->Range = (0 - p->Low) & (kBot - 1)), 1))) \
- { WRITE_BYTE(p); p->Range <<= 8; p->Low <<= 8; }
-
-
-
-
-
-
-
-
-
-
-
-
-
-// we must use only one type of Normalization from two: LOCAL or REMOTE
-#define RC_NORM_LOCAL(p) // RC_NORM(p)
-#define RC_NORM_REMOTE(p) RC_NORM(p)
-
-// #define RC_PRE(total) p->Range /= total;
-// #define RC_PRE(total)
-
-#define R p
-
-
-
-
-Z7_FORCE_INLINE
-// Z7_NO_INLINE
-static void Ppmd8_RangeEnc_Encode(CPpmd8 *p, UInt32 start, UInt32 size, UInt32 total)
-{
- R->Low += start * (R->Range /= total);
- R->Range *= size;
- RC_NORM_LOCAL(R)
-}
-
-
-
-
-
-
-
-
-
-
-#define RC_Encode(start, size, total) Ppmd8_RangeEnc_Encode(p, start, size, total);
-#define RC_EncodeFinal(start, size, total) RC_Encode(start, size, total) RC_NORM_REMOTE(p)
-
-#define CTX(ref) ((CPpmd8_Context *)Ppmd8_GetContext(p, ref))
-
-// typedef CPpmd8_Context * CTX_PTR;
-#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
-
-void Ppmd8_UpdateModel(CPpmd8 *p);
-
-#define MASK(sym) ((unsigned char *)charMask)[sym]
-
-// Z7_FORCE_INLINE
-// static
-void Ppmd8_EncodeSymbol(CPpmd8 *p, int symbol)
-{
- size_t charMask[256 / sizeof(size_t)];
-
- if (p->MinContext->NumStats != 0)
- {
- CPpmd_State *s = Ppmd8_GetStats(p, p->MinContext);
- UInt32 sum;
- unsigned i;
- UInt32 summFreq = p->MinContext->Union2.SummFreq;
-
- PPMD8_CORRECT_SUM_RANGE(p, summFreq)
-
- // RC_PRE(summFreq);
-
- if (s->Symbol == symbol)
- {
-
- RC_EncodeFinal(0, s->Freq, summFreq)
- p->FoundState = s;
- Ppmd8_Update1_0(p);
- return;
- }
- p->PrevSuccess = 0;
- sum = s->Freq;
- i = p->MinContext->NumStats;
- do
- {
- if ((++s)->Symbol == symbol)
- {
-
- RC_EncodeFinal(sum, s->Freq, summFreq)
- p->FoundState = s;
- Ppmd8_Update1(p);
- return;
- }
- sum += s->Freq;
- }
- while (--i);
-
-
- RC_Encode(sum, summFreq - sum, summFreq)
-
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- // MASK(s->Symbol) = 0;
- // i = p->MinContext->NumStats;
- // do { MASK((--s)->Symbol) = 0; } while (--i);
- {
- CPpmd_State *s2 = Ppmd8_GetStats(p, p->MinContext);
- MASK(s->Symbol) = 0;
- do
- {
- unsigned sym0 = s2[0].Symbol;
- unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
- else
- {
- UInt16 *prob = Ppmd8_GetBinSumm(p);
- CPpmd_State *s = Ppmd8Context_OneState(p->MinContext);
- UInt32 pr = *prob;
- const UInt32 bound = (R->Range >> 14) * pr;
- pr = PPMD_UPDATE_PROB_1(pr);
- if (s->Symbol == symbol)
- {
- *prob = (UInt16)(pr + (1 << PPMD_INT_BITS));
- // RangeEnc_EncodeBit_0(p, bound);
- R->Range = bound;
- RC_NORM(R)
-
- // p->FoundState = s;
- // Ppmd8_UpdateBin(p);
- {
- const unsigned freq = s->Freq;
- CPpmd8_Context *c = CTX(SUCCESSOR(s));
- p->FoundState = s;
- p->PrevSuccess = 1;
- p->RunLength++;
- s->Freq = (Byte)(freq + (freq < 196)); // Ppmd8 (196)
- // NextContext(p);
- if (p->OrderFall == 0 && (const Byte *)c >= p->UnitsStart)
- p->MaxContext = p->MinContext = c;
- else
- Ppmd8_UpdateModel(p);
- }
- return;
- }
-
- *prob = (UInt16)pr;
- p->InitEsc = p->ExpEscape[pr >> 10];
- // RangeEnc_EncodeBit_1(p, bound);
- R->Low += bound;
- R->Range = (R->Range & ~((UInt32)PPMD_BIN_SCALE - 1)) - bound;
- RC_NORM_LOCAL(R)
-
- PPMD_SetAllBitsIn256Bytes(charMask)
- MASK(s->Symbol) = 0;
- p->PrevSuccess = 0;
- }
-
- for (;;)
- {
- CPpmd_See *see;
- CPpmd_State *s;
- UInt32 sum, escFreq;
- CPpmd8_Context *mc;
- unsigned i, numMasked;
-
- RC_NORM_REMOTE(p)
-
- mc = p->MinContext;
- numMasked = mc->NumStats;
-
- do
- {
- p->OrderFall++;
- if (!mc->Suffix)
- return; /* EndMarker (symbol = -1) */
- mc = Ppmd8_GetContext(p, mc->Suffix);
-
- }
- while (mc->NumStats == numMasked);
-
- p->MinContext = mc;
-
- see = Ppmd8_MakeEscFreq(p, numMasked, &escFreq);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- s = Ppmd8_GetStats(p, p->MinContext);
- sum = 0;
- i = (unsigned)p->MinContext->NumStats + 1;
-
- do
- {
- const unsigned cur = s->Symbol;
- if ((int)cur == symbol)
- {
- const UInt32 low = sum;
- const UInt32 freq = s->Freq;
- unsigned num2;
-
- Ppmd_See_UPDATE(see)
- p->FoundState = s;
- sum += escFreq;
-
- num2 = i / 2;
- i &= 1;
- sum += freq & (0 - (UInt32)i);
- if (num2 != 0)
- {
- s += i;
- for (;;)
- {
- unsigned sym0 = s[0].Symbol;
- unsigned sym1 = s[1].Symbol;
- s += 2;
- sum += (s[-2].Freq & (unsigned)(MASK(sym0)));
- sum += (s[-1].Freq & (unsigned)(MASK(sym1)));
- if (--num2 == 0)
- break;
- }
- }
-
- PPMD8_CORRECT_SUM_RANGE(p, sum)
-
- RC_EncodeFinal(low, freq, sum)
- Ppmd8_Update2(p);
- return;
- }
- sum += (s->Freq & (unsigned)(MASK(cur)));
- s++;
- }
- while (--i);
-
- {
- UInt32 total = sum + escFreq;
- see->Summ = (UInt16)(see->Summ + total);
- PPMD8_CORRECT_SUM_RANGE(p, total)
-
- RC_Encode(sum, total - sum, total)
- }
-
- {
- const CPpmd_State *s2 = Ppmd8_GetStats(p, p->MinContext);
- s--;
- MASK(s->Symbol) = 0;
- do
- {
- const unsigned sym0 = s2[0].Symbol;
- const unsigned sym1 = s2[1].Symbol;
- s2 += 2;
- MASK(sym0) = 0;
- MASK(sym1) = 0;
- }
- while (s2 < s);
- }
- }
-}
-
-
-
-
-
-
-
-
-
-#undef kTop
-#undef kBot
-#undef WRITE_BYTE
-#undef RC_NORM_BASE
-#undef RC_NORM_1
-#undef RC_NORM
-#undef RC_NORM_LOCAL
-#undef RC_NORM_REMOTE
-#undef R
-#undef RC_Encode
-#undef RC_EncodeFinal
-
-#undef CTX
-#undef SUCCESSOR
-#undef MASK
diff --git a/3rdparty/7z/src/Precomp.h b/3rdparty/7z/src/Precomp.h
deleted file mode 100644
index dc476aa69e..0000000000
--- a/3rdparty/7z/src/Precomp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Precomp.h -- StdAfx
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_PRECOMP_H
-#define ZIP7_INC_PRECOMP_H
-
-#include "Compiler.h"
-/* #include "7zTypes.h" */
-
-#endif
diff --git a/3rdparty/7z/src/RotateDefs.h b/3rdparty/7z/src/RotateDefs.h
deleted file mode 100644
index 8026cd1066..0000000000
--- a/3rdparty/7z/src/RotateDefs.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* RotateDefs.h -- Rotate functions
-2023-06-18 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_ROTATE_DEFS_H
-#define ZIP7_INC_ROTATE_DEFS_H
-
-#ifdef _MSC_VER
-
-#include <intrin.h>
-
-/* don't use _rotl with old MINGW. It can insert slow call to function. */
-
-/* #if (_MSC_VER >= 1200) */
-#pragma intrinsic(_rotl)
-#pragma intrinsic(_rotr)
-/* #endif */
-
-#define rotlFixed(x, n) _rotl((x), (n))
-#define rotrFixed(x, n) _rotr((x), (n))
-
-#if (_MSC_VER >= 1300)
-#define Z7_ROTL64(x, n) _rotl64((x), (n))
-#define Z7_ROTR64(x, n) _rotr64((x), (n))
-#else
-#define Z7_ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))
-#define Z7_ROTR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
-#endif
-
-#else
-
-/* new compilers can translate these macros to fast commands. */
-
-#if defined(__clang__) && (__clang_major__ >= 4) \
- || defined(__GNUC__) && (__GNUC__ >= 5)
-/* GCC 4.9.0 and clang 3.5 can recognize more correct version: */
-#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (-(n) & 31)))
-#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (-(n) & 31)))
-#define Z7_ROTL64(x, n) (((x) << (n)) | ((x) >> (-(n) & 63)))
-#define Z7_ROTR64(x, n) (((x) >> (n)) | ((x) << (-(n) & 63)))
-#else
-/* for old GCC / clang: */
-#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
-#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
-#define Z7_ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))
-#define Z7_ROTR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
-#endif
-
-#endif
-
-#endif
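For context, the RotateDefs.h removed above maps rotlFixed/rotrFixed and Z7_ROTL64/Z7_ROTR64 either onto MSVC intrinsics or onto shift/or expressions. A minimal sketch of the 32-bit fallback form, shown only to illustrate the "(-(n) & 31)" variant that newer GCC/Clang recognize as a single rotate and that stays well-defined for n == 0; the macro bodies are copied from the deleted header, the surrounding program is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Fallback forms as in the deleted header: shift-count masking avoids the
   undefined 32-bit shift that the "32 - n" variant hits when n == 0. */
#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (-(n) & 31)))
#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (-(n) & 31)))

int main(void)
{
    uint32_t v = 0x80000001u;
    /* prints "00000003 c0000000" */
    printf("%08x %08x\n", rotlFixed(v, 1), rotrFixed(v, 1));
    return 0;
}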
diff --git a/3rdparty/7z/src/Sha1.c b/3rdparty/7z/src/Sha1.c
deleted file mode 100644
index fd6c018c95..0000000000
--- a/3rdparty/7z/src/Sha1.c
+++ /dev/null
@@ -1,498 +0,0 @@
-/* Sha1.c -- SHA-1 Hash
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on public domain code of Steve Reid from Wei Dai's Crypto++ library. */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "CpuArch.h"
-#include "RotateDefs.h"
-#include "Sha1.h"
-
-#if defined(_MSC_VER) && (_MSC_VER < 1900)
-// #define USE_MY_MM
-#endif
-
-#ifdef MY_CPU_X86_OR_AMD64
- #ifdef _MSC_VER
- #if _MSC_VER >= 1200
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #elif defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 8) // fix that check
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #elif defined(__INTEL_COMPILER)
- #if (__INTEL_COMPILER >= 1800) // fix that check
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #endif
-#elif defined(MY_CPU_ARM_OR_ARM64)
- #ifdef _MSC_VER
- #if _MSC_VER >= 1910 && _MSC_VER >= 1929 && _MSC_FULL_VER >= 192930037
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #elif defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 6) // fix that check
- #define Z7_COMPILER_SHA1_SUPPORTED
- #endif
- #endif
-#endif
-
-void Z7_FASTCALL Sha1_UpdateBlocks(UInt32 state[5], const Byte *data, size_t numBlocks);
-
-#ifdef Z7_COMPILER_SHA1_SUPPORTED
- void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[5], const Byte *data, size_t numBlocks);
-
- static SHA1_FUNC_UPDATE_BLOCKS g_SHA1_FUNC_UPDATE_BLOCKS = Sha1_UpdateBlocks;
- static SHA1_FUNC_UPDATE_BLOCKS g_SHA1_FUNC_UPDATE_BLOCKS_HW;
-
- #define SHA1_UPDATE_BLOCKS(p) p->func_UpdateBlocks
-#else
- #define SHA1_UPDATE_BLOCKS(p) Sha1_UpdateBlocks
-#endif
-
-
-BoolInt Sha1_SetFunction(CSha1 *p, unsigned algo)
-{
- SHA1_FUNC_UPDATE_BLOCKS func = Sha1_UpdateBlocks;
-
- #ifdef Z7_COMPILER_SHA1_SUPPORTED
- if (algo != SHA1_ALGO_SW)
- {
- if (algo == SHA1_ALGO_DEFAULT)
- func = g_SHA1_FUNC_UPDATE_BLOCKS;
- else
- {
- if (algo != SHA1_ALGO_HW)
- return False;
- func = g_SHA1_FUNC_UPDATE_BLOCKS_HW;
- if (!func)
- return False;
- }
- }
- #else
- if (algo > 1)
- return False;
- #endif
-
- p->func_UpdateBlocks = func;
- return True;
-}
-
-
-/* define it for speed optimization */
-// #define Z7_SHA1_UNROLL
-
-// allowed unroll steps: (1, 2, 4, 5, 20)
-
-#undef Z7_SHA1_BIG_W
-#ifdef Z7_SHA1_UNROLL
- #define STEP_PRE 20
- #define STEP_MAIN 20
-#else
- #define Z7_SHA1_BIG_W
- #define STEP_PRE 5
- #define STEP_MAIN 5
-#endif
-
-
-#ifdef Z7_SHA1_BIG_W
- #define kNumW 80
- #define w(i) W[i]
-#else
- #define kNumW 16
- #define w(i) W[(i)&15]
-#endif
-
-#define w0(i) (W[i] = GetBe32(data + (size_t)(i) * 4))
-#define w1(i) (w(i) = rotlFixed(w((size_t)(i)-3) ^ w((size_t)(i)-8) ^ w((size_t)(i)-14) ^ w((size_t)(i)-16), 1))
-
-#define f0(x,y,z) ( 0x5a827999 + (z^(x&(y^z))) )
-#define f1(x,y,z) ( 0x6ed9eba1 + (x^y^z) )
-#define f2(x,y,z) ( 0x8f1bbcdc + ((x&y)|(z&(x|y))) )
-#define f3(x,y,z) ( 0xca62c1d6 + (x^y^z) )
-
-/*
-#define T1(fx, ww) \
- tmp = e + fx(b,c,d) + ww + rotlFixed(a, 5); \
- e = d; \
- d = c; \
- c = rotlFixed(b, 30); \
- b = a; \
- a = tmp; \
-*/
-
-#define T5(a,b,c,d,e, fx, ww) \
- e += fx(b,c,d) + ww + rotlFixed(a, 5); \
- b = rotlFixed(b, 30); \
-
-
-/*
-#define R1(i, fx, wx) \
- T1 ( fx, wx(i)); \
-
-#define R2(i, fx, wx) \
- R1 ( (i) , fx, wx); \
- R1 ( (i) + 1, fx, wx); \
-
-#define R4(i, fx, wx) \
- R2 ( (i) , fx, wx); \
- R2 ( (i) + 2, fx, wx); \
-*/
-
-#define M5(i, fx, wx0, wx1) \
- T5 ( a,b,c,d,e, fx, wx0((i) ) ) \
- T5 ( e,a,b,c,d, fx, wx1((i)+1) ) \
- T5 ( d,e,a,b,c, fx, wx1((i)+2) ) \
- T5 ( c,d,e,a,b, fx, wx1((i)+3) ) \
- T5 ( b,c,d,e,a, fx, wx1((i)+4) ) \
-
-#define R5(i, fx, wx) \
- M5 ( i, fx, wx, wx) \
-
-
-#if STEP_PRE > 5
-
- #define R20_START \
- R5 ( 0, f0, w0) \
- R5 ( 5, f0, w0) \
- R5 ( 10, f0, w0) \
- M5 ( 15, f0, w0, w1) \
-
- #elif STEP_PRE == 5
-
- #define R20_START \
- { size_t i; for (i = 0; i < 15; i += STEP_PRE) \
- { R5(i, f0, w0) } } \
- M5 ( 15, f0, w0, w1) \
-
-#else
-
- #if STEP_PRE == 1
- #define R_PRE R1
- #elif STEP_PRE == 2
- #define R_PRE R2
- #elif STEP_PRE == 4
- #define R_PRE R4
- #endif
-
- #define R20_START \
- { size_t i; for (i = 0; i < 16; i += STEP_PRE) \
- { R_PRE(i, f0, w0) } } \
- R4 ( 16, f0, w1) \
-
-#endif
-
-
-
-#if STEP_MAIN > 5
-
- #define R20(ii, fx) \
- R5 ( (ii) , fx, w1) \
- R5 ( (ii) + 5 , fx, w1) \
- R5 ( (ii) + 10, fx, w1) \
- R5 ( (ii) + 15, fx, w1) \
-
-#else
-
- #if STEP_MAIN == 1
- #define R_MAIN R1
- #elif STEP_MAIN == 2
- #define R_MAIN R2
- #elif STEP_MAIN == 4
- #define R_MAIN R4
- #elif STEP_MAIN == 5
- #define R_MAIN R5
- #endif
-
- #define R20(ii, fx) \
- { size_t i; for (i = (ii); i < (ii) + 20; i += STEP_MAIN) \
- { R_MAIN(i, fx, w1) } } \
-
-#endif
-
-
-
-void Sha1_InitState(CSha1 *p)
-{
- p->count = 0;
- p->state[0] = 0x67452301;
- p->state[1] = 0xEFCDAB89;
- p->state[2] = 0x98BADCFE;
- p->state[3] = 0x10325476;
- p->state[4] = 0xC3D2E1F0;
-}
-
-void Sha1_Init(CSha1 *p)
-{
- p->func_UpdateBlocks =
- #ifdef Z7_COMPILER_SHA1_SUPPORTED
- g_SHA1_FUNC_UPDATE_BLOCKS;
- #else
- NULL;
- #endif
- Sha1_InitState(p);
-}
-
-
-Z7_NO_INLINE
-void Z7_FASTCALL Sha1_UpdateBlocks(UInt32 state[5], const Byte *data, size_t numBlocks)
-{
- UInt32 a, b, c, d, e;
- UInt32 W[kNumW];
- // if (numBlocks != 0x1264378347) return;
- if (numBlocks == 0)
- return;
-
- a = state[0];
- b = state[1];
- c = state[2];
- d = state[3];
- e = state[4];
-
- do
- {
- #if STEP_PRE < 5 || STEP_MAIN < 5
- UInt32 tmp;
- #endif
-
- R20_START
- R20(20, f1)
- R20(40, f2)
- R20(60, f3)
-
- a += state[0];
- b += state[1];
- c += state[2];
- d += state[3];
- e += state[4];
-
- state[0] = a;
- state[1] = b;
- state[2] = c;
- state[3] = d;
- state[4] = e;
-
- data += 64;
- }
- while (--numBlocks);
-}
-
-
-#define Sha1_UpdateBlock(p) SHA1_UPDATE_BLOCKS(p)(p->state, p->buffer, 1)
-
-void Sha1_Update(CSha1 *p, const Byte *data, size_t size)
-{
- if (size == 0)
- return;
-
- {
- unsigned pos = (unsigned)p->count & 0x3F;
- unsigned num;
-
- p->count += size;
-
- num = 64 - pos;
- if (num > size)
- {
- memcpy(p->buffer + pos, data, size);
- return;
- }
-
- if (pos != 0)
- {
- size -= num;
- memcpy(p->buffer + pos, data, num);
- data += num;
- Sha1_UpdateBlock(p);
- }
- }
- {
- size_t numBlocks = size >> 6;
- SHA1_UPDATE_BLOCKS(p)(p->state, data, numBlocks);
- size &= 0x3F;
- if (size == 0)
- return;
- data += (numBlocks << 6);
- memcpy(p->buffer, data, size);
- }
-}
-
-
-void Sha1_Final(CSha1 *p, Byte *digest)
-{
- unsigned pos = (unsigned)p->count & 0x3F;
-
-
- p->buffer[pos++] = 0x80;
-
- if (pos > (64 - 8))
- {
- while (pos != 64) { p->buffer[pos++] = 0; }
- // memset(&p->buf.buffer[pos], 0, 64 - pos);
- Sha1_UpdateBlock(p);
- pos = 0;
- }
-
- /*
- if (pos & 3)
- {
- p->buffer[pos] = 0;
- p->buffer[pos + 1] = 0;
- p->buffer[pos + 2] = 0;
- pos += 3;
- pos &= ~3;
- }
- {
- for (; pos < 64 - 8; pos += 4)
- *(UInt32 *)(&p->buffer[pos]) = 0;
- }
- */
-
- memset(&p->buffer[pos], 0, (64 - 8) - pos);
-
- {
- const UInt64 numBits = (p->count << 3);
- SetBe32(p->buffer + 64 - 8, (UInt32)(numBits >> 32))
- SetBe32(p->buffer + 64 - 4, (UInt32)(numBits))
- }
-
- Sha1_UpdateBlock(p);
-
- SetBe32(digest, p->state[0])
- SetBe32(digest + 4, p->state[1])
- SetBe32(digest + 8, p->state[2])
- SetBe32(digest + 12, p->state[3])
- SetBe32(digest + 16, p->state[4])
-
-
-
-
- Sha1_InitState(p);
-}
-
-
-void Sha1_PrepareBlock(const CSha1 *p, Byte *block, unsigned size)
-{
- const UInt64 numBits = (p->count + size) << 3;
- SetBe32(&((UInt32 *)(void *)block)[SHA1_NUM_BLOCK_WORDS - 2], (UInt32)(numBits >> 32))
- SetBe32(&((UInt32 *)(void *)block)[SHA1_NUM_BLOCK_WORDS - 1], (UInt32)(numBits))
- // SetBe32((UInt32 *)(block + size), 0x80000000);
- SetUi32((UInt32 *)(void *)(block + size), 0x80)
- size += 4;
- while (size != (SHA1_NUM_BLOCK_WORDS - 2) * 4)
- {
- *((UInt32 *)(void *)(block + size)) = 0;
- size += 4;
- }
-}
-
-void Sha1_GetBlockDigest(const CSha1 *p, const Byte *data, Byte *destDigest)
-{
- MY_ALIGN (16)
- UInt32 st[SHA1_NUM_DIGEST_WORDS];
-
- st[0] = p->state[0];
- st[1] = p->state[1];
- st[2] = p->state[2];
- st[3] = p->state[3];
- st[4] = p->state[4];
-
- SHA1_UPDATE_BLOCKS(p)(st, data, 1);
-
- SetBe32(destDigest + 0 , st[0])
- SetBe32(destDigest + 1 * 4, st[1])
- SetBe32(destDigest + 2 * 4, st[2])
- SetBe32(destDigest + 3 * 4, st[3])
- SetBe32(destDigest + 4 * 4, st[4])
-}
-
-
-void Sha1Prepare(void)
-{
- #ifdef Z7_COMPILER_SHA1_SUPPORTED
- SHA1_FUNC_UPDATE_BLOCKS f, f_hw;
- f = Sha1_UpdateBlocks;
- f_hw = NULL;
- #ifdef MY_CPU_X86_OR_AMD64
- #ifndef USE_MY_MM
- if (CPU_IsSupported_SHA()
- && CPU_IsSupported_SSSE3()
- // && CPU_IsSupported_SSE41()
- )
- #endif
- #else
- if (CPU_IsSupported_SHA1())
- #endif
- {
- // printf("\n========== HW SHA1 ======== \n");
- #if defined(MY_CPU_ARM_OR_ARM64) && defined(_MSC_VER)
- /* there was bug in MSVC compiler for ARM64 -O2 before version VS2019 16.10 (19.29.30037).
- It generated incorrect SHA-1 code.
- 21.03 : we test sha1-hardware code at runtime initialization */
-
- #pragma message("== SHA1 code: MSC compiler : failure-check code was inserted")
-
- UInt32 state[5] = { 0, 1, 2, 3, 4 } ;
- Byte data[64];
- unsigned i;
- for (i = 0; i < sizeof(data); i += 2)
- {
- data[i ] = (Byte)(i);
- data[i + 1] = (Byte)(i + 1);
- }
-
- Sha1_UpdateBlocks_HW(state, data, sizeof(data) / 64);
-
- if ( state[0] != 0x9acd7297
- || state[1] != 0x4624d898
- || state[2] != 0x0bf079f0
- || state[3] != 0x031e61b3
- || state[4] != 0x8323fe20)
- {
- // printf("\n========== SHA-1 hardware version failure ======== \n");
- }
- else
- #endif
- {
- f = f_hw = Sha1_UpdateBlocks_HW;
- }
- }
- g_SHA1_FUNC_UPDATE_BLOCKS = f;
- g_SHA1_FUNC_UPDATE_BLOCKS_HW = f_hw;
- #endif
-}
-
-#undef kNumW
-#undef w
-#undef w0
-#undef w1
-#undef f0
-#undef f1
-#undef f2
-#undef f3
-#undef T1
-#undef T5
-#undef M5
-#undef R1
-#undef R2
-#undef R4
-#undef R5
-#undef R20_START
-#undef R_PRE
-#undef R_MAIN
-#undef STEP_PRE
-#undef STEP_MAIN
-#undef Z7_SHA1_BIG_W
-#undef Z7_SHA1_UNROLL
-#undef Z7_COMPILER_SHA1_SUPPORTED
diff --git a/3rdparty/7z/src/Sha1.h b/3rdparty/7z/src/Sha1.h
deleted file mode 100644
index fecd9d3107..0000000000
--- a/3rdparty/7z/src/Sha1.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Sha1.h -- SHA-1 Hash
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_SHA1_H
-#define ZIP7_INC_SHA1_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define SHA1_NUM_BLOCK_WORDS 16
-#define SHA1_NUM_DIGEST_WORDS 5
-
-#define SHA1_BLOCK_SIZE (SHA1_NUM_BLOCK_WORDS * 4)
-#define SHA1_DIGEST_SIZE (SHA1_NUM_DIGEST_WORDS * 4)
-
-typedef void (Z7_FASTCALL *SHA1_FUNC_UPDATE_BLOCKS)(UInt32 state[5], const Byte *data, size_t numBlocks);
-
-/*
- if (the system supports different SHA1 code implementations)
- {
- (CSha1::func_UpdateBlocks) will be used
- (CSha1::func_UpdateBlocks) can be set by
- Sha1_Init() - to default (fastest)
- Sha1_SetFunction() - to any algo
- }
- else
- {
- (CSha1::func_UpdateBlocks) is ignored.
- }
-*/
-
-typedef struct
-{
- SHA1_FUNC_UPDATE_BLOCKS func_UpdateBlocks;
- UInt64 count;
- UInt64 _pad_2[2];
- UInt32 state[SHA1_NUM_DIGEST_WORDS];
- UInt32 _pad_3[3];
- Byte buffer[SHA1_BLOCK_SIZE];
-} CSha1;
-
-
-#define SHA1_ALGO_DEFAULT 0
-#define SHA1_ALGO_SW 1
-#define SHA1_ALGO_HW 2
-
-/*
-Sha1_SetFunction()
-return:
- 0 - (algo) value is not supported, and func_UpdateBlocks was not changed
- 1 - func_UpdateBlocks was set according (algo) value.
-*/
-
-BoolInt Sha1_SetFunction(CSha1 *p, unsigned algo);
-
-void Sha1_InitState(CSha1 *p);
-void Sha1_Init(CSha1 *p);
-void Sha1_Update(CSha1 *p, const Byte *data, size_t size);
-void Sha1_Final(CSha1 *p, Byte *digest);
-
-void Sha1_PrepareBlock(const CSha1 *p, Byte *block, unsigned size);
-void Sha1_GetBlockDigest(const CSha1 *p, const Byte *data, Byte *destDigest);
-
-// void Z7_FASTCALL Sha1_UpdateBlocks(UInt32 state[5], const Byte *data, size_t numBlocks);
-
-/*
-call Sha1Prepare() once at program start.
-It prepares all supported implementations, and detects the fastest implementation.
-*/
-
-void Sha1Prepare(void);
-
-EXTERN_C_END
-
-#endif
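The Sha1.h interface removed above is a streaming API: Sha1Prepare() is called once at program start to detect a hardware implementation, then each digest uses Sha1_Init / Sha1_Update / Sha1_Final. The following minimal caller sketch assumes the old 3rdparty/7z headers are still on the include path; it is an illustration of the deleted API, not code from this change.

#include <stdio.h>
#include <string.h>
#include "Sha1.h"   /* deleted in this PR; assumed available for the sketch */

static void hash_buffer(const Byte *data, size_t size, Byte digest[SHA1_DIGEST_SIZE])
{
    CSha1 sha;
    Sha1_Init(&sha);            /* picks the fastest implementation found by Sha1Prepare() */
    Sha1_Update(&sha, data, size);
    Sha1_Final(&sha, digest);   /* writes 20 bytes and re-initializes the state */
}

int main(void)
{
    Byte digest[SHA1_DIGEST_SIZE];
    const char *msg = "abc";
    Sha1Prepare();              /* one-time hardware detection */
    hash_buffer((const Byte *)msg, strlen(msg), digest);
    for (unsigned i = 0; i < SHA1_DIGEST_SIZE; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}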
diff --git a/3rdparty/7z/src/Sha1Opt.c b/3rdparty/7z/src/Sha1Opt.c
deleted file mode 100644
index 27796aa47e..0000000000
--- a/3rdparty/7z/src/Sha1Opt.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/* Sha1Opt.c -- SHA-1 optimized code for SHA-1 hardware instructions
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-#include "Compiler.h"
-#include "CpuArch.h"
-
-#if defined(_MSC_VER)
-#if (_MSC_VER < 1900) && (_MSC_VER >= 1200)
-// #define USE_MY_MM
-#endif
-#endif
-
-#ifdef MY_CPU_X86_OR_AMD64
- #if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1600) // fix that check
- #define USE_HW_SHA
- #elif defined(Z7_LLVM_CLANG_VERSION) && (Z7_LLVM_CLANG_VERSION >= 30800) \
- || defined(Z7_APPLE_CLANG_VERSION) && (Z7_APPLE_CLANG_VERSION >= 50100) \
- || defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40900)
- #define USE_HW_SHA
- #if !defined(_INTEL_COMPILER)
- // icc defines __GNUC__, but icc doesn't support __attribute__(__target__)
- #if !defined(__SHA__) || !defined(__SSSE3__)
- #define ATTRIB_SHA __attribute__((__target__("sha,ssse3")))
- #endif
- #endif
- #elif defined(_MSC_VER)
- #ifdef USE_MY_MM
- #define USE_VER_MIN 1300
- #else
- #define USE_VER_MIN 1900
- #endif
- #if (_MSC_VER >= USE_VER_MIN)
- #define USE_HW_SHA
- #endif
- #endif
-// #endif // MY_CPU_X86_OR_AMD64
-
-#ifdef USE_HW_SHA
-
-// #pragma message("Sha1 HW")
-
-// sse/sse2/ssse3:
-#include <emmintrin.h>
-// sha*:
-#include <immintrin.h>
-
-#if defined (__clang__) && defined(_MSC_VER)
- // #if !defined(__SSSE3__)
- // #endif
- #if !defined(__SHA__)
-    #include <shaintrin.h>
- #endif
-#else
-
-#ifdef USE_MY_MM
-#include "My_mm.h"
-#endif
-
-#endif
-
-/*
-SHA1 uses:
-SSE2:
- _mm_loadu_si128
- _mm_storeu_si128
- _mm_set_epi32
- _mm_add_epi32
- _mm_shuffle_epi32 / pshufd
- _mm_xor_si128
- _mm_cvtsi128_si32
- _mm_cvtsi32_si128
-SSSE3:
- _mm_shuffle_epi8 / pshufb
-
-SHA:
- _mm_sha1*
-*/
-
-
-#define XOR_SI128(dest, src) dest = _mm_xor_si128(dest, src);
-#define SHUFFLE_EPI8(dest, mask) dest = _mm_shuffle_epi8(dest, mask);
-#define SHUFFLE_EPI32(dest, mask) dest = _mm_shuffle_epi32(dest, mask);
-#ifdef __clang__
-#define SHA1_RNDS4_RET_TYPE_CAST (__m128i)
-#else
-#define SHA1_RNDS4_RET_TYPE_CAST
-#endif
-#define SHA1_RND4(abcd, e0, f) abcd = SHA1_RNDS4_RET_TYPE_CAST _mm_sha1rnds4_epu32(abcd, e0, f);
-#define SHA1_NEXTE(e, m) e = _mm_sha1nexte_epu32(e, m);
-#define ADD_EPI32(dest, src) dest = _mm_add_epi32(dest, src);
-#define SHA1_MSG1(dest, src) dest = _mm_sha1msg1_epu32(dest, src);
-#define SHA1_MSG2(dest, src) dest = _mm_sha1msg2_epu32(dest, src);
-
-
-#define LOAD_SHUFFLE(m, k) \
- m = _mm_loadu_si128((const __m128i *)(const void *)(data + (k) * 16)); \
- SHUFFLE_EPI8(m, mask) \
-
-#define SM1(m0, m1, m2, m3) \
- SHA1_MSG1(m0, m1) \
-
-#define SM2(m0, m1, m2, m3) \
- XOR_SI128(m3, m1) \
- SHA1_MSG2(m3, m2) \
-
-#define SM3(m0, m1, m2, m3) \
- XOR_SI128(m3, m1) \
- SM1(m0, m1, m2, m3) \
- SHA1_MSG2(m3, m2) \
-
-#define NNN(m0, m1, m2, m3)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#define R4(k, e0, e1, m0, m1, m2, m3, OP) \
- e1 = abcd; \
- SHA1_RND4(abcd, e0, (k) / 5) \
- SHA1_NEXTE(e1, m1) \
- OP(m0, m1, m2, m3) \
-
-#define R16(k, mx, OP0, OP1, OP2, OP3) \
- R4 ( (k)*4+0, e0,e1, m0,m1,m2,m3, OP0 ) \
- R4 ( (k)*4+1, e1,e0, m1,m2,m3,m0, OP1 ) \
- R4 ( (k)*4+2, e0,e1, m2,m3,m0,m1, OP2 ) \
- R4 ( (k)*4+3, e1,e0, m3,mx,m1,m2, OP3 ) \
-
-#define PREPARE_STATE \
- SHUFFLE_EPI32 (abcd, 0x1B) \
- SHUFFLE_EPI32 (e0, 0x1B) \
-
-
-
-
-
-void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[5], const Byte *data, size_t numBlocks);
-#ifdef ATTRIB_SHA
-ATTRIB_SHA
-#endif
-void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[5], const Byte *data, size_t numBlocks)
-{
- const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
-
- __m128i abcd, e0;
-
- if (numBlocks == 0)
- return;
-
- abcd = _mm_loadu_si128((const __m128i *) (const void *) &state[0]); // dbca
- e0 = _mm_cvtsi32_si128((int)state[4]); // 000e
-
- PREPARE_STATE
-
- do
- {
- __m128i abcd_save, e2;
- __m128i m0, m1, m2, m3;
- __m128i e1;
-
-
- abcd_save = abcd;
- e2 = e0;
-
- LOAD_SHUFFLE (m0, 0)
- LOAD_SHUFFLE (m1, 1)
- LOAD_SHUFFLE (m2, 2)
- LOAD_SHUFFLE (m3, 3)
-
- ADD_EPI32(e0, m0)
-
- R16 ( 0, m0, SM1, SM3, SM3, SM3 )
- R16 ( 1, m0, SM3, SM3, SM3, SM3 )
- R16 ( 2, m0, SM3, SM3, SM3, SM3 )
- R16 ( 3, m0, SM3, SM3, SM3, SM3 )
- R16 ( 4, e2, SM2, NNN, NNN, NNN )
-
- ADD_EPI32(abcd, abcd_save)
-
- data += 64;
- }
- while (--numBlocks);
-
- PREPARE_STATE
-
- _mm_storeu_si128((__m128i *) (void *) state, abcd);
- *(state+4) = (UInt32)_mm_cvtsi128_si32(e0);
-}
-
-#endif // USE_HW_SHA
-
-#elif defined(MY_CPU_ARM_OR_ARM64)
-
- #if defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define USE_HW_SHA
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 6) // fix that check
- #define USE_HW_SHA
- #endif
- #elif defined(_MSC_VER)
- #if _MSC_VER >= 1910
- #define USE_HW_SHA
- #endif
- #endif
-
-#ifdef USE_HW_SHA
-
-// #pragma message("=== Sha1 HW === ")
-
-#if defined(__clang__) || defined(__GNUC__)
- #ifdef MY_CPU_ARM64
- #define ATTRIB_SHA __attribute__((__target__("+crypto")))
- #else
- #define ATTRIB_SHA __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
- #endif
-#else
- // _MSC_VER
- // for arm32
- #define _ARM_USE_NEW_NEON_INTRINSICS
-#endif
-
-#if defined(_MSC_VER) && defined(MY_CPU_ARM64)
-#include <arm64_neon.h>
-#else
-#include <arm_neon.h>
-#endif
-
-typedef uint32x4_t v128;
-// typedef __n128 v128; // MSVC
-
-#ifdef MY_CPU_BE
- #define MY_rev32_for_LE(x)
-#else
- #define MY_rev32_for_LE(x) x = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(x)))
-#endif
-
-#define LOAD_128(_p) (*(const v128 *)(const void *)(_p))
-#define STORE_128(_p, _v) *(v128 *)(void *)(_p) = (_v)
-
-#define LOAD_SHUFFLE(m, k) \
- m = LOAD_128((data + (k) * 16)); \
- MY_rev32_for_LE(m); \
-
-#define SU0(dest, src2, src3) dest = vsha1su0q_u32(dest, src2, src3);
-#define SU1(dest, src) dest = vsha1su1q_u32(dest, src);
-#define C(e) abcd = vsha1cq_u32(abcd, e, t);
-#define P(e) abcd = vsha1pq_u32(abcd, e, t);
-#define M(e) abcd = vsha1mq_u32(abcd, e, t);
-#define H(e) e = vsha1h_u32(vgetq_lane_u32(abcd, 0))
-#define T(m, c) t = vaddq_u32(m, c)
-
-void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
-#ifdef ATTRIB_SHA
-ATTRIB_SHA
-#endif
-void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
-{
- v128 abcd;
- v128 c0, c1, c2, c3;
- uint32_t e0;
-
- if (numBlocks == 0)
- return;
-
- c0 = vdupq_n_u32(0x5a827999);
- c1 = vdupq_n_u32(0x6ed9eba1);
- c2 = vdupq_n_u32(0x8f1bbcdc);
- c3 = vdupq_n_u32(0xca62c1d6);
-
- abcd = LOAD_128(&state[0]);
- e0 = state[4];
-
- do
- {
- v128 abcd_save;
- v128 m0, m1, m2, m3;
- v128 t;
- uint32_t e0_save, e1;
-
- abcd_save = abcd;
- e0_save = e0;
-
- LOAD_SHUFFLE (m0, 0)
- LOAD_SHUFFLE (m1, 1)
- LOAD_SHUFFLE (m2, 2)
- LOAD_SHUFFLE (m3, 3)
-
- T(m0, c0); H(e1); C(e0);
- T(m1, c0); SU0(m0, m1, m2); H(e0); C(e1);
- T(m2, c0); SU0(m1, m2, m3); SU1(m0, m3); H(e1); C(e0);
- T(m3, c0); SU0(m2, m3, m0); SU1(m1, m0); H(e0); C(e1);
- T(m0, c0); SU0(m3, m0, m1); SU1(m2, m1); H(e1); C(e0);
- T(m1, c1); SU0(m0, m1, m2); SU1(m3, m2); H(e0); P(e1);
- T(m2, c1); SU0(m1, m2, m3); SU1(m0, m3); H(e1); P(e0);
- T(m3, c1); SU0(m2, m3, m0); SU1(m1, m0); H(e0); P(e1);
- T(m0, c1); SU0(m3, m0, m1); SU1(m2, m1); H(e1); P(e0);
- T(m1, c1); SU0(m0, m1, m2); SU1(m3, m2); H(e0); P(e1);
- T(m2, c2); SU0(m1, m2, m3); SU1(m0, m3); H(e1); M(e0);
- T(m3, c2); SU0(m2, m3, m0); SU1(m1, m0); H(e0); M(e1);
- T(m0, c2); SU0(m3, m0, m1); SU1(m2, m1); H(e1); M(e0);
- T(m1, c2); SU0(m0, m1, m2); SU1(m3, m2); H(e0); M(e1);
- T(m2, c2); SU0(m1, m2, m3); SU1(m0, m3); H(e1); M(e0);
- T(m3, c3); SU0(m2, m3, m0); SU1(m1, m0); H(e0); P(e1);
- T(m0, c3); SU0(m3, m0, m1); SU1(m2, m1); H(e1); P(e0);
- T(m1, c3); SU1(m3, m2); H(e0); P(e1);
- T(m2, c3); H(e1); P(e0);
- T(m3, c3); H(e0); P(e1);
-
- abcd = vaddq_u32(abcd, abcd_save);
- e0 += e0_save;
-
- data += 64;
- }
- while (--numBlocks);
-
- STORE_128(&state[0], abcd);
- state[4] = e0;
-}
-
-#endif // USE_HW_SHA
-
-#endif // MY_CPU_ARM_OR_ARM64
-
-
-#ifndef USE_HW_SHA
-
-// #error Stop_Compiling_UNSUPPORTED_SHA
-// #include <stdlib.h>
-
-// #include "Sha1.h"
-void Z7_FASTCALL Sha1_UpdateBlocks(UInt32 state[5], const Byte *data, size_t numBlocks);
-
-#pragma message("Sha1 HW-SW stub was used")
-
-void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[5], const Byte *data, size_t numBlocks);
-void Z7_FASTCALL Sha1_UpdateBlocks_HW(UInt32 state[5], const Byte *data, size_t numBlocks)
-{
- Sha1_UpdateBlocks(state, data, numBlocks);
- /*
- UNUSED_VAR(state);
- UNUSED_VAR(data);
- UNUSED_VAR(numBlocks);
- exit(1);
- return;
- */
-}
-
-#endif
-
-#undef SU0
-#undef SU1
-#undef C
-#undef P
-#undef M
-#undef H
-#undef T
-#undef MY_rev32_for_LE
-#undef NNN
-#undef LOAD_128
-#undef STORE_128
-#undef LOAD_SHUFFLE
-#undef SM1
-#undef SM2
-#undef SM3
-#undef NNN
-#undef R4
-#undef R16
-#undef PREPARE_STATE
-#undef USE_HW_SHA
-#undef ATTRIB_SHA
-#undef USE_VER_MIN
diff --git a/3rdparty/7z/src/Sha256.c b/3rdparty/7z/src/Sha256.c
deleted file mode 100644
index 538ccaa996..0000000000
--- a/3rdparty/7z/src/Sha256.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/* Sha256.c -- SHA-256 Hash
-2023-04-02 : Igor Pavlov : Public domain
-This code is based on public domain code from Wei Dai's Crypto++ library. */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "CpuArch.h"
-#include "RotateDefs.h"
-#include "Sha256.h"
-
-#if defined(_MSC_VER) && (_MSC_VER < 1900)
-// #define USE_MY_MM
-#endif
-
-#ifdef MY_CPU_X86_OR_AMD64
- #ifdef _MSC_VER
- #if _MSC_VER >= 1200
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #elif defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 8) // fix that check
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #elif defined(__INTEL_COMPILER)
- #if (__INTEL_COMPILER >= 1800) // fix that check
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #endif
-#elif defined(MY_CPU_ARM_OR_ARM64)
- #ifdef _MSC_VER
- #if _MSC_VER >= 1910
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #elif defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 6) // fix that check
- #define Z7_COMPILER_SHA256_SUPPORTED
- #endif
- #endif
-#endif
-
-void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
-
-#ifdef Z7_COMPILER_SHA256_SUPPORTED
- void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
-
- static SHA256_FUNC_UPDATE_BLOCKS g_SHA256_FUNC_UPDATE_BLOCKS = Sha256_UpdateBlocks;
- static SHA256_FUNC_UPDATE_BLOCKS g_SHA256_FUNC_UPDATE_BLOCKS_HW;
-
- #define SHA256_UPDATE_BLOCKS(p) p->func_UpdateBlocks
-#else
- #define SHA256_UPDATE_BLOCKS(p) Sha256_UpdateBlocks
-#endif
-
-
-BoolInt Sha256_SetFunction(CSha256 *p, unsigned algo)
-{
- SHA256_FUNC_UPDATE_BLOCKS func = Sha256_UpdateBlocks;
-
- #ifdef Z7_COMPILER_SHA256_SUPPORTED
- if (algo != SHA256_ALGO_SW)
- {
- if (algo == SHA256_ALGO_DEFAULT)
- func = g_SHA256_FUNC_UPDATE_BLOCKS;
- else
- {
- if (algo != SHA256_ALGO_HW)
- return False;
- func = g_SHA256_FUNC_UPDATE_BLOCKS_HW;
- if (!func)
- return False;
- }
- }
- #else
- if (algo > 1)
- return False;
- #endif
-
- p->func_UpdateBlocks = func;
- return True;
-}
-
-
-/* define it for speed optimization */
-
-#ifdef Z7_SFX
- #define STEP_PRE 1
- #define STEP_MAIN 1
-#else
- #define STEP_PRE 2
- #define STEP_MAIN 4
- // #define Z7_SHA256_UNROLL
-#endif
-
-#undef Z7_SHA256_BIG_W
-#if STEP_MAIN != 16
- #define Z7_SHA256_BIG_W
-#endif
-
-
-
-
-void Sha256_InitState(CSha256 *p)
-{
- p->count = 0;
- p->state[0] = 0x6a09e667;
- p->state[1] = 0xbb67ae85;
- p->state[2] = 0x3c6ef372;
- p->state[3] = 0xa54ff53a;
- p->state[4] = 0x510e527f;
- p->state[5] = 0x9b05688c;
- p->state[6] = 0x1f83d9ab;
- p->state[7] = 0x5be0cd19;
-}
-
-void Sha256_Init(CSha256 *p)
-{
- p->func_UpdateBlocks =
- #ifdef Z7_COMPILER_SHA256_SUPPORTED
- g_SHA256_FUNC_UPDATE_BLOCKS;
- #else
- NULL;
- #endif
- Sha256_InitState(p);
-}
-
-#define S0(x) (rotrFixed(x, 2) ^ rotrFixed(x,13) ^ rotrFixed(x, 22))
-#define S1(x) (rotrFixed(x, 6) ^ rotrFixed(x,11) ^ rotrFixed(x, 25))
-#define s0(x) (rotrFixed(x, 7) ^ rotrFixed(x,18) ^ (x >> 3))
-#define s1(x) (rotrFixed(x,17) ^ rotrFixed(x,19) ^ (x >> 10))
-
-#define Ch(x,y,z) (z^(x&(y^z)))
-#define Maj(x,y,z) ((x&y)|(z&(x|y)))
-
-
-#define W_PRE(i) (W[(i) + (size_t)(j)] = GetBe32(data + ((size_t)(j) + i) * 4))
-
-#define blk2_main(j, i) s1(w(j, (i)-2)) + w(j, (i)-7) + s0(w(j, (i)-15))
-
-#ifdef Z7_SHA256_BIG_W
- // we use +i instead of +(i) to change the order to solve CLANG compiler warning for signed/unsigned.
- #define w(j, i) W[(size_t)(j) + i]
- #define blk2(j, i) (w(j, i) = w(j, (i)-16) + blk2_main(j, i))
-#else
- #if STEP_MAIN == 16
- #define w(j, i) W[(i) & 15]
- #else
- #define w(j, i) W[((size_t)(j) + (i)) & 15]
- #endif
- #define blk2(j, i) (w(j, i) += blk2_main(j, i))
-#endif
-
-#define W_MAIN(i) blk2(j, i)
-
-
-#define T1(wx, i) \
- tmp = h + S1(e) + Ch(e,f,g) + K[(i)+(size_t)(j)] + wx(i); \
- h = g; \
- g = f; \
- f = e; \
- e = d + tmp; \
- tmp += S0(a) + Maj(a, b, c); \
- d = c; \
- c = b; \
- b = a; \
- a = tmp; \
-
-#define R1_PRE(i) T1( W_PRE, i)
-#define R1_MAIN(i) T1( W_MAIN, i)
-
-#if (!defined(Z7_SHA256_UNROLL) || STEP_MAIN < 8) && (STEP_MAIN >= 4)
-#define R2_MAIN(i) \
- R1_MAIN(i) \
- R1_MAIN(i + 1) \
-
-#endif
-
-
-
-#if defined(Z7_SHA256_UNROLL) && STEP_MAIN >= 8
-
-#define T4( a,b,c,d,e,f,g,h, wx, i) \
- h += S1(e) + Ch(e,f,g) + K[(i)+(size_t)(j)] + wx(i); \
- tmp = h; \
- h += d; \
- d = tmp + S0(a) + Maj(a, b, c); \
-
-#define R4( wx, i) \
- T4 ( a,b,c,d,e,f,g,h, wx, (i )); \
- T4 ( d,a,b,c,h,e,f,g, wx, (i+1)); \
- T4 ( c,d,a,b,g,h,e,f, wx, (i+2)); \
- T4 ( b,c,d,a,f,g,h,e, wx, (i+3)); \
-
-#define R4_PRE(i) R4( W_PRE, i)
-#define R4_MAIN(i) R4( W_MAIN, i)
-
-
-#define T8( a,b,c,d,e,f,g,h, wx, i) \
- h += S1(e) + Ch(e,f,g) + K[(i)+(size_t)(j)] + wx(i); \
- d += h; \
- h += S0(a) + Maj(a, b, c); \
-
-#define R8( wx, i) \
- T8 ( a,b,c,d,e,f,g,h, wx, i ); \
- T8 ( h,a,b,c,d,e,f,g, wx, i+1); \
- T8 ( g,h,a,b,c,d,e,f, wx, i+2); \
- T8 ( f,g,h,a,b,c,d,e, wx, i+3); \
- T8 ( e,f,g,h,a,b,c,d, wx, i+4); \
- T8 ( d,e,f,g,h,a,b,c, wx, i+5); \
- T8 ( c,d,e,f,g,h,a,b, wx, i+6); \
- T8 ( b,c,d,e,f,g,h,a, wx, i+7); \
-
-#define R8_PRE(i) R8( W_PRE, i)
-#define R8_MAIN(i) R8( W_MAIN, i)
-
-#endif
-
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
-
-// static
-extern MY_ALIGN(64)
-const UInt32 SHA256_K_ARRAY[64];
-
-MY_ALIGN(64)
-const UInt32 SHA256_K_ARRAY[64] = {
- 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
- 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
- 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
- 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
- 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
- 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
- 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
- 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
- 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
- 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
- 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
- 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
- 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
- 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
- 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
- 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
-};
-
-#define K SHA256_K_ARRAY
-
-
-Z7_NO_INLINE
-void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks)
-{
- UInt32 W
- #ifdef Z7_SHA256_BIG_W
- [64];
- #else
- [16];
- #endif
-
- unsigned j;
-
- UInt32 a,b,c,d,e,f,g,h;
-
- #if !defined(Z7_SHA256_UNROLL) || (STEP_MAIN <= 4) || (STEP_PRE <= 4)
- UInt32 tmp;
- #endif
-
- a = state[0];
- b = state[1];
- c = state[2];
- d = state[3];
- e = state[4];
- f = state[5];
- g = state[6];
- h = state[7];
-
- while (numBlocks)
- {
-
- for (j = 0; j < 16; j += STEP_PRE)
- {
- #if STEP_PRE > 4
-
- #if STEP_PRE < 8
- R4_PRE(0);
- #else
- R8_PRE(0);
- #if STEP_PRE == 16
- R8_PRE(8);
- #endif
- #endif
-
- #else
-
- R1_PRE(0)
- #if STEP_PRE >= 2
- R1_PRE(1)
- #if STEP_PRE >= 4
- R1_PRE(2)
- R1_PRE(3)
- #endif
- #endif
-
- #endif
- }
-
- for (j = 16; j < 64; j += STEP_MAIN)
- {
- #if defined(Z7_SHA256_UNROLL) && STEP_MAIN >= 8
-
- #if STEP_MAIN < 8
- R4_MAIN(0)
- #else
- R8_MAIN(0)
- #if STEP_MAIN == 16
- R8_MAIN(8)
- #endif
- #endif
-
- #else
-
- R1_MAIN(0)
- #if STEP_MAIN >= 2
- R1_MAIN(1)
- #if STEP_MAIN >= 4
- R2_MAIN(2)
- #if STEP_MAIN >= 8
- R2_MAIN(4)
- R2_MAIN(6)
- #if STEP_MAIN >= 16
- R2_MAIN(8)
- R2_MAIN(10)
- R2_MAIN(12)
- R2_MAIN(14)
- #endif
- #endif
- #endif
- #endif
- #endif
- }
-
- a += state[0]; state[0] = a;
- b += state[1]; state[1] = b;
- c += state[2]; state[2] = c;
- d += state[3]; state[3] = d;
- e += state[4]; state[4] = e;
- f += state[5]; state[5] = f;
- g += state[6]; state[6] = g;
- h += state[7]; state[7] = h;
-
- data += 64;
- numBlocks--;
- }
-
- /* Wipe variables */
- /* memset(W, 0, sizeof(W)); */
-}
-
-#undef S0
-#undef S1
-#undef s0
-#undef s1
-#undef K
-
-#define Sha256_UpdateBlock(p) SHA256_UPDATE_BLOCKS(p)(p->state, p->buffer, 1)
-
-void Sha256_Update(CSha256 *p, const Byte *data, size_t size)
-{
- if (size == 0)
- return;
-
- {
- unsigned pos = (unsigned)p->count & 0x3F;
- unsigned num;
-
- p->count += size;
-
- num = 64 - pos;
- if (num > size)
- {
- memcpy(p->buffer + pos, data, size);
- return;
- }
-
- if (pos != 0)
- {
- size -= num;
- memcpy(p->buffer + pos, data, num);
- data += num;
- Sha256_UpdateBlock(p);
- }
- }
- {
- size_t numBlocks = size >> 6;
- SHA256_UPDATE_BLOCKS(p)(p->state, data, numBlocks);
- size &= 0x3F;
- if (size == 0)
- return;
- data += (numBlocks << 6);
- memcpy(p->buffer, data, size);
- }
-}
-
-
-void Sha256_Final(CSha256 *p, Byte *digest)
-{
- unsigned pos = (unsigned)p->count & 0x3F;
- unsigned i;
-
- p->buffer[pos++] = 0x80;
-
- if (pos > (64 - 8))
- {
- while (pos != 64) { p->buffer[pos++] = 0; }
- // memset(&p->buf.buffer[pos], 0, 64 - pos);
- Sha256_UpdateBlock(p);
- pos = 0;
- }
-
- /*
- if (pos & 3)
- {
- p->buffer[pos] = 0;
- p->buffer[pos + 1] = 0;
- p->buffer[pos + 2] = 0;
- pos += 3;
- pos &= ~3;
- }
- {
- for (; pos < 64 - 8; pos += 4)
- *(UInt32 *)(&p->buffer[pos]) = 0;
- }
- */
-
- memset(&p->buffer[pos], 0, (64 - 8) - pos);
-
- {
- UInt64 numBits = (p->count << 3);
- SetBe32(p->buffer + 64 - 8, (UInt32)(numBits >> 32))
- SetBe32(p->buffer + 64 - 4, (UInt32)(numBits))
- }
-
- Sha256_UpdateBlock(p);
-
- for (i = 0; i < 8; i += 2)
- {
- UInt32 v0 = p->state[i];
- UInt32 v1 = p->state[(size_t)i + 1];
- SetBe32(digest , v0)
- SetBe32(digest + 4, v1)
- digest += 8;
- }
-
- Sha256_InitState(p);
-}
-
-
-void Sha256Prepare(void)
-{
- #ifdef Z7_COMPILER_SHA256_SUPPORTED
- SHA256_FUNC_UPDATE_BLOCKS f, f_hw;
- f = Sha256_UpdateBlocks;
- f_hw = NULL;
- #ifdef MY_CPU_X86_OR_AMD64
- #ifndef USE_MY_MM
- if (CPU_IsSupported_SHA()
- && CPU_IsSupported_SSSE3()
- // && CPU_IsSupported_SSE41()
- )
- #endif
- #else
- if (CPU_IsSupported_SHA2())
- #endif
- {
- // printf("\n========== HW SHA256 ======== \n");
- f = f_hw = Sha256_UpdateBlocks_HW;
- }
- g_SHA256_FUNC_UPDATE_BLOCKS = f;
- g_SHA256_FUNC_UPDATE_BLOCKS_HW = f_hw;
- #endif
-}
-
-#undef S0
-#undef S1
-#undef s0
-#undef s1
-#undef Ch
-#undef Maj
-#undef W_MAIN
-#undef W_PRE
-#undef w
-#undef blk2_main
-#undef blk2
-#undef T1
-#undef T4
-#undef T8
-#undef R1_PRE
-#undef R1_MAIN
-#undef R2_MAIN
-#undef R4
-#undef R4_PRE
-#undef R4_MAIN
-#undef R8
-#undef R8_PRE
-#undef R8_MAIN
-#undef STEP_PRE
-#undef STEP_MAIN
-#undef Z7_SHA256_BIG_W
-#undef Z7_SHA256_UNROLL
-#undef Z7_COMPILER_SHA256_SUPPORTED
diff --git a/3rdparty/7z/src/Sha256.h b/3rdparty/7z/src/Sha256.h
deleted file mode 100644
index af8c9bc8a8..0000000000
--- a/3rdparty/7z/src/Sha256.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Sha256.h -- SHA-256 Hash
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_SHA256_H
-#define ZIP7_INC_SHA256_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#define SHA256_NUM_BLOCK_WORDS 16
-#define SHA256_NUM_DIGEST_WORDS 8
-
-#define SHA256_BLOCK_SIZE (SHA256_NUM_BLOCK_WORDS * 4)
-#define SHA256_DIGEST_SIZE (SHA256_NUM_DIGEST_WORDS * 4)
-
-typedef void (Z7_FASTCALL *SHA256_FUNC_UPDATE_BLOCKS)(UInt32 state[8], const Byte *data, size_t numBlocks);
-
-/*
- if (the system supports different SHA256 code implementations)
- {
- (CSha256::func_UpdateBlocks) will be used
- (CSha256::func_UpdateBlocks) can be set by
- Sha256_Init() - to default (fastest)
- Sha256_SetFunction() - to any algo
- }
- else
- {
- (CSha256::func_UpdateBlocks) is ignored.
- }
-*/
-
-typedef struct
-{
- SHA256_FUNC_UPDATE_BLOCKS func_UpdateBlocks;
- UInt64 count;
- UInt64 _pad_2[2];
- UInt32 state[SHA256_NUM_DIGEST_WORDS];
-
- Byte buffer[SHA256_BLOCK_SIZE];
-} CSha256;
-
-
-#define SHA256_ALGO_DEFAULT 0
-#define SHA256_ALGO_SW 1
-#define SHA256_ALGO_HW 2
-
-/*
-Sha256_SetFunction()
-return:
- 0 - (algo) value is not supported, and func_UpdateBlocks was not changed
- 1 - func_UpdateBlocks was set according (algo) value.
-*/
-
-BoolInt Sha256_SetFunction(CSha256 *p, unsigned algo);
-
-void Sha256_InitState(CSha256 *p);
-void Sha256_Init(CSha256 *p);
-void Sha256_Update(CSha256 *p, const Byte *data, size_t size);
-void Sha256_Final(CSha256 *p, Byte *digest);
-
-
-
-
-// void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
-
-/*
-call Sha256Prepare() once at program start.
-It prepares all supported implementations, and detects the fastest implementation.
-*/
-
-void Sha256Prepare(void);
-
-EXTERN_C_END
-
-#endif
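The SHA-256 header removed above mirrors the SHA-1 one and additionally documents Sha256_SetFunction() for forcing a particular implementation (0 is returned when the requested algo is unsupported and func_UpdateBlocks is left unchanged). A hedged sketch of that selection logic, using only names declared in the deleted header; the include is an assumption for illustration.

#include "Sha256.h"  /* deleted in this PR; assumed available for the sketch */

/* Prefer the hardware path, fall back to the portable software path,
   which is always accepted by Sha256_SetFunction(). */
static void select_sha256_impl(CSha256 *p)
{
    Sha256Prepare();                            /* one-time detection, normally at startup */
    Sha256_Init(p);                             /* default: fastest detected implementation */
    if (!Sha256_SetFunction(p, SHA256_ALGO_HW))
        Sha256_SetFunction(p, SHA256_ALGO_SW);  /* portable fallback */
}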
diff --git a/3rdparty/7z/src/Sha256Opt.c b/3rdparty/7z/src/Sha256Opt.c
deleted file mode 100644
index 4fccb336f1..0000000000
--- a/3rdparty/7z/src/Sha256Opt.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/* Sha256Opt.c -- SHA-256 optimized code for SHA-256 hardware instructions
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-#include "Compiler.h"
-#include "CpuArch.h"
-
-#if defined(_MSC_VER)
-#if (_MSC_VER < 1900) && (_MSC_VER >= 1200)
-// #define USE_MY_MM
-#endif
-#endif
-
-#ifdef MY_CPU_X86_OR_AMD64
- #if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1600) // fix that check
- #define USE_HW_SHA
- #elif defined(Z7_LLVM_CLANG_VERSION) && (Z7_LLVM_CLANG_VERSION >= 30800) \
- || defined(Z7_APPLE_CLANG_VERSION) && (Z7_APPLE_CLANG_VERSION >= 50100) \
- || defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40900)
- #define USE_HW_SHA
- #if !defined(_INTEL_COMPILER)
- // icc defines __GNUC__, but icc doesn't support __attribute__(__target__)
- #if !defined(__SHA__) || !defined(__SSSE3__)
- #define ATTRIB_SHA __attribute__((__target__("sha,ssse3")))
- #endif
- #endif
- #elif defined(_MSC_VER)
- #ifdef USE_MY_MM
- #define USE_VER_MIN 1300
- #else
- #define USE_VER_MIN 1900
- #endif
- #if (_MSC_VER >= USE_VER_MIN)
- #define USE_HW_SHA
- #endif
- #endif
-// #endif // MY_CPU_X86_OR_AMD64
-
-#ifdef USE_HW_SHA
-
-// #pragma message("Sha256 HW")
-
-// sse/sse2/ssse3:
-#include <emmintrin.h>
-// sha*:
-#include <immintrin.h>
-
-#if defined (__clang__) && defined(_MSC_VER)
- // #if !defined(__SSSE3__)
- // #endif
- #if !defined(__SHA__)
-    #include <shaintrin.h>
- #endif
-#else
-
-#ifdef USE_MY_MM
-#include "My_mm.h"
-#endif
-
-#endif
-
-/*
-SHA256 uses:
-SSE2:
- _mm_loadu_si128
- _mm_storeu_si128
- _mm_set_epi32
- _mm_add_epi32
- _mm_shuffle_epi32 / pshufd
-
-
-
-SSSE3:
- _mm_shuffle_epi8 / pshufb
- _mm_alignr_epi8
-SHA:
- _mm_sha256*
-*/
-
-// K array must be aligned for 16-bytes at least.
-// The compiler can look align attribute and selects
-// movdqu - for code without align attribute
-// movdqa - for code with align attribute
-extern
-MY_ALIGN(64)
-const UInt32 SHA256_K_ARRAY[64];
-
-#define K SHA256_K_ARRAY
-
-
-#define ADD_EPI32(dest, src) dest = _mm_add_epi32(dest, src);
-#define SHA256_MSG1(dest, src) dest = _mm_sha256msg1_epu32(dest, src);
-#define SHA25G_MSG2(dest, src) dest = _mm_sha256msg2_epu32(dest, src);
-
-
-#define LOAD_SHUFFLE(m, k) \
- m = _mm_loadu_si128((const __m128i *)(const void *)(data + (k) * 16)); \
- m = _mm_shuffle_epi8(m, mask); \
-
-#define SM1(g0, g1, g2, g3) \
- SHA256_MSG1(g3, g0); \
-
-#define SM2(g0, g1, g2, g3) \
- tmp = _mm_alignr_epi8(g1, g0, 4); \
- ADD_EPI32(g2, tmp) \
- SHA25G_MSG2(g2, g1); \
-
-// #define LS0(k, g0, g1, g2, g3) LOAD_SHUFFLE(g0, k)
-// #define LS1(k, g0, g1, g2, g3) LOAD_SHUFFLE(g1, k+1)
-
-
-#define NNN(g0, g1, g2, g3)
-
-
-#define RND2(t0, t1) \
- t0 = _mm_sha256rnds2_epu32(t0, t1, msg);
-
-#define RND2_0(m, k) \
- msg = _mm_add_epi32(m, *(const __m128i *) (const void *) &K[(k) * 4]); \
- RND2(state0, state1); \
- msg = _mm_shuffle_epi32(msg, 0x0E); \
-
-
-#define RND2_1 \
- RND2(state1, state0); \
-
-
-// We use scheme with 3 rounds ahead for SHA256_MSG1 / 2 rounds ahead for SHA256_MSG2
-
-#define R4(k, g0, g1, g2, g3, OP0, OP1) \
- RND2_0(g0, k) \
- OP0(g0, g1, g2, g3) \
- RND2_1 \
- OP1(g0, g1, g2, g3) \
-
-#define R16(k, OP0, OP1, OP2, OP3, OP4, OP5, OP6, OP7) \
- R4 ( (k)*4+0, m0,m1,m2,m3, OP0, OP1 ) \
- R4 ( (k)*4+1, m1,m2,m3,m0, OP2, OP3 ) \
- R4 ( (k)*4+2, m2,m3,m0,m1, OP4, OP5 ) \
- R4 ( (k)*4+3, m3,m0,m1,m2, OP6, OP7 ) \
-
-#define PREPARE_STATE \
- tmp = _mm_shuffle_epi32(state0, 0x1B); /* abcd */ \
- state0 = _mm_shuffle_epi32(state1, 0x1B); /* efgh */ \
- state1 = state0; \
- state0 = _mm_unpacklo_epi64(state0, tmp); /* cdgh */ \
- state1 = _mm_unpackhi_epi64(state1, tmp); /* abef */ \
-
-
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
-#ifdef ATTRIB_SHA
-ATTRIB_SHA
-#endif
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
-{
- const __m128i mask = _mm_set_epi32(0x0c0d0e0f, 0x08090a0b, 0x04050607, 0x00010203);
- __m128i tmp;
- __m128i state0, state1;
-
- if (numBlocks == 0)
- return;
-
- state0 = _mm_loadu_si128((const __m128i *) (const void *) &state[0]);
- state1 = _mm_loadu_si128((const __m128i *) (const void *) &state[4]);
-
- PREPARE_STATE
-
- do
- {
- __m128i state0_save, state1_save;
- __m128i m0, m1, m2, m3;
- __m128i msg;
- // #define msg tmp
-
- state0_save = state0;
- state1_save = state1;
-
- LOAD_SHUFFLE (m0, 0)
- LOAD_SHUFFLE (m1, 1)
- LOAD_SHUFFLE (m2, 2)
- LOAD_SHUFFLE (m3, 3)
-
-
-
- R16 ( 0, NNN, NNN, SM1, NNN, SM1, SM2, SM1, SM2 )
- R16 ( 1, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 )
- R16 ( 2, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 )
- R16 ( 3, SM1, SM2, NNN, SM2, NNN, NNN, NNN, NNN )
-
- ADD_EPI32(state0, state0_save)
- ADD_EPI32(state1, state1_save)
-
- data += 64;
- }
- while (--numBlocks);
-
- PREPARE_STATE
-
- _mm_storeu_si128((__m128i *) (void *) &state[0], state0);
- _mm_storeu_si128((__m128i *) (void *) &state[4], state1);
-}
-
-#endif // USE_HW_SHA
-
-#elif defined(MY_CPU_ARM_OR_ARM64)
-
- #if defined(__clang__)
- #if (__clang_major__ >= 8) // fix that check
- #define USE_HW_SHA
- #endif
- #elif defined(__GNUC__)
- #if (__GNUC__ >= 6) // fix that check
- #define USE_HW_SHA
- #endif
- #elif defined(_MSC_VER)
- #if _MSC_VER >= 1910
- #define USE_HW_SHA
- #endif
- #endif
-
-#ifdef USE_HW_SHA
-
-// #pragma message("=== Sha256 HW === ")
-
-#if defined(__clang__) || defined(__GNUC__)
- #ifdef MY_CPU_ARM64
- #define ATTRIB_SHA __attribute__((__target__("+crypto")))
- #else
- #define ATTRIB_SHA __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
- #endif
-#else
- // _MSC_VER
- // for arm32
- #define _ARM_USE_NEW_NEON_INTRINSICS
-#endif
-
-#if defined(_MSC_VER) && defined(MY_CPU_ARM64)
-#include <arm64_neon.h>
-#else
-#include <arm_neon.h>
-#endif
-
-typedef uint32x4_t v128;
-// typedef __n128 v128; // MSVC
-
-#ifdef MY_CPU_BE
- #define MY_rev32_for_LE(x)
-#else
- #define MY_rev32_for_LE(x) x = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(x)))
-#endif
-
-#define LOAD_128(_p) (*(const v128 *)(const void *)(_p))
-#define STORE_128(_p, _v) *(v128 *)(void *)(_p) = (_v)
-
-#define LOAD_SHUFFLE(m, k) \
- m = LOAD_128((data + (k) * 16)); \
- MY_rev32_for_LE(m); \
-
-// K array must be aligned for 16-bytes at least.
-extern
-MY_ALIGN(64)
-const UInt32 SHA256_K_ARRAY[64];
-
-#define K SHA256_K_ARRAY
-
-
-#define SHA256_SU0(dest, src) dest = vsha256su0q_u32(dest, src);
-#define SHA25G_SU1(dest, src2, src3) dest = vsha256su1q_u32(dest, src2, src3);
-
-#define SM1(g0, g1, g2, g3) SHA256_SU0(g3, g0)
-#define SM2(g0, g1, g2, g3) SHA25G_SU1(g2, g0, g1)
-#define NNN(g0, g1, g2, g3)
-
-
-#define R4(k, g0, g1, g2, g3, OP0, OP1) \
- msg = vaddq_u32(g0, *(const v128 *) (const void *) &K[(k) * 4]); \
- tmp = state0; \
- state0 = vsha256hq_u32( state0, state1, msg ); \
- state1 = vsha256h2q_u32( state1, tmp, msg ); \
- OP0(g0, g1, g2, g3); \
- OP1(g0, g1, g2, g3); \
-
-
-#define R16(k, OP0, OP1, OP2, OP3, OP4, OP5, OP6, OP7) \
- R4 ( (k)*4+0, m0, m1, m2, m3, OP0, OP1 ) \
- R4 ( (k)*4+1, m1, m2, m3, m0, OP2, OP3 ) \
- R4 ( (k)*4+2, m2, m3, m0, m1, OP4, OP5 ) \
- R4 ( (k)*4+3, m3, m0, m1, m2, OP6, OP7 ) \
-
-
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
-#ifdef ATTRIB_SHA
-ATTRIB_SHA
-#endif
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
-{
- v128 state0, state1;
-
- if (numBlocks == 0)
- return;
-
- state0 = LOAD_128(&state[0]);
- state1 = LOAD_128(&state[4]);
-
- do
- {
- v128 state0_save, state1_save;
- v128 m0, m1, m2, m3;
- v128 msg, tmp;
-
- state0_save = state0;
- state1_save = state1;
-
- LOAD_SHUFFLE (m0, 0)
- LOAD_SHUFFLE (m1, 1)
- LOAD_SHUFFLE (m2, 2)
- LOAD_SHUFFLE (m3, 3)
-
- R16 ( 0, NNN, NNN, SM1, NNN, SM1, SM2, SM1, SM2 );
- R16 ( 1, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 );
- R16 ( 2, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 );
- R16 ( 3, SM1, SM2, NNN, SM2, NNN, NNN, NNN, NNN );
-
- state0 = vaddq_u32(state0, state0_save);
- state1 = vaddq_u32(state1, state1_save);
-
- data += 64;
- }
- while (--numBlocks);
-
- STORE_128(&state[0], state0);
- STORE_128(&state[4], state1);
-}
-
-#endif // USE_HW_SHA
-
-#endif // MY_CPU_ARM_OR_ARM64
-
-
-#ifndef USE_HW_SHA
-
-// #error Stop_Compiling_UNSUPPORTED_SHA
-// #include <stdlib.h>
-
-// #include "Sha256.h"
-void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
-
-#pragma message("Sha256 HW-SW stub was used")
-
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
-void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
-{
- Sha256_UpdateBlocks(state, data, numBlocks);
- /*
- UNUSED_VAR(state);
- UNUSED_VAR(data);
- UNUSED_VAR(numBlocks);
- exit(1);
- return;
- */
-}
-
-#endif
-
-
-
-#undef K
-#undef RND2
-#undef RND2_0
-#undef RND2_1
-
-#undef MY_rev32_for_LE
-#undef NNN
-#undef LOAD_128
-#undef STORE_128
-#undef LOAD_SHUFFLE
-#undef SM1
-#undef SM2
-
-#undef NNN
-#undef R4
-#undef R16
-#undef PREPARE_STATE
-#undef USE_HW_SHA
-#undef ATTRIB_SHA
-#undef USE_VER_MIN
diff --git a/3rdparty/7z/src/Sort.c b/3rdparty/7z/src/Sort.c
deleted file mode 100644
index 73dcbf0596..0000000000
--- a/3rdparty/7z/src/Sort.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Sort.c -- Sort functions
-2014-04-05 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Sort.h"
-
-#define HeapSortDown(p, k, size, temp) \
- { for (;;) { \
- size_t s = (k << 1); \
- if (s > size) break; \
- if (s < size && p[s + 1] > p[s]) s++; \
- if (temp >= p[s]) break; \
- p[k] = p[s]; k = s; \
- } p[k] = temp; }
-
-void HeapSort(UInt32 *p, size_t size)
-{
- if (size <= 1)
- return;
- p--;
- {
- size_t i = size / 2;
- do
- {
- UInt32 temp = p[i];
- size_t k = i;
- HeapSortDown(p, k, size, temp)
- }
- while (--i != 0);
- }
- /*
- do
- {
- size_t k = 1;
- UInt32 temp = p[size];
- p[size--] = p[1];
- HeapSortDown(p, k, size, temp)
- }
- while (size > 1);
- */
- while (size > 3)
- {
- UInt32 temp = p[size];
- size_t k = (p[3] > p[2]) ? 3 : 2;
- p[size--] = p[1];
- p[1] = p[k];
- HeapSortDown(p, k, size, temp)
- }
- {
- UInt32 temp = p[size];
- p[size] = p[1];
- if (size > 2 && p[2] < temp)
- {
- p[1] = p[2];
- p[2] = temp;
- }
- else
- p[1] = temp;
- }
-}
-
-void HeapSort64(UInt64 *p, size_t size)
-{
- if (size <= 1)
- return;
- p--;
- {
- size_t i = size / 2;
- do
- {
- UInt64 temp = p[i];
- size_t k = i;
- HeapSortDown(p, k, size, temp)
- }
- while (--i != 0);
- }
- /*
- do
- {
- size_t k = 1;
- UInt64 temp = p[size];
- p[size--] = p[1];
- HeapSortDown(p, k, size, temp)
- }
- while (size > 1);
- */
- while (size > 3)
- {
- UInt64 temp = p[size];
- size_t k = (p[3] > p[2]) ? 3 : 2;
- p[size--] = p[1];
- p[1] = p[k];
- HeapSortDown(p, k, size, temp)
- }
- {
- UInt64 temp = p[size];
- p[size] = p[1];
- if (size > 2 && p[2] < temp)
- {
- p[1] = p[2];
- p[2] = temp;
- }
- else
- p[1] = temp;
- }
-}
-
-/*
-#define HeapSortRefDown(p, vals, n, size, temp) \
- { size_t k = n; UInt32 val = vals[temp]; for (;;) { \
- size_t s = (k << 1); \
- if (s > size) break; \
- if (s < size && vals[p[s + 1]] > vals[p[s]]) s++; \
- if (val >= vals[p[s]]) break; \
- p[k] = p[s]; k = s; \
- } p[k] = temp; }
-
-void HeapSortRef(UInt32 *p, UInt32 *vals, size_t size)
-{
- if (size <= 1)
- return;
- p--;
- {
- size_t i = size / 2;
- do
- {
- UInt32 temp = p[i];
- HeapSortRefDown(p, vals, i, size, temp);
- }
- while (--i != 0);
- }
- do
- {
- UInt32 temp = p[size];
- p[size--] = p[1];
- HeapSortRefDown(p, vals, 1, size, temp);
- }
- while (size > 1);
-}
-*/
diff --git a/3rdparty/7z/src/Sort.h b/3rdparty/7z/src/Sort.h
deleted file mode 100644
index 1bb2b1e7e6..0000000000
--- a/3rdparty/7z/src/Sort.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Sort.h -- Sort functions
-2023-03-05 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_SORT_H
-#define ZIP7_INC_SORT_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-void HeapSort(UInt32 *p, size_t size);
-void HeapSort64(UInt64 *p, size_t size);
-
-/* void HeapSortRef(UInt32 *p, UInt32 *vals, size_t size); */
-
-EXTERN_C_END
-
-#endif
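Sort.h, removed above, only declares the two heap-sort entry points. A small usage sketch (assuming the deleted header is still reachable for the example): HeapSort() sorts a UInt32 array ascending, in place.

#include <stdio.h>
#include "Sort.h"   /* deleted in this PR; assumed available for the sketch */

int main(void)
{
    UInt32 a[] = { 42, 7, 7, 100000, 0, 3 };
    const size_t n = sizeof(a) / sizeof(a[0]);

    HeapSort(a, n);                       /* in-place ascending heap sort */

    for (size_t i = 0; i < n; i++)
        printf("%u ", (unsigned)a[i]);    /* 0 3 7 7 42 100000 */
    printf("\n");
    return 0;
}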
diff --git a/3rdparty/7z/src/SwapBytes.c b/3rdparty/7z/src/SwapBytes.c
deleted file mode 100644
index 7901bbaa87..0000000000
--- a/3rdparty/7z/src/SwapBytes.c
+++ /dev/null
@@ -1,800 +0,0 @@
-/* SwapBytes.c -- Byte Swap conversion filter
-2023-04-07 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "Compiler.h"
-#include "CpuArch.h"
-#include "RotateDefs.h"
-#include "SwapBytes.h"
-
-typedef UInt16 CSwapUInt16;
-typedef UInt32 CSwapUInt32;
-
-// #define k_SwapBytes_Mode_BASE 0
-
-#ifdef MY_CPU_X86_OR_AMD64
-
-#define k_SwapBytes_Mode_SSE2 1
-#define k_SwapBytes_Mode_SSSE3 2
-#define k_SwapBytes_Mode_AVX2 3
-
- // #if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
- #if defined(__clang__) && (__clang_major__ >= 4) \
- || defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40701)
- #define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_AVX2
- #define SWAP_ATTRIB_SSE2 __attribute__((__target__("sse2")))
- #define SWAP_ATTRIB_SSSE3 __attribute__((__target__("ssse3")))
- #define SWAP_ATTRIB_AVX2 __attribute__((__target__("avx2")))
- #elif defined(_MSC_VER)
- #if (_MSC_VER == 1900)
- #pragma warning(disable : 4752) // found Intel(R) Advanced Vector Extensions; consider using /arch:AVX
- #endif
- #if (_MSC_VER >= 1900)
- #define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_AVX2
- #elif (_MSC_VER >= 1500) // (VS2008)
- #define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_SSSE3
- #elif (_MSC_VER >= 1310) // (VS2003)
- #define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_SSE2
- #endif
- #endif // _MSC_VER
-
-/*
-// for debug
-#ifdef k_SwapBytes_Mode_MAX
-#undef k_SwapBytes_Mode_MAX
-#endif
-*/
-
-#ifndef k_SwapBytes_Mode_MAX
-#define k_SwapBytes_Mode_MAX 0
-#endif
-
-#if (k_SwapBytes_Mode_MAX != 0) && defined(MY_CPU_AMD64)
- #define k_SwapBytes_Mode_MIN k_SwapBytes_Mode_SSE2
-#else
- #define k_SwapBytes_Mode_MIN 0
-#endif
-
-#if (k_SwapBytes_Mode_MAX >= k_SwapBytes_Mode_AVX2)
- #define USE_SWAP_AVX2
-#endif
-#if (k_SwapBytes_Mode_MAX >= k_SwapBytes_Mode_SSSE3)
- #define USE_SWAP_SSSE3
-#endif
-#if (k_SwapBytes_Mode_MAX >= k_SwapBytes_Mode_SSE2)
- #define USE_SWAP_128
-#endif
-
-#if k_SwapBytes_Mode_MAX <= k_SwapBytes_Mode_MIN || !defined(USE_SWAP_128)
-#define FORCE_SWAP_MODE
-#endif
-
-
-#ifdef USE_SWAP_128
-/*
- MMX
- SSE
- SSE2
- SSE3
- SSSE3
- SSE4.1
- SSE4.2
- SSE4A
- AES
- AVX, AVX2, FMA
-*/
-
-#include <emmintrin.h> // sse2
-// typedef __m128i v128;
-
-#define SWAP2_128(i) { \
- const __m128i v = *(const __m128i *)(const void *)(items + (i) * 8); \
- *( __m128i *)( void *)(items + (i) * 8) = \
- _mm_or_si128( \
- _mm_slli_epi16(v, 8), \
- _mm_srli_epi16(v, 8)); }
-// _mm_or_si128() has more ports to execute than _mm_add_epi16().
-
-static
-#ifdef SWAP_ATTRIB_SSE2
-SWAP_ATTRIB_SSE2
-#endif
-void
-Z7_FASTCALL
-SwapBytes2_128(CSwapUInt16 *items, const CSwapUInt16 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
- SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
- }
- while (items != lim);
-}
-
-/*
-// sse2
-#define SWAP4_128_pack(i) { \
- __m128i v = *(const __m128i *)(const void *)(items + (i) * 4); \
- __m128i v0 = _mm_unpacklo_epi8(v, mask); \
- __m128i v1 = _mm_unpackhi_epi8(v, mask); \
- v0 = _mm_shufflelo_epi16(v0, 0x1b); \
- v1 = _mm_shufflelo_epi16(v1, 0x1b); \
- v0 = _mm_shufflehi_epi16(v0, 0x1b); \
- v1 = _mm_shufflehi_epi16(v1, 0x1b); \
- *(__m128i *)(void *)(items + (i) * 4) = _mm_packus_epi16(v0, v1); }
-
-static
-#ifdef SWAP_ATTRIB_SSE2
-SWAP_ATTRIB_SSE2
-#endif
-void
-Z7_FASTCALL
-SwapBytes4_128_pack(CSwapUInt32 *items, const CSwapUInt32 *lim)
-{
- const __m128i mask = _mm_setzero_si128();
- // const __m128i mask = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, 0);
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP4_128_pack(0); items += 1 * 4;
- // SWAP4_128_pack(0); SWAP4_128_pack(1); items += 2 * 4;
- }
- while (items != lim);
-}
-
-// sse2
-#define SWAP4_128_shift(i) { \
- __m128i v = *(const __m128i *)(const void *)(items + (i) * 4); \
- __m128i v2; \
- v2 = _mm_or_si128( \
- _mm_slli_si128(_mm_and_si128(v, mask), 1), \
- _mm_and_si128(_mm_srli_si128(v, 1), mask)); \
- v = _mm_or_si128( \
- _mm_slli_epi32(v, 24), \
- _mm_srli_epi32(v, 24)); \
- *(__m128i *)(void *)(items + (i) * 4) = _mm_or_si128(v2, v); }
-
-static
-#ifdef SWAP_ATTRIB_SSE2
-SWAP_ATTRIB_SSE2
-#endif
-void
-Z7_FASTCALL
-SwapBytes4_128_shift(CSwapUInt32 *items, const CSwapUInt32 *lim)
-{
- #define M1 0xff00
- const __m128i mask = _mm_set_epi32(M1, M1, M1, M1);
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- // SWAP4_128_shift(0) SWAP4_128_shift(1) items += 2 * 4;
- // SWAP4_128_shift(0) SWAP4_128_shift(1) items += 2 * 4;
- SWAP4_128_shift(0); items += 1 * 4;
- }
- while (items != lim);
-}
-*/
-
-
-#if defined(USE_SWAP_SSSE3) || defined(USE_SWAP_AVX2)
-
-#define SWAP_SHUF_REV_SEQ_2_VALS(v) (v)+1, (v)
-#define SWAP_SHUF_REV_SEQ_4_VALS(v) (v)+3, (v)+2, (v)+1, (v)
-
-#define SWAP2_SHUF_MASK_16_BYTES \
- SWAP_SHUF_REV_SEQ_2_VALS (0 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (1 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (2 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (3 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (4 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (5 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (6 * 2), \
- SWAP_SHUF_REV_SEQ_2_VALS (7 * 2)
-
-#define SWAP4_SHUF_MASK_16_BYTES \
- SWAP_SHUF_REV_SEQ_4_VALS (0 * 4), \
- SWAP_SHUF_REV_SEQ_4_VALS (1 * 4), \
- SWAP_SHUF_REV_SEQ_4_VALS (2 * 4), \
- SWAP_SHUF_REV_SEQ_4_VALS (3 * 4)
-
-#if defined(USE_SWAP_AVX2)
-/* if we use 256_BIT_INIT_MASK, each static array mask will be larger for 16 bytes */
-// #define SWAP_USE_256_BIT_INIT_MASK
-#endif
-
-#if defined(SWAP_USE_256_BIT_INIT_MASK) && defined(USE_SWAP_AVX2)
-#define SWAP_MASK_INIT_SIZE 32
-#else
-#define SWAP_MASK_INIT_SIZE 16
-#endif
-
-MY_ALIGN(SWAP_MASK_INIT_SIZE)
-static const Byte k_ShufMask_Swap2[] =
-{
- SWAP2_SHUF_MASK_16_BYTES
- #if SWAP_MASK_INIT_SIZE > 16
- , SWAP2_SHUF_MASK_16_BYTES
- #endif
-};
-
-MY_ALIGN(SWAP_MASK_INIT_SIZE)
-static const Byte k_ShufMask_Swap4[] =
-{
- SWAP4_SHUF_MASK_16_BYTES
- #if SWAP_MASK_INIT_SIZE > 16
- , SWAP4_SHUF_MASK_16_BYTES
- #endif
-};
-
-
-#ifdef USE_SWAP_SSSE3
-
-#include <tmmintrin.h> // ssse3
-
-#define SHUF_128(i) *(items + (i)) = \
- _mm_shuffle_epi8(*(items + (i)), mask); // SSSE3
-
-// Z7_NO_INLINE
-static
-#ifdef SWAP_ATTRIB_SSSE3
-SWAP_ATTRIB_SSSE3
-#endif
-Z7_ATTRIB_NO_VECTORIZE
-void
-Z7_FASTCALL
-ShufBytes_128(void *items8, const void *lim8, const void *mask128_ptr)
-{
- __m128i *items = (__m128i *)items8;
- const __m128i *lim = (const __m128i *)lim8;
- // const __m128i mask = _mm_set_epi8(SHUF_SWAP2_MASK_16_VALS);
- // const __m128i mask = _mm_set_epi8(SHUF_SWAP4_MASK_16_VALS);
- // const __m128i mask = _mm_load_si128((const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
- // const __m128i mask = _mm_load_si128((const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
- // const __m128i mask = *(const __m128i *)(const void *)&(k_ShufMask_Swap4[0]);
- const __m128i mask = *(const __m128i *)mask128_ptr;
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SHUF_128(0) SHUF_128(1) items += 2;
- SHUF_128(0) SHUF_128(1) items += 2;
- }
- while (items != lim);
-}
-
-#endif // USE_SWAP_SSSE3
-
-
-
-#ifdef USE_SWAP_AVX2
-
-#include <immintrin.h> // avx, avx2
-#if defined(__clang__)
-#include <avxintrin.h>
-#include <avx2intrin.h>
-#endif
-
-#define SHUF_256(i) *(items + (i)) = \
- _mm256_shuffle_epi8(*(items + (i)), mask); // AVX2
-
-// Z7_NO_INLINE
-static
-#ifdef SWAP_ATTRIB_AVX2
-SWAP_ATTRIB_AVX2
-#endif
-Z7_ATTRIB_NO_VECTORIZE
-void
-Z7_FASTCALL
-ShufBytes_256(void *items8, const void *lim8, const void *mask128_ptr)
-{
- __m256i *items = (__m256i *)items8;
- const __m256i *lim = (const __m256i *)lim8;
- /*
- UNUSED_VAR(mask128_ptr)
- __m256i mask =
- for Swap4: _mm256_setr_epi8(SWAP4_SHUF_MASK_16_BYTES, SWAP4_SHUF_MASK_16_BYTES);
- for Swap2: _mm256_setr_epi8(SWAP2_SHUF_MASK_16_BYTES, SWAP2_SHUF_MASK_16_BYTES);
- */
- const __m256i mask =
- #if SWAP_MASK_INIT_SIZE > 16
- *(const __m256i *)(const void *)mask128_ptr;
- #else
- /* msvc: broadcastsi128() version reserves the stack for no reason
- msvc 19.29-: _mm256_insertf128_si256() / _mm256_set_m128i)) versions use non-avx movdqu xmm0,XMMWORD PTR [r8]
- msvc 19.30+ (VS2022): replaces _mm256_set_m128i(m,m) to vbroadcastf128(m) as we want
- */
- // _mm256_broadcastsi128_si256(*mask128_ptr);
- /*
- #define MY_mm256_set_m128i(hi, lo) _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1)
- MY_mm256_set_m128i
- */
- _mm256_set_m128i(
- *(const __m128i *)mask128_ptr,
- *(const __m128i *)mask128_ptr);
- #endif
-
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SHUF_256(0) SHUF_256(1) items += 2;
- SHUF_256(0) SHUF_256(1) items += 2;
- }
- while (items != lim);
-}
-
-#endif // USE_SWAP_AVX2
-#endif // USE_SWAP_SSSE3 || USE_SWAP_AVX2
-#endif // USE_SWAP_128
-
-
-
-// compile message "NEON intrinsics not available with the soft-float ABI"
-#elif defined(MY_CPU_ARM_OR_ARM64) || \
- (defined(__ARM_ARCH) && (__ARM_ARCH >= 7))
-// #elif defined(MY_CPU_ARM64)
-
- #if defined(__clang__) && (__clang_major__ >= 8) \
- || defined(__GNUC__) && (__GNUC__ >= 8)
- #if (defined(__ARM_ARCH) && (__ARM_ARCH >= 7)) \
- || defined(MY_CPU_ARM64)
- #define USE_SWAP_128
- #endif
- #ifdef MY_CPU_ARM64
- // #define SWAP_ATTRIB_NEON __attribute__((__target__("")))
- #else
- // #define SWAP_ATTRIB_NEON __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
- #endif
- #elif defined(_MSC_VER)
- #if (_MSC_VER >= 1910)
- #define USE_SWAP_128
- #endif
- #endif
-
- #if defined(_MSC_VER) && defined(MY_CPU_ARM64)
-  #include <arm64_neon.h>
-  #else
-  #include <arm_neon.h>
- #endif
-
-#ifndef USE_SWAP_128
- #define FORCE_SWAP_MODE
-#else
-
-#ifdef MY_CPU_ARM64
- // for debug : comment it
- #define FORCE_SWAP_MODE
-#else
- #define k_SwapBytes_Mode_NEON 1
-#endif
-// typedef uint8x16_t v128;
-#define SWAP2_128(i) *(uint8x16_t *) (void *)(items + (i) * 8) = \
- vrev16q_u8(*(const uint8x16_t *)(const void *)(items + (i) * 8));
-#define SWAP4_128(i) *(uint8x16_t *) (void *)(items + (i) * 4) = \
- vrev32q_u8(*(const uint8x16_t *)(const void *)(items + (i) * 4));
-
-// Z7_NO_INLINE
-static
-#ifdef SWAP_ATTRIB_NEON
-SWAP_ATTRIB_NEON
-#endif
-Z7_ATTRIB_NO_VECTORIZE
-void
-Z7_FASTCALL
-SwapBytes2_128(CSwapUInt16 *items, const CSwapUInt16 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
- SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
- }
- while (items != lim);
-}
-
-// Z7_NO_INLINE
-static
-#ifdef SWAP_ATTRIB_NEON
-SWAP_ATTRIB_NEON
-#endif
-Z7_ATTRIB_NO_VECTORIZE
-void
-Z7_FASTCALL
-SwapBytes4_128(CSwapUInt32 *items, const CSwapUInt32 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP4_128(0) SWAP4_128(1) items += 2 * 4;
- SWAP4_128(0) SWAP4_128(1) items += 2 * 4;
- }
- while (items != lim);
-}
-
-#endif // USE_SWAP_128
-
-#else // MY_CPU_ARM_OR_ARM64
-#define FORCE_SWAP_MODE
-#endif // MY_CPU_ARM_OR_ARM64
-
-
-
-
-
-
-#if defined(Z7_MSC_VER_ORIGINAL) && defined(MY_CPU_X86)
-  /* _byteswap_ushort() in MSVC x86 32-bit works via slow { mov dh, al; mov dl, ah },
-     so we use our own versions of the byteswap function */
- #if (_MSC_VER < 1400 ) // old MSVC-X86 without _rotr16() support
- #define SWAP2_16(i) { UInt32 v = items[i]; v += (v << 16); v >>= 8; items[i] = (CSwapUInt16)v; }
- #else // is new MSVC-X86 with fast _rotr16()
-    #include <intrin.h>
- #define SWAP2_16(i) { items[i] = _rotr16(items[i], 8); }
- #endif
-#else // is not MSVC-X86
- #define SWAP2_16(i) { CSwapUInt16 v = items[i]; items[i] = Z7_BSWAP16(v); }
-#endif // MSVC-X86
-
-#if defined(Z7_CPU_FAST_BSWAP_SUPPORTED)
- #define SWAP4_32(i) { CSwapUInt32 v = items[i]; items[i] = Z7_BSWAP32(v); }
-#else
- #define SWAP4_32(i) \
- { UInt32 v = items[i]; \
- v = ((v & 0xff00ff) << 8) + ((v >> 8) & 0xff00ff); \
- v = rotlFixed(v, 16); \
- items[i] = v; }
-#endif
-
-
-
-
-#if defined(FORCE_SWAP_MODE) && defined(USE_SWAP_128)
- #define DEFAULT_Swap2 SwapBytes2_128
- #if !defined(MY_CPU_X86_OR_AMD64)
- #define DEFAULT_Swap4 SwapBytes4_128
- #endif
-#endif
-
-#if !defined(DEFAULT_Swap2) || !defined(DEFAULT_Swap4)
-
-#define SWAP_BASE_FUNCS_PREFIXES \
-Z7_FORCE_INLINE \
-static \
-Z7_ATTRIB_NO_VECTOR \
-void Z7_FASTCALL
-
-
-#ifdef MY_CPU_64BIT
-
-#if defined(MY_CPU_ARM64) \
- && defined(__ARM_ARCH) && (__ARM_ARCH >= 8) \
- && ( (defined(__GNUC__) && (__GNUC__ >= 4)) \
- || (defined(__clang__) && (__clang_major__ >= 4)))
-
- #define SWAP2_64_VAR(v) asm ("rev16 %x0,%x0" : "+r" (v));
- #define SWAP4_64_VAR(v) asm ("rev32 %x0,%x0" : "+r" (v));
-
-#else // is not ARM64-GNU
-
-#if !defined(MY_CPU_X86_OR_AMD64) || (k_SwapBytes_Mode_MIN == 0) || !defined(USE_SWAP_128)
- #define SWAP2_64_VAR(v) \
- v = ( 0x00ff00ff00ff00ff & (v >> 8)) \
- + ((0x00ff00ff00ff00ff & v) << 8);
- /* plus gives faster code in MSVC */
-#endif
-
-#ifdef Z7_CPU_FAST_BSWAP_SUPPORTED
- #define SWAP4_64_VAR(v) \
- v = Z7_BSWAP64(v); \
- v = Z7_ROTL64(v, 32);
-#else
- #define SWAP4_64_VAR(v) \
- v = ( 0x000000ff000000ff & (v >> 24)) \
- + ((0x000000ff000000ff & v) << 24 ) \
- + ( 0x0000ff000000ff00 & (v >> 8)) \
- + ((0x0000ff000000ff00 & v) << 8 ) \
- ;
-#endif
-
-#endif // ARM64-GNU
-
-
-#ifdef SWAP2_64_VAR
-
-#define SWAP2_64(i) { \
- UInt64 v = *(const UInt64 *)(const void *)(items + (i) * 4); \
- SWAP2_64_VAR(v) \
- *(UInt64 *)(void *)(items + (i) * 4) = v; }
-
-SWAP_BASE_FUNCS_PREFIXES
-SwapBytes2_64(CSwapUInt16 *items, const CSwapUInt16 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP2_64(0) SWAP2_64(1) items += 2 * 4;
- SWAP2_64(0) SWAP2_64(1) items += 2 * 4;
- }
- while (items != lim);
-}
-
- #define DEFAULT_Swap2 SwapBytes2_64
- #if !defined(FORCE_SWAP_MODE)
- #define SWAP2_DEFAULT_MODE 0
- #endif
-#else // !defined(SWAP2_64_VAR)
- #define DEFAULT_Swap2 SwapBytes2_128
- #if !defined(FORCE_SWAP_MODE)
- #define SWAP2_DEFAULT_MODE 1
- #endif
-#endif // SWAP2_64_VAR
-
-
-#define SWAP4_64(i) { \
- UInt64 v = *(const UInt64 *)(const void *)(items + (i) * 2); \
- SWAP4_64_VAR(v) \
- *(UInt64 *)(void *)(items + (i) * 2) = v; }
-
-SWAP_BASE_FUNCS_PREFIXES
-SwapBytes4_64(CSwapUInt32 *items, const CSwapUInt32 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP4_64(0) SWAP4_64(1) items += 2 * 2;
- SWAP4_64(0) SWAP4_64(1) items += 2 * 2;
- }
- while (items != lim);
-}
-
-#define DEFAULT_Swap4 SwapBytes4_64
-
-#else // is not 64BIT
-
-
-#if defined(MY_CPU_ARM_OR_ARM64) \
- && defined(__ARM_ARCH) && (__ARM_ARCH >= 6) \
- && ( (defined(__GNUC__) && (__GNUC__ >= 4)) \
- || (defined(__clang__) && (__clang_major__ >= 4)))
-
-#ifdef MY_CPU_64BIT
- #define SWAP2_32_VAR(v) asm ("rev16 %w0,%w0" : "+r" (v));
-#else
- #define SWAP2_32_VAR(v) asm ("rev16 %0,%0" : "+r" (v)); // for clang/gcc
- // asm ("rev16 %r0,%r0" : "+r" (a)); // for gcc
-#endif
-
-#elif defined(_MSC_VER) && (_MSC_VER < 1300) && defined(MY_CPU_X86) \
- || !defined(Z7_CPU_FAST_BSWAP_SUPPORTED) \
- || !defined(Z7_CPU_FAST_ROTATE_SUPPORTED)
- // old msvc doesn't support _byteswap_ulong()
- #define SWAP2_32_VAR(v) \
- v = ((v & 0xff00ff) << 8) + ((v >> 8) & 0xff00ff);
-
-#else // is not ARM and is not old-MSVC-X86 and fast BSWAP/ROTATE are supported
- #define SWAP2_32_VAR(v) \
- v = Z7_BSWAP32(v); \
- v = rotlFixed(v, 16);
-
-#endif // GNU-ARM*
-
-#define SWAP2_32(i) { \
- UInt32 v = *(const UInt32 *)(const void *)(items + (i) * 2); \
- SWAP2_32_VAR(v); \
- *(UInt32 *)(void *)(items + (i) * 2) = v; }
-
-
-SWAP_BASE_FUNCS_PREFIXES
-SwapBytes2_32(CSwapUInt16 *items, const CSwapUInt16 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP2_32(0) SWAP2_32(1) items += 2 * 2;
- SWAP2_32(0) SWAP2_32(1) items += 2 * 2;
- }
- while (items != lim);
-}
-
-
-SWAP_BASE_FUNCS_PREFIXES
-SwapBytes4_32(CSwapUInt32 *items, const CSwapUInt32 *lim)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- do
- {
- SWAP4_32(0) SWAP4_32(1) items += 2;
- SWAP4_32(0) SWAP4_32(1) items += 2;
- }
- while (items != lim);
-}
-
-#define DEFAULT_Swap2 SwapBytes2_32
-#define DEFAULT_Swap4 SwapBytes4_32
-#if !defined(FORCE_SWAP_MODE)
- #define SWAP2_DEFAULT_MODE 0
-#endif
-
-#endif // MY_CPU_64BIT
-#endif // if !defined(DEFAULT_Swap2) || !defined(DEFAULT_Swap4)
-
-
-
-#if !defined(FORCE_SWAP_MODE)
-static unsigned g_SwapBytes_Mode;
-#endif
-
-/* size of largest unrolled loop iteration: 128 bytes = 4 * 32 bytes (AVX). */
-#define SWAP_ITERATION_BLOCK_SIZE_MAX (1 << 7)
-
-// 32 bytes for (AVX) or 2 * 16-bytes for NEON.
-#define SWAP_VECTOR_ALIGN_SIZE (1 << 5)
-
-Z7_NO_INLINE
-void z7_SwapBytes2(CSwapUInt16 *items, size_t numItems)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (SWAP_VECTOR_ALIGN_SIZE - 1)) != 0; numItems--)
- {
- SWAP2_16(0)
- items++;
- }
- {
- const size_t k_Align_Mask = SWAP_ITERATION_BLOCK_SIZE_MAX / sizeof(CSwapUInt16) - 1;
- size_t numItems2 = numItems;
- CSwapUInt16 *lim;
- numItems &= k_Align_Mask;
- numItems2 &= ~(size_t)k_Align_Mask;
- lim = items + numItems2;
- if (numItems2 != 0)
- {
- #if !defined(FORCE_SWAP_MODE)
- #ifdef MY_CPU_X86_OR_AMD64
- #ifdef USE_SWAP_AVX2
- if (g_SwapBytes_Mode > k_SwapBytes_Mode_SSSE3)
- ShufBytes_256((__m256i *)(void *)items,
- (const __m256i *)(const void *)lim,
- (const __m128i *)(const void *)&(k_ShufMask_Swap2[0]));
- else
- #endif
- #ifdef USE_SWAP_SSSE3
- if (g_SwapBytes_Mode >= k_SwapBytes_Mode_SSSE3)
- ShufBytes_128((__m128i *)(void *)items,
- (const __m128i *)(const void *)lim,
- (const __m128i *)(const void *)&(k_ShufMask_Swap2[0]));
- else
- #endif
- #endif // MY_CPU_X86_OR_AMD64
- #if SWAP2_DEFAULT_MODE == 0
- if (g_SwapBytes_Mode != 0)
- SwapBytes2_128(items, lim);
- else
- #endif
- #endif // FORCE_SWAP_MODE
- DEFAULT_Swap2(items, lim);
- }
- items = lim;
- }
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (; numItems != 0; numItems--)
- {
- SWAP2_16(0)
- items++;
- }
-}
-
-
-Z7_NO_INLINE
-void z7_SwapBytes4(CSwapUInt32 *items, size_t numItems)
-{
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (SWAP_VECTOR_ALIGN_SIZE - 1)) != 0; numItems--)
- {
- SWAP4_32(0)
- items++;
- }
- {
- const size_t k_Align_Mask = SWAP_ITERATION_BLOCK_SIZE_MAX / sizeof(CSwapUInt32) - 1;
- size_t numItems2 = numItems;
- CSwapUInt32 *lim;
- numItems &= k_Align_Mask;
- numItems2 &= ~(size_t)k_Align_Mask;
- lim = items + numItems2;
- if (numItems2 != 0)
- {
- #if !defined(FORCE_SWAP_MODE)
- #ifdef MY_CPU_X86_OR_AMD64
- #ifdef USE_SWAP_AVX2
- if (g_SwapBytes_Mode > k_SwapBytes_Mode_SSSE3)
- ShufBytes_256((__m256i *)(void *)items,
- (const __m256i *)(const void *)lim,
- (const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
- else
- #endif
- #ifdef USE_SWAP_SSSE3
- if (g_SwapBytes_Mode >= k_SwapBytes_Mode_SSSE3)
- ShufBytes_128((__m128i *)(void *)items,
- (const __m128i *)(const void *)lim,
- (const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
- else
- #endif
- #else // MY_CPU_X86_OR_AMD64
-
- if (g_SwapBytes_Mode != 0)
- SwapBytes4_128(items, lim);
- else
- #endif // MY_CPU_X86_OR_AMD64
- #endif // FORCE_SWAP_MODE
- DEFAULT_Swap4(items, lim);
- }
- items = lim;
- }
- Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
- for (; numItems != 0; numItems--)
- {
- SWAP4_32(0)
- items++;
- }
-}
-
-
-// #define SHOW_HW_STATUS
-
-#ifdef SHOW_HW_STATUS
-#include <stdio.h>
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-
-void z7_SwapBytesPrepare(void)
-{
-#ifndef FORCE_SWAP_MODE
- unsigned mode = 0; // k_SwapBytes_Mode_BASE;
-
-#ifdef MY_CPU_ARM_OR_ARM64
- {
- if (CPU_IsSupported_NEON())
- {
- // #pragma message ("=== SwapBytes NEON")
- PRF(printf("\n=== SwapBytes NEON\n");)
- mode = k_SwapBytes_Mode_NEON;
- }
- }
-#else // MY_CPU_ARM_OR_ARM64
- {
- #ifdef USE_SWAP_AVX2
- if (CPU_IsSupported_AVX2())
- {
- // #pragma message ("=== SwapBytes AVX2")
- PRF(printf("\n=== SwapBytes AVX2\n");)
- mode = k_SwapBytes_Mode_AVX2;
- }
- else
- #endif
- #ifdef USE_SWAP_SSSE3
- if (CPU_IsSupported_SSSE3())
- {
- // #pragma message ("=== SwapBytes SSSE3")
- PRF(printf("\n=== SwapBytes SSSE3\n");)
- mode = k_SwapBytes_Mode_SSSE3;
- }
- else
- #endif
- #if !defined(MY_CPU_AMD64)
- if (CPU_IsSupported_SSE2())
- #endif
- {
- // #pragma message ("=== SwapBytes SSE2")
- PRF(printf("\n=== SwapBytes SSE2\n");)
- mode = k_SwapBytes_Mode_SSE2;
- }
- }
-#endif // MY_CPU_ARM_OR_ARM64
- g_SwapBytes_Mode = mode;
- // g_SwapBytes_Mode = 0; // for debug
-#endif // FORCE_SWAP_MODE
- PRF(printf("\n=== SwapBytesPrepare\n");)
-}
-
-#undef PRF
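Every path in SwapBytes.c above — the SSSE3/AVX2 shuffle masks, the NEON vrev16q/vrev32q forms, and the 32/64-bit scalar fallbacks — computes the same transform: the byte order of every 16-bit or 32-bit item in the buffer is reversed in place. A minimal scalar sketch of that semantics (standard C99 fixed-width types stand in for CSwapUInt16/CSwapUInt32; the runtime dispatch and alignment prologue are omitted):

#include <stddef.h>
#include <stdint.h>

/* reference for z7_SwapBytes2: reverse the bytes of each 16-bit item */
static void RefSwapBytes2(uint16_t *items, size_t numItems)
{
  for (; numItems != 0; numItems--, items++)
  {
    const uint16_t v = *items;
    *items = (uint16_t)((v << 8) | (v >> 8));          /* AABB -> BBAA */
  }
}

/* reference for z7_SwapBytes4: reverse the bytes of each 32-bit item */
static void RefSwapBytes4(uint32_t *items, size_t numItems)
{
  for (; numItems != 0; numItems--, items++)
  {
    const uint32_t v = *items;
    *items = (v >> 24) | ((v >> 8) & 0x0000ff00u)
           | ((v << 8) & 0x00ff0000u) | (v << 24);     /* AABBCCDD -> DDCCBBAA */
  }
}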
diff --git a/3rdparty/7z/src/SwapBytes.h b/3rdparty/7z/src/SwapBytes.h
deleted file mode 100644
index d442467386..0000000000
--- a/3rdparty/7z/src/SwapBytes.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SwapBytes.h -- Byte Swap conversion filter
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_SWAP_BYTES_H
-#define ZIP7_INC_SWAP_BYTES_H
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-void z7_SwapBytes2(UInt16 *data, size_t numItems);
-void z7_SwapBytes4(UInt32 *data, size_t numItems);
-void z7_SwapBytesPrepare(void);
-
-EXTERN_C_END
-
-#endif
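SwapBytes.h exposes only the three entry points above. A short usage sketch, with hypothetical sample values: z7_SwapBytesPrepare() is called once so the SSE2/SSSE3/AVX2/NEON mode is chosen, then the swap functions take item-typed buffers and item counts.

#include "SwapBytes.h"

static void SwapUsageSketch(void)
{
  UInt16 a[4] = { 0x1122, 0x3344 };   /* sample values (hypothetical) */
  UInt32 b[2] = { 0x11223344 };

  z7_SwapBytesPrepare();              /* select SSE2/SSSE3/AVX2/NEON mode once */
  z7_SwapBytes2(a, 4);                /* a[0] -> 0x2211, a[1] -> 0x4433 */
  z7_SwapBytes4(b, 2);                /* b[0] -> 0x44332211 */
}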
diff --git a/3rdparty/7z/src/Threads.c b/3rdparty/7z/src/Threads.c
deleted file mode 100644
index d8aaa83277..0000000000
--- a/3rdparty/7z/src/Threads.c
+++ /dev/null
@@ -1,562 +0,0 @@
-/* Threads.c -- multithreading library
-2023-03-04 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#ifdef _WIN32
-
-#ifndef USE_THREADS_CreateThread
-#include <process.h>
-#endif
-
-#include "Threads.h"
-
-static WRes GetError(void)
-{
- const DWORD res = GetLastError();
- return res ? (WRes)res : 1;
-}
-
-static WRes HandleToWRes(HANDLE h) { return (h != NULL) ? 0 : GetError(); }
-static WRes BOOLToWRes(BOOL v) { return v ? 0 : GetError(); }
-
-WRes HandlePtr_Close(HANDLE *p)
-{
- if (*p != NULL)
- {
- if (!CloseHandle(*p))
- return GetError();
- *p = NULL;
- }
- return 0;
-}
-
-WRes Handle_WaitObject(HANDLE h)
-{
- DWORD dw = WaitForSingleObject(h, INFINITE);
- /*
- (dw) result:
- WAIT_OBJECT_0 // 0
- WAIT_ABANDONED // 0x00000080 : is not compatible with Win32 Error space
- WAIT_TIMEOUT // 0x00000102 : is compatible with Win32 Error space
- WAIT_FAILED // 0xFFFFFFFF
- */
- if (dw == WAIT_FAILED)
- {
- dw = GetLastError();
- if (dw == 0)
- return WAIT_FAILED;
- }
- return (WRes)dw;
-}
-
-#define Thread_Wait(p) Handle_WaitObject(*(p))
-
-WRes Thread_Wait_Close(CThread *p)
-{
- WRes res = Thread_Wait(p);
- WRes res2 = Thread_Close(p);
- return (res != 0 ? res : res2);
-}
-
-WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
-{
- /* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
-
- #ifdef USE_THREADS_CreateThread
-
- DWORD threadId;
- *p = CreateThread(NULL, 0, func, param, 0, &threadId);
-
- #else
-
- unsigned threadId;
- *p = (HANDLE)(_beginthreadex(NULL, 0, func, param, 0, &threadId));
-
- #endif
-
- /* maybe we must use errno here, but probably GetLastError() is also OK. */
- return HandleToWRes(*p);
-}
-
-
-WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
-{
- #ifdef USE_THREADS_CreateThread
-
- UNUSED_VAR(affinity)
- return Thread_Create(p, func, param);
-
- #else
-
- /* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
- HANDLE h;
- WRes wres;
- unsigned threadId;
- h = (HANDLE)(_beginthreadex(NULL, 0, func, param, CREATE_SUSPENDED, &threadId));
- *p = h;
- wres = HandleToWRes(h);
- if (h)
- {
- {
- // DWORD_PTR prevMask =
- SetThreadAffinityMask(h, (DWORD_PTR)affinity);
- /*
- if (prevMask == 0)
- {
- // affinity change is non-critical error, so we can ignore it
- // wres = GetError();
- }
- */
- }
- {
- DWORD prevSuspendCount = ResumeThread(h);
- /* ResumeThread() returns:
- 0 : was_not_suspended
- 1 : was_resumed
- -1 : error
- */
- if (prevSuspendCount == (DWORD)-1)
- wres = GetError();
- }
- }
-
- /* maybe we must use errno here, but probably GetLastError() is also OK. */
- return wres;
-
- #endif
-}
-
-
-static WRes Event_Create(CEvent *p, BOOL manualReset, int signaled)
-{
- *p = CreateEvent(NULL, manualReset, (signaled ? TRUE : FALSE), NULL);
- return HandleToWRes(*p);
-}
-
-WRes Event_Set(CEvent *p) { return BOOLToWRes(SetEvent(*p)); }
-WRes Event_Reset(CEvent *p) { return BOOLToWRes(ResetEvent(*p)); }
-
-WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled) { return Event_Create(p, TRUE, signaled); }
-WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled) { return Event_Create(p, FALSE, signaled); }
-WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p) { return ManualResetEvent_Create(p, 0); }
-WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p) { return AutoResetEvent_Create(p, 0); }
-
-
-WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
-{
- // negative ((LONG)maxCount) is not supported in WIN32::CreateSemaphore()
- *p = CreateSemaphore(NULL, (LONG)initCount, (LONG)maxCount, NULL);
- return HandleToWRes(*p);
-}
-
-WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
-{
- // if (Semaphore_IsCreated(p))
- {
- WRes wres = Semaphore_Close(p);
- if (wres != 0)
- return wres;
- }
- return Semaphore_Create(p, initCount, maxCount);
-}
-
-static WRes Semaphore_Release(CSemaphore *p, LONG releaseCount, LONG *previousCount)
- { return BOOLToWRes(ReleaseSemaphore(*p, releaseCount, previousCount)); }
-WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num)
- { return Semaphore_Release(p, (LONG)num, NULL); }
-WRes Semaphore_Release1(CSemaphore *p) { return Semaphore_ReleaseN(p, 1); }
-
-WRes CriticalSection_Init(CCriticalSection *p)
-{
- /* InitializeCriticalSection() can raise exception:
- Windows XP, 2003 : can raise a STATUS_NO_MEMORY exception
- Windows Vista+ : no exceptions */
- #ifdef _MSC_VER
- #ifdef __clang__
- #pragma GCC diagnostic ignored "-Wlanguage-extension-token"
- #endif
- __try
- #endif
- {
- InitializeCriticalSection(p);
- /* InitializeCriticalSectionAndSpinCount(p, 0); */
- }
- #ifdef _MSC_VER
- __except (EXCEPTION_EXECUTE_HANDLER) { return ERROR_NOT_ENOUGH_MEMORY; }
- #endif
- return 0;
-}
-
-
-
-
-#else // _WIN32
-
-// ---------- POSIX ----------
-
-#ifndef __APPLE__
-#ifndef Z7_AFFINITY_DISABLE
-// _GNU_SOURCE can be required for pthread_setaffinity_np() / CPU_ZERO / CPU_SET
-// clang < 3.6 : unknown warning group '-Wreserved-id-macro'
-// clang 3.6 - 12.01 : gives warning "macro name is a reserved identifier"
-// clang >= 13 : do not give warning
-#if !defined(_GNU_SOURCE)
- #if defined(__clang__) && (__clang_major__ >= 4) && (__clang_major__ <= 12)
- #pragma GCC diagnostic ignored "-Wreserved-id-macro"
- #endif
-#define _GNU_SOURCE
-#endif // !defined(_GNU_SOURCE)
-#endif // Z7_AFFINITY_DISABLE
-#endif // __APPLE__
-
-#include "Threads.h"
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#ifdef Z7_AFFINITY_SUPPORTED
-// #include <sched.h>
-#endif
-
-
-// #include <stdio.h>
-// #define PRF(p) p
-#define PRF(p)
-#define Print(s) PRF(printf("\n%s\n", s);)
-
-WRes Thread_Create_With_CpuSet(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, const CCpuSet *cpuSet)
-{
-  // new thread in POSIX probably inherits affinity from the parent thread
- Print("Thread_Create_With_CpuSet")
-
- pthread_attr_t attr;
- int ret;
- // int ret2;
-
- p->_created = 0;
-
- RINOK(pthread_attr_init(&attr))
-
- ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
-
- if (!ret)
- {
- if (cpuSet)
- {
- #ifdef Z7_AFFINITY_SUPPORTED
-
- /*
- printf("\n affinity :");
- unsigned i;
- for (i = 0; i < sizeof(*cpuSet) && i < 8; i++)
- {
- Byte b = *((const Byte *)cpuSet + i);
- char temp[32];
- #define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
- temp[0] = GET_HEX_CHAR((b & 0xF));
- temp[1] = GET_HEX_CHAR((b >> 4));
- // temp[0] = GET_HEX_CHAR((b >> 4)); // big-endian
- // temp[1] = GET_HEX_CHAR((b & 0xF)); // big-endian
- temp[2] = 0;
- printf("%s", temp);
- }
- printf("\n");
- */
-
- // ret2 =
- pthread_attr_setaffinity_np(&attr, sizeof(*cpuSet), cpuSet);
- // if (ret2) ret = ret2;
- #endif
- }
-
- ret = pthread_create(&p->_tid, &attr, func, param);
-
- if (!ret)
- {
- p->_created = 1;
- /*
- if (cpuSet)
- {
- // ret2 =
- pthread_setaffinity_np(p->_tid, sizeof(*cpuSet), cpuSet);
- // if (ret2) ret = ret2;
- }
- */
- }
- }
- // ret2 =
- pthread_attr_destroy(&attr);
- // if (ret2 != 0) ret = ret2;
- return ret;
-}
-
-
-WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
-{
- return Thread_Create_With_CpuSet(p, func, param, NULL);
-}
-
-
-WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
-{
- Print("Thread_Create_WithAffinity")
- CCpuSet cs;
- unsigned i;
- CpuSet_Zero(&cs);
- for (i = 0; i < sizeof(affinity) * 8; i++)
- {
- if (affinity == 0)
- break;
- if (affinity & 1)
- {
- CpuSet_Set(&cs, i);
- }
- affinity >>= 1;
- }
- return Thread_Create_With_CpuSet(p, func, param, &cs);
-}
-
-
-WRes Thread_Close(CThread *p)
-{
- // Print("Thread_Close")
- int ret;
- if (!p->_created)
- return 0;
-
- ret = pthread_detach(p->_tid);
- p->_tid = 0;
- p->_created = 0;
- return ret;
-}
-
-
-WRes Thread_Wait_Close(CThread *p)
-{
- // Print("Thread_Wait_Close")
- void *thread_return;
- int ret;
- if (!p->_created)
- return EINVAL;
-
- ret = pthread_join(p->_tid, &thread_return);
- // probably we can't use that (_tid) after pthread_join(), so we close thread here
- p->_created = 0;
- p->_tid = 0;
- return ret;
-}
-
-
-
-static WRes Event_Create(CEvent *p, int manualReset, int signaled)
-{
- RINOK(pthread_mutex_init(&p->_mutex, NULL))
- RINOK(pthread_cond_init(&p->_cond, NULL))
- p->_manual_reset = manualReset;
- p->_state = (signaled ? True : False);
- p->_created = 1;
- return 0;
-}
-
-WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled)
- { return Event_Create(p, True, signaled); }
-WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p)
- { return ManualResetEvent_Create(p, 0); }
-WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled)
- { return Event_Create(p, False, signaled); }
-WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p)
- { return AutoResetEvent_Create(p, 0); }
-
-
-WRes Event_Set(CEvent *p)
-{
- RINOK(pthread_mutex_lock(&p->_mutex))
- p->_state = True;
- int res1 = pthread_cond_broadcast(&p->_cond);
- int res2 = pthread_mutex_unlock(&p->_mutex);
- return (res2 ? res2 : res1);
-}
-
-WRes Event_Reset(CEvent *p)
-{
- RINOK(pthread_mutex_lock(&p->_mutex))
- p->_state = False;
- return pthread_mutex_unlock(&p->_mutex);
-}
-
-WRes Event_Wait(CEvent *p)
-{
- RINOK(pthread_mutex_lock(&p->_mutex))
- while (p->_state == False)
- {
- // ETIMEDOUT
- // ret =
- pthread_cond_wait(&p->_cond, &p->_mutex);
- // if (ret != 0) break;
- }
- if (p->_manual_reset == False)
- {
- p->_state = False;
- }
- return pthread_mutex_unlock(&p->_mutex);
-}
-
-WRes Event_Close(CEvent *p)
-{
- if (!p->_created)
- return 0;
- p->_created = 0;
- {
- int res1 = pthread_mutex_destroy(&p->_mutex);
- int res2 = pthread_cond_destroy(&p->_cond);
- return (res1 ? res1 : res2);
- }
-}
-
-
-WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
-{
- if (initCount > maxCount || maxCount < 1)
- return EINVAL;
- RINOK(pthread_mutex_init(&p->_mutex, NULL))
- RINOK(pthread_cond_init(&p->_cond, NULL))
- p->_count = initCount;
- p->_maxCount = maxCount;
- p->_created = 1;
- return 0;
-}
-
-
-WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
-{
- if (Semaphore_IsCreated(p))
- {
- /*
- WRes wres = Semaphore_Close(p);
- if (wres != 0)
- return wres;
- */
- if (initCount > maxCount || maxCount < 1)
- return EINVAL;
- // return EINVAL; // for debug
- p->_count = initCount;
- p->_maxCount = maxCount;
- return 0;
- }
- return Semaphore_Create(p, initCount, maxCount);
-}
-
-
-WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
-{
- UInt32 newCount;
- int ret;
-
- if (releaseCount < 1)
- return EINVAL;
-
- RINOK(pthread_mutex_lock(&p->_mutex))
-
- newCount = p->_count + releaseCount;
- if (newCount > p->_maxCount)
- ret = ERROR_TOO_MANY_POSTS; // EINVAL;
- else
- {
- p->_count = newCount;
- ret = pthread_cond_broadcast(&p->_cond);
- }
- RINOK(pthread_mutex_unlock(&p->_mutex))
- return ret;
-}
-
-WRes Semaphore_Wait(CSemaphore *p)
-{
- RINOK(pthread_mutex_lock(&p->_mutex))
- while (p->_count < 1)
- {
- pthread_cond_wait(&p->_cond, &p->_mutex);
- }
- p->_count--;
- return pthread_mutex_unlock(&p->_mutex);
-}
-
-WRes Semaphore_Close(CSemaphore *p)
-{
- if (!p->_created)
- return 0;
- p->_created = 0;
- {
- int res1 = pthread_mutex_destroy(&p->_mutex);
- int res2 = pthread_cond_destroy(&p->_cond);
- return (res1 ? res1 : res2);
- }
-}
-
-
-
-WRes CriticalSection_Init(CCriticalSection *p)
-{
- // Print("CriticalSection_Init")
- if (!p)
- return EINTR;
- return pthread_mutex_init(&p->_mutex, NULL);
-}
-
-void CriticalSection_Enter(CCriticalSection *p)
-{
- // Print("CriticalSection_Enter")
- if (p)
- {
- // int ret =
- pthread_mutex_lock(&p->_mutex);
- }
-}
-
-void CriticalSection_Leave(CCriticalSection *p)
-{
- // Print("CriticalSection_Leave")
- if (p)
- {
- // int ret =
- pthread_mutex_unlock(&p->_mutex);
- }
-}
-
-void CriticalSection_Delete(CCriticalSection *p)
-{
- // Print("CriticalSection_Delete")
- if (p)
- {
- // int ret =
- pthread_mutex_destroy(&p->_mutex);
- }
-}
-
-LONG InterlockedIncrement(LONG volatile *addend)
-{
- // Print("InterlockedIncrement")
- #ifdef USE_HACK_UNSAFE_ATOMIC
- LONG val = *addend + 1;
- *addend = val;
- return val;
- #else
-
- #if defined(__clang__) && (__clang_major__ >= 8)
- #pragma GCC diagnostic ignored "-Watomic-implicit-seq-cst"
- #endif
- return __sync_add_and_fetch(addend, 1);
- #endif
-}
-
-#endif // _WIN32
-
-WRes AutoResetEvent_OptCreate_And_Reset(CAutoResetEvent *p)
-{
- if (Event_IsCreated(p))
- return Event_Reset(p);
- return AutoResetEvent_CreateNotSignaled(p);
-}
-
-#undef PRF
-#undef Print
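The semaphore half of Threads.c wraps either Win32 CreateSemaphore or a mutex/condition-variable pair behind one WRes-returning API. A small sketch of the call order (SemaphoreSketch and the counts are illustrative only; it assumes Threads.h is on the include path):

#include "Threads.h"

/* sketch: a semaphore used as an "items available" counter; WRes 0 == success */
static WRes SemaphoreSketch(void)
{
  CSemaphore sem;
  WRes wres;
  Semaphore_Construct(&sem);
  wres = Semaphore_Create(&sem, 0 /* initCount */, 16 /* maxCount */);
  if (wres != 0)
    return wres;
  wres = Semaphore_ReleaseN(&sem, 3);   /* announce 3 items */
  if (wres == 0)
    wres = Semaphore_Wait(&sem);        /* consume one; the count drops to 2 */
  Semaphore_Close(&sem);
  return wres;
}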
diff --git a/3rdparty/7z/src/Threads.h b/3rdparty/7z/src/Threads.h
deleted file mode 100644
index c6aa6a8f18..0000000000
--- a/3rdparty/7z/src/Threads.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Threads.h -- multithreading library
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_THREADS_H
-#define ZIP7_INC_THREADS_H
-
-#ifdef _WIN32
-#include "7zWindows.h"
-
-#else
-
-#if defined(__linux__)
-#if !defined(__APPLE__) && !defined(_AIX) && !defined(__ANDROID__)
-#ifndef Z7_AFFINITY_DISABLE
-#define Z7_AFFINITY_SUPPORTED
-// #pragma message(" ==== Z7_AFFINITY_SUPPORTED")
-// #define _GNU_SOURCE
-#endif
-#endif
-#endif
-
-#include <pthread.h>
-
-#endif
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-#ifdef _WIN32
-
-WRes HandlePtr_Close(HANDLE *h);
-WRes Handle_WaitObject(HANDLE h);
-
-typedef HANDLE CThread;
-
-#define Thread_CONSTRUCT(p) { *(p) = NULL; }
-#define Thread_WasCreated(p) (*(p) != NULL)
-#define Thread_Close(p) HandlePtr_Close(p)
-// #define Thread_Wait(p) Handle_WaitObject(*(p))
-
-#ifdef UNDER_CE
-  // if (USE_THREADS_CreateThread is defined), we use CreateThread()
-  // if (USE_THREADS_CreateThread is not defined), we use _beginthreadex()
- #define USE_THREADS_CreateThread
-#endif
-
-typedef
- #ifdef USE_THREADS_CreateThread
- DWORD
- #else
- unsigned
- #endif
- THREAD_FUNC_RET_TYPE;
-
-#define THREAD_FUNC_RET_ZERO 0
-
-typedef DWORD_PTR CAffinityMask;
-typedef DWORD_PTR CCpuSet;
-
-#define CpuSet_Zero(p) *(p) = (0)
-#define CpuSet_Set(p, cpu) *(p) |= ((DWORD_PTR)1 << (cpu))
-
-#else // _WIN32
-
-typedef struct
-{
- pthread_t _tid;
- int _created;
-} CThread;
-
-#define Thread_CONSTRUCT(p) { (p)->_tid = 0; (p)->_created = 0; }
-#define Thread_WasCreated(p) ((p)->_created != 0)
-WRes Thread_Close(CThread *p);
-// #define Thread_Wait Thread_Wait_Close
-
-typedef void * THREAD_FUNC_RET_TYPE;
-#define THREAD_FUNC_RET_ZERO NULL
-
-
-typedef UInt64 CAffinityMask;
-
-#ifdef Z7_AFFINITY_SUPPORTED
-
-typedef cpu_set_t CCpuSet;
-#define CpuSet_Zero(p) CPU_ZERO(p)
-#define CpuSet_Set(p, cpu) CPU_SET(cpu, p)
-#define CpuSet_IsSet(p, cpu) CPU_ISSET(cpu, p)
-
-#else
-
-typedef UInt64 CCpuSet;
-#define CpuSet_Zero(p) *(p) = (0)
-#define CpuSet_Set(p, cpu) *(p) |= ((UInt64)1 << (cpu))
-#define CpuSet_IsSet(p, cpu) ((*(p) & ((UInt64)1 << (cpu))) != 0)
-
-#endif
-
-
-#endif // _WIN32
-
-
-#define THREAD_FUNC_CALL_TYPE Z7_STDCALL
-
-#if defined(_WIN32) && defined(__GNUC__)
-/* GCC compiler for x86 32-bit uses the rule:
- the stack is 16-byte aligned before CALL instruction for function calling.
- But only root function main() contains instructions that
-   set 16-byte alignment for the stack pointer. Other functions
-   just keep that alignment, if it was set in some parent function.
-
- The problem:
- if we create new thread in MinGW (GCC) 32-bit x86 via _beginthreadex() or CreateThread(),
- the root function of thread doesn't set 16-byte alignment.
- And stack frames in all child functions also will be unaligned in that case.
-
- Here we set (force_align_arg_pointer) attribute for root function of new thread.
-   Do we need (force_align_arg_pointer) also for other systems? */
-
- #define THREAD_FUNC_ATTRIB_ALIGN_ARG __attribute__((force_align_arg_pointer))
- // #define THREAD_FUNC_ATTRIB_ALIGN_ARG // for debug : bad alignment in SSE functions
-#else
- #define THREAD_FUNC_ATTRIB_ALIGN_ARG
-#endif
-
-#define THREAD_FUNC_DECL THREAD_FUNC_ATTRIB_ALIGN_ARG THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
-
-typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
-WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
-WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity);
-WRes Thread_Wait_Close(CThread *p);
-
-#ifdef _WIN32
-#define Thread_Create_With_CpuSet(p, func, param, cs) \
- Thread_Create_With_Affinity(p, func, param, *cs)
-#else
-WRes Thread_Create_With_CpuSet(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, const CCpuSet *cpuSet);
-#endif
-
-
-#ifdef _WIN32
-
-typedef HANDLE CEvent;
-typedef CEvent CAutoResetEvent;
-typedef CEvent CManualResetEvent;
-#define Event_Construct(p) *(p) = NULL
-#define Event_IsCreated(p) (*(p) != NULL)
-#define Event_Close(p) HandlePtr_Close(p)
-#define Event_Wait(p) Handle_WaitObject(*(p))
-WRes Event_Set(CEvent *p);
-WRes Event_Reset(CEvent *p);
-WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
-WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
-WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
-WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
-
-typedef HANDLE CSemaphore;
-#define Semaphore_Construct(p) *(p) = NULL
-#define Semaphore_IsCreated(p) (*(p) != NULL)
-#define Semaphore_Close(p) HandlePtr_Close(p)
-#define Semaphore_Wait(p) Handle_WaitObject(*(p))
-WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
-WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
-WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
-WRes Semaphore_Release1(CSemaphore *p);
-
-typedef CRITICAL_SECTION CCriticalSection;
-WRes CriticalSection_Init(CCriticalSection *p);
-#define CriticalSection_Delete(p) DeleteCriticalSection(p)
-#define CriticalSection_Enter(p) EnterCriticalSection(p)
-#define CriticalSection_Leave(p) LeaveCriticalSection(p)
-
-
-#else // _WIN32
-
-typedef struct _CEvent
-{
- int _created;
- int _manual_reset;
- int _state;
- pthread_mutex_t _mutex;
- pthread_cond_t _cond;
-} CEvent;
-
-typedef CEvent CAutoResetEvent;
-typedef CEvent CManualResetEvent;
-
-#define Event_Construct(p) (p)->_created = 0
-#define Event_IsCreated(p) ((p)->_created)
-
-WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
-WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
-WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
-WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
-
-WRes Event_Set(CEvent *p);
-WRes Event_Reset(CEvent *p);
-WRes Event_Wait(CEvent *p);
-WRes Event_Close(CEvent *p);
-
-
-typedef struct _CSemaphore
-{
- int _created;
- UInt32 _count;
- UInt32 _maxCount;
- pthread_mutex_t _mutex;
- pthread_cond_t _cond;
-} CSemaphore;
-
-#define Semaphore_Construct(p) (p)->_created = 0
-#define Semaphore_IsCreated(p) ((p)->_created)
-
-WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
-WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
-WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
-#define Semaphore_Release1(p) Semaphore_ReleaseN(p, 1)
-WRes Semaphore_Wait(CSemaphore *p);
-WRes Semaphore_Close(CSemaphore *p);
-
-
-typedef struct _CCriticalSection
-{
- pthread_mutex_t _mutex;
-} CCriticalSection;
-
-WRes CriticalSection_Init(CCriticalSection *p);
-void CriticalSection_Delete(CCriticalSection *cs);
-void CriticalSection_Enter(CCriticalSection *cs);
-void CriticalSection_Leave(CCriticalSection *cs);
-
-LONG InterlockedIncrement(LONG volatile *addend);
-
-#endif // _WIN32
-
-WRes AutoResetEvent_OptCreate_And_Reset(CAutoResetEvent *p);
-
-EXTERN_C_END
-
-#endif
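Combining the thread and event primitives declared in Threads.h, a sketch of a one-shot worker handshake; WorkerThread, CWorkerCtx and RunWorkerOnce are illustrative names, and the worker body is elided:

#include "Threads.h"

typedef struct { CAutoResetEvent done; } CWorkerCtx;

static THREAD_FUNC_DECL WorkerThread(void *param)
{
  CWorkerCtx *ctx = (CWorkerCtx *)param;
  /* ... do some work ... */
  Event_Set(&ctx->done);                 /* auto-reset: wakes exactly one waiter */
  return THREAD_FUNC_RET_ZERO;
}

static WRes RunWorkerOnce(void)
{
  CWorkerCtx ctx;
  CThread t;
  WRes wres;
  Event_Construct(&ctx.done);
  Thread_CONSTRUCT(&t)
  wres = AutoResetEvent_CreateNotSignaled(&ctx.done);
  if (wres == 0)
    wres = Thread_Create(&t, WorkerThread, &ctx);
  if (wres == 0)
  {
    Event_Wait(&ctx.done);               /* blocks until WorkerThread signals */
    wres = Thread_Wait_Close(&t);
  }
  Event_Close(&ctx.done);
  return wres;
}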
diff --git a/3rdparty/7z/src/Xz.c b/3rdparty/7z/src/Xz.c
deleted file mode 100644
index 87eaf80a93..0000000000
--- a/3rdparty/7z/src/Xz.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/* Xz.c - Xz
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "7zCrc.h"
-#include "CpuArch.h"
-#include "Xz.h"
-#include "XzCrc64.h"
-
-const Byte XZ_SIG[XZ_SIG_SIZE] = { 0xFD, '7', 'z', 'X', 'Z', 0 };
-/* const Byte XZ_FOOTER_SIG[XZ_FOOTER_SIG_SIZE] = { 'Y', 'Z' }; */
-
-unsigned Xz_WriteVarInt(Byte *buf, UInt64 v)
-{
- unsigned i = 0;
- do
- {
- buf[i++] = (Byte)((v & 0x7F) | 0x80);
- v >>= 7;
- }
- while (v != 0);
- buf[(size_t)i - 1] &= 0x7F;
- return i;
-}
-
-void Xz_Construct(CXzStream *p)
-{
- p->numBlocks = 0;
- p->blocks = NULL;
- p->flags = 0;
-}
-
-void Xz_Free(CXzStream *p, ISzAllocPtr alloc)
-{
- ISzAlloc_Free(alloc, p->blocks);
- p->numBlocks = 0;
- p->blocks = NULL;
-}
-
-unsigned XzFlags_GetCheckSize(CXzStreamFlags f)
-{
- unsigned t = XzFlags_GetCheckType(f);
- return (t == 0) ? 0 : ((unsigned)4 << ((t - 1) / 3));
-}
-
-void XzCheck_Init(CXzCheck *p, unsigned mode)
-{
- p->mode = mode;
- switch (mode)
- {
- case XZ_CHECK_CRC32: p->crc = CRC_INIT_VAL; break;
- case XZ_CHECK_CRC64: p->crc64 = CRC64_INIT_VAL; break;
- case XZ_CHECK_SHA256: Sha256_Init(&p->sha); break;
- }
-}
-
-void XzCheck_Update(CXzCheck *p, const void *data, size_t size)
-{
- switch (p->mode)
- {
- case XZ_CHECK_CRC32: p->crc = CrcUpdate(p->crc, data, size); break;
- case XZ_CHECK_CRC64: p->crc64 = Crc64Update(p->crc64, data, size); break;
- case XZ_CHECK_SHA256: Sha256_Update(&p->sha, (const Byte *)data, size); break;
- }
-}
-
-int XzCheck_Final(CXzCheck *p, Byte *digest)
-{
- switch (p->mode)
- {
- case XZ_CHECK_CRC32:
- SetUi32(digest, CRC_GET_DIGEST(p->crc))
- break;
- case XZ_CHECK_CRC64:
- {
- int i;
- UInt64 v = CRC64_GET_DIGEST(p->crc64);
- for (i = 0; i < 8; i++, v >>= 8)
- digest[i] = (Byte)(v & 0xFF);
- break;
- }
- case XZ_CHECK_SHA256:
- Sha256_Final(&p->sha, digest);
- break;
- default:
- return 0;
- }
- return 1;
-}
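Xz_WriteVarInt above emits the xz variable-length integer encoding: 7 payload bits per byte, least-significant group first, with the continuation bit set on every byte except the last. A worked sketch of one encoding (Xz_ReadVarInt, removed further below in XzDec.c, reverses it):

#include "Xz.h"

static void VarIntSketch(void)
{
  Byte buf[9];                                    /* 9 bytes is the format's maximum */
  const unsigned len = Xz_WriteVarInt(buf, 300);  /* len == 2, buf = { 0xAC, 0x02 } */
  /* steps for v = 300 (0x12C):
       buf[0] = (300 & 0x7F) | 0x80 = 0xAC;   v >>= 7  ->  2
       buf[1] = (  2 & 0x7F) | 0x80 = 0x82;   v >>= 7  ->  0, loop ends
       buf[len - 1] &= 0x7F                   ->  0x02
     decoding: (0xAC & 0x7F) | ((UInt64)(0x02 & 0x7F) << 7) = 0x2C + 0x100 = 300 */
  (void)len;
  (void)buf;
}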
diff --git a/3rdparty/7z/src/Xz.h b/3rdparty/7z/src/Xz.h
deleted file mode 100644
index 27472db917..0000000000
--- a/3rdparty/7z/src/Xz.h
+++ /dev/null
@@ -1,535 +0,0 @@
-/* Xz.h - Xz interface
-2023-04-13 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_XZ_H
-#define ZIP7_INC_XZ_H
-
-#include "Sha256.h"
-#include "Delta.h"
-
-EXTERN_C_BEGIN
-
-#define XZ_ID_Subblock 1
-#define XZ_ID_Delta 3
-#define XZ_ID_X86 4
-#define XZ_ID_PPC 5
-#define XZ_ID_IA64 6
-#define XZ_ID_ARM 7
-#define XZ_ID_ARMT 8
-#define XZ_ID_SPARC 9
-#define XZ_ID_ARM64 0xa
-#define XZ_ID_LZMA2 0x21
-
-unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value);
-unsigned Xz_WriteVarInt(Byte *buf, UInt64 v);
-
-/* ---------- xz block ---------- */
-
-#define XZ_BLOCK_HEADER_SIZE_MAX 1024
-
-#define XZ_NUM_FILTERS_MAX 4
-#define XZ_BF_NUM_FILTERS_MASK 3
-#define XZ_BF_PACK_SIZE (1 << 6)
-#define XZ_BF_UNPACK_SIZE (1 << 7)
-
-#define XZ_FILTER_PROPS_SIZE_MAX 20
-
-typedef struct
-{
- UInt64 id;
- UInt32 propsSize;
- Byte props[XZ_FILTER_PROPS_SIZE_MAX];
-} CXzFilter;
-
-typedef struct
-{
- UInt64 packSize;
- UInt64 unpackSize;
- Byte flags;
- CXzFilter filters[XZ_NUM_FILTERS_MAX];
-} CXzBlock;
-
-#define XzBlock_GetNumFilters(p) (((unsigned)(p)->flags & XZ_BF_NUM_FILTERS_MASK) + 1)
-#define XzBlock_HasPackSize(p) (((p)->flags & XZ_BF_PACK_SIZE) != 0)
-#define XzBlock_HasUnpackSize(p) (((p)->flags & XZ_BF_UNPACK_SIZE) != 0)
-#define XzBlock_HasUnsupportedFlags(p) (((p)->flags & ~(XZ_BF_NUM_FILTERS_MASK | XZ_BF_PACK_SIZE | XZ_BF_UNPACK_SIZE)) != 0)
-
-SRes XzBlock_Parse(CXzBlock *p, const Byte *header);
-SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStreamPtr inStream, BoolInt *isIndex, UInt32 *headerSizeRes);
-
-/* ---------- xz stream ---------- */
-
-#define XZ_SIG_SIZE 6
-#define XZ_FOOTER_SIG_SIZE 2
-
-extern const Byte XZ_SIG[XZ_SIG_SIZE];
-
-/*
-extern const Byte XZ_FOOTER_SIG[XZ_FOOTER_SIG_SIZE];
-*/
-
-#define XZ_FOOTER_SIG_0 'Y'
-#define XZ_FOOTER_SIG_1 'Z'
-
-#define XZ_STREAM_FLAGS_SIZE 2
-#define XZ_STREAM_CRC_SIZE 4
-
-#define XZ_STREAM_HEADER_SIZE (XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE + XZ_STREAM_CRC_SIZE)
-#define XZ_STREAM_FOOTER_SIZE (XZ_FOOTER_SIG_SIZE + XZ_STREAM_FLAGS_SIZE + XZ_STREAM_CRC_SIZE + 4)
-
-#define XZ_CHECK_MASK 0xF
-#define XZ_CHECK_NO 0
-#define XZ_CHECK_CRC32 1
-#define XZ_CHECK_CRC64 4
-#define XZ_CHECK_SHA256 10
-
-typedef struct
-{
- unsigned mode;
- UInt32 crc;
- UInt64 crc64;
- CSha256 sha;
-} CXzCheck;
-
-void XzCheck_Init(CXzCheck *p, unsigned mode);
-void XzCheck_Update(CXzCheck *p, const void *data, size_t size);
-int XzCheck_Final(CXzCheck *p, Byte *digest);
-
-typedef UInt16 CXzStreamFlags;
-
-#define XzFlags_IsSupported(f) ((f) <= XZ_CHECK_MASK)
-#define XzFlags_GetCheckType(f) ((f) & XZ_CHECK_MASK)
-#define XzFlags_HasDataCrc32(f) (Xz_GetCheckType(f) == XZ_CHECK_CRC32)
-unsigned XzFlags_GetCheckSize(CXzStreamFlags f);
-
-SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf);
-SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStreamPtr inStream);
-
-typedef struct
-{
- UInt64 unpackSize;
- UInt64 totalSize;
-} CXzBlockSizes;
-
-typedef struct
-{
- CXzStreamFlags flags;
- // Byte _pad[6];
- size_t numBlocks;
- CXzBlockSizes *blocks;
- UInt64 startOffset;
-} CXzStream;
-
-void Xz_Construct(CXzStream *p);
-void Xz_Free(CXzStream *p, ISzAllocPtr alloc);
-
-#define XZ_SIZE_OVERFLOW ((UInt64)(Int64)-1)
-
-UInt64 Xz_GetUnpackSize(const CXzStream *p);
-UInt64 Xz_GetPackSize(const CXzStream *p);
-
-typedef struct
-{
- size_t num;
- size_t numAllocated;
- CXzStream *streams;
-} CXzs;
-
-void Xzs_Construct(CXzs *p);
-void Xzs_Free(CXzs *p, ISzAllocPtr alloc);
-SRes Xzs_ReadBackward(CXzs *p, ILookInStreamPtr inStream, Int64 *startOffset, ICompressProgressPtr progress, ISzAllocPtr alloc);
-
-UInt64 Xzs_GetNumBlocks(const CXzs *p);
-UInt64 Xzs_GetUnpackSize(const CXzs *p);
-
-
-// ECoderStatus values are identical to ELzmaStatus values of LZMA2 decoder
-
-typedef enum
-{
- CODER_STATUS_NOT_SPECIFIED, /* use main error code instead */
- CODER_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
- CODER_STATUS_NOT_FINISHED, /* stream was not finished */
- CODER_STATUS_NEEDS_MORE_INPUT /* you must provide more input bytes */
-} ECoderStatus;
-
-
-// ECoderFinishMode values are identical to ELzmaFinishMode
-
-typedef enum
-{
- CODER_FINISH_ANY, /* finish at any point */
- CODER_FINISH_END /* block must be finished at the end */
-} ECoderFinishMode;
-
-
-typedef struct
-{
- void *p; // state object;
- void (*Free)(void *p, ISzAllocPtr alloc);
- SRes (*SetProps)(void *p, const Byte *props, size_t propSize, ISzAllocPtr alloc);
- void (*Init)(void *p);
- SRes (*Code2)(void *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- int srcWasFinished, ECoderFinishMode finishMode,
- // int *wasFinished,
- ECoderStatus *status);
- SizeT (*Filter)(void *p, Byte *data, SizeT size);
-} IStateCoder;
-
-
-typedef struct
-{
- UInt32 methodId;
- UInt32 delta;
- UInt32 ip;
- UInt32 X86_State;
- Byte delta_State[DELTA_STATE_SIZE];
-} CXzBcFilterStateBase;
-
-typedef SizeT (*Xz_Func_BcFilterStateBase_Filter)(CXzBcFilterStateBase *p, Byte *data, SizeT size);
-
-SRes Xz_StateCoder_Bc_SetFromMethod_Func(IStateCoder *p, UInt64 id,
- Xz_Func_BcFilterStateBase_Filter func, ISzAllocPtr alloc);
-
-
-#define MIXCODER_NUM_FILTERS_MAX 4
-
-typedef struct
-{
- ISzAllocPtr alloc;
- Byte *buf;
- unsigned numCoders;
-
- Byte *outBuf;
- size_t outBufSize;
- size_t outWritten; // is equal to lzmaDecoder.dicPos (in outBuf mode)
- BoolInt wasFinished;
- SRes res;
- ECoderStatus status;
- // BoolInt SingleBufMode;
-
- int finished[MIXCODER_NUM_FILTERS_MAX - 1];
- size_t pos[MIXCODER_NUM_FILTERS_MAX - 1];
- size_t size[MIXCODER_NUM_FILTERS_MAX - 1];
- UInt64 ids[MIXCODER_NUM_FILTERS_MAX];
- SRes results[MIXCODER_NUM_FILTERS_MAX];
- IStateCoder coders[MIXCODER_NUM_FILTERS_MAX];
-} CMixCoder;
-
-
-typedef enum
-{
- XZ_STATE_STREAM_HEADER,
- XZ_STATE_STREAM_INDEX,
- XZ_STATE_STREAM_INDEX_CRC,
- XZ_STATE_STREAM_FOOTER,
- XZ_STATE_STREAM_PADDING,
- XZ_STATE_BLOCK_HEADER,
- XZ_STATE_BLOCK,
- XZ_STATE_BLOCK_FOOTER
-} EXzState;
-
-
-typedef struct
-{
- EXzState state;
- UInt32 pos;
- unsigned alignPos;
- unsigned indexPreSize;
-
- CXzStreamFlags streamFlags;
-
- UInt32 blockHeaderSize;
- UInt64 packSize;
- UInt64 unpackSize;
-
- UInt64 numBlocks; // number of finished blocks in current stream
- UInt64 indexSize;
- UInt64 indexPos;
- UInt64 padSize;
-
- UInt64 numStartedStreams;
- UInt64 numFinishedStreams;
- UInt64 numTotalBlocks;
-
- UInt32 crc;
- CMixCoder decoder;
- CXzBlock block;
- CXzCheck check;
- CSha256 sha;
-
- BoolInt parseMode;
- BoolInt headerParsedOk;
- BoolInt decodeToStreamSignature;
- unsigned decodeOnlyOneBlock;
-
- Byte *outBuf;
- size_t outBufSize;
- size_t outDataWritten; // the size of data in (outBuf) that were fully unpacked
-
- Byte shaDigest[SHA256_DIGEST_SIZE];
- Byte buf[XZ_BLOCK_HEADER_SIZE_MAX];
-} CXzUnpacker;
-
-/* alloc : aligned for cache line allocation is better */
-void XzUnpacker_Construct(CXzUnpacker *p, ISzAllocPtr alloc);
-void XzUnpacker_Init(CXzUnpacker *p);
-void XzUnpacker_SetOutBuf(CXzUnpacker *p, Byte *outBuf, size_t outBufSize);
-void XzUnpacker_Free(CXzUnpacker *p);
-
-/*
- XzUnpacker
- The sequence for decoding functions:
- {
- XzUnpacker_Construct()
- [Decoding_Calls]
- XzUnpacker_Free()
- }
-
- [Decoding_Calls]
-
- There are 3 types of interfaces for [Decoding_Calls] calls:
-
- Interface-1 : Partial output buffers:
- {
- XzUnpacker_Init()
- for()
- {
- XzUnpacker_Code();
- }
- XzUnpacker_IsStreamWasFinished()
- }
-
- Interface-2 : Direct output buffer:
- Use it, if you know exact size of decoded data, and you need
- whole xz unpacked data in one output buffer.
- xz unpacker doesn't allocate additional buffer for lzma2 dictionary in that mode.
- {
- XzUnpacker_Init()
- XzUnpacker_SetOutBufMode(); // to set output buffer and size
- for()
- {
- XzUnpacker_Code(); // (dest = NULL) in XzUnpacker_Code()
- }
- XzUnpacker_IsStreamWasFinished()
- }
-
- Interface-3 : Direct output buffer : One call full decoding
- It unpacks whole input buffer to output buffer in one call.
- It uses Interface-2 internally.
- {
- XzUnpacker_CodeFull()
- XzUnpacker_IsStreamWasFinished()
- }
-*/
-
-/*
-finishMode:
- It has meaning only if the decoding reaches output limit (*destLen).
- CODER_FINISH_ANY - use smallest number of input bytes
- CODER_FINISH_END - read EndOfStream marker after decoding
-
-Returns:
- SZ_OK
- status:
- CODER_STATUS_NOT_FINISHED,
- CODER_STATUS_NEEDS_MORE_INPUT - the decoder can return it in two cases:
- 1) it needs more input data to finish current xz stream
- 2) xz stream was finished successfully. But the decoder supports multiple
-          concatenated xz streams. So it expects more input data for new xz streams.
-          Call XzUnpacker_IsStreamWasFinished() to check that the latest xz stream was finished successfully.
-
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_DATA - Data error
- SZ_ERROR_UNSUPPORTED - Unsupported method or method properties
- SZ_ERROR_CRC - CRC error
- // SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
-
- SZ_ERROR_NO_ARCHIVE - the error with xz Stream Header with one of the following reasons:
- - xz Stream Signature failure
-     - CRC32 check of the xz Stream Header failed
-     - The size of Stream padding is not a multiple of four bytes.
-    It's possible to get that error, if the xz stream was finished and the stream
-    contains some additional data. In that case you can call the XzUnpacker_GetExtraSize()
-    function to get the real size of the xz stream.
-*/
-
-
-SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen, int srcFinished,
- ECoderFinishMode finishMode, ECoderStatus *status);
-
-SRes XzUnpacker_CodeFull(CXzUnpacker *p, Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen,
- ECoderFinishMode finishMode, ECoderStatus *status);
-
-/*
-If you decode full xz stream(s), then you can call XzUnpacker_IsStreamWasFinished()
-after successful XzUnpacker_CodeFull() or after last call of XzUnpacker_Code().
-*/
-
-BoolInt XzUnpacker_IsStreamWasFinished(const CXzUnpacker *p);
-
-/*
-XzUnpacker_GetExtraSize() returns the number of unconfirmed bytes,
- if it's in (XZ_STATE_STREAM_HEADER) state or in (XZ_STATE_STREAM_PADDING) state.
-These bytes can be some data after xz archive, or
-it can be start of new xz stream.
-
-Call XzUnpacker_GetExtraSize() after XzUnpacker_Code() function to detect real size of
-xz stream in two cases, if XzUnpacker_Code() returns:
- res == SZ_OK && status == CODER_STATUS_NEEDS_MORE_INPUT
- res == SZ_ERROR_NO_ARCHIVE
-*/
-
-UInt64 XzUnpacker_GetExtraSize(const CXzUnpacker *p);
-
-
-/*
- for random block decoding:
- XzUnpacker_Init();
- set CXzUnpacker::streamFlags
- XzUnpacker_PrepareToRandomBlockDecoding()
- loop
- {
- XzUnpacker_Code()
- XzUnpacker_IsBlockFinished()
- }
-*/
-
-void XzUnpacker_PrepareToRandomBlockDecoding(CXzUnpacker *p);
-BoolInt XzUnpacker_IsBlockFinished(const CXzUnpacker *p);
-
-#define XzUnpacker_GetPackSizeForIndex(p) ((p)->packSize + (p)->blockHeaderSize + XzFlags_GetCheckSize((p)->streamFlags))
-
-
-
-
-
-
-/* ---- Single-Thread and Multi-Thread xz Decoding with Input/Output Streams ---- */
-
-/*
- if (CXzDecMtProps::numThreads > 1), the decoder can try to use
- Multi-Threading. The decoder analyses xz block header, and if
- there are pack size and unpack size values stored in xz block header,
- the decoder reads compressed data of block to internal buffers,
- and then it can start parallel decoding, if there are another blocks.
- The decoder can switch back to Single-Thread decoding after some conditions.
-
- The sequence of calls for xz decoding with in/out Streams:
- {
- XzDecMt_Create()
- XzDecMtProps_Init(XzDecMtProps) to set default values of properties
- // then you can change some XzDecMtProps parameters with required values
- // here you can set the number of threads and (memUseMax) - the maximum
- Memory usage for multithreading decoding.
- for()
- {
- XzDecMt_Decode() // one call per one file
- }
- XzDecMt_Destroy()
- }
-*/
-
-
-typedef struct
-{
- size_t inBufSize_ST; // size of input buffer for Single-Thread decoding
- size_t outStep_ST; // size of output buffer for Single-Thread decoding
- BoolInt ignoreErrors; // if set to 1, the decoder can ignore some errors and it skips broken parts of data.
-
- #ifndef Z7_ST
-  unsigned numThreads; // the number of threads for Multi-Thread decoding. if (numThreads == 1) it will use Single-Thread decoding
-  size_t inBufSize_MT; // size of small input data buffers for Multi-Thread decoding. A large number of such small buffers can be created
-  size_t memUseMax; // the limit of total memory usage for Multi-Thread decoding.
-          // it's recommended to set (memUseMax) manually to a value that is smaller than the total RAM in the computer.
- #endif
-} CXzDecMtProps;
-
-void XzDecMtProps_Init(CXzDecMtProps *p);
-
-typedef struct CXzDecMt CXzDecMt;
-typedef CXzDecMt * CXzDecMtHandle;
-// Z7_DECLARE_HANDLE(CXzDecMtHandle)
-
-/*
- alloc : XzDecMt uses CAlignOffsetAlloc internally for addresses allocated by (alloc).
- allocMid : for big allocations, aligned allocation is better
-*/
-
-CXzDecMtHandle XzDecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid);
-void XzDecMt_Destroy(CXzDecMtHandle p);
-
-
-typedef struct
-{
- Byte UnpackSize_Defined;
- Byte NumStreams_Defined;
- Byte NumBlocks_Defined;
-
-  Byte DataAfterEnd; // there is some additional data after the good xz streams, and that data is not a new xz stream.
-  Byte DecodingTruncated; // decoding was truncated; we need only partial output data
-
- UInt64 InSize; // pack size processed. That value doesn't include the data after
- // end of xz stream, if that data was not correct
- UInt64 OutSize;
-
- UInt64 NumStreams;
- UInt64 NumBlocks;
-
- SRes DecodeRes; // the error code of xz streams data decoding
- SRes ReadRes; // error code from ISeqInStream:Read()
- SRes ProgressRes; // error code from ICompressProgress:Progress()
-
-  SRes CombinedRes; // combined result error code that shows the main result
- // = S_OK, if there is no error.
- // but check also (DataAfterEnd) that can show additional minor errors.
-
- SRes CombinedRes_Type; // = SZ_ERROR_READ, if error from ISeqInStream
- // = SZ_ERROR_PROGRESS, if error from ICompressProgress
- // = SZ_ERROR_WRITE, if error from ISeqOutStream
- // = SZ_ERROR_* codes for decoding
-} CXzStatInfo;
-
-void XzStatInfo_Clear(CXzStatInfo *p);
-
-/*
-
-XzDecMt_Decode()
-SRes: it's combined decoding result. It also is equal to stat->CombinedRes.
-
- SZ_OK - no error
- check also output value in (stat->DataAfterEnd)
- that can show additional possible error
-
- SZ_ERROR_MEM - Memory allocation error
- SZ_ERROR_NO_ARCHIVE - is not xz archive
- SZ_ERROR_ARCHIVE - Headers error
- SZ_ERROR_DATA - Data Error
- SZ_ERROR_UNSUPPORTED - Unsupported method or method properties
- SZ_ERROR_CRC - CRC Error
- SZ_ERROR_INPUT_EOF - it needs more input data
- SZ_ERROR_WRITE - ISeqOutStream error
- (SZ_ERROR_READ) - ISeqInStream errors
- (SZ_ERROR_PROGRESS) - ICompressProgress errors
- // SZ_ERROR_THREAD - error in multi-threading functions
- MY_SRes_HRESULT_FROM_WRes(WRes_error) - error in multi-threading function
-*/
-
-SRes XzDecMt_Decode(CXzDecMtHandle p,
- const CXzDecMtProps *props,
- const UInt64 *outDataSize, // NULL means undefined
- int finishMode, // 0 - partial unpacking is allowed, 1 - xz stream(s) must be finished
- ISeqOutStreamPtr outStream,
- // Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- // const Byte *inData, size_t inDataSize,
- CXzStatInfo *stat, // out: decoding results and statistics
- int *isMT, // out: 0 means that ST (Single-Thread) version was used
- // 1 means that MT (Multi-Thread) version was used
- ICompressProgressPtr progress);
-
-EXTERN_C_END
-
-#endif
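The "Interface-3" sequence documented in Xz.h above maps to a short helper. A sketch, assuming the CRC tables have already been initialized once via CrcGenerateTable()/Crc64GenerateTable() and that g_Alloc from Alloc.h is used as the allocator; DecodeXzWhole is an illustrative name:

#include "Alloc.h"   /* g_Alloc */
#include "Xz.h"

/* one-call decode of a whole in-memory xz stream into a caller-provided buffer */
static SRes DecodeXzWhole(Byte *dest, SizeT destSize,
    const Byte *src, SizeT srcSize, SizeT *unpackedSize)
{
  CXzUnpacker p;
  ECoderStatus status;
  SizeT destLen = destSize;
  SizeT srcLen = srcSize;
  SRes res;

  XzUnpacker_Construct(&p, &g_Alloc);
  res = XzUnpacker_CodeFull(&p, dest, &destLen, src, &srcLen,
      CODER_FINISH_END, &status);
  if (res == SZ_OK && !XzUnpacker_IsStreamWasFinished(&p))
    res = SZ_ERROR_DATA;   /* truncated input or trailing non-xz data */
  XzUnpacker_Free(&p);
  *unpackedSize = destLen;
  return res;
}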
diff --git a/3rdparty/7z/src/XzCrc64.c b/3rdparty/7z/src/XzCrc64.c
deleted file mode 100644
index b2592af3ee..0000000000
--- a/3rdparty/7z/src/XzCrc64.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/* XzCrc64.c -- CRC64 calculation
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "XzCrc64.h"
-#include "CpuArch.h"
-
-#define kCrc64Poly UINT64_CONST(0xC96C5795D7870F42)
-
-#ifdef MY_CPU_LE
- #define CRC64_NUM_TABLES 4
-#else
- #define CRC64_NUM_TABLES 5
-
- UInt64 Z7_FASTCALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
-#endif
-
-#ifndef MY_CPU_BE
- UInt64 Z7_FASTCALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
-#endif
-
-typedef UInt64 (Z7_FASTCALL *CRC64_FUNC)(UInt64 v, const void *data, size_t size, const UInt64 *table);
-
-static CRC64_FUNC g_Crc64Update;
-UInt64 g_Crc64Table[256 * CRC64_NUM_TABLES];
-
-UInt64 Z7_FASTCALL Crc64Update(UInt64 v, const void *data, size_t size)
-{
- return g_Crc64Update(v, data, size, g_Crc64Table);
-}
-
-UInt64 Z7_FASTCALL Crc64Calc(const void *data, size_t size)
-{
- return g_Crc64Update(CRC64_INIT_VAL, data, size, g_Crc64Table) ^ CRC64_INIT_VAL;
-}
-
-void Z7_FASTCALL Crc64GenerateTable(void)
-{
- UInt32 i;
- for (i = 0; i < 256; i++)
- {
- UInt64 r = i;
- unsigned j;
- for (j = 0; j < 8; j++)
- r = (r >> 1) ^ (kCrc64Poly & ((UInt64)0 - (r & 1)));
- g_Crc64Table[i] = r;
- }
- for (i = 256; i < 256 * CRC64_NUM_TABLES; i++)
- {
- const UInt64 r = g_Crc64Table[(size_t)i - 256];
- g_Crc64Table[i] = g_Crc64Table[r & 0xFF] ^ (r >> 8);
- }
-
- #ifdef MY_CPU_LE
-
- g_Crc64Update = XzCrc64UpdateT4;
-
- #else
- {
- #ifndef MY_CPU_BE
- UInt32 k = 1;
- if (*(const Byte *)&k == 1)
- g_Crc64Update = XzCrc64UpdateT4;
- else
- #endif
- {
- for (i = 256 * CRC64_NUM_TABLES - 1; i >= 256; i--)
- {
- const UInt64 x = g_Crc64Table[(size_t)i - 256];
- g_Crc64Table[i] = Z7_BSWAP64(x);
- }
- g_Crc64Update = XzCrc64UpdateT1_BeT4;
- }
- }
- #endif
-}
-
-#undef kCrc64Poly
-#undef CRC64_NUM_TABLES
diff --git a/3rdparty/7z/src/XzCrc64.h b/3rdparty/7z/src/XzCrc64.h
deleted file mode 100644
index 1d2793fbc6..0000000000
--- a/3rdparty/7z/src/XzCrc64.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* XzCrc64.h -- CRC64 calculation
-2023-04-02 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_XZ_CRC64_H
-#define ZIP7_INC_XZ_CRC64_H
-
-#include <stddef.h>
-
-#include "7zTypes.h"
-
-EXTERN_C_BEGIN
-
-extern UInt64 g_Crc64Table[];
-
-void Z7_FASTCALL Crc64GenerateTable(void);
-
-#define CRC64_INIT_VAL UINT64_CONST(0xFFFFFFFFFFFFFFFF)
-#define CRC64_GET_DIGEST(crc) ((crc) ^ CRC64_INIT_VAL)
-#define CRC64_UPDATE_BYTE(crc, b) (g_Crc64Table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
-
-UInt64 Z7_FASTCALL Crc64Update(UInt64 crc, const void *data, size_t size);
-UInt64 Z7_FASTCALL Crc64Calc(const void *data, size_t size);
-
-EXTERN_C_END
-
-#endif
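The CRC64 API above is table-driven: Crc64GenerateTable() runs once, then either Crc64Calc() hashes a whole buffer or the CRC64_INIT_VAL / Crc64Update / CRC64_GET_DIGEST macros support streaming. A sketch showing that the streaming form over two parts matches Crc64Calc() over their concatenation:

#include "XzCrc64.h"

static UInt64 Crc64OfTwoParts(const Byte *a, size_t aSize, const Byte *b, size_t bSize)
{
  UInt64 crc;
  Crc64GenerateTable();            /* must run once before any other CRC64 call */
  crc = CRC64_INIT_VAL;            /* streaming form */
  crc = Crc64Update(crc, a, aSize);
  crc = Crc64Update(crc, b, bSize);
  return CRC64_GET_DIGEST(crc);    /* same value as Crc64Calc() over a followed by b */
}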
diff --git a/3rdparty/7z/src/XzCrc64Opt.c b/3rdparty/7z/src/XzCrc64Opt.c
deleted file mode 100644
index 524d8a1239..0000000000
--- a/3rdparty/7z/src/XzCrc64Opt.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* XzCrc64Opt.c -- CRC64 calculation
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include "CpuArch.h"
-
-#ifndef MY_CPU_BE
-
-#define CRC64_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
-
-UInt64 Z7_FASTCALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
-UInt64 Z7_FASTCALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
-{
- const Byte *p = (const Byte *)data;
- for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
- v = CRC64_UPDATE_BYTE_2(v, *p);
- for (; size >= 4; size -= 4, p += 4)
- {
- const UInt32 d = (UInt32)v ^ *(const UInt32 *)(const void *)p;
- v = (v >> 32)
- ^ (table + 0x300)[((d ) & 0xFF)]
- ^ (table + 0x200)[((d >> 8) & 0xFF)]
- ^ (table + 0x100)[((d >> 16) & 0xFF)]
- ^ (table + 0x000)[((d >> 24))];
- }
- for (; size > 0; size--, p++)
- v = CRC64_UPDATE_BYTE_2(v, *p);
- return v;
-}
-
-#endif
-
-
-#ifndef MY_CPU_LE
-
-#define CRC64_UPDATE_BYTE_2_BE(crc, b) (table[(Byte)((crc) >> 56) ^ (b)] ^ ((crc) << 8))
-
-UInt64 Z7_FASTCALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
-UInt64 Z7_FASTCALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
-{
- const Byte *p = (const Byte *)data;
- table += 0x100;
- v = Z7_BSWAP64(v);
- for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
- v = CRC64_UPDATE_BYTE_2_BE(v, *p);
- for (; size >= 4; size -= 4, p += 4)
- {
- const UInt32 d = (UInt32)(v >> 32) ^ *(const UInt32 *)(const void *)p;
- v = (v << 32)
- ^ (table + 0x000)[((d ) & 0xFF)]
- ^ (table + 0x100)[((d >> 8) & 0xFF)]
- ^ (table + 0x200)[((d >> 16) & 0xFF)]
- ^ (table + 0x300)[((d >> 24))];
- }
- for (; size > 0; size--, p++)
- v = CRC64_UPDATE_BYTE_2_BE(v, *p);
- return Z7_BSWAP64(v);
-}
-
-#endif
diff --git a/3rdparty/7z/src/XzDec.c b/3rdparty/7z/src/XzDec.c
deleted file mode 100644
index 2d113a6643..0000000000
--- a/3rdparty/7z/src/XzDec.c
+++ /dev/null
@@ -1,2875 +0,0 @@
-/* XzDec.c -- Xz Decode
-2023-04-13 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-// #include <stdio.h>
-
-// #define XZ_DUMP
-
-/* #define XZ_DUMP */
-
-#ifdef XZ_DUMP
-#include <stdio.h>
-#endif
-
-// #define SHOW_DEBUG_INFO
-
-#ifdef SHOW_DEBUG_INFO
-#include <stdio.h>
-#endif
-
-#ifdef SHOW_DEBUG_INFO
-#define PRF(x) x
-#else
-#define PRF(x)
-#endif
-
-#define PRF_STR(s) PRF(printf("\n" s "\n"))
-#define PRF_STR_INT(s, d) PRF(printf("\n" s " %d\n", (unsigned)d))
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "7zCrc.h"
-#include "Alloc.h"
-#include "Bra.h"
-#include "CpuArch.h"
-#include "Delta.h"
-#include "Lzma2Dec.h"
-
-// #define USE_SUBBLOCK
-
-#ifdef USE_SUBBLOCK
-#include "Bcj3Dec.c"
-#include "SbDec.h"
-#endif
-
-#include "Xz.h"
-
-#define XZ_CHECK_SIZE_MAX 64
-
-#define CODER_BUF_SIZE ((size_t)1 << 17)
-
-unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value)
-{
- unsigned i, limit;
- *value = 0;
- limit = (maxSize > 9) ? 9 : (unsigned)maxSize;
-
- for (i = 0; i < limit;)
- {
- Byte b = p[i];
- *value |= (UInt64)(b & 0x7F) << (7 * i++);
- if ((b & 0x80) == 0)
- return (b == 0 && i != 1) ? 0 : i;
- }
- return 0;
-}
-
-
-/* ---------- XzBcFilterState ---------- */
-
-#define BRA_BUF_SIZE (1 << 14)
-
-typedef struct
-{
- size_t bufPos;
- size_t bufConv;
- size_t bufTotal;
- Byte *buf; // must be aligned for 4 bytes
- Xz_Func_BcFilterStateBase_Filter filter_func;
- // int encodeMode;
- CXzBcFilterStateBase base;
- // Byte buf[BRA_BUF_SIZE];
-} CXzBcFilterState;
-
-
-static void XzBcFilterState_Free(void *pp, ISzAllocPtr alloc)
-{
- if (pp)
- {
- CXzBcFilterState *p = ((CXzBcFilterState *)pp);
- ISzAlloc_Free(alloc, p->buf);
- ISzAlloc_Free(alloc, pp);
- }
-}
-
-
-static SRes XzBcFilterState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
-{
- CXzBcFilterStateBase *p = &((CXzBcFilterState *)pp)->base;
- UNUSED_VAR(alloc)
- p->ip = 0;
- if (p->methodId == XZ_ID_Delta)
- {
- if (propSize != 1)
- return SZ_ERROR_UNSUPPORTED;
- p->delta = (unsigned)props[0] + 1;
- }
- else
- {
- if (propSize == 4)
- {
- UInt32 v = GetUi32(props);
- switch (p->methodId)
- {
- case XZ_ID_PPC:
- case XZ_ID_ARM:
- case XZ_ID_SPARC:
- case XZ_ID_ARM64:
- if ((v & 3) != 0)
- return SZ_ERROR_UNSUPPORTED;
- break;
- case XZ_ID_ARMT:
- if ((v & 1) != 0)
- return SZ_ERROR_UNSUPPORTED;
- break;
- case XZ_ID_IA64:
- if ((v & 0xF) != 0)
- return SZ_ERROR_UNSUPPORTED;
- break;
- }
- p->ip = v;
- }
- else if (propSize != 0)
- return SZ_ERROR_UNSUPPORTED;
- }
- return SZ_OK;
-}
-
-
-static void XzBcFilterState_Init(void *pp)
-{
- CXzBcFilterState *p = ((CXzBcFilterState *)pp);
- p->bufPos = p->bufConv = p->bufTotal = 0;
- p->base.X86_State = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
- if (p->base.methodId == XZ_ID_Delta)
- Delta_Init(p->base.delta_State);
-}
-
-
-static const z7_Func_BranchConv g_Funcs_BranchConv_RISC_Dec[] =
-{
- Z7_BRANCH_CONV_DEC(PPC),
- Z7_BRANCH_CONV_DEC(IA64),
- Z7_BRANCH_CONV_DEC(ARM),
- Z7_BRANCH_CONV_DEC(ARMT),
- Z7_BRANCH_CONV_DEC(SPARC),
- Z7_BRANCH_CONV_DEC(ARM64)
-};
-
-static SizeT XzBcFilterStateBase_Filter_Dec(CXzBcFilterStateBase *p, Byte *data, SizeT size)
-{
- switch (p->methodId)
- {
- case XZ_ID_Delta:
- Delta_Decode(p->delta_State, p->delta, data, size);
- break;
- case XZ_ID_X86:
- size = (SizeT)(z7_BranchConvSt_X86_Dec(data, size, p->ip, &p->X86_State) - data);
- break;
- default:
- if (p->methodId >= XZ_ID_PPC)
- {
- const UInt32 i = p->methodId - XZ_ID_PPC;
- if (i < Z7_ARRAY_SIZE(g_Funcs_BranchConv_RISC_Dec))
- size = (SizeT)(g_Funcs_BranchConv_RISC_Dec[i](data, size, p->ip) - data);
- }
- break;
- }
- p->ip += (UInt32)size;
- return size;
-}
-
-
-static SizeT XzBcFilterState_Filter(void *pp, Byte *data, SizeT size)
-{
- CXzBcFilterState *p = ((CXzBcFilterState *)pp);
- return p->filter_func(&p->base, data, size);
-}
-
-
-static SRes XzBcFilterState_Code2(void *pp,
- Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen, int srcWasFinished,
- ECoderFinishMode finishMode,
- // int *wasFinished
- ECoderStatus *status)
-{
- CXzBcFilterState *p = ((CXzBcFilterState *)pp);
- SizeT destRem = *destLen;
- SizeT srcRem = *srcLen;
- UNUSED_VAR(finishMode)
-
- *destLen = 0;
- *srcLen = 0;
- // *wasFinished = False;
- *status = CODER_STATUS_NOT_FINISHED;
-
- while (destRem != 0)
- {
- {
- size_t size = p->bufConv - p->bufPos;
- if (size)
- {
- if (size > destRem)
- size = destRem;
- memcpy(dest, p->buf + p->bufPos, size);
- p->bufPos += size;
- *destLen += size;
- dest += size;
- destRem -= size;
- continue;
- }
- }
-
- p->bufTotal -= p->bufPos;
- memmove(p->buf, p->buf + p->bufPos, p->bufTotal);
- p->bufPos = 0;
- p->bufConv = 0;
- {
- size_t size = BRA_BUF_SIZE - p->bufTotal;
- if (size > srcRem)
- size = srcRem;
- memcpy(p->buf + p->bufTotal, src, size);
- *srcLen += size;
- src += size;
- srcRem -= size;
- p->bufTotal += size;
- }
- if (p->bufTotal == 0)
- break;
-
- p->bufConv = p->filter_func(&p->base, p->buf, p->bufTotal);
-
- if (p->bufConv == 0)
- {
- if (!srcWasFinished)
- break;
- p->bufConv = p->bufTotal;
- }
- }
-
- if (p->bufTotal == p->bufPos && srcRem == 0 && srcWasFinished)
- {
- *status = CODER_STATUS_FINISHED_WITH_MARK;
- // *wasFinished = 1;
- }
-
- return SZ_OK;
-}
-
-
-#define XZ_IS_SUPPORTED_FILTER_ID(id) \
- ((id) >= XZ_ID_Delta && (id) <= XZ_ID_ARM64)
-
-SRes Xz_StateCoder_Bc_SetFromMethod_Func(IStateCoder *p, UInt64 id,
- Xz_Func_BcFilterStateBase_Filter func, ISzAllocPtr alloc)
-{
- CXzBcFilterState *decoder;
- if (!XZ_IS_SUPPORTED_FILTER_ID(id))
- return SZ_ERROR_UNSUPPORTED;
- decoder = (CXzBcFilterState *)p->p;
- if (!decoder)
- {
- decoder = (CXzBcFilterState *)ISzAlloc_Alloc(alloc, sizeof(CXzBcFilterState));
- if (!decoder)
- return SZ_ERROR_MEM;
- decoder->buf = ISzAlloc_Alloc(alloc, BRA_BUF_SIZE);
- if (!decoder->buf)
- {
- ISzAlloc_Free(alloc, decoder);
- return SZ_ERROR_MEM;
- }
- p->p = decoder;
- p->Free = XzBcFilterState_Free;
- p->SetProps = XzBcFilterState_SetProps;
- p->Init = XzBcFilterState_Init;
- p->Code2 = XzBcFilterState_Code2;
- p->Filter = XzBcFilterState_Filter;
- decoder->filter_func = func;
- }
- decoder->base.methodId = (UInt32)id;
- // decoder->encodeMode = encodeMode;
- return SZ_OK;
-}
-
-
-
-/* ---------- SbState ---------- */
-
-#ifdef USE_SUBBLOCK
-
-static void SbState_Free(void *pp, ISzAllocPtr alloc)
-{
- CSbDec *p = (CSbDec *)pp;
- SbDec_Free(p);
- ISzAlloc_Free(alloc, pp);
-}
-
-static SRes SbState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
-{
- UNUSED_VAR(pp)
- UNUSED_VAR(props)
- UNUSED_VAR(alloc)
- return (propSize == 0) ? SZ_OK : SZ_ERROR_UNSUPPORTED;
-}
-
-static void SbState_Init(void *pp)
-{
- SbDec_Init((CSbDec *)pp);
-}
-
-static SRes SbState_Code2(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- int srcWasFinished, ECoderFinishMode finishMode,
- // int *wasFinished
- ECoderStatus *status)
-{
- CSbDec *p = (CSbDec *)pp;
- SRes res;
- UNUSED_VAR(srcWasFinished)
- p->dest = dest;
- p->destLen = *destLen;
- p->src = src;
- p->srcLen = *srcLen;
- p->finish = finishMode; /* change it */
- res = SbDec_Decode((CSbDec *)pp);
- *destLen -= p->destLen;
- *srcLen -= p->srcLen;
- // *wasFinished = (*destLen == 0 && *srcLen == 0); /* change it */
- *status = (*destLen == 0 && *srcLen == 0) ?
- CODER_STATUS_FINISHED_WITH_MARK :
- CODER_STATUS_NOT_FINISHED;
- return res;
-}
-
-static SRes SbState_SetFromMethod(IStateCoder *p, ISzAllocPtr alloc)
-{
- CSbDec *decoder = (CSbDec *)p->p;
- if (!decoder)
- {
- decoder = (CSbDec *)ISzAlloc_Alloc(alloc, sizeof(CSbDec));
- if (!decoder)
- return SZ_ERROR_MEM;
- p->p = decoder;
- p->Free = SbState_Free;
- p->SetProps = SbState_SetProps;
- p->Init = SbState_Init;
- p->Code2 = SbState_Code2;
- p->Filter = NULL;
- }
- SbDec_Construct(decoder);
- SbDec_SetAlloc(decoder, alloc);
- return SZ_OK;
-}
-
-#endif
-
-
-
-/* ---------- Lzma2 ---------- */
-
-typedef struct
-{
- CLzma2Dec decoder;
- BoolInt outBufMode;
-} CLzma2Dec_Spec;
-
-
-static void Lzma2State_Free(void *pp, ISzAllocPtr alloc)
-{
- CLzma2Dec_Spec *p = (CLzma2Dec_Spec *)pp;
- if (p->outBufMode)
- Lzma2Dec_FreeProbs(&p->decoder, alloc);
- else
- Lzma2Dec_Free(&p->decoder, alloc);
- ISzAlloc_Free(alloc, pp);
-}
-
-static SRes Lzma2State_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
-{
- if (propSize != 1)
- return SZ_ERROR_UNSUPPORTED;
- {
- CLzma2Dec_Spec *p = (CLzma2Dec_Spec *)pp;
- if (p->outBufMode)
- return Lzma2Dec_AllocateProbs(&p->decoder, props[0], alloc);
- else
- return Lzma2Dec_Allocate(&p->decoder, props[0], alloc);
- }
-}
-
-static void Lzma2State_Init(void *pp)
-{
- Lzma2Dec_Init(&((CLzma2Dec_Spec *)pp)->decoder);
-}
-
-
-/*
- if (outBufMode), then (dest) is not used. Use NULL.
- Data is unpacked to (spec->decoder.decoder.dic) output buffer.
-*/
-
-static SRes Lzma2State_Code2(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
- int srcWasFinished, ECoderFinishMode finishMode,
- // int *wasFinished,
- ECoderStatus *status)
-{
- CLzma2Dec_Spec *spec = (CLzma2Dec_Spec *)pp;
- ELzmaStatus status2;
- /* ELzmaFinishMode fm = (finishMode == LZMA_FINISH_ANY) ? LZMA_FINISH_ANY : LZMA_FINISH_END; */
- SRes res;
- UNUSED_VAR(srcWasFinished)
- if (spec->outBufMode)
- {
- SizeT dicPos = spec->decoder.decoder.dicPos;
- SizeT dicLimit = dicPos + *destLen;
- res = Lzma2Dec_DecodeToDic(&spec->decoder, dicLimit, src, srcLen, (ELzmaFinishMode)finishMode, &status2);
- *destLen = spec->decoder.decoder.dicPos - dicPos;
- }
- else
- res = Lzma2Dec_DecodeToBuf(&spec->decoder, dest, destLen, src, srcLen, (ELzmaFinishMode)finishMode, &status2);
- // *wasFinished = (status2 == LZMA_STATUS_FINISHED_WITH_MARK);
- // ECoderStatus values are identical to ELzmaStatus values of LZMA2 decoder
- *status = (ECoderStatus)status2;
- return res;
-}
-
-
-static SRes Lzma2State_SetFromMethod(IStateCoder *p, Byte *outBuf, size_t outBufSize, ISzAllocPtr alloc)
-{
- CLzma2Dec_Spec *spec = (CLzma2Dec_Spec *)p->p;
- if (!spec)
- {
- spec = (CLzma2Dec_Spec *)ISzAlloc_Alloc(alloc, sizeof(CLzma2Dec_Spec));
- if (!spec)
- return SZ_ERROR_MEM;
- p->p = spec;
- p->Free = Lzma2State_Free;
- p->SetProps = Lzma2State_SetProps;
- p->Init = Lzma2State_Init;
- p->Code2 = Lzma2State_Code2;
- p->Filter = NULL;
- Lzma2Dec_CONSTRUCT(&spec->decoder)
- }
- spec->outBufMode = False;
- if (outBuf)
- {
- spec->outBufMode = True;
- spec->decoder.decoder.dic = outBuf;
- spec->decoder.decoder.dicBufSize = outBufSize;
- }
- return SZ_OK;
-}
-
-
-static SRes Lzma2State_ResetOutBuf(IStateCoder *p, Byte *outBuf, size_t outBufSize)
-{
- CLzma2Dec_Spec *spec = (CLzma2Dec_Spec *)p->p;
- if ((spec->outBufMode && !outBuf) || (!spec->outBufMode && outBuf))
- return SZ_ERROR_FAIL;
- if (outBuf)
- {
- spec->decoder.decoder.dic = outBuf;
- spec->decoder.decoder.dicBufSize = outBufSize;
- }
- return SZ_OK;
-}
-
-
-
-static void MixCoder_Construct(CMixCoder *p, ISzAllocPtr alloc)
-{
- unsigned i;
- p->alloc = alloc;
- p->buf = NULL;
- p->numCoders = 0;
-
- p->outBufSize = 0;
- p->outBuf = NULL;
- // p->SingleBufMode = False;
-
- for (i = 0; i < MIXCODER_NUM_FILTERS_MAX; i++)
- p->coders[i].p = NULL;
-}
-
-
-static void MixCoder_Free(CMixCoder *p)
-{
- unsigned i;
- p->numCoders = 0;
- for (i = 0; i < MIXCODER_NUM_FILTERS_MAX; i++)
- {
- IStateCoder *sc = &p->coders[i];
- if (sc->p)
- {
- sc->Free(sc->p, p->alloc);
- sc->p = NULL;
- }
- }
- if (p->buf)
- {
- ISzAlloc_Free(p->alloc, p->buf);
- p->buf = NULL; /* 9.31: the BUG was fixed */
- }
-}
-
-static void MixCoder_Init(CMixCoder *p)
-{
- unsigned i;
- for (i = 0; i < MIXCODER_NUM_FILTERS_MAX - 1; i++)
- {
- p->size[i] = 0;
- p->pos[i] = 0;
- p->finished[i] = 0;
- }
- for (i = 0; i < p->numCoders; i++)
- {
- IStateCoder *coder = &p->coders[i];
- coder->Init(coder->p);
- p->results[i] = SZ_OK;
- }
- p->outWritten = 0;
- p->wasFinished = False;
- p->res = SZ_OK;
- p->status = CODER_STATUS_NOT_SPECIFIED;
-}
-
-
-static SRes MixCoder_SetFromMethod(CMixCoder *p, unsigned coderIndex, UInt64 methodId, Byte *outBuf, size_t outBufSize)
-{
- IStateCoder *sc = &p->coders[coderIndex];
- p->ids[coderIndex] = methodId;
- switch (methodId)
- {
- case XZ_ID_LZMA2: return Lzma2State_SetFromMethod(sc, outBuf, outBufSize, p->alloc);
- #ifdef USE_SUBBLOCK
- case XZ_ID_Subblock: return SbState_SetFromMethod(sc, p->alloc);
- #endif
- }
- if (coderIndex == 0)
- return SZ_ERROR_UNSUPPORTED;
- return Xz_StateCoder_Bc_SetFromMethod_Func(sc, methodId,
- XzBcFilterStateBase_Filter_Dec, p->alloc);
-}
-
-
-static SRes MixCoder_ResetFromMethod(CMixCoder *p, unsigned coderIndex, UInt64 methodId, Byte *outBuf, size_t outBufSize)
-{
- IStateCoder *sc = &p->coders[coderIndex];
- switch (methodId)
- {
- case XZ_ID_LZMA2: return Lzma2State_ResetOutBuf(sc, outBuf, outBufSize);
- }
- return SZ_ERROR_UNSUPPORTED;
-}
-
-
-
-/*
- if (destFinish) - then unpack data block is finished at (*destLen) position,
- and we can return data that were not processed by filter
-
-output (status) can be:
- CODER_STATUS_NOT_FINISHED
- CODER_STATUS_FINISHED_WITH_MARK
- CODER_STATUS_NEEDS_MORE_INPUT - not implemented still
-*/
-
-static SRes MixCoder_Code(CMixCoder *p,
- Byte *dest, SizeT *destLen, int destFinish,
- const Byte *src, SizeT *srcLen, int srcWasFinished,
- ECoderFinishMode finishMode)
-{
- SizeT destLenOrig = *destLen;
- SizeT srcLenOrig = *srcLen;
-
- *destLen = 0;
- *srcLen = 0;
-
- if (p->wasFinished)
- return p->res;
-
- p->status = CODER_STATUS_NOT_FINISHED;
-
- // if (p->SingleBufMode)
- if (p->outBuf)
- {
- SRes res;
- SizeT destLen2, srcLen2;
- int wasFinished;
-
- PRF_STR("------- MixCoder Single ----------")
-
- srcLen2 = srcLenOrig;
- destLen2 = destLenOrig;
-
- {
- IStateCoder *coder = &p->coders[0];
- res = coder->Code2(coder->p, NULL, &destLen2, src, &srcLen2, srcWasFinished, finishMode,
- // &wasFinished,
- &p->status);
- wasFinished = (p->status == CODER_STATUS_FINISHED_WITH_MARK);
- }
-
- p->res = res;
-
- /*
- if (wasFinished)
- p->status = CODER_STATUS_FINISHED_WITH_MARK;
- else
- {
- if (res == SZ_OK)
- if (destLen2 != destLenOrig)
- p->status = CODER_STATUS_NEEDS_MORE_INPUT;
- }
- */
-
-
- *srcLen = srcLen2;
- src += srcLen2;
- p->outWritten += destLen2;
-
- if (res != SZ_OK || srcWasFinished || wasFinished)
- p->wasFinished = True;
-
- if (p->numCoders == 1)
- *destLen = destLen2;
- else if (p->wasFinished)
- {
- unsigned i;
- size_t processed = p->outWritten;
-
- for (i = 1; i < p->numCoders; i++)
- {
- IStateCoder *coder = &p->coders[i];
- processed = coder->Filter(coder->p, p->outBuf, processed);
- if (wasFinished || (destFinish && p->outWritten == destLenOrig))
- processed = p->outWritten;
- PRF_STR_INT("filter", i)
- }
- *destLen = processed;
- }
- return res;
- }
-
- PRF_STR("standard mix")
-
- if (p->numCoders != 1)
- {
- if (!p->buf)
- {
- p->buf = (Byte *)ISzAlloc_Alloc(p->alloc, CODER_BUF_SIZE * (MIXCODER_NUM_FILTERS_MAX - 1));
- if (!p->buf)
- return SZ_ERROR_MEM;
- }
-
- finishMode = CODER_FINISH_ANY;
- }
-
- for (;;)
- {
- BoolInt processed = False;
- BoolInt allFinished = True;
- SRes resMain = SZ_OK;
- unsigned i;
-
- p->status = CODER_STATUS_NOT_FINISHED;
- /*
- if (p->numCoders == 1 && *destLen == destLenOrig && finishMode == LZMA_FINISH_ANY)
- break;
- */
-
- for (i = 0; i < p->numCoders; i++)
- {
- SRes res;
- IStateCoder *coder = &p->coders[i];
- Byte *dest2;
- SizeT destLen2, srcLen2; // destLen2_Orig;
- const Byte *src2;
- int srcFinished2;
- int encodingWasFinished;
- ECoderStatus status2;
-
- if (i == 0)
- {
- src2 = src;
- srcLen2 = srcLenOrig - *srcLen;
- srcFinished2 = srcWasFinished;
- }
- else
- {
- size_t k = i - 1;
- src2 = p->buf + (CODER_BUF_SIZE * k) + p->pos[k];
- srcLen2 = p->size[k] - p->pos[k];
- srcFinished2 = p->finished[k];
- }
-
- if (i == p->numCoders - 1)
- {
- dest2 = dest;
- destLen2 = destLenOrig - *destLen;
- }
- else
- {
- if (p->pos[i] != p->size[i])
- continue;
- dest2 = p->buf + (CODER_BUF_SIZE * i);
- destLen2 = CODER_BUF_SIZE;
- }
-
- // destLen2_Orig = destLen2;
-
- if (p->results[i] != SZ_OK)
- {
- if (resMain == SZ_OK)
- resMain = p->results[i];
- continue;
- }
-
- res = coder->Code2(coder->p,
- dest2, &destLen2,
- src2, &srcLen2, srcFinished2,
- finishMode,
- // &encodingWasFinished,
- &status2);
-
- if (res != SZ_OK)
- {
- p->results[i] = res;
- if (resMain == SZ_OK)
- resMain = res;
- }
-
- encodingWasFinished = (status2 == CODER_STATUS_FINISHED_WITH_MARK);
-
- if (!encodingWasFinished)
- {
- allFinished = False;
- if (p->numCoders == 1 && res == SZ_OK)
- p->status = status2;
- }
-
- if (i == 0)
- {
- *srcLen += srcLen2;
- src += srcLen2;
- }
- else
- p->pos[(size_t)i - 1] += srcLen2;
-
- if (i == p->numCoders - 1)
- {
- *destLen += destLen2;
- dest += destLen2;
- }
- else
- {
- p->size[i] = destLen2;
- p->pos[i] = 0;
- p->finished[i] = encodingWasFinished;
- }
-
- if (destLen2 != 0 || srcLen2 != 0)
- processed = True;
- }
-
- if (!processed)
- {
- if (allFinished)
- p->status = CODER_STATUS_FINISHED_WITH_MARK;
- return resMain;
- }
- }
-}
-
-
-SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf)
-{
- *p = (CXzStreamFlags)GetBe16(buf + XZ_SIG_SIZE);
- if (CrcCalc(buf + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE) !=
- GetUi32(buf + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE))
- return SZ_ERROR_NO_ARCHIVE;
- return XzFlags_IsSupported(*p) ? SZ_OK : SZ_ERROR_UNSUPPORTED;
-}
-
-static BoolInt Xz_CheckFooter(CXzStreamFlags flags, UInt64 indexSize, const Byte *buf)
-{
- return indexSize == (((UInt64)GetUi32(buf + 4) + 1) << 2)
- && GetUi32(buf) == CrcCalc(buf + 4, 6)
- && flags == GetBe16(buf + 8)
- && buf[10] == XZ_FOOTER_SIG_0
- && buf[11] == XZ_FOOTER_SIG_1;
-}
-
-#define READ_VARINT_AND_CHECK(buf, pos, size, res) \
- { unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \
- if (s == 0) return SZ_ERROR_ARCHIVE; \
- pos += s; }
-
-
-static BoolInt XzBlock_AreSupportedFilters(const CXzBlock *p)
-{
- const unsigned numFilters = XzBlock_GetNumFilters(p) - 1;
- unsigned i;
- {
- const CXzFilter *f = &p->filters[numFilters];
- if (f->id != XZ_ID_LZMA2 || f->propsSize != 1 || f->props[0] > 40)
- return False;
- }
-
- for (i = 0; i < numFilters; i++)
- {
- const CXzFilter *f = &p->filters[i];
- if (f->id == XZ_ID_Delta)
- {
- if (f->propsSize != 1)
- return False;
- }
- else if (!XZ_IS_SUPPORTED_FILTER_ID(f->id)
- || (f->propsSize != 0 && f->propsSize != 4))
- return False;
- }
- return True;
-}
-
-
-SRes XzBlock_Parse(CXzBlock *p, const Byte *header)
-{
- unsigned pos;
- unsigned numFilters, i;
- unsigned headerSize = (unsigned)header[0] << 2;
-
- /* (headerSize != 0) : another code checks */
-
- if (CrcCalc(header, headerSize) != GetUi32(header + headerSize))
- return SZ_ERROR_ARCHIVE;
-
- pos = 1;
- p->flags = header[pos++];
-
- p->packSize = (UInt64)(Int64)-1;
- if (XzBlock_HasPackSize(p))
- {
- READ_VARINT_AND_CHECK(header, pos, headerSize, &p->packSize)
- if (p->packSize == 0 || p->packSize + headerSize >= (UInt64)1 << 63)
- return SZ_ERROR_ARCHIVE;
- }
-
- p->unpackSize = (UInt64)(Int64)-1;
- if (XzBlock_HasUnpackSize(p))
- {
- READ_VARINT_AND_CHECK(header, pos, headerSize, &p->unpackSize)
- }
-
- numFilters = XzBlock_GetNumFilters(p);
- for (i = 0; i < numFilters; i++)
- {
- CXzFilter *filter = p->filters + i;
- UInt64 size;
- READ_VARINT_AND_CHECK(header, pos, headerSize, &filter->id)
- READ_VARINT_AND_CHECK(header, pos, headerSize, &size)
- if (size > headerSize - pos || size > XZ_FILTER_PROPS_SIZE_MAX)
- return SZ_ERROR_ARCHIVE;
- filter->propsSize = (UInt32)size;
- memcpy(filter->props, header + pos, (size_t)size);
- pos += (unsigned)size;
-
- #ifdef XZ_DUMP
- printf("\nf[%u] = %2X: ", i, (unsigned)filter->id);
- {
- unsigned i;
- for (i = 0; i < size; i++)
- printf(" %2X", filter->props[i]);
- }
- #endif
- }
-
- if (XzBlock_HasUnsupportedFlags(p))
- return SZ_ERROR_UNSUPPORTED;
-
- while (pos < headerSize)
- if (header[pos++] != 0)
- return SZ_ERROR_ARCHIVE;
- return SZ_OK;
-}
-
-
-
-
-static SRes XzDecMix_Init(CMixCoder *p, const CXzBlock *block, Byte *outBuf, size_t outBufSize)
-{
- unsigned i;
- BoolInt needReInit = True;
- unsigned numFilters = XzBlock_GetNumFilters(block);
-
- if (numFilters == p->numCoders && ((p->outBuf && outBuf) || (!p->outBuf && !outBuf)))
- {
- needReInit = False;
- for (i = 0; i < numFilters; i++)
- if (p->ids[i] != block->filters[numFilters - 1 - i].id)
- {
- needReInit = True;
- break;
- }
- }
-
- // p->SingleBufMode = (outBuf != NULL);
- p->outBuf = outBuf;
- p->outBufSize = outBufSize;
-
- // p->SingleBufMode = False;
- // outBuf = NULL;
-
- if (needReInit)
- {
- MixCoder_Free(p);
- for (i = 0; i < numFilters; i++)
- {
- RINOK(MixCoder_SetFromMethod(p, i, block->filters[numFilters - 1 - i].id, outBuf, outBufSize))
- }
- p->numCoders = numFilters;
- }
- else
- {
- RINOK(MixCoder_ResetFromMethod(p, 0, block->filters[numFilters - 1].id, outBuf, outBufSize))
- }
-
- for (i = 0; i < numFilters; i++)
- {
- const CXzFilter *f = &block->filters[numFilters - 1 - i];
- IStateCoder *sc = &p->coders[i];
- RINOK(sc->SetProps(sc->p, f->props, f->propsSize, p->alloc))
- }
-
- MixCoder_Init(p);
- return SZ_OK;
-}
-
-
-
-void XzUnpacker_Init(CXzUnpacker *p)
-{
- p->state = XZ_STATE_STREAM_HEADER;
- p->pos = 0;
- p->numStartedStreams = 0;
- p->numFinishedStreams = 0;
- p->numTotalBlocks = 0;
- p->padSize = 0;
- p->decodeOnlyOneBlock = 0;
-
- p->parseMode = False;
- p->decodeToStreamSignature = False;
-
- // p->outBuf = NULL;
- // p->outBufSize = 0;
- p->outDataWritten = 0;
-}
-
-
-void XzUnpacker_SetOutBuf(CXzUnpacker *p, Byte *outBuf, size_t outBufSize)
-{
- p->outBuf = outBuf;
- p->outBufSize = outBufSize;
-}
-
-
-void XzUnpacker_Construct(CXzUnpacker *p, ISzAllocPtr alloc)
-{
- MixCoder_Construct(&p->decoder, alloc);
- p->outBuf = NULL;
- p->outBufSize = 0;
- XzUnpacker_Init(p);
-}
-
-
-void XzUnpacker_Free(CXzUnpacker *p)
-{
- MixCoder_Free(&p->decoder);
-}
-
-
-void XzUnpacker_PrepareToRandomBlockDecoding(CXzUnpacker *p)
-{
- p->indexSize = 0;
- p->numBlocks = 0;
- Sha256_Init(&p->sha);
- p->state = XZ_STATE_BLOCK_HEADER;
- p->pos = 0;
- p->decodeOnlyOneBlock = 1;
-}
-
-
-static void XzUnpacker_UpdateIndex(CXzUnpacker *p, UInt64 packSize, UInt64 unpackSize)
-{
- Byte temp[32];
- unsigned num = Xz_WriteVarInt(temp, packSize);
- num += Xz_WriteVarInt(temp + num, unpackSize);
- Sha256_Update(&p->sha, temp, num);
- p->indexSize += num;
- p->numBlocks++;
-}
-
-
-
-SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen, int srcFinished,
- ECoderFinishMode finishMode, ECoderStatus *status)
-{
- SizeT destLenOrig = *destLen;
- SizeT srcLenOrig = *srcLen;
- *destLen = 0;
- *srcLen = 0;
- *status = CODER_STATUS_NOT_SPECIFIED;
-
- for (;;)
- {
- SizeT srcRem;
-
- if (p->state == XZ_STATE_BLOCK)
- {
- SizeT destLen2 = destLenOrig - *destLen;
- SizeT srcLen2 = srcLenOrig - *srcLen;
- SRes res;
-
- ECoderFinishMode finishMode2 = finishMode;
- BoolInt srcFinished2 = srcFinished;
- BoolInt destFinish = False;
-
- if (p->block.packSize != (UInt64)(Int64)-1)
- {
- UInt64 rem = p->block.packSize - p->packSize;
- if (srcLen2 >= rem)
- {
- srcFinished2 = True;
- srcLen2 = (SizeT)rem;
- }
- if (rem == 0 && p->block.unpackSize == p->unpackSize)
- return SZ_ERROR_DATA;
- }
-
- if (p->block.unpackSize != (UInt64)(Int64)-1)
- {
- UInt64 rem = p->block.unpackSize - p->unpackSize;
- if (destLen2 >= rem)
- {
- destFinish = True;
- finishMode2 = CODER_FINISH_END;
- destLen2 = (SizeT)rem;
- }
- }
-
- /*
- if (srcLen2 == 0 && destLen2 == 0)
- {
- *status = CODER_STATUS_NOT_FINISHED;
- return SZ_OK;
- }
- */
-
- {
- res = MixCoder_Code(&p->decoder,
- (p->outBuf ? NULL : dest), &destLen2, destFinish,
- src, &srcLen2, srcFinished2,
- finishMode2);
-
- *status = p->decoder.status;
- XzCheck_Update(&p->check, (p->outBuf ? p->outBuf + p->outDataWritten : dest), destLen2);
- if (!p->outBuf)
- dest += destLen2;
- p->outDataWritten += destLen2;
- }
-
- (*srcLen) += srcLen2;
- src += srcLen2;
- p->packSize += srcLen2;
- (*destLen) += destLen2;
- p->unpackSize += destLen2;
-
- RINOK(res)
-
- if (*status != CODER_STATUS_FINISHED_WITH_MARK)
- {
- if (p->block.packSize == p->packSize
- && *status == CODER_STATUS_NEEDS_MORE_INPUT)
- {
- PRF_STR("CODER_STATUS_NEEDS_MORE_INPUT")
- *status = CODER_STATUS_NOT_SPECIFIED;
- return SZ_ERROR_DATA;
- }
-
- return SZ_OK;
- }
- {
- XzUnpacker_UpdateIndex(p, XzUnpacker_GetPackSizeForIndex(p), p->unpackSize);
- p->state = XZ_STATE_BLOCK_FOOTER;
- p->pos = 0;
- p->alignPos = 0;
- *status = CODER_STATUS_NOT_SPECIFIED;
-
- if ((p->block.packSize != (UInt64)(Int64)-1 && p->block.packSize != p->packSize)
- || (p->block.unpackSize != (UInt64)(Int64)-1 && p->block.unpackSize != p->unpackSize))
- {
- PRF_STR("ERROR: block.size mismatch")
- return SZ_ERROR_DATA;
- }
- }
- // continue;
- }
-
- srcRem = srcLenOrig - *srcLen;
-
- // XZ_STATE_BLOCK_FOOTER can transit to XZ_STATE_BLOCK_HEADER without input bytes
- if (srcRem == 0 && p->state != XZ_STATE_BLOCK_FOOTER)
- {
- *status = CODER_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
-
- switch (p->state)
- {
- case XZ_STATE_STREAM_HEADER:
- {
- if (p->pos < XZ_STREAM_HEADER_SIZE)
- {
- if (p->pos < XZ_SIG_SIZE && *src != XZ_SIG[p->pos])
- return SZ_ERROR_NO_ARCHIVE;
- if (p->decodeToStreamSignature)
- return SZ_OK;
- p->buf[p->pos++] = *src++;
- (*srcLen)++;
- }
- else
- {
- RINOK(Xz_ParseHeader(&p->streamFlags, p->buf))
- p->numStartedStreams++;
- p->indexSize = 0;
- p->numBlocks = 0;
- Sha256_Init(&p->sha);
- p->state = XZ_STATE_BLOCK_HEADER;
- p->pos = 0;
- }
- break;
- }
-
- case XZ_STATE_BLOCK_HEADER:
- {
- if (p->pos == 0)
- {
- p->buf[p->pos++] = *src++;
- (*srcLen)++;
- if (p->buf[0] == 0)
- {
- if (p->decodeOnlyOneBlock)
- return SZ_ERROR_DATA;
- p->indexPreSize = 1 + Xz_WriteVarInt(p->buf + 1, p->numBlocks);
- p->indexPos = p->indexPreSize;
- p->indexSize += p->indexPreSize;
- Sha256_Final(&p->sha, p->shaDigest);
- Sha256_Init(&p->sha);
- p->crc = CrcUpdate(CRC_INIT_VAL, p->buf, p->indexPreSize);
- p->state = XZ_STATE_STREAM_INDEX;
- break;
- }
- p->blockHeaderSize = ((UInt32)p->buf[0] << 2) + 4;
- break;
- }
-
- if (p->pos != p->blockHeaderSize)
- {
- UInt32 cur = p->blockHeaderSize - p->pos;
- if (cur > srcRem)
- cur = (UInt32)srcRem;
- memcpy(p->buf + p->pos, src, cur);
- p->pos += cur;
- (*srcLen) += cur;
- src += cur;
- }
- else
- {
- RINOK(XzBlock_Parse(&p->block, p->buf))
- if (!XzBlock_AreSupportedFilters(&p->block))
- return SZ_ERROR_UNSUPPORTED;
- p->numTotalBlocks++;
- p->state = XZ_STATE_BLOCK;
- p->packSize = 0;
- p->unpackSize = 0;
- XzCheck_Init(&p->check, XzFlags_GetCheckType(p->streamFlags));
- if (p->parseMode)
- {
- p->headerParsedOk = True;
- return SZ_OK;
- }
- RINOK(XzDecMix_Init(&p->decoder, &p->block, p->outBuf, p->outBufSize))
- }
- break;
- }
-
- case XZ_STATE_BLOCK_FOOTER:
- {
- if ((((unsigned)p->packSize + p->alignPos) & 3) != 0)
- {
- if (srcRem == 0)
- {
- *status = CODER_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
- (*srcLen)++;
- p->alignPos++;
- if (*src++ != 0)
- return SZ_ERROR_CRC;
- }
- else
- {
- UInt32 checkSize = XzFlags_GetCheckSize(p->streamFlags);
- UInt32 cur = checkSize - p->pos;
- if (cur != 0)
- {
- if (srcRem == 0)
- {
- *status = CODER_STATUS_NEEDS_MORE_INPUT;
- return SZ_OK;
- }
- if (cur > srcRem)
- cur = (UInt32)srcRem;
- memcpy(p->buf + p->pos, src, cur);
- p->pos += cur;
- (*srcLen) += cur;
- src += cur;
- if (checkSize != p->pos)
- break;
- }
- {
- Byte digest[XZ_CHECK_SIZE_MAX];
- p->state = XZ_STATE_BLOCK_HEADER;
- p->pos = 0;
- if (XzCheck_Final(&p->check, digest) && memcmp(digest, p->buf, checkSize) != 0)
- return SZ_ERROR_CRC;
- if (p->decodeOnlyOneBlock)
- {
- *status = CODER_STATUS_FINISHED_WITH_MARK;
- return SZ_OK;
- }
- }
- }
- break;
- }
-
- case XZ_STATE_STREAM_INDEX:
- {
- if (p->pos < p->indexPreSize)
- {
- (*srcLen)++;
- if (*src++ != p->buf[p->pos++])
- return SZ_ERROR_CRC;
- }
- else
- {
- if (p->indexPos < p->indexSize)
- {
- UInt64 cur = p->indexSize - p->indexPos;
- if (srcRem > cur)
- srcRem = (SizeT)cur;
- p->crc = CrcUpdate(p->crc, src, srcRem);
- Sha256_Update(&p->sha, src, srcRem);
- (*srcLen) += srcRem;
- src += srcRem;
- p->indexPos += srcRem;
- }
- else if ((p->indexPos & 3) != 0)
- {
- Byte b = *src++;
- p->crc = CRC_UPDATE_BYTE(p->crc, b);
- (*srcLen)++;
- p->indexPos++;
- p->indexSize++;
- if (b != 0)
- return SZ_ERROR_CRC;
- }
- else
- {
- Byte digest[SHA256_DIGEST_SIZE];
- p->state = XZ_STATE_STREAM_INDEX_CRC;
- p->indexSize += 4;
- p->pos = 0;
- Sha256_Final(&p->sha, digest);
- if (memcmp(digest, p->shaDigest, SHA256_DIGEST_SIZE) != 0)
- return SZ_ERROR_CRC;
- }
- }
- break;
- }
-
- case XZ_STATE_STREAM_INDEX_CRC:
- {
- if (p->pos < 4)
- {
- (*srcLen)++;
- p->buf[p->pos++] = *src++;
- }
- else
- {
- const Byte *ptr = p->buf;
- p->state = XZ_STATE_STREAM_FOOTER;
- p->pos = 0;
- if (CRC_GET_DIGEST(p->crc) != GetUi32(ptr))
- return SZ_ERROR_CRC;
- }
- break;
- }
-
- case XZ_STATE_STREAM_FOOTER:
- {
- UInt32 cur = XZ_STREAM_FOOTER_SIZE - p->pos;
- if (cur > srcRem)
- cur = (UInt32)srcRem;
- memcpy(p->buf + p->pos, src, cur);
- p->pos += cur;
- (*srcLen) += cur;
- src += cur;
- if (p->pos == XZ_STREAM_FOOTER_SIZE)
- {
- p->state = XZ_STATE_STREAM_PADDING;
- p->numFinishedStreams++;
- p->padSize = 0;
- if (!Xz_CheckFooter(p->streamFlags, p->indexSize, p->buf))
- return SZ_ERROR_CRC;
- }
- break;
- }
-
- case XZ_STATE_STREAM_PADDING:
- {
- if (*src != 0)
- {
- if (((UInt32)p->padSize & 3) != 0)
- return SZ_ERROR_NO_ARCHIVE;
- p->pos = 0;
- p->state = XZ_STATE_STREAM_HEADER;
- }
- else
- {
- (*srcLen)++;
- src++;
- p->padSize++;
- }
- break;
- }
-
- case XZ_STATE_BLOCK: break; /* to disable GCC warning */
- }
- }
- /*
- if (p->state == XZ_STATE_FINISHED)
- *status = CODER_STATUS_FINISHED_WITH_MARK;
- return SZ_OK;
- */
-}
-
-
-SRes XzUnpacker_CodeFull(CXzUnpacker *p, Byte *dest, SizeT *destLen,
- const Byte *src, SizeT *srcLen,
- ECoderFinishMode finishMode, ECoderStatus *status)
-{
- XzUnpacker_Init(p);
- XzUnpacker_SetOutBuf(p, dest, *destLen);
-
- return XzUnpacker_Code(p,
- NULL, destLen,
- src, srcLen, True,
- finishMode, status);
-}
-
-
-BoolInt XzUnpacker_IsBlockFinished(const CXzUnpacker *p)
-{
- return (p->state == XZ_STATE_BLOCK_HEADER) && (p->pos == 0);
-}
-
-BoolInt XzUnpacker_IsStreamWasFinished(const CXzUnpacker *p)
-{
- return (p->state == XZ_STATE_STREAM_PADDING) && (((UInt32)p->padSize & 3) == 0);
-}
-
-UInt64 XzUnpacker_GetExtraSize(const CXzUnpacker *p)
-{
- UInt64 num = 0;
- if (p->state == XZ_STATE_STREAM_PADDING)
- num = p->padSize;
- else if (p->state == XZ_STATE_STREAM_HEADER)
- num = p->padSize + p->pos;
- return num;
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#ifndef Z7_ST
-#include "MtDec.h"
-#endif
-
-
-void XzDecMtProps_Init(CXzDecMtProps *p)
-{
- p->inBufSize_ST = 1 << 18;
- p->outStep_ST = 1 << 20;
- p->ignoreErrors = False;
-
- #ifndef Z7_ST
- p->numThreads = 1;
- p->inBufSize_MT = 1 << 18;
- p->memUseMax = sizeof(size_t) << 28;
- #endif
-}
-
-
-
-#ifndef Z7_ST
-
-/* ---------- CXzDecMtThread ---------- */
-
-typedef struct
-{
- Byte *outBuf;
- size_t outBufSize;
- size_t outPreSize;
- size_t inPreSize;
- size_t inPreHeaderSize;
- size_t blockPackSize_for_Index; // including block header and checksum.
- size_t blockPackTotal; // including stream header, block header and checksum.
- size_t inCodeSize;
- size_t outCodeSize;
- ECoderStatus status;
- SRes codeRes;
- BoolInt skipMode;
- // BoolInt finishedWithMark;
- EMtDecParseState parseState;
- BoolInt parsing_Truncated;
- BoolInt atBlockHeader;
- CXzStreamFlags streamFlags;
- // UInt64 numFinishedStreams
- UInt64 numStreams;
- UInt64 numTotalBlocks;
- UInt64 numBlocks;
-
- BoolInt dec_created;
- CXzUnpacker dec;
-
- Byte mtPad[1 << 7];
-} CXzDecMtThread;
-
-#endif
-
-
-/* ---------- CXzDecMt ---------- */
-
-struct CXzDecMt
-{
- CAlignOffsetAlloc alignOffsetAlloc;
- ISzAllocPtr allocMid;
-
- CXzDecMtProps props;
- size_t unpackBlockMaxSize;
-
- ISeqInStreamPtr inStream;
- ISeqOutStreamPtr outStream;
- ICompressProgressPtr progress;
-
- BoolInt finishMode;
- BoolInt outSize_Defined;
- UInt64 outSize;
-
- UInt64 outProcessed;
- UInt64 inProcessed;
- UInt64 readProcessed;
- BoolInt readWasFinished;
- SRes readRes;
- SRes writeRes;
-
- Byte *outBuf;
- size_t outBufSize;
- Byte *inBuf;
- size_t inBufSize;
-
- CXzUnpacker dec;
-
- ECoderStatus status;
- SRes codeRes;
-
- #ifndef Z7_ST
- BoolInt mainDecoderWasCalled;
- // int statErrorDefined;
- int finishedDecoderIndex;
-
- // global values that are used in Parse stage
- CXzStreamFlags streamFlags;
- // UInt64 numFinishedStreams
- UInt64 numStreams;
- UInt64 numTotalBlocks;
- UInt64 numBlocks;
-
- // UInt64 numBadBlocks;
- SRes mainErrorCode; // it's set to error code, if the size Code() output doesn't patch the size from Parsing stage
- // it can be = SZ_ERROR_INPUT_EOF
- // it can be = SZ_ERROR_DATA, in some another cases
- BoolInt isBlockHeaderState_Parse;
- BoolInt isBlockHeaderState_Write;
- UInt64 outProcessed_Parse;
- BoolInt parsing_Truncated;
-
- BoolInt mtc_WasConstructed;
- CMtDec mtc;
- CXzDecMtThread coders[MTDEC_THREADS_MAX];
- #endif
-};
-
-
-
-CXzDecMtHandle XzDecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid)
-{
- CXzDecMt *p = (CXzDecMt *)ISzAlloc_Alloc(alloc, sizeof(CXzDecMt));
- if (!p)
- return NULL;
-
- AlignOffsetAlloc_CreateVTable(&p->alignOffsetAlloc);
- p->alignOffsetAlloc.baseAlloc = alloc;
- p->alignOffsetAlloc.numAlignBits = 7;
- p->alignOffsetAlloc.offset = 0;
-
- p->allocMid = allocMid;
-
- p->outBuf = NULL;
- p->outBufSize = 0;
- p->inBuf = NULL;
- p->inBufSize = 0;
-
- XzUnpacker_Construct(&p->dec, &p->alignOffsetAlloc.vt);
-
- p->unpackBlockMaxSize = 0;
-
- XzDecMtProps_Init(&p->props);
-
- #ifndef Z7_ST
- p->mtc_WasConstructed = False;
- {
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CXzDecMtThread *coder = &p->coders[i];
- coder->dec_created = False;
- coder->outBuf = NULL;
- coder->outBufSize = 0;
- }
- }
- #endif
-
- return (CXzDecMtHandle)p;
-}
-
-
-#ifndef Z7_ST
-
-static void XzDecMt_FreeOutBufs(CXzDecMt *p)
-{
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CXzDecMtThread *coder = &p->coders[i];
- if (coder->outBuf)
- {
- ISzAlloc_Free(p->allocMid, coder->outBuf);
- coder->outBuf = NULL;
- coder->outBufSize = 0;
- }
- }
- p->unpackBlockMaxSize = 0;
-}
-
-#endif
-
-
-
-static void XzDecMt_FreeSt(CXzDecMt *p)
-{
- XzUnpacker_Free(&p->dec);
-
- if (p->outBuf)
- {
- ISzAlloc_Free(p->allocMid, p->outBuf);
- p->outBuf = NULL;
- }
- p->outBufSize = 0;
-
- if (p->inBuf)
- {
- ISzAlloc_Free(p->allocMid, p->inBuf);
- p->inBuf = NULL;
- }
- p->inBufSize = 0;
-}
-
-
-// #define GET_CXzDecMt_p CXzDecMt *p = pp;
-
-void XzDecMt_Destroy(CXzDecMtHandle p)
-{
- // GET_CXzDecMt_p
-
- XzDecMt_FreeSt(p);
-
- #ifndef Z7_ST
-
- if (p->mtc_WasConstructed)
- {
- MtDec_Destruct(&p->mtc);
- p->mtc_WasConstructed = False;
- }
- {
- unsigned i;
- for (i = 0; i < MTDEC_THREADS_MAX; i++)
- {
- CXzDecMtThread *t = &p->coders[i];
- if (t->dec_created)
- {
- // we don't need to free dict here
- XzUnpacker_Free(&t->dec);
- t->dec_created = False;
- }
- }
- }
- XzDecMt_FreeOutBufs(p);
-
- #endif
-
- ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, p);
-}
-
-
-
-#ifndef Z7_ST
-
-static void XzDecMt_Callback_Parse(void *obj, unsigned coderIndex, CMtDecCallbackInfo *cc)
-{
- CXzDecMt *me = (CXzDecMt *)obj;
- CXzDecMtThread *coder = &me->coders[coderIndex];
- size_t srcSize = cc->srcSize;
-
- cc->srcSize = 0;
- cc->outPos = 0;
- cc->state = MTDEC_PARSE_CONTINUE;
-
- cc->canCreateNewThread = True;
-
- if (cc->startCall)
- {
- coder->outPreSize = 0;
- coder->inPreSize = 0;
- coder->inPreHeaderSize = 0;
- coder->parseState = MTDEC_PARSE_CONTINUE;
- coder->parsing_Truncated = False;
- coder->skipMode = False;
- coder->codeRes = SZ_OK;
- coder->status = CODER_STATUS_NOT_SPECIFIED;
- coder->inCodeSize = 0;
- coder->outCodeSize = 0;
-
- coder->numStreams = me->numStreams;
- coder->numTotalBlocks = me->numTotalBlocks;
- coder->numBlocks = me->numBlocks;
-
- if (!coder->dec_created)
- {
- XzUnpacker_Construct(&coder->dec, &me->alignOffsetAlloc.vt);
- coder->dec_created = True;
- }
-
- XzUnpacker_Init(&coder->dec);
-
- if (me->isBlockHeaderState_Parse)
- {
- coder->dec.streamFlags = me->streamFlags;
- coder->atBlockHeader = True;
- XzUnpacker_PrepareToRandomBlockDecoding(&coder->dec);
- }
- else
- {
- coder->atBlockHeader = False;
- me->isBlockHeaderState_Parse = True;
- }
-
- coder->dec.numStartedStreams = me->numStreams;
- coder->dec.numTotalBlocks = me->numTotalBlocks;
- coder->dec.numBlocks = me->numBlocks;
- }
-
- while (!coder->skipMode)
- {
- ECoderStatus status;
- SRes res;
- size_t srcSize2 = srcSize;
- size_t destSize = (size_t)0 - 1;
-
- coder->dec.parseMode = True;
- coder->dec.headerParsedOk = False;
-
- PRF_STR_INT("Parse", srcSize2)
-
- res = XzUnpacker_Code(&coder->dec,
- NULL, &destSize,
- cc->src, &srcSize2, cc->srcFinished,
- CODER_FINISH_END, &status);
-
- // PRF(printf(" res = %d, srcSize2 = %d", res, (unsigned)srcSize2));
-
- coder->codeRes = res;
- coder->status = status;
- cc->srcSize += srcSize2;
- srcSize -= srcSize2;
- coder->inPreHeaderSize += srcSize2;
- coder->inPreSize = coder->inPreHeaderSize;
-
- if (res != SZ_OK)
- {
- cc->state =
- coder->parseState = MTDEC_PARSE_END;
- /*
- if (res == SZ_ERROR_MEM)
- return res;
- return SZ_OK;
- */
- return; // res;
- }
-
- if (coder->dec.headerParsedOk)
- {
- const CXzBlock *block = &coder->dec.block;
- if (XzBlock_HasUnpackSize(block)
- // && block->unpackSize <= me->props.outBlockMax
- && XzBlock_HasPackSize(block))
- {
- {
- if (block->unpackSize * 2 * me->mtc.numStartedThreads > me->props.memUseMax)
- {
- cc->state = MTDEC_PARSE_OVERFLOW;
- return; // SZ_OK;
- }
- }
- {
- UInt64 packSize = block->packSize;
- UInt64 packSizeAligned = packSize + ((0 - (unsigned)packSize) & 3);
- UInt32 checkSize = XzFlags_GetCheckSize(coder->dec.streamFlags);
- UInt64 blockPackSum = coder->inPreSize + packSizeAligned + checkSize;
- // if (blockPackSum <= me->props.inBlockMax)
- // unpackBlockMaxSize
- {
- coder->blockPackSize_for_Index = (size_t)(coder->dec.blockHeaderSize + packSize + checkSize);
- coder->blockPackTotal = (size_t)blockPackSum;
- coder->outPreSize = (size_t)block->unpackSize;
- coder->streamFlags = coder->dec.streamFlags;
- me->streamFlags = coder->dec.streamFlags;
- coder->skipMode = True;
- break;
- }
- }
- }
- }
- else
- // if (coder->inPreSize <= me->props.inBlockMax)
- {
- if (!cc->srcFinished)
- return; // SZ_OK;
- cc->state =
- coder->parseState = MTDEC_PARSE_END;
- return; // SZ_OK;
- }
- cc->state = MTDEC_PARSE_OVERFLOW;
- return; // SZ_OK;
- }
-
- // ---------- skipMode ----------
- {
- UInt64 rem = coder->blockPackTotal - coder->inPreSize;
- size_t cur = srcSize;
- if (cur > rem)
- cur = (size_t)rem;
- cc->srcSize += cur;
- coder->inPreSize += cur;
- srcSize -= cur;
-
- if (coder->inPreSize == coder->blockPackTotal)
- {
- if (srcSize == 0)
- {
- if (!cc->srcFinished)
- return; // SZ_OK;
- cc->state = MTDEC_PARSE_END;
- }
- else if ((cc->src)[cc->srcSize] == 0) // we check control byte of next block
- cc->state = MTDEC_PARSE_END;
- else
- {
- cc->state = MTDEC_PARSE_NEW;
-
- {
- size_t blockMax = me->unpackBlockMaxSize;
- if (blockMax < coder->outPreSize)
- blockMax = coder->outPreSize;
- {
- UInt64 required = (UInt64)blockMax * (me->mtc.numStartedThreads + 1) * 2;
- if (me->props.memUseMax < required)
- cc->canCreateNewThread = False;
- }
- }
-
- if (me->outSize_Defined)
- {
- // next block can be zero size
- const UInt64 rem2 = me->outSize - me->outProcessed_Parse;
- if (rem2 < coder->outPreSize)
- {
- coder->parsing_Truncated = True;
- cc->state = MTDEC_PARSE_END;
- }
- me->outProcessed_Parse += coder->outPreSize;
- }
- }
- }
- else if (cc->srcFinished)
- cc->state = MTDEC_PARSE_END;
- else
- return; // SZ_OK;
-
- coder->parseState = cc->state;
- cc->outPos = coder->outPreSize;
-
- me->numStreams = coder->dec.numStartedStreams;
- me->numTotalBlocks = coder->dec.numTotalBlocks;
- me->numBlocks = coder->dec.numBlocks + 1;
- return; // SZ_OK;
- }
-}
-
-
-static SRes XzDecMt_Callback_PreCode(void *pp, unsigned coderIndex)
-{
- CXzDecMt *me = (CXzDecMt *)pp;
- CXzDecMtThread *coder = &me->coders[coderIndex];
- Byte *dest;
-
- if (!coder->dec.headerParsedOk)
- return SZ_OK;
-
- dest = coder->outBuf;
-
- if (!dest || coder->outBufSize < coder->outPreSize)
- {
- if (dest)
- {
- ISzAlloc_Free(me->allocMid, dest);
- coder->outBuf = NULL;
- coder->outBufSize = 0;
- }
- {
- size_t outPreSize = coder->outPreSize;
- if (outPreSize == 0)
- outPreSize = 1;
- dest = (Byte *)ISzAlloc_Alloc(me->allocMid, outPreSize);
- }
- if (!dest)
- return SZ_ERROR_MEM;
- coder->outBuf = dest;
- coder->outBufSize = coder->outPreSize;
-
- if (coder->outBufSize > me->unpackBlockMaxSize)
- me->unpackBlockMaxSize = coder->outBufSize;
- }
-
- // return SZ_ERROR_MEM;
-
- XzUnpacker_SetOutBuf(&coder->dec, coder->outBuf, coder->outBufSize);
-
- {
- SRes res = XzDecMix_Init(&coder->dec.decoder, &coder->dec.block, coder->outBuf, coder->outBufSize);
- // res = SZ_ERROR_UNSUPPORTED; // to test
- coder->codeRes = res;
- if (res != SZ_OK)
- {
- // if (res == SZ_ERROR_MEM) return res;
- if (me->props.ignoreErrors && res != SZ_ERROR_MEM)
- return SZ_OK;
- return res;
- }
- }
-
- return SZ_OK;
-}
-
-
-static SRes XzDecMt_Callback_Code(void *pp, unsigned coderIndex,
- const Byte *src, size_t srcSize, int srcFinished,
- // int finished, int blockFinished,
- UInt64 *inCodePos, UInt64 *outCodePos, int *stop)
-{
- CXzDecMt *me = (CXzDecMt *)pp;
- CXzDecMtThread *coder = &me->coders[coderIndex];
-
- *inCodePos = coder->inCodeSize;
- *outCodePos = coder->outCodeSize;
- *stop = True;
-
- if (srcSize > coder->inPreSize - coder->inCodeSize)
- return SZ_ERROR_FAIL;
-
- if (coder->inCodeSize < coder->inPreHeaderSize)
- {
- size_t step = coder->inPreHeaderSize - coder->inCodeSize;
- if (step > srcSize)
- step = srcSize;
- src += step;
- srcSize -= step;
- coder->inCodeSize += step;
- *inCodePos = coder->inCodeSize;
- if (coder->inCodeSize < coder->inPreHeaderSize)
- {
- *stop = False;
- return SZ_OK;
- }
- }
-
- if (!coder->dec.headerParsedOk)
- return SZ_OK;
- if (!coder->outBuf)
- return SZ_OK;
-
- if (coder->codeRes == SZ_OK)
- {
- ECoderStatus status;
- SRes res;
- size_t srcProcessed = srcSize;
- size_t outSizeCur = coder->outPreSize - coder->dec.outDataWritten;
-
- // PRF(printf("\nCallback_Code: Code %d %d\n", (unsigned)srcSize, (unsigned)outSizeCur));
-
- res = XzUnpacker_Code(&coder->dec,
- NULL, &outSizeCur,
- src, &srcProcessed, srcFinished,
- // coder->finishedWithMark ? CODER_FINISH_END : CODER_FINISH_ANY,
- CODER_FINISH_END,
- &status);
-
- // PRF(printf(" res = %d, srcSize2 = %d, outSizeCur = %d", res, (unsigned)srcProcessed, (unsigned)outSizeCur));
-
- coder->codeRes = res;
- coder->status = status;
- coder->inCodeSize += srcProcessed;
- coder->outCodeSize = coder->dec.outDataWritten;
- *inCodePos = coder->inCodeSize;
- *outCodePos = coder->outCodeSize;
-
- if (res == SZ_OK)
- {
- if (srcProcessed == srcSize)
- *stop = False;
- return SZ_OK;
- }
- }
-
- if (me->props.ignoreErrors && coder->codeRes != SZ_ERROR_MEM)
- {
- *inCodePos = coder->inPreSize;
- *outCodePos = coder->outPreSize;
- return SZ_OK;
- }
- return coder->codeRes;
-}
-
-
-#define XZDECMT_STREAM_WRITE_STEP (1 << 24)
-
-static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex,
- BoolInt needWriteToStream,
- const Byte *src, size_t srcSize, BoolInt isCross,
- // int srcFinished,
- BoolInt *needContinue,
- BoolInt *canRecode)
-{
- CXzDecMt *me = (CXzDecMt *)pp;
- const CXzDecMtThread *coder = &me->coders[coderIndex];
-
- // PRF(printf("\nWrite processed = %d srcSize = %d\n", (unsigned)me->mtc.inProcessed, (unsigned)srcSize));
-
- *needContinue = False;
- *canRecode = True;
-
- if (!needWriteToStream)
- return SZ_OK;
-
- if (!coder->dec.headerParsedOk || !coder->outBuf)
- {
- if (me->finishedDecoderIndex < 0)
- me->finishedDecoderIndex = (int)coderIndex;
- return SZ_OK;
- }
-
- if (me->finishedDecoderIndex >= 0)
- return SZ_OK;
-
- me->mtc.inProcessed += coder->inCodeSize;
-
- *canRecode = False;
-
- {
- SRes res;
- size_t size = coder->outCodeSize;
- Byte *data = coder->outBuf;
-
- // we use in me->dec: sha, numBlocks, indexSize
-
- if (!me->isBlockHeaderState_Write)
- {
- XzUnpacker_PrepareToRandomBlockDecoding(&me->dec);
- me->dec.decodeOnlyOneBlock = False;
- me->dec.numStartedStreams = coder->dec.numStartedStreams;
- me->dec.streamFlags = coder->streamFlags;
-
- me->isBlockHeaderState_Write = True;
- }
-
- me->dec.numTotalBlocks = coder->dec.numTotalBlocks;
- XzUnpacker_UpdateIndex(&me->dec, coder->blockPackSize_for_Index, coder->outPreSize);
-
- if (coder->outPreSize != size)
- {
- if (me->props.ignoreErrors)
- {
- memset(data + size, 0, coder->outPreSize - size);
- size = coder->outPreSize;
- }
- // me->numBadBlocks++;
- if (me->mainErrorCode == SZ_OK)
- {
- if ((int)coder->status == LZMA_STATUS_NEEDS_MORE_INPUT)
- me->mainErrorCode = SZ_ERROR_INPUT_EOF;
- else
- me->mainErrorCode = SZ_ERROR_DATA;
- }
- }
-
- if (me->writeRes != SZ_OK)
- return me->writeRes;
-
- res = SZ_OK;
- {
- if (me->outSize_Defined)
- {
- const UInt64 rem = me->outSize - me->outProcessed;
- if (size > rem)
- size = (SizeT)rem;
- }
-
- for (;;)
- {
- size_t cur = size;
- size_t written;
- if (cur > XZDECMT_STREAM_WRITE_STEP)
- cur = XZDECMT_STREAM_WRITE_STEP;
-
- written = ISeqOutStream_Write(me->outStream, data, cur);
-
- // PRF(printf("\nWritten ask = %d written = %d\n", (unsigned)cur, (unsigned)written));
-
- me->outProcessed += written;
- if (written != cur)
- {
- me->writeRes = SZ_ERROR_WRITE;
- res = me->writeRes;
- break;
- }
- data += cur;
- size -= cur;
- // PRF_STR_INT("Written size =", size)
- if (size == 0)
- break;
- res = MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0);
- if (res != SZ_OK)
- break;
- }
- }
-
- if (coder->codeRes != SZ_OK)
- if (!me->props.ignoreErrors)
- {
- me->finishedDecoderIndex = (int)coderIndex;
- return res;
- }
-
- RINOK(res)
-
- if (coder->inPreSize != coder->inCodeSize
- || coder->blockPackTotal != coder->inCodeSize)
- {
- me->finishedDecoderIndex = (int)coderIndex;
- return SZ_OK;
- }
-
- if (coder->parseState != MTDEC_PARSE_END)
- {
- *needContinue = True;
- return SZ_OK;
- }
- }
-
- // (coder->state == MTDEC_PARSE_END) means that there are no other working threads
- // so we can use mtc variables without lock
-
- PRF_STR_INT("Write MTDEC_PARSE_END", me->mtc.inProcessed)
-
- me->mtc.mtProgress.totalInSize = me->mtc.inProcessed;
- {
- CXzUnpacker *dec = &me->dec;
-
- PRF_STR_INT("PostSingle", srcSize)
-
- {
- size_t srcProcessed = srcSize;
- ECoderStatus status;
- size_t outSizeCur = 0;
- SRes res;
-
- // dec->decodeOnlyOneBlock = False;
- dec->decodeToStreamSignature = True;
-
- me->mainDecoderWasCalled = True;
-
- if (coder->parsing_Truncated)
- {
- me->parsing_Truncated = True;
- return SZ_OK;
- }
-
- /*
- We have processed all xz-blocks of stream,
- And xz unpacker is at XZ_STATE_BLOCK_HEADER state, where
- (src) is a pointer to xz-Index structure.
- We finish reading of current xz-Stream, including Zero padding after xz-Stream.
- We exit, if we reach extra byte (first byte of new-Stream or another data).
- But we don't update input stream pointer for that new extra byte.
- If extra byte is not correct first byte of xz-signature,
- we have SZ_ERROR_NO_ARCHIVE error here.
- */
-
- res = XzUnpacker_Code(dec,
- NULL, &outSizeCur,
- src, &srcProcessed,
- me->mtc.readWasFinished, // srcFinished
- CODER_FINISH_END, // CODER_FINISH_ANY,
- &status);
-
- // res = SZ_ERROR_ARCHIVE; // for failure test
-
- me->status = status;
- me->codeRes = res;
-
- if (isCross)
- me->mtc.crossStart += srcProcessed;
-
- me->mtc.inProcessed += srcProcessed;
- me->mtc.mtProgress.totalInSize = me->mtc.inProcessed;
-
- srcSize -= srcProcessed;
- src += srcProcessed;
-
- if (res != SZ_OK)
- {
- return SZ_OK;
- // return res;
- }
-
- if (dec->state == XZ_STATE_STREAM_HEADER)
- {
- *needContinue = True;
- me->isBlockHeaderState_Parse = False;
- me->isBlockHeaderState_Write = False;
-
- if (!isCross)
- {
- Byte *crossBuf = MtDec_GetCrossBuff(&me->mtc);
- if (!crossBuf)
- return SZ_ERROR_MEM;
- if (srcSize != 0)
- memcpy(crossBuf, src, srcSize);
- me->mtc.crossStart = 0;
- me->mtc.crossEnd = srcSize;
- }
-
- PRF_STR_INT("XZ_STATE_STREAM_HEADER crossEnd = ", (unsigned)me->mtc.crossEnd)
-
- return SZ_OK;
- }
-
- if (status != CODER_STATUS_NEEDS_MORE_INPUT || srcSize != 0)
- {
- return SZ_ERROR_FAIL;
- }
-
- if (me->mtc.readWasFinished)
- {
- return SZ_OK;
- }
- }
-
- {
- size_t inPos;
- size_t inLim;
- // const Byte *inData;
- UInt64 inProgressPrev = me->mtc.inProcessed;
-
- // XzDecMt_Prepare_InBuf_ST(p);
- Byte *crossBuf = MtDec_GetCrossBuff(&me->mtc);
- if (!crossBuf)
- return SZ_ERROR_MEM;
-
- inPos = 0;
- inLim = 0;
-
- // inData = crossBuf;
-
- for (;;)
- {
- SizeT inProcessed;
- SizeT outProcessed;
- ECoderStatus status;
- SRes res;
-
- if (inPos == inLim)
- {
- if (!me->mtc.readWasFinished)
- {
- inPos = 0;
- inLim = me->mtc.inBufSize;
- me->mtc.readRes = ISeqInStream_Read(me->inStream, (void *)crossBuf, &inLim);
- me->mtc.readProcessed += inLim;
- if (inLim == 0 || me->mtc.readRes != SZ_OK)
- me->mtc.readWasFinished = True;
- }
- }
-
- inProcessed = inLim - inPos;
- outProcessed = 0;
-
- res = XzUnpacker_Code(dec,
- NULL, &outProcessed,
- crossBuf + inPos, &inProcessed,
- (inProcessed == 0), // srcFinished
- CODER_FINISH_END, &status);
-
- me->codeRes = res;
- me->status = status;
- inPos += inProcessed;
- me->mtc.inProcessed += inProcessed;
- me->mtc.mtProgress.totalInSize = me->mtc.inProcessed;
-
- if (res != SZ_OK)
- {
- return SZ_OK;
- // return res;
- }
-
- if (dec->state == XZ_STATE_STREAM_HEADER)
- {
- *needContinue = True;
- me->mtc.crossStart = inPos;
- me->mtc.crossEnd = inLim;
- me->isBlockHeaderState_Parse = False;
- me->isBlockHeaderState_Write = False;
- return SZ_OK;
- }
-
- if (status != CODER_STATUS_NEEDS_MORE_INPUT)
- return SZ_ERROR_FAIL;
-
- if (me->mtc.progress)
- {
- UInt64 inDelta = me->mtc.inProcessed - inProgressPrev;
- if (inDelta >= (1 << 22))
- {
- RINOK(MtProgress_Progress_ST(&me->mtc.mtProgress))
- inProgressPrev = me->mtc.inProcessed;
- }
- }
- if (me->mtc.readWasFinished)
- return SZ_OK;
- }
- }
- }
-}
-
-
-#endif
-
-
-
-void XzStatInfo_Clear(CXzStatInfo *p)
-{
- p->InSize = 0;
- p->OutSize = 0;
-
- p->NumStreams = 0;
- p->NumBlocks = 0;
-
- p->UnpackSize_Defined = False;
-
- p->NumStreams_Defined = False;
- p->NumBlocks_Defined = False;
-
- p->DataAfterEnd = False;
- p->DecodingTruncated = False;
-
- p->DecodeRes = SZ_OK;
- p->ReadRes = SZ_OK;
- p->ProgressRes = SZ_OK;
-
- p->CombinedRes = SZ_OK;
- p->CombinedRes_Type = SZ_OK;
-}
-
-
-
-/*
- XzDecMt_Decode_ST() can return SZ_OK or the following errors
- - SZ_ERROR_MEM for memory allocation error
- - error from XzUnpacker_Code() function
- - SZ_ERROR_WRITE for ISeqOutStream::Write(). stat->CombinedRes_Type = SZ_ERROR_WRITE in that case
- - ICompressProgress::Progress() error, stat->CombinedRes_Type = SZ_ERROR_PROGRESS.
- But XzDecMt_Decode_ST() doesn't return ISeqInStream::Read() errors.
- ISeqInStream::Read() result is set to p->readRes.
- also it can set stat->CombinedRes_Type to SZ_ERROR_WRITE or SZ_ERROR_PROGRESS.
-*/
-
-static SRes XzDecMt_Decode_ST(CXzDecMt *p
- #ifndef Z7_ST
- , BoolInt tMode
- #endif
- , CXzStatInfo *stat)
-{
- size_t outPos;
- size_t inPos, inLim;
- const Byte *inData;
- UInt64 inPrev, outPrev;
-
- CXzUnpacker *dec;
-
- #ifndef Z7_ST
- if (tMode)
- {
- XzDecMt_FreeOutBufs(p);
- tMode = MtDec_PrepareRead(&p->mtc);
- }
- #endif
-
- if (!p->outBuf || p->outBufSize != p->props.outStep_ST)
- {
- ISzAlloc_Free(p->allocMid, p->outBuf);
- p->outBufSize = 0;
- p->outBuf = (Byte *)ISzAlloc_Alloc(p->allocMid, p->props.outStep_ST);
- if (!p->outBuf)
- return SZ_ERROR_MEM;
- p->outBufSize = p->props.outStep_ST;
- }
-
- if (!p->inBuf || p->inBufSize != p->props.inBufSize_ST)
- {
- ISzAlloc_Free(p->allocMid, p->inBuf);
- p->inBufSize = 0;
- p->inBuf = (Byte *)ISzAlloc_Alloc(p->allocMid, p->props.inBufSize_ST);
- if (!p->inBuf)
- return SZ_ERROR_MEM;
- p->inBufSize = p->props.inBufSize_ST;
- }
-
- dec = &p->dec;
- dec->decodeToStreamSignature = False;
- // dec->decodeOnlyOneBlock = False;
-
- XzUnpacker_SetOutBuf(dec, NULL, 0);
-
- inPrev = p->inProcessed;
- outPrev = p->outProcessed;
-
- inPos = 0;
- inLim = 0;
- inData = NULL;
- outPos = 0;
-
- for (;;)
- {
- SizeT outSize;
- BoolInt finished;
- ECoderFinishMode finishMode;
- SizeT inProcessed;
- ECoderStatus status;
- SRes res;
-
- SizeT outProcessed;
-
-
-
- if (inPos == inLim)
- {
- #ifndef Z7_ST
- if (tMode)
- {
- inData = MtDec_Read(&p->mtc, &inLim);
- inPos = 0;
- if (inData)
- continue;
- tMode = False;
- inLim = 0;
- }
- #endif
-
- if (!p->readWasFinished)
- {
- inPos = 0;
- inLim = p->inBufSize;
- inData = p->inBuf;
- p->readRes = ISeqInStream_Read(p->inStream, (void *)p->inBuf, &inLim);
- p->readProcessed += inLim;
- if (inLim == 0 || p->readRes != SZ_OK)
- p->readWasFinished = True;
- }
- }
-
- outSize = p->props.outStep_ST - outPos;
-
- finishMode = CODER_FINISH_ANY;
- if (p->outSize_Defined)
- {
- const UInt64 rem = p->outSize - p->outProcessed;
- if (outSize >= rem)
- {
- outSize = (SizeT)rem;
- if (p->finishMode)
- finishMode = CODER_FINISH_END;
- }
- }
-
- inProcessed = inLim - inPos;
- outProcessed = outSize;
-
- res = XzUnpacker_Code(dec, p->outBuf + outPos, &outProcessed,
- inData + inPos, &inProcessed,
- (inPos == inLim), // srcFinished
- finishMode, &status);
-
- p->codeRes = res;
- p->status = status;
-
- inPos += inProcessed;
- outPos += outProcessed;
- p->inProcessed += inProcessed;
- p->outProcessed += outProcessed;
-
- finished = ((inProcessed == 0 && outProcessed == 0) || res != SZ_OK);
-
- if (finished || outProcessed >= outSize)
- if (outPos != 0)
- {
- const size_t written = ISeqOutStream_Write(p->outStream, p->outBuf, outPos);
- // p->outProcessed += written; // 21.01: BUG fixed
- if (written != outPos)
- {
- stat->CombinedRes_Type = SZ_ERROR_WRITE;
- return SZ_ERROR_WRITE;
- }
- outPos = 0;
- }
-
- if (p->progress && res == SZ_OK)
- {
- if (p->inProcessed - inPrev >= (1 << 22) ||
- p->outProcessed - outPrev >= (1 << 22))
- {
- res = ICompressProgress_Progress(p->progress, p->inProcessed, p->outProcessed);
- if (res != SZ_OK)
- {
- stat->CombinedRes_Type = SZ_ERROR_PROGRESS;
- stat->ProgressRes = res;
- return res;
- }
- inPrev = p->inProcessed;
- outPrev = p->outProcessed;
- }
- }
-
- if (finished)
- {
- // p->codeRes is preliminary error from XzUnpacker_Code.
- // and it can be corrected later as final result
- // so we return SZ_OK here instead of (res);
- return SZ_OK;
- // return res;
- }
- }
-}
-
-
-
-/*
-XzStatInfo_SetStat() transforms
- CXzUnpacker return code and status to combined CXzStatInfo results.
- it can convert SZ_OK to SZ_ERROR_INPUT_EOF
- it can convert SZ_ERROR_NO_ARCHIVE to SZ_OK and (DataAfterEnd = 1)
-*/
-
-static void XzStatInfo_SetStat(const CXzUnpacker *dec,
- int finishMode,
- // UInt64 readProcessed,
- UInt64 inProcessed,
- SRes res, // it's result from CXzUnpacker unpacker
- ECoderStatus status,
- BoolInt decodingTruncated,
- CXzStatInfo *stat)
-{
- UInt64 extraSize;
-
- stat->DecodingTruncated = (Byte)(decodingTruncated ? 1 : 0);
- stat->InSize = inProcessed;
- stat->NumStreams = dec->numStartedStreams;
- stat->NumBlocks = dec->numTotalBlocks;
-
- stat->UnpackSize_Defined = True;
- stat->NumStreams_Defined = True;
- stat->NumBlocks_Defined = True;
-
- extraSize = XzUnpacker_GetExtraSize(dec);
-
- if (res == SZ_OK)
- {
- if (status == CODER_STATUS_NEEDS_MORE_INPUT)
- {
- // CODER_STATUS_NEEDS_MORE_INPUT is expected status for correct xz streams
- // any extra data is part of correct data
- extraSize = 0;
- // if xz stream was not finished, then we need more data
- if (!XzUnpacker_IsStreamWasFinished(dec))
- res = SZ_ERROR_INPUT_EOF;
- }
- else
- {
- // CODER_STATUS_FINISHED_WITH_MARK is not possible for multi stream xz decoding
- // so he we have (status == CODER_STATUS_NOT_FINISHED)
- // if (status != CODER_STATUS_FINISHED_WITH_MARK)
- if (!decodingTruncated || finishMode)
- res = SZ_ERROR_DATA;
- }
- }
- else if (res == SZ_ERROR_NO_ARCHIVE)
- {
- /*
- SZ_ERROR_NO_ARCHIVE is possible for 2 states:
- XZ_STATE_STREAM_HEADER - if bad signature or bad CRC
- XZ_STATE_STREAM_PADDING - if non-zero padding data
- extraSize and inProcessed don't include "bad" byte
- */
- // if (inProcessed == extraSize), there was no any good xz stream header, and we keep error
- if (inProcessed != extraSize) // if there were good xz streams before error
- {
- // if (extraSize != 0 || readProcessed != inProcessed)
- {
- // he we suppose that all xz streams were finsihed OK, and we have
- // some extra data after all streams
- stat->DataAfterEnd = True;
- res = SZ_OK;
- }
- }
- }
-
- if (stat->DecodeRes == SZ_OK)
- stat->DecodeRes = res;
-
- stat->InSize -= extraSize;
-}
-
-
-
-SRes XzDecMt_Decode(CXzDecMtHandle p,
- const CXzDecMtProps *props,
- const UInt64 *outDataSize, int finishMode,
- ISeqOutStreamPtr outStream,
- // Byte *outBuf, size_t *outBufSize,
- ISeqInStreamPtr inStream,
- // const Byte *inData, size_t inDataSize,
- CXzStatInfo *stat,
- int *isMT,
- ICompressProgressPtr progress)
-{
- // GET_CXzDecMt_p
- #ifndef Z7_ST
- BoolInt tMode;
- #endif
-
- XzStatInfo_Clear(stat);
-
- p->props = *props;
-
- p->inStream = inStream;
- p->outStream = outStream;
- p->progress = progress;
- // p->stat = stat;
-
- p->outSize = 0;
- p->outSize_Defined = False;
- if (outDataSize)
- {
- p->outSize_Defined = True;
- p->outSize = *outDataSize;
- }
-
- p->finishMode = finishMode;
-
- // p->outSize = 457; p->outSize_Defined = True; p->finishMode = False; // for test
-
- p->writeRes = SZ_OK;
- p->outProcessed = 0;
- p->inProcessed = 0;
- p->readProcessed = 0;
- p->readWasFinished = False;
- p->readRes = SZ_OK;
-
- p->codeRes = SZ_OK;
- p->status = CODER_STATUS_NOT_SPECIFIED;
-
- XzUnpacker_Init(&p->dec);
-
- *isMT = False;
-
- /*
- p->outBuf = NULL;
- p->outBufSize = 0;
- if (!outStream)
- {
- p->outBuf = outBuf;
- p->outBufSize = *outBufSize;
- *outBufSize = 0;
- }
- */
-
-
- #ifndef Z7_ST
-
- p->isBlockHeaderState_Parse = False;
- p->isBlockHeaderState_Write = False;
- // p->numBadBlocks = 0;
- p->mainErrorCode = SZ_OK;
- p->mainDecoderWasCalled = False;
-
- tMode = False;
-
- if (p->props.numThreads > 1)
- {
- IMtDecCallback2 vt;
- BoolInt needContinue;
- SRes res;
-    // we just free the ST buffers here,
-    // but we still keep the state variables that were set in XzUnpacker_Init()
- XzDecMt_FreeSt(p);
-
- p->outProcessed_Parse = 0;
- p->parsing_Truncated = False;
-
- p->numStreams = 0;
- p->numTotalBlocks = 0;
- p->numBlocks = 0;
- p->finishedDecoderIndex = -1;
-
- if (!p->mtc_WasConstructed)
- {
- p->mtc_WasConstructed = True;
- MtDec_Construct(&p->mtc);
- }
-
- p->mtc.mtCallback = &vt;
- p->mtc.mtCallbackObject = p;
-
- p->mtc.progress = progress;
- p->mtc.inStream = inStream;
- p->mtc.alloc = &p->alignOffsetAlloc.vt;
- // p->mtc.inData = inData;
- // p->mtc.inDataSize = inDataSize;
- p->mtc.inBufSize = p->props.inBufSize_MT;
- // p->mtc.inBlockMax = p->props.inBlockMax;
- p->mtc.numThreadsMax = p->props.numThreads;
-
- *isMT = True;
-
- vt.Parse = XzDecMt_Callback_Parse;
- vt.PreCode = XzDecMt_Callback_PreCode;
- vt.Code = XzDecMt_Callback_Code;
- vt.Write = XzDecMt_Callback_Write;
-
-
- res = MtDec_Code(&p->mtc);
-
-
- stat->InSize = p->mtc.inProcessed;
-
- p->inProcessed = p->mtc.inProcessed;
- p->readRes = p->mtc.readRes;
- p->readWasFinished = p->mtc.readWasFinished;
- p->readProcessed = p->mtc.readProcessed;
-
- tMode = True;
- needContinue = False;
-
- if (res == SZ_OK)
- {
- if (p->mtc.mtProgress.res != SZ_OK)
- {
- res = p->mtc.mtProgress.res;
- stat->ProgressRes = res;
- stat->CombinedRes_Type = SZ_ERROR_PROGRESS;
- }
- else
- needContinue = p->mtc.needContinue;
- }
-
- if (!needContinue)
- {
- {
- SRes codeRes;
- BoolInt truncated = False;
- ECoderStatus status;
- const CXzUnpacker *dec;
-
- stat->OutSize = p->outProcessed;
-
- if (p->finishedDecoderIndex >= 0)
- {
- const CXzDecMtThread *coder = &p->coders[(unsigned)p->finishedDecoderIndex];
- codeRes = coder->codeRes;
- dec = &coder->dec;
- status = coder->status;
- }
- else if (p->mainDecoderWasCalled)
- {
- codeRes = p->codeRes;
- dec = &p->dec;
- status = p->status;
- truncated = p->parsing_Truncated;
- }
- else
- return SZ_ERROR_FAIL;
-
- if (p->mainErrorCode != SZ_OK)
- stat->DecodeRes = p->mainErrorCode;
-
- XzStatInfo_SetStat(dec, p->finishMode,
- // p->mtc.readProcessed,
- p->mtc.inProcessed,
- codeRes, status,
- truncated,
- stat);
- }
-
- if (res == SZ_OK)
- {
- stat->ReadRes = p->mtc.readRes;
-
- if (p->writeRes != SZ_OK)
- {
- res = p->writeRes;
- stat->CombinedRes_Type = SZ_ERROR_WRITE;
- }
- else if (p->mtc.readRes != SZ_OK
- // && p->mtc.inProcessed == p->mtc.readProcessed
- && stat->DecodeRes == SZ_ERROR_INPUT_EOF)
- {
- res = p->mtc.readRes;
- stat->CombinedRes_Type = SZ_ERROR_READ;
- }
- else if (stat->DecodeRes != SZ_OK)
- res = stat->DecodeRes;
- }
-
- stat->CombinedRes = res;
- if (stat->CombinedRes_Type == SZ_OK)
- stat->CombinedRes_Type = res;
- return res;
- }
-
- PRF_STR("----- decoding ST -----")
- }
-
- #endif
-
-
- *isMT = False;
-
- {
- SRes res = XzDecMt_Decode_ST(p
- #ifndef Z7_ST
- , tMode
- #endif
- , stat
- );
-
- #ifndef Z7_ST
- // we must set error code from MT decoding at first
- if (p->mainErrorCode != SZ_OK)
- stat->DecodeRes = p->mainErrorCode;
- #endif
-
- XzStatInfo_SetStat(&p->dec,
- p->finishMode,
- // p->readProcessed,
- p->inProcessed,
- p->codeRes, p->status,
- False, // truncated
- stat);
-
- stat->ReadRes = p->readRes;
-
- if (res == SZ_OK)
- {
- if (p->readRes != SZ_OK
- // && p->inProcessed == p->readProcessed
- && stat->DecodeRes == SZ_ERROR_INPUT_EOF)
- {
-      // we set the read error as the combined error, only if that error was the reason
-      // of the decoding problem
- res = p->readRes;
- stat->CombinedRes_Type = SZ_ERROR_READ;
- }
- else if (stat->DecodeRes != SZ_OK)
- res = stat->DecodeRes;
- }
-
- stat->CombinedRes = res;
- if (stat->CombinedRes_Type == SZ_OK)
- stat->CombinedRes_Type = res;
- return res;
- }
-}
-
-#undef PRF
-#undef PRF_STR
-#undef PRF_STR_INT_2
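
For reference, a minimal usage sketch (assumed, not part of this patch) of the multi-threaded decode entry point removed above, showing how a caller reads the CXzStatInfo fields that XzStatInfo_SetStat() fills in. XzDecMt_Create, XzDecMtProps_Init and XzDecMt_Destroy are taken from the matching Xz.h declarations and are not shown in this hunk; the stream and allocator objects are placeholders.

    /* Sketch only, under the assumptions stated above. */
    #include "Xz.h"

    static SRes DecodeXzMt(ISeqInStreamPtr inStream, ISeqOutStreamPtr outStream, ISzAllocPtr alloc)
    {
      CXzDecMtProps props;
      CXzStatInfo stat;
      int isMT = 0;
      SRes res;
      CXzDecMtHandle dec = XzDecMt_Create(alloc, alloc);   // assumed: (alloc, allocMid)
      if (!dec)
        return SZ_ERROR_MEM;
      XzDecMtProps_Init(&props);                           // assumed: sets default thread/buffer sizes
      res = XzDecMt_Decode(dec, &props,
          NULL,   // outDataSize is unknown
          1,      // finishMode: require the full archive
          outStream, inStream, &stat, &isMT, NULL /* progress */);
      if (res != SZ_OK)
      {
        // CombinedRes_Type tells which stage produced the error (see the logic deleted above)
        if (stat.CombinedRes_Type == SZ_ERROR_READ)       { /* input stream failed */ }
        else if (stat.CombinedRes_Type == SZ_ERROR_WRITE) { /* output stream failed */ }
        else                                              { /* decoder error, see stat.DecodeRes */ }
      }
      else if (stat.DataAfterEnd)
      {
        // trailing non-xz data was tolerated; stat.InSize excludes it
      }
      XzDecMt_Destroy(dec);                                // assumed destructor
      return res;
    }
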
diff --git a/3rdparty/7z/src/XzEnc.c b/3rdparty/7z/src/XzEnc.c
deleted file mode 100644
index d0c36ea65c..0000000000
--- a/3rdparty/7z/src/XzEnc.c
+++ /dev/null
@@ -1,1362 +0,0 @@
-/* XzEnc.c -- Xz Encode
-2023-04-13 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "7zCrc.h"
-#include "Bra.h"
-#include "CpuArch.h"
-
-#ifdef USE_SUBBLOCK
-#include "Bcj3Enc.c"
-#include "SbFind.c"
-#include "SbEnc.c"
-#endif
-
-#include "XzEnc.h"
-
-// #define Z7_ST
-
-#ifndef Z7_ST
-#include "MtCoder.h"
-#else
-#define MTCODER_THREADS_MAX 1
-#define MTCODER_BLOCKS_MAX 1
-#endif
-
-#define XZ_GET_PAD_SIZE(dataSize) ((4 - ((unsigned)(dataSize) & 3)) & 3)
-
-/* max pack size for LZMA2 block + check (64 bytes): */
-#define XZ_GET_MAX_BLOCK_PACK_SIZE(unpackSize) ((unpackSize) + ((unpackSize) >> 10) + 16 + 64)
-
-#define XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(unpackSize) (XZ_BLOCK_HEADER_SIZE_MAX + XZ_GET_MAX_BLOCK_PACK_SIZE(unpackSize))
-
-
-// #define XzBlock_ClearFlags(p) (p)->flags = 0;
-#define XzBlock_ClearFlags_SetNumFilters(p, n) (p)->flags = (Byte)((n) - 1);
-#define XzBlock_SetHasPackSize(p) (p)->flags |= XZ_BF_PACK_SIZE;
-#define XzBlock_SetHasUnpackSize(p) (p)->flags |= XZ_BF_UNPACK_SIZE;
-
-
-static SRes WriteBytes(ISeqOutStreamPtr s, const void *buf, size_t size)
-{
- return (ISeqOutStream_Write(s, buf, size) == size) ? SZ_OK : SZ_ERROR_WRITE;
-}
-
-static SRes WriteBytes_UpdateCrc(ISeqOutStreamPtr s, const void *buf, size_t size, UInt32 *crc)
-{
- *crc = CrcUpdate(*crc, buf, size);
- return WriteBytes(s, buf, size);
-}
-
-
-static SRes Xz_WriteHeader(CXzStreamFlags f, ISeqOutStreamPtr s)
-{
- UInt32 crc;
- Byte header[XZ_STREAM_HEADER_SIZE];
- memcpy(header, XZ_SIG, XZ_SIG_SIZE);
- header[XZ_SIG_SIZE] = (Byte)(f >> 8);
- header[XZ_SIG_SIZE + 1] = (Byte)(f & 0xFF);
- crc = CrcCalc(header + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE);
- SetUi32(header + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE, crc)
- return WriteBytes(s, header, XZ_STREAM_HEADER_SIZE);
-}
-
-
-static SRes XzBlock_WriteHeader(const CXzBlock *p, ISeqOutStreamPtr s)
-{
- Byte header[XZ_BLOCK_HEADER_SIZE_MAX];
-
- unsigned pos = 1;
- unsigned numFilters, i;
- header[pos++] = p->flags;
-
- if (XzBlock_HasPackSize(p)) pos += Xz_WriteVarInt(header + pos, p->packSize);
- if (XzBlock_HasUnpackSize(p)) pos += Xz_WriteVarInt(header + pos, p->unpackSize);
- numFilters = XzBlock_GetNumFilters(p);
-
- for (i = 0; i < numFilters; i++)
- {
- const CXzFilter *f = &p->filters[i];
- pos += Xz_WriteVarInt(header + pos, f->id);
- pos += Xz_WriteVarInt(header + pos, f->propsSize);
- memcpy(header + pos, f->props, f->propsSize);
- pos += f->propsSize;
- }
-
- while ((pos & 3) != 0)
- header[pos++] = 0;
-
- header[0] = (Byte)(pos >> 2);
- SetUi32(header + pos, CrcCalc(header, pos))
- return WriteBytes(s, header, pos + 4);
-}
-
-
-
-
-typedef struct
-{
- size_t numBlocks;
- size_t size;
- size_t allocated;
- Byte *blocks;
-} CXzEncIndex;
-
-
-static void XzEncIndex_Construct(CXzEncIndex *p)
-{
- p->numBlocks = 0;
- p->size = 0;
- p->allocated = 0;
- p->blocks = NULL;
-}
-
-static void XzEncIndex_Init(CXzEncIndex *p)
-{
- p->numBlocks = 0;
- p->size = 0;
-}
-
-static void XzEncIndex_Free(CXzEncIndex *p, ISzAllocPtr alloc)
-{
- if (p->blocks)
- {
- ISzAlloc_Free(alloc, p->blocks);
- p->blocks = NULL;
- }
- p->numBlocks = 0;
- p->size = 0;
- p->allocated = 0;
-}
-
-
-static SRes XzEncIndex_ReAlloc(CXzEncIndex *p, size_t newSize, ISzAllocPtr alloc)
-{
- Byte *blocks = (Byte *)ISzAlloc_Alloc(alloc, newSize);
- if (!blocks)
- return SZ_ERROR_MEM;
- if (p->size != 0)
- memcpy(blocks, p->blocks, p->size);
- if (p->blocks)
- ISzAlloc_Free(alloc, p->blocks);
- p->blocks = blocks;
- p->allocated = newSize;
- return SZ_OK;
-}
-
-
-static SRes XzEncIndex_PreAlloc(CXzEncIndex *p, UInt64 numBlocks, UInt64 unpackSize, UInt64 totalSize, ISzAllocPtr alloc)
-{
- UInt64 pos;
- {
- Byte buf[32];
- unsigned pos2 = Xz_WriteVarInt(buf, totalSize);
- pos2 += Xz_WriteVarInt(buf + pos2, unpackSize);
- pos = numBlocks * pos2;
- }
-
- if (pos <= p->allocated - p->size)
- return SZ_OK;
- {
- UInt64 newSize64 = p->size + pos;
- size_t newSize = (size_t)newSize64;
- if (newSize != newSize64)
- return SZ_ERROR_MEM;
- return XzEncIndex_ReAlloc(p, newSize, alloc);
- }
-}
-
-
-static SRes XzEncIndex_AddIndexRecord(CXzEncIndex *p, UInt64 unpackSize, UInt64 totalSize, ISzAllocPtr alloc)
-{
- Byte buf[32];
- unsigned pos = Xz_WriteVarInt(buf, totalSize);
- pos += Xz_WriteVarInt(buf + pos, unpackSize);
-
- if (pos > p->allocated - p->size)
- {
- size_t newSize = p->allocated * 2 + 16 * 2;
- if (newSize < p->size + pos)
- return SZ_ERROR_MEM;
- RINOK(XzEncIndex_ReAlloc(p, newSize, alloc))
- }
- memcpy(p->blocks + p->size, buf, pos);
- p->size += pos;
- p->numBlocks++;
- return SZ_OK;
-}
-
-
-static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, ISeqOutStreamPtr s)
-{
- Byte buf[32];
- UInt64 globalPos;
- UInt32 crc = CRC_INIT_VAL;
- unsigned pos = 1 + Xz_WriteVarInt(buf + 1, p->numBlocks);
-
- globalPos = pos;
- buf[0] = 0;
- RINOK(WriteBytes_UpdateCrc(s, buf, pos, &crc))
- RINOK(WriteBytes_UpdateCrc(s, p->blocks, p->size, &crc))
- globalPos += p->size;
-
- pos = XZ_GET_PAD_SIZE(globalPos);
- buf[1] = 0;
- buf[2] = 0;
- buf[3] = 0;
- globalPos += pos;
-
- crc = CrcUpdate(crc, buf + 4 - pos, pos);
- SetUi32(buf + 4, CRC_GET_DIGEST(crc))
-
- SetUi32(buf + 8 + 4, (UInt32)(globalPos >> 2))
- buf[8 + 8] = (Byte)(flags >> 8);
- buf[8 + 9] = (Byte)(flags & 0xFF);
- SetUi32(buf + 8, CrcCalc(buf + 8 + 4, 6))
- buf[8 + 10] = XZ_FOOTER_SIG_0;
- buf[8 + 11] = XZ_FOOTER_SIG_1;
-
- return WriteBytes(s, buf + 4 - pos, pos + 4 + 12);
-}
-
-
-
-/* ---------- CSeqCheckInStream ---------- */
-
-typedef struct
-{
- ISeqInStream vt;
- ISeqInStreamPtr realStream;
- const Byte *data;
- UInt64 limit;
- UInt64 processed;
- int realStreamFinished;
- CXzCheck check;
-} CSeqCheckInStream;
-
-static void SeqCheckInStream_Init(CSeqCheckInStream *p, unsigned checkMode)
-{
- p->limit = (UInt64)(Int64)-1;
- p->processed = 0;
- p->realStreamFinished = 0;
- XzCheck_Init(&p->check, checkMode);
-}
-
-static void SeqCheckInStream_GetDigest(CSeqCheckInStream *p, Byte *digest)
-{
- XzCheck_Final(&p->check, digest);
-}
-
-static SRes SeqCheckInStream_Read(ISeqInStreamPtr pp, void *data, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSeqCheckInStream)
- size_t size2 = *size;
- SRes res = SZ_OK;
-
- if (p->limit != (UInt64)(Int64)-1)
- {
- UInt64 rem = p->limit - p->processed;
- if (size2 > rem)
- size2 = (size_t)rem;
- }
- if (size2 != 0)
- {
- if (p->realStream)
- {
- res = ISeqInStream_Read(p->realStream, data, &size2);
- p->realStreamFinished = (size2 == 0) ? 1 : 0;
- }
- else
- memcpy(data, p->data + (size_t)p->processed, size2);
- XzCheck_Update(&p->check, data, size2);
- p->processed += size2;
- }
- *size = size2;
- return res;
-}
-
-
-/* ---------- CSeqSizeOutStream ---------- */
-
-typedef struct
-{
- ISeqOutStream vt;
- ISeqOutStreamPtr realStream;
- Byte *outBuf;
- size_t outBufLimit;
- UInt64 processed;
-} CSeqSizeOutStream;
-
-static size_t SeqSizeOutStream_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSeqSizeOutStream)
- if (p->realStream)
- size = ISeqOutStream_Write(p->realStream, data, size);
- else
- {
- if (size > p->outBufLimit - (size_t)p->processed)
- return 0;
- memcpy(p->outBuf + (size_t)p->processed, data, size);
- }
- p->processed += size;
- return size;
-}
-
-
-/* ---------- CSeqInFilter ---------- */
-
-#define FILTER_BUF_SIZE (1 << 20)
-
-typedef struct
-{
- ISeqInStream vt;
- ISeqInStreamPtr realStream;
- IStateCoder StateCoder;
- Byte *buf;
- size_t curPos;
- size_t endPos;
- int srcWasFinished;
-} CSeqInFilter;
-
-
-static const z7_Func_BranchConv g_Funcs_BranchConv_RISC_Enc[] =
-{
- Z7_BRANCH_CONV_ENC(PPC),
- Z7_BRANCH_CONV_ENC(IA64),
- Z7_BRANCH_CONV_ENC(ARM),
- Z7_BRANCH_CONV_ENC(ARMT),
- Z7_BRANCH_CONV_ENC(SPARC),
- Z7_BRANCH_CONV_ENC(ARM64)
-};
-
-static SizeT XzBcFilterStateBase_Filter_Enc(CXzBcFilterStateBase *p, Byte *data, SizeT size)
-{
- switch (p->methodId)
- {
- case XZ_ID_Delta:
- Delta_Encode(p->delta_State, p->delta, data, size);
- break;
- case XZ_ID_X86:
- size = (SizeT)(z7_BranchConvSt_X86_Enc(data, size, p->ip, &p->X86_State) - data);
- break;
- default:
- if (p->methodId >= XZ_ID_PPC)
- {
- const UInt32 i = p->methodId - XZ_ID_PPC;
- if (i < Z7_ARRAY_SIZE(g_Funcs_BranchConv_RISC_Enc))
- size = (SizeT)(g_Funcs_BranchConv_RISC_Enc[i](data, size, p->ip) - data);
- }
- break;
- }
- p->ip += (UInt32)size;
- return size;
-}
-
-
-static SRes SeqInFilter_Init(CSeqInFilter *p, const CXzFilter *props, ISzAllocPtr alloc)
-{
- if (!p->buf)
- {
- p->buf = (Byte *)ISzAlloc_Alloc(alloc, FILTER_BUF_SIZE);
- if (!p->buf)
- return SZ_ERROR_MEM;
- }
- p->curPos = p->endPos = 0;
- p->srcWasFinished = 0;
- RINOK(Xz_StateCoder_Bc_SetFromMethod_Func(&p->StateCoder, props->id, XzBcFilterStateBase_Filter_Enc, alloc))
- RINOK(p->StateCoder.SetProps(p->StateCoder.p, props->props, props->propsSize, alloc))
- p->StateCoder.Init(p->StateCoder.p);
- return SZ_OK;
-}
-
-
-static SRes SeqInFilter_Read(ISeqInStreamPtr pp, void *data, size_t *size)
-{
- Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSeqInFilter)
- const size_t sizeOriginal = *size;
- if (sizeOriginal == 0)
- return SZ_OK;
- *size = 0;
-
- for (;;)
- {
- if (!p->srcWasFinished && p->curPos == p->endPos)
- {
- p->curPos = 0;
- p->endPos = FILTER_BUF_SIZE;
- RINOK(ISeqInStream_Read(p->realStream, p->buf, &p->endPos))
- if (p->endPos == 0)
- p->srcWasFinished = 1;
- }
- {
- SizeT srcLen = p->endPos - p->curPos;
- ECoderStatus status;
- SRes res;
- *size = sizeOriginal;
- res = p->StateCoder.Code2(p->StateCoder.p,
- (Byte *)data, size,
- p->buf + p->curPos, &srcLen,
- p->srcWasFinished, CODER_FINISH_ANY,
- &status);
- p->curPos += srcLen;
- if (*size != 0 || srcLen == 0 || res != SZ_OK)
- return res;
- }
- }
-}
-
-static void SeqInFilter_Construct(CSeqInFilter *p)
-{
- p->buf = NULL;
- p->StateCoder.p = NULL;
- p->vt.Read = SeqInFilter_Read;
-}
-
-static void SeqInFilter_Free(CSeqInFilter *p, ISzAllocPtr alloc)
-{
- if (p->StateCoder.p)
- {
- p->StateCoder.Free(p->StateCoder.p, alloc);
- p->StateCoder.p = NULL;
- }
- if (p->buf)
- {
- ISzAlloc_Free(alloc, p->buf);
- p->buf = NULL;
- }
-}
-
-
-/* ---------- CSbEncInStream ---------- */
-
-#ifdef USE_SUBBLOCK
-
-typedef struct
-{
- ISeqInStream vt;
- ISeqInStreamPtr inStream;
- CSbEnc enc;
-} CSbEncInStream;
-
-static SRes SbEncInStream_Read(ISeqInStreamPtr pp, void *data, size_t *size)
-{
- CSbEncInStream *p = Z7_CONTAINER_FROM_VTBL(pp, CSbEncInStream, vt);
- size_t sizeOriginal = *size;
- if (sizeOriginal == 0)
- return SZ_OK;
-
- for (;;)
- {
- if (p->enc.needRead && !p->enc.readWasFinished)
- {
- size_t processed = p->enc.needReadSizeMax;
- RINOK(p->inStream->Read(p->inStream, p->enc.buf + p->enc.readPos, &processed))
- p->enc.readPos += processed;
- if (processed == 0)
- {
- p->enc.readWasFinished = True;
- p->enc.isFinalFinished = True;
- }
- p->enc.needRead = False;
- }
-
- *size = sizeOriginal;
- RINOK(SbEnc_Read(&p->enc, data, size))
- if (*size != 0 || !p->enc.needRead)
- return SZ_OK;
- }
-}
-
-void SbEncInStream_Construct(CSbEncInStream *p, ISzAllocPtr alloc)
-{
- SbEnc_Construct(&p->enc, alloc);
- p->vt.Read = SbEncInStream_Read;
-}
-
-SRes SbEncInStream_Init(CSbEncInStream *p)
-{
- return SbEnc_Init(&p->enc);
-}
-
-void SbEncInStream_Free(CSbEncInStream *p)
-{
- SbEnc_Free(&p->enc);
-}
-
-#endif
-
-
-
-/* ---------- CXzProps ---------- */
-
-
-void XzFilterProps_Init(CXzFilterProps *p)
-{
- p->id = 0;
- p->delta = 0;
- p->ip = 0;
- p->ipDefined = False;
-}
-
-void XzProps_Init(CXzProps *p)
-{
- p->checkId = XZ_CHECK_CRC32;
- p->blockSize = XZ_PROPS_BLOCK_SIZE_AUTO;
- p->numBlockThreads_Reduced = -1;
- p->numBlockThreads_Max = -1;
- p->numTotalThreads = -1;
- p->reduceSize = (UInt64)(Int64)-1;
- p->forceWriteSizesInHeader = 0;
- // p->forceWriteSizesInHeader = 1;
-
- XzFilterProps_Init(&p->filterProps);
- Lzma2EncProps_Init(&p->lzma2Props);
-}
-
-
-static void XzEncProps_Normalize_Fixed(CXzProps *p)
-{
- UInt64 fileSize;
- int t1, t1n, t2, t2r, t3;
- {
- CLzma2EncProps tp = p->lzma2Props;
- if (tp.numTotalThreads <= 0)
- tp.numTotalThreads = p->numTotalThreads;
- Lzma2EncProps_Normalize(&tp);
- t1n = tp.numTotalThreads;
- }
-
- t1 = p->lzma2Props.numTotalThreads;
- t2 = p->numBlockThreads_Max;
- t3 = p->numTotalThreads;
-
- if (t2 > MTCODER_THREADS_MAX)
- t2 = MTCODER_THREADS_MAX;
-
- if (t3 <= 0)
- {
- if (t2 <= 0)
- t2 = 1;
- t3 = t1n * t2;
- }
- else if (t2 <= 0)
- {
- t2 = t3 / t1n;
- if (t2 == 0)
- {
- t1 = 1;
- t2 = t3;
- }
- if (t2 > MTCODER_THREADS_MAX)
- t2 = MTCODER_THREADS_MAX;
- }
- else if (t1 <= 0)
- {
- t1 = t3 / t2;
- if (t1 == 0)
- t1 = 1;
- }
- else
- t3 = t1n * t2;
-
- p->lzma2Props.numTotalThreads = t1;
-
- t2r = t2;
-
- fileSize = p->reduceSize;
-
- if ((p->blockSize < fileSize || fileSize == (UInt64)(Int64)-1))
- p->lzma2Props.lzmaProps.reduceSize = p->blockSize;
-
- Lzma2EncProps_Normalize(&p->lzma2Props);
-
- t1 = p->lzma2Props.numTotalThreads;
-
- {
- if (t2 > 1 && fileSize != (UInt64)(Int64)-1)
- {
- UInt64 numBlocks = fileSize / p->blockSize;
- if (numBlocks * p->blockSize != fileSize)
- numBlocks++;
- if (numBlocks < (unsigned)t2)
- {
- t2r = (int)numBlocks;
- if (t2r == 0)
- t2r = 1;
- t3 = t1 * t2r;
- }
- }
- }
-
- p->numBlockThreads_Max = t2;
- p->numBlockThreads_Reduced = t2r;
- p->numTotalThreads = t3;
-}
-
-
-static void XzProps_Normalize(CXzProps *p)
-{
- /* we normalize xzProps properties, but we normalize only some of CXzProps::lzma2Props properties.
- Lzma2Enc_SetProps() will normalize lzma2Props later. */
-
- if (p->blockSize == XZ_PROPS_BLOCK_SIZE_SOLID)
- {
- p->lzma2Props.lzmaProps.reduceSize = p->reduceSize;
- p->numBlockThreads_Reduced = 1;
- p->numBlockThreads_Max = 1;
- if (p->lzma2Props.numTotalThreads <= 0)
- p->lzma2Props.numTotalThreads = p->numTotalThreads;
- return;
- }
- else
- {
- CLzma2EncProps *lzma2 = &p->lzma2Props;
- if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
- {
- // xz-auto
- p->lzma2Props.lzmaProps.reduceSize = p->reduceSize;
-
- if (lzma2->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
- {
- // if (xz-auto && lzma2-solid) - we use solid for both
- p->blockSize = XZ_PROPS_BLOCK_SIZE_SOLID;
- p->numBlockThreads_Reduced = 1;
- p->numBlockThreads_Max = 1;
- if (p->lzma2Props.numTotalThreads <= 0)
- p->lzma2Props.numTotalThreads = p->numTotalThreads;
- }
- else
- {
-        // if (xz-auto && (lzma2-auto || lzma2-fixed))
-        // we calculate the block size for lzma2 and use that block size for xz; lzma2 uses a single chunk per block
- CLzma2EncProps tp = p->lzma2Props;
- if (tp.numTotalThreads <= 0)
- tp.numTotalThreads = p->numTotalThreads;
-
- Lzma2EncProps_Normalize(&tp);
-
- p->blockSize = tp.blockSize; // fixed or solid
- p->numBlockThreads_Reduced = tp.numBlockThreads_Reduced;
- p->numBlockThreads_Max = tp.numBlockThreads_Max;
- if (lzma2->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
- lzma2->blockSize = tp.blockSize; // fixed or solid, LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
- if (lzma2->lzmaProps.reduceSize > tp.blockSize && tp.blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
- lzma2->lzmaProps.reduceSize = tp.blockSize;
- lzma2->numBlockThreads_Reduced = 1;
- lzma2->numBlockThreads_Max = 1;
- return;
- }
- }
- else
- {
- // xz-fixed
- // we can use xz::reduceSize or xz::blockSize as base for lzmaProps::reduceSize
-
- p->lzma2Props.lzmaProps.reduceSize = p->reduceSize;
- {
- UInt64 r = p->reduceSize;
- if (r > p->blockSize || r == (UInt64)(Int64)-1)
- r = p->blockSize;
- lzma2->lzmaProps.reduceSize = r;
- }
- if (lzma2->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
- lzma2->blockSize = LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID;
- else if (lzma2->blockSize > p->blockSize && lzma2->blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
- lzma2->blockSize = p->blockSize;
-
- XzEncProps_Normalize_Fixed(p);
- }
- }
-}
-
-
-/* ---------- CLzma2WithFilters ---------- */
-
-typedef struct
-{
- CLzma2EncHandle lzma2;
- CSeqInFilter filter;
-
- #ifdef USE_SUBBLOCK
- CSbEncInStream sb;
- #endif
-} CLzma2WithFilters;
-
-
-static void Lzma2WithFilters_Construct(CLzma2WithFilters *p)
-{
- p->lzma2 = NULL;
- SeqInFilter_Construct(&p->filter);
-
- #ifdef USE_SUBBLOCK
- SbEncInStream_Construct(&p->sb, alloc);
- #endif
-}
-
-
-static SRes Lzma2WithFilters_Create(CLzma2WithFilters *p, ISzAllocPtr alloc, ISzAllocPtr bigAlloc)
-{
- if (!p->lzma2)
- {
- p->lzma2 = Lzma2Enc_Create(alloc, bigAlloc);
- if (!p->lzma2)
- return SZ_ERROR_MEM;
- }
- return SZ_OK;
-}
-
-
-static void Lzma2WithFilters_Free(CLzma2WithFilters *p, ISzAllocPtr alloc)
-{
- #ifdef USE_SUBBLOCK
- SbEncInStream_Free(&p->sb);
- #endif
-
- SeqInFilter_Free(&p->filter, alloc);
- if (p->lzma2)
- {
- Lzma2Enc_Destroy(p->lzma2);
- p->lzma2 = NULL;
- }
-}
-
-
-typedef struct
-{
- UInt64 unpackSize;
- UInt64 totalSize;
- size_t headerSize;
-} CXzEncBlockInfo;
-
-
-static SRes Xz_CompressBlock(
- CLzma2WithFilters *lzmaf,
-
- ISeqOutStreamPtr outStream,
- Byte *outBufHeader,
- Byte *outBufData, size_t outBufDataLimit,
-
- ISeqInStreamPtr inStream,
- // UInt64 expectedSize,
- const Byte *inBuf, // used if (!inStream)
- size_t inBufSize, // used if (!inStream), it's block size, props->blockSize is ignored
-
- const CXzProps *props,
- ICompressProgressPtr progress,
- int *inStreamFinished, /* only for inStream version */
- CXzEncBlockInfo *blockSizes,
- ISzAllocPtr alloc,
- ISzAllocPtr allocBig)
-{
- CSeqCheckInStream checkInStream;
- CSeqSizeOutStream seqSizeOutStream;
- CXzBlock block;
- unsigned filterIndex = 0;
- CXzFilter *filter = NULL;
- const CXzFilterProps *fp = &props->filterProps;
- if (fp->id == 0)
- fp = NULL;
-
- *inStreamFinished = False;
-
- RINOK(Lzma2WithFilters_Create(lzmaf, alloc, allocBig))
-
- RINOK(Lzma2Enc_SetProps(lzmaf->lzma2, &props->lzma2Props))
-
- // XzBlock_ClearFlags(&block)
- XzBlock_ClearFlags_SetNumFilters(&block, 1 + (fp ? 1 : 0))
-
- if (fp)
- {
- filter = &block.filters[filterIndex++];
- filter->id = fp->id;
- filter->propsSize = 0;
-
- if (fp->id == XZ_ID_Delta)
- {
- filter->props[0] = (Byte)(fp->delta - 1);
- filter->propsSize = 1;
- }
- else if (fp->ipDefined)
- {
- Byte *ptr = filter->props;
- SetUi32(ptr, fp->ip)
- filter->propsSize = 4;
- }
- }
-
- {
- CXzFilter *f = &block.filters[filterIndex++];
- f->id = XZ_ID_LZMA2;
- f->propsSize = 1;
- f->props[0] = Lzma2Enc_WriteProperties(lzmaf->lzma2);
- }
-
- seqSizeOutStream.vt.Write = SeqSizeOutStream_Write;
- seqSizeOutStream.realStream = outStream;
- seqSizeOutStream.outBuf = outBufData;
- seqSizeOutStream.outBufLimit = outBufDataLimit;
- seqSizeOutStream.processed = 0;
-
- /*
- if (expectedSize != (UInt64)(Int64)-1)
- {
- block.unpackSize = expectedSize;
- if (props->blockSize != (UInt64)(Int64)-1)
- if (expectedSize > props->blockSize)
- block.unpackSize = props->blockSize;
- XzBlock_SetHasUnpackSize(&block)
- }
- */
-
- if (outStream)
- {
- RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt))
- }
-
- checkInStream.vt.Read = SeqCheckInStream_Read;
- SeqCheckInStream_Init(&checkInStream, props->checkId);
-
- checkInStream.realStream = inStream;
- checkInStream.data = inBuf;
- checkInStream.limit = props->blockSize;
- if (!inStream)
- checkInStream.limit = inBufSize;
-
- if (fp)
- {
- #ifdef USE_SUBBLOCK
- if (fp->id == XZ_ID_Subblock)
- {
- lzmaf->sb.inStream = &checkInStream.vt;
- RINOK(SbEncInStream_Init(&lzmaf->sb))
- }
- else
- #endif
- {
- lzmaf->filter.realStream = &checkInStream.vt;
- RINOK(SeqInFilter_Init(&lzmaf->filter, filter, alloc))
- }
- }
-
- {
- SRes res;
- Byte *outBuf = NULL;
- size_t outSize = 0;
- BoolInt useStream = (fp || inStream);
- // useStream = True;
-
- if (!useStream)
- {
- XzCheck_Update(&checkInStream.check, inBuf, inBufSize);
- checkInStream.processed = inBufSize;
- }
-
- if (!outStream)
- {
- outBuf = seqSizeOutStream.outBuf; // + (size_t)seqSizeOutStream.processed;
- outSize = seqSizeOutStream.outBufLimit; // - (size_t)seqSizeOutStream.processed;
- }
-
- res = Lzma2Enc_Encode2(lzmaf->lzma2,
- outBuf ? NULL : &seqSizeOutStream.vt,
- outBuf,
- outBuf ? &outSize : NULL,
-
- useStream ?
- (fp ?
- (
- #ifdef USE_SUBBLOCK
- (fp->id == XZ_ID_Subblock) ? &lzmaf->sb.vt:
- #endif
- &lzmaf->filter.vt) :
- &checkInStream.vt) : NULL,
-
- useStream ? NULL : inBuf,
- useStream ? 0 : inBufSize,
-
- progress);
-
- if (outBuf)
- seqSizeOutStream.processed += outSize;
-
- RINOK(res)
- blockSizes->unpackSize = checkInStream.processed;
- }
- {
- Byte buf[4 + 64];
- unsigned padSize = XZ_GET_PAD_SIZE(seqSizeOutStream.processed);
- UInt64 packSize = seqSizeOutStream.processed;
-
- buf[0] = 0;
- buf[1] = 0;
- buf[2] = 0;
- buf[3] = 0;
-
- SeqCheckInStream_GetDigest(&checkInStream, buf + 4);
- RINOK(WriteBytes(&seqSizeOutStream.vt, buf + (4 - padSize), padSize + XzFlags_GetCheckSize((CXzStreamFlags)props->checkId)))
-
- blockSizes->totalSize = seqSizeOutStream.processed - padSize;
-
- if (!outStream)
- {
- seqSizeOutStream.outBuf = outBufHeader;
- seqSizeOutStream.outBufLimit = XZ_BLOCK_HEADER_SIZE_MAX;
- seqSizeOutStream.processed = 0;
-
- block.unpackSize = blockSizes->unpackSize;
- XzBlock_SetHasUnpackSize(&block)
-
- block.packSize = packSize;
- XzBlock_SetHasPackSize(&block)
-
- RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt))
-
- blockSizes->headerSize = (size_t)seqSizeOutStream.processed;
- blockSizes->totalSize += seqSizeOutStream.processed;
- }
- }
-
- if (inStream)
- *inStreamFinished = checkInStream.realStreamFinished;
- else
- {
- *inStreamFinished = False;
- if (checkInStream.processed != inBufSize)
- return SZ_ERROR_FAIL;
- }
-
- return SZ_OK;
-}
-
-
-
-typedef struct
-{
- ICompressProgress vt;
- ICompressProgressPtr progress;
- UInt64 inOffset;
- UInt64 outOffset;
-} CCompressProgress_XzEncOffset;
-
-
-static SRes CompressProgress_XzEncOffset_Progress(ICompressProgressPtr pp, UInt64 inSize, UInt64 outSize)
-{
- const CCompressProgress_XzEncOffset *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CCompressProgress_XzEncOffset, vt);
- inSize += p->inOffset;
- outSize += p->outOffset;
- return ICompressProgress_Progress(p->progress, inSize, outSize);
-}
-
-
-
-
-struct CXzEnc
-{
- ISzAllocPtr alloc;
- ISzAllocPtr allocBig;
-
- CXzProps xzProps;
- UInt64 expectedDataSize;
-
- CXzEncIndex xzIndex;
-
- CLzma2WithFilters lzmaf_Items[MTCODER_THREADS_MAX];
-
- size_t outBufSize; /* size of allocated outBufs[i] */
- Byte *outBufs[MTCODER_BLOCKS_MAX];
-
- #ifndef Z7_ST
- unsigned checkType;
- ISeqOutStreamPtr outStream;
- BoolInt mtCoder_WasConstructed;
- CMtCoder mtCoder;
- CXzEncBlockInfo EncBlocks[MTCODER_BLOCKS_MAX];
- #endif
-};
-
-
-static void XzEnc_Construct(CXzEnc *p)
-{
- unsigned i;
-
- XzEncIndex_Construct(&p->xzIndex);
-
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- Lzma2WithFilters_Construct(&p->lzmaf_Items[i]);
-
- #ifndef Z7_ST
- p->mtCoder_WasConstructed = False;
- {
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- p->outBufs[i] = NULL;
- p->outBufSize = 0;
- }
- #endif
-}
-
-
-static void XzEnc_FreeOutBufs(CXzEnc *p)
-{
- unsigned i;
- for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
- if (p->outBufs[i])
- {
- ISzAlloc_Free(p->alloc, p->outBufs[i]);
- p->outBufs[i] = NULL;
- }
- p->outBufSize = 0;
-}
-
-
-static void XzEnc_Free(CXzEnc *p, ISzAllocPtr alloc)
-{
- unsigned i;
-
- XzEncIndex_Free(&p->xzIndex, alloc);
-
- for (i = 0; i < MTCODER_THREADS_MAX; i++)
- Lzma2WithFilters_Free(&p->lzmaf_Items[i], alloc);
-
- #ifndef Z7_ST
- if (p->mtCoder_WasConstructed)
- {
- MtCoder_Destruct(&p->mtCoder);
- p->mtCoder_WasConstructed = False;
- }
- XzEnc_FreeOutBufs(p);
- #endif
-}
-
-
-CXzEncHandle XzEnc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig)
-{
- CXzEnc *p = (CXzEnc *)ISzAlloc_Alloc(alloc, sizeof(CXzEnc));
- if (!p)
- return NULL;
- XzEnc_Construct(p);
- XzProps_Init(&p->xzProps);
- XzProps_Normalize(&p->xzProps);
- p->expectedDataSize = (UInt64)(Int64)-1;
- p->alloc = alloc;
- p->allocBig = allocBig;
- return (CXzEncHandle)p;
-}
-
-// #define GET_CXzEnc_p CXzEnc *p = (CXzEnc *)(void *)pp;
-
-void XzEnc_Destroy(CXzEncHandle p)
-{
- // GET_CXzEnc_p
- XzEnc_Free(p, p->alloc);
- ISzAlloc_Free(p->alloc, p);
-}
-
-
-SRes XzEnc_SetProps(CXzEncHandle p, const CXzProps *props)
-{
- // GET_CXzEnc_p
- p->xzProps = *props;
- XzProps_Normalize(&p->xzProps);
- return SZ_OK;
-}
-
-
-void XzEnc_SetDataSize(CXzEncHandle p, UInt64 expectedDataSiize)
-{
- // GET_CXzEnc_p
- p->expectedDataSize = expectedDataSiize;
-}
-
-
-
-
-#ifndef Z7_ST
-
-static SRes XzEnc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBufIndex,
- const Byte *src, size_t srcSize, int finished)
-{
- CXzEnc *me = (CXzEnc *)pp;
- SRes res;
- CMtProgressThunk progressThunk;
-
- Byte *dest = me->outBufs[outBufIndex];
-
- UNUSED_VAR(finished)
-
- {
- CXzEncBlockInfo *bInfo = &me->EncBlocks[outBufIndex];
- bInfo->totalSize = 0;
- bInfo->unpackSize = 0;
- bInfo->headerSize = 0;
- }
-
- if (!dest)
- {
- dest = (Byte *)ISzAlloc_Alloc(me->alloc, me->outBufSize);
- if (!dest)
- return SZ_ERROR_MEM;
- me->outBufs[outBufIndex] = dest;
- }
-
- MtProgressThunk_CreateVTable(&progressThunk);
- progressThunk.mtProgress = &me->mtCoder.mtProgress;
- MtProgressThunk_INIT(&progressThunk)
-
- {
- CXzEncBlockInfo blockSizes;
- int inStreamFinished;
-
- res = Xz_CompressBlock(
- &me->lzmaf_Items[coderIndex],
-
- NULL,
- dest,
- dest + XZ_BLOCK_HEADER_SIZE_MAX, me->outBufSize - XZ_BLOCK_HEADER_SIZE_MAX,
-
- NULL,
- // srcSize, // expectedSize
- src, srcSize,
-
- &me->xzProps,
- &progressThunk.vt,
- &inStreamFinished,
- &blockSizes,
- me->alloc,
- me->allocBig);
-
- if (res == SZ_OK)
- me->EncBlocks[outBufIndex] = blockSizes;
-
- return res;
- }
-}
-
-
-static SRes XzEnc_MtCallback_Write(void *pp, unsigned outBufIndex)
-{
- CXzEnc *me = (CXzEnc *)pp;
-
- const CXzEncBlockInfo *bInfo = &me->EncBlocks[outBufIndex];
- const Byte *data = me->outBufs[outBufIndex];
-
- RINOK(WriteBytes(me->outStream, data, bInfo->headerSize))
-
- {
- UInt64 totalPackFull = bInfo->totalSize + XZ_GET_PAD_SIZE(bInfo->totalSize);
- RINOK(WriteBytes(me->outStream, data + XZ_BLOCK_HEADER_SIZE_MAX, (size_t)totalPackFull - bInfo->headerSize))
- }
-
- return XzEncIndex_AddIndexRecord(&me->xzIndex, bInfo->unpackSize, bInfo->totalSize, me->alloc);
-}
-
-#endif
-
-
-
-SRes XzEnc_Encode(CXzEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress)
-{
- // GET_CXzEnc_p
-
- const CXzProps *props = &p->xzProps;
-
- XzEncIndex_Init(&p->xzIndex);
- {
- UInt64 numBlocks = 1;
- UInt64 blockSize = props->blockSize;
-
- if (blockSize != XZ_PROPS_BLOCK_SIZE_SOLID
- && props->reduceSize != (UInt64)(Int64)-1)
- {
- numBlocks = props->reduceSize / blockSize;
- if (numBlocks * blockSize != props->reduceSize)
- numBlocks++;
- }
- else
- blockSize = (UInt64)1 << 62;
-
- RINOK(XzEncIndex_PreAlloc(&p->xzIndex, numBlocks, blockSize, XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(blockSize), p->alloc))
- }
-
- RINOK(Xz_WriteHeader((CXzStreamFlags)props->checkId, outStream))
-
-
- #ifndef Z7_ST
- if (props->numBlockThreads_Reduced > 1)
- {
- IMtCoderCallback2 vt;
-
- if (!p->mtCoder_WasConstructed)
- {
- p->mtCoder_WasConstructed = True;
- MtCoder_Construct(&p->mtCoder);
- }
-
- vt.Code = XzEnc_MtCallback_Code;
- vt.Write = XzEnc_MtCallback_Write;
-
- p->checkType = props->checkId;
- p->xzProps = *props;
-
- p->outStream = outStream;
-
- p->mtCoder.allocBig = p->allocBig;
- p->mtCoder.progress = progress;
- p->mtCoder.inStream = inStream;
- p->mtCoder.inData = NULL;
- p->mtCoder.inDataSize = 0;
- p->mtCoder.mtCallback = &vt;
- p->mtCoder.mtCallbackObject = p;
-
- if ( props->blockSize == XZ_PROPS_BLOCK_SIZE_SOLID
- || props->blockSize == XZ_PROPS_BLOCK_SIZE_AUTO)
- return SZ_ERROR_FAIL;
-
- p->mtCoder.blockSize = (size_t)props->blockSize;
- if (p->mtCoder.blockSize != props->blockSize)
- return SZ_ERROR_PARAM; /* SZ_ERROR_MEM */
-
- {
- size_t destBlockSize = XZ_BLOCK_HEADER_SIZE_MAX + XZ_GET_MAX_BLOCK_PACK_SIZE(p->mtCoder.blockSize);
- if (destBlockSize < p->mtCoder.blockSize)
- return SZ_ERROR_PARAM;
- if (p->outBufSize != destBlockSize)
- XzEnc_FreeOutBufs(p);
- p->outBufSize = destBlockSize;
- }
-
- p->mtCoder.numThreadsMax = (unsigned)props->numBlockThreads_Max;
- p->mtCoder.expectedDataSize = p->expectedDataSize;
-
- RINOK(MtCoder_Code(&p->mtCoder))
- }
- else
- #endif
- {
- int writeStartSizes;
- CCompressProgress_XzEncOffset progress2;
- Byte *bufData = NULL;
- size_t bufSize = 0;
-
- progress2.vt.Progress = CompressProgress_XzEncOffset_Progress;
- progress2.inOffset = 0;
- progress2.outOffset = 0;
- progress2.progress = progress;
-
- writeStartSizes = 0;
-
- if (props->blockSize != XZ_PROPS_BLOCK_SIZE_SOLID)
- {
- writeStartSizes = (props->forceWriteSizesInHeader > 0);
-
- if (writeStartSizes)
- {
- size_t t2;
- size_t t = (size_t)props->blockSize;
- if (t != props->blockSize)
- return SZ_ERROR_PARAM;
- t = XZ_GET_MAX_BLOCK_PACK_SIZE(t);
- if (t < props->blockSize)
- return SZ_ERROR_PARAM;
- t2 = XZ_BLOCK_HEADER_SIZE_MAX + t;
- if (!p->outBufs[0] || t2 != p->outBufSize)
- {
- XzEnc_FreeOutBufs(p);
- p->outBufs[0] = (Byte *)ISzAlloc_Alloc(p->alloc, t2);
- if (!p->outBufs[0])
- return SZ_ERROR_MEM;
- p->outBufSize = t2;
- }
- bufData = p->outBufs[0] + XZ_BLOCK_HEADER_SIZE_MAX;
- bufSize = t;
- }
- }
-
- for (;;)
- {
- CXzEncBlockInfo blockSizes;
- int inStreamFinished;
-
- /*
- UInt64 rem = (UInt64)(Int64)-1;
- if (props->reduceSize != (UInt64)(Int64)-1
- && props->reduceSize >= progress2.inOffset)
- rem = props->reduceSize - progress2.inOffset;
- */
-
- blockSizes.headerSize = 0; // for GCC
-
- RINOK(Xz_CompressBlock(
- &p->lzmaf_Items[0],
-
- writeStartSizes ? NULL : outStream,
- writeStartSizes ? p->outBufs[0] : NULL,
- bufData, bufSize,
-
- inStream,
- // rem,
- NULL, 0,
-
- props,
- progress ? &progress2.vt : NULL,
- &inStreamFinished,
- &blockSizes,
- p->alloc,
- p->allocBig))
-
- {
- UInt64 totalPackFull = blockSizes.totalSize + XZ_GET_PAD_SIZE(blockSizes.totalSize);
-
- if (writeStartSizes)
- {
- RINOK(WriteBytes(outStream, p->outBufs[0], blockSizes.headerSize))
- RINOK(WriteBytes(outStream, bufData, (size_t)totalPackFull - blockSizes.headerSize))
- }
-
- RINOK(XzEncIndex_AddIndexRecord(&p->xzIndex, blockSizes.unpackSize, blockSizes.totalSize, p->alloc))
-
- progress2.inOffset += blockSizes.unpackSize;
- progress2.outOffset += totalPackFull;
- }
-
- if (inStreamFinished)
- break;
- }
- }
-
- return XzEncIndex_WriteFooter(&p->xzIndex, (CXzStreamFlags)props->checkId, outStream);
-}
-
-
-#include "Alloc.h"
-
-SRes Xz_Encode(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
- const CXzProps *props, ICompressProgressPtr progress)
-{
- SRes res;
- CXzEncHandle xz = XzEnc_Create(&g_Alloc, &g_BigAlloc);
- if (!xz)
- return SZ_ERROR_MEM;
- res = XzEnc_SetProps(xz, props);
- if (res == SZ_OK)
- res = XzEnc_Encode(xz, outStream, inStream, progress);
- XzEnc_Destroy(xz);
- return res;
-}
-
-
-SRes Xz_EncodeEmpty(ISeqOutStreamPtr outStream)
-{
- SRes res;
- CXzEncIndex xzIndex;
- XzEncIndex_Construct(&xzIndex);
- res = Xz_WriteHeader((CXzStreamFlags)0, outStream);
- if (res == SZ_OK)
- res = XzEncIndex_WriteFooter(&xzIndex, (CXzStreamFlags)0, outStream);
- XzEncIndex_Free(&xzIndex, NULL); // g_Alloc
- return res;
-}
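
A hedged one-shot usage sketch of the Xz_Encode() wrapper removed above (assumed, not part of this patch); the stream objects are placeholders, and lzmaProps.level is assumed from CLzmaEncProps.

    #include "7zCrc.h"
    #include "XzEnc.h"

    static SRes EncodeXz(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream)
    {
      CXzProps props;
      CrcGenerateTable();                   // the CRC-32 table must exist before the header/index CRCs above are computed
      XzProps_Init(&props);                 // defaults: CRC32 check, auto block size, auto threads
      props.lzma2Props.lzmaProps.level = 6; // assumed CLzmaEncProps field: LZMA2 compression level
      props.numTotalThreads = 4;            // XzProps_Normalize() splits this between LZMA2 and block threads
      return Xz_Encode(outStream, inStream, &props, NULL /* progress */);
    }
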
diff --git a/3rdparty/7z/src/XzEnc.h b/3rdparty/7z/src/XzEnc.h
deleted file mode 100644
index 31026f7e09..0000000000
--- a/3rdparty/7z/src/XzEnc.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* XzEnc.h -- Xz Encode
-2023-04-13 : Igor Pavlov : Public domain */
-
-#ifndef ZIP7_INC_XZ_ENC_H
-#define ZIP7_INC_XZ_ENC_H
-
-#include "Lzma2Enc.h"
-
-#include "Xz.h"
-
-EXTERN_C_BEGIN
-
-
-#define XZ_PROPS_BLOCK_SIZE_AUTO LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO
-#define XZ_PROPS_BLOCK_SIZE_SOLID LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
-
-
-typedef struct
-{
- UInt32 id;
- UInt32 delta;
- UInt32 ip;
- int ipDefined;
-} CXzFilterProps;
-
-void XzFilterProps_Init(CXzFilterProps *p);
-
-
-typedef struct
-{
- CLzma2EncProps lzma2Props;
- CXzFilterProps filterProps;
- unsigned checkId;
- UInt64 blockSize;
- int numBlockThreads_Reduced;
- int numBlockThreads_Max;
- int numTotalThreads;
- int forceWriteSizesInHeader;
- UInt64 reduceSize;
-} CXzProps;
-
-void XzProps_Init(CXzProps *p);
-
-typedef struct CXzEnc CXzEnc;
-typedef CXzEnc * CXzEncHandle;
-// Z7_DECLARE_HANDLE(CXzEncHandle)
-
-CXzEncHandle XzEnc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig);
-void XzEnc_Destroy(CXzEncHandle p);
-SRes XzEnc_SetProps(CXzEncHandle p, const CXzProps *props);
-void XzEnc_SetDataSize(CXzEncHandle p, UInt64 expectedDataSiize);
-SRes XzEnc_Encode(CXzEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress);
-
-SRes Xz_Encode(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
- const CXzProps *props, ICompressProgressPtr progress);
-
-SRes Xz_EncodeEmpty(ISeqOutStreamPtr outStream);
-
-EXTERN_C_END
-
-#endif
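
For the handle-based API declared in the removed header (as opposed to the one-shot Xz_Encode wrapper above), a hedged lifecycle sketch; g_Alloc/g_BigAlloc come from Alloc.h, as in Xz_Encode, and the stream objects are placeholders.

    #include "Alloc.h"
    #include "XzEnc.h"

    static SRes EncodeXzWithHandle(ISeqOutStreamPtr out, ISeqInStreamPtr in,
        const CXzProps *props, UInt64 expectedSize)
    {
      SRes res;
      CXzEncHandle xz = XzEnc_Create(&g_Alloc, &g_BigAlloc);
      if (!xz)
        return SZ_ERROR_MEM;
      res = XzEnc_SetProps(xz, props);       // copies and normalizes the props
      if (res == SZ_OK)
      {
        XzEnc_SetDataSize(xz, expectedSize); // size hint; used for MT block scheduling
        res = XzEnc_Encode(xz, out, in, NULL /* progress */);
      }
      XzEnc_Destroy(xz);
      return res;
    }
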
diff --git a/3rdparty/7z/src/XzIn.c b/3rdparty/7z/src/XzIn.c
deleted file mode 100644
index afe9904332..0000000000
--- a/3rdparty/7z/src/XzIn.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/* XzIn.c - Xz input
-2023-04-02 : Igor Pavlov : Public domain */
-
-#include "Precomp.h"
-
-#include <string.h>
-
-#include "7zCrc.h"
-#include "CpuArch.h"
-#include "Xz.h"
-
-/*
-#define XZ_FOOTER_SIG_CHECK(p) (memcmp((p), XZ_FOOTER_SIG, XZ_FOOTER_SIG_SIZE) == 0)
-*/
-#define XZ_FOOTER_SIG_CHECK(p) ((p)[0] == XZ_FOOTER_SIG_0 && (p)[1] == XZ_FOOTER_SIG_1)
-
-
-SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStreamPtr inStream)
-{
- Byte sig[XZ_STREAM_HEADER_SIZE];
- size_t processedSize = XZ_STREAM_HEADER_SIZE;
- RINOK(SeqInStream_ReadMax(inStream, sig, &processedSize))
- if (processedSize != XZ_STREAM_HEADER_SIZE
- || memcmp(sig, XZ_SIG, XZ_SIG_SIZE) != 0)
- return SZ_ERROR_NO_ARCHIVE;
- return Xz_ParseHeader(p, sig);
-}
-
-#define READ_VARINT_AND_CHECK(buf, pos, size, res) \
- { unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \
- if (s == 0) return SZ_ERROR_ARCHIVE; \
- pos += s; }
-
-SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStreamPtr inStream, BoolInt *isIndex, UInt32 *headerSizeRes)
-{
- Byte header[XZ_BLOCK_HEADER_SIZE_MAX];
- unsigned headerSize;
- *headerSizeRes = 0;
- RINOK(SeqInStream_ReadByte(inStream, &header[0]))
- headerSize = (unsigned)header[0];
- if (headerSize == 0)
- {
- *headerSizeRes = 1;
- *isIndex = True;
- return SZ_OK;
- }
-
- *isIndex = False;
- headerSize = (headerSize << 2) + 4;
- *headerSizeRes = headerSize;
- {
- size_t processedSize = headerSize - 1;
- RINOK(SeqInStream_ReadMax(inStream, header + 1, &processedSize))
- if (processedSize != headerSize - 1)
- return SZ_ERROR_INPUT_EOF;
- }
- return XzBlock_Parse(p, header);
-}
-
-#define ADD_SIZE_CHECK(size, val) \
- { UInt64 newSize = size + (val); if (newSize < size) return XZ_SIZE_OVERFLOW; size = newSize; }
-
-UInt64 Xz_GetUnpackSize(const CXzStream *p)
-{
- UInt64 size = 0;
- size_t i;
- for (i = 0; i < p->numBlocks; i++)
- {
- ADD_SIZE_CHECK(size, p->blocks[i].unpackSize)
- }
- return size;
-}
-
-UInt64 Xz_GetPackSize(const CXzStream *p)
-{
- UInt64 size = 0;
- size_t i;
- for (i = 0; i < p->numBlocks; i++)
- {
- ADD_SIZE_CHECK(size, (p->blocks[i].totalSize + 3) & ~(UInt64)3)
- }
- return size;
-}
-
-/*
-SRes XzBlock_ReadFooter(CXzBlock *p, CXzStreamFlags f, ISeqInStreamPtr inStream)
-{
- return SeqInStream_Read(inStream, p->check, XzFlags_GetCheckSize(f));
-}
-*/
-
-static SRes Xz_ReadIndex2(CXzStream *p, const Byte *buf, size_t size, ISzAllocPtr alloc)
-{
- size_t numBlocks, pos = 1;
- UInt32 crc;
-
- if (size < 5 || buf[0] != 0)
- return SZ_ERROR_ARCHIVE;
-
- size -= 4;
- crc = CrcCalc(buf, size);
- if (crc != GetUi32(buf + size))
- return SZ_ERROR_ARCHIVE;
-
- {
- UInt64 numBlocks64;
- READ_VARINT_AND_CHECK(buf, pos, size, &numBlocks64)
- numBlocks = (size_t)numBlocks64;
- if (numBlocks != numBlocks64 || numBlocks * 2 > size)
- return SZ_ERROR_ARCHIVE;
- }
-
- Xz_Free(p, alloc);
- if (numBlocks != 0)
- {
- size_t i;
- p->numBlocks = numBlocks;
- p->blocks = (CXzBlockSizes *)ISzAlloc_Alloc(alloc, sizeof(CXzBlockSizes) * numBlocks);
- if (!p->blocks)
- return SZ_ERROR_MEM;
- for (i = 0; i < numBlocks; i++)
- {
- CXzBlockSizes *block = &p->blocks[i];
- READ_VARINT_AND_CHECK(buf, pos, size, &block->totalSize)
- READ_VARINT_AND_CHECK(buf, pos, size, &block->unpackSize)
- if (block->totalSize == 0)
- return SZ_ERROR_ARCHIVE;
- }
- }
- while ((pos & 3) != 0)
- if (buf[pos++] != 0)
- return SZ_ERROR_ARCHIVE;
- return (pos == size) ? SZ_OK : SZ_ERROR_ARCHIVE;
-}
-
-static SRes Xz_ReadIndex(CXzStream *p, ILookInStreamPtr stream, UInt64 indexSize, ISzAllocPtr alloc)
-{
- SRes res;
- size_t size;
- Byte *buf;
- if (indexSize > ((UInt32)1 << 31))
- return SZ_ERROR_UNSUPPORTED;
- size = (size_t)indexSize;
- if (size != indexSize)
- return SZ_ERROR_UNSUPPORTED;
- buf = (Byte *)ISzAlloc_Alloc(alloc, size);
- if (!buf)
- return SZ_ERROR_MEM;
- res = LookInStream_Read2(stream, buf, size, SZ_ERROR_UNSUPPORTED);
- if (res == SZ_OK)
- res = Xz_ReadIndex2(p, buf, size, alloc);
- ISzAlloc_Free(alloc, buf);
- return res;
-}
-
-static SRes LookInStream_SeekRead_ForArc(ILookInStreamPtr stream, UInt64 offset, void *buf, size_t size)
-{
- RINOK(LookInStream_SeekTo(stream, offset))
- return LookInStream_Read(stream, buf, size);
- /* return LookInStream_Read2(stream, buf, size, SZ_ERROR_NO_ARCHIVE); */
-}
-
-static SRes Xz_ReadBackward(CXzStream *p, ILookInStreamPtr stream, Int64 *startOffset, ISzAllocPtr alloc)
-{
- UInt64 indexSize;
- Byte buf[XZ_STREAM_FOOTER_SIZE];
- UInt64 pos = (UInt64)*startOffset;
-
- if ((pos & 3) != 0 || pos < XZ_STREAM_FOOTER_SIZE)
- return SZ_ERROR_NO_ARCHIVE;
-
- pos -= XZ_STREAM_FOOTER_SIZE;
- RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE))
-
- if (!XZ_FOOTER_SIG_CHECK(buf + 10))
- {
- UInt32 total = 0;
- pos += XZ_STREAM_FOOTER_SIZE;
-
- for (;;)
- {
- size_t i;
- #define TEMP_BUF_SIZE (1 << 10)
- Byte temp[TEMP_BUF_SIZE];
-
- i = (pos > TEMP_BUF_SIZE) ? TEMP_BUF_SIZE : (size_t)pos;
- pos -= i;
- RINOK(LookInStream_SeekRead_ForArc(stream, pos, temp, i))
- total += (UInt32)i;
- for (; i != 0; i--)
- if (temp[i - 1] != 0)
- break;
- if (i != 0)
- {
- if ((i & 3) != 0)
- return SZ_ERROR_NO_ARCHIVE;
- pos += i;
- break;
- }
- if (pos < XZ_STREAM_FOOTER_SIZE || total > (1 << 16))
- return SZ_ERROR_NO_ARCHIVE;
- }
-
- if (pos < XZ_STREAM_FOOTER_SIZE)
- return SZ_ERROR_NO_ARCHIVE;
- pos -= XZ_STREAM_FOOTER_SIZE;
- RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE))
- if (!XZ_FOOTER_SIG_CHECK(buf + 10))
- return SZ_ERROR_NO_ARCHIVE;
- }
-
- p->flags = (CXzStreamFlags)GetBe16(buf + 8);
-
- if (!XzFlags_IsSupported(p->flags))
- return SZ_ERROR_UNSUPPORTED;
-
- {
- /* to eliminate GCC 6.3 warning:
- dereferencing type-punned pointer will break strict-aliasing rules */
- const Byte *buf_ptr = buf;
- if (GetUi32(buf_ptr) != CrcCalc(buf + 4, 6))
- return SZ_ERROR_ARCHIVE;
- }
-
- indexSize = ((UInt64)GetUi32(buf + 4) + 1) << 2;
-
- if (pos < indexSize)
- return SZ_ERROR_ARCHIVE;
-
- pos -= indexSize;
- RINOK(LookInStream_SeekTo(stream, pos))
- RINOK(Xz_ReadIndex(p, stream, indexSize, alloc))
-
- {
- UInt64 totalSize = Xz_GetPackSize(p);
- if (totalSize == XZ_SIZE_OVERFLOW
- || totalSize >= ((UInt64)1 << 63)
- || pos < totalSize + XZ_STREAM_HEADER_SIZE)
- return SZ_ERROR_ARCHIVE;
- pos -= (totalSize + XZ_STREAM_HEADER_SIZE);
- RINOK(LookInStream_SeekTo(stream, pos))
- *startOffset = (Int64)pos;
- }
- {
- CXzStreamFlags headerFlags;
- CSecToRead secToRead;
- SecToRead_CreateVTable(&secToRead);
- secToRead.realStream = stream;
-
- RINOK(Xz_ReadHeader(&headerFlags, &secToRead.vt))
- return (p->flags == headerFlags) ? SZ_OK : SZ_ERROR_ARCHIVE;
- }
-}
-
-
-/* ---------- Xz Streams ---------- */
-
-void Xzs_Construct(CXzs *p)
-{
- p->num = p->numAllocated = 0;
- p->streams = 0;
-}
-
-void Xzs_Free(CXzs *p, ISzAllocPtr alloc)
-{
- size_t i;
- for (i = 0; i < p->num; i++)
- Xz_Free(&p->streams[i], alloc);
- ISzAlloc_Free(alloc, p->streams);
- p->num = p->numAllocated = 0;
- p->streams = 0;
-}
-
-UInt64 Xzs_GetNumBlocks(const CXzs *p)
-{
- UInt64 num = 0;
- size_t i;
- for (i = 0; i < p->num; i++)
- num += p->streams[i].numBlocks;
- return num;
-}
-
-UInt64 Xzs_GetUnpackSize(const CXzs *p)
-{
- UInt64 size = 0;
- size_t i;
- for (i = 0; i < p->num; i++)
- {
- ADD_SIZE_CHECK(size, Xz_GetUnpackSize(&p->streams[i]))
- }
- return size;
-}
-
-/*
-UInt64 Xzs_GetPackSize(const CXzs *p)
-{
- UInt64 size = 0;
- size_t i;
- for (i = 0; i < p->num; i++)
- {
- ADD_SIZE_CHECK(size, Xz_GetTotalSize(&p->streams[i]))
- }
- return size;
-}
-*/
-
-SRes Xzs_ReadBackward(CXzs *p, ILookInStreamPtr stream, Int64 *startOffset, ICompressProgressPtr progress, ISzAllocPtr alloc)
-{
- Int64 endOffset = 0;
- RINOK(ILookInStream_Seek(stream, &endOffset, SZ_SEEK_END))
- *startOffset = endOffset;
- for (;;)
- {
- CXzStream st;
- SRes res;
- Xz_Construct(&st);
- res = Xz_ReadBackward(&st, stream, startOffset, alloc);
- st.startOffset = (UInt64)*startOffset;
- RINOK(res)
- if (p->num == p->numAllocated)
- {
- const size_t newNum = p->num + p->num / 4 + 1;
- void *data = ISzAlloc_Alloc(alloc, newNum * sizeof(CXzStream));
- if (!data)
- return SZ_ERROR_MEM;
- p->numAllocated = newNum;
- if (p->num != 0)
- memcpy(data, p->streams, p->num * sizeof(CXzStream));
- ISzAlloc_Free(alloc, p->streams);
- p->streams = (CXzStream *)data;
- }
- p->streams[p->num++] = st;
- if (*startOffset == 0)
- break;
- RINOK(LookInStream_SeekTo(stream, (UInt64)*startOffset))
- if (progress && ICompressProgress_Progress(progress, (UInt64)(endOffset - *startOffset), (UInt64)(Int64)-1) != SZ_OK)
- return SZ_ERROR_PROGRESS;
- }
- return SZ_OK;
-}
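
A hedged sketch of the backward index scan implemented by the removed XzIn.c (footer -> index -> header per concatenated stream, no block decoding). The ILookInStreamPtr is assumed to come from the SDK's seekable-stream wrappers, and CrcGenerateTable() is assumed to have been called once beforehand, since the index CRCs are verified.

    #include "Xz.h"

    static SRes PrintXzTotals(ILookInStreamPtr lookStream, ISzAllocPtr alloc)
    {
      CXzs xzs;
      Int64 startOffset = 0;   // Xzs_ReadBackward() seeks to the end itself and rewrites this
      SRes res;
      Xzs_Construct(&xzs);
      res = Xzs_ReadBackward(&xzs, lookStream, &startOffset, NULL /* progress */, alloc);
      if (res == SZ_OK)
      {
        const UInt64 numBlocks = Xzs_GetNumBlocks(&xzs);
        const UInt64 unpackSize = Xzs_GetUnpackSize(&xzs);  // XZ_SIZE_OVERFLOW if the totals overflow
        // ... report numBlocks / unpackSize ...
        (void)numBlocks; (void)unpackSize;
      }
      Xzs_Free(&xzs, alloc);
      return res;
    }
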
diff --git a/3rdparty/7zip/7zip b/3rdparty/7zip/7zip
new file mode 160000
index 0000000000..e5431fa6f5
--- /dev/null
+++ b/3rdparty/7zip/7zip
@@ -0,0 +1 @@
+Subproject commit e5431fa6f5505e385c6f9367260717e9c47dc2ee
diff --git a/3rdparty/7zip/7zip.filters b/3rdparty/7zip/7zip.filters
new file mode 100644
index 0000000000..1ba9da711a
--- /dev/null
+++ b/3rdparty/7zip/7zip.filters
@@ -0,0 +1,111 @@
diff --git a/3rdparty/7z/7zlib.vcxproj b/3rdparty/7zip/7zip.vcxproj
similarity index 61%
rename from 3rdparty/7z/7zlib.vcxproj
rename to 3rdparty/7zip/7zip.vcxproj
index f39c506b64..f8420f7b10 100644
--- a/3rdparty/7z/7zlib.vcxproj
+++ b/3rdparty/7zip/7zip.vcxproj
@@ -19,108 +19,112 @@
16.0
@@ -175,8 +179,8 @@
- $(SolutionDir)lib/$(Configuration)-$(Platform)/
- $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/
+ $(SolutionDir)build\lib\$(Configuration)-$(Platform)\
+ $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\
false
diff --git a/3rdparty/7zip/CMakeLists.txt b/3rdparty/7zip/CMakeLists.txt
new file mode 100644
index 0000000000..706d869472
--- /dev/null
+++ b/3rdparty/7zip/CMakeLists.txt
@@ -0,0 +1,72 @@
+# 7zip sdk
+if(WIN32 OR APPLE)
+ add_library(3rdparty_7zip STATIC EXCLUDE_FROM_ALL
+ 7zip/C/7zAlloc.c
+ 7zip/C/7zArcIn.c
+ 7zip/C/7zBuf.c
+ 7zip/C/7zBuf2.c
+ 7zip/C/7zCrc.c
+ 7zip/C/7zCrcOpt.c
+ 7zip/C/7zDec.c
+ 7zip/C/7zFile.c
+ 7zip/C/7zStream.c
+ 7zip/C/Aes.c
+ 7zip/C/AesOpt.c
+ 7zip/C/Alloc.c
+ 7zip/C/Bcj2.c
+ 7zip/C/Bcj2Enc.c
+ 7zip/C/Blake2s.c
+ 7zip/C/Bra.c
+ 7zip/C/Bra86.c
+ 7zip/C/BraIA64.c
+ 7zip/C/BwtSort.c
+ 7zip/C/CpuArch.c
+ 7zip/C/Delta.c
+ 7zip/C/DllSecur.c
+ 7zip/C/HuffEnc.c
+ 7zip/C/LzFind.c
+ 7zip/C/LzFindMt.c
+ 7zip/C/LzFindOpt.c
+ 7zip/C/Lzma2Dec.c
+ 7zip/C/Lzma2DecMt.c
+ 7zip/C/Lzma2Enc.c
+ 7zip/C/Lzma86Dec.c
+ 7zip/C/Lzma86Enc.c
+ 7zip/C/LzmaDec.c
+ 7zip/C/LzmaEnc.c
+ 7zip/C/LzmaLib.c
+ 7zip/C/MtCoder.c
+ 7zip/C/MtDec.c
+ 7zip/C/Ppmd7.c
+ 7zip/C/Ppmd7aDec.c
+ 7zip/C/Ppmd7Dec.c
+ 7zip/C/Ppmd7Enc.c
+ 7zip/C/Ppmd8.c
+ 7zip/C/Ppmd8Dec.c
+ 7zip/C/Ppmd8Enc.c
+ 7zip/C/Sha1.c
+ 7zip/C/Sha1Opt.c
+ 7zip/C/Sha256.c
+ 7zip/C/Sha256Opt.c
+ 7zip/C/Sort.c
+ 7zip/C/SwapBytes.c
+ 7zip/C/Threads.c
+ 7zip/C/Xxh64.c
+ 7zip/C/Xz.c
+ 7zip/C/XzCrc64.c
+ 7zip/C/XzCrc64Opt.c
+ 7zip/C/XzDec.c
+ 7zip/C/XzEnc.c
+ 7zip/C/XzIn.c
+ 7zip/C/ZstdDec.c)
+ target_include_directories(3rdparty_7zip INTERFACE
+ $
+ $)
+
+ target_include_directories(3rdparty_7zip INTERFACE 7zip)
+
+ set_property(TARGET 3rdparty_7zip PROPERTY FOLDER "3rdparty/")
+
+else()
+ add_library(3rdparty_7zip INTERFACE)
+endif()
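
The new 3rdparty_7zip target only compiles the submodule's C sources; consumers still have to initialize the SDK's CRC table once before using any CRC-checked codec (the XzEnc/XzIn code removed above relies on CrcCalc/CrcUpdate). A hedged sketch of such a consumer translation unit; the exact include prefix depends on the INTERFACE include directories set above.

    #include "7zCrc.h"   // from 7zip/C in the submodule

    static int g_7zip_initialized;

    static void Init7zipOnce(void)
    {
      if (!g_7zip_initialized)
      {
        CrcGenerateTable();   // required before any xz/7z operation that writes or verifies CRCs
        g_7zip_initialized = 1;
      }
    }
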
diff --git a/3rdparty/CMakeLists.txt b/3rdparty/CMakeLists.txt
index 4006a4920c..6c49a889ba 100644
--- a/3rdparty/CMakeLists.txt
+++ b/3rdparty/CMakeLists.txt
@@ -4,6 +4,9 @@ include(CMakeDependentOption)
set(CMAKE_CXX_STANDARD 20)
+# Defines the ARCHITECTURE variable
+include("DetectArchitecture.cmake")
+
# Warnings are silenced for 3rdparty code
if(NOT MSVC)
add_compile_options("$<$:-w>")
@@ -16,8 +19,11 @@ add_library(3rdparty_dummy_lib INTERFACE)
# ZLib
add_subdirectory(zlib EXCLUDE_FROM_ALL)
-# 7z sdk
-add_subdirectory(7z EXCLUDE_FROM_ALL)
+# ZSTD
+add_subdirectory(zstd EXCLUDE_FROM_ALL)
+
+# 7zip sdk
+add_subdirectory(7zip EXCLUDE_FROM_ALL)
add_library(3rdparty_flatbuffers INTERFACE)
if (USE_SYSTEM_FLATBUFFERS)
@@ -38,7 +44,7 @@ add_subdirectory(libpng EXCLUDE_FROM_ALL)
# pugixml
if (USE_SYSTEM_PUGIXML)
- pkg_check_modules(PUGIXML REQUIRED IMPORTED_TARGET pugixml>=1.11)
+ pkg_check_modules(PUGIXML REQUIRED IMPORTED_TARGET pugixml>=1.15)
add_library(pugixml INTERFACE)
target_link_libraries(pugixml INTERFACE PkgConfig::PUGIXML)
else()
@@ -73,6 +79,10 @@ else()
# we don't have the system libusb, so we compile from submodule
unset(LIBUSB_LIBRARIES CACHE)
add_subdirectory(libusb EXCLUDE_FROM_ALL)
+
+ if (NOT TARGET usb-1.0 AND TARGET usb-1.0-static)
+ add_library(usb-1.0 ALIAS usb-1.0-static)
+ endif()
endif()
endif()
@@ -80,59 +90,55 @@ endif()
# hidapi
add_subdirectory(hidapi)
-
-# Vulkan
+# glslang
add_subdirectory(glslang EXCLUDE_FROM_ALL)
-#add_subdirectory(SPIRV EXCLUDE_FROM_ALL)
+add_library(3rdparty_glslang INTERFACE)
+target_link_libraries(3rdparty_glslang INTERFACE SPIRV)
# yaml-cpp
add_subdirectory(yaml-cpp)
-# xxHash
-if (USE_SYSTEM_XXHASH)
- pkg_check_modules(XXHASH REQUIRED IMPORTED_TARGET libxxhash)
- add_library(xxhash INTERFACE)
- target_link_libraries(xxhash INTERFACE PkgConfig::XXHASH)
-else()
- set(XXHASH_BUNDLED_MODE ON)
- set(XXHASH_BUILD_XXHSUM OFF)
- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Make xxHash build static libs")
- add_subdirectory(xxHash/cmake_unofficial EXCLUDE_FROM_ALL)
- target_include_directories(xxhash INTERFACE xxHash)
-endif()
-
# OpenGL
-find_package(OpenGL REQUIRED)
+if (NOT ANDROID)
+ find_package(OpenGL REQUIRED OPTIONAL_COMPONENTS EGL)
-add_library(3rdparty_opengl INTERFACE)
-target_include_directories(3rdparty_opengl INTERFACE GL)
+ add_library(3rdparty_opengl INTERFACE)
+ target_include_directories(3rdparty_opengl INTERFACE GL)
-if (WIN32)
- if(NOT MSVC)
+ if (WIN32)
+ if(NOT MSVC)
+ target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU)
+ else()
+ target_link_libraries(3rdparty_opengl INTERFACE dxgi.lib d2d1.lib dwrite.lib)
+ endif()
+ elseif(APPLE)
target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU)
else()
- target_link_libraries(3rdparty_opengl INTERFACE dxgi.lib d2d1.lib dwrite.lib)
+ target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU OpenGL::GLX)
endif()
-elseif(APPLE)
- target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU)
else()
- target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU OpenGL::GLX)
+ add_library(3rdparty_opengl INTERFACE)
+ target_compile_definitions(3rdparty_opengl INTERFACE WITHOUT_OPENGL=1)
endif()
-
# stblib
-add_library(3rdparty_stblib INTERFACE)
-target_include_directories(3rdparty_stblib INTERFACE stblib/include)
-
+add_subdirectory(stblib)
# DiscordRPC
add_subdirectory(discord-rpc)
# Cubeb
-add_subdirectory(cubeb EXCLUDE_FROM_ALL)
+if(USE_SYSTEM_CUBEB)
+ find_package(cubeb REQUIRED GLOBAL)
+ message(STATUS "Using system cubeb version '${cubeb_VERSION}'")
+ add_library(3rdparty::cubeb ALIAS cubeb::cubeb)
+else()
+ message(STATUS "Using static cubeb from 3rdparty")
+ add_subdirectory(cubeb EXCLUDE_FROM_ALL)
+endif()
# SoundTouch
add_subdirectory(SoundTouch EXCLUDE_FROM_ALL)
@@ -187,16 +193,13 @@ if(USE_VULKAN)
if(VULKAN_FOUND)
add_library(3rdparty_vulkan INTERFACE)
target_compile_definitions(3rdparty_vulkan INTERFACE -DHAVE_VULKAN)
- target_link_libraries(3rdparty_vulkan INTERFACE SPIRV Vulkan::Vulkan)
+ target_link_libraries(3rdparty_vulkan INTERFACE Vulkan::Vulkan)
- if(UNIX AND NOT APPLE)
+ if(UNIX AND NOT APPLE AND NOT ANDROID)
find_package(Wayland)
if (WAYLAND_FOUND)
target_include_directories(3rdparty_vulkan
INTERFACE ${WAYLAND_INCLUDE_DIR})
-
- target_compile_definitions(3rdparty_vulkan
- INTERFACE -DVK_USE_PLATFORM_WAYLAND_KHR)
endif()
endif()
@@ -212,42 +215,68 @@ endif()
# AsmJit
add_subdirectory(asmjit EXCLUDE_FROM_ALL)
+# SDL3
+set(SDL3_TARGET 3rdparty_dummy_lib)
+if(USE_SDL)
+ if(USE_SYSTEM_SDL)
+ find_package(SDL3)
+ if(SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0)
+ message(STATUS "Using system SDL3 version '${SDL3_VERSION}'")
+ add_library(3rdparty_sdl3 INTERFACE)
+ target_compile_definitions(3rdparty_sdl3 INTERFACE -DHAVE_SDL3=1)
+ target_link_libraries(3rdparty_sdl3 INTERFACE SDL3::SDL3)
+ set(SDL3_TARGET 3rdparty_sdl3)
+ else()
+ message(FATAL_ERROR "SDL3 is not available on this system")
+ endif()
+ else()
+ message(STATUS "Using static SDL3 from 3rdparty")
+ add_subdirectory(libsdl-org EXCLUDE_FROM_ALL)
+ target_compile_definitions(SDL3-static INTERFACE -DHAVE_SDL3=1)
+ set(SDL3_TARGET SDL3-static)
+ set(SDL3_DIR "${CMAKE_CURRENT_BINARY_DIR}/libsdl-org/SDL" CACHE STRING "")
+ endif()
+endif()
+
# OpenAL
-add_subdirectory(OpenAL EXCLUDE_FROM_ALL)
+if (NOT ANDROID)
+ add_subdirectory(OpenAL EXCLUDE_FROM_ALL)
+else()
+ add_library(3rdparty_openal INTERFACE)
+ target_compile_definitions(3rdparty_openal INTERFACE WITHOUT_OPENAL=1)
+endif()
# FAudio
set(FAUDIO_TARGET 3rdparty_dummy_lib)
if(USE_FAUDIO)
- # FAudio depends on SDL2
- find_package(SDL2)
+ # FAudio depends on SDL3
+ find_package(SDL3)
if (USE_SYSTEM_FAUDIO)
- if (NOT SDL2_FOUND OR SDL2_VERSION VERSION_LESS 2.0.12)
- message(WARNING
- "RPCS3: FAudio requires SDL 2.0.9 or newer. Please note, this warning"
- "can also be displayed with SDL2 versions between 2.0.9-2.0.12, as the"
- "CMake config files are not correctly installed. Since a valid SDL2"
- ">=2.0.9 version cannot be found, building with FAudio will be skipped.")
- set(USE_FAUDIO OFF CACHE BOOL "Disabled using system FAudio with SDL < 2.0.12" FORCE)
- else()
+ if (SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0)
message(STATUS "RPCS3: Using system FAudio")
find_package(FAudio REQUIRED CONFIGS FAudioConfig.cmake FAudio-config.cmake)
add_library(3rdparty_FAudio INTERFACE)
target_link_libraries(3rdparty_FAudio INTERFACE FAudio)
target_compile_definitions(3rdparty_FAudio INTERFACE -DHAVE_FAUDIO)
set(FAUDIO_TARGET 3rdparty_FAudio)
+ else()
+ message(WARNING
+ "RPCS3: System FAudio requires SDL 3.2.0 or newer. Since a valid SDL3"
+ ">=3.2.0 version cannot be found, building with FAudio will be skipped.")
+ set(USE_FAUDIO OFF CACHE BOOL "Disabled using system FAudio with SDL < 3.2.0" FORCE)
endif()
else()
- if (NOT SDL2_FOUND OR SDL2_VERSION VERSION_LESS 2.24.0)
- message(WARNING
- "-- RPCS3: 3rdparty FAudio requires SDL 2.24.0 or newer. Since a valid SDL2"
- ">=2.24.0 version cannot be found, building with FAudio will be skipped.")
- set(USE_FAUDIO OFF CACHE BOOL "Disabled FAudio with SDL < 2.24.0" FORCE)
- else()
+ if (SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0)
message(STATUS "RPCS3: Using builtin FAudio")
set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared library")
add_subdirectory(FAudio EXCLUDE_FROM_ALL)
- target_compile_definitions(FAudio INTERFACE -DHAVE_FAUDIO)
- set(FAUDIO_TARGET FAudio)
+ target_compile_definitions(FAudio-static INTERFACE -DHAVE_FAUDIO)
+ set(FAUDIO_TARGET FAudio-static)
+ else()
+ message(FATAL_ERROR
+ "-- RPCS3: 3rdparty FAudio requires SDL 3.2.0 or newer. Since a valid SDL3"
+ ">=3.2.0 version cannot be found, building with FAudio will be skipped.")
+ set(USE_FAUDIO OFF CACHE BOOL "Disabled FAudio with SDL < 3.2.0" FORCE)
endif()
endif()
endif()
@@ -256,30 +285,24 @@ set_property(TARGET ${FAUDIO_TARGET} PROPERTY FOLDER "3rdparty/")
# FFMPEG
-add_library(3rdparty_ffmpeg INTERFACE)
+if(NOT ANDROID)
+ add_library(3rdparty_ffmpeg INTERFACE)
-# Select the version of ffmpeg to use, default is builtin
-if(USE_SYSTEM_FFMPEG)
- message(STATUS "RPCS3: using shared ffmpeg")
- find_package(FFMPEG REQUIRED)
+ # Select the version of ffmpeg to use, default is builtin
+ if(USE_SYSTEM_FFMPEG)
+ message(STATUS "RPCS3: using shared ffmpeg")
+ find_package(FFMPEG REQUIRED)
- target_include_directories(3rdparty_ffmpeg INTERFACE ${FFMPEG_INCLUDE_DIR})
- target_link_libraries(3rdparty_ffmpeg INTERFACE ${FFMPEG_LIBRARIES})
-else()
- if (NOT MSVC AND WIN32)
- message(FATAL_ERROR "-- RPCS3: building ffmpeg submodule is currently not supported")
+ target_include_directories(3rdparty_ffmpeg INTERFACE ${FFMPEG_INCLUDE_DIR})
+ target_link_libraries(3rdparty_ffmpeg INTERFACE ${FFMPEG_LIBRARIES})
else()
message(STATUS "RPCS3: using builtin ffmpeg")
+ add_subdirectory(ffmpeg EXCLUDE_FROM_ALL)
+ # ffmpeg-core libraries are extracted to CMAKE_BINARY_DIR
+ set(FFMPEG_LIB_DIR "${CMAKE_BINARY_DIR}/3rdparty/ffmpeg/lib")
if (WIN32)
- set(FFMPEG_LIB_DIR "ffmpeg/lib/windows/x86_64")
target_link_libraries(3rdparty_ffmpeg INTERFACE "Bcrypt.lib")
- elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
- set(FFMPEG_LIB_DIR "ffmpeg/lib/linux/ubuntu-20.04/x86_64")
- elseif(APPLE)
- set(FFMPEG_LIB_DIR "ffmpeg/lib/macos/x86_64")
- else()
- message(FATAL_ERROR "Prebuilt ffmpeg is not available on this platform! Try USE_SYSTEM_FFMPEG=ON.")
endif()
find_library(FFMPEG_LIB_AVFORMAT avformat PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH)
@@ -288,6 +311,10 @@ else()
find_library(FFMPEG_LIB_SWSCALE swscale PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH)
find_library(FFMPEG_LIB_SWRESAMPLE swresample PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH)
+ if (NOT FFMPEG_LIB_AVFORMAT)
+ message(FATAL_ERROR "RPCS3: prebuilt ffmpeg libraries were not found in ${FFMPEG_LIB_DIR}. Try USE_SYSTEM_FFMPEG=ON.")
+ endif()
+
target_link_libraries(3rdparty_ffmpeg
INTERFACE
${FFMPEG_LIB_AVFORMAT}
@@ -296,15 +323,14 @@ else()
${FFMPEG_LIB_SWSCALE}
${FFMPEG_LIB_SWRESAMPLE}
)
+ target_include_directories(3rdparty_ffmpeg INTERFACE "ffmpeg/include")
endif()
-
- target_include_directories(3rdparty_ffmpeg INTERFACE "ffmpeg/include")
endif()
# GLEW
add_library(3rdparty_glew INTERFACE)
-if(NOT MSVC)
+if(NOT MSVC AND NOT ANDROID)
find_package(GLEW REQUIRED)
target_link_libraries(3rdparty_glew INTERFACE GLEW::GLEW)
endif()
@@ -319,36 +345,18 @@ add_subdirectory(wolfssl EXCLUDE_FROM_ALL)
# CURL
add_subdirectory(curl EXCLUDE_FROM_ALL)
-# SDL2
-set(SDL2_TARGET 3rdparty_dummy_lib)
-if(USE_SDL)
- if(USE_SYSTEM_SDL)
- find_package(SDL2)
- if(SDL2_FOUND AND NOT SDL2_VERSION VERSION_LESS 2.24.0)
- message(STATUS "Using system SDL2")
- add_library(3rdparty_sdl2 INTERFACE)
- target_compile_definitions(3rdparty_sdl2 INTERFACE -DHAVE_SDL2=1)
- target_include_directories(3rdparty_sdl2 INTERFACE ${SDL2_INCLUDE_DIRS})
- target_link_libraries(3rdparty_sdl2 INTERFACE ${SDL2_LIBRARIES})
- set(SDL2_TARGET 3rdparty_sdl2)
- else()
- message(FATAL_ERROR "SDL2 is not available on this system")
- endif()
- else()
- message(STATUS "Using static SDL2 from 3rdparty")
- add_library(3rdparty_sdl2 INTERFACE)
- target_compile_definitions(3rdparty_sdl2 INTERFACE -DHAVE_SDL2=1)
- add_subdirectory(libsdl-org EXCLUDE_FROM_ALL)
- set(SDL2_TARGET 3rdparty_sdl2)
- endif()
-endif()
-
# MINIUPNP
add_subdirectory(miniupnp EXCLUDE_FROM_ALL)
# RTMIDI
add_subdirectory(rtmidi EXCLUDE_FROM_ALL)
+# OPENCV
+add_subdirectory(opencv EXCLUDE_FROM_ALL)
+
+# FUSION
+add_subdirectory(fusion EXCLUDE_FROM_ALL)
+
# add nice ALIAS targets for ease of use
if(USE_SYSTEM_LIBUSB)
add_library(3rdparty::libusb ALIAS usb-1.0-shared)
@@ -356,11 +364,12 @@ else()
add_library(3rdparty::libusb ALIAS usb-1.0-static)
endif()
add_library(3rdparty::zlib ALIAS 3rdparty_zlib)
-add_library(3rdparty::7z ALIAS 3rdparty_7z)
+add_library(3rdparty::zstd ALIAS 3rdparty_zstd)
+add_library(3rdparty::7zip ALIAS 3rdparty_7zip)
add_library(3rdparty::flatbuffers ALIAS 3rdparty_flatbuffers)
add_library(3rdparty::pugixml ALIAS pugixml)
+add_library(3rdparty::glslang ALIAS 3rdparty_glslang)
add_library(3rdparty::yaml-cpp ALIAS yaml-cpp)
-add_library(3rdparty::xxhash ALIAS xxhash)
add_library(3rdparty::hidapi ALIAS 3rdparty_hidapi)
add_library(3rdparty::libpng ALIAS ${LIBPNG_TARGET})
add_library(3rdparty::opengl ALIAS 3rdparty_opengl)
@@ -375,6 +384,8 @@ add_library(3rdparty::glew ALIAS 3rdparty_glew)
add_library(3rdparty::wolfssl ALIAS wolfssl)
add_library(3rdparty::libcurl ALIAS 3rdparty_libcurl)
add_library(3rdparty::soundtouch ALIAS soundtouch)
-add_library(3rdparty::sdl2 ALIAS ${SDL2_TARGET})
+add_library(3rdparty::sdl3 ALIAS ${SDL3_TARGET})
add_library(3rdparty::miniupnpc ALIAS libminiupnpc-static)
add_library(3rdparty::rtmidi ALIAS rtmidi)
+add_library(3rdparty::opencv ALIAS ${OPENCV_TARGET})
+add_library(3rdparty::fusion ALIAS Fusion)
diff --git a/3rdparty/DetectArchitecture.cmake b/3rdparty/DetectArchitecture.cmake
new file mode 100644
index 0000000000..dcdb0e2a70
--- /dev/null
+++ b/3rdparty/DetectArchitecture.cmake
@@ -0,0 +1,63 @@
+# From https://github.com/merryhime/dynarmic
+include(CheckSymbolExists)
+
+if (CMAKE_OSX_ARCHITECTURES)
+ set(DYNARMIC_MULTIARCH_BUILD 1)
+ set(ARCHITECTURE "${CMAKE_OSX_ARCHITECTURES}")
+ return()
+endif()
+
+function(detect_architecture symbol arch)
+ if (NOT DEFINED ARCHITECTURE)
+ set(CMAKE_REQUIRED_QUIET YES)
+ check_symbol_exists("${symbol}" "" DETECT_ARCHITECTURE_${arch})
+ unset(CMAKE_REQUIRED_QUIET)
+
+ if (DETECT_ARCHITECTURE_${arch})
+ set(ARCHITECTURE "${arch}" PARENT_SCOPE)
+ endif()
+
+ unset(DETECT_ARCHITECTURE_${arch} CACHE)
+ endif()
+endfunction()
+
+detect_architecture("__ARM64__" arm64)
+detect_architecture("__aarch64__" arm64)
+detect_architecture("_M_ARM64" arm64)
+
+detect_architecture("__arm__" arm)
+detect_architecture("__TARGET_ARCH_ARM" arm)
+detect_architecture("_M_ARM" arm)
+
+detect_architecture("__x86_64" x86_64)
+detect_architecture("__x86_64__" x86_64)
+detect_architecture("__amd64" x86_64)
+detect_architecture("_M_X64" x86_64)
+
+detect_architecture("__i386" x86)
+detect_architecture("__i386__" x86)
+detect_architecture("_M_IX86" x86)
+
+detect_architecture("__ia64" ia64)
+detect_architecture("__ia64__" ia64)
+detect_architecture("_M_IA64" ia64)
+
+detect_architecture("__mips" mips)
+detect_architecture("__mips__" mips)
+detect_architecture("_M_MRX000" mips)
+
+detect_architecture("__ppc64__" ppc64)
+detect_architecture("__powerpc64__" ppc64)
+
+detect_architecture("__ppc__" ppc)
+detect_architecture("__ppc" ppc)
+detect_architecture("__powerpc__" ppc)
+detect_architecture("_ARCH_COM" ppc)
+detect_architecture("_ARCH_PWR" ppc)
+detect_architecture("_ARCH_PPC" ppc)
+detect_architecture("_M_MPPC" ppc)
+detect_architecture("_M_PPC" ppc)
+
+detect_architecture("__riscv" riscv)
+
+detect_architecture("__EMSCRIPTEN__" wasm)
diff --git a/3rdparty/FAudio b/3rdparty/FAudio
index 758c90af12..6077ea740a 160000
--- a/3rdparty/FAudio
+++ b/3rdparty/FAudio
@@ -1 +1 @@
-Subproject commit 758c90af12c2869ba1e93736faa5e909fcd8a44c
+Subproject commit 6077ea740a7114a54f76ed9b7abe08cffc0034b6
diff --git a/3rdparty/GL/glext.h b/3rdparty/GL/glext.h
index 61ff1b0708..276a962a96 100644
--- a/3rdparty/GL/glext.h
+++ b/3rdparty/GL/glext.h
@@ -32,7 +32,7 @@ extern "C" {
#define GLAPI extern
#endif
-#define GL_GLEXT_VERSION 20220530
+#define GL_GLEXT_VERSION 20250203
#include <KHR/khrplatform.h>
@@ -5397,12 +5397,12 @@ typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severi
typedef void (APIENTRYP PFNGLDEBUGMESSAGEENABLEAMDPROC) (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTAMDPROC) (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf);
typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKAMDPROC) (GLDEBUGPROCAMD callback, void *userParam);
-typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
+typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
#ifdef GL_GLEXT_PROTOTYPES
GLAPI void APIENTRY glDebugMessageEnableAMD (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
GLAPI void APIENTRY glDebugMessageInsertAMD (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf);
GLAPI void APIENTRY glDebugMessageCallbackAMD (GLDEBUGPROCAMD callback, void *userParam);
-GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
+GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
#endif
#endif /* GL_AMD_debug_output */
@@ -7370,6 +7370,16 @@ GLAPI void APIENTRY glBlitFramebufferEXT (GLint srcX0, GLint srcY0, GLint srcX1,
#endif
#endif /* GL_EXT_framebuffer_blit */
+#ifndef GL_EXT_framebuffer_blit_layers
+#define GL_EXT_framebuffer_blit_layers 1
+typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYERSEXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYEREXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlitFramebufferLayersEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+GLAPI void APIENTRY glBlitFramebufferLayerEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter);
+#endif
+#endif /* GL_EXT_framebuffer_blit_layers */
+
#ifndef GL_EXT_framebuffer_multisample
#define GL_EXT_framebuffer_multisample 1
#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB
@@ -9394,6 +9404,11 @@ GLAPI void APIENTRY glResizeBuffersMESA (void);
#define GL_MESA_shader_integer_functions 1
#endif /* GL_MESA_shader_integer_functions */
+#ifndef GL_MESA_texture_const_bandwidth
+#define GL_MESA_texture_const_bandwidth 1
+#define GL_CONST_BW_TILING_MESA 0x8BBE
+#endif /* GL_MESA_texture_const_bandwidth */
+
#ifndef GL_MESA_tile_raster_order
#define GL_MESA_tile_raster_order 1
#define GL_TILE_RASTER_ORDER_FIXED_MESA 0x8BB8
@@ -10248,12 +10263,6 @@ typedef void (APIENTRYP PFNGLMULTITEXCOORD3HNVPROC) (GLenum target, GLhalfNV s,
typedef void (APIENTRYP PFNGLMULTITEXCOORD3HVNVPROC) (GLenum target, const GLhalfNV *v);
typedef void (APIENTRYP PFNGLMULTITEXCOORD4HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
typedef void (APIENTRYP PFNGLMULTITEXCOORD4HVNVPROC) (GLenum target, const GLhalfNV *v);
-typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog);
-typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog);
-typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
-typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v);
-typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight);
-typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight);
typedef void (APIENTRYP PFNGLVERTEXATTRIB1HNVPROC) (GLuint index, GLhalfNV x);
typedef void (APIENTRYP PFNGLVERTEXATTRIB1HVNVPROC) (GLuint index, const GLhalfNV *v);
typedef void (APIENTRYP PFNGLVERTEXATTRIB2HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y);
@@ -10266,6 +10275,12 @@ typedef void (APIENTRYP PFNGLVERTEXATTRIBS1HVNVPROC) (GLuint index, GLsizei n, c
typedef void (APIENTRYP PFNGLVERTEXATTRIBS2HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
typedef void (APIENTRYP PFNGLVERTEXATTRIBS3HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
typedef void (APIENTRYP PFNGLVERTEXATTRIBS4HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog);
+typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight);
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight);
#ifdef GL_GLEXT_PROTOTYPES
GLAPI void APIENTRY glVertex2hNV (GLhalfNV x, GLhalfNV y);
GLAPI void APIENTRY glVertex2hvNV (const GLhalfNV *v);
@@ -10295,12 +10310,6 @@ GLAPI void APIENTRY glMultiTexCoord3hNV (GLenum target, GLhalfNV s, GLhalfNV t,
GLAPI void APIENTRY glMultiTexCoord3hvNV (GLenum target, const GLhalfNV *v);
GLAPI void APIENTRY glMultiTexCoord4hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
GLAPI void APIENTRY glMultiTexCoord4hvNV (GLenum target, const GLhalfNV *v);
-GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog);
-GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog);
-GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
-GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v);
-GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight);
-GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight);
GLAPI void APIENTRY glVertexAttrib1hNV (GLuint index, GLhalfNV x);
GLAPI void APIENTRY glVertexAttrib1hvNV (GLuint index, const GLhalfNV *v);
GLAPI void APIENTRY glVertexAttrib2hNV (GLuint index, GLhalfNV x, GLhalfNV y);
@@ -10313,6 +10322,12 @@ GLAPI void APIENTRY glVertexAttribs1hvNV (GLuint index, GLsizei n, const GLhalfN
GLAPI void APIENTRY glVertexAttribs2hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
GLAPI void APIENTRY glVertexAttribs3hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
GLAPI void APIENTRY glVertexAttribs4hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
+GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog);
+GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog);
+GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
+GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight);
+GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight);
#endif
#endif /* GL_NV_half_float */
@@ -11449,6 +11464,10 @@ GLAPI void APIENTRY glDrawTransformFeedbackNV (GLenum mode, GLuint id);
#endif
#endif /* GL_NV_transform_feedback2 */
+#ifndef GL_NV_uniform_buffer_std430_layout
+#define GL_NV_uniform_buffer_std430_layout 1
+#endif /* GL_NV_uniform_buffer_std430_layout */
+
#ifndef GL_NV_uniform_buffer_unified_memory
#define GL_NV_uniform_buffer_unified_memory 1
#define GL_UNIFORM_BUFFER_UNIFIED_NV 0x936E
@@ -11964,8 +11983,10 @@ GLAPI void APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum s
#define GL_MAX_VIEWS_OVR 0x9631
#define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633
typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
#ifdef GL_GLEXT_PROTOTYPES
GLAPI void APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
+GLAPI void APIENTRY glNamedFramebufferTextureMultiviewOVR (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
#endif
#endif /* GL_OVR_multiview */
diff --git a/3rdparty/GPUOpen/VulkanMemoryAllocator b/3rdparty/GPUOpen/VulkanMemoryAllocator
new file mode 160000
index 0000000000..1d8f600fd4
--- /dev/null
+++ b/3rdparty/GPUOpen/VulkanMemoryAllocator
@@ -0,0 +1 @@
+Subproject commit 1d8f600fd424278486eade7ed3e877c99f0846b1
diff --git a/3rdparty/GPUOpen/include/vk_mem_alloc.h b/3rdparty/GPUOpen/include/vk_mem_alloc.h
deleted file mode 100644
index bb72d53f57..0000000000
--- a/3rdparty/GPUOpen/include/vk_mem_alloc.h
+++ /dev/null
@@ -1,18120 +0,0 @@
-//
-// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-//
-
-#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
-#define AMD_VULKAN_MEMORY_ALLOCATOR_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** \mainpage Vulkan Memory Allocator
-
-Version 2.3.0 (2019-12-04)
-
-Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. \n
-License: MIT
-
-Documentation of all members: vk_mem_alloc.h
-
-\section main_table_of_contents Table of contents
-
-- User guide
- - \subpage quick_start
- - [Project setup](@ref quick_start_project_setup)
- - [Initialization](@ref quick_start_initialization)
- - [Resource allocation](@ref quick_start_resource_allocation)
- - \subpage choosing_memory_type
- - [Usage](@ref choosing_memory_type_usage)
- - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
- - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
- - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
- - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
- - \subpage memory_mapping
- - [Mapping functions](@ref memory_mapping_mapping_functions)
- - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
- - [Cache flush and invalidate](@ref memory_mapping_cache_control)
- - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable)
- - \subpage staying_within_budget
- - [Querying for budget](@ref staying_within_budget_querying_for_budget)
- - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
- - \subpage custom_memory_pools
- - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
- - [Linear allocation algorithm](@ref linear_algorithm)
- - [Free-at-once](@ref linear_algorithm_free_at_once)
- - [Stack](@ref linear_algorithm_stack)
- - [Double stack](@ref linear_algorithm_double_stack)
- - [Ring buffer](@ref linear_algorithm_ring_buffer)
- - [Buddy allocation algorithm](@ref buddy_algorithm)
- - \subpage defragmentation
- - [Defragmenting CPU memory](@ref defragmentation_cpu)
- - [Defragmenting GPU memory](@ref defragmentation_gpu)
- - [Additional notes](@ref defragmentation_additional_notes)
- - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm)
- - \subpage lost_allocations
- - \subpage statistics
- - [Numeric statistics](@ref statistics_numeric_statistics)
- - [JSON dump](@ref statistics_json_dump)
- - \subpage allocation_annotation
- - [Allocation user data](@ref allocation_user_data)
- - [Allocation names](@ref allocation_names)
- - \subpage debugging_memory_usage
- - [Memory initialization](@ref debugging_memory_usage_initialization)
- - [Margins](@ref debugging_memory_usage_margins)
- - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
- - \subpage record_and_replay
-- \subpage usage_patterns
- - [Common mistakes](@ref usage_patterns_common_mistakes)
- - [Simple patterns](@ref usage_patterns_simple)
- - [Advanced patterns](@ref usage_patterns_advanced)
-- \subpage configuration
- - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
- - [Custom host memory allocator](@ref custom_memory_allocator)
- - [Device memory allocation callbacks](@ref allocation_callbacks)
- - [Device heap memory limit](@ref heap_memory_limit)
- - \subpage vk_khr_dedicated_allocation
-- \subpage general_considerations
- - [Thread safety](@ref general_considerations_thread_safety)
- - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
- - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
- - [Features not supported](@ref general_considerations_features_not_supported)
-
-\section main_see_also See also
-
-- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
-- [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
-
-
-
-
-\page quick_start Quick start
-
-\section quick_start_project_setup Project setup
-
-Vulkan Memory Allocator comes in form of a "stb-style" single header file.
-You don't need to build it as a separate library project.
-You can add this file directly to your project and submit it to code repository next to your other source files.
-
-"Single header" doesn't mean that everything is contained in C/C++ declarations,
-like it tends to be in case of inline functions or C++ templates.
-It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
-If you don't do it properly, you will get linker errors.
-
-To do it properly:
-
--# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
- This includes declarations of all members of the library.
--# In exactly one CPP file, define the following macro before this include.
-   This also enables the internal definitions.
-
-\code
-#define VMA_IMPLEMENTATION
-#include "vk_mem_alloc.h"
-\endcode
-
-It may be a good idea to create a dedicated CPP file just for this purpose.
-
-Note on language: This library is written in C++, but has C-compatible interface.
-Thus you can include and use vk_mem_alloc.h in C or C++ code, but full
-implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
-
-Please note that this library includes header `<vulkan/vulkan.h>`, which in turn
-includes `<windows.h>` on Windows. If you need some specific macros defined
-before including these headers (like `WIN32_LEAN_AND_MEAN` or
-`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
-them before every `#include` of this library.
-
-
-\section quick_start_initialization Initialization
-
-At program startup:
-
--# Initialize Vulkan to have `VkPhysicalDevice` and `VkDevice` object.
--# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
- calling vmaCreateAllocator().
-
-\code
-VmaAllocatorCreateInfo allocatorInfo = {};
-allocatorInfo.physicalDevice = physicalDevice;
-allocatorInfo.device = device;
-
-VmaAllocator allocator;
-vmaCreateAllocator(&allocatorInfo, &allocator);
-\endcode
-
-\section quick_start_resource_allocation Resource allocation
-
-When you want to create a buffer or image:
-
--# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
--# Fill VmaAllocationCreateInfo structure.
--# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
- already allocated and bound to it.
-
-\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufferInfo.size = 65536;
-bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-Don't forget to destroy your objects when no longer needed:
-
-\code
-vmaDestroyBuffer(allocator, buffer, allocation);
-vmaDestroyAllocator(allocator);
-\endcode
-
-
-\page choosing_memory_type Choosing memory type
-
-Physical devices in Vulkan support various combinations of memory heaps and
-types. Help with choosing correct and optimal memory type for your specific
-resource is one of the key features of this library. You can use it by filling
-appropriate members of VmaAllocationCreateInfo structure, as described below.
-You can also combine multiple methods.
-
--# If you just want to find memory type index that meets your requirements, you
- can use function: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(),
- vmaFindMemoryTypeIndexForImageInfo().
--# If you want to allocate a region of device memory without association with any
- specific image or buffer, you can use function vmaAllocateMemory(). Usage of
- this function is not recommended and usually not needed.
- vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
- which may be useful for sparse binding.
--# If you already have a buffer or an image created, want to allocate memory
-   for it, and will bind it yourself, you can use the functions
-   vmaAllocateMemoryForBuffer() and vmaAllocateMemoryForImage().
- For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()
- or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
--# If you want to create a buffer or an image, allocate memory for it and bind
- them together, all in one call, you can use function vmaCreateBuffer(),
- vmaCreateImage(). This is the easiest and recommended way to use this library.
-
-When using 3. or 4., the library internally queries Vulkan for memory types
-supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
-and uses only one of these types.
-
-If no memory type can be found that meets all the requirements, these functions
-return `VK_ERROR_FEATURE_NOT_PRESENT`.
-
-You can leave VmaAllocationCreateInfo structure completely filled with zeros.
-It means no requirements are specified for memory type.
-It is valid, although not very useful.
-
-\section choosing_memory_type_usage Usage
-
-The easiest way to specify memory requirements is to fill member
-VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
-It defines high level, common usage types.
-For more details, see description of this enum.
-
-For example, if you want to create a uniform buffer that will be filled using
-transfer only once or infrequently and used for rendering every frame, you can
-do it using following code:
-
-\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufferInfo.size = 65536;
-bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-\section choosing_memory_type_required_preferred_flags Required and preferred flags
-
-You can specify more detailed requirements by filling members
-VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
-with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
-if you want to create a buffer that will be persistently mapped on host (so it
-must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
-use following code:
-
-\code
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-A memory type is chosen that has all the required flags and as many preferred
-flags set as possible.
-
-If you use VmaAllocationCreateInfo::usage, it is just internally converted to
-a set of required and preferred flags.
-
-\section choosing_memory_type_explicit_memory_types Explicit memory types
-
-If you inspected memory types available on the physical device and you have
-a preference for memory types that you want to use, you can fill member
-VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
-means that a memory type with that index is allowed to be used for the
-allocation. Special value 0, just like `UINT32_MAX`, means there are no
-restrictions to memory type index.
-
-Please note that this member is NOT just a memory type index.
-Still you can use it to choose just one, specific memory type.
-For example, if you already determined that your buffer should be created in
-memory type 2, use following code:
-
-\code
-uint32_t memoryTypeIndex = 2;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-\section choosing_memory_type_custom_memory_pools Custom memory pools
-
-If you allocate from custom memory pool, all the ways of specifying memory
-requirements described above are not applicable and the aforementioned members
-of VmaAllocationCreateInfo structure are ignored. Memory type is selected
-explicitly when creating the pool and then used to make all the allocations from
-that pool. For further details, see \ref custom_memory_pools.
-
-\section choosing_memory_type_dedicated_allocations Dedicated allocations
-
-Memory for allocations is reserved out of larger block of `VkDeviceMemory`
-allocated from Vulkan internally. That's the main feature of this whole library.
-You can still request a separate memory block to be created for an allocation,
-just like you would do in a trivial solution without using any allocator.
-In that case, a buffer or image is always bound to that memory at offset 0.
-This is called a "dedicated allocation".
-You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-The library can also internally decide to use dedicated allocation in some cases, e.g.:
-
-- When the size of the allocation is large.
-- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
- and it reports that dedicated allocation is required or recommended for the resource.
-- When allocation of the next big memory block fails because there is not enough device memory,
-  but an allocation with the exact requested size succeeds.
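-
-For example, a dedicated allocation for a large buffer could be requested like the
-following minimal sketch (the size and usage values here are only placeholders):
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 64ull * 1024 * 1024; // Placeholder size, large enough to justify its own block.
-bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // Request a separate VkDeviceMemory block.
-
-VkBuffer buf;
-VmaAllocation alloc;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-\endcode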
-
-
-\page memory_mapping Memory mapping
-
-To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
-to be able to read from it or write to it in CPU code.
-Mapping is possible only for memory allocated from a memory type that has
-`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
-Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
-You can use them directly with memory allocated by this library,
-but it is not recommended because of the following issue:
-Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
-This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
-Because of this, Vulkan Memory Allocator provides following facilities:
-
-\section memory_mapping_mapping_functions Mapping functions
-
-The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
-They are safer and more convenient to use than standard Vulkan functions.
-You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
-You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
-The way it's implemented is that the library always maps entire memory block, not just region of the allocation.
-For further details, see description of vmaMapMemory() function.
-Example:
-
-\code
-// Having these objects initialized:
-
-struct ConstantBuffer
-{
- ...
-};
-ConstantBuffer constantBufferData;
-
-VmaAllocator allocator;
-VkBuffer constantBuffer;
-VmaAllocation constantBufferAllocation;
-
-// You can map and fill your buffer using following code:
-
-void* mappedData;
-vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
-memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
-vmaUnmapMemory(allocator, constantBufferAllocation);
-\endcode
-
-When mapping, you may see a warning from Vulkan validation layer similar to this one:
-
-Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.
-
-It happens because the library maps entire `VkDeviceMemory` block, where different
-types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
-You can safely ignore it if you are sure you access only memory of the intended
-object that you wanted to map.
-
-
-\section memory_mapping_persistently_mapped_memory Persistently mapped memory
-
-Keeping your memory persistently mapped is generally OK in Vulkan.
-You don't need to unmap it before using its data on the GPU.
-The library provides a special feature designed for that:
-Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
-VmaAllocationCreateInfo::flags stay mapped all the time,
-so you can just access CPU pointer to it any time
-without a need to call any "map" or "unmap" function.
-Example:
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = sizeof(ConstantBuffer);
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-// Buffer is already mapped. You can access its memory.
-memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
-\endcode
-
-There are some exceptions though, when you should consider mapping memory only for a short period of time:
-
-- When operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2),
- device is discrete AMD GPU,
- and memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory
- (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU),
- then whenever a memory block allocated from this memory type stays mapped
- for the time of any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, this
- block is migrated by WDDM to system RAM, which degrades performance. It doesn't
- matter if that particular memory block is actually used by the command buffer
- being submitted.
-- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175)
- which requires unmapping before GPU can see updated texture.
-- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.
-
-\section memory_mapping_cache_control Cache flush and invalidate
-
-Memory in Vulkan doesn't need to be unmapped before using it on GPU,
-but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
-you need to manually **invalidate** the cache before reading from a mapped pointer
-and **flush** the cache after writing to a mapped pointer.
-Map/unmap operations don't do that automatically.
-Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,
-`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
-functions that refer to given allocation object: vmaFlushAllocation(),
-vmaInvalidateAllocation().
-
-Regions of memory specified for flush/invalidate must be aligned to
-`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
-In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
-within blocks are aligned to this value, so their offsets are always a multiple of
-`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
-
-Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`.
-
-Also, Windows drivers from all 3 **PC** GPU vendors (AMD, Intel, NVIDIA)
-currently provide `HOST_COHERENT` flag on all memory types that are
-`HOST_VISIBLE`, so on this platform you may not need to bother.
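-
-A minimal sketch, assuming `alloc` was created in a memory type that is `HOST_VISIBLE`
-but not `HOST_COHERENT`, is persistently mapped at `allocInfo.pMappedData`, and that both
-functions take an offset and size relative to the allocation:
-
-\code
-// CPU writes, then flushes so the GPU sees the new data.
-memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
-vmaFlushAllocation(allocator, alloc, 0, sizeof(constantBufferData));
-
-// Before the CPU reads data the GPU has written through the same mapping.
-vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
-\endcode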
-
-\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable
-
-It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping)
-even though it wasn't explicitly requested.
-For example, application may work on integrated graphics with unified memory (like Intel) or
-allocation from video memory might have failed, so the library chose system memory as fallback.
-
-You can detect this case and map such allocation to access its memory on CPU directly,
-instead of launching a transfer operation.
-In order to do that: inspect `allocInfo.memoryType`, call vmaGetMemoryTypeProperties(),
-and look for `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag in properties of that memory type.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = sizeof(ConstantBuffer);
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-VkMemoryPropertyFlags memFlags;
-vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
-if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-{
- // Allocation ended up in mappable memory. You can map it and access it directly.
- void* mappedData;
- vmaMapMemory(allocator, alloc, &mappedData);
- memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
- vmaUnmapMemory(allocator, alloc);
-}
-else
-{
- // Allocation ended up in non-mappable memory.
- // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
-}
-\endcode
-
-You can even use #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations
-that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY).
-If the allocation ends up in memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly.
-If not, the flag is just ignored.
-Example:
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = sizeof(ConstantBuffer);
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-if(allocInfo.pMappedData != nullptr)
-{
- // Allocation ended up in mappable memory.
- // It's persistently mapped. You can access it directly.
- memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
-}
-else
-{
- // Allocation ended up in non-mappable memory.
- // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
-}
-\endcode
-
-
-\page staying_within_budget Staying within budget
-
-When developing a graphics-intensive game or program, it is important to avoid allocating
-more GPU memory than it's physically available. When the memory is over-committed,
-various bad things can happen, depending on the specific GPU, graphics driver, and
-operating system:
-
-- It may just work without any problems.
-- The application may slow down because some memory blocks are moved to system RAM
- and the GPU has to access them through PCI Express bus.
-- A new allocation may take a very long time to complete, even a few seconds, and may
-  possibly freeze the entire system.
-- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
- returned somewhere later.
-
-\section staying_within_budget_querying_for_budget Querying for budget
-
-To query for current memory usage and available budget, use function vmaGetBudget().
-Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
-
-Please note that this function returns different information and works faster than
-vmaCalculateStats(). vmaGetBudget() can be called every frame or even before every
-allocation, while vmaCalculateStats() is intended to be used rarely,
-only to obtain statistical information, e.g. for debugging purposes.
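-
-A minimal sketch of querying it every frame (the `usage` and `budget` members and the
-per-heap array size are assumptions about this version's #VmaBudget):
-
-\code
-VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
-vmaGetBudget(allocator, budgets);
-
-// Heap 0: bytes currently in use vs. the driver's estimate of what we may use.
-printf("Heap 0: usage = %llu, budget = %llu\n",
-    (unsigned long long)budgets[0].usage,
-    (unsigned long long)budgets[0].budget);
-\endcode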
-
-It is recommended to use VK_EXT_memory_budget device extension to obtain information
-about the budget from Vulkan device. VMA is able to use this extension automatically.
-When not enabled, the allocator behaves the same way, but then it estimates current usage
-and available budget based on its internal information and Vulkan memory heap sizes,
-which may be less precise. In order to use this extension:
-
-1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
- required by it are available and enable them. Please note that the first is a device
- extension and the second is an instance extension!
-2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
-3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
- Vulkan inside of it to avoid overhead of querying it with every allocation.
-
-\section staying_within_budget_controlling_memory_usage Controlling memory usage
-
-There are many ways in which you can try to stay within the budget.
-
-First, when making new allocation requires allocating a new memory block, the library
-tries not to exceed the budget automatically. If a block with default recommended size
-(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
-dedicated memory for just this resource.
-
-If the size of the requested resource plus current memory usage is more than the
-budget, by default the library still tries to create it, leaving it to the Vulkan
-implementation whether the allocation succeeds or fails. You can change this behavior
-by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
-not made if it would exceed the budget or if the budget is already exceeded.
-Some other allocations become lost instead to make room for it, if the mechanism of
-[lost allocations](@ref lost_allocations) is used.
-If that is not possible, the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
-when creating resources that are not essential for the application (e.g. the texture
-of a specific object) and not to pass it when creating critically important resources
-(e.g. render targets).
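-
-A minimal sketch for such a non-essential resource, assuming `bufCreateInfo` is filled as
-in the earlier examples:
-
-\code
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-if(res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
-{
-    // The resource didn't fit in the budget - skip it or try a smaller variant.
-}
-\endcode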
-
-Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
-a new allocation is created only when it fits inside one of the existing memory blocks.
-If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-This also ensures that the function call is very fast because it never goes to Vulkan
-to obtain a new block.
-
-Please note that creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
-set to more than 0 will try to allocate memory blocks without checking whether they
-fit within budget.
-
-
-\page custom_memory_pools Custom memory pools
-
-A memory pool contains a number of `VkDeviceMemory` blocks.
-The library automatically creates and manages default pool for each memory type available on the device.
-Default memory pool automatically grows in size.
-Size of allocated blocks is also variable and managed automatically.
-
-You can create custom pool and allocate memory out of it.
-It can be useful if you want to:
-
-- Keep certain kind of allocations separate from others.
-- Enforce particular, fixed size of Vulkan memory blocks.
-- Limit maximum amount of Vulkan memory allocated for that pool.
-- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool.
-
-To use custom memory pools:
-
--# Fill VmaPoolCreateInfo structure.
--# Call vmaCreatePool() to obtain #VmaPool handle.
--# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
- You don't need to specify any other parameters of this structure, like `usage`.
-
-Example:
-
-\code
-// Create a pool that can have at most 2 blocks, 128 MiB each.
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = ...
-poolCreateInfo.blockSize = 128ull * 1024 * 1024;
-poolCreateInfo.maxBlockCount = 2;
-
-VmaPool pool;
-vmaCreatePool(allocator, &poolCreateInfo, &pool);
-
-// Allocate a buffer out of it.
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 1024;
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.pool = pool;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-\endcode
-
-You have to free all allocations made from this pool before destroying it.
-
-\code
-vmaDestroyBuffer(allocator, buf, alloc);
-vmaDestroyPool(allocator, pool);
-\endcode
-
-\section custom_memory_pools_MemTypeIndex Choosing memory type index
-
-When creating a pool, you must explicitly specify memory type index.
-To find the one suitable for your buffers or images, you can use helper functions
-vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
-You need to provide structures with example parameters of buffers or images
-that you are going to create in that pool.
-
-\code
-VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-exampleBufCreateInfo.size = 1024; // Whatever.
-exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed.
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.
-
-uint32_t memTypeIndex;
-vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
-
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-// ...
-\endcode
-
-When creating buffers/images allocated in that pool, provide following parameters:
-
-- `VkBufferCreateInfo`: Prefer to pass same parameters as above.
- Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
- Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
- or the other way around.
-- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member.
- Other members are ignored anyway.
-
-\section linear_algorithm Linear allocation algorithm
-
-Each Vulkan memory block managed by this library has accompanying metadata that
-keeps track of used and unused regions. By default, the metadata structure and
-algorithm tries to find best place for new allocations among free regions to
-optimize memory usage. This way you can allocate and free objects in any order.
-
-
-
-Sometimes there is a need to use simpler, linear allocation algorithm. You can
-create custom pool that uses such algorithm by adding flag
-#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
-#VmaPool object. Then an alternative metadata management is used. It always
-creates new allocations after last one and doesn't reuse free regions after
-allocations freed in the middle. It results in better allocation performance and
-less memory consumed by metadata.
-
-
-
-With this one flag, you can create a custom pool that can be used in many ways:
-free-at-once, stack, double stack, and ring buffer. See below for details.
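-
-Creating such a pool is a matter of adding this one flag - a minimal sketch, assuming
-`memTypeIndex` was chosen as shown in the previous section:
-
-\code
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
-poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Example size only.
-poolCreateInfo.maxBlockCount = 1; // Needed for the double stack and ring buffer modes below.
-
-VmaPool pool;
-vmaCreatePool(allocator, &poolCreateInfo, &pool);
-\endcode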
-
-\subsection linear_algorithm_free_at_once Free-at-once
-
-In a pool that uses linear algorithm, you still need to free all the allocations
-individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
-them in any order. New allocations are always made after last one - free space
-in the middle is not reused. However, when you release all the allocations and
-the pool becomes empty, allocation starts from the beginning again. This way you
-can use linear algorithm to speed up creation of allocations that you are going
-to release all at once.
-
-
-
-This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
-value that allows multiple memory blocks.
-
-\subsection linear_algorithm_stack Stack
-
-When you free an allocation that was created last, its space can be reused.
-Thanks to this, if you always release allocations in the order opposite to their
-creation (LIFO - Last In First Out), you can achieve behavior of a stack.
-
-
-
-This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
-value that allows multiple memory blocks.
-
-\subsection linear_algorithm_double_stack Double stack
-
-The space reserved by a custom pool with linear algorithm may be used by two
-stacks:
-
-- First, default one, growing up from offset 0.
-- Second, "upper" one, growing down from the end towards lower offsets.
-
-To make allocation from upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
-to VmaAllocationCreateInfo::flags.
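-
-A minimal sketch, assuming `pool` was created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
-and `bufCreateInfo` is filled as in the earlier examples:
-
-\code
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.pool = pool;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Allocate from the "upper" stack.
-
-VkBuffer buf;
-VmaAllocation alloc;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-\endcode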
-
-
-
-Double stack is available only in pools with one memory block -
-VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
-
-When the two stacks' ends meet so there is not enough space between them for a
-new allocation, such allocation fails with usual
-`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-
-\subsection linear_algorithm_ring_buffer Ring buffer
-
-When you free some allocations from the beginning and there is not enough free space
-for a new one at the end of a pool, allocator's "cursor" wraps around to the
-beginning and starts allocation there. Thanks to this, if you always release
-allocations in the same order as you created them (FIFO - First In First Out),
-you can achieve behavior of a ring buffer / queue.
-
-
-
-Pools with linear algorithm support [lost allocations](@ref lost_allocations) when used as ring buffer.
-If there is not enough free space for a new allocation, but existing allocations
-from the front of the queue can become lost, they become lost and the allocation
-succeeds.
-
-
-
-Ring buffer is available only in pools with one memory block -
-VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
-
-\section buddy_algorithm Buddy allocation algorithm
-
-There is another allocation algorithm that can be used with custom pools, called
-"buddy". Its internal data structure is based on a tree of blocks, each having
-size that is a power of two and a half of its parent's size. When you want to
-allocate memory of certain size, a free node in the tree is located. If it's too
-large, it is recursively split into two halves (called "buddies"). However, if
-requested allocation size is not a power of two, the size of a tree node is
-aligned up to the nearest power of two and the remaining space is wasted. When
-two buddy nodes become free, they are merged back into one larger node.
-
-
-
-The advantage of buddy allocation algorithm over default algorithm is faster
-allocation and deallocation, as well as smaller external fragmentation. The
-disadvantage is more wasted space (internal fragmentation).
-
-For more information, please read ["Buddy memory allocation" on Wikipedia](https://en.wikipedia.org/wiki/Buddy_memory_allocation)
-or other sources that describe this concept in general.
-
-To use buddy allocation algorithm with a custom pool, add flag
-#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
-#VmaPool object.
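-
-A minimal sketch, analogous to the linear pool above:
-
-\code
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
-poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Prefer a power of two (see the limitations below).
-
-VmaPool pool;
-vmaCreatePool(allocator, &poolCreateInfo, &pool);
-\endcode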
-
-Several limitations apply to pools that use buddy algorithm:
-
-- It is recommended to use VmaPoolCreateInfo::blockSize that is a power of two.
- Otherwise, only largest power of two smaller than the size is used for
- allocations. The remaining space always stays unused.
-- [Margins](@ref debugging_memory_usage_margins) and
- [corruption detection](@ref debugging_memory_usage_corruption_detection)
- don't work in such pools.
-- [Lost allocations](@ref lost_allocations) don't work in such pools. You can
- use them, but they never become lost. Support may be added in the future.
-- [Defragmentation](@ref defragmentation) doesn't work with allocations made from
- such pool.
-
-\page defragmentation Defragmentation
-
-Interleaved allocations and deallocations of many objects of varying size can
-cause fragmentation over time, which can lead to a situation where the library is unable
-to find a continuous range of free memory for a new allocation even though there is
-enough total free space, just scattered across many small free ranges between existing
-allocations.
-
-To mitigate this problem, you can use the defragmentation feature:
-structure #VmaDefragmentationInfo2 and functions vmaDefragmentationBegin(), vmaDefragmentationEnd().
-Given a set of allocations,
-these functions can move them to compact used memory, ensure more continuous free
-space and possibly also free some `VkDeviceMemory` blocks.
-
-What the defragmentation does is:
-
-- Updates #VmaAllocation objects to point to new `VkDeviceMemory` and offset.
- After allocation has been moved, its VmaAllocationInfo::deviceMemory and/or
- VmaAllocationInfo::offset changes. You must query them again using
- vmaGetAllocationInfo() if you need them.
-- Moves actual data in memory.
-
-What it doesn't do, so you need to do it yourself:
-
-- Recreate buffers and images that were bound to allocations that were defragmented and
- bind them with their new places in memory.
- You must use `vkDestroyBuffer()`, `vkDestroyImage()`,
- `vkCreateBuffer()`, `vkCreateImage()`, vmaBindBufferMemory(), vmaBindImageMemory()
- for that purpose and NOT vmaDestroyBuffer(),
- vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to
- destroy or create allocation objects!
-- Recreate views and update descriptors that point to these buffers and images.
-
-\section defragmentation_cpu Defragmenting CPU memory
-
-The following example demonstrates how you can run defragmentation on CPU.
-Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented.
-Others are ignored.
-
-The way it works is:
-
-- It temporarily maps entire memory blocks when necessary.
-- It moves data using `memmove()` function.
-
-\code
-// Given following variables already initialized:
-VkDevice device;
-VmaAllocator allocator;
-std::vector<VkBuffer> buffers;
-std::vector<VmaAllocation> allocations;
-
-
-const uint32_t allocCount = (uint32_t)allocations.size();
-std::vector<VkBool32> allocationsChanged(allocCount);
-
-VmaDefragmentationInfo2 defragInfo = {};
-defragInfo.allocationCount = allocCount;
-defragInfo.pAllocations = allocations.data();
-defragInfo.pAllocationsChanged = allocationsChanged.data();
-defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
-defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
-
-VmaDefragmentationContext defragCtx;
-vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
-vmaDefragmentationEnd(allocator, defragCtx);
-
-for(uint32_t i = 0; i < allocCount; ++i)
-{
- if(allocationsChanged[i])
- {
- // Destroy buffer that is immutably bound to memory region which is no longer valid.
- vkDestroyBuffer(device, buffers[i], nullptr);
-
- // Create new buffer with same parameters.
- VkBufferCreateInfo bufferInfo = ...;
- vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
-
- // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
-
- // Bind new buffer to new memory region. Data contained in it is already moved.
- VmaAllocationInfo allocInfo;
- vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
- vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
- }
-}
-\endcode
-
-Setting VmaDefragmentationInfo2::pAllocationsChanged is optional.
-This output array tells whether a particular allocation in VmaDefragmentationInfo2::pAllocations at the same index
-has been modified during defragmentation.
-You can pass null, but then you need to query every allocation passed to defragmentation
-for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.
-
-If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools),
-you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools
-instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations
-to defragment all allocations in given pools.
-You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case.
-You can also combine both methods.
-
-\section defragmentation_gpu Defragmenting GPU memory
-
-It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`.
-To do that, you need to pass a command buffer that meets requirements as described in
-VmaDefragmentationInfo2::commandBuffer. The way it works is:
-
-- It creates temporary buffers and binds them to entire memory blocks when necessary.
-- It issues `vkCmdCopyBuffer()` to passed command buffer.
-
-Example:
-
-\code
-// Given the following variables already initialized:
-VkDevice device;
-VmaAllocator allocator;
-VkCommandBuffer commandBuffer;
-std::vector<VkBuffer> buffers;
-std::vector<VmaAllocation> allocations;
-
-
-const uint32_t allocCount = (uint32_t)allocations.size();
-std::vector<VkBool32> allocationsChanged(allocCount);
-
-VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
-vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
-
-VmaDefragmentationInfo2 defragInfo = {};
-defragInfo.allocationCount = allocCount;
-defragInfo.pAllocations = allocations.data();
-defragInfo.pAllocationsChanged = allocationsChanged.data();
-defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
-defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
-defragInfo.commandBuffer = commandBuffer;
-
-VmaDefragmentationContext defragCtx;
-vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
-
-vkEndCommandBuffer(commandBuffer);
-
-// Submit commandBuffer.
-// Wait for a fence that ensures commandBuffer execution finished.
-
-vmaDefragmentationEnd(allocator, defragCtx);
-
-for(uint32_t i = 0; i < allocCount; ++i)
-{
- if(allocationsChanged[i])
- {
- // Destroy buffer that is immutably bound to memory region which is no longer valid.
- vkDestroyBuffer(device, buffers[i], nullptr);
-
- // Create new buffer with same parameters.
- VkBufferCreateInfo bufferInfo = ...;
- vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
-
- // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
-
- // Bind new buffer to new memory region. Data contained in it is already moved.
- VmaAllocationInfo allocInfo;
- vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
- vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
- }
-}
-\endcode
-
-You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters.
-The library automatically chooses the best method to defragment each memory pool.
-
-You may try not to block your entire program to wait until defragmentation finishes,
-but do it in the background, as long as you carefully fulfill the requirements described
-in function vmaDefragmentationBegin().
-
-\section defragmentation_additional_notes Additional notes
-
-It is only legal to defragment allocations bound to:
-
-- buffers
-- images created with `VK_IMAGE_CREATE_ALIAS_BIT`, `VK_IMAGE_TILING_LINEAR`, and
- being currently in `VK_IMAGE_LAYOUT_GENERAL` or `VK_IMAGE_LAYOUT_PREINITIALIZED`.
-
-Defragmentation of images created with `VK_IMAGE_TILING_OPTIMAL` or in any other
-layout may give undefined results.
-
-If you defragment allocations bound to images, new images to be bound to new
-memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED`
-and then transitioned to their original layout from before defragmentation if
-needed using an image memory barrier.
-
-While using defragmentation, you may experience validation layer warnings, which you just need to ignore.
-See [Validation layer warnings](@ref general_considerations_validation_layer_warnings).
-
-Please don't expect memory to be fully compacted after defragmentation.
-The algorithms inside are based on heuristics that try to maximize the number of Vulkan
-memory blocks that become totally empty (so they can be released), as well as to maximize the continuous
-empty space inside remaining blocks, while minimizing the number and size of allocations that
-need to be moved. Some fragmentation may still remain - this is normal.
-
-\section defragmentation_custom_algorithm Writing custom defragmentation algorithm
-
-If you want to implement your own, custom defragmentation algorithm,
-there is infrastructure prepared for that,
-but it is not exposed through the library API - you need to hack its source code.
-Here are steps needed to do this:
-
--# Main thing you need to do is to define your own class derived from base abstract
- class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods.
- See definition and comments of this class for details.
--# Your code needs to interact with device memory block metadata.
- If you need more access to its data than it's provided by its public interface,
- declare your new class as a friend class e.g. in class `VmaBlockMetadata_Generic`.
--# If you want to create a flag that would enable your algorithm or pass some additional
- flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in
- VmaDefragmentationInfo2::flags.
--# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create object
- of your new class whenever needed.
-
-
-\page lost_allocations Lost allocations
-
-If your game oversubscribes video memory, it may work OK in previous-generation
-graphics APIs (DirectX 9, 10, 11, OpenGL) because resources are automatically
-paged to system RAM. In Vulkan you can't do that because when you run out of
-memory, an allocation just fails. If you have more data (e.g. textures) than can
-fit into VRAM and you don't need it all at once, you may want to upload them to
-GPU on demand and "push out" ones that are not used for a long time to make room
-for the new ones, effectively using VRAM (or a certain memory pool) as a form of
-cache. Vulkan Memory Allocator can help you with that by supporting a concept of
-"lost allocations".
-
-To create an allocation that can become lost, include #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
-flag in VmaAllocationCreateInfo::flags. Before using a buffer or image bound to
-such an allocation in every new frame, you need to check whether it has become lost.
-To check it, call vmaTouchAllocation().
-If the allocation is lost, you must not use it or the buffer/image bound to it.
-Don't forget to destroy this allocation and this buffer/image.
-vmaGetAllocationInfo() can also be used for checking the status of the allocation.
-The allocation is lost when the returned VmaAllocationInfo::deviceMemory == `VK_NULL_HANDLE`.
-
-To create an allocation that can make some other allocations lost to make room
-for it, use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. You will
-usually use both flags #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT and
-#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT at the same time.
-
-Warning! Current implementation uses quite naive, brute force algorithm,
-which can make allocation calls that use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
-flag quite slow. A new, more optimal algorithm and data structure to speed this
-up is planned for the future.
-
-Q: When interleaving creation of new allocations with usage of existing ones,
-how do you make sure that an allocation won't become lost while it's used in the
-current frame?
-
-It is ensured because vmaTouchAllocation() / vmaGetAllocationInfo() not only returns allocation
-status/parameters and checks whether it's not lost, but when it's not, it also
-atomically marks it as used in the current frame, which makes it impossible for it to
-become lost in that frame. It uses a lockless algorithm, so it works fast and
-doesn't involve locking any internal mutex.
-
-Q: What if my allocation may still be in use by the GPU when it's rendering a
-previous frame while I already submit a new frame on the CPU?
-
-You can make sure that allocations "touched" by vmaTouchAllocation() / vmaGetAllocationInfo() will not
-become lost for a number of additional frames back from the current one by
-specifying this number as VmaAllocatorCreateInfo::frameInUseCount (for default
-memory pool) and VmaPoolCreateInfo::frameInUseCount (for custom pool).
-
-Q: How do you inform the library when a new frame starts?
-
-You need to call function vmaSetCurrentFrameIndex().
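-
-For example (a minimal sketch; `frameIndex` is an illustrative counter incremented by your application once per frame):
-
-\code
-// Somewhere at the beginning of each frame:
-vmaSetCurrentFrameIndex(allocator, frameIndex);
-\endcode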
-
-Example code:
-
-\code
-struct MyBuffer
-{
- VkBuffer m_Buf = nullptr;
- VmaAllocation m_Alloc = nullptr;
-
- // Called when the buffer is really needed in the current frame.
- void EnsureBuffer();
-};
-
-void MyBuffer::EnsureBuffer()
-{
- // Buffer has been created.
- if(m_Buf != VK_NULL_HANDLE)
- {
- // Check if its allocation is not lost + mark it as used in current frame.
- if(vmaTouchAllocation(allocator, m_Alloc))
- {
- // It's all OK - safe to use m_Buf.
- return;
- }
- }
-
- // Buffer doesn't exist yet or is lost - destroy and recreate it.
-
- vmaDestroyBuffer(allocator, m_Buf, m_Alloc);
-
- VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
- bufCreateInfo.size = 1024;
- bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
- VmaAllocationCreateInfo allocCreateInfo = {};
- allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
- allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
- VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
-
- vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr);
-}
-\endcode
-
-When using lost allocations, you may see some Vulkan validation layer warnings
-about overlapping regions of memory bound to different kinds of buffers and
-images. This is still valid as long as you implement proper handling of lost
-allocations (like in the example above) and don't use them.
-
-You can create an allocation that is already in lost state from the beginning using function
-vmaCreateLostAllocation(). It may be useful if you need a "dummy" allocation that is not null.
-
-You can call function vmaMakePoolAllocationsLost() to set all eligible allocations
-in a specified custom pool to lost state.
-Allocations that have been "touched" in the current frame or in the last VmaPoolCreateInfo::frameInUseCount frames
-cannot become lost.
-
-Q: Can I touch allocation that cannot become lost?
-
-Yes, although it has no visible effect.
-Calls to vmaGetAllocationInfo() and vmaTouchAllocation() update last use frame index
-also for allocations that cannot become lost, but the only way to observe it is to dump
-internal allocator state using vmaBuildStatsString().
-You can use this feature for debugging purposes to explicitly mark allocations that you use
-in current frame and then analyze JSON dump to see for how long each allocation stays unused.
-
-
-\page statistics Statistics
-
-This library contains functions that return information about its internal state,
-especially the amount of memory allocated from Vulkan.
-Please keep in mind that these functions need to traverse all internal data structures
-to gather this information, so they may be quite time-consuming.
-Don't call them too often.
-
-\section statistics_numeric_statistics Numeric statistics
-
-You can query for overall statistics of the allocator using function vmaCalculateStats().
-Information is returned using structure #VmaStats.
-It contains #VmaStatInfo - number of allocated blocks, number of allocations
-(occupied ranges in these blocks), number of unused (free) ranges in these blocks,
-number of bytes used and unused (but still allocated from Vulkan) and other information.
-Statistics are summed across memory heaps, memory types, and in total for the whole allocator.
-
-You can query for statistics of a custom pool using function vmaGetPoolStats().
-Information is returned using structure #VmaPoolStats.
-
-You can query for information about a specific allocation using function vmaGetAllocationInfo().
-It fills structure #VmaAllocationInfo.
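-
-A minimal sketch of querying overall statistics (assuming an `allocator` created earlier):
-
-\code
-VmaStats stats;
-vmaCalculateStats(allocator, &stats);
-// Bytes occupied by allocations vs. bytes still allocated from Vulkan but unused.
-printf("Used: %llu B, unused: %llu B\n",
-    (unsigned long long)stats.total.usedBytes,
-    (unsigned long long)stats.total.unusedBytes);
-\endcode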
-
-\section statistics_json_dump JSON dump
-
-You can dump the internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
-The result is guaranteed to be correct JSON.
-It uses ANSI encoding.
-Any strings provided by the user (see [Allocation names](@ref allocation_names))
-are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
-this JSON string can be treated as using this encoding.
-It must be freed using function vmaFreeStatsString().
-
-The format of this JSON string is not part of the official documentation of the library,
-but it will not change in a backward-incompatible way without increasing the library's major version number
-and an appropriate mention in the changelog.
-
-The JSON string contains all the data that can be obtained using vmaCalculateStats().
-It can also contain a detailed map of allocated memory blocks and their regions -
-free and occupied by allocations.
-This allows you e.g. to visualize the memory or assess fragmentation.
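-
-A minimal sketch (assuming an `allocator` created earlier; the last parameter enables the detailed map):
-
-\code
-char* statsString = nullptr;
-vmaBuildStatsString(allocator, &statsString, VK_TRUE);
-// Write statsString to a file or log, then free it.
-vmaFreeStatsString(allocator, statsString);
-\endcode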
-
-
-\page allocation_annotation Allocation names and user data
-
-\section allocation_user_data Allocation user data
-
-You can annotate allocations with your own information, e.g. for debugging purposes.
-To do that, fill VmaAllocationCreateInfo::pUserData field when creating
-an allocation. It's an opaque `void*` pointer. You can use it e.g. as a pointer,
-some handle, index, key, ordinal number or any other value that would associate
-the allocation with your custom metadata.
-
-\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-// Fill bufferInfo...
-
-MyBufferMetadata* pMetadata = CreateBufferMetadata();
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.pUserData = pMetadata;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
-\endcode
-
-The pointer may be later retrieved as VmaAllocationInfo::pUserData:
-
-\code
-VmaAllocationInfo allocInfo;
-vmaGetAllocationInfo(allocator, allocation, &allocInfo);
-MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
-\endcode
-
-It can also be changed using function vmaSetAllocationUserData().
-
-Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
-vmaBuildStatsString(), in hexadecimal form.
-
-\section allocation_names Allocation names
-
-There is an alternative mode available where the `pUserData` pointer is used to point to
-a null-terminated string, giving a name to the allocation. To use this mode,
-set #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags.
-Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or as an argument to
-vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string.
-The library creates an internal copy of the string, so the pointer you pass doesn't need
-to be valid for the whole lifetime of the allocation. You can free it after the call.
-
-\code
-VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-// Fill imageInfo...
-
-std::string imageName = "Texture: ";
-imageName += fileName;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
-allocCreateInfo.pUserData = imageName.c_str();
-
-VkImage image;
-VmaAllocation allocation;
-vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
-\endcode
-
-The value of the `pUserData` pointer of the allocation will be different from the one
-you passed when setting the allocation's name - it points to an internally managed buffer
-that holds a copy of the string.
-
-\code
-VmaAllocationInfo allocInfo;
-vmaGetAllocationInfo(allocator, allocation, &allocInfo);
-const char* imageName = (const char*)allocInfo.pUserData;
-printf("Image name: %s\n", imageName);
-\endcode
-
-That string is also printed in JSON report created by vmaBuildStatsString().
-
-\note Passing a string name to a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
-You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
-
-
-\page debugging_memory_usage Debugging incorrect memory usage
-
-If you suspect a bug with memory usage, like usage of uninitialized memory or
-memory being overwritten out of bounds of an allocation,
-you can use debug features of this library to verify this.
-
-\section debugging_memory_usage_initialization Memory initialization
-
-If you experience a bug with incorrect or nondeterministic data in your program and you suspect that uninitialized memory is being used,
-you can enable automatic memory initialization to verify this.
-To do it, define the macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
-
-\code
-#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
-#include "vk_mem_alloc.h"
-\endcode
-
-It makes memory of all new allocations initialized to bit pattern `0xDCDCDCDC`.
-Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
-Memory is automatically mapped and unmapped if necessary.
-
-If you find these values while debugging your program, chances are that you incorrectly
-read Vulkan memory that is allocated but not initialized, or that has already been freed, respectively.
-
-Memory initialization works only with memory types that are `HOST_VISIBLE`.
-It works also with dedicated allocations.
-It doesn't work with allocations created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
-as they cannot be mapped.
-
-\section debugging_memory_usage_margins Margins
-
-By default, allocations are laid out in memory blocks next to each other if possible
-(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
-
-
-
-Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
-number of bytes as a margin before and after every allocation.
-
-\code
-#define VMA_DEBUG_MARGIN 16
-#include "vk_mem_alloc.h"
-\endcode
-
-
-
-If your bug goes away after enabling margins, it means it may be caused by memory
-being overwritten outside of allocation boundaries. It is not 100% certain though.
-Change in application behavior may also be caused by different order and distribution
-of allocations across memory blocks after margins are applied.
-
-The margin is also applied before the first and after the last allocation in a block.
-It may occur only once between two adjacent allocations.
-
-Margins work with all types of memory.
-
-Margin is applied only to allocations made out of memory blocks and not to dedicated
-allocations, which have their own memory block of specific size.
-It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
-or to those automatically placed in dedicated allocations, e.g. due to their
-large size or as recommended by the VK_KHR_dedicated_allocation extension.
-Margins are also not active in custom pools created with #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag.
-
-Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
-
-Note that enabling margins increases memory usage and fragmentation.
-
-\section debugging_memory_usage_corruption_detection Corruption detection
-
-You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
-of contents of the margins.
-
-\code
-#define VMA_DEBUG_MARGIN 16
-#define VMA_DEBUG_DETECT_CORRUPTION 1
-#include "vk_mem_alloc.h"
-\endcode
-
-When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
-(it must be a multiple of 4) before and after every allocation is filled with a magic number.
-This idea is also known as a "canary".
-Memory is automatically mapped and unmapped if necessary.
-
-This number is validated automatically when the allocation is destroyed.
-If it's not equal to the expected value, `VMA_ASSERT()` is executed.
-It clearly means that either the CPU or the GPU has overwritten the memory outside the boundaries of the allocation,
-which indicates a serious bug.
-
-You can also explicitly request checking margins of all allocations in all memory blocks
-that belong to specified memory types by using function vmaCheckCorruption(),
-or in memory blocks that belong to specified custom pool, by using function
-vmaCheckPoolCorruption().
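-
-A minimal sketch of an explicit check (assuming an `allocator` created earlier; `UINT32_MAX` requests checking all memory types):
-
-\code
-VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
-if(res != VK_SUCCESS)
-{
-    // Corruption detected, or the feature is not enabled/supported for these memory types.
-}
-\endcode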
-
-Margin validation (corruption detection) works only for memory types that are
-`HOST_VISIBLE` and `HOST_COHERENT`.
-
-
-\page record_and_replay Record and replay
-
-\section record_and_replay_introduction Introduction
-
-While using the library, a sequence of calls to its functions together with their
-parameters can be recorded to a file and later replayed using a standalone player
-application. It can be useful to:
-
-- Test correctness - check if same sequence of calls will not cause crash or
- failures on a target platform.
-- Gather statistics - see number of allocations, peak memory usage, number of
- calls etc.
-- Benchmark performance - see how much time it takes to replay the whole
- sequence.
-
-\section record_and_replay_usage Usage
-
-Recording functionality is disabled by default.
-To enable it, define the following macro before every include of this library:
-
-\code
-#define VMA_RECORDING_ENABLED 1
-\endcode
-
-To record a sequence of calls to a file: fill in the
-VmaAllocatorCreateInfo::pRecordSettings member while creating the #VmaAllocator
-object. The file is opened and written to during the whole lifetime of the allocator.
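-
-A minimal sketch of enabling recording (assuming `VMA_RECORDING_ENABLED` is defined to 1 and the rest of `allocatorInfo` is filled as usual):
-
-\code
-VmaRecordSettings recordSettings = {};
-recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Optional - keeps the file usable after a crash.
-recordSettings.pFilePath = "MyRecording.csv";
-
-VmaAllocatorCreateInfo allocatorInfo = {};
-// ... fill other members ...
-allocatorInfo.pRecordSettings = &recordSettings;
-\endcode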
-
-To replay a file: use VmaReplay - a standalone command-line program.
-A precompiled binary can be found in the "bin" directory.
-Its source can be found in the "src/VmaReplay" directory.
-Its project is generated by Premake.
-Command-line syntax is printed when the program is launched without parameters.
-Basic usage:
-
- VmaReplay.exe MyRecording.csv
-
-Documentation of the file format can be found in file: "docs/Recording file format.md".
-It's a human-readable text file in CSV format (Comma Separated Values).
-
-\section record_and_replay_additional_considerations Additional considerations
-
-- Replaying file that was recorded on a different GPU (with different parameters
- like `bufferImageGranularity`, `nonCoherentAtomSize`, and especially different
- set of memory heaps and types) may give different performance and memory usage
- results, as well as issue some warnings and errors.
-- Current implementation of recording in VMA, as well as VmaReplay application, is
- coded and tested only on Windows. Inclusion of recording code is driven by
- `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to
- add. Contributions are welcomed.
-
-
-\page usage_patterns Recommended usage patterns
-
-See also slides from talk:
-[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
-
-
-\section usage_patterns_common_mistakes Common mistakes
-
-Use of CPU_TO_GPU instead of CPU_ONLY memory
-
-#VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be
-mapped and written by the CPU, as well as read directly by the GPU - like some
-buffers or textures updated every frame (dynamic). If you create a staging copy
-of a resource to be written by CPU and then used as a source of transfer to
-another resource placed in the GPU memory, that staging resource should be
-created with #VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these
-enums carefully for details.
-
-Unnecessary use of custom pools
-
-\ref custom_memory_pools may be useful for special purposes - when you want to
-keep certain types of resources separate, e.g. to reserve a minimum amount of memory
-for them, limit the maximum amount of memory they can occupy, or make some of them
-push out the others through the mechanism of \ref lost_allocations. For most
-resources this is not needed and so it is not recommended to create #VmaPool
-objects and allocations out of them. Allocating from the default pool is sufficient.
-
-\section usage_patterns_simple Simple patterns
-
-\subsection usage_patterns_simple_render_targets Render targets
-
-When:
-Any resources that you frequently write and read on GPU,
-e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
-images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
-
-What to do:
-Create them in video memory that is fastest to access from GPU using
-#VMA_MEMORY_USAGE_GPU_ONLY.
-
-Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension
-and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
-especially if they are large or if you plan to destroy and recreate them e.g. when
-display resolution changes.
-Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
-
-\subsection usage_patterns_simple_immutable_resources Immutable resources
-
-When:
-Any resources that you fill on CPU only once (aka "immutable") or infrequently
-and then read frequently on GPU,
-e.g. textures, vertex and index buffers, constant buffers that don't change often.
-
-What to do:
-Create them in video memory that is fastest to access from GPU using
-#VMA_MEMORY_USAGE_GPU_ONLY.
-
-To initialize the content of such a resource, create a CPU-side (aka "staging") copy of it
-in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
-and submit a transfer from it to the GPU resource.
-You can keep the staging copy if you need it for another upload transfer in the future.
-If you don't, you can destroy it or reuse the buffer for uploading a different resource
-after the transfer finishes.
-
-Prefer to create just buffers in system memory rather than images, even for uploading textures.
-Use `vkCmdCopyBufferToImage()`.
-Don't use images with `VK_IMAGE_TILING_LINEAR`.
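-
-A minimal sketch of the upload pattern described above (assuming an `allocator`, a `commandBuffer` being recorded, a destination `gpuBuffer` created with #VMA_MEMORY_USAGE_GPU_ONLY, and illustrative `myData`/`myDataSize` source data):
-
-\code
-// Create the staging buffer in CPU memory and keep it persistently mapped.
-VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-stagingBufInfo.size = myDataSize;
-stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo stagingAllocCreateInfo = {};
-stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
-stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer stagingBuf;
-VmaAllocation stagingAlloc;
-VmaAllocationInfo stagingAllocInfo;
-vmaCreateBuffer(allocator, &stagingBufInfo, &stagingAllocCreateInfo,
-    &stagingBuf, &stagingAlloc, &stagingAllocInfo);
-
-// Fill the staging buffer and record the transfer to the GPU-only buffer.
-memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
-
-VkBufferCopy copyRegion = { 0, 0, myDataSize };
-vkCmdCopyBuffer(commandBuffer, stagingBuf, gpuBuffer, 1, &copyRegion);
-
-// After the transfer finishes (e.g. a fence is signaled), the staging buffer can be destroyed:
-// vmaDestroyBuffer(allocator, stagingBuf, stagingAlloc);
-\endcode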
-
-\subsection usage_patterns_dynamic_resources Dynamic resources
-
-When:
-Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
-written on CPU, read on GPU.
-
-What to do:
-Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
-You can map them and write to them directly on the CPU, as well as read from them on the GPU.
-
-This is a more complex situation. Different solutions are possible,
-and the best one depends on the specific GPU type, but you can use this simple approach to start with.
-Prefer to write to such a resource sequentially (e.g. using `memcpy`).
-Don't perform random access or any reads from it on the CPU, as it may be very slow.
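-
-A minimal sketch of creating such a buffer and keeping it persistently mapped (assuming an `allocator`; `bufferSize` and `newData` are illustrative):
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = bufferSize;
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-// Every frame: write new data sequentially through the persistent mapping.
-memcpy(allocInfo.pMappedData, newData, bufferSize);
-\endcode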
-
-\subsection usage_patterns_readback Readback
-
-When:
-Resources that contain data written by GPU that you want to read back on CPU,
-e.g. results of some computations.
-
-What to do:
-Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
-You can write to them directly on GPU, as well as map and read them on CPU.
-
-\section usage_patterns_advanced Advanced patterns
-
-\subsection usage_patterns_integrated_graphics Detecting integrated graphics
-
-You can support integrated graphics (like Intel HD Graphics, AMD APU) better
-by detecting it in Vulkan.
-To do it, call `vkGetPhysicalDeviceProperties()`, inspect
-`VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.
-When you find it, you can assume that memory is unified and all memory types are comparably fast
-to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
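-
-A minimal sketch of this check (assuming a `physicalDevice` handle obtained during device selection):
-
-\code
-VkPhysicalDeviceProperties props;
-vkGetPhysicalDeviceProperties(physicalDevice, &props);
-const bool isIntegrated = props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
-\endcode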
-
-You can then sum up sizes of all available memory heaps and treat them as useful for
-your GPU resources, instead of only `DEVICE_LOCAL` ones.
-You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
-directly instead of submitting explicit transfer (see below).
-
-\subsection usage_patterns_direct_vs_transfer Direct access versus transfer
-
-For resources that you frequently write on CPU and read on GPU, many solutions are possible:
-
--# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
- second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit an explicit transfer each time.
--# Create just single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
- read it directly on GPU.
--# Create just single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
- read it directly on GPU.
-
-Which solution is the most efficient depends on your resource and especially on the GPU.
-It is best to measure it and then make the decision.
-Some general recommendations:
-
-- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
- related to using a second copy and making a transfer.
-- For small resources (e.g. constant buffers) use (2).
- Discrete AMD cards have special 256 MiB pool of video memory that is directly mappable.
- Even if the resource ends up in system memory, its data may be cached on GPU after first
- fetch over PCIe bus.
-- For larger resources (e.g. textures), decide between (1) and (2).
- You may want to differentiate NVIDIA and AMD, e.g. by looking for memory type that is
- both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
-
-Similarly, for resources that you frequently write on GPU and read on CPU, multiple
-solutions are possible:
-
--# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
- second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU and submit an explicit transfer each time.
--# Create just single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
- map it and read it on CPU.
-
-You should take some measurements to decide which option is faster for your specific
-resource.
-
-If you don't want to specialize your code for specific types of GPUs, you can still make
-a simple optimization for cases when your resource ends up in mappable memory: use it
-directly in this case instead of creating a CPU-side staging copy.
-For details see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
-
-
-\page configuration Configuration
-
-Please check "CONFIGURATION SECTION" in the code to find macros that you can define
-before each include of this file or change directly in this file to provide
-your own implementation of basic facilities like assert, `min()` and `max()` functions,
-mutex, atomic etc.
-The library uses its own implementation of containers by default, but you can switch to using
-STL containers instead.
-
-For example, define `VMA_ASSERT(expr)` before including the library to provide
-a custom implementation of the assertion, compatible with your project.
-By default it is defined to standard C `assert(expr)` in the `_DEBUG` configuration
-and empty otherwise.
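-
-A minimal sketch (`MyEngineAssert` is an illustrative placeholder for your project's own assertion handler):
-
-\code
-#define VMA_ASSERT(expr) MyEngineAssert(expr)
-#include "vk_mem_alloc.h"
-\endcode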
-
-\section config_Vulkan_functions Pointers to Vulkan functions
-
-The library uses Vulkan functions straight from the `vulkan.h` header by default.
-If you want to provide your own pointers to these functions, e.g. fetched using
-`vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`:
-
--# Define `VMA_STATIC_VULKAN_FUNCTIONS 0`.
--# Provide valid pointers through VmaAllocatorCreateInfo::pVulkanFunctions, as in the sketch below.
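-
-A minimal sketch of the second step, showing only a few members (assuming `device` and `allocatorInfo` are created/filled as usual; the remaining members of #VmaVulkanFunctions must be filled the same way, and instance-level functions would be fetched with `vkGetInstanceProcAddr()` instead):
-
-\code
-VmaVulkanFunctions vulkanFunctions = {};
-vulkanFunctions.vkAllocateMemory =
-    (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
-vulkanFunctions.vkFreeMemory =
-    (PFN_vkFreeMemory)vkGetDeviceProcAddr(device, "vkFreeMemory");
-// ... fill all remaining members of VmaVulkanFunctions ...
-
-allocatorInfo.pVulkanFunctions = &vulkanFunctions;
-\endcode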
-
-\section custom_memory_allocator Custom host memory allocator
-
-If you use a custom allocator for CPU memory rather than the default operator `new`
-and `delete` from C++, you can make this library use your allocator as well
-by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
-functions will be passed to Vulkan, as well as used by the library itself to
-make any CPU-side allocations.
-
-\section allocation_callbacks Device memory allocation callbacks
-
-The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
-You can set up callbacks to be informed about these calls, e.g. for the purpose
-of gathering some statistics. To do it, fill the optional member
-VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
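-
-A minimal sketch (the callback bodies are illustrative; both callbacks are optional):
-
-\code
-static void VKAPI_PTR MyAllocateCallback(
-    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
-{
-    // E.g. update statistics about the total amount of allocated device memory.
-}
-
-static void VKAPI_PTR MyFreeCallback(
-    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
-{
-    // E.g. update statistics about the total amount of freed device memory.
-}
-
-VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
-deviceMemoryCallbacks.pfnAllocate = MyAllocateCallback;
-deviceMemoryCallbacks.pfnFree = MyFreeCallback;
-
-allocatorInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
-\endcode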
-
-\section heap_memory_limit Device heap memory limit
-
-When device memory of a certain heap runs out of free space, new allocations may
-fail (returning an error code) or they may succeed, silently pushing some existing
-memory blocks from GPU VRAM to system RAM (which degrades performance). This
-behavior is implementation-dependent - it depends on the GPU vendor and the graphics
-driver.
-
-On AMD cards it can be controlled while creating Vulkan device object by using
-VK_AMD_memory_overallocation_behavior extension, if available.
-
-Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
-memory available, without switching your graphics card to one that really has
-less VRAM, you can use a feature of this library intended for this purpose.
-To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
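-
-A minimal sketch of limiting heap 0 to 1 GiB (assuming, for illustration, a device with two memory heaps; the array must have `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements):
-
-\code
-VkDeviceSize heapSizeLimits[] = {
-    1024ull * 1024 * 1024, // Limit heap 0 to 1 GiB.
-    VK_WHOLE_SIZE,         // No limit on heap 1.
-};
-
-VmaAllocatorCreateInfo allocatorInfo = {};
-// ... fill other members ...
-allocatorInfo.pHeapSizeLimit = heapSizeLimits;
-\endcode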
-
-
-
-\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
-
-VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
-performance on some GPUs. It augments the Vulkan API with the possibility to query
-the driver whether it prefers a particular buffer or image to have its own, dedicated
-allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
-to do some internal optimizations.
-
-The extension is supported by this library. It will be used automatically when
-enabled. To enable it:
-
-1 . When creating the Vulkan device, check whether the following 2 device extensions are
-supported (call `vkEnumerateDeviceExtensionProperties()`).
-If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
-
-- VK_KHR_get_memory_requirements2
-- VK_KHR_dedicated_allocation
-
-If you enabled these extensions:
-
-2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
-your #VmaAllocator to inform the library that you enabled the required extensions
-and you want the library to use them.
-
-\code
-allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
-
-vmaCreateAllocator(&allocatorInfo, &allocator);
-\endcode
-
-That's all. The extension will be automatically used whenever you create a
-buffer using vmaCreateBuffer() or image using vmaCreateImage().
-
-When using the extension together with Vulkan Validation Layer, you will receive
-warnings like this:
-
- vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
-
-It is OK, you should just ignore it. It happens because you use function
-`vkGetBufferMemoryRequirements2KHR()` instead of standard
-`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
-unaware of it.
-
-To learn more about this extension, see:
-
-- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation)
-- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
-
-
-
-\page general_considerations General considerations
-
-\section general_considerations_thread_safety Thread safety
-
-- The library has no global state, so separate #VmaAllocator objects can be used
- independently.
- There should be no need to create multiple such objects though - one per `VkDevice` is enough.
-- By default, all calls to functions that take #VmaAllocator as first parameter
- are safe to call from multiple threads simultaneously because they are
- synchronized internally when needed.
-- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
- flag, calls to functions that take such #VmaAllocator object must be
- synchronized externally.
-- Access to a #VmaAllocation object must be externally synchronized. For example,
- you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
- threads at the same time if you pass the same #VmaAllocation object to these
- functions.
-
-\section general_considerations_validation_layer_warnings Validation layer warnings
-
-When using this library, you may encounter the following types of warnings issued by
-the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
-to just ignore them.
-
-- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
- - It happens when VK_KHR_dedicated_allocation extension is enabled.
- `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
-- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
- - It happens when you map a buffer or image, because the library maps entire
- `VkDeviceMemory` block, where different types of images and buffers may end
- up together, especially on GPUs with unified memory like Intel.
-- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
- - It happens when you use lost allocations, and a new image or buffer is
- created in place of an existing object that became lost.
- - It may happen also when you use [defragmentation](@ref defragmentation).
-
-\section general_considerations_allocation_algorithm Allocation algorithm
-
-The library uses the following algorithm for allocation, in order:
-
--# Try to find free range of memory in existing blocks.
--# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
--# If failed, try to create such block with size/2, size/4, size/8.
--# If failed and #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was
- specified, try to find space in existing blocks, possibly making some other
- allocations lost.
--# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
- just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
--# If failed, choose other memory type that meets the requirements specified in
- VmaAllocationCreateInfo and go to point 1.
--# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-
-\section general_considerations_features_not_supported Features not supported
-
-Features deliberately excluded from the scope of this library:
-
-- Data transfer. Uploading (streaming) and downloading data of buffers and images
- between CPU and GPU memory and related synchronization is the responsibility of the user.
- Defining some "texture" object that would automatically stream its data from a
- staging copy in CPU memory to GPU memory would rather be a feature of another,
- higher-level library implemented on top of VMA.
-- Allocations for imported/exported external memory. They tend to require
- explicit memory type index and dedicated allocation anyway, so they don't
- interact with main features of this library. Such special purpose allocations
- should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
-- Recreation of buffers and images. Although the library has functions for
- buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
- recreate these objects yourself after defragmentation. That's because the big
- structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
- #VmaAllocation object.
-- Handling CPU memory allocation failures. When dynamically creating small C++
- objects in CPU memory (not Vulkan memory), allocation failures are not checked
- and handled gracefully, because that would complicate code significantly and
- is usually not needed in desktop PC applications anyway.
-- Code free of any compiler warnings. Maintaining the library to compile and
- work correctly on so many different platforms is hard enough. Being free of
- any warnings, on any version of any compiler, is simply not feasible.
-- This is a C++ library with C interface.
- Bindings or ports to any other programming languages are welcome as external projects and
- are not going to be included in this repository.
-
-*/
-
-/*
-Define this macro to 0/1 to disable/enable support for recording functionality,
-available through VmaAllocatorCreateInfo::pRecordSettings.
-*/
-#ifndef VMA_RECORDING_ENABLED
- #define VMA_RECORDING_ENABLED 0
-#endif
-
-#ifndef NOMINMAX
- #define NOMINMAX // For windows.h
-#endif
-
-#ifndef VULKAN_H_
-    #include <vulkan/vulkan.h>
-#endif
-
-#if VMA_RECORDING_ENABLED
-    #include <chrono>
-#endif
-
-// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
-// where AAA = major, BBB = minor, CCC = patch.
-// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
-#if !defined(VMA_VULKAN_VERSION)
- #if defined(VK_VERSION_1_1)
- #define VMA_VULKAN_VERSION 1001000
- #else
- #define VMA_VULKAN_VERSION 1000000
- #endif
-#endif
-
-#if !defined(VMA_DEDICATED_ALLOCATION)
- #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
- #define VMA_DEDICATED_ALLOCATION 1
- #else
- #define VMA_DEDICATED_ALLOCATION 0
- #endif
-#endif
-
-#if !defined(VMA_BIND_MEMORY2)
- #if VK_KHR_bind_memory2
- #define VMA_BIND_MEMORY2 1
- #else
- #define VMA_BIND_MEMORY2 0
- #endif
-#endif
-
-#if !defined(VMA_MEMORY_BUDGET)
- #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
- #define VMA_MEMORY_BUDGET 1
- #else
- #define VMA_MEMORY_BUDGET 0
- #endif
-#endif
-
-// Define these macros to decorate all public functions with additional code,
-// before and after returned type, appropriately. This may be useful for
-// exporting the functions when compiling VMA as a separate library. Example:
-// #define VMA_CALL_PRE __declspec(dllexport)
-// #define VMA_CALL_POST __cdecl
-#ifndef VMA_CALL_PRE
- #define VMA_CALL_PRE
-#endif
-#ifndef VMA_CALL_POST
- #define VMA_CALL_POST
-#endif
-
-/** \struct VmaAllocator
-\brief Represents the main object of this library, once initialized.
-
-Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
-Call function vmaDestroyAllocator() to destroy it.
-
-It is recommended to create just one object of this type per `VkDevice` object,
-right after Vulkan is initialized, and keep it alive until just before the Vulkan device is destroyed.
-*/
-VK_DEFINE_HANDLE(VmaAllocator)
-
-/// Callback function called after successful vkAllocateMemory.
-typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
- VmaAllocator allocator,
- uint32_t memoryType,
- VkDeviceMemory memory,
- VkDeviceSize size);
-/// Callback function called before vkFreeMemory.
-typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
- VmaAllocator allocator,
- uint32_t memoryType,
- VkDeviceMemory memory,
- VkDeviceSize size);
-
-/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
-
-Provided for informative purpose, e.g. to gather statistics about number of
-allocations or total amount of memory allocated in Vulkan.
-
-Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
-*/
-typedef struct VmaDeviceMemoryCallbacks {
- /// Optional, can be null.
- PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
- /// Optional, can be null.
- PFN_vmaFreeDeviceMemoryFunction pfnFree;
-} VmaDeviceMemoryCallbacks;
-
-/// Flags for created #VmaAllocator.
-typedef enum VmaAllocatorCreateFlagBits {
- /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
-
- Using this flag may increase performance because internal mutexes are not used.
- */
- VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
- /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
-
- The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
- When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
-
- Using this extension will automatically allocate dedicated blocks of memory for
- some buffers and images instead of suballocating place for them out of bigger
- memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
- flag) when it is recommended by the driver. It may improve performance on some
- GPUs.
-
- You may set this flag only if you found out that following device extensions are
- supported, you enabled them while creating Vulkan device passed as
- VmaAllocatorCreateInfo::device, and you want them to be used internally by this
- library:
-
- - VK_KHR_get_memory_requirements2 (device extension)
- - VK_KHR_dedicated_allocation (device extension)
-
- When this flag is set, you can experience following warnings reported by Vulkan
- validation layer. You can ignore them.
-
- > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
- */
- VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
- /**
- Enables usage of VK_KHR_bind_memory2 extension.
-
- The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
- When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
-
- You may set this flag only if you found out that this device extension is supported,
- you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
- and you want it to be used internally by this library.
-
- The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
- which allow passing a chain of `pNext` structures while binding.
- This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
- */
- VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
- /**
- Enables usage of VK_EXT_memory_budget extension.
-
- You may set this flag only if you found out that this device extension is supported,
- you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
- and you want it to be used internally by this library, along with another instance extension
- VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
-
- The extension provides a query for the current memory usage and budget, which will probably
- be more accurate than the estimation used by the library otherwise.
- */
- VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
-
- VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaAllocatorCreateFlagBits;
-typedef VkFlags VmaAllocatorCreateFlags;
-
-/** \brief Pointers to some Vulkan functions - a subset used by the library.
-
-Used in VmaAllocatorCreateInfo::pVulkanFunctions.
-*/
-typedef struct VmaVulkanFunctions {
- PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
- PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
- PFN_vkAllocateMemory vkAllocateMemory;
- PFN_vkFreeMemory vkFreeMemory;
- PFN_vkMapMemory vkMapMemory;
- PFN_vkUnmapMemory vkUnmapMemory;
- PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
- PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
- PFN_vkBindBufferMemory vkBindBufferMemory;
- PFN_vkBindImageMemory vkBindImageMemory;
- PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
- PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
- PFN_vkCreateBuffer vkCreateBuffer;
- PFN_vkDestroyBuffer vkDestroyBuffer;
- PFN_vkCreateImage vkCreateImage;
- PFN_vkDestroyImage vkDestroyImage;
- PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
- PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
- PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
-#endif
-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
- PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
- PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
-#endif
-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
- PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
-#endif
-} VmaVulkanFunctions;
-
-/// Flags to be used in VmaRecordSettings::flags.
-typedef enum VmaRecordFlagBits {
- /** \brief Enables flush after recording every function call.
-
- Enable it if you expect your application to crash, which may leave recording file truncated.
- It may degrade performance though.
- */
- VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
-
- VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaRecordFlagBits;
-typedef VkFlags VmaRecordFlags;
-
-/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
-typedef struct VmaRecordSettings
-{
- /// Flags for recording. Use #VmaRecordFlagBits enum.
- VmaRecordFlags flags;
- /** \brief Path to the file that should be written by the recording.
-
- Suggested extension: "csv".
- If the file already exists, it will be overwritten.
- It will be opened for the whole time #VmaAllocator object is alive.
- If opening this file fails, creation of the whole allocator object fails.
- */
- const char* pFilePath;
-} VmaRecordSettings;
-
-/// Description of an Allocator to be created.
-typedef struct VmaAllocatorCreateInfo
-{
- /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
- VmaAllocatorCreateFlags flags;
- /// Vulkan physical device.
- /** It must be valid throughout whole lifetime of created allocator. */
- VkPhysicalDevice physicalDevice;
- /// Vulkan device.
- /** It must be valid throughout whole lifetime of created allocator. */
- VkDevice device;
- /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
- /** Set to 0 to use default, which is currently 256 MiB. */
- VkDeviceSize preferredLargeHeapBlockSize;
- /// Custom CPU memory allocation callbacks. Optional.
- /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
- const VkAllocationCallbacks* pAllocationCallbacks;
- /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
- /** Optional, can be null. */
- const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
- /** \brief Maximum number of additional frames that are in use at the same time as current frame.
-
- This value is used only when you make allocations with
- VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
- lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
-
- For example, if you double-buffer your command buffers, so resources used for
- rendering in previous frame may still be in use by the GPU at the moment you
- allocate resources needed for the current frame, set this value to 1.
-
- If you want to allow any allocations other than used in the current frame to
- become lost, set this value to 0.
- */
- uint32_t frameInUseCount;
- /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
-
- If not NULL, it must be a pointer to an array of
- `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
- maximum number of bytes that can be allocated out of particular Vulkan memory
- heap.
-
- Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
- heap. This is also the default in case of `pHeapSizeLimit` = NULL.
-
- If there is a limit defined for a heap:
-
- - If user tries to allocate more memory from that heap using this allocator,
- the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
- - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
- value of this limit will be reported instead when using vmaGetMemoryProperties().
-
- Warning! Using this feature may not be equivalent to installing a GPU with
- smaller amount of memory, because the graphics driver doesn't necessarily fail new
- allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
- exceeded. It may return success and just silently migrate some device memory
- blocks to system RAM. This driver behavior can also be controlled using
- VK_AMD_memory_overallocation_behavior extension.
- */
- const VkDeviceSize* pHeapSizeLimit;
- /** \brief Pointers to Vulkan functions. Can be null if you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1`.
-
- If you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1` in configuration section,
- you can pass null as this member, because the library will fetch pointers to
- Vulkan functions internally in a static way, like:
-
- vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
-
- Fill this member if you want to provide your own pointers to Vulkan functions,
- e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`.
- */
- const VmaVulkanFunctions* pVulkanFunctions;
- /** \brief Parameters for recording of VMA calls. Can be null.
-
- If not null, it enables recording of calls to VMA functions to a file.
- If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
- creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`.
- */
- const VmaRecordSettings* pRecordSettings;
- /** \brief Optional handle to Vulkan instance object.
-
- Optional, can be null. Must be set if #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT flag is used
- or if `vulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)`.
- */
- VkInstance instance;
- /** \brief Optional. The highest version of Vulkan that the application is designed to use.
-
- It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
- The patch version number specified is ignored. Only the major and minor versions are considered.
- It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
- Only versions 1.0 and 1.1 are supported by the current implementation.
- Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
- */
- uint32_t vulkanApiVersion;
-} VmaAllocatorCreateInfo;
-
-/// Creates Allocator object.
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
- const VmaAllocatorCreateInfo* pCreateInfo,
- VmaAllocator* pAllocator);
-
-/// Destroys allocator object.
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
- VmaAllocator allocator);
-
-/**
-PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
-You can access it here, without fetching it again on your own.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
- VmaAllocator allocator,
- const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
-
-/**
-PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
-You can access it here, without fetching it again on your own.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
- VmaAllocator allocator,
- const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
-
-/**
-\brief Given Memory Type Index, returns Property Flags of this memory type.
-
-This is just a convenience function. Same information can be obtained using
-vmaGetMemoryProperties().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
- VmaAllocator allocator,
- uint32_t memoryTypeIndex,
- VkMemoryPropertyFlags* pFlags);
-
-/** \brief Sets index of the current frame.
-
-This function must be used if you make allocations with
-#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
-#VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator
-when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot
-become lost in the current frame.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
- VmaAllocator allocator,
- uint32_t frameIndex);
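-
-/* For example, assuming a `frameIndex` counter maintained by the application,
-a sketch of informing the allocator at the start of every frame:
-
-\code
-++frameIndex;
-vmaSetCurrentFrameIndex(allocator, frameIndex);
-\endcode
-*/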
-
-/** \brief Calculated statistics of memory usage in entire allocator.
-*/
-typedef struct VmaStatInfo
-{
- /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
- uint32_t blockCount;
- /// Number of #VmaAllocation allocation objects allocated.
- uint32_t allocationCount;
- /// Number of free ranges of memory between allocations.
- uint32_t unusedRangeCount;
- /// Total number of bytes occupied by all allocations.
- VkDeviceSize usedBytes;
- /// Total number of bytes occupied by unused ranges.
- VkDeviceSize unusedBytes;
- VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
- VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
-} VmaStatInfo;
-
-/// General statistics from current state of Allocator.
-typedef struct VmaStats
-{
- VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
- VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
- VmaStatInfo total;
-} VmaStats;
-
-/** \brief Retrieves statistics from current state of the Allocator.
-
-This function is called "calculate" not "get" because it has to traverse all
-internal data structures, so it may be quite slow. For faster but more brief statistics
-suitable to be called every frame or every allocation, use vmaGetBudget().
-
-Note that when using allocator from multiple threads, returned information may immediately
-become outdated.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
- VmaAllocator allocator,
- VmaStats* pStats);
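-
-/* A short sketch of querying the detailed statistics, e.g. for a debug overlay
-(assuming `allocator` was created earlier):
-
-\code
-VmaStats stats;
-vmaCalculateStats(allocator, &stats);
-
-printf("Blocks: %u, allocations: %u, used bytes: %llu, unused bytes: %llu\n",
-    stats.total.blockCount,
-    stats.total.allocationCount,
-    (unsigned long long)stats.total.usedBytes,
-    (unsigned long long)stats.total.unusedBytes);
-\endcode
-*/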
-
-/** \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap.
-*/
-typedef struct VmaBudget
-{
- /** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes.
- */
- VkDeviceSize blockBytes;
-
- /** \brief Sum size of all allocations created in particular heap, in bytes.
-
- Usually less than or equal to `blockBytes`.
- Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused -
- available for new allocations or wasted due to fragmentation.
-
- It might be greater than `blockBytes` if there are some allocations in lost state, as they also
- count toward this value.
- */
- VkDeviceSize allocationBytes;
-
- /** \brief Estimated current memory usage of the program, in bytes.
-
- Fetched from system using `VK_EXT_memory_budget` extension if enabled.
-
- It might be different than `blockBytes` (usually higher) due to additional implicit objects
- also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
- `VkDeviceMemory` blocks allocated outside of this library, if any.
- */
- VkDeviceSize usage;
-
- /** \brief Estimated amount of memory available to the program, in bytes.
-
- Fetched from system using `VK_EXT_memory_budget` extension if enabled.
-
- It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
- external to the program, like other programs also consuming system resources.
- Difference `budget - usage` is the amount of additional memory that can probably
- be allocated without problems. Exceeding the budget may result in various problems.
- */
- VkDeviceSize budget;
-} VmaBudget;
-
-/** \brief Retrieves information about current memory budget for all memory heaps.
-
-\param[out] pBudget Must point to array with number of elements at least equal to number of memory heaps in physical device used.
-
-This function is called "get" not "calculate" because it is very fast, suitable to be called
-every frame or every allocation. For more detailed statistics use vmaCalculateStats().
-
-Note that when using allocator from multiple threads, returned information may immediately
-become outdated.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
- VmaAllocator allocator,
- VmaBudget* pBudget);
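-
-/* A sketch of a per-heap budget check suitable for calling every frame
-(assuming `allocator` was created earlier):
-
-\code
-const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
-vmaGetMemoryProperties(allocator, &memProps);
-
-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-vmaGetBudget(allocator, budgets);
-
-for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
-{
-    // Comparing budgets[heapIndex].usage with budgets[heapIndex].budget tells
-    // how close this heap is to being overcommitted.
-}
-\endcode
-*/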
-
-#ifndef VMA_STATS_STRING_ENABLED
-#define VMA_STATS_STRING_ENABLED 1
-#endif
-
-#if VMA_STATS_STRING_ENABLED
-
-/// Builds and returns statistics as string in JSON format.
-/** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
- VmaAllocator allocator,
- char** ppStatsString,
- VkBool32 detailedMap);
-
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
- VmaAllocator allocator,
- char* pStatsString);
-
-#endif // #if VMA_STATS_STRING_ENABLED
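-
-/* A sketch of dumping the whole allocator state as JSON, e.g. to a log file
-(assuming `allocator` was created earlier):
-
-\code
-#if VMA_STATS_STRING_ENABLED
-char* statsString = nullptr;
-vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
-// ... write statsString to a file or log ...
-vmaFreeStatsString(allocator, statsString);
-#endif
-\endcode
-*/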
-
-/** \struct VmaPool
-\brief Represents custom memory pool
-
-Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
-Call function vmaDestroyPool() to destroy it.
-
-For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
-*/
-VK_DEFINE_HANDLE(VmaPool)
-
-typedef enum VmaMemoryUsage
-{
- /** No intended memory usage specified.
- Use other members of VmaAllocationCreateInfo to specify your requirements.
- */
- VMA_MEMORY_USAGE_UNKNOWN = 0,
- /** Memory will be used on device only, so fast access from the device is preferred.
- It usually means device-local GPU (video) memory.
- No need to be mappable on host.
- It is roughly equivalent to `D3D12_HEAP_TYPE_DEFAULT`.
-
- Usage:
-
- - Resources written and read by device, e.g. images used as attachments.
- - Resources transferred from host once (immutable) or infrequently and read by
- device multiple times, e.g. textures to be sampled, vertex buffers, uniform
- (constant) buffers, and majority of other types of resources used on GPU.
-
- Allocation may still end up in `HOST_VISIBLE` memory on some implementations.
- In such case, you are free to map it.
- You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type.
- */
- VMA_MEMORY_USAGE_GPU_ONLY = 1,
- /** Memory will be mappable on host.
- It usually means CPU (system) memory.
- Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`.
- CPU access is typically uncached. Writes may be write-combined.
- Resources created in this pool may still be accessible to the device, but access to them can be slow.
- It is roughly equivalent to `D3D12_HEAP_TYPE_UPLOAD`.
-
- Usage: Staging copy of resources used as transfer source.
- */
- VMA_MEMORY_USAGE_CPU_ONLY = 2,
- /**
- Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU.
- CPU access is typically uncached. Writes may be write-combined.
-
- Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call.
- */
- VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
- /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached.
- It is roughly equivalent to `D3D12_HEAP_TYPE_READBACK`.
-
- Usage:
-
- - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping.
- - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection.
- */
- VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
- /** CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`.
-
- Usage: Staging copy of resources moved from GPU memory to CPU memory as part
- of custom paging/residency mechanism, to be moved back to GPU memory when needed.
- */
- VMA_MEMORY_USAGE_CPU_COPY = 5,
- /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
- Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
-
- Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
-
- Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
- */
- VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
-
- VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
-} VmaMemoryUsage;
-
-/// Flags to be passed as VmaAllocationCreateInfo::flags.
-typedef enum VmaAllocationCreateFlagBits {
- /** \brief Set this flag if the allocation should have its own memory block.
-
- Use it for special, big resources, like fullscreen images used as attachments.
-
- You should not use this flag if VmaAllocationCreateInfo::pool is not null.
- */
- VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
-
- /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
-
- If new allocation cannot be placed in any of the existing blocks, allocation
- fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-
- You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
- #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
-
- If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */
- VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
- /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
-
- Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
-
- It is valid to use this flag for an allocation made from a memory type that is not
- `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
- useful if you need an allocation that is efficient to use on GPU
- (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
- support it (e.g. Intel GPU).
-
- You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
- */
- VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
- /** Allocation created with this flag can become lost as a result of another
- allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you
- must check it before use.
-
- To check if allocation is not lost, call vmaGetAllocationInfo() and check if
- VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`.
-
- For details about supporting lost allocations, see Lost Allocations
- chapter of User Guide on Main Page.
-
- You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT.
- */
- VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
- /** While creating allocation using this flag, other allocations that were
- created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
-
- For details about supporting lost allocations, see Lost Allocations
- chapter of User Guide on Main Page.
- */
- VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
- /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
- null-terminated string. Instead of copying pointer value, a local copy of the
- string is made and stored in allocation's `pUserData`. The string is automatically
- freed together with the allocation. It is also used in vmaBuildStatsString().
- */
- VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
- /** Allocation will be created from upper stack in a double stack pool.
-
- This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
- */
- VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
- /** Create both buffer/image and allocation, but don't bind them together.
- It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions.
- The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
- Otherwise it is ignored.
- */
- VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
- /** Create allocation only if additional device memory required for it, if any, won't exceed
- memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
- */
- VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
-
- /** Allocation strategy that chooses smallest possible free range for the
- allocation.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
- /** Allocation strategy that chooses biggest possible free range for the
- allocation.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
- /** Allocation strategy that chooses first suitable free range for the
- allocation.
-
- "First" doesn't necessarily means the one with smallest offset in memory,
- but rather the one that is easiest and fastest to find.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
-
- /** Allocation strategy that tries to minimize memory usage.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
- /** Allocation strategy that tries to minimize allocation time.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
- /** Allocation strategy that tries to minimize memory fragmentation.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
-
- /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
- */
- VMA_ALLOCATION_CREATE_STRATEGY_MASK =
- VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
- VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
- VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
-
- VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaAllocationCreateFlagBits;
-typedef VkFlags VmaAllocationCreateFlags;
-
-typedef struct VmaAllocationCreateInfo
-{
- /// Use #VmaAllocationCreateFlagBits enum.
- VmaAllocationCreateFlags flags;
- /** \brief Intended usage of memory.
-
- You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
- If `pool` is not null, this member is ignored.
- */
- VmaMemoryUsage usage;
- /** \brief Flags that must be set in a Memory Type chosen for an allocation.
-
- Leave 0 if you specify memory requirements in other way. \n
- If `pool` is not null, this member is ignored.*/
- VkMemoryPropertyFlags requiredFlags;
- /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
-
- Set to 0 if no additional flags are preferred. \n
- If `pool` is not null, this member is ignored. */
- VkMemoryPropertyFlags preferredFlags;
- /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
-
- Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
- it meets other requirements specified by this structure, with no further
- restrictions on memory type index. \n
- If `pool` is not null, this member is ignored.
- */
- uint32_t memoryTypeBits;
- /** \brief Pool that this allocation should be created in.
-
- Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
- `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
- */
- VmaPool pool;
- /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
-
- If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
- null or pointer to a null-terminated string. The string will be then copied to
- internal buffer, so it doesn't need to be valid after allocation call.
- */
- void* pUserData;
-} VmaAllocationCreateInfo;
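-
-/* For example, a typical fill of this structure for a device-local resource,
-to be passed later to vmaCreateBuffer()/vmaCreateImage() or vmaAllocateMemory();
-a sketch, with `requiredFlags`/`preferredFlags` left at their defaults:
-
-\code
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // optional, for big resources
-\endcode
-*/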
-
-/**
-\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
-
-This algorithm tries to find a memory type that:
-
-- Is allowed by memoryTypeBits.
-- Contains all the flags from pAllocationCreateInfo->requiredFlags.
-- Matches intended usage.
-- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
-
-\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
-from this function or any other allocating function probably means that your
-device doesn't support any memory type with requested features for the specific
-type of resource you want to use it for. Please check parameters of your
-resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
- VmaAllocator allocator,
- uint32_t memoryTypeBits,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- uint32_t* pMemoryTypeIndex);
-
-/**
-\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
-
-It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
-It internally creates a temporary, dummy buffer that never has memory bound.
-It is just a convenience function, equivalent to calling:
-
-- `vkCreateBuffer`
-- `vkGetBufferMemoryRequirements`
-- `vmaFindMemoryTypeIndex`
-- `vkDestroyBuffer`
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
- VmaAllocator allocator,
- const VkBufferCreateInfo* pBufferCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- uint32_t* pMemoryTypeIndex);
-
-/**
-\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
-
-It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
-It internally creates a temporary, dummy image that never has memory bound.
-It is just a convenience function, equivalent to calling:
-
-- `vkCreateImage`
-- `vkGetImageMemoryRequirements`
-- `vmaFindMemoryTypeIndex`
-- `vkDestroyImage`
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
- VmaAllocator allocator,
- const VkImageCreateInfo* pImageCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- uint32_t* pMemoryTypeIndex);
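-
-/* A sketch of using the buffer variant to pick VmaPoolCreateInfo::memoryTypeIndex
-for a custom pool of uniform buffers; the size and usage flags here are only
-examples:
-
-\code
-VkBufferCreateInfo sampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-sampleBufInfo.size = 0x10000;
-sampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo sampleAllocInfo = {};
-sampleAllocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
-
-uint32_t memTypeIndex = 0;
-VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
-    allocator, &sampleBufInfo, &sampleAllocInfo, &memTypeIndex);
-// memTypeIndex can now be used as VmaPoolCreateInfo::memoryTypeIndex,
-// see the pool sketch further below.
-\endcode
-*/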
-
-/// Flags to be passed as VmaPoolCreateInfo::flags.
-typedef enum VmaPoolCreateFlagBits {
- /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
-
- This is an optional optimization flag.
-
- If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
- vmaAllocateMemoryForBuffer(), then you don't need to use it, because the allocator
- knows the exact type of your allocations, so it can handle Buffer-Image Granularity
- in the optimal way.
-
- If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
- exact type of such allocations is not known, so allocator must be conservative
- in handling Buffer-Image Granularity, which can lead to suboptimal allocation
- (wasted memory). In that case, if you can make sure you always allocate only
- buffers and linear images or only optimal images out of this pool, use this flag
- to make allocator disregard Buffer-Image Granularity and so make allocations
- faster and more optimal.
- */
- VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
-
- /** \brief Enables alternative, linear allocation algorithm in this pool.
-
- Specify this flag to enable linear allocation algorithm, which always creates
- new allocations after last one and doesn't reuse space from allocations freed in
- between. It trades memory consumption for simplified algorithm and data
- structure, which has better performance and uses less memory for metadata.
-
- By using this flag, you can achieve behavior of free-at-once, stack,
- ring buffer, and double stack. For details, see documentation chapter
- \ref linear_algorithm.
-
- When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default).
-
- For more details, see [Linear allocation algorithm](@ref linear_algorithm).
- */
- VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
-
- /** \brief Enables alternative, buddy allocation algorithm in this pool.
-
- It operates on a tree of blocks, each having size that is a power of two and
- a half of its parent's size. Comparing to default algorithm, this one provides
- faster allocation and deallocation and decreased external fragmentation,
- at the expense of more memory wasted (internal fragmentation).
-
- For more details, see [Buddy allocation algorithm](@ref buddy_algorithm).
- */
- VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
-
- /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
- */
- VMA_POOL_CREATE_ALGORITHM_MASK =
- VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
- VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
-
- VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaPoolCreateFlagBits;
-typedef VkFlags VmaPoolCreateFlags;
-
-/** \brief Describes parameter of created #VmaPool.
-*/
-typedef struct VmaPoolCreateInfo {
- /** \brief Vulkan memory type index to allocate this pool from.
- */
- uint32_t memoryTypeIndex;
- /** \brief Use combination of #VmaPoolCreateFlagBits.
- */
- VmaPoolCreateFlags flags;
- /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
-
- Specify nonzero to set explicit, constant size of memory blocks used by this
- pool.
-
- Leave 0 to use default and let the library manage block sizes automatically.
- Sizes of particular blocks may vary.
- */
- VkDeviceSize blockSize;
- /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
-
- Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
- */
- size_t minBlockCount;
- /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
-
- Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
-
- Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
- throughout whole lifetime of this pool.
- */
- size_t maxBlockCount;
- /** \brief Maximum number of additional frames that are in use at the same time as current frame.
-
- This value is used only when you make allocations with
- #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
- lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
-
- For example, if you double-buffer your command buffers, so resources used for
- rendering in previous frame may still be in use by the GPU at the moment you
- allocate resources needed for the current frame, set this value to 1.
-
- If you want to allow any allocations other than used in the current frame to
- become lost, set this value to 0.
- */
- uint32_t frameInUseCount;
-} VmaPoolCreateInfo;
-
-/** \brief Describes parameter of existing #VmaPool.
-*/
-typedef struct VmaPoolStats {
- /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
- */
- VkDeviceSize size;
- /** \brief Total number of bytes in the pool not used by any #VmaAllocation.
- */
- VkDeviceSize unusedSize;
- /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
- */
- size_t allocationCount;
- /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation.
- */
- size_t unusedRangeCount;
- /** \brief Size of the largest continuous free memory region available for new allocation.
-
- Making a new allocation of that size is not guaranteed to succeed because of
- possible additional margin required to respect alignment and buffer/image
- granularity.
- */
- VkDeviceSize unusedRangeSizeMax;
- /** \brief Number of `VkDeviceMemory` blocks allocated for this pool.
- */
- size_t blockCount;
-} VmaPoolStats;
-
-/** \brief Allocates Vulkan device memory and creates #VmaPool object.
-
-@param allocator Allocator object.
-@param pCreateInfo Parameters of pool to create.
-@param[out] pPool Handle to created pool.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
- VmaAllocator allocator,
- const VmaPoolCreateInfo* pCreateInfo,
- VmaPool* pPool);
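-
-/* A sketch of creating and destroying a custom pool, continuing the memory type
-index sketch above; block size and counts are only examples:
-
-\code
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB blocks
-poolCreateInfo.minBlockCount = 1;
-poolCreateInfo.maxBlockCount = 0; // 0 = no limit
-
-VmaPool pool;
-VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
-
-// Allocations are then made by setting VmaAllocationCreateInfo::pool = pool.
-
-vmaDestroyPool(allocator, pool);
-\endcode
-*/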
-
-/** \brief Destroys #VmaPool object and frees Vulkan device memory.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
- VmaAllocator allocator,
- VmaPool pool);
-
-/** \brief Retrieves statistics of existing #VmaPool object.
-
-@param allocator Allocator object.
-@param pool Pool object.
-@param[out] pPoolStats Statistics of specified pool.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
- VmaAllocator allocator,
- VmaPool pool,
- VmaPoolStats* pPoolStats);
-
-/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now.
-
-@param allocator Allocator object.
-@param pool Pool.
-@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
- VmaAllocator allocator,
- VmaPool pool,
- size_t* pLostAllocationCount);
-
-/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
-
-Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
-`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
-`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
-
-Possible return values:
-
-- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
-- `VK_SUCCESS` - corruption detection has been performed and succeeded.
-- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
- `VMA_ASSERT` is also fired in that case.
-- Other value: Error returned by Vulkan, e.g. memory mapping failure.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
-
-/** \brief Retrieves name of a custom pool.
-
-After the call `ppName` is either null or points to an internally-owned null-terminated string
-containing name of the pool that was previously set. The pointer becomes invalid when the pool is
-destroyed or its name is changed using vmaSetPoolName().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
- VmaAllocator allocator,
- VmaPool pool,
- const char** ppName);
-
-/** \brief Sets name of a custom pool.
-
-`pName` can be either null or pointer to a null-terminated string with new name for the pool.
-Function makes internal copy of the string, so it can be changed or freed immediately after this call.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
- VmaAllocator allocator,
- VmaPool pool,
- const char* pName);
-
-/** \struct VmaAllocation
-\brief Represents single memory allocation.
-
-It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
-plus a unique offset.
-
-There are multiple ways to create such object.
-You need to fill structure VmaAllocationCreateInfo.
-For more information see [Choosing memory type](@ref choosing_memory_type).
-
-Although the library provides convenience functions that create Vulkan buffer or image,
-allocate memory for it and bind them together,
-binding of the allocation to a buffer or an image is out of scope of the allocation itself.
-Allocation object can exist without buffer/image bound,
-binding can be done manually by the user, and destruction of it can be done
-independently of destruction of the allocation.
-
-The object also remembers its size and some other information.
-To retrieve this information, use function vmaGetAllocationInfo() and inspect
-returned structure VmaAllocationInfo.
-
-Some kinds of allocations can be in a lost state.
-For more information, see [Lost allocations](@ref lost_allocations).
-*/
-VK_DEFINE_HANDLE(VmaAllocation)
-
-/** \brief Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
-*/
-typedef struct VmaAllocationInfo {
- /** \brief Memory type index that this allocation was allocated from.
-
- It never changes.
- */
- uint32_t memoryType;
- /** \brief Handle to Vulkan memory object.
-
- Same memory object can be shared by multiple allocations.
-
- It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
-
- If the allocation is lost, it is equal to `VK_NULL_HANDLE`.
- */
- VkDeviceMemory deviceMemory;
- /** \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
-
- It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
- */
- VkDeviceSize offset;
- /** \brief Size of this allocation, in bytes.
-
- It never changes, unless allocation is lost.
- */
- VkDeviceSize size;
- /** \brief Pointer to the beginning of this allocation as mapped data.
-
- If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
- created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
-
- It can change after call to vmaMapMemory(), vmaUnmapMemory().
- It can also change after call to vmaDefragment() if this allocation is passed to the function.
- */
- void* pMappedData;
- /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
-
- It can change after call to vmaSetAllocationUserData() for this allocation.
- */
- void* pUserData;
-} VmaAllocationInfo;
-
-/** \brief General purpose memory allocation.
-
-@param[out] pAllocation Handle to allocated memory.
-@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-
-You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
-
-It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
-vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
- VmaAllocator allocator,
- const VkMemoryRequirements* pVkMemoryRequirements,
- const VmaAllocationCreateInfo* pCreateInfo,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
-
-/** \brief General purpose memory allocation for multiple allocation objects at once.
-
-@param allocator Allocator object.
-@param pVkMemoryRequirements Memory requirements for each allocation.
-@param pCreateInfo Creation parameters for each allocation.
-@param allocationCount Number of allocations to make.
-@param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
-@param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
-
-You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
-
-Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
-It is just a general purpose allocation function able to make multiple allocations at once.
-It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
-
-All allocations are made using same parameters. All of them are created out of the same memory pool and type.
-If any allocation fails, all allocations already made within this function call are also freed, so that when
-returned result is not `VK_SUCCESS`, `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
- VmaAllocator allocator,
- const VkMemoryRequirements* pVkMemoryRequirements,
- const VmaAllocationCreateInfo* pCreateInfo,
- size_t allocationCount,
- VmaAllocation* pAllocations,
- VmaAllocationInfo* pAllocationInfo);
-
-/**
-@param[out] pAllocation Handle to allocated memory.
-@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-
-You should free the memory using vmaFreeMemory().
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
- VmaAllocator allocator,
- VkBuffer buffer,
- const VmaAllocationCreateInfo* pCreateInfo,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
-
-/// Function similar to vmaAllocateMemoryForBuffer().
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
- VmaAllocator allocator,
- VkImage image,
- const VmaAllocationCreateInfo* pCreateInfo,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
-
-/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-
-Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
- VmaAllocator allocator,
- VmaAllocation allocation);
-
-/** \brief Frees memory and destroys multiple allocations.
-
-Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
-It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
-vmaAllocateMemoryPages() and other functions.
-It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
-
-Allocations in `pAllocations` array can come from any memory pools and types.
-Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
- VmaAllocator allocator,
- size_t allocationCount,
- VmaAllocation* pAllocations);
-
-/** \brief Deprecated.
-
-In version 2.2.0 it used to try to change allocation's size without moving or reallocating it.
-In current version it returns `VK_SUCCESS` only if `newSize` equals current allocation's size.
-Otherwise returns `VK_ERROR_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize newSize);
-
-/** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
-
-Current parameters of the given allocation are returned in `pAllocationInfo`.
-
-This function also atomically "touches" allocation - marks it as used in current frame,
-just like vmaTouchAllocation().
-If the allocation is in lost state, `pAllocationInfo->deviceMemory == VK_NULL_HANDLE`.
-
-Although this function uses atomics and doesn't lock any mutex, so it should be quite efficient,
-you can still avoid calling it too often:
-
-- You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
- vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
- (e.g. due to defragmentation or allocation becoming lost).
-- If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VmaAllocationInfo* pAllocationInfo);
-
-/** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
-
-If the allocation has been created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
-this function returns `VK_TRUE` if it's not in lost state, so it can still be used.
-It then also atomically "touches" the allocation - marks it as used in current frame,
-so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
-
-If the allocation is in lost state, the function returns `VK_FALSE`.
-Memory of such allocation, as well as buffer or image bound to it, should not be used.
-Lost allocation and the buffer/image still need to be destroyed.
-
-If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
-this function always returns `VK_TRUE`.
-*/
-VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
- VmaAllocator allocator,
- VmaAllocation allocation);
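-
-/* A sketch of the typical per-frame check for an allocation created with
-#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT; resource recreation is left out:
-
-\code
-if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
-{
-    // The allocation is lost: destroy the old buffer/image and allocation,
-    // then create a new resource for this frame.
-}
-\endcode
-*/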
-
-/** \brief Sets pUserData in given allocation to new value.
-
-If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
-pUserData must be either null, or pointer to a null-terminated string. The function
-makes local copy of the string and sets it as allocation's `pUserData`. String
-passed as pUserData doesn't need to be valid for whole lifetime of the allocation -
-you can free it after this call. String previously pointed by allocation's
-pUserData is freed from memory.
-
-If the flag was not used, the value of pointer `pUserData` is just copied to
-allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
-as a pointer, ordinal number or some handle to your own data.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
- VmaAllocator allocator,
- VmaAllocation allocation,
- void* pUserData);
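-
-/* For example, attaching a debug name to an allocation created with
-#VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT (a sketch; the name is arbitrary):
-
-\code
-vmaSetAllocationUserData(allocator, allocation, (void*)"Mesh vertex buffer");
-\endcode
-*/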
-
-/** \brief Creates new allocation that is in lost state from the beginning.
-
-It can be useful if you need a dummy, non-null allocation.
-
-You still need to destroy created object using vmaFreeMemory().
-
-Returned allocation is not tied to any specific memory pool or memory type and
-not bound to any image or buffer. It has size = 0. It cannot be turned into
-a real, non-empty allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
- VmaAllocator allocator,
- VmaAllocation* pAllocation);
-
-/** \brief Maps memory represented by given allocation and returns pointer to it.
-
-Maps memory represented by given allocation to make it accessible to CPU code.
-When succeeded, `*ppData` contains pointer to first byte of this memory.
-If the allocation is part of bigger `VkDeviceMemory` block, the pointer is
-correctly offseted to the beginning of region assigned to this particular
-allocation.
-
-Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
-function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
-multiple times simultaneously, it is safe to call this function on allocations
-assigned to the same memory block. Actual Vulkan memory will be mapped on first
-mapping and unmapped on last unmapping.
-
-If the function succeeded, you must call vmaUnmapMemory() to unmap the
-allocation when mapping is no longer needed or before freeing the allocation, at
-the latest.
-
-It is also safe to call this function multiple times on the same allocation. You
-must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
-
-It is also safe to call this function on allocation created with
-#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
-You must still call vmaUnmapMemory() same number of times as you called
-vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
-"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
-
-This function fails when used on allocation made in memory type that is not
-`HOST_VISIBLE`.
-
-This function always fails when called for allocation that was created with
-#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
-mapped.
-
-This function doesn't automatically flush or invalidate caches.
-If the allocation is made from a memory type that is not `HOST_COHERENT`,
-you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
- VmaAllocator allocator,
- VmaAllocation allocation,
- void** ppData);
-
-/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
-
-For details, see description of vmaMapMemory().
-
-This function doesn't automatically flush or invalidate caches.
-If the allocation is made from a memory type that is not `HOST_COHERENT`,
-you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
- VmaAllocator allocator,
- VmaAllocation allocation);
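-
-/* A sketch of uploading data through a mapped allocation, assuming `allocation`
-was made in a `HOST_VISIBLE` memory type and `srcData`/`srcSize` exist:
-
-\code
-void* mappedData = nullptr;
-VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
-if(res == VK_SUCCESS)
-{
-    memcpy(mappedData, srcData, (size_t)srcSize);
-    // Needed only if the memory type is not HOST_COHERENT:
-    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
-    vmaUnmapMemory(allocator, allocation);
-}
-\endcode
-*/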
-
-/** \brief Flushes memory of given allocation.
-
-Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
-It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
-Unmap operation doesn't do that automatically.
-
-- `offset` must be relative to the beginning of allocation.
-- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
-- `offset` and `size` don't have to be aligned.
- They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
-- If `size` is 0, this call is ignored.
-- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
- this call is ignored.
-
-Warning! `offset` and `size` are relative to the contents of given `allocation`.
-If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
-Do not pass allocation's offset as `offset`!!!
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
-
-/** \brief Invalidates memory of given allocation.
-
-Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
-It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
-Map operation doesn't do that automatically.
-
-- `offset` must be relative to the beginning of allocation.
-- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
-- `offset` and `size` don't have to be aligned.
- They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
-- If `size` is 0, this call is ignored.
-- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
- this call is ignored.
-
-Warning! `offset` and `size` are relative to the contents of given `allocation`.
-If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
-Do not pass allocation's offset as `offset`!!!
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
-
-/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
-
-@param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
-
-Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
-`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
-`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
-
-Possible return values:
-
-- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
-- `VK_SUCCESS` - corruption detection has been performed and succeeded.
-- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
- `VMA_ASSERT` is also fired in that case.
-- Other value: Error returned by Vulkan, e.g. memory mapping failure.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
-
-/** \struct VmaDefragmentationContext
-\brief Opaque object that represents a started defragmentation process.
-
-Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it.
-Call function vmaDefragmentationEnd() to destroy it.
-*/
-VK_DEFINE_HANDLE(VmaDefragmentationContext)
-
-/// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
-typedef enum VmaDefragmentationFlagBits {
- VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
- VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaDefragmentationFlagBits;
-typedef VkFlags VmaDefragmentationFlags;
-
-/** \brief Parameters for defragmentation.
-
-To be used with function vmaDefragmentationBegin().
-*/
-typedef struct VmaDefragmentationInfo2 {
- /** \brief Reserved for future use. Should be 0.
- */
- VmaDefragmentationFlags flags;
- /** \brief Number of allocations in `pAllocations` array.
- */
- uint32_t allocationCount;
- /** \brief Pointer to array of allocations that can be defragmented.
-
- The array should have `allocationCount` elements.
- The array should not contain nulls.
- Elements in the array should be unique - same allocation cannot occur twice.
- It is safe to pass allocations that are in the lost state - they are ignored.
- All allocations not present in this array are considered non-moveable during this defragmentation.
- */
- VmaAllocation* pAllocations;
- /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
-
- The array should have `allocationCount` elements.
- You can pass null if you are not interested in this information.
- */
- VkBool32* pAllocationsChanged;
- /** \brief Number of pools in `pPools` array.
- */
- uint32_t poolCount;
- /** \brief Either null or pointer to array of pools to be defragmented.
-
- All the allocations in the specified pools can be moved during defragmentation
- and there is no way to check if they were really moved as in `pAllocationsChanged`,
- so you must query all the allocations in all these pools for new `VkDeviceMemory`
- and offset using vmaGetAllocationInfo() if you might need to recreate buffers
- and images bound to them.
-
- The array should have `poolCount` elements.
- The array should not contain nulls.
- Elements in the array should be unique - same pool cannot occur twice.
-
- Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
- It might be more efficient.
- */
- VmaPool* pPools;
- /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
-
- `VK_WHOLE_SIZE` means no limit.
- */
- VkDeviceSize maxCpuBytesToMove;
- /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`.
-
- `UINT32_MAX` means no limit.
- */
- uint32_t maxCpuAllocationsToMove;
- /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`.
-
- `VK_WHOLE_SIZE` means no limit.
- */
- VkDeviceSize maxGpuBytesToMove;
- /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`.
-
- `UINT32_MAX` means no limit.
- */
- uint32_t maxGpuAllocationsToMove;
- /** \brief Optional. Command buffer where GPU copy commands will be posted.
-
- If not null, it must be a valid command buffer handle that supports Transfer queue type.
- It must be in the recording state and outside of a render pass instance.
- You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd().
-
- Passing null means that only CPU defragmentation will be performed.
- */
- VkCommandBuffer commandBuffer;
-} VmaDefragmentationInfo2;
-
-typedef struct VmaDefragmentationPassMoveInfo {
- VmaAllocation allocation;
- VkDeviceMemory memory;
- VkDeviceSize offset;
-} VmaDefragmentationPassMoveInfo;
-
-/** \brief Parameters for incremental defragmentation steps.
-
-To be used with function vmaBeginDefragmentationPass().
-*/
-typedef struct VmaDefragmentationPassInfo {
- uint32_t moveCount;
- VmaDefragmentationPassMoveInfo* pMoves;
-} VmaDefragmentationPassInfo;
-
-/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
-
-\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
-*/
-typedef struct VmaDefragmentationInfo {
- /** \brief Maximum total number of bytes that can be copied while moving allocations to different places.
-
- Default is `VK_WHOLE_SIZE`, which means no limit.
- */
- VkDeviceSize maxBytesToMove;
- /** \brief Maximum number of allocations that can be moved to different place.
-
- Default is `UINT32_MAX`, which means no limit.
- */
- uint32_t maxAllocationsToMove;
-} VmaDefragmentationInfo;
-
-/** \brief Statistics returned by function vmaDefragment(). */
-typedef struct VmaDefragmentationStats {
- /// Total number of bytes that have been copied while moving allocations to different places.
- VkDeviceSize bytesMoved;
- /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
- VkDeviceSize bytesFreed;
- /// Number of allocations that have been moved to different places.
- uint32_t allocationsMoved;
- /// Number of empty `VkDeviceMemory` objects that have been released to the system.
- uint32_t deviceMemoryBlocksFreed;
-} VmaDefragmentationStats;
-
-/** \brief Begins defragmentation process.
-
-@param allocator Allocator object.
-@param pInfo Structure filled with parameters of defragmentation.
-@param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
-@param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
-@return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error.
-
-Use this function instead of old, deprecated vmaDefragment().
-
-Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd():
-
-- You should not use any of the allocations passed as `pInfo->pAllocations` or
- any allocations that belong to pools passed as `pInfo->pPools`,
- including calling vmaGetAllocationInfo(), vmaTouchAllocation(), or accessing
- their data.
-- Some mutexes protecting internal data structures may be locked, so trying to
- make or free any allocations, bind buffers or images, map memory, or launch
- another simultaneous defragmentation in between may cause stall (when done on
- another thread) or deadlock (when done on the same thread), unless you are
- 100% sure that defragmented allocations are in different pools.
-- Information returned via `pStats` and `pInfo->pAllocationsChanged` is undefined.
- It becomes valid only after the call to vmaDefragmentationEnd().
-- If `pInfo->commandBuffer` is not null, you must submit that command buffer
- and make sure it finished execution before calling vmaDefragmentationEnd().
-
-For more information and important limitations regarding defragmentation, see documentation chapter:
-[Defragmentation](@ref defragmentation).
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
- VmaAllocator allocator,
- const VmaDefragmentationInfo2* pInfo,
- VmaDefragmentationStats* pStats,
- VmaDefragmentationContext *pContext);
-
-/** \brief Ends defragmentation process.
-
-Use this function to finish defragmentation started by vmaDefragmentationBegin().
-It is safe to pass `context == null`. The function then does nothing.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
- VmaAllocator allocator,
- VmaDefragmentationContext context);
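-
-/* A CPU-only defragmentation sketch (no command buffer), assuming `allocations`
-is a std::vector<VmaAllocation> of HOST_VISIBLE allocations that may be moved;
-recreation of buffers bound to moved allocations is left out:
-
-\code
-std::vector<VkBool32> allocationsChanged(allocations.size());
-
-VmaDefragmentationInfo2 defragInfo = {};
-defragInfo.allocationCount = (uint32_t)allocations.size();
-defragInfo.pAllocations = allocations.data();
-defragInfo.pAllocationsChanged = allocationsChanged.data();
-defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
-defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
-defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-side moves only
-
-VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
-vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
-vmaDefragmentationEnd(allocator, defragCtx);
-
-// For every allocationsChanged[i] == VK_TRUE, destroy the old buffer, query
-// vmaGetAllocationInfo() and bind a new buffer with vmaBindBufferMemory().
-\endcode
-*/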
-
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
- VmaAllocator allocator,
- VmaDefragmentationContext context,
- VmaDefragmentationPassInfo* pInfo
-);
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
- VmaAllocator allocator,
- VmaDefragmentationContext context
-);
-
-/** \brief Deprecated. Compacts memory by moving allocations.
-
-@param pAllocations Array of allocations that can be moved during this compaction.
-@param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays.
-@param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information.
-@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
-@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
-@return `VK_SUCCESS` if completed, negative error code in case of error.
-
-\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
-
-This function works by moving allocations to different places (different
-`VkDeviceMemory` objects and/or different offsets) in order to optimize memory
-usage. Only allocations that are in `pAllocations` array can be moved. All other
-allocations are considered nonmovable in this call. Basic rules:
-
-- Only allocations made in memory types that have
- `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`
- flags can be compacted. You may pass other allocations but it makes no sense -
- these will never be moved.
-- Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or
- #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations
- passed to this function that come from such pools are ignored.
-- Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or
- created as dedicated allocations for any other reason are also ignored.
-- Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT
- flag can be compacted. If not persistently mapped, memory will be mapped
- temporarily inside this function if needed.
-- You must not pass same #VmaAllocation object multiple times in `pAllocations` array.
-
-The function also frees empty `VkDeviceMemory` blocks.
-
-Warning: This function may be time-consuming, so you shouldn't call it too often
-(like after every resource creation/destruction).
-You can call it on special occasions (like when reloading a game level or
-when you just destroyed a lot of objects). Calling it every frame may be OK, but
-you should measure that on your platform.
-
-For more information, see [Defragmentation](@ref defragmentation) chapter.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
- VmaAllocator allocator,
- VmaAllocation* pAllocations,
- size_t allocationCount,
- VkBool32* pAllocationsChanged,
- const VmaDefragmentationInfo *pDefragmentationInfo,
- VmaDefragmentationStats* pDefragmentationStats);
-
-/** \brief Binds buffer to allocation.
-
-Binds specified buffer to region of memory represented by specified allocation.
-Gets `VkDeviceMemory` handle and offset from the allocation.
-If you want to create a buffer, allocate memory for it and bind them together separately,
-you should use this function for binding instead of standard `vkBindBufferMemory()`,
-because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
-allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
-(which is illegal in Vulkan).
-
-It is recommended to use function vmaCreateBuffer() instead of this one.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkBuffer buffer);
-
-/** \brief Binds buffer to allocation with additional parameters.
-
-@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
-@param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
-
-This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
-
-If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize allocationLocalOffset,
- VkBuffer buffer,
- const void* pNext);
-
-/** \brief Binds image to allocation.
-
-Binds specified image to region of memory represented by specified allocation.
-Gets `VkDeviceMemory` handle and offset from the allocation.
-If you want to create an image, allocate memory for it and bind them together separately,
-you should use this function for binding instead of standard `vkBindImageMemory()`,
-because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
-allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
-(which is illegal in Vulkan).
-
-It is recommended to use function vmaCreateImage() instead of this one.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkImage image);
-
-/** \brief Binds image to allocation with additional parameters.
-
-@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
-@param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
-
-This function is similar to vmaBindImageMemory(), but it provides additional parameters.
-
-If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize allocationLocalOffset,
- VkImage image,
- const void* pNext);
-
-/**
-@param[out] pBuffer Buffer that was created.
-@param[out] pAllocation Allocation that was created.
-@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-
-This function automatically:
-
--# Creates buffer.
--# Allocates appropriate memory for it.
--# Binds the buffer with the memory.
-
-If any of these operations fail, the buffer and allocation are not created,
-the returned value is a negative error code, and *pBuffer and *pAllocation are null.
-
-If the function succeeded, you must destroy both buffer and allocation when you
-no longer need them using either convenience function vmaDestroyBuffer() or
-separately, using `vkDestroyBuffer()` and vmaFreeMemory().
-
-If VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
-VK_KHR_dedicated_allocation extension is used internally to query driver whether
-it requires or prefers the new buffer to have dedicated allocation. If yes,
-and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null
-and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
-allocation for this buffer, just like when using
-VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
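-
-A minimal usage sketch (assuming `allocator` already exists; error checking omitted):
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-
-VkBuffer buf;
-VmaAllocation alloc;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-
-// ... use the buffer ...
-
-vmaDestroyBuffer(allocator, buf, alloc);
-\endcode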
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
- VmaAllocator allocator,
- const VkBufferCreateInfo* pBufferCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- VkBuffer* pBuffer,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
-
-/** \brief Destroys Vulkan buffer and frees allocated memory.
-
-This is just a convenience function equivalent to:
-
-\code
-vkDestroyBuffer(device, buffer, allocationCallbacks);
-vmaFreeMemory(allocator, allocation);
-\endcode
-
-It is safe to pass null as buffer and/or allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
- VmaAllocator allocator,
- VkBuffer buffer,
- VmaAllocation allocation);
-
-/// Function similar to vmaCreateBuffer().
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
- VmaAllocator allocator,
- const VkImageCreateInfo* pImageCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- VkImage* pImage,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
-
-/** \brief Destroys Vulkan image and frees allocated memory.
-
-This is just a convenience function equivalent to:
-
-\code
-vkDestroyImage(device, image, allocationCallbacks);
-vmaFreeMemory(allocator, allocation);
-\endcode
-
-It is safe to pass null as image and/or allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
- VmaAllocator allocator,
- VkImage image,
- VmaAllocation allocation);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
-
-// For Visual Studio IntelliSense.
-#if defined(__cplusplus) && defined(__INTELLISENSE__)
-#define VMA_IMPLEMENTATION
-#endif
-
-#ifdef VMA_IMPLEMENTATION
-#undef VMA_IMPLEMENTATION
-
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-
-/*******************************************************************************
-CONFIGURATION SECTION
-
-Define some of these macros before each #include of this header, or change them
-here if you need behavior other than the default for your environment.
-*/
-
-/*
-Define this macro to 1 to make the library fetch pointers to Vulkan functions
-internally, like:
-
- vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
-
-Define to 0 if you are going to provide your own pointers to Vulkan functions via
-VmaAllocatorCreateInfo::pVulkanFunctions.
-*/
-#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
-#define VMA_STATIC_VULKAN_FUNCTIONS 1
-#endif
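-
-/*
-For example (a minimal sketch; which members of VmaVulkanFunctions you must fill
-depends on the features you use, and the function-pointer names below are placeholders):
-
-    #define VMA_STATIC_VULKAN_FUNCTIONS 0
-    #include "vk_mem_alloc.h" // i.e. this header
-
-    VmaVulkanFunctions vulkanFunctions = {};
-    vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory; // fetched e.g. via vkGetDeviceProcAddr
-    vulkanFunctions.vkFreeMemory = myLoadedVkFreeMemory;
-    // ... remaining members ...
-
-    VmaAllocatorCreateInfo allocatorInfo = {};
-    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
-*/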
-
-// Define this macro to 1 to make the library use STL containers instead of its own implementation.
-//#define VMA_USE_STL_CONTAINERS 1
-
-/* Set this macro to 1 to make the library include and use STL containers:
-std::pair, std::vector, std::list, std::unordered_map.
-
-Set it to 0 or leave it undefined to make the library use its own implementation of
-the containers.
-*/
-#if VMA_USE_STL_CONTAINERS
- #define VMA_USE_STL_VECTOR 1
- #define VMA_USE_STL_UNORDERED_MAP 1
- #define VMA_USE_STL_LIST 1
-#endif
-
-#ifndef VMA_USE_STL_SHARED_MUTEX
- // Compiler conforms to C++17.
- #if __cplusplus >= 201703L
- #define VMA_USE_STL_SHARED_MUTEX 1
- // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
- // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
- // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
- #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
- #define VMA_USE_STL_SHARED_MUTEX 1
- #else
- #define VMA_USE_STL_SHARED_MUTEX 0
- #endif
-#endif
-
-/*
-THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
-Library has its own container implementation.
-*/
-#if VMA_USE_STL_VECTOR
- #include <vector>
-#endif
-
-#if VMA_USE_STL_UNORDERED_MAP
- #include <unordered_map>
-#endif
-
-#if VMA_USE_STL_LIST
- #include <list>
-#endif
-
-/*
-Following headers are used in this CONFIGURATION section only, so feel free to
-remove them if not needed.
-*/
-#include <cassert> // for assert
-#include <algorithm> // for min, max
-#include <mutex>
-
-#ifndef VMA_NULL
- // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
- #define VMA_NULL nullptr
-#endif
-
-#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
-#include <cstdlib>
-void *aligned_alloc(size_t alignment, size_t size)
-{
- // alignment must be >= sizeof(void*)
- if(alignment < sizeof(void*))
- {
- alignment = sizeof(void*);
- }
-
- return memalign(alignment, size);
-}
-#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
-#include <cstdlib>
-void *aligned_alloc(size_t alignment, size_t size)
-{
- // alignment must be >= sizeof(void*)
- if(alignment < sizeof(void*))
- {
- alignment = sizeof(void*);
- }
-
- void *pointer;
- if(posix_memalign(&pointer, alignment, size) == 0)
- return pointer;
- return VMA_NULL;
-}
-#endif
-
-// If your compiler is not compatible with C++11 and the definition of the
-// aligned_alloc() function is missing, uncommenting the following line may help:
-
-//#include <malloc.h>
-
-// Normal assert to check for programmer's errors, especially in Debug configuration.
-#ifndef VMA_ASSERT
- #ifdef NDEBUG
- #define VMA_ASSERT(expr)
- #else
- #define VMA_ASSERT(expr) assert(expr)
- #endif
-#endif
-
-// Assert that will be called very often, e.g. inside data structures such as operator[].
-// Making it non-empty can make the program slow.
-#ifndef VMA_HEAVY_ASSERT
- #ifdef NDEBUG
- #define VMA_HEAVY_ASSERT(expr)
- #else
- #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
- #endif
-#endif
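-
-/*
-For example (a minimal sketch; MyEngineAssert is a placeholder for your own handler),
-both asserts can be redirected before including this header:
-
-    #define VMA_ASSERT(expr) MyEngineAssert(expr)
-    #define VMA_HEAVY_ASSERT(expr) // keep heavy asserts disabled outside of deep debugging
-*/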
-
-#ifndef VMA_ALIGN_OF
- #define VMA_ALIGN_OF(type) (__alignof(type))
-#endif
-
-#ifndef VMA_SYSTEM_ALIGNED_MALLOC
- #if defined(_WIN32)
- #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
- #else
- #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
- #endif
-#endif
-
-#ifndef VMA_SYSTEM_FREE
- #if defined(_WIN32)
- #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
- #else
- #define VMA_SYSTEM_FREE(ptr) free(ptr)
- #endif
-#endif
-
-#ifndef VMA_MIN
- #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
-#endif
-
-#ifndef VMA_MAX
- #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
-#endif
-
-#ifndef VMA_SWAP
- #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
-#endif
-
-#ifndef VMA_SORT
- #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
-#endif
-
-#ifndef VMA_DEBUG_LOG
- #define VMA_DEBUG_LOG(format, ...)
- /*
- #define VMA_DEBUG_LOG(format, ...) do { \
- printf(format, __VA_ARGS__); \
- printf("\n"); \
- } while(false)
- */
-#endif
-
-// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
-#if VMA_STATS_STRING_ENABLED
- static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
- {
- snprintf(outStr, strLen, "%u", static_cast(num));
- }
- static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
- {
- snprintf(outStr, strLen, "%llu", static_cast(num));
- }
- static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
- {
- snprintf(outStr, strLen, "%p", ptr);
- }
-#endif
-
-#ifndef VMA_MUTEX
- class VmaMutex
- {
- public:
- void Lock() { m_Mutex.lock(); }
- void Unlock() { m_Mutex.unlock(); }
- bool TryLock() { return m_Mutex.try_lock(); }
- private:
- std::mutex m_Mutex;
- };
- #define VMA_MUTEX VmaMutex
-#endif
-
-// Read-write mutex, where "read" is shared access, "write" is exclusive access.
-#ifndef VMA_RW_MUTEX
- #if VMA_USE_STL_SHARED_MUTEX
- // Use std::shared_mutex from C++17.
- #include <shared_mutex>
- class VmaRWMutex
- {
- public:
- void LockRead() { m_Mutex.lock_shared(); }
- void UnlockRead() { m_Mutex.unlock_shared(); }
- bool TryLockRead() { return m_Mutex.try_lock_shared(); }
- void LockWrite() { m_Mutex.lock(); }
- void UnlockWrite() { m_Mutex.unlock(); }
- bool TryLockWrite() { return m_Mutex.try_lock(); }
- private:
- std::shared_mutex m_Mutex;
- };
- #define VMA_RW_MUTEX VmaRWMutex
- #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
- // Use SRWLOCK from WinAPI.
- // Minimum supported client = Windows Vista, server = Windows Server 2008.
- class VmaRWMutex
- {
- public:
- VmaRWMutex() { InitializeSRWLock(&m_Lock); }
- void LockRead() { AcquireSRWLockShared(&m_Lock); }
- void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
- bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
- void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
- void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
- bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
- private:
- SRWLOCK m_Lock;
- };
- #define VMA_RW_MUTEX VmaRWMutex
- #else
- // Less efficient fallback: Use normal mutex.
- class VmaRWMutex
- {
- public:
- void LockRead() { m_Mutex.Lock(); }
- void UnlockRead() { m_Mutex.Unlock(); }
- bool TryLockRead() { return m_Mutex.TryLock(); }
- void LockWrite() { m_Mutex.Lock(); }
- void UnlockWrite() { m_Mutex.Unlock(); }
- bool TryLockWrite() { return m_Mutex.TryLock(); }
- private:
- VMA_MUTEX m_Mutex;
- };
- #define VMA_RW_MUTEX VmaRWMutex
- #endif // #if VMA_USE_STL_SHARED_MUTEX
-#endif // #ifndef VMA_RW_MUTEX
-
-/*
-If providing your own implementation, you need to implement a subset of std::atomic.
-*/
-#ifndef VMA_ATOMIC_UINT32
- #include <atomic>
- #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
-#endif
-
-#ifndef VMA_ATOMIC_UINT64
- #include <atomic>
- #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
-#endif
-
-#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
- /**
- Every allocation will have its own memory block.
- Define to 1 for debugging purposes only.
- */
- #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
-#endif
-
-#ifndef VMA_DEBUG_ALIGNMENT
- /**
- Minimum alignment of all allocations, in bytes.
- Set to more than 1 for debugging purposes only. Must be power of two.
- */
- #define VMA_DEBUG_ALIGNMENT (1)
-#endif
-
-#ifndef VMA_DEBUG_MARGIN
- /**
- Minimum margin before and after every allocation, in bytes.
- Set nonzero for debugging purposes only.
- */
- #define VMA_DEBUG_MARGIN (0)
-#endif
-
-#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
- /**
- Define this macro to 1 to automatically fill new allocations and destroyed
- allocations with some bit pattern.
- */
- #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
-#endif
-
-#ifndef VMA_DEBUG_DETECT_CORRUPTION
- /**
- Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
- enable writing magic value to the margin before and after every allocation and
- validating it, so that memory corruptions (out-of-bounds writes) are detected.
- */
- #define VMA_DEBUG_DETECT_CORRUPTION (0)
-#endif
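-
-/*
-For example (a minimal sketch), to enable corruption detection define both macros
-before including this header:
-
-    #define VMA_DEBUG_MARGIN 16
-    #define VMA_DEBUG_DETECT_CORRUPTION 1
-*/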
-
-#ifndef VMA_DEBUG_GLOBAL_MUTEX
- /**
- Set this to 1 for debugging purposes only, to enable single mutex protecting all
- entry calls to the library. Can be useful for debugging multithreading issues.
- */
- #define VMA_DEBUG_GLOBAL_MUTEX (0)
-#endif
-
-#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
- /**
- Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
- Set to more than 1 for debugging purposes only. Must be power of two.
- */
- #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
-#endif
-
-#ifndef VMA_SMALL_HEAP_MAX_SIZE
- /// Maximum size of a memory heap in Vulkan to consider it "small".
- #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
-#endif
-
-#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
- /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
- #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
-#endif
-
-#ifndef VMA_CLASS_NO_COPY
- #define VMA_CLASS_NO_COPY(className) \
- private: \
- className(const className&) = delete; \
- className& operator=(const className&) = delete;
-#endif
-
-static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
-
-// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
-static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
-
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
-
-/*******************************************************************************
-END OF CONFIGURATION
-*/
-
-static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
-
-static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
- VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
-
-// Returns number of bits set to 1 in (v).
-static inline uint32_t VmaCountBitsSet(uint32_t v)
-{
- uint32_t c = v - ((v >> 1) & 0x55555555);
- c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
- c = ((c >> 4) + c) & 0x0F0F0F0F;
- c = ((c >> 8) + c) & 0x00FF00FF;
- c = ((c >> 16) + c) & 0x0000FFFF;
- return c;
-}
-
-// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
-// Use types like uint32_t, uint64_t as T.
-template<typename T>
-static inline T VmaAlignUp(T val, T align)
-{
- return (val + align - 1) / align * align;
-}
-// Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
-// Use types like uint32_t, uint64_t as T.
-template<typename T>
-static inline T VmaAlignDown(T val, T align)
-{
- return val / align * align;
-}
-
-// Division with mathematical rounding to nearest number.
-template<typename T>
-static inline T VmaRoundDiv(T x, T y)
-{
- return (x + (y / (T)2)) / y;
-}
-
-/*
-Returns true if the given number is a power of two.
-T must be an unsigned integer, or a signed integer that is always nonnegative.
-For 0 returns true.
-*/
-template<typename T>
-inline bool VmaIsPow2(T x)
-{
- return (x & (x-1)) == 0;
-}
-
-// Returns smallest power of 2 greater or equal to v.
-static inline uint32_t VmaNextPow2(uint32_t v)
-{
- v--;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v++;
- return v;
-}
-static inline uint64_t VmaNextPow2(uint64_t v)
-{
- v--;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- v++;
- return v;
-}
-
-// Returns largest power of 2 less or equal to v.
-static inline uint32_t VmaPrevPow2(uint32_t v)
-{
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v = v ^ (v >> 1);
- return v;
-}
-static inline uint64_t VmaPrevPow2(uint64_t v)
-{
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- v = v ^ (v >> 1);
- return v;
-}
-
-static inline bool VmaStrIsEmpty(const char* pStr)
-{
- return pStr == VMA_NULL || *pStr == '\0';
-}
-
-#if VMA_STATS_STRING_ENABLED
-
-static const char* VmaAlgorithmToStr(uint32_t algorithm)
-{
- switch(algorithm)
- {
- case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
- return "Linear";
- case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
- return "Buddy";
- case 0:
- return "Default";
- default:
- VMA_ASSERT(0);
- return "";
- }
-}
-
-#endif // #if VMA_STATS_STRING_ENABLED
-
-#ifndef VMA_SORT
-
-template<typename Iterator, typename Compare>
-Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
-{
- Iterator centerValue = end; --centerValue;
- Iterator insertIndex = beg;
- for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
- {
- if(cmp(*memTypeIndex, *centerValue))
- {
- if(insertIndex != memTypeIndex)
- {
- VMA_SWAP(*memTypeIndex, *insertIndex);
- }
- ++insertIndex;
- }
- }
- if(insertIndex != centerValue)
- {
- VMA_SWAP(*insertIndex, *centerValue);
- }
- return insertIndex;
-}
-
-template<typename Iterator, typename Compare>
-void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
-{
- if(beg < end)
- {
- Iterator it = VmaQuickSortPartition(beg, end, cmp);
- VmaQuickSort(beg, it, cmp);
- VmaQuickSort(it + 1, end, cmp);
- }
-}
-
-#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
-
-#endif // #ifndef VMA_SORT
-
-/*
-Returns true if two memory blocks occupy overlapping pages.
-ResourceA must be in less memory offset than ResourceB.
-
-Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
-chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
-*/
-static inline bool VmaBlocksOnSamePage(
- VkDeviceSize resourceAOffset,
- VkDeviceSize resourceASize,
- VkDeviceSize resourceBOffset,
- VkDeviceSize pageSize)
-{
- VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
- VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
- VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
- VkDeviceSize resourceBStart = resourceBOffset;
- VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
- return resourceAEndPage == resourceBStartPage;
-}
-
-enum VmaSuballocationType
-{
- VMA_SUBALLOCATION_TYPE_FREE = 0,
- VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
- VMA_SUBALLOCATION_TYPE_BUFFER = 2,
- VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
- VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
- VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
- VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
-};
-
-/*
-Returns true if given suballocation types could conflict and must respect
-VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
-or linear image and another one is optimal image. If type is unknown, behave
-conservatively.
-*/
-static inline bool VmaIsBufferImageGranularityConflict(
- VmaSuballocationType suballocType1,
- VmaSuballocationType suballocType2)
-{
- if(suballocType1 > suballocType2)
- {
- VMA_SWAP(suballocType1, suballocType2);
- }
-
- switch(suballocType1)
- {
- case VMA_SUBALLOCATION_TYPE_FREE:
- return false;
- case VMA_SUBALLOCATION_TYPE_UNKNOWN:
- return true;
- case VMA_SUBALLOCATION_TYPE_BUFFER:
- return
- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
- case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
- return
- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
- case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
- return
- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
- case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
- return false;
- default:
- VMA_ASSERT(0);
- return true;
- }
-}
-
-static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
-{
-#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
- uint32_t* pDst = (uint32_t*)((char*)pData + offset);
- const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
- for(size_t i = 0; i < numberCount; ++i, ++pDst)
- {
- *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
- }
-#else
- // no-op
-#endif
-}
-
-static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
-{
-#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
- const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
- const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
- for(size_t i = 0; i < numberCount; ++i, ++pSrc)
- {
- if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
- {
- return false;
- }
- }
-#endif
- return true;
-}
-
-/*
-Fills structure with parameters of an example buffer to be used for transfers
-during GPU memory defragmentation.
-*/
-static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
-{
- memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
- outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
- outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
-}
-
-// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
-struct VmaMutexLock
-{
- VMA_CLASS_NO_COPY(VmaMutexLock)
-public:
- VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
- m_pMutex(useMutex ? &mutex : VMA_NULL)
- { if(m_pMutex) { m_pMutex->Lock(); } }
- ~VmaMutexLock()
- { if(m_pMutex) { m_pMutex->Unlock(); } }
-private:
- VMA_MUTEX* m_pMutex;
-};
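-// Typical usage (names illustrative): { VmaMutexLock lock(m_Mutex, useMutex); ... }
-// The mutex, if used, is released automatically when the scope ends.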
-
-// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
-struct VmaMutexLockRead
-{
- VMA_CLASS_NO_COPY(VmaMutexLockRead)
-public:
- VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
- m_pMutex(useMutex ? &mutex : VMA_NULL)
- { if(m_pMutex) { m_pMutex->LockRead(); } }
- ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
-private:
- VMA_RW_MUTEX* m_pMutex;
-};
-
-// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
-struct VmaMutexLockWrite
-{
- VMA_CLASS_NO_COPY(VmaMutexLockWrite)
-public:
- VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
- m_pMutex(useMutex ? &mutex : VMA_NULL)
- { if(m_pMutex) { m_pMutex->LockWrite(); } }
- ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
-private:
- VMA_RW_MUTEX* m_pMutex;
-};
-
-#if VMA_DEBUG_GLOBAL_MUTEX
- static VMA_MUTEX gDebugGlobalMutex;
- #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
-#else
- #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
-#endif
-
-// Minimum size of a free suballocation to register it in the free suballocation collection.
-static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
-
-/*
-Performs binary search and returns iterator to first element that is greater or
-equal to (key), according to comparison (cmp).
-
-Cmp should return true if first argument is less than second argument.
-
-Returned value is the found element, if present in the collection, or otherwise the
-place where a new element with value (key) should be inserted.
-*/
-template<typename CmpLess, typename IterT, typename KeyT>
-static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
-{
- size_t down = 0, up = (end - beg);
- while(down < up)
- {
- const size_t mid = (down + up) / 2;
- if(cmp(*(beg+mid), key))
- {
- down = mid + 1;
- }
- else
- {
- up = mid;
- }
- }
- return beg + down;
-}
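-// Worked example (illustrative): for a sorted array {1, 3, 5, 7}, key 4 and cmp = operator<,
-// the returned iterator points at 5, i.e. the position where 4 would be inserted.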
-
-template<typename CmpLess, typename IterT, typename KeyT>
-IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
-{
- IterT it = VmaBinaryFindFirstNotLess(
- beg, end, value, cmp);
- if(it == end ||
- (!cmp(*it, value) && !cmp(value, *it)))
- {
- return it;
- }
- return end;
-}
-
-/*
-Returns true if all pointers in the array are not-null and unique.
-Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
-T must be pointer type, e.g. VmaAllocation, VmaPool.
-*/
-template<typename T>
-static bool VmaValidatePointerArray(uint32_t count, const T* arr)
-{
- for(uint32_t i = 0; i < count; ++i)
- {
- const T iPtr = arr[i];
- if(iPtr == VMA_NULL)
- {
- return false;
- }
- for(uint32_t j = i + 1; j < count; ++j)
- {
- if(iPtr == arr[j])
- {
- return false;
- }
- }
- }
- return true;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Memory allocation
-
-static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
-{
- if((pAllocationCallbacks != VMA_NULL) &&
- (pAllocationCallbacks->pfnAllocation != VMA_NULL))
- {
- return (*pAllocationCallbacks->pfnAllocation)(
- pAllocationCallbacks->pUserData,
- size,
- alignment,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- }
- else
- {
- return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
- }
-}
-
-static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
-{
- if((pAllocationCallbacks != VMA_NULL) &&
- (pAllocationCallbacks->pfnFree != VMA_NULL))
- {
- (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
- }
- else
- {
- VMA_SYSTEM_FREE(ptr);
- }
-}
-
-template<typename T>
-static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
-{
- return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
-}
-
-template<typename T>
-static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
-{
- return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
-}
-
-#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
-
-#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
-
-template<typename T>
-static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
-{
- ptr->~T();
- VmaFree(pAllocationCallbacks, ptr);
-}
-
-template<typename T>
-static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
-{
- if(ptr != VMA_NULL)
- {
- for(size_t i = count; i--; )
- {
- ptr[i].~T();
- }
- VmaFree(pAllocationCallbacks, ptr);
- }
-}
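-// Usage sketch (MyType is a placeholder): construct and destroy objects through the
-// allocation callbacks, e.g.:
-//
-//   MyType* obj = vma_new(allocs, MyType)(/* constructor arguments */);
-//   vma_delete(allocs, obj);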
-
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
-{
- if(srcStr != VMA_NULL)
- {
- const size_t len = strlen(srcStr);
- char* const result = vma_new_array(allocs, char, len + 1);
- memcpy(result, srcStr, len + 1);
- return result;
- }
- else
- {
- return VMA_NULL;
- }
-}
-
-static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
-{
- if(str != VMA_NULL)
- {
- const size_t len = strlen(str);
- vma_delete_array(allocs, str, len + 1);
- }
-}
-
-// STL-compatible allocator.
-template<typename T>
-class VmaStlAllocator
-{
-public:
- const VkAllocationCallbacks* const m_pCallbacks;
- typedef T value_type;
-
- VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
- template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
-
- T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
- void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
-
- template<typename U>
- bool operator==(const VmaStlAllocator<U>& rhs) const
- {
- return m_pCallbacks == rhs.m_pCallbacks;
- }
- template<typename U>
- bool operator!=(const VmaStlAllocator<U>& rhs) const
- {
- return m_pCallbacks != rhs.m_pCallbacks;
- }
-
- VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
-};
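-// Usage sketch: plug the allocation callbacks into the library's containers, e.g.
-//
-//   VmaStlAllocator<uint32_t> alloc(allocationCallbacks);
-//   VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(alloc);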
-
-#if VMA_USE_STL_VECTOR
-
-#define VmaVector std::vector
-
-template<typename T, typename allocatorT>
-static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
-{
- vec.insert(vec.begin() + index, item);
-}
-
-template<typename T, typename allocatorT>
-static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
-{
- vec.erase(vec.begin() + index);
-}
-
-#else // #if VMA_USE_STL_VECTOR
-
-/* Class with interface compatible with subset of std::vector.
-T must be POD because constructors and destructors are not called and memcpy is
-used for these objects. */
-template<typename T, typename AllocatorT>
-class VmaVector
-{
-public:
- typedef T value_type;
-
- VmaVector(const AllocatorT& allocator) :
- m_Allocator(allocator),
- m_pArray(VMA_NULL),
- m_Count(0),
- m_Capacity(0)
- {
- }
-
- VmaVector(size_t count, const AllocatorT& allocator) :
- m_Allocator(allocator),
- m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
- m_Count(count),
- m_Capacity(count)
- {
- }
-
- // This version of the constructor is here for compatibility with pre-C++14 std::vector.
- // value is unused.
- VmaVector(size_t count, const T& value, const AllocatorT& allocator)
- : VmaVector(count, allocator) {}
-
- VmaVector(const VmaVector& src) :
- m_Allocator(src.m_Allocator),
- m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
- m_Count(src.m_Count),
- m_Capacity(src.m_Count)
- {
- if(m_Count != 0)
- {
- memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
- }
- }
-
- ~VmaVector()
- {
- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
- }
-
- VmaVector& operator=(const VmaVector& rhs)
- {
- if(&rhs != this)
- {
- resize(rhs.m_Count);
- if(m_Count != 0)
- {
- memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
- }
- }
- return *this;
- }
-
- bool empty() const { return m_Count == 0; }
- size_t size() const { return m_Count; }
- T* data() { return m_pArray; }
- const T* data() const { return m_pArray; }
-
- T& operator[](size_t index)
- {
- VMA_HEAVY_ASSERT(index < m_Count);
- return m_pArray[index];
- }
- const T& operator[](size_t index) const
- {
- VMA_HEAVY_ASSERT(index < m_Count);
- return m_pArray[index];
- }
-
- T& front()
- {
- VMA_HEAVY_ASSERT(m_Count > 0);
- return m_pArray[0];
- }
- const T& front() const
- {
- VMA_HEAVY_ASSERT(m_Count > 0);
- return m_pArray[0];
- }
- T& back()
- {
- VMA_HEAVY_ASSERT(m_Count > 0);
- return m_pArray[m_Count - 1];
- }
- const T& back() const
- {
- VMA_HEAVY_ASSERT(m_Count > 0);
- return m_pArray[m_Count - 1];
- }
-
- void reserve(size_t newCapacity, bool freeMemory = false)
- {
- newCapacity = VMA_MAX(newCapacity, m_Count);
-
- if((newCapacity < m_Capacity) && !freeMemory)
- {
- newCapacity = m_Capacity;
- }
-
- if(newCapacity != m_Capacity)
- {
- T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
- if(m_Count != 0)
- {
- memcpy(newArray, m_pArray, m_Count * sizeof(T));
- }
- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
- m_Capacity = newCapacity;
- m_pArray = newArray;
- }
- }
-
- void resize(size_t newCount, bool freeMemory = false)
- {
- size_t newCapacity = m_Capacity;
- if(newCount > m_Capacity)
- {
- newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
- }
- else if(freeMemory)
- {
- newCapacity = newCount;
- }
-
- if(newCapacity != m_Capacity)
- {
- T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
- const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
- if(elementsToCopy != 0)
- {
- memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
- }
- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
- m_Capacity = newCapacity;
- m_pArray = newArray;
- }
-
- m_Count = newCount;
- }
-
- void clear(bool freeMemory = false)
- {
- resize(0, freeMemory);
- }
-
- void insert(size_t index, const T& src)
- {
- VMA_HEAVY_ASSERT(index <= m_Count);
- const size_t oldCount = size();
- resize(oldCount + 1);
- if(index < oldCount)
- {
- memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
- }
- m_pArray[index] = src;
- }
-
- void remove(size_t index)
- {
- VMA_HEAVY_ASSERT(index < m_Count);
- const size_t oldCount = size();
- if(index < oldCount - 1)
- {
- memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
- }
- resize(oldCount - 1);
- }
-
- void push_back(const T& src)
- {
- const size_t newIndex = size();
- resize(newIndex + 1);
- m_pArray[newIndex] = src;
- }
-
- void pop_back()
- {
- VMA_HEAVY_ASSERT(m_Count > 0);
- resize(size() - 1);
- }
-
- void push_front(const T& src)
- {
- insert(0, src);
- }
-
- void pop_front()
- {
- VMA_HEAVY_ASSERT(m_Count > 0);
- remove(0);
- }
-
- typedef T* iterator;
-
- iterator begin() { return m_pArray; }
- iterator end() { return m_pArray + m_Count; }
-
-private:
- AllocatorT m_Allocator;
- T* m_pArray;
- size_t m_Count;
- size_t m_Capacity;
-};
-
-template<typename T, typename allocatorT>
-static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
-{
- vec.insert(index, item);
-}
-
-template<typename T, typename allocatorT>
-static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
-{
- vec.remove(index);
-}
-
-#endif // #if VMA_USE_STL_VECTOR
-
-template<typename CmpLess, typename VectorT>
-size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
-{
- const size_t indexToInsert = VmaBinaryFindFirstNotLess(
- vector.data(),
- vector.data() + vector.size(),
- value,
- CmpLess()) - vector.data();
- VmaVectorInsert(vector, indexToInsert, value);
- return indexToInsert;
-}
-
-template<typename CmpLess, typename VectorT>
-bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
-{
- CmpLess comparator;
- typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
- vector.begin(),
- vector.end(),
- value,
- comparator);
- if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
- {
- size_t indexToRemove = it - vector.begin();
- VmaVectorRemove(vector, indexToRemove);
- return true;
- }
- return false;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// class VmaPoolAllocator
-
-/*
-Allocator for objects of type T using a list of arrays (pools) to speed up
-allocation. Number of elements that can be allocated is not bounded because
-allocator can create multiple blocks.
-*/
-template<typename T>
-class VmaPoolAllocator
-{
- VMA_CLASS_NO_COPY(VmaPoolAllocator)
-public:
- VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
- ~VmaPoolAllocator();
- T* Alloc();
- void Free(T* ptr);
-
-private:
- union Item
- {
- uint32_t NextFreeIndex;
- alignas(T) char Value[sizeof(T)];
- };
-
- struct ItemBlock
- {
- Item* pItems;
- uint32_t Capacity;
- uint32_t FirstFreeIndex;
- };
-
- const VkAllocationCallbacks* m_pAllocationCallbacks;
- const uint32_t m_FirstBlockCapacity;
- VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
-
- ItemBlock& CreateNewBlock();
-};
-
-template<typename T>
-VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
- m_pAllocationCallbacks(pAllocationCallbacks),
- m_FirstBlockCapacity(firstBlockCapacity),
- m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
-{
- VMA_ASSERT(m_FirstBlockCapacity > 1);
-}
-
-template<typename T>
-VmaPoolAllocator<T>::~VmaPoolAllocator()
-{
- for(size_t i = m_ItemBlocks.size(); i--; )
- vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
- m_ItemBlocks.clear();
-}
-
-template<typename T>
-T* VmaPoolAllocator<T>::Alloc()
-{
- for(size_t i = m_ItemBlocks.size(); i--; )
- {
- ItemBlock& block = m_ItemBlocks[i];
- // This block has some free items: Use first one.
- if(block.FirstFreeIndex != UINT32_MAX)
- {
- Item* const pItem = &block.pItems[block.FirstFreeIndex];
- block.FirstFreeIndex = pItem->NextFreeIndex;
- T* result = (T*)&pItem->Value;
- new(result)T(); // Explicit constructor call.
- return result;
- }
- }
-
- // No block has free item: Create new one and use it.
- ItemBlock& newBlock = CreateNewBlock();
- Item* const pItem = &newBlock.pItems[0];
- newBlock.FirstFreeIndex = pItem->NextFreeIndex;
- T* result = (T*)&pItem->Value;
- new(result)T(); // Explicit constructor call.
- return result;
-}
-
-template<typename T>
-void VmaPoolAllocator<T>::Free(T* ptr)
-{
- // Search all memory blocks to find ptr.
- for(size_t i = m_ItemBlocks.size(); i--; )
- {
- ItemBlock& block = m_ItemBlocks[i];
-
- // Casting to union.
- Item* pItemPtr;
- memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
-
- // Check if pItemPtr is in address range of this block.
- if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
- {
- ptr->~T(); // Explicit destructor call.
- const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
- pItemPtr->NextFreeIndex = block.FirstFreeIndex;
- block.FirstFreeIndex = index;
- return;
- }
- }
- VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
-}
-
-template<typename T>
-typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
-{
- const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
- m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
-
- const ItemBlock newBlock = {
- vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
- newBlockCapacity,
- 0 };
-
- m_ItemBlocks.push_back(newBlock);
-
- // Setup singly-linked list of all free items in this block.
- for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
- newBlock.pItems[i].NextFreeIndex = i + 1;
- newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
- return m_ItemBlocks.back();
-}
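-// Usage sketch (MyItem is a placeholder): objects come from pooled blocks and must be
-// returned to the same pool:
-//
-//   VmaPoolAllocator<MyItem> pool(allocationCallbacks, 32);
-//   MyItem* item = pool.Alloc(); // default-constructed in place
-//   pool.Free(item);             // destroyed and its slot reused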
-
-////////////////////////////////////////////////////////////////////////////////
-// class VmaRawList, VmaList
-
-#if VMA_USE_STL_LIST
-
-#define VmaList std::list
-
-#else // #if VMA_USE_STL_LIST
-
-template<typename T>
-struct VmaListItem
-{
- VmaListItem* pPrev;
- VmaListItem* pNext;
- T Value;
-};
-
-// Doubly linked list.
-template<typename T>
-class VmaRawList
-{
- VMA_CLASS_NO_COPY(VmaRawList)
-public:
- typedef VmaListItem<T> ItemType;
-
- VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
- ~VmaRawList();
- void Clear();
-
- size_t GetCount() const { return m_Count; }
- bool IsEmpty() const { return m_Count == 0; }
-
- ItemType* Front() { return m_pFront; }
- const ItemType* Front() const { return m_pFront; }
- ItemType* Back() { return m_pBack; }
- const ItemType* Back() const { return m_pBack; }
-
- ItemType* PushBack();
- ItemType* PushFront();
- ItemType* PushBack(const T& value);
- ItemType* PushFront(const T& value);
- void PopBack();
- void PopFront();
-
- // Item can be null - it means PushBack.
- ItemType* InsertBefore(ItemType* pItem);
- // Item can be null - it means PushFront.
- ItemType* InsertAfter(ItemType* pItem);
-
- ItemType* InsertBefore(ItemType* pItem, const T& value);
- ItemType* InsertAfter(ItemType* pItem, const T& value);
-
- void Remove(ItemType* pItem);
-
-private:
- const VkAllocationCallbacks* const m_pAllocationCallbacks;
- VmaPoolAllocator<ItemType> m_ItemAllocator;
- ItemType* m_pFront;
- ItemType* m_pBack;
- size_t m_Count;
-};
-
-template<typename T>
-VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
- m_pAllocationCallbacks(pAllocationCallbacks),
- m_ItemAllocator(pAllocationCallbacks, 128),
- m_pFront(VMA_NULL),
- m_pBack(VMA_NULL),
- m_Count(0)
-{
-}
-
-template<typename T>
-VmaRawList<T>::~VmaRawList()
-{
- // Intentionally not calling Clear, because that would cause unnecessary
- // computations to return all items to m_ItemAllocator as free.
-}
-
-template<typename T>